Issue
In my C++ program, I send many small (< 1 KB) UDP messages. When I measure the performance of these UDP sends, most of them complete in under 300 milliseconds, but occasionally (roughly every 150 sends) one takes about 3 seconds.
Note that these measurements are taken over a Wi-Fi network, which probably explains the occasional delays. However, since I want to support the Wi-Fi scenario, I suspect that increasing the send buffers is my first step.
My initial research pointed me to increasing the UDP buffer size via net.core.rmem_max and net.core.rmem_default, which I did:
$ sudo sysctl net.core.rmem_max
net.core.rmem_max = 26214400
$ sudo sysctl net.core.rmem_default
net.core.rmem_default = 26214400
However, this does not seem to have any impact on my program.
Next, I tried setting the buffer size in code with setsockopt(sock_fd, SOL_SOCKET, SO_SNDBUF, &size, size_len), where size is 26214400. This solved the issue - no more delayed packets. However, after examining the buffer size via getsockopt(socket_fd, SOL_SOCKET, SO_SNDBUF, &size, &size_len), I see that the buffer was set to 5228800.
Hence, my question is: what is the proper way of increasing the UDP send buffer?
My environment:
cat /etc/os-release
PRETTY_NAME="Ubuntu 22.04.1 LTS"
NAME="Ubuntu"
VERSION_ID="22.04"
VERSION="22.04.1 LTS (Jammy Jellyfish)"
This issue only appears when I use Wi-Fi, which is the use case I want to support. In other words, I do not see the occasional slow sends when testing over an Ethernet interface.
Here is my source code related to UDP:
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <unistd.h>
#include <stdexcept>
#include <string>
#include <cstdio>
#include <cstring>
#include <cstdlib>
#include <cerrno>
#include <boost/log/trivial.hpp>
#include "udp_sender.h"
void soc_get_so_snd_buf(int socket_fd)
{
    unsigned int size = 0;
    socklen_t size_len = sizeof(size);
    if (getsockopt(socket_fd, SOL_SOCKET, SO_SNDBUF, &size, &size_len) == -1) {
        std::string err_msg = "Failed to get the socket send buffer for socket: " + std::to_string(socket_fd);
        BOOST_LOG_TRIVIAL(error) << err_msg;
        throw std::runtime_error(err_msg);
    }
    BOOST_LOG_TRIVIAL(debug) << "Send buffer size (SO_SNDBUF) is: " << size;
}
void soc_set_so_snd_buf(int sock_fd, size_t size)
{
    socklen_t size_len = sizeof(size);
    if (setsockopt(sock_fd, SOL_SOCKET, SO_SNDBUF, &size, size_len) == -1) {
        std::string err_msg = "Failed to set the socket send buffer to " + std::to_string(size);
        BOOST_LOG_TRIVIAL(error) << err_msg;
        throw std::runtime_error(err_msg);
    }
}
UDPSender::UDPSender(int id) : my_id(id) {
    char my_ip[100];
    sprintf(my_ip, "10.1.1.%d", id);
    send_socket_fd = socket(AF_INET, SOCK_DGRAM, 0);
    if (send_socket_fd < 0)
        throw std::runtime_error("Failed to create a send socket.");
    int enable = 1;
    if (setsockopt(send_socket_fd, SOL_SOCKET, SO_REUSEADDR, &enable, sizeof(int)) < 0) {
        throw std::runtime_error("Failed to assign SO_REUSEADDR option.");
    }
    soc_get_so_snd_buf(send_socket_fd);
    soc_set_so_snd_buf(send_socket_fd, 26214400); // Requesting 26214400 results in 5228800, which seems to solve my issue for now.
    soc_get_so_snd_buf(send_socket_fd);
    struct sockaddr_in srcAddr{};
    memset(&srcAddr, 0, sizeof(srcAddr));
    srcAddr.sin_family = AF_INET;
    srcAddr.sin_port = htons(0); // Any port will do.
    inet_pton(AF_INET, my_ip, &srcAddr.sin_addr);
    if (bind(send_socket_fd, (struct sockaddr*)&srcAddr, sizeof(srcAddr)) < 0) {
        BOOST_LOG_TRIVIAL(error) << "Failed to bind sending socket " << strerror(errno);
        throw std::runtime_error("Failed to bind a send socket.");
    }
}
UDPSender::~UDPSender() {
    close(send_socket_fd);
    send_socket_fd = -1;
}
ssize_t UDPSender::send(
        int dest_id,
        int dest_port,
        char* msg,
        size_t msg_size) const {
    char dest_ip[100];
    struct sockaddr_in dest_addr{};
    sprintf(dest_ip, "10.1.1.%d", dest_id);
    memset(&dest_addr, 0, sizeof(dest_addr));
    dest_addr.sin_family = AF_INET;
    dest_addr.sin_port = htons(dest_port);
    inet_pton(AF_INET, dest_ip, &dest_addr.sin_addr);
    ssize_t bytes_sent = sendto(
        send_socket_fd,
        msg,
        msg_size,
        0,
        (struct sockaddr*)&dest_addr, sizeof(dest_addr));
    if (bytes_sent < 0) {
        std::string error_msg = "Failed to send from: " +
            std::to_string(my_id) +
            " to: " +
            std::to_string(dest_id) + " errno: " + std::to_string(errno) + " ";
        error_msg += strerror(errno);
        if (errno == EPERM) {
            error_msg += "; error was due to an iptables block rule - expected for some targets.";
            BOOST_LOG_TRIVIAL(debug) << error_msg;
            return 0;
        } else {
            BOOST_LOG_TRIVIAL(error) << error_msg;
            throw std::runtime_error(error_msg);
        }
    }
    if (bytes_sent != static_cast<ssize_t>(msg_size)) {
        BOOST_LOG_TRIVIAL(error) << "Failed to send the expected number of bytes, wanted: "
            << msg_size << " sent " << bytes_sent;
        throw std::runtime_error("Failed to send the expected number of bytes.");
    }
    return bytes_sent;
}
Tests:
#include "udp_sender.h"
#include "message_factory.h"
#include "heartbeats.h"
#include <thread>
#include <chrono>
#include "gtest/gtest.h"
#include <boost/log/trivial.hpp>
using namespace std::chrono;
TEST(UDPSenderTest, Broadcast) {
    int sender_id = 0;
    UDPSender sender = UDPSender(sender_id);
    char serialized[65000];
    Heartbeat msg = Heartbeat(sender_id, Peers{Node{2, 3}}, false);
    long serialized_size = MessageFactory::serialize_heartbeat(msg, serialized);
    auto start_cycle = high_resolution_clock::now();
    for (int i = 0; i < 256; i++) {
        auto start = high_resolution_clock::now();
        sender.send(i, 7777, serialized, serialized_size);
        auto stop = high_resolution_clock::now();
        auto duration = duration_cast<milliseconds>(stop - start);
        if (duration.count() > 300) {
            BOOST_LOG_TRIVIAL(error) << "Sent broadcast to " << i << " in " << duration.count() << " milliseconds";
        }
    }
    auto stop_cycle = high_resolution_clock::now();
    auto duration = duration_cast<milliseconds>(stop_cycle - start_cycle);
    BOOST_LOG_TRIVIAL(debug) << "Completed broadcast cycle in " << duration.count() << " milliseconds";
}
Output when I am not setting the socket options and instead rely on the Linux configuration:
[ RUN ] UDPSenderTest.Broadcast
37: [2022-10-06 09:56:08.183035] [0x00007f5f15f8b740] [debug] Send buffer size (SO_SNDBUF) is: 212992
37: [2022-10-06 09:56:11.256173] [0x00007f5f15f8b740] [debug] Sent broadcast to 247 in milliseconds 3069
37: [2022-10-06 09:56:11.256590] [0x00007f5f15f8b740] [debug] Completed broadcast cycle in 3073 milliseconds
Solution
You're setting the wrong kernel parameter.
The net.core.rmem_default and net.core.rmem_max parameters set the default and maximum sizes, respectively, of the UDP receive buffer. The corresponding parameters for the UDP send buffer are net.core.wmem_default and net.core.wmem_max.
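For comparison with the rmem queries above, the send-side parameters can be inspected and raised the same way; the 26214400 figure below simply mirrors the value used in the question rather than a recommended setting:
$ sudo sysctl net.core.wmem_max
$ sudo sysctl net.core.wmem_default
$ sudo sysctl -w net.core.wmem_max=26214400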
In general, the proper way to set the UDP send buffer for your program is with the SO_SNDBUF socket option, and the value you set will be limited by the net.core.wmem_max kernel parameter. If you need a value larger than that maximum, increase net.core.wmem_max first.
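As a rough standalone sketch of that set-then-verify flow (not the asker's UDPSender code): request a size with SO_SNDBUF using an int option value, then read it back to see what the kernel actually granted. The 26214400 figure comes from the question.
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>
#include <cstdio>

int main() {
    // Plain UDP socket, as in the question.
    int fd = socket(AF_INET, SOCK_DGRAM, 0);
    if (fd < 0) { perror("socket"); return 1; }

    int requested = 26214400; // value taken from the question
    if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &requested, sizeof(requested)) == -1)
        perror("setsockopt(SO_SNDBUF)");

    // Read back what the kernel actually allocated.
    int granted = 0;
    socklen_t len = sizeof(granted);
    if (getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &granted, &len) == -1)
        perror("getsockopt(SO_SNDBUF)");
    else
        std::printf("requested %d, kernel reports %d\n", requested, granted);

    close(fd);
    return 0;
}
Running this before and after raising net.core.wmem_max makes the cap easy to observe.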
You typically don't want to modify net.core.wmem_default or net.core.rmem_default, as that will affect other processes on the system.
Answered By - dbush