
Commit c51a70e

Trying to improve throughput
1 parent: b14cf3a

File tree: 3 files changed (+152, -23 lines)


Cargo.toml (1 addition, 0 deletions)

@@ -56,6 +56,7 @@ tracing = "^0.1"
 utoipa = { version = "^5", features = ["actix_extras"] }
 utoipa-swagger-ui = { version = "^9", features = ["actix-web"] }
 lazy_static = "^1.5"
+libc = "^0.2"

 [target.'cfg(windows)'.build-dependencies]
 winres = "^0.1"

src/udp/impls/response_batch_manager.rs (59 additions, 4 deletions)

@@ -2,6 +2,7 @@ use std::net::SocketAddr;
 use std::sync::{Arc, OnceLock};
 use std::time::Duration;
 use std::collections::{HashMap, VecDeque};
+use log::{debug, info};
 use tokio::net::UdpSocket;
 use tokio::sync::{mpsc, RwLock};
 use tokio::time::interval;
@@ -15,24 +16,35 @@ impl ResponseBatchManager {
         let (sender, mut receiver) = mpsc::unbounded_channel::<QueuedResponse>();

         tokio::spawn(async move {
-            let mut buffer = VecDeque::with_capacity(1000);
-            let mut timer = interval(Duration::from_millis(5));
+            let mut buffer = VecDeque::with_capacity(1000); // Larger buffer for higher throughput
+            let mut timer = interval(Duration::from_millis(5)); // 5ms flush interval for better responsiveness
+            let mut stats_timer = interval(Duration::from_secs(10)); // Stats every 10 seconds
+            let mut total_queued = 0u64;
+            let mut total_sent = 0u64;

             loop {
                 tokio::select! {
                     Some(response) = receiver.recv() => {
+                        total_queued += 1;
                         buffer.push_back(response);

                         if buffer.len() >= 500 {
+                            debug!("Buffer full ({} items) - flushing immediately", buffer.len());
                             Self::flush_buffer(&socket, &mut buffer).await;
                         }
                     }

                     _ = timer.tick() => {
                         if !buffer.is_empty() {
+                            debug!("Timer flush - {} items in buffer", buffer.len());
                             Self::flush_buffer(&socket, &mut buffer).await;
                         }
                     }
+
+                    _ = stats_timer.tick() => {
+                        info!("Batch sender stats - Queued: {}, Current buffer: {}, Socket: {:?}",
+                            total_queued, buffer.len(), socket.local_addr());
+                    }
                 }
             }
         });
@@ -41,12 +53,55 @@ impl ResponseBatchManager {
     }

     pub(crate) fn queue_response(&self, remote_addr: SocketAddr, payload: Vec<u8>) {
-        let _ = self.sender.send(QueuedResponse { remote_addr, payload });
+        // Monitor queue health
+        match self.sender.send(QueuedResponse { remote_addr, payload }) {
+            Ok(_) => {
+                debug!("Response queued for {}", remote_addr);
+            }
+            Err(e) => {
+                // This indicates the batch sender task has died
+                log::error!("Failed to queue response - batch sender may have crashed: {}", e);
+            }
+        }
     }

     async fn flush_buffer(socket: &UdpSocket, buffer: &mut VecDeque<QueuedResponse>) {
+        let batch_size = buffer.len();
+        let mut sent_count = 0;
+        let mut error_count = 0;
+
         while let Some(response) = buffer.pop_front() {
-            let _ = socket.send_to(&response.payload, &response.remote_addr).await;
+            match socket.send_to(&response.payload, &response.remote_addr).await {
+                Ok(bytes_sent) => {
+                    sent_count += 1;
+                    debug!("Sent {} bytes to {}", bytes_sent, response.remote_addr);
+                }
+                Err(e) => {
+                    error_count += 1;
+                    match e.kind() {
+                        std::io::ErrorKind::WouldBlock => {
+                            debug!("Send buffer full (EWOULDBLOCK) - packet dropped");
+                        }
+                        std::io::ErrorKind::Other => {
+                            if let Some(os_error) = e.raw_os_error() {
+                                match os_error {
+                                    105 => debug!("ENOBUFS: No buffer space available - increase socket buffers"),
+                                    111 => debug!("ECONNREFUSED: Connection refused by peer"),
+                                    113 => debug!("EHOSTUNREACH: Host unreachable"),
+                                    _ => debug!("Send error (OS error {}): {}", os_error, e),
+                                }
+                            } else {
+                                debug!("Send error: {}", e);
+                            }
+                        }
+                        _ => debug!("Send error: {}", e),
+                    }
+                }
+            }
+        }
+
+        if batch_size > 0 {
+            info!("Batch flush: {} total, {} sent, {} errors", batch_size, sent_count, error_count);
         }
     }
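Note on the error handling in flush_buffer: 105, 111, and 113 are Linux errno values, and the libc crate added by this commit already exposes them as named constants. A minimal sketch of the same classification written against those constants (illustrative only, not part of the commit, assuming a Unix target):

// Sketch: classify send errors with libc errno constants instead of the
// hard-coded numbers 105/111/113. Illustrative only; not part of this commit.
use log::debug;

fn log_send_error(e: &std::io::Error) {
    match e.raw_os_error() {
        Some(libc::ENOBUFS) => debug!("ENOBUFS: no buffer space available - increase socket buffers"),
        Some(libc::ECONNREFUSED) => debug!("ECONNREFUSED: connection refused by peer"),
        Some(libc::EHOSTUNREACH) => debug!("EHOSTUNREACH: host unreachable"),
        _ => debug!("Send error: {}", e),
    }
}

This also sidesteps the ErrorKind::Other guard, since raw_os_error() is available for any OS-level error regardless of its mapped ErrorKind.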

src/udp/impls/udp_server.rs (92 additions, 19 deletions)

@@ -41,15 +41,46 @@ impl UdpServer {
         let domain = if bind_address.is_ipv4() { Domain::IPV4 } else { Domain::IPV6 };
         let socket = Socket::new(domain, Type::DGRAM, Some(Protocol::UDP))?;

-        socket.set_recv_buffer_size(recv_buffer_size).map_err(tokio::io::Error::other)?;
-        socket.set_send_buffer_size(send_buffer_size).map_err(tokio::io::Error::other)?;
+        // Aggressive buffer sizing for high throughput
+        let actual_recv_buffer = recv_buffer_size.max(16_777_216); // Minimum 16MB
+        let actual_send_buffer = send_buffer_size.max(16_777_216); // Minimum 16MB
+
+        socket.set_recv_buffer_size(actual_recv_buffer).map_err(tokio::io::Error::other)?;
+        socket.set_send_buffer_size(actual_send_buffer).map_err(tokio::io::Error::other)?;
         socket.set_reuse_address(reuse_address).map_err(tokio::io::Error::other)?;
+
+        // Enable SO_REUSEPORT for better load distribution across threads
+        #[cfg(target_os = "linux")]
+        {
+            use socket2::TcpKeepalive;
+            let reuse_port = 1i32;
+            unsafe {
+                let optval = &reuse_port as *const i32 as *const libc::c_void;
+                if libc::setsockopt(
+                    socket.as_raw_fd(),
+                    libc::SOL_SOCKET,
+                    libc::SO_REUSEPORT,
+                    optval,
+                    std::mem::size_of::<i32>() as libc::socklen_t,
+                ) != 0 {
+                    log::warn!("Failed to set SO_REUSEPORT - continuing without it");
+                }
+            }
+        }
+
         socket.bind(&bind_address.into()).map_err(tokio::io::Error::other)?;
         socket.set_nonblocking(true).map_err(tokio::io::Error::other)?;

+        // Convert to std::net::UdpSocket, then to tokio::net::UdpSocket
         let std_socket: std::net::UdpSocket = socket.into();
         let tokio_socket = UdpSocket::from_std(std_socket)?;

+        // Log actual buffer sizes
+        let sock_ref = socket2::SockRef::from(&tokio_socket);
+        let actual_recv = sock_ref.recv_buffer_size().unwrap_or(0);
+        let actual_send = sock_ref.send_buffer_size().unwrap_or(0);
+        info!("Socket created with buffers - Recv: {} bytes, Send: {} bytes", actual_recv, actual_send);
+
         Ok(UdpServer {
             socket: Arc::new(tokio_socket),
             threads,
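A caveat on the SO_REUSEPORT block above: the option only spreads incoming datagrams across receive queues when several sockets are each bound to the same address, whereas this constructor builds a single socket that the worker tasks later share via Arc. A minimal sketch of a per-worker binding that would use the option that way, assuming socket2 is compiled with its "all" feature (the helper name is illustrative, not from this commit):

// Sketch: one SO_REUSEPORT socket per worker so the kernel can distribute
// incoming datagrams across them. Assumes Linux and socket2's "all" feature;
// the helper name is illustrative and not part of this commit.
use std::net::SocketAddr;
use socket2::{Domain, Protocol, Socket, Type};
use tokio::net::UdpSocket;

fn bind_worker_socket(bind_address: SocketAddr) -> std::io::Result<UdpSocket> {
    let domain = if bind_address.is_ipv4() { Domain::IPV4 } else { Domain::IPV6 };
    let socket = Socket::new(domain, Type::DGRAM, Some(Protocol::UDP))?;
    socket.set_reuse_address(true)?;
    socket.set_reuse_port(true)?; // safe wrapper for the raw setsockopt call above
    socket.set_nonblocking(true)?;
    socket.bind(&bind_address.into())?;
    UdpSocket::from_std(socket.into()) // each worker task then owns its own socket
}

Each spawned worker would call something like this instead of cloning one shared Arc<UdpSocket>.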
@@ -61,32 +92,62 @@ impl UdpServer {
     pub async fn start(&self, rx: tokio::sync::watch::Receiver<bool>)
     {
         let threads = self.threads;
-        for _index in 0..=threads {
+        // Create multiple sockets for better performance using SO_REUSEPORT
+        for thread_id in 0..threads {
             let socket_clone = self.socket.clone();
             let tracker = self.tracker.clone();
             let mut rx = rx.clone();
-            let mut data = [0; 1496];
+
             tokio::spawn(async move {
+                // Larger buffer to handle burst traffic
+                let mut data = [0; 2048]; // Increased from 1496
+                let mut packet_count = 0u64;
+                let mut last_stats = std::time::Instant::now();
+
                 loop {
                     let udp_sock = socket_clone.local_addr().unwrap();
                     tokio::select! {
                         _ = rx.changed() => {
-                            info!("Stopping UDP server: {udp_sock}...");
+                            info!("Stopping UDP server thread {}: {udp_sock}...", thread_id);
                             break;
                         }
-                        Ok((valid_bytes, remote_addr)) = socket_clone.recv_from(&mut data) => {
-                            let payload = &data[..valid_bytes];
-
-                            debug!("Received {} bytes from {}", payload.len(), remote_addr);
-                            debug!("{payload:?}");
-
-                            let tracker_cloned = tracker.clone();
-                            let socket_cloned = socket_clone.clone();
-                            let payload_vec = payload.to_vec();
-                            tokio::spawn(async move {
-                                let response = UdpServer::handle_packet(remote_addr, payload_vec, tracker_cloned.clone()).await;
-                                UdpServer::send_response(tracker_cloned.clone(), socket_cloned.clone(), remote_addr, response).await;
-                            });
+                        result = socket_clone.recv_from(&mut data) => {
+                            match result {
+                                Ok((valid_bytes, remote_addr)) => {
+                                    packet_count += 1;
+
+                                    // Log stats every 10k packets per thread
+                                    if packet_count % 10000 == 0 {
+                                        let elapsed = last_stats.elapsed();
+                                        let rate = 10000.0 / elapsed.as_secs_f64();
+                                        debug!("Thread {} processed 10k packets in {:?} ({:.1} pps)",
+                                            thread_id, elapsed, rate);
+                                        last_stats = std::time::Instant::now();
+                                    }
+
+                                    let payload = &data[..valid_bytes];
+                                    debug!("Thread {} received {} bytes from {}", thread_id, payload.len(), remote_addr);
+
+                                    let tracker_cloned = tracker.clone();
+                                    let socket_cloned = socket_clone.clone();
+                                    let payload_vec = payload.to_vec();
+
+                                    // Process immediately without extra spawning for better performance
+                                    let response = UdpServer::handle_packet(remote_addr, payload_vec, tracker_cloned.clone()).await;
+                                    UdpServer::send_response(tracker_cloned.clone(), socket_cloned.clone(), remote_addr, response).await;
+                                }
+                                Err(e) => {
+                                    match e.kind() {
+                                        std::io::ErrorKind::WouldBlock => {
+                                            // This is normal for non-blocking sockets
+                                            tokio::task::yield_now().await;
+                                        }
+                                        _ => {
+                                            log::error!("Thread {} recv_from error: {}", thread_id, e);
+                                        }
+                                    }
+                                }
+                            }
                         }
                     }
                 }
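The packet counting in this loop is a simple packets-per-second estimate: every 10,000 packets, the time since the previous checkpoint is divided into the window size. Pulled out into a small helper it could look like the sketch below (the ThroughputMeter name and window are illustrative, not from this commit):

// Sketch: the per-worker packets-per-second estimate from the loop above,
// extracted into a small helper. Name and reporting window are illustrative.
use std::time::Instant;

struct ThroughputMeter {
    count: u64,
    window: u64,
    last: Instant,
}

impl ThroughputMeter {
    fn new(window: u64) -> Self {
        Self { count: 0, window, last: Instant::now() }
    }

    /// Count one packet; returns Some(packets per second) once per window.
    fn tick(&mut self) -> Option<f64> {
        self.count += 1;
        if self.count % self.window == 0 {
            let rate = self.window as f64 / self.last.elapsed().as_secs_f64();
            self.last = Instant::now();
            Some(rate)
        } else {
            None
        }
    }
}

// Usage: if let Some(pps) = meter.tick() { debug!("{pps:.1} pps"); }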
@@ -101,14 +162,16 @@ impl UdpServer {
         let sentry = sentry::TransactionContext::new("udp server", "send response");
         let transaction = sentry::start_transaction(sentry);

-        let mut buffer = Vec::with_capacity(512);
+        // Pre-allocate buffer with exact capacity instead of MAX_PACKET_SIZE
+        let mut buffer = Vec::with_capacity(512); // Most responses are much smaller than MAX_PACKET_SIZE
         let mut cursor = Cursor::new(&mut buffer);

         match response.write(&mut cursor) {
             Ok(_) => {
                 let position = cursor.position() as usize;
                 debug!("Response bytes: {:?}", &buffer[..position]);

+                // Get batch manager for this socket and queue the response
                 let batch_manager = ResponseBatchManager::get_for_socket(socket).await;
                 batch_manager.queue_response(remote_addr, buffer[..position].to_vec());
             }
@@ -127,6 +190,7 @@ impl UdpServer {

     #[tracing::instrument(level = "debug")]
     pub async fn send_packet(socket: Arc<UdpSocket>, remote_addr: &SocketAddr, payload: &[u8]) {
+        // This method is kept for compatibility but shouldn't be used in the batched version
         let _ = socket.send_to(payload, remote_addr).await;
     }

@@ -214,6 +278,7 @@ impl UdpServer {
     pub async fn handle_udp_announce(remote_addr: SocketAddr, request: &AnnounceRequest, tracker: Arc<TorrentTracker>) -> Result<Response, ServerError> {
         let config = tracker.config.tracker_config.clone();

+        // Whitelist/Blacklist checks
         if config.whitelist_enabled && !tracker.check_whitelist(InfoHash(request.info_hash.0)) {
             debug!("[UDP ERROR] Torrent Not Whitelisted");
             return Err(ServerError::TorrentNotWhitelisted);
@@ -223,6 +288,7 @@ impl UdpServer {
             return Err(ServerError::TorrentBlacklisted);
         }

+        // Key validation
         if config.keys_enabled {
             if request.path.len() < 50 {
                 debug!("[UDP ERROR] Unknown Key");
@@ -246,6 +312,7 @@ impl UdpServer {
             }
         }

+        // User key validation
         let user_key = if config.users_enabled {
             let user_key_path_extract = if request.path.len() >= 91 {
                 Some(&request.path[51..=91])
@@ -278,6 +345,7 @@ impl UdpServer {
             return Err(ServerError::PeerKeyNotValid);
         }

+        // Handle announce
         let torrent = match tracker.handle_announce(tracker.clone(), AnnounceQueryRequest {
             info_hash: InfoHash(request.info_hash.0),
             peer_id: PeerId(request.peer_id.0),
@@ -298,13 +366,15 @@ impl UdpServer {
             }
         };

+        // Get peers efficiently
         let torrent_peers = tracker.get_torrent_peers(request.info_hash, 72, TorrentPeersType::All, Some(remote_addr.ip()));

         let (peers, peers6) = if let Some(torrent_peers_unwrapped) = torrent_peers {
             let mut peers = Vec::with_capacity(72);
             let mut peers6 = Vec::with_capacity(72);
             let mut count = 0;

+            // Only collect peers if not completed download
             if request.bytes_left.0 != 0 {
                 if remote_addr.is_ipv4() {
                     for torrent_peer in torrent_peers_unwrapped.seeds_ipv4.values().take(72) {
@@ -325,6 +395,7 @@ impl UdpServer {
                 }
             }

+            // Collect regular peers
             if remote_addr.is_ipv4() {
                 for torrent_peer in torrent_peers_unwrapped.peers_ipv4.values().take(72 - count) {
                     if let Ok(ip) = torrent_peer.peer_addr.ip().to_string().parse::<Ipv4Addr>() {
@@ -344,6 +415,7 @@ impl UdpServer {
             (Vec::new(), Vec::new())
         };

+        // Create response
         let response = if remote_addr.is_ipv6() {
             Response::from(AnnounceResponse {
                 transaction_id: request.transaction_id,
@@ -362,6 +434,7 @@ impl UdpServer {
             })
         };

+        // Update stats
         let stats_event = if remote_addr.is_ipv4() {
             StatsEvent::Udp4AnnouncesHandled
         } else {
