Skip to content

Commit f3491b2

Browse files
authored
Merge pull request #51 from Power2All/v4.0.7
v4.0.7
2 parents 52677cb + 0d4076e commit f3491b2

File tree

11 files changed

+149
-132
lines changed

11 files changed

+149
-132
lines changed

Cargo.lock

Lines changed: 5 additions & 5 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

Cargo.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[package]
22
name = "torrust-actix"
3-
version = "4.0.6"
3+
version = "4.0.7"
44
edition = "2021"
55
license = "AGPL-3.0"
66
authors = [

README.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,11 @@ Sentry.io support is introduced, you can enable it in the configuration and the
5959

6060
### ChangeLog
6161

62+
#### v4.0.7
63+
* Cleanup was still broken; after a major rewrite and subsequent testing, it now works as expected
64+
* Corrected the tokio threading setup for the core threads
65+
* Added a new configuration key to set the cleanup thread count; defaults to one thread per shard (256), but can be changed
66+
6267
#### v4.0.6
6368
* Fixed some clippy issues
6469
* Found a performance issue on peers cleanup

docker/Dockerfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@ FROM rust:alpine
22

33
RUN apk add git musl-dev curl pkgconfig openssl-dev openssl-libs-static
44
RUN git clone https://github.com/Power2All/torrust-actix.git /tmp/torrust-actix
5-
RUN cd /tmp/torrust-actix && git checkout tags/v4.0.6
5+
RUN cd /tmp/torrust-actix && git checkout tags/v4.0.7
66
WORKDIR /tmp/torrust-actix
77
RUN cd /tmp/torrust-actix
88
RUN cargo build --release && rm -Rf target/release/.fingerprint target/release/build target/release/deps target/release/examples target/release/incremental

src/config/impls/configuration.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@ impl Configuration {
3737
request_interval_minimum: 1800,
3838
peers_timeout: 2700,
3939
peers_cleanup_interval: 900,
40+
peers_cleanup_threads: 256,
4041
total_downloads: 0,
4142
swagger: false,
4243
prometheus_id: String::from("torrust_actix")

src/config/structs/tracker_config.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@ pub struct TrackerConfig {
1212
pub request_interval_minimum: u64,
1313
pub peers_timeout: u64,
1414
pub peers_cleanup_interval: u64,
15+
pub peers_cleanup_threads: u64,
1516
pub total_downloads: u64,
1617
pub swagger: bool,
1718
pub prometheus_id: String,

src/main.rs

Lines changed: 17 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
use std::mem;
12
use std::net::SocketAddr;
23
use std::process::exit;
34
use std::sync::Arc;
@@ -8,6 +9,7 @@ use futures_util::future::{try_join_all, TryJoinAll};
89
use log::{error, info};
910
use parking_lot::deadlock;
1011
use sentry::ClientInitGuard;
12+
use tokio::runtime::Builder;
1113
use tokio_shutdown::Shutdown;
1214
use torrust_actix::api::api::api_service;
1315
use torrust_actix::common::common::{setup_logging, shutdown_waiting, udp_check_host_and_port_used};
@@ -82,10 +84,12 @@ fn main() -> std::io::Result<()>
8284

8385
if args.import { tracker.import(&args, tracker.clone()).await; }
8486

87+
let tokio_core = Builder::new_multi_thread().thread_name("core").worker_threads(9).enable_all().build()?;
88+
8589
let tokio_shutdown = Shutdown::new().expect("shutdown creation works on first call");
8690

8791
let deadlocks_handler = tokio_shutdown.clone();
88-
tokio::spawn(async move {
92+
tokio_core.spawn(async move {
8993
info!("[BOOT] Starting thread for deadlocks...");
9094
loop {
9195
if shutdown_waiting(Duration::from_secs(10), deadlocks_handler.clone()).await {
@@ -136,12 +140,12 @@ fn main() -> std::io::Result<()>
136140
}
137141
}
138142
if !api_futures.is_empty() {
139-
tokio::spawn(async move {
143+
tokio_core.spawn(async move {
140144
let _ = try_join_all(api_futures).await;
141145
});
142146
}
143147
if !apis_futures.is_empty() {
144-
tokio::spawn(async move {
148+
tokio_core.spawn(async move {
145149
let _ = try_join_all(apis_futures).await;
146150
});
147151
}
@@ -174,12 +178,12 @@ fn main() -> std::io::Result<()>
174178
}
175179
}
176180
if !http_futures.is_empty() {
177-
tokio::spawn(async move {
181+
tokio_core.spawn(async move {
178182
let _ = try_join_all(http_futures).await;
179183
});
180184
}
181185
if !https_futures.is_empty() {
182-
tokio::spawn(async move {
186+
tokio_core.spawn(async move {
183187
let _ = try_join_all(https_futures).await;
184188
});
185189
}
@@ -199,7 +203,7 @@ fn main() -> std::io::Result<()>
199203
let stats_handler = tokio_shutdown.clone();
200204
let tracker_spawn_stats = tracker.clone();
201205
info!("[BOOT] Starting thread for console updates with {} seconds delay...", tracker_spawn_stats.config.log_console_interval);
202-
tokio::spawn(async move {
206+
tokio_core.spawn(async move {
203207
loop {
204208
tracker_spawn_stats.set_stats(StatsEvent::TimestampSave, chrono::Utc::now().timestamp() + 60i64);
205209
if shutdown_waiting(Duration::from_secs(tracker_spawn_stats.config.log_console_interval), stats_handler.clone()).await {
@@ -217,34 +221,17 @@ fn main() -> std::io::Result<()>
217221
}
218222
});
219223

220-
let cleanup_peers_handler = tokio_shutdown.clone();
221-
let tracker_spawn_cleanup_peers = tracker.clone();
222-
info!("[BOOT] Starting thread for peers cleanup with {} seconds delay...", tracker_spawn_cleanup_peers.config.tracker_config.clone().peers_cleanup_interval);
223-
tokio::spawn(async move {
224-
loop {
225-
tracker_spawn_cleanup_peers.set_stats(StatsEvent::TimestampTimeout, chrono::Utc::now().timestamp() + tracker_spawn_cleanup_peers.config.tracker_config.clone().peers_cleanup_interval as i64);
226-
if shutdown_waiting(Duration::from_secs(tracker_spawn_cleanup_peers.config.tracker_config.clone().peers_cleanup_interval), cleanup_peers_handler.clone()).await {
227-
info!("[BOOT] Shutting down thread for peers cleanup...");
228-
return;
229-
}
230-
231-
info!("[PEERS] Checking now for dead peers.");
232-
let _ = tracker_spawn_cleanup_peers.torrent_peers_cleanup(tracker_spawn_cleanup_peers.clone(), Duration::from_secs(tracker_spawn_cleanup_peers.config.tracker_config.clone().peers_timeout), tracker_spawn_cleanup_peers.config.database.clone().persistent).await;
233-
info!("[PEERS] Peers cleaned up.");
234-
235-
if tracker_spawn_cleanup_peers.config.tracker_config.clone().users_enabled {
236-
info!("[USERS] Checking now for inactive torrents in users.");
237-
tracker_spawn_cleanup_peers.clean_user_active_torrents(Duration::from_secs(tracker_spawn_cleanup_peers.config.tracker_config.clone().peers_timeout));
238-
info!("[USERS] Inactive torrents in users cleaned up.");
239-
}
240-
}
224+
let (tracker_cleanup_clone, tokio_shutdown_cleanup_clone) = (tracker.clone(), tokio_shutdown.clone());
225+
info!("[BOOT] Starting thread for peers cleanup with {} seconds delay...", tracker_cleanup_clone.config.tracker_config.clone().peers_cleanup_interval);
226+
tokio_core.spawn(async move {
227+
tracker_cleanup_clone.clone().torrents_sharding.cleanup_threads(tracker_cleanup_clone.clone(), tokio_shutdown_cleanup_clone, Duration::from_secs(tracker_cleanup_clone.config.tracker_config.clone().peers_timeout), tracker_cleanup_clone.config.database.clone().persistent).await;
241228
});
242229

243230
if tracker.config.tracker_config.clone().keys_enabled {
244231
let cleanup_keys_handler = tokio_shutdown.clone();
245232
let tracker_spawn_cleanup_keys = tracker.clone();
246233
info!("[BOOT] Starting thread for keys cleanup with {} seconds delay...", tracker_spawn_cleanup_keys.config.tracker_config.clone().keys_cleanup_interval);
247-
tokio::spawn(async move {
234+
tokio_core.spawn(async move {
248235
loop {
249236
tracker_spawn_cleanup_keys.set_stats(StatsEvent::TimestampKeysTimeout, chrono::Utc::now().timestamp() + tracker_spawn_cleanup_keys.config.tracker_config.clone().keys_cleanup_interval as i64);
250237
if shutdown_waiting(Duration::from_secs(tracker_spawn_cleanup_keys.config.tracker_config.clone().keys_cleanup_interval), cleanup_keys_handler.clone()).await {
@@ -263,7 +250,7 @@ fn main() -> std::io::Result<()>
263250
let updates_handler = tokio_shutdown.clone();
264251
let tracker_spawn_updates = tracker.clone();
265252
info!("[BOOT] Starting thread for database updates with {} seconds delay...", tracker_spawn_updates.config.database.clone().persistent_interval);
266-
tokio::spawn(async move {
253+
tokio_core.spawn(async move {
267254
loop {
268255
tracker_spawn_updates.set_stats(StatsEvent::TimestampSave, chrono::Utc::now().timestamp() + tracker_spawn_updates.config.database.clone().persistent_interval as i64);
269256
if shutdown_waiting(Duration::from_secs(tracker_spawn_updates.config.database.clone().persistent_interval), updates_handler.clone()).await {
@@ -357,6 +344,7 @@ fn main() -> std::io::Result<()>
357344
task::sleep(Duration::from_secs(1)).await;
358345

359346
info!("Server shutting down completed");
347+
mem::forget(tokio_core);
360348
Ok(())
361349
}
362350
}

src/tracker/impls/torrent_sharding.rs

Lines changed: 110 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,19 @@
1+
use std::collections::btree_map::Entry;
12
use std::collections::BTreeMap;
3+
use std::mem;
24
use std::sync::Arc;
5+
use std::time::Duration;
6+
use log::info;
37
use parking_lot::RwLock;
8+
use tokio::runtime::Builder;
9+
use tokio_shutdown::Shutdown;
10+
use crate::common::common::shutdown_waiting;
11+
use crate::stats::enums::stats_event::StatsEvent;
412
use crate::tracker::structs::info_hash::InfoHash;
13+
use crate::tracker::structs::peer_id::PeerId;
514
use crate::tracker::structs::torrent_entry::TorrentEntry;
615
use crate::tracker::structs::torrent_sharding::TorrentSharding;
16+
use crate::tracker::structs::torrent_tracker::TorrentTracker;
717

818
#[allow(dead_code)]
919
impl TorrentSharding {
@@ -274,6 +284,105 @@ impl TorrentSharding {
274284
}
275285
}
276286

287+
pub async fn cleanup_threads(&self, torrent_tracker: Arc<TorrentTracker>, shutdown: Shutdown, peer_timeout: Duration, persistent: bool)
288+
{
289+
let tokio_threading = match torrent_tracker.clone().config.tracker_config.peers_cleanup_threads {
290+
0 => {
291+
Builder::new_current_thread().thread_name("sharding").enable_all().build().unwrap()
292+
}
293+
_ => {
294+
Builder::new_multi_thread().thread_name("sharding").worker_threads(torrent_tracker.clone().config.tracker_config.peers_cleanup_threads as usize).enable_all().build().unwrap()
295+
}
296+
};
297+
for shard in 0u8..=255u8 {
298+
let torrent_tracker_clone = torrent_tracker.clone();
299+
let shutdown_clone = shutdown.clone();
300+
tokio_threading.spawn(async move {
301+
loop {
302+
if shutdown_waiting(Duration::from_secs(torrent_tracker_clone.clone().config.tracker_config.peers_cleanup_interval), shutdown_clone.clone()).await {
303+
return;
304+
}
305+
306+
let (mut torrents, mut seeds, mut peers) = (0u64, 0u64, 0u64);
307+
let shard_data = torrent_tracker_clone.clone().torrents_sharding.get_shard_content(shard);
308+
for (info_hash, torrent_entry) in shard_data.iter() {
309+
for (peer_id, torrent_peer) in torrent_entry.seeds.iter() {
310+
if torrent_peer.updated.elapsed() > peer_timeout {
311+
let shard = torrent_tracker_clone.clone().torrents_sharding.get_shard(shard).unwrap();
312+
let mut lock = shard.write();
313+
match lock.entry(*info_hash) {
314+
Entry::Vacant(_) => {}
315+
Entry::Occupied(mut o) => {
316+
if o.get_mut().seeds.remove(&peer_id).is_some() {
317+
torrent_tracker_clone.clone().update_stats(StatsEvent::Seeds, -1);
318+
seeds += 1;
319+
};
320+
if o.get_mut().peers.remove(&peer_id).is_some() {
321+
torrent_tracker_clone.clone().update_stats(StatsEvent::Peers, -1);
322+
peers += 1;
323+
};
324+
if !persistent && o.get().seeds.is_empty() && o.get().peers.is_empty() {
325+
lock.remove(info_hash);
326+
torrent_tracker_clone.clone().update_stats(StatsEvent::Torrents, -1);
327+
torrents += 1;
328+
}
329+
}
330+
}
331+
}
332+
}
333+
for (peer_id, torrent_peer) in torrent_entry.peers.iter() {
334+
if torrent_peer.updated.elapsed() > peer_timeout {
335+
let shard = torrent_tracker_clone.clone().torrents_sharding.get_shard(shard).unwrap();
336+
let mut lock = shard.write();
337+
match lock.entry(*info_hash) {
338+
Entry::Vacant(_) => {}
339+
Entry::Occupied(mut o) => {
340+
if o.get_mut().seeds.remove(&peer_id).is_some() {
341+
torrent_tracker_clone.clone().update_stats(StatsEvent::Seeds, -1);
342+
seeds += 1;
343+
};
344+
if o.get_mut().peers.remove(&peer_id).is_some() {
345+
torrent_tracker_clone.clone().update_stats(StatsEvent::Peers, -1);
346+
peers += 1;
347+
};
348+
if !persistent && o.get().seeds.is_empty() && o.get().peers.is_empty() {
349+
lock.remove(info_hash);
350+
torrent_tracker_clone.clone().update_stats(StatsEvent::Torrents, -1);
351+
torrents += 1;
352+
}
353+
}
354+
}
355+
}
356+
}
357+
}
358+
info!("[PEERS] Shard: {} - Torrents: {} - Seeds: {} - Peers: {}", shard, torrents, seeds, peers);
359+
}
360+
});
361+
}
362+
shutdown.clone().handle().await;
363+
mem::forget(tokio_threading);
364+
}
365+
366+
#[tracing::instrument(level = "debug")]
367+
pub fn contains_torrent(&self, info_hash: InfoHash) -> bool
368+
{
369+
self.get_shard_content(info_hash.0[0]).contains_key(&info_hash)
370+
}
371+
372+
#[tracing::instrument(level = "debug")]
373+
pub fn contains_peer(&self, info_hash: InfoHash, peer_id: PeerId) -> bool
374+
{
375+
match self.get_shard_content(info_hash.0[0]).get(&info_hash) {
376+
None => { false }
377+
Some(torrent_entry) => {
378+
if torrent_entry.seeds.contains_key(&peer_id) || torrent_entry.peers.contains_key(&peer_id) {
379+
return true;
380+
}
381+
false
382+
}
383+
}
384+
}
385+
277386
#[tracing::instrument(level = "debug")]
278387
#[allow(unreachable_patterns)]
279388
pub fn get_shard(&self, shard: u8) -> Option<Arc<RwLock<BTreeMap<InfoHash, TorrentEntry>>>>
@@ -542,7 +651,7 @@ impl TorrentSharding {
542651
#[tracing::instrument(level = "debug")]
543652
pub fn get_shard_content(&self, shard: u8) -> BTreeMap<InfoHash, TorrentEntry>
544653
{
545-
self.get_shard(shard).unwrap().read().clone()
654+
self.get_shard(shard).unwrap().read_recursive().clone()
546655
}
547656

548657
#[tracing::instrument(level = "debug")]

src/tracker/impls/torrent_tracker_handlers.rs

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -302,7 +302,8 @@ impl TorrentTracker {
302302
let torrent_entry = match data.remove_torrent_peer(
303303
announce_query.info_hash,
304304
announce_query.peer_id,
305-
data.config.database.clone().persistent
305+
data.config.database.clone().persistent,
306+
false
306307
) {
307308
(Some(_), None) => {
308309
TorrentEntry::new()

0 commit comments

Comments
 (0)