From 8693278ddc7bd901e8e72f1313014eccbb48ae11 Mon Sep 17 00:00:00 2001
From: Warm Beer
Date: Thu, 2 Nov 2023 11:05:21 +0100
Subject: [PATCH] refactor(test): more accurate update torrent benchmarks

---
 benches/update_torrent.rs | 103 +++++++++++++++++++++++++++++++------
 1 file changed, 84 insertions(+), 19 deletions(-)

diff --git a/benches/update_torrent.rs b/benches/update_torrent.rs
index 01840deea..cc60f5b75 100644
--- a/benches/update_torrent.rs
+++ b/benches/update_torrent.rs
@@ -3,7 +3,8 @@ use std::sync::Arc;
 
 use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes};
 use criterion::{criterion_group, criterion_main, Criterion};
-use futures::future;
+use futures::stream::FuturesUnordered;
+use futures::{future, StreamExt};
 use once_cell::sync::Lazy;
 use torrust_tracker::shared::bit_torrent::info_hash::InfoHash;
 use torrust_tracker::shared::clock::DurationSinceUnixEpoch;
@@ -11,6 +12,8 @@ use torrust_tracker::tracker::peer::{Id, Peer};
 use torrust_tracker::tracker::{statistics, Tracker};
 use torrust_tracker_test_helpers::configuration;
 
+use crate::future::FutureExt;
+
 const PEER: Peer = Peer {
     peer_id: Id([0; 20]),
     peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080),
@@ -26,10 +29,10 @@
 static INFO_HASHES: Lazy<Vec<InfoHash>> = Lazy::new(|| (0..100).map(|i| InfoHash([i; 20])).collect());
 
 #[allow(clippy::missing_panics_doc)]
-pub fn update_single_torrent_benchmark(c: &mut Criterion) {
-    c.bench_function("update_single_torrent_benchmark", |b| {
-        let rt = tokio::runtime::Runtime::new().unwrap();
+pub fn add_a_single_torrent_benchmark(c: &mut Criterion) {
+    let rt = tokio::runtime::Runtime::new().unwrap();
+    c.bench_function("add_a_single_torrent_benchmark", |b| {
 
         let tracker = Arc::new(Tracker::new(Arc::new(configuration::ephemeral()), None, statistics::Repo::new()).unwrap());
 
         b.to_async(&rt).iter(|| async {
@@ -43,26 +46,86 @@ pub fn update_single_torrent_benchmark(c: &mut Criterion) {
 }
 
 #[allow(clippy::missing_panics_doc)]
-pub fn update_multiple_torrents_simultaneously_benchmark(c: &mut Criterion) {
-    c.bench_function("update_multiple_torrents_simultaneously_benchmark", |b| {
-        let rt = tokio::runtime::Runtime::new().unwrap();
+pub fn add_and_update_a_single_torrent_benchmark(c: &mut Criterion) {
+    let rt = tokio::runtime::Runtime::new().unwrap();
+    c.bench_function("add_and_update_a_single_torrent_benchmark", |b| {
 
         let tracker = Arc::new(Tracker::new(Arc::new(configuration::ephemeral()), None, statistics::Repo::new()).unwrap());
 
         b.to_async(&rt).iter(|| async {
-            // Call the function with each info hash in parallel
-            let tasks: Vec<_> = INFO_HASHES
-                .iter()
-                .map(|info_hash| {
+            const NUM_UPDATES: usize = 20;
+            let mut futures = FuturesUnordered::new();
+
+            for _ in 0..NUM_UPDATES {
+                let tracker_clone = tracker.clone();
+
+                let future = async move {
+                    tracker_clone
+                        .update_torrent_with_peer_and_get_stats(&INFO_HASHES[0], &PEER)
+                        .await;
+                };
+                futures.push(future.boxed());
+            }
+
+            while let Some(_) = futures.next().await {}
+        });
+    });
+}
+
+#[allow(clippy::missing_panics_doc)]
+pub fn add_multiple_torrents_simultaneously_benchmark(c: &mut Criterion) {
+    let rt = tokio::runtime::Runtime::new().unwrap();
+    c.bench_function("add_multiple_torrents_simultaneously_benchmark", |b| {
+        let tracker = Arc::new(Tracker::new(Arc::new(configuration::ephemeral()), None, statistics::Repo::new()).unwrap());
+
+        b.to_async(&rt).iter(|| async {
+            let peer = PEER.clone();
+            let mut futures = FuturesUnordered::new();
+
+            for info_hash in INFO_HASHES.iter() {
+                let tracker_clone = tracker.clone();
+                let peer_clone = peer.clone();
+
+                let future = async move {
+                    tracker_clone
+                        .update_torrent_with_peer_and_get_stats(&info_hash, &peer_clone)
+                        .await;
+                };
+                futures.push(future.boxed());
+            }
+
+            while let Some(_) = futures.next().await {}
+        });
+    });
+}
+
+#[allow(clippy::missing_panics_doc)]
+pub fn add_and_update_multiple_torrents_simultaneously_benchmark(c: &mut Criterion) {
+    let rt = tokio::runtime::Runtime::new().unwrap();
+
+    c.bench_function("add_and_update_multiple_torrents_simultaneously_benchmark", |b| {
+        let tracker = Arc::new(Tracker::new(Arc::new(configuration::ephemeral()), None, statistics::Repo::new()).unwrap());
+
+        b.to_async(&rt).iter(|| async {
+            const NUM_UPDATES: usize = 20;
+            let peer = PEER.clone();
+            let mut futures = FuturesUnordered::new();
+
+            for info_hash in INFO_HASHES.iter() {
+                for _ in 0..NUM_UPDATES {
                     let tracker_clone = tracker.clone();
+                    let peer_clone = peer.clone();
 
-                    tokio::spawn(async move {
-                        tracker_clone.update_torrent_with_peer_and_get_stats(info_hash, &PEER).await;
-                    })
-                })
-                .collect();
+                    let future = async move {
+                        tracker_clone
+                            .update_torrent_with_peer_and_get_stats(&info_hash, &peer_clone)
+                            .await;
+                    };
+                    futures.push(future.boxed());
+                }
+            }
 
-            future::join_all(tasks).await;
+            while let Some(_) = futures.next().await {}
         });
     });
 }
@@ -70,8 +133,10 @@ pub fn update_multiple_torrents_simultaneously_benchmark(c: &mut Criterion) {
 // Use the criterion_group macro to group the benchmarks together
 criterion_group!(
     benches,
-    update_single_torrent_benchmark,
-    update_multiple_torrents_simultaneously_benchmark
+    add_a_single_torrent_benchmark,
+    add_and_update_a_single_torrent_benchmark,
+    add_multiple_torrents_simultaneously_benchmark,
+    add_and_update_multiple_torrents_simultaneously_benchmark
 );
 
 // Use the criterion_main macro to run the benchmarks