Skip to content

Commit

Permalink
refactor(test): more accurate update torrent benchmarks
Browse files Browse the repository at this point in the history
  • Loading branch information
mickvandijke committed Nov 2, 2023
1 parent 2f4ba9c commit 8693278
Showing 1 changed file with 84 additions and 19 deletions.
103 changes: 84 additions & 19 deletions benches/update_torrent.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,14 +3,17 @@ use std::sync::Arc;

use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes};
use criterion::{criterion_group, criterion_main, Criterion};
use futures::future;
use futures::stream::FuturesUnordered;
use futures::{future, StreamExt};
use once_cell::sync::Lazy;
use torrust_tracker::shared::bit_torrent::info_hash::InfoHash;
use torrust_tracker::shared::clock::DurationSinceUnixEpoch;
use torrust_tracker::tracker::peer::{Id, Peer};
use torrust_tracker::tracker::{statistics, Tracker};
use torrust_tracker_test_helpers::configuration;

use crate::future::FutureExt;

const PEER: Peer = Peer {
peer_id: Id([0; 20]),
peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080),
Expand All @@ -26,10 +29,10 @@ const PEER: Peer = Peer {
static INFO_HASHES: Lazy<Vec<InfoHash>> = Lazy::new(|| (0..100).map(|i| InfoHash([i; 20])).collect());

#[allow(clippy::missing_panics_doc)]
pub fn update_single_torrent_benchmark(c: &mut Criterion) {
c.bench_function("update_single_torrent_benchmark", |b| {
let rt = tokio::runtime::Runtime::new().unwrap();
pub fn add_a_single_torrent_benchmark(c: &mut Criterion) {
let rt = tokio::runtime::Runtime::new().unwrap();

c.bench_function("add_a_single_torrent_benchmark", |b| {
let tracker = Arc::new(Tracker::new(Arc::new(configuration::ephemeral()), None, statistics::Repo::new()).unwrap());

b.to_async(&rt).iter(|| async {
Expand All @@ -43,35 +46,97 @@ pub fn update_single_torrent_benchmark(c: &mut Criterion) {
}

#[allow(clippy::missing_panics_doc)]
pub fn update_multiple_torrents_simultaneously_benchmark(c: &mut Criterion) {
c.bench_function("update_multiple_torrents_simultaneously_benchmark", |b| {
let rt = tokio::runtime::Runtime::new().unwrap();
pub fn add_and_update_a_single_torrent_benchmark(c: &mut Criterion) {
let rt = tokio::runtime::Runtime::new().unwrap();

c.bench_function("add_and_update_a_single_torrent_benchmark", |b| {
let tracker = Arc::new(Tracker::new(Arc::new(configuration::ephemeral()), None, statistics::Repo::new()).unwrap());

b.to_async(&rt).iter(|| async {
// Call the function with each info hash in parallel
let tasks: Vec<_> = INFO_HASHES
.iter()
.map(|info_hash| {
const NUM_UPDATES: usize = 20;
let mut futures = FuturesUnordered::new();

for _ in 0..NUM_UPDATES {
let tracker_clone = tracker.clone();

let future = async move {
tracker_clone
.update_torrent_with_peer_and_get_stats(&INFO_HASHES[0], &PEER)
.await;
};
futures.push(future.boxed());
}

while let Some(_) = futures.next().await {}
});
});
}

#[allow(clippy::missing_panics_doc)]
/// Benchmarks inserting many distinct torrents concurrently.
///
/// Each iteration announces `PEER` once per info hash in `INFO_HASHES`,
/// so every future inserts a *new* torrent (no update contention).
pub fn add_multiple_torrents_simultaneously_benchmark(c: &mut Criterion) {
    let rt = tokio::runtime::Runtime::new().unwrap();

    c.bench_function("add_multiple_torrents_simultaneously_benchmark", |b| {
        let tracker = Arc::new(Tracker::new(Arc::new(configuration::ephemeral()), None, statistics::Repo::new()).unwrap());

        b.to_async(&rt).iter(|| async {
            let mut futures = FuturesUnordered::new();

            for info_hash in INFO_HASHES.iter() {
                // Each future needs its own handle to the shared tracker.
                let tracker_clone = tracker.clone();

                let future = async move {
                    tracker_clone
                        // `info_hash` is already `&InfoHash` (needless-borrow fix),
                        // and `PEER` is a `const`, inlined at the use site — no
                        // intermediate `.clone()`s are required.
                        .update_torrent_with_peer_and_get_stats(info_hash, &PEER)
                        .await;
                };
                futures.push(future.boxed());
            }

            // Drain every pending insert; `.is_some()` avoids clippy's
            // `redundant_pattern_matching` lint on `while let Some(_)`.
            while futures.next().await.is_some() {}
        });
    });
}

#[allow(clippy::missing_panics_doc)]
pub fn add_and_update_multiple_torrents_simultaneously_benchmark(c: &mut Criterion) {
let rt = tokio::runtime::Runtime::new().unwrap();

c.bench_function("add_and_update_multiple_torrents_simultaneously_benchmark", |b| {
let tracker = Arc::new(Tracker::new(Arc::new(configuration::ephemeral()), None, statistics::Repo::new()).unwrap());

b.to_async(&rt).iter(|| async {
const NUM_UPDATES: usize = 20;
let peer = PEER.clone();
let mut futures = FuturesUnordered::new();

for info_hash in INFO_HASHES.iter() {
for _ in 0..NUM_UPDATES {
let tracker_clone = tracker.clone();
let peer_clone = peer.clone();

tokio::spawn(async move {
tracker_clone.update_torrent_with_peer_and_get_stats(info_hash, &PEER).await;
})
})
.collect();
let future = async move {
tracker_clone
.update_torrent_with_peer_and_get_stats(&info_hash, &peer_clone)
.await;
};
futures.push(future.boxed());
}
}

future::join_all(tasks).await;
while let Some(_) = futures.next().await {}
});
});
}

// Group all four benchmarks so `criterion_main!` can run them together.
criterion_group!(
    benches,
    add_a_single_torrent_benchmark,
    add_and_update_a_single_torrent_benchmark,
    add_multiple_torrents_simultaneously_benchmark,
    add_and_update_multiple_torrents_simultaneously_benchmark
);

// Use the criterion_main macro to run the benchmarks
Expand Down

0 comments on commit 8693278

Please sign in to comment.