dev: repository benchmark uses criterion
da2ce7 committed Mar 25, 2024
1 parent 9a43815 commit 03883c0
Showing 9 changed files with 363 additions and 454 deletions.
4 changes: 3 additions & 1 deletion Cargo.lock

Some generated files are not rendered by default.

8 changes: 7 additions & 1 deletion packages/torrent-repository/Cargo.toml
@@ -16,9 +16,15 @@ rust-version.workspace = true
 version.workspace = true
 
 [dependencies]
-clap = { version = "4.4.8", features = ["derive"] }
 futures = "0.3.29"
 tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] }
 torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" }
 torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "../configuration" }
 serde = { version = "1", features = ["derive"] }
+
+[dev-dependencies]
+criterion = { version = "0", features = ["async_tokio"] }
+
+[[bench]]
+harness = false
+name = "repository_benchmark"
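
The [[bench]] target above runs with harness = false, so criterion supplies the benchmark harness. The sketch below shows the criterion wiring such a target uses: to_async (enabled by the async_tokio feature) plus iter_custom, which passes the iteration count in and expects the measured Duration back, matching the samples: u64 -> Duration shape the helpers in benches/helpers/asyn.rs take on later in this diff. It is illustrative only: the stand-in workload (a tokio::sync::RwLock<HashMap>) replaces the crate's repository types, and it is not the actual contents of benches/repository_benchmark.rs.

// Illustrative sketch, not the real repository_benchmark.rs.
use std::collections::HashMap;
use std::time::{Duration, Instant};

use criterion::{criterion_group, criterion_main, Criterion};
use tokio::sync::RwLock;

// Stand-in for the crate's async repositories: insert `samples` entries and
// report the total elapsed time, mirroring the `u64 -> Duration` helper shape.
async fn add_one_entry(samples: u64) -> Duration {
    let start = Instant::now();

    for i in 0..samples {
        let repo: RwLock<HashMap<u64, u64>> = RwLock::new(HashMap::new());
        repo.write().await.insert(i, i);
    }

    start.elapsed()
}

fn benchmark(c: &mut Criterion) {
    let rt = tokio::runtime::Runtime::new().expect("it should build a Tokio runtime");

    c.bench_function("add_one_entry", |b| {
        // `iter_custom` hands the routine the iteration count and takes the
        // measured Duration back, so criterion does no per-iteration timing itself.
        b.to_async(&rt).iter_custom(add_one_entry);
    });
}

criterion_group!(benches, benchmark);
criterion_main!(benches);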
15 changes: 0 additions & 15 deletions packages/torrent-repository/benches/helpers/args.rs

This file was deleted.
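
The deleted helper held the clap-derived CLI options that the old harness parsed at run time (the args.sleep reads removed in asyn.rs below). A reconstruction for illustration only; the original 15-line file is not shown in this diff:

use clap::Parser;

/// Hypothetical reconstruction based on the `args.sleep: Option<u64>` usage
/// removed in asyn.rs; not the original file contents.
#[derive(Parser, Debug)]
pub struct Args {
    /// Optional busy-wait per task, in nanoseconds.
    #[arg(long)]
    pub sleep: Option<u64>,
}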

199 changes: 86 additions & 113 deletions packages/torrent-repository/benches/helpers/asyn.rs
@@ -1,182 +1,155 @@
 use std::sync::Arc;
-use std::time::Duration;
+use std::time::{Duration, Instant};
 
-use clap::Parser;
 use futures::stream::FuturesUnordered;
 use torrust_tracker_primitives::info_hash::InfoHash;
 use torrust_tracker_torrent_repository::repository::RepositoryAsync;
 
-use super::args::Args;
-use super::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER};
+use super::utils::{generate_unique_info_hashes, DEFAULT_PEER};
 
-pub async fn add_one_torrent<V, T>(samples: usize) -> (Duration, Duration)
+pub async fn add_one_torrent<V, T>(samples: u64) -> Duration
 where
     V: RepositoryAsync<T> + Default,
 {
-    let mut results: Vec<Duration> = Vec::with_capacity(samples);
+    let start = Instant::now();
 
     for _ in 0..samples {
         let torrent_repository = V::default();
 
         let info_hash = InfoHash([0; 20]);
 
-        let start_time = std::time::Instant::now();
-
         torrent_repository
             .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER)
             .await;
-
-        let result = start_time.elapsed();
-
-        results.push(result);
     }
 
-    get_average_and_adjusted_average_from_results(results)
+    start.elapsed()
 }
 
 // Add one torrent ten thousand times in parallel (depending on the set worker threads)
-pub async fn update_one_torrent_in_parallel<V, T>(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration)
+pub async fn update_one_torrent_in_parallel<V, T>(runtime: &tokio::runtime::Runtime, samples: u64, sleep: Option<u64>) -> Duration
 where
     V: RepositoryAsync<T> + Default,
     Arc<V>: Clone + Send + Sync + 'static,
 {
-    let args = Args::parse();
-    let mut results: Vec<Duration> = Vec::with_capacity(samples);
-
-    for _ in 0..samples {
-        let torrent_repository = Arc::<V>::default();
-        let info_hash: &'static InfoHash = &InfoHash([0; 20]);
-        let handles = FuturesUnordered::new();
-
-        // Add the torrent/peer to the torrent repository
-        torrent_repository
-            .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER)
-            .await;
-
-        let start_time = std::time::Instant::now();
-
-        for _ in 0..10_000 {
-            let torrent_repository_clone = torrent_repository.clone();
-
-            let handle = runtime.spawn(async move {
-                torrent_repository_clone
-                    .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER)
-                    .await;
-
-                if let Some(sleep_time) = args.sleep {
-                    let start_time = std::time::Instant::now();
-
-                    while start_time.elapsed().as_nanos() < u128::from(sleep_time) {}
-                }
-            });
-
-            handles.push(handle);
-        }
-
-        // Await all tasks
-        futures::future::join_all(handles).await;
-
-        let result = start_time.elapsed();
-
-        results.push(result);
-    }
-
-    get_average_and_adjusted_average_from_results(results)
+    let torrent_repository = Arc::<V>::default();
+    let info_hash: &'static InfoHash = &InfoHash([0; 20]);
+    let handles = FuturesUnordered::new();
+
+    // Add the torrent/peer to the torrent repository
+    torrent_repository
+        .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER)
+        .await;
+
+    let start = Instant::now();
+
+    for _ in 0..samples {
+        let torrent_repository_clone = torrent_repository.clone();
+
+        let handle = runtime.spawn(async move {
+            torrent_repository_clone
+                .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER)
+                .await;
+
+            if let Some(sleep_time) = sleep {
+                let start_time = std::time::Instant::now();
+
+                while start_time.elapsed().as_nanos() < u128::from(sleep_time) {}
+            }
+        });
+
+        handles.push(handle);
+    }
+
+    // Await all tasks
+    futures::future::join_all(handles).await;
+
+    start.elapsed()
 }
 
 // Add ten thousand torrents in parallel (depending on the set worker threads)
-pub async fn add_multiple_torrents_in_parallel<V, T>(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration)
+pub async fn add_multiple_torrents_in_parallel<V, T>(
+    runtime: &tokio::runtime::Runtime,
+    samples: u64,
+    sleep: Option<u64>,
+) -> Duration
 where
     V: RepositoryAsync<T> + Default,
     Arc<V>: Clone + Send + Sync + 'static,
 {
-    let args = Args::parse();
-    let mut results: Vec<Duration> = Vec::with_capacity(samples);
-
-    for _ in 0..samples {
-        let torrent_repository = Arc::<V>::default();
-        let info_hashes = generate_unique_info_hashes(10_000);
-        let handles = FuturesUnordered::new();
-
-        let start_time = std::time::Instant::now();
-
-        for info_hash in info_hashes {
-            let torrent_repository_clone = torrent_repository.clone();
-
-            let handle = runtime.spawn(async move {
-                torrent_repository_clone
-                    .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER)
-                    .await;
-
-                if let Some(sleep_time) = args.sleep {
-                    let start_time = std::time::Instant::now();
-
-                    while start_time.elapsed().as_nanos() < u128::from(sleep_time) {}
-                }
-            });
-
-            handles.push(handle);
-        }
-
-        // Await all tasks
-        futures::future::join_all(handles).await;
-
-        let result = start_time.elapsed();
-
-        results.push(result);
-    }
-
-    get_average_and_adjusted_average_from_results(results)
+    let torrent_repository = Arc::<V>::default();
+    let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in a usize"));
+    let handles = FuturesUnordered::new();
+
+    let start = Instant::now();
+
+    for info_hash in info_hashes {
+        let torrent_repository_clone = torrent_repository.clone();
+
+        let handle = runtime.spawn(async move {
+            torrent_repository_clone
+                .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER)
+                .await;
+
+            if let Some(sleep_time) = sleep {
+                let start_time = std::time::Instant::now();
+
+                while start_time.elapsed().as_nanos() < u128::from(sleep_time) {}
+            }
+        });
+
+        handles.push(handle);
+    }
+
+    // Await all tasks
+    futures::future::join_all(handles).await;
+
+    start.elapsed()
 }
 
 // Async update ten thousand torrents in parallel (depending on the set worker threads)
-pub async fn update_multiple_torrents_in_parallel<V, T>(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration)
+pub async fn update_multiple_torrents_in_parallel<V, T>(
+    runtime: &tokio::runtime::Runtime,
+    samples: u64,
+    sleep: Option<u64>,
+) -> Duration
 where
     V: RepositoryAsync<T> + Default,
     Arc<V>: Clone + Send + Sync + 'static,
 {
-    let args = Args::parse();
-    let mut results: Vec<Duration> = Vec::with_capacity(samples);
-
-    for _ in 0..samples {
-        let torrent_repository = Arc::<V>::default();
-        let info_hashes = generate_unique_info_hashes(10_000);
-        let handles = FuturesUnordered::new();
-
-        // Add the torrents/peers to the torrent repository
-        for info_hash in &info_hashes {
-            torrent_repository
-                .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER)
-                .await;
-        }
-
-        let start_time = std::time::Instant::now();
-
-        for info_hash in info_hashes {
-            let torrent_repository_clone = torrent_repository.clone();
-
-            let handle = runtime.spawn(async move {
-                torrent_repository_clone
-                    .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER)
-                    .await;
-
-                if let Some(sleep_time) = args.sleep {
-                    let start_time = std::time::Instant::now();
-
-                    while start_time.elapsed().as_nanos() < u128::from(sleep_time) {}
-                }
-            });
-
-            handles.push(handle);
-        }
-
-        // Await all tasks
-        futures::future::join_all(handles).await;
-
-        let result = start_time.elapsed();
-
-        results.push(result);
-    }
-
-    get_average_and_adjusted_average_from_results(results)
+    let torrent_repository = Arc::<V>::default();
+    let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in usize"));
+    let handles = FuturesUnordered::new();
+
+    // Add the torrents/peers to the torrent repository
+    for info_hash in &info_hashes {
+        torrent_repository
+            .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER)
+            .await;
+    }
+
+    let start = Instant::now();
+
+    for info_hash in info_hashes {
+        let torrent_repository_clone = torrent_repository.clone();
+
+        let handle = runtime.spawn(async move {
+            torrent_repository_clone
+                .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER)
+                .await;
+
+            if let Some(sleep_time) = sleep {
+                let start_time = std::time::Instant::now();
+
+                while start_time.elapsed().as_nanos() < u128::from(sleep_time) {}
+            }
+        });
+
+        handles.push(handle);
+    }
+
+    // Await all tasks
+    futures::future::join_all(handles).await;
+
+    start.elapsed()
}
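
One design note on the diff above: the optional sleep now arrives as a plain argument instead of being parsed from the command line with clap, and the spawned tasks spin on Instant::elapsed rather than calling tokio::time::sleep, which keeps a worker thread busy for the requested nanoseconds instead of yielding back to the scheduler, presumably to simulate CPU-bound per-peer work under contention. A minimal, self-contained sketch of that busy-wait (the spin_for name and standalone framing are inventions for illustration):

use std::time::{Duration, Instant};

/// Busy-wait for roughly `nanos` nanoseconds without yielding, unlike
/// `tokio::time::sleep`, which would suspend the task and free the worker.
/// (Illustrative only; not part of the benchmark helpers.)
fn spin_for(nanos: u64) {
    let start = Instant::now();
    while start.elapsed().as_nanos() < u128::from(nanos) {}
}

fn main() {
    let started = Instant::now();
    spin_for(500_000); // spin for roughly half a millisecond
    assert!(started.elapsed() >= Duration::from_nanos(500_000));
    println!("spun for {:?}", started.elapsed());
}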
1 change: 0 additions & 1 deletion packages/torrent-repository/benches/helpers/mod.rs
@@ -1,4 +1,3 @@
-pub mod args;
 pub mod asyn;
 pub mod sync;
 pub mod utils;
