From db483e8c23ad394b9030a5995f8ba61f97eb6c62 Mon Sep 17 00:00:00 2001
From: Piotr Mikulski
Date: Tue, 16 Nov 2021 21:31:32 -0800
Subject: [PATCH] Add benchmarks for store reads and IBF edge insertion

---
 chain/network/Cargo.toml          |  1 +
 chain/network/benches/ibf.rs      | 68 ++++++++++++++++++++++++++++++++
 core/store/benches/store_bench.rs | 56 +++++++++++++++++++++++
 3 files changed, 125 insertions(+)
 create mode 100644 chain/network/benches/ibf.rs
 create mode 100644 core/store/benches/store_bench.rs

diff --git a/chain/network/Cargo.toml b/chain/network/Cargo.toml
index 0ee778e8ab4..81e8220af9e 100644
--- a/chain/network/Cargo.toml
+++ b/chain/network/Cargo.toml
@@ -25,6 +25,7 @@ tokio = { version = "1.1", features = ["full"] }
 tokio-stream = { version = "0.1.2", features = ["net"] }
 tokio-util = { version = "0.6", features = ["codec"] }
 tracing = "0.1.13"
+tempfile = "3"
 delay-detector = { path = "../../tools/delay_detector", optional = true}
 near-chain-configs = { path = "../../core/chain-configs" }
diff --git a/chain/network/benches/ibf.rs b/chain/network/benches/ibf.rs
new file mode 100644
index 00000000000..656b0ac2a25
--- /dev/null
+++ b/chain/network/benches/ibf.rs
@@ -0,0 +1,68 @@
+#[macro_use]
+extern crate bencher;
+
+use bencher::{black_box, Bencher};
+use std::time::{Duration, Instant};
+
+use near_network::routing::ibf_peer_set::SlotMapId;
+use near_network::routing::ibf_set::IbfSet;
+use near_primitives::errors::StorageError;
+use near_store::create_store;
+use near_store::db::DBCol::ColState;
+
+fn benchmark_adding_edges_to_ibf(bench: &mut Bencher) {
+    bench.iter(|| {
+        let mut a = IbfSet::<u64>::new(12);
+        // Insert 40 * 8 * 3 edges into a fresh IBF set.
+        for i in 0..40 * 8 * 3 {
+            a.add_edge(&(i as u64), (i + 1000000) as SlotMapId);
+        }
+    });
+}
+
+fn benchmark_db(bench: &mut Bencher) {
+    let tmp_dir = tempfile::Builder::new().prefix("_benchmark_db").tempdir().unwrap();
+    let store = create_store(tmp_dir.path());
+
+    // Populate ColState with `num_keys` random 40-byte keys and values of up to 333 random bytes.
+    let num_keys = 10_000_000;
+    let mut store_update = store.store_update();
+    let mut keys: Vec<Vec<u8>> = Vec::new();
+    for _ in 0..num_keys {
+        let key: Vec<u8> = (0..40).map(|_| rand::random::<u8>()).collect();
+        let val_len = rand::random::<u32>() % 333;
+        let val: Vec<u8> = (0..val_len).map(|_| rand::random::<u8>()).collect();
+        store_update.set(ColState, key.as_slice(), &val);
+        keys.push(key);
+    }
+    store_update.commit().unwrap();
+
+    // Measure random point lookups over the keys that were just inserted.
+    bench.iter(move || {
+        let start = Instant::now();
+        let mut got = 0;
+        for _ in 0..keys.len() {
+            let r = rand::random::<u32>() % (keys.len() as u32);
+            let key = &keys[r as usize];
+            let val =
+                store.get(ColState, key.as_ref()).map_err(|_| StorageError::StorageInternalError);
+            if let Ok(Some(x)) = val {
+                // TODO: only about half of the inserted keys come back as found; investigate.
+                black_box(x);
+                got += 1;
+            }
+        }
+        let took = start.elapsed();
+        println!(
+            "avg {:?} per op, {} ops/sec, found {}/{} keys",
+            took / (num_keys as u32),
+            (num_keys as u128) * Duration::from_secs(1).as_nanos() / took.as_nanos(),
+            got,
+            keys.len()
+        );
+    });
+}
+
+benchmark_group!(benches, benchmark_adding_edges_to_ibf, benchmark_db);
+
+benchmark_main!(benches);
diff --git a/core/store/benches/store_bench.rs b/core/store/benches/store_bench.rs
new file mode 100644
index 00000000000..656b0ac2a25
--- /dev/null
+++ b/core/store/benches/store_bench.rs
@@ -0,0 +1,56 @@
+#[macro_use]
+extern crate bencher;
+
+use bencher::{black_box, Bencher};
+use std::time::{Duration, Instant};
+
+use near_primitives::errors::StorageError;
+use near_store::create_store;
+use near_store::db::DBCol::ColState;
+
+fn benchmark_db(bench: &mut Bencher) {
+    let tmp_dir = tempfile::Builder::new().prefix("_benchmark_db").tempdir().unwrap();
+    let store = create_store(tmp_dir.path());
+
+    // Populate ColState with `num_keys` random 40-byte keys and values of up to 333 random bytes.
+    let num_keys = 10_000_000;
+    let mut store_update = store.store_update();
+    let mut keys: Vec<Vec<u8>> = Vec::new();
+    for _ in 0..num_keys {
+        let key: Vec<u8> = (0..40).map(|_| rand::random::<u8>()).collect();
+        let val_len = rand::random::<u32>() % 333;
+        let val: Vec<u8> = (0..val_len).map(|_| rand::random::<u8>()).collect();
+        store_update.set(ColState, key.as_slice(), &val);
+        keys.push(key);
+    }
+    store_update.commit().unwrap();
+
+    // Measure random point lookups over the keys that were just inserted.
+    bench.iter(move || {
+        let start = Instant::now();
+        let mut got = 0;
+        for _ in 0..keys.len() {
+            let r = rand::random::<u32>() % (keys.len() as u32);
+            let key = &keys[r as usize];
+            let val =
+                store.get(ColState, key.as_ref()).map_err(|_| StorageError::StorageInternalError);
+            if let Ok(Some(x)) = val {
+                // TODO: only about half of the inserted keys come back as found; investigate.
+                black_box(x);
+                got += 1;
+            }
+        }
+        let took = start.elapsed();
+        println!(
+            "avg {:?} per op, {} ops/sec, found {}/{} keys",
+            took / (num_keys as u32),
+            (num_keys as u128) * Duration::from_secs(1).as_nanos() / took.as_nanos(),
+            got,
+            keys.len()
+        );
+    });
+}
+
+benchmark_group!(benches, benchmark_db);
+
+benchmark_main!(benches);
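
How to run the new benchmarks, as a sketch: this assumes `bencher`, `rand`, and `tempfile` are available to each bench target, and that the targets are registered in their crates' Cargo.toml with `harness = false` (this patch does not add those `[[bench]]` sections).

    cargo bench -p near-network --bench ibf
    cargo bench -p near-store --bench store_bench

Note that benchmark_db inserts ten million random key/value pairs (roughly 2 GB of raw data) into a temporary store before timing reads, so each run has a long setup phase.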