Skip to content

Commit

Permalink
refactor
Browse files Browse the repository at this point in the history
  • Loading branch information
pmnoxx committed Nov 18, 2021
1 parent db483e8 commit 76a6261
Show file tree
Hide file tree
Showing 3 changed files with 59 additions and 120 deletions.
1 change: 0 additions & 1 deletion chain/network/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,6 @@ tokio = { version = "1.1", features = ["full"] }
tokio-stream = { version = "0.1.2", features = ["net"] }
tokio-util = { version = "0.6", features = ["codec"] }
tracing = "0.1.13"
tempfile = "3"

delay-detector = { path = "../../tools/delay_detector", optional = true}
near-chain-configs = { path = "../../core/chain-configs" }
Expand Down
64 changes: 2 additions & 62 deletions chain/network/benches/ibf.rs
Original file line number Diff line number Diff line change
@@ -1,14 +1,10 @@
#[macro_use]
extern crate bencher;

use bencher::{black_box, Bencher};
use std::time::{Duration, Instant};
use bencher::Bencher;

use near_network::routing::ibf_peer_set::SlotMapId;
use near_network::routing::ibf_set::IbfSet;
use near_primitives::errors::StorageError;
use near_store::create_store;
use near_store::db::DBCol::ColState;

#[allow(dead_code)]
fn test_measure_adding_edges_to_ibf(bench: &mut Bencher) {
Expand All @@ -20,62 +16,6 @@ fn test_measure_adding_edges_to_ibf(bench: &mut Bencher) {
});
}

// Pre-refactor benchmark (deleted side of the diff): fills a temporary store
// with `num_keys` random entries, then each benchmark iteration reads
// `num_keys` random keys back and prints throughput stats.
fn benchmark_db(bench: &mut Bencher) {
    // Temp dir stays alive for the whole function, so the DB files persist
    // across the setup phase and every `bench.iter` run.
    let tmp_dir = tempfile::Builder::new().prefix("_test_clear_column").tempdir().unwrap();

    let store = create_store(tmp_dir.path());

    // 331 75 50 46
    // NOTE(review): presumably earlier timing measurements — meaning not
    // recoverable from this file.

    let num_keys = 10000000;
    let mut store_update = store.store_update();
    let mut keys: Vec<Vec<u8>> = Vec::new();
    for _x in 0..num_keys {
        // 40-byte random key.
        let key: Vec<u8> = (0..40).map(|_| rand::random::<u8>()).collect();

        // Random value length in 0..333.
        let x: u32 = rand::random();
        let x = x % 333;
        let val: Vec<u8> = (0..x).map(|_| rand::random::<u8>()).collect();
        // NOTE(review): `.clone()` on `&[u8]` only copies the reference — a no-op.
        store_update.set(ColState, key.as_slice().clone(), &val);
        //}
        // for _x in 0..num_keys {
        //     let key: Vec<u8> = (0..40).map(|_| rand::random::<u8>()).collect();
        keys.push(key);
    }
    // Single commit for all writes; panics on storage failure (benchmark code).
    store_update.commit().unwrap();

    // .. let keys = Arc::new(keys);

    bench.iter(move || {
        let start = Instant::now();
        // ..let keys = keys.clone();
        // Count of keys actually found by the random reads below.
        let mut got = 0;
        for _k in 0..keys.len() {
            // Sample keys uniformly WITH replacement.
            let r = rand::random::<u32>() % (keys.len() as u32);
            let key = &keys[r as usize];

            let val =
                store.get(ColState, key.as_ref()).map_err(|_| StorageError::StorageInternalError);

            if let Ok(Some(x)) = val {
                // there is a bug, only half of entries were returned :/
                //println!("{:?}", val);
                // val.unwrap().unwrap();
                // `black_box` keeps the read from being optimized away.
                black_box(x);
                got += 1;
            }
        }
        let took = start.elapsed();
        println!(
            "took on avg {:?} op per sec {} got {}/{}",
            // Average wall time per read.
            took / (num_keys as u32),
            // Reads per second, computed in nanoseconds to avoid rounding to 0.
            (num_keys as u128) * Duration::from_secs(1).as_nanos() / took.as_nanos(),
            got,
            keys.len()
        );
    });
}

benchmark_group!(benches, benchmark_db);
benchmark_group!(benches, test_measure_adding_edges_to_ibf);

benchmark_main!(benches);
114 changes: 57 additions & 57 deletions core/store/benches/store_bench.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,80 +2,80 @@
extern crate bencher;

use bencher::{black_box, Bencher};
use std::time::{Duration, Instant};

use near_network::routing::ibf_peer_set::SlotMapId;
use near_network::routing::ibf_set::IbfSet;
use near_primitives::borsh::maybestd::sync::Arc;
use near_primitives::errors::StorageError;
use near_store::create_store;
use near_store::db::DBCol::ColState;
use near_store::{create_store, Store};
use std::time::{Duration, Instant};

// Measure the cost of inserting a fixed batch of edges into a fresh
// `IbfSet` of size parameter 12 on every benchmark iteration.
#[allow(dead_code)]
fn test_measure_adding_edges_to_ibf(bench: &mut Bencher) {
    const EDGE_COUNT: u64 = 40 * 8 * 3;
    bench.iter(|| {
        let mut ibf = IbfSet::<u64>::new(12);
        for edge in 0..EDGE_COUNT {
            ibf.add_edge(&edge, (edge + 1000000) as SlotMapId);
        }
    });
}

fn benchmark_db(bench: &mut Bencher) {
let tmp_dir = tempfile::Builder::new().prefix("_test_clear_column").tempdir().unwrap();

let store = create_store(tmp_dir.path());

// 331 75 50 46

// try to write to db `10m` keys and then read all of them in random order
fn benchmark_write_then_read_successful(bench: &mut Bencher) {
let store = create_store_in_random_folder();
let num_keys = 10000000;
let mut store_update = store.store_update();
let mut keys: Vec<Vec<u8>> = Vec::new();
for _x in 0..num_keys {
let key: Vec<u8> = (0..40).map(|_| rand::random::<u8>()).collect();

let x: u32 = rand::random();
let x = x % 333;
let val: Vec<u8> = (0..x).map(|_| rand::random::<u8>()).collect();
store_update.set(ColState, key.as_slice().clone(), &val);
//}
// for _x in 0..num_keys {
// let key: Vec<u8> = (0..40).map(|_| rand::random::<u8>()).collect();
keys.push(key);
}
store_update.commit().unwrap();

// .. let keys = Arc::new(keys);
let keys = generate_keys(num_keys);
write_to_db(&store, &keys);

bench.iter(move || {
let start = Instant::now();
// ..let keys = keys.clone();
let mut got = 0;
for _k in 0..keys.len() {
let r = rand::random::<u32>() % (keys.len() as u32);
let key = &keys[r as usize];

let val =
store.get(ColState, key.as_ref()).map_err(|_| StorageError::StorageInternalError);

if let Ok(Some(x)) = val {
// there is a bug, only half of entries were returned :/
//println!("{:?}", val);
// val.unwrap().unwrap();
black_box(x);
got += 1;
}
}
let read_records = read_from_db(&store, &keys);
let took = start.elapsed();
println!(
"took on avg {:?} op per sec {} got {}/{}",
took / (num_keys as u32),
(num_keys as u128) * Duration::from_secs(1).as_nanos() / took.as_nanos(),
got,
read_records,
keys.len()
);
});
}

benchmark_group!(benches, benchmark_db);
// create `Store` in a random temporary folder
//
// The directory is deliberately persisted via `into_path()` instead of being
// removed when the `TempDir` guard drops: the returned `Store` keeps using the
// files on disk after this function returns, so letting the guard drop here
// (as the original code did) would delete the database out from under it.
fn create_store_in_random_folder() -> Arc<Store> {
    let tmp_dir = tempfile::Builder::new().prefix("_test_clear_column").tempdir().unwrap();
    create_store(&tmp_dir.into_path())
}

// generate `count` random 40-byte keys
fn generate_keys(count: usize) -> Vec<Vec<u8>> {
    (0..count).map(|_| (0..40).map(|_| rand::random::<u8>()).collect()).collect()
}

// read `keys.len()` keys from the DB in random order (sampling with
// replacement) and return how many lookups found a value.
//
// Takes `&Store` / `&[Vec<u8>]` rather than `&Arc<Store>` / `&Vec<Vec<u8>>`;
// existing call sites still compile via deref coercion. Also guards against
// an empty `keys` slice, which previously panicked on modulo-by-zero.
fn read_from_db(store: &Store, keys: &[Vec<u8>]) -> usize {
    if keys.is_empty() {
        // Avoid `% 0` below; nothing to read.
        return 0;
    }
    let mut read = 0;
    for _ in 0..keys.len() {
        let idx = rand::random::<u32>() as usize % keys.len();
        let key = &keys[idx];

        let val = store.get(ColState, key.as_ref()).map_err(|_| StorageError::StorageInternalError);

        if let Ok(Some(x)) = val {
            // `black_box` keeps the read from being optimized away.
            black_box(x);
            read += 1;
        }
    }
    read
}

// write a value of random size between 0..333 for each key to db,
// committed as a single batch (panics on storage failure — benchmark code).
//
// Takes `&Store` rather than `&Arc<Store>`; call sites still compile via
// deref coercion. The previous `key.as_slice().clone()` only cloned the
// `&[u8]` reference (a no-op) and was dropped.
fn write_to_db(store: &Store, keys: &[Vec<u8>]) {
    let mut store_update = store.store_update();
    for key in keys {
        let len = rand::random::<u32>() % 333;
        let val: Vec<u8> = (0..len).map(|_| rand::random::<u8>()).collect();
        store_update.set(ColState, key.as_slice(), &val);
    }
    store_update.commit().unwrap();
}

// Register the benchmark with the `bencher` harness; `benchmark_main!`
// expands into the `main` entry point that runs every registered group.
benchmark_group!(benches, benchmark_write_then_read_successful);

benchmark_main!(benches);

0 comments on commit 76a6261

Please sign in to comment.