diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml
index 9f774aab2719..2d3779e403bf 100644
--- a/.gitlab/pipeline/test.yml
+++ b/.gitlab/pipeline/test.yml
@@ -23,8 +23,10 @@ test-linux-stable:
- echo "Node index - ${CI_NODE_INDEX}. Total amount - ${CI_NODE_TOTAL}"
# add experimental to features after https://github.com/paritytech/substrate/pull/14502 is merged
# "upgrade_version_checks_should_work" is currently failing
+ # Filtered by deps to exclude subsystem regression tests that we run in another job
- |
time cargo nextest run \
+ --filter-expr 'not deps(/polkadot-subsystem-bench/)' \
--workspace \
--locked \
--release \
@@ -69,7 +71,8 @@ test-linux-stable-runtime-benchmarks:
# but still want to have debug assertions.
RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings"
script:
- - time cargo nextest run --workspace --features runtime-benchmarks benchmark --locked --cargo-profile testnet
+ # Filtered by deps to exclude subsystem regression tests that we run in another job
+ - time cargo nextest run --filter-expr 'not deps(/polkadot-subsystem-bench/)' --workspace --features runtime-benchmarks benchmark --locked --cargo-profile testnet
# can be used to run all tests
# test-linux-stable-all:
diff --git a/Cargo.lock b/Cargo.lock
index 991541ee84ee..b12006bbcf73 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -12269,6 +12269,7 @@ dependencies = [
"polkadot-node-subsystem-util",
"polkadot-primitives",
"polkadot-primitives-test-helpers",
+ "polkadot-subsystem-bench",
"rand",
"sc-network",
"schnellru",
@@ -12300,6 +12301,7 @@ dependencies = [
"polkadot-node-subsystem-util",
"polkadot-primitives",
"polkadot-primitives-test-helpers",
+ "polkadot-subsystem-bench",
"rand",
"sc-network",
"schnellru",
diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml
index 432501ed23fb..182d92cb1631 100644
--- a/polkadot/node/network/availability-distribution/Cargo.toml
+++ b/polkadot/node/network/availability-distribution/Cargo.toml
@@ -36,3 +36,14 @@ sc-network = { path = "../../../../substrate/client/network" }
futures-timer = "3.0.2"
assert_matches = "1.4.0"
polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" }
+polkadot-subsystem-bench = { path = "../../subsystem-bench" }
+
+
+[[test]]
+name = "availability-distribution-regression-bench"
+path = "tests/availability-distribution-regression-bench.rs"
+harness = false
+required-features = ["subsystem-benchmarks"]
+
+[features]
+subsystem-benchmarks = []
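Because the `[[test]]` target above sets `harness = false` and is gated behind the `subsystem-benchmarks` feature, cargo builds the regression benchmark as a standalone binary whose exit code decides pass/fail instead of going through libtest. A minimal sketch of that shape (illustrative only; the real body is in the new test file below):

```rust
// Illustrative shape of a `harness = false` test target: a plain `main` whose
// returned `Result` (and hence the process exit code) marks the test as passed
// or failed. The actual benchmark body follows in the new test file.
fn main() -> Result<(), String> {
    let regressions: Vec<String> = Vec::new(); // filled with findings by the real benchmark
    if regressions.is_empty() {
        Ok(())
    } else {
        eprintln!("{}", regressions.join("\n"));
        Err("Regressions found".to_string())
    }
}
```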
diff --git a/polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs b/polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs
new file mode 100644
index 000000000000..f2872f3c72ba
--- /dev/null
+++ b/polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs
@@ -0,0 +1,113 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! availability-write regression tests
+//!
+//! TODO: Explain the test case after the configuration is adjusted to Kusama values
+//!
+//! Subsystems involved:
+//! - availability-distribution
+//! - bitfield-distribution
+//! - availability-store
+
+use polkadot_subsystem_bench::{
+ availability::{benchmark_availability_write, prepare_test, TestDataAvailability, TestState},
+ configuration::{PeerLatency, TestConfiguration},
+ usage::BenchmarkUsage,
+};
+
+const BENCH_COUNT: usize = 3;
+const WARM_UP_COUNT: usize = 20;
+const WARM_UP_PRECISION: f64 = 0.01;
+
+fn main() -> Result<(), String> {
+ let mut messages = vec![];
+
+ // TODO: Adjust the test configurations to Kusama values
+ let mut config = TestConfiguration::default();
+ config.latency = Some(PeerLatency { mean_latency_ms: 30, std_dev: 2.0 });
+ config.n_validators = 1000;
+ config.n_cores = 200;
+ config.max_validators_per_core = 5;
+ config.min_pov_size = 5120;
+ config.max_pov_size = 5120;
+ config.peer_bandwidth = 52428800;
+ config.bandwidth = 52428800;
+ config.connectivity = 75;
+ config.num_blocks = 3;
+ config.generate_pov_sizes();
+
+ warm_up(config.clone())?;
+ let usage = benchmark(config.clone());
+
+ messages.extend(usage.check_network_usage(&[
+ ("Received from peers", 4330.0, 0.05),
+ ("Sent to peers", 15900.0, 0.05),
+ ]));
+ messages.extend(usage.check_cpu_usage(&[
+ ("availability-distribution", 0.025, 0.05),
+ ("bitfield-distribution", 0.085, 0.05),
+ ("availability-store", 0.180, 0.05),
+ ]));
+
+ if messages.is_empty() {
+ Ok(())
+ } else {
+ eprintln!("{}", messages.join("\n"));
+ Err("Regressions found".to_string())
+ }
+}
+
+fn warm_up(config: TestConfiguration) -> Result<(), String> {
+ println!("Warming up...");
+	let mut prev_run: Option<BenchmarkUsage> = None;
+ for _ in 0..WARM_UP_COUNT {
+ let curr = run(config.clone());
+ if let Some(ref prev) = prev_run {
+ let av_distr_diff =
+ curr.cpu_usage_diff(prev, "availability-distribution").expect("Must exist");
+ let bitf_distr_diff =
+ curr.cpu_usage_diff(prev, "bitfield-distribution").expect("Must exist");
+ let av_store_diff =
+ curr.cpu_usage_diff(prev, "availability-store").expect("Must exist");
+ if av_distr_diff < WARM_UP_PRECISION &&
+ bitf_distr_diff < WARM_UP_PRECISION &&
+ av_store_diff < WARM_UP_PRECISION
+ {
+ return Ok(())
+ }
+ }
+ prev_run = Some(curr);
+ }
+
+ Err("Can't warm up".to_string())
+}
+
+fn benchmark(config: TestConfiguration) -> BenchmarkUsage {
+ println!("Benchmarking...");
+	let usages: Vec<BenchmarkUsage> = (0..BENCH_COUNT).map(|_| run(config.clone())).collect();
+ let usage = BenchmarkUsage::average(&usages);
+ println!("{}", usage);
+ usage
+}
+
+fn run(config: TestConfiguration) -> BenchmarkUsage {
+ let mut state = TestState::new(&config);
+ let (mut env, _protocol_config) =
+ prepare_test(config.clone(), &mut state, TestDataAvailability::Write, false);
+ env.runtime()
+ .block_on(benchmark_availability_write("data_availability_write", &mut env, state))
+}
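The `(label, baseline, tolerance)` triples passed to `check_network_usage` and `check_cpu_usage` above are read as "fail when the measured value drifts more than `tolerance` (5% here) away from `baseline`". A hypothetical sketch of such a check, assuming this is roughly what `polkadot_subsystem_bench::usage` does internally; the helper name `check_within` and the sample numbers are illustrative:

```rust
// Hypothetical helper mirroring the assumed semantics of `check_*_usage`:
// return a message when `measured` deviates from `baseline` by more than `precision`.
fn check_within(label: &str, measured: f64, baseline: f64, precision: f64) -> Option<String> {
    let deviation = (measured - baseline).abs() / baseline;
    (deviation > precision).then(|| {
        format!(
            "{label}: measured {measured:.3} deviates from baseline {baseline:.3} by {:.1}% (allowed {:.1}%)",
            deviation * 100.0,
            precision * 100.0
        )
    })
}

fn main() {
    // e.g. availability-distribution CPU seconds per block against the 0.025 baseline, 5% tolerance.
    if let Some(message) = check_within("availability-distribution", 0.027, 0.025, 0.05) {
        eprintln!("{message}");
    }
}
```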
diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml
index 9eddf5c86d2e..12b6ce7a0571 100644
--- a/polkadot/node/network/availability-recovery/Cargo.toml
+++ b/polkadot/node/network/availability-recovery/Cargo.toml
@@ -41,6 +41,13 @@ sc-network = { path = "../../../../substrate/client/network" }
polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" }
+polkadot-subsystem-bench = { path = "../../subsystem-bench" }
+
+[[test]]
+name = "availability-recovery-regression-bench"
+path = "tests/availability-recovery-regression-bench.rs"
+harness = false
+required-features = ["subsystem-benchmarks"]
[features]
subsystem-benchmarks = []
diff --git a/polkadot/node/network/availability-recovery/tests/availability-recovery-regression-bench.rs b/polkadot/node/network/availability-recovery/tests/availability-recovery-regression-bench.rs
new file mode 100644
index 000000000000..beb063e7ae0d
--- /dev/null
+++ b/polkadot/node/network/availability-recovery/tests/availability-recovery-regression-bench.rs
@@ -0,0 +1,103 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! availability-read regression tests
+//!
+//! TODO: Explain the test case after the configuration is adjusted to Kusama values
+//!
+//! Subsystems involved:
+//! - availability-recovery
+
+use polkadot_subsystem_bench::{
+ availability::{
+ benchmark_availability_read, prepare_test, DataAvailabilityReadOptions,
+ TestDataAvailability, TestState,
+ },
+ configuration::{PeerLatency, TestConfiguration},
+ usage::BenchmarkUsage,
+};
+
+const BENCH_COUNT: usize = 3;
+const WARM_UP_COUNT: usize = 10;
+const WARM_UP_PRECISION: f64 = 0.01;
+
+fn main() -> Result<(), String> {
+ let mut messages = vec![];
+
+ // TODO: Adjust the test configurations to Kusama values
+ let options = DataAvailabilityReadOptions { fetch_from_backers: true };
+ let mut config = TestConfiguration::default();
+ config.latency = Some(PeerLatency { mean_latency_ms: 100, std_dev: 1.0 });
+ config.n_validators = 300;
+ config.n_cores = 20;
+ config.min_pov_size = 5120;
+ config.max_pov_size = 5120;
+ config.peer_bandwidth = 52428800;
+ config.bandwidth = 52428800;
+ config.num_blocks = 3;
+ config.connectivity = 90;
+ config.generate_pov_sizes();
+
+ warm_up(config.clone(), options.clone())?;
+ let usage = benchmark(config.clone(), options.clone());
+
+ messages.extend(usage.check_network_usage(&[
+ ("Received from peers", 102400.000, 0.05),
+ ("Sent to peers", 0.335, 0.05),
+ ]));
+ messages.extend(usage.check_cpu_usage(&[("availability-recovery", 3.850, 0.05)]));
+
+ if messages.is_empty() {
+ Ok(())
+ } else {
+ eprintln!("{}", messages.join("\n"));
+ Err("Regressions found".to_string())
+ }
+}
+
+fn warm_up(config: TestConfiguration, options: DataAvailabilityReadOptions) -> Result<(), String> {
+ println!("Warming up...");
+	let mut prev_run: Option<BenchmarkUsage> = None;
+ for _ in 0..WARM_UP_COUNT {
+ let curr = run(config.clone(), options.clone());
+ if let Some(ref prev) = prev_run {
+ let diff = curr.cpu_usage_diff(prev, "availability-recovery").expect("Must exist");
+ if diff < WARM_UP_PRECISION {
+ return Ok(())
+ }
+ }
+ prev_run = Some(curr);
+ }
+
+ Err("Can't warm up".to_string())
+}
+
+fn benchmark(config: TestConfiguration, options: DataAvailabilityReadOptions) -> BenchmarkUsage {
+ println!("Benchmarking...");
+	let usages: Vec<BenchmarkUsage> =
+ (0..BENCH_COUNT).map(|_| run(config.clone(), options.clone())).collect();
+ let usage = BenchmarkUsage::average(&usages);
+ println!("{}", usage);
+ usage
+}
+
+fn run(config: TestConfiguration, options: DataAvailabilityReadOptions) -> BenchmarkUsage {
+ let mut state = TestState::new(&config);
+ let (mut env, _protocol_config) =
+ prepare_test(config.clone(), &mut state, TestDataAvailability::Read(options), false);
+ env.runtime()
+ .block_on(benchmark_availability_read("data_availability_read", &mut env, state))
+}
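The warm-up loop above keeps repeating the run until two consecutive runs agree on `availability-recovery` CPU usage to within `WARM_UP_PRECISION` (1%), which filters out cold-cache and start-up noise before the measured runs. A small sketch of the convergence check it relies on, assuming `cpu_usage_diff` reports a relative difference between runs (its exact definition lives in `polkadot-subsystem-bench`):

```rust
// Assumed semantics of the warm-up comparison: relative drift between two
// consecutive runs, e.g. 0.007 == 0.7% drift.
fn relative_drift(current: f64, previous: f64) -> f64 {
    (current - previous).abs() / previous.max(f64::EPSILON)
}

fn main() {
    const WARM_UP_PRECISION: f64 = 0.01;
    // Two hypothetical consecutive measurements of availability-recovery CPU seconds.
    let (previous, current) = (3.91, 3.88);
    let warmed_up = relative_drift(current, previous) < WARM_UP_PRECISION;
    println!("warmed up: {warmed_up}");
}
```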
diff --git a/polkadot/node/subsystem-bench/Cargo.toml b/polkadot/node/subsystem-bench/Cargo.toml
index 7de91d8cd5de..726e7de4587c 100644
--- a/polkadot/node/subsystem-bench/Cargo.toml
+++ b/polkadot/node/subsystem-bench/Cargo.toml
@@ -8,9 +8,13 @@ license.workspace = true
readme = "README.md"
publish = false
+[lib]
+name = "polkadot_subsystem_bench"
+path = "src/lib/lib.rs"
+
[[bin]]
name = "subsystem-bench"
-path = "src/subsystem-bench.rs"
+path = "src/cli/subsystem-bench.rs"
# Prevent rustdoc error. Already documented from top-level Cargo.toml.
doc = false
diff --git a/polkadot/node/subsystem-bench/README.md b/polkadot/node/subsystem-bench/README.md
index e090a0392cb7..3aac2810ad58 100644
--- a/polkadot/node/subsystem-bench/README.md
+++ b/polkadot/node/subsystem-bench/README.md
@@ -1,6 +1,6 @@
# Subsystem benchmark client
-Run parachain consensus stress and performance tests on your development machine.
+Run parachain consensus stress and performance tests on your development machine or in CI.
## Motivation
@@ -32,7 +32,8 @@ a local Grafana/Prometheus stack is needed.
### Run Prometheus, Pyroscope and Graphana in Docker
-If docker is not usable, then follow the next sections to manually install Prometheus, Pyroscope and Graphana on your machine.
+If docker is not usable, then follow the next sections to manually install Prometheus, Pyroscope and Graphana
+on your machine.
```bash
cd polkadot/node/subsystem-bench/docker
@@ -95,39 +96,16 @@ If you are running the servers in Docker, use the following URLs:
Follow [this guide](https://grafana.com/docs/grafana/latest/dashboards/manage-dashboards/#export-and-import-dashboards)
to import the dashboards from the repository `grafana` folder.
-## How to run a test
-
-To run a test, you need to first choose a test objective. Currently, we support the following:
-
-```
-target/testnet/subsystem-bench --help
-The almighty Subsystem Benchmark Tool™️
-
-Usage: subsystem-bench [OPTIONS]
-
-Commands:
- data-availability-read Benchmark availability recovery strategies
+### Standard test options
```
+$ subsystem-bench --help
+Usage: subsystem-bench [OPTIONS] <PATH>
-Note: `test-sequence` is a special test objective that wraps up an arbitrary number of test objectives. It is tipically
- used to run a suite of tests defined in a `yaml` file like in this [example](examples/availability_read.yaml).
+Arguments:
+  <PATH>  Path to the test sequence configuration file
-### Standard test options
-
-```
- --network The type of network to be emulated [default: ideal] [possible values: ideal, healthy,
- degraded]
- --n-cores Number of cores to fetch availability for [default: 100]
- --n-validators Number of validators to fetch chunks from [default: 500]
- --min-pov-size The minimum pov size in KiB [default: 5120]
- --max-pov-size The maximum pov size bytes [default: 5120]
- -n, --num-blocks The number of blocks the test is going to run [default: 1]
- -p, --peer-bandwidth The bandwidth of emulated remote peers in KiB
- -b, --bandwidth The bandwidth of our node in KiB
- --connectivity Emulated peer connection ratio [0-100]
- --peer-mean-latency Mean remote peer latency in milliseconds [0-5000]
- --peer-latency-std-dev Remote peer latency standard deviation
+Options:
--profile Enable CPU Profiling with Pyroscope
     --pyroscope-url <PYROSCOPE_URL>  Pyroscope Server URL [default: http://localhost:4040]
     --pyroscope-sample-rate <PYROSCOPE_SAMPLE_RATE>  Pyroscope Sample Rate [default: 113]
@@ -135,27 +113,17 @@ Note: `test-sequence` is a special test objective that wraps up an arbitrary num
-h, --help Print help
```
-These apply to all test objectives, except `test-sequence` which relies on the values being specified in a file.
-
-### Test objectives
-
-Each test objective can have it's specific configuration options, in contrast with the standard test options.
+## How to run a test
-For `data-availability-read` the recovery strategy to be used is configurable.
+To run a test, you need to use a path to a test objective:
```
-target/testnet/subsystem-bench data-availability-read --help
-Benchmark availability recovery strategies
-
-Usage: subsystem-bench data-availability-read [OPTIONS]
-
-Options:
- -f, --fetch-from-backers Turbo boost AD Read by fetching the full availability datafrom backers first. Saves CPU
- as we don't need to re-construct from chunks. Tipically this is only faster if nodes
- have enough bandwidth
- -h, --help Print help
+target/testnet/subsystem-bench polkadot/node/subsystem-bench/examples/availability_read.yaml
```
+Note: test objectives may be wrapped up into a test sequence.
+It is typically used to run a suite of tests like in this [example](examples/availability_read.yaml).
+
### Understanding the test configuration
A single test configuration `TestConfiguration` struct applies to a single run of a certain test objective.
@@ -175,36 +143,65 @@ the test is started.
### Example run
-Let's run an availabilty read test which will recover availability for 10 cores with max PoV size on a 500
+Let's run an availability read test which will recover availability for 200 cores with max PoV size on a 1000
node validator network.
+
+
```
- target/testnet/subsystem-bench --n-cores 10 data-availability-read
-[2023-11-28T09:01:59Z INFO subsystem_bench::core::display] n_validators = 500, n_cores = 10, pov_size = 5120 - 5120,
- latency = None
-[2023-11-28T09:01:59Z INFO subsystem-bench::availability] Generating template candidate index=0 pov_size=5242880
-[2023-11-28T09:01:59Z INFO subsystem-bench::availability] Created test environment.
-[2023-11-28T09:01:59Z INFO subsystem-bench::availability] Pre-generating 10 candidates.
-[2023-11-28T09:02:01Z INFO subsystem-bench::core] Initializing network emulation for 500 peers.
-[2023-11-28T09:02:01Z INFO substrate_prometheus_endpoint] 〽️ Prometheus exporter started at 127.0.0.1:9999
-[2023-11-28T09:02:01Z INFO subsystem-bench::availability] Current block 1/1
-[2023-11-28T09:02:01Z INFO subsystem_bench::availability] 10 recoveries pending
-[2023-11-28T09:02:04Z INFO subsystem_bench::availability] Block time 3231ms
-[2023-11-28T09:02:04Z INFO subsystem-bench::availability] Sleeping till end of block (2768ms)
-[2023-11-28T09:02:07Z INFO subsystem_bench::availability] All blocks processed in 6001ms
-[2023-11-28T09:02:07Z INFO subsystem_bench::availability] Throughput: 51200 KiB/block
-[2023-11-28T09:02:07Z INFO subsystem_bench::availability] Block time: 6001 ms
-[2023-11-28T09:02:07Z INFO subsystem_bench::availability]
-
- Total received from network: 66 MiB
- Total sent to network: 58 KiB
- Total subsystem CPU usage 4.16s
- CPU usage per block 4.16s
- Total test environment CPU usage 0.00s
- CPU usage per block 0.00s
+target/testnet/subsystem-bench polkadot/node/subsystem-bench/examples/availability_write.yaml
+[2024-02-19T14:10:32.981Z INFO subsystem_bench] Sequence contains 1 step(s)
+[2024-02-19T14:10:32.981Z INFO subsystem-bench::cli] Step 1/1
+[2024-02-19T14:10:32.981Z INFO subsystem-bench::cli] [objective = DataAvailabilityWrite] n_validators = 1000, n_cores = 200, pov_size = 5120 - 5120, connectivity = 75, latency = Some(PeerLatency { mean_latency_ms: 30, std_dev: 2.0 })
+[2024-02-19T14:10:32.982Z INFO subsystem-bench::availability] Generating template candidate index=0 pov_size=5242880
+[2024-02-19T14:10:33.106Z INFO subsystem-bench::availability] Created test environment.
+[2024-02-19T14:10:33.106Z INFO subsystem-bench::availability] Pre-generating 600 candidates.
+[2024-02-19T14:10:34.096Z INFO subsystem-bench::network] Initializing emulation for a 1000 peer network.
+[2024-02-19T14:10:34.096Z INFO subsystem-bench::network] connectivity 75%, latency Some(PeerLatency { mean_latency_ms: 30, std_dev: 2.0 })
+[2024-02-19T14:10:34.098Z INFO subsystem-bench::network] Network created, connected validator count 749
+[2024-02-19T14:10:34.099Z INFO subsystem-bench::availability] Seeding availability store with candidates ...
+[2024-02-19T14:10:34.100Z INFO substrate_prometheus_endpoint] 〽️ Prometheus exporter started at 127.0.0.1:9999
+[2024-02-19T14:10:34.387Z INFO subsystem-bench::availability] Done
+[2024-02-19T14:10:34.387Z INFO subsystem-bench::availability] Current block #1
+[2024-02-19T14:10:34.389Z INFO subsystem-bench::availability] Waiting for all emulated peers to receive their chunk from us ...
+[2024-02-19T14:10:34.625Z INFO subsystem-bench::availability] All chunks received in 237ms
+[2024-02-19T14:10:34.626Z INFO polkadot_subsystem_bench::availability] Waiting for 749 bitfields to be received and processed
+[2024-02-19T14:10:35.710Z INFO subsystem-bench::availability] All bitfields processed
+[2024-02-19T14:10:35.710Z INFO subsystem-bench::availability] All work for block completed in 1322ms
+[2024-02-19T14:10:35.710Z INFO subsystem-bench::availability] Current block #2
+[2024-02-19T14:10:35.712Z INFO subsystem-bench::availability] Waiting for all emulated peers to receive their chunk from us ...
+[2024-02-19T14:10:35.947Z INFO subsystem-bench::availability] All chunks received in 236ms
+[2024-02-19T14:10:35.947Z INFO polkadot_subsystem_bench::availability] Waiting for 749 bitfields to be received and processed
+[2024-02-19T14:10:37.038Z INFO subsystem-bench::availability] All bitfields processed
+[2024-02-19T14:10:37.038Z INFO subsystem-bench::availability] All work for block completed in 1328ms
+[2024-02-19T14:10:37.039Z INFO subsystem-bench::availability] Current block #3
+[2024-02-19T14:10:37.040Z INFO subsystem-bench::availability] Waiting for all emulated peers to receive their chunk from us ...
+[2024-02-19T14:10:37.276Z INFO subsystem-bench::availability] All chunks received in 237ms
+[2024-02-19T14:10:37.276Z INFO polkadot_subsystem_bench::availability] Waiting for 749 bitfields to be received and processed
+[2024-02-19T14:10:38.362Z INFO subsystem-bench::availability] All bitfields processed
+[2024-02-19T14:10:38.362Z INFO subsystem-bench::availability] All work for block completed in 1323ms
+[2024-02-19T14:10:38.362Z INFO subsystem-bench::availability] All blocks processed in 3974ms
+[2024-02-19T14:10:38.362Z INFO subsystem-bench::availability] Avg block time: 1324 ms
+[2024-02-19T14:10:38.362Z INFO parachain::availability-store] received `Conclude` signal, exiting
+[2024-02-19T14:10:38.362Z INFO parachain::bitfield-distribution] Conclude
+[2024-02-19T14:10:38.362Z INFO subsystem-bench::network] Downlink channel closed, network interface task exiting
+
+polkadot/node/subsystem-bench/examples/availability_write.yaml #1 DataAvailabilityWrite
+
+Network usage, KiB total per block
+Received from peers 12922.000 4307.333
+Sent to peers 47705.000 15901.667
+
+CPU usage, seconds total per block
+availability-distribution 0.045 0.015
+bitfield-distribution 0.104 0.035
+availability-store 0.304 0.101
+Test environment 3.213 1.071
```
-`Block time` in the context of `data-availability-read` has a different meaning. It measures the amount of time it
+
+
+`Block time` in the current context has a different meaning. It measures the amount of time it
took the subsystem to finish processing all of the messages sent in the context of the current test block.
### Test logs
@@ -233,8 +230,9 @@ Since the execution will be very slow, it's recommended not to run it together w
benchmark results into account. A report is saved in a file `cachegrind_report.txt`.
Example run results:
+
```
-$ target/testnet/subsystem-bench --n-cores 10 --cache-misses data-availability-read
+$ target/testnet/subsystem-bench --cache-misses cache-misses-data-availability-read.yaml
$ cat cachegrind_report.txt
I refs: 64,622,081,485
I1 misses: 3,018,168
@@ -275,7 +273,7 @@ happy and negative scenarios (low bandwidth, network errors and low connectivity
To faster write a new test objective you need to use some higher level wrappers and logic: `TestEnvironment`,
`TestConfiguration`, `TestAuthorities`, `NetworkEmulator`. To create the `TestEnvironment` you will
-need to also build an `Overseer`, but that should be easy using the mockups for subsystems in`core::mock`.
+need to also build an `Overseer`, but that should be easy using the mockups for subsystems in `mock`.
### Mocking
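Since the crate now also builds as a library (`polkadot_subsystem_bench`), the higher-level wrappers mentioned above can be driven directly from Rust, which is exactly what the new regression tests in this patch do. A condensed sketch of that wiring (the configuration values are illustrative):

```rust
use polkadot_subsystem_bench::{
    availability::{benchmark_availability_write, prepare_test, TestDataAvailability, TestState},
    configuration::TestConfiguration,
};

fn main() {
    // Build a configuration in code instead of loading a YAML test sequence.
    let mut config = TestConfiguration::default();
    config.n_validators = 500;
    config.n_cores = 100;
    config.num_blocks = 3;
    config.generate_pov_sizes();

    let mut state = TestState::new(&config);
    // The last argument disables the Prometheus endpoint, as the regression tests do.
    let (mut env, _protocol_config) =
        prepare_test(config, &mut state, TestDataAvailability::Write, false);
    let usage = env
        .runtime()
        .block_on(benchmark_availability_write("my_benchmark", &mut env, state));
    println!("{}", usage);
}
```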
diff --git a/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml b/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml
index 758c7fbbf112..146da57d44c4 100644
--- a/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml
+++ b/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml
@@ -1,14 +1,14 @@
TestConfiguration:
# Test 1
- objective: !ApprovalVoting
- last_considered_tranche: 89
coalesce_mean: 3.0
coalesce_std_dev: 1.0
+ enable_assignments_v2: true
+ last_considered_tranche: 89
stop_when_approved: true
coalesce_tranche_diff: 12
- workdir_prefix: "/tmp/"
- enable_assignments_v2: true
num_no_shows_per_candidate: 10
+ workdir_prefix: "/tmp/"
n_validators: 500
n_cores: 100
min_pov_size: 1120
diff --git a/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml b/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml
index 9eeeefc53a42..6b17e62c20aa 100644
--- a/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml
+++ b/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml
@@ -7,11 +7,10 @@ TestConfiguration:
last_considered_tranche: 89
stop_when_approved: false
coalesce_tranche_diff: 12
- workdir_prefix: "/tmp"
num_no_shows_per_candidate: 0
+ workdir_prefix: "/tmp"
n_validators: 500
n_cores: 100
- n_included_candidates: 100
min_pov_size: 1120
max_pov_size: 5120
peer_bandwidth: 524288000000
diff --git a/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml b/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml
index 370bb31a5c4c..e946c28e8ef5 100644
--- a/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml
+++ b/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml
@@ -7,8 +7,8 @@ TestConfiguration:
last_considered_tranche: 89
stop_when_approved: true
coalesce_tranche_diff: 12
- workdir_prefix: "/tmp/"
num_no_shows_per_candidate: 0
+ workdir_prefix: "/tmp/"
n_validators: 500
n_cores: 100
min_pov_size: 1120
diff --git a/polkadot/node/subsystem-bench/examples/approvals_throughput_no_optimisations_enabled.yaml b/polkadot/node/subsystem-bench/examples/approvals_throughput_no_optimisations_enabled.yaml
index 30b9ac8dc50f..8f4b050e72f2 100644
--- a/polkadot/node/subsystem-bench/examples/approvals_throughput_no_optimisations_enabled.yaml
+++ b/polkadot/node/subsystem-bench/examples/approvals_throughput_no_optimisations_enabled.yaml
@@ -7,8 +7,8 @@ TestConfiguration:
last_considered_tranche: 89
stop_when_approved: false
coalesce_tranche_diff: 12
- workdir_prefix: "/tmp/"
num_no_shows_per_candidate: 0
+ workdir_prefix: "/tmp/"
n_validators: 500
n_cores: 100
min_pov_size: 1120
diff --git a/polkadot/node/subsystem-bench/src/availability/cli.rs b/polkadot/node/subsystem-bench/src/availability/cli.rs
deleted file mode 100644
index 65df8c1552aa..000000000000
--- a/polkadot/node/subsystem-bench/src/availability/cli.rs
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (C) Parity Technologies (UK) Ltd.
-// This file is part of Polkadot.
-
-// Polkadot is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Polkadot is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
-
-use serde::{Deserialize, Serialize};
-
-#[derive(clap::ValueEnum, Clone, Copy, Debug, PartialEq)]
-#[value(rename_all = "kebab-case")]
-#[non_exhaustive]
-pub enum NetworkEmulation {
- Ideal,
- Healthy,
- Degraded,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize, clap::Parser)]
-#[clap(rename_all = "kebab-case")]
-#[allow(missing_docs)]
-pub struct DataAvailabilityReadOptions {
- #[clap(short, long, default_value_t = false)]
- /// Turbo boost AD Read by fetching the full availability datafrom backers first. Saves CPU as
- /// we don't need to re-construct from chunks. Tipically this is only faster if nodes have
- /// enough bandwidth.
- pub fetch_from_backers: bool,
-}
diff --git a/polkadot/node/subsystem-bench/src/subsystem-bench.rs b/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs
similarity index 62%
rename from polkadot/node/subsystem-bench/src/subsystem-bench.rs
rename to polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs
index 0803f175474e..deb351360d74 100644
--- a/polkadot/node/subsystem-bench/src/subsystem-bench.rs
+++ b/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs
@@ -14,54 +14,32 @@
// You should have received a copy of the GNU General Public License
 // along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
-//! A tool for running subsystem benchmark tests designed for development and
-//! CI regression testing.
-
-use approval::{bench_approvals, ApprovalsOptions};
-use availability::{
- cli::{DataAvailabilityReadOptions, NetworkEmulation},
- prepare_test, TestState,
-};
+//! A tool for running subsystem benchmark tests
+//! designed for development and CI regression testing.
+
use clap::Parser;
-use clap_num::number_range;
use color_eyre::eyre;
use colored::Colorize;
-use core::{
- configuration::TestConfiguration,
- display::display_configuration,
- environment::{TestEnvironment, GENESIS_HASH},
-};
+use polkadot_subsystem_bench::{approval, availability, configuration};
use pyroscope::PyroscopeAgent;
use pyroscope_pprofrs::{pprof_backend, PprofConfig};
use serde::{Deserialize, Serialize};
use std::path::Path;
-mod approval;
-mod availability;
-mod core;
mod valgrind;
-const LOG_TARGET: &str = "subsystem-bench";
-
-fn le_100(s: &str) -> Result {
- number_range(s, 0, 100)
-}
-
-fn le_5000(s: &str) -> Result {
- number_range(s, 0, 5000)
-}
+const LOG_TARGET: &str = "subsystem-bench::cli";
/// Supported test objectives
#[derive(Debug, Clone, Parser, Serialize, Deserialize)]
#[command(rename_all = "kebab-case")]
pub enum TestObjective {
/// Benchmark availability recovery strategies.
- DataAvailabilityRead(DataAvailabilityReadOptions),
+ DataAvailabilityRead(availability::DataAvailabilityReadOptions),
/// Benchmark availability and bitfield distribution.
DataAvailabilityWrite,
/// Benchmark the approval-voting and approval-distribution subsystems.
- ApprovalVoting(ApprovalsOptions),
- Unimplemented,
+ ApprovalVoting(approval::ApprovalsOptions),
}
impl std::fmt::Display for TestObjective {
@@ -73,39 +51,37 @@ impl std::fmt::Display for TestObjective {
Self::DataAvailabilityRead(_) => "DataAvailabilityRead",
Self::DataAvailabilityWrite => "DataAvailabilityWrite",
Self::ApprovalVoting(_) => "ApprovalVoting",
- Self::Unimplemented => "Unimplemented",
}
)
}
}
-#[derive(Debug, Parser)]
-#[allow(missing_docs)]
-struct BenchCli {
- #[arg(long, value_enum, ignore_case = true, default_value_t = NetworkEmulation::Ideal)]
- /// The type of network to be emulated
- pub network: NetworkEmulation,
-
- #[clap(short, long)]
- /// The bandwidth of emulated remote peers in KiB
- pub peer_bandwidth: Option,
-
- #[clap(short, long)]
- /// The bandwidth of our node in KiB
- pub bandwidth: Option,
-
- #[clap(long, value_parser=le_100)]
- /// Emulated peer connection ratio [0-100].
- pub connectivity: Option,
+/// The test input parameters
+#[derive(Clone, Debug, Serialize, Deserialize)]
+struct CliTestConfiguration {
+ /// Test Objective
+ pub objective: TestObjective,
+ /// Test Configuration
+ #[serde(flatten)]
+ pub test_config: configuration::TestConfiguration,
+}
- #[clap(long, value_parser=le_5000)]
- /// Mean remote peer latency in milliseconds [0-5000].
- pub peer_mean_latency: Option,
+#[derive(Serialize, Deserialize)]
+pub struct TestSequence {
+ #[serde(rename(serialize = "TestConfiguration", deserialize = "TestConfiguration"))]
+	test_configurations: Vec<CliTestConfiguration>,
+}
- #[clap(long, value_parser=le_5000)]
- /// Remote peer latency standard deviation
- pub peer_latency_std_dev: Option,
+impl TestSequence {
+	fn new_from_file(path: &Path) -> std::io::Result<TestSequence> {
+ let string = String::from_utf8(std::fs::read(path)?).expect("File is valid UTF8");
+ Ok(serde_yaml::from_str(&string).expect("File is valid test sequence YA"))
+ }
+}
+#[derive(Debug, Parser)]
+#[allow(missing_docs)]
+struct BenchCli {
#[clap(long, default_value_t = false)]
/// Enable CPU Profiling with Pyroscope
pub profile: bool,
@@ -122,10 +98,6 @@ struct BenchCli {
/// Enable Cache Misses Profiling with Valgrind. Linux only, Valgrind must be in the PATH
pub cache_misses: bool,
- #[clap(long, default_value_t = false)]
- /// Shows the output in YAML format
- pub yaml_output: bool,
-
#[arg(required = true)]
/// Path to the test sequence configuration file
pub path: String,
@@ -148,49 +120,60 @@ impl BenchCli {
None
};
- let test_sequence = core::configuration::TestSequence::new_from_file(Path::new(&self.path))
+ let test_sequence = TestSequence::new_from_file(Path::new(&self.path))
.expect("File exists")
- .into_vec();
+ .test_configurations;
let num_steps = test_sequence.len();
gum::info!("{}", format!("Sequence contains {} step(s)", num_steps).bright_purple());
- for (index, test_config) in test_sequence.into_iter().enumerate() {
- let benchmark_name = format!("{} #{} {}", &self.path, index + 1, test_config.objective);
- gum::info!(target: LOG_TARGET, "{}", format!("Step {}/{}", index + 1, num_steps).bright_purple(),);
- display_configuration(&test_config);
- let usage = match test_config.objective {
- TestObjective::DataAvailabilityRead(ref _opts) => {
- let mut state = TestState::new(&test_config);
- let (mut env, _protocol_config) = prepare_test(test_config, &mut state);
+ for (index, CliTestConfiguration { objective, mut test_config }) in
+ test_sequence.into_iter().enumerate()
+ {
+ let benchmark_name = format!("{} #{} {}", &self.path, index + 1, objective);
+ gum::info!(target: LOG_TARGET, "{}", format!("Step {}/{}", index + 1, num_steps).bright_purple(),);
+ gum::info!(target: LOG_TARGET, "[{}] {}", format!("objective = {:?}", objective).green(), test_config);
+ test_config.generate_pov_sizes();
+
+ let usage = match objective {
+ TestObjective::DataAvailabilityRead(opts) => {
+ let mut state = availability::TestState::new(&test_config);
+ let (mut env, _protocol_config) = availability::prepare_test(
+ test_config,
+ &mut state,
+ availability::TestDataAvailability::Read(opts),
+ true,
+ );
env.runtime().block_on(availability::benchmark_availability_read(
&benchmark_name,
&mut env,
state,
))
},
- TestObjective::ApprovalVoting(ref options) => {
- let (mut env, state) =
- approval::prepare_test(test_config.clone(), options.clone());
- env.runtime().block_on(bench_approvals(&benchmark_name, &mut env, state))
- },
TestObjective::DataAvailabilityWrite => {
- let mut state = TestState::new(&test_config);
- let (mut env, _protocol_config) = prepare_test(test_config, &mut state);
+ let mut state = availability::TestState::new(&test_config);
+ let (mut env, _protocol_config) = availability::prepare_test(
+ test_config,
+ &mut state,
+ availability::TestDataAvailability::Write,
+ true,
+ );
env.runtime().block_on(availability::benchmark_availability_write(
&benchmark_name,
&mut env,
state,
))
},
- TestObjective::Unimplemented => todo!(),
- };
-
- let output = if self.yaml_output {
- serde_yaml::to_string(&vec![usage])?
- } else {
- usage.to_string()
+ TestObjective::ApprovalVoting(ref options) => {
+ let (mut env, state) =
+ approval::prepare_test(test_config.clone(), options.clone(), true);
+ env.runtime().block_on(approval::bench_approvals(
+ &benchmark_name,
+ &mut env,
+ state,
+ ))
+ },
};
- println!("{}", output);
+ println!("{}", usage);
}
if let Some(agent_running) = agent_running {
diff --git a/polkadot/node/subsystem-bench/src/valgrind.rs b/polkadot/node/subsystem-bench/src/cli/valgrind.rs
similarity index 100%
rename from polkadot/node/subsystem-bench/src/valgrind.rs
rename to polkadot/node/subsystem-bench/src/cli/valgrind.rs
diff --git a/polkadot/node/subsystem-bench/src/approval/helpers.rs b/polkadot/node/subsystem-bench/src/lib/approval/helpers.rs
similarity index 99%
rename from polkadot/node/subsystem-bench/src/approval/helpers.rs
rename to polkadot/node/subsystem-bench/src/lib/approval/helpers.rs
index 623d91848f53..af5ff5aa1fac 100644
--- a/polkadot/node/subsystem-bench/src/approval/helpers.rs
+++ b/polkadot/node/subsystem-bench/src/lib/approval/helpers.rs
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
 // along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
-use crate::core::configuration::TestAuthorities;
+use crate::configuration::TestAuthorities;
use itertools::Itertools;
use polkadot_node_core_approval_voting::time::{Clock, SystemClock, Tick};
use polkadot_node_network_protocol::{
diff --git a/polkadot/node/subsystem-bench/src/approval/message_generator.rs b/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs
similarity index 97%
rename from polkadot/node/subsystem-bench/src/approval/message_generator.rs
rename to polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs
index a71034013247..c1b31a509f6d 100644
--- a/polkadot/node/subsystem-bench/src/approval/message_generator.rs
+++ b/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs
@@ -18,15 +18,12 @@ use crate::{
approval::{
helpers::{generate_babe_epoch, generate_topology},
test_message::{MessagesBundle, TestMessageInfo},
- ApprovalTestState, BlockTestData, GeneratedState, BUFFER_FOR_GENERATION_MILLIS, LOG_TARGET,
- SLOT_DURATION_MILLIS,
+ ApprovalTestState, ApprovalsOptions, BlockTestData, GeneratedState,
+ BUFFER_FOR_GENERATION_MILLIS, LOG_TARGET, SLOT_DURATION_MILLIS,
},
- core::{
- configuration::{TestAuthorities, TestConfiguration},
- mock::runtime_api::session_info_for_peers,
- NODE_UNDER_TEST,
- },
- ApprovalsOptions, TestObjective,
+ configuration::{TestAuthorities, TestConfiguration},
+ mock::runtime_api::session_info_for_peers,
+ NODE_UNDER_TEST,
};
use futures::SinkExt;
use itertools::Itertools;
@@ -132,11 +129,7 @@ impl PeerMessagesGenerator {
options: &ApprovalsOptions,
) -> String {
let mut fingerprint = options.fingerprint();
- let mut exclude_objective = configuration.clone();
- // The objective contains the full content of `ApprovalOptions`, we don't want to put all of
- // that in fingerprint, so execlute it because we add it manually see above.
- exclude_objective.objective = TestObjective::Unimplemented;
- let configuration_bytes = bincode::serialize(&exclude_objective).unwrap();
+ let configuration_bytes = bincode::serialize(&configuration).unwrap();
fingerprint.extend(configuration_bytes);
let mut sha1 = sha1::Sha1::new();
sha1.update(fingerprint);
diff --git a/polkadot/node/subsystem-bench/src/approval/mock_chain_selection.rs b/polkadot/node/subsystem-bench/src/lib/approval/mock_chain_selection.rs
similarity index 100%
rename from polkadot/node/subsystem-bench/src/approval/mock_chain_selection.rs
rename to polkadot/node/subsystem-bench/src/lib/approval/mock_chain_selection.rs
diff --git a/polkadot/node/subsystem-bench/src/approval/mod.rs b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs
similarity index 97%
rename from polkadot/node/subsystem-bench/src/approval/mod.rs
rename to polkadot/node/subsystem-bench/src/lib/approval/mod.rs
index f07912de1887..450faf06123f 100644
--- a/polkadot/node/subsystem-bench/src/approval/mod.rs
+++ b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs
@@ -24,25 +24,21 @@ use crate::{
mock_chain_selection::MockChainSelection,
test_message::{MessagesBundle, TestMessageInfo},
},
- core::{
- configuration::TestAuthorities,
- environment::{
- BenchmarkUsage, TestEnvironment, TestEnvironmentDependencies, MAX_TIME_OF_FLIGHT,
- },
- mock::{
- chain_api::{ChainApiState, MockChainApi},
- dummy_builder,
- network_bridge::{MockNetworkBridgeRx, MockNetworkBridgeTx},
- runtime_api::MockRuntimeApi,
- AlwaysSupportsParachains, TestSyncOracle,
- },
- network::{
- new_network, HandleNetworkMessage, NetworkEmulatorHandle, NetworkInterface,
- NetworkInterfaceReceiver,
- },
- NODE_UNDER_TEST,
+ configuration::{TestAuthorities, TestConfiguration},
+ dummy_builder,
+ environment::{TestEnvironment, TestEnvironmentDependencies, MAX_TIME_OF_FLIGHT},
+ mock::{
+ chain_api::{ChainApiState, MockChainApi},
+ network_bridge::{MockNetworkBridgeRx, MockNetworkBridgeTx},
+ runtime_api::MockRuntimeApi,
+ AlwaysSupportsParachains, TestSyncOracle,
},
- TestConfiguration,
+ network::{
+ new_network, HandleNetworkMessage, NetworkEmulatorHandle, NetworkInterface,
+ NetworkInterfaceReceiver,
+ },
+ usage::BenchmarkUsage,
+ NODE_UNDER_TEST,
};
use colored::Colorize;
use futures::channel::oneshot;
@@ -472,11 +468,9 @@ impl ApprovalTestState {
impl HandleNetworkMessage for ApprovalTestState {
fn handle(
&self,
- _message: crate::core::network::NetworkMessage,
- _node_sender: &mut futures::channel::mpsc::UnboundedSender<
- crate::core::network::NetworkMessage,
- >,
-	) -> Option<crate::core::network::NetworkMessage> {
+ _message: crate::network::NetworkMessage,
+		_node_sender: &mut futures::channel::mpsc::UnboundedSender<crate::network::NetworkMessage>,
+	) -> Option<crate::network::NetworkMessage> {
self.total_sent_messages_from_node
.as_ref()
.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
@@ -841,8 +835,14 @@ fn build_overseer(
pub fn prepare_test(
config: TestConfiguration,
options: ApprovalsOptions,
+ with_prometheus_endpoint: bool,
) -> (TestEnvironment, ApprovalTestState) {
- prepare_test_inner(config, TestEnvironmentDependencies::default(), options)
+ prepare_test_inner(
+ config,
+ TestEnvironmentDependencies::default(),
+ options,
+ with_prometheus_endpoint,
+ )
}
/// Build the test environment for an Approval benchmark.
@@ -850,6 +850,7 @@ fn prepare_test_inner(
config: TestConfiguration,
dependencies: TestEnvironmentDependencies,
options: ApprovalsOptions,
+ with_prometheus_endpoint: bool,
) -> (TestEnvironment, ApprovalTestState) {
gum::info!("Prepare test state");
let state = ApprovalTestState::new(&config, options, &dependencies);
@@ -878,6 +879,7 @@ fn prepare_test_inner(
overseer,
overseer_handle,
state.test_authorities.clone(),
+ with_prometheus_endpoint,
),
state,
)
diff --git a/polkadot/node/subsystem-bench/src/approval/test_message.rs b/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs
similarity index 98%
rename from polkadot/node/subsystem-bench/src/approval/test_message.rs
rename to polkadot/node/subsystem-bench/src/lib/approval/test_message.rs
index 8aaabc3426c8..63e383509be9 100644
--- a/polkadot/node/subsystem-bench/src/approval/test_message.rs
+++ b/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs
@@ -15,9 +15,8 @@
 // along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use crate::{
- approval::{BlockTestData, CandidateTestData},
- core::configuration::TestAuthorities,
- ApprovalsOptions,
+ approval::{ApprovalsOptions, BlockTestData, CandidateTestData},
+ configuration::TestAuthorities,
};
use itertools::Itertools;
use parity_scale_codec::{Decode, Encode};
diff --git a/polkadot/node/subsystem-bench/src/availability/av_store_helpers.rs b/polkadot/node/subsystem-bench/src/lib/availability/av_store_helpers.rs
similarity index 94%
rename from polkadot/node/subsystem-bench/src/availability/av_store_helpers.rs
rename to polkadot/node/subsystem-bench/src/lib/availability/av_store_helpers.rs
index 261dbd0376c7..3300def2235e 100644
--- a/polkadot/node/subsystem-bench/src/availability/av_store_helpers.rs
+++ b/polkadot/node/subsystem-bench/src/lib/availability/av_store_helpers.rs
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
 // along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
-use crate::core::{environment::TestEnvironmentDependencies, mock::TestSyncOracle};
+use crate::{environment::TestEnvironmentDependencies, mock::TestSyncOracle};
use polkadot_node_core_av_store::{AvailabilityStoreSubsystem, Config};
use polkadot_node_metrics::metrics::Metrics;
use polkadot_node_subsystem_util::database::Database;
diff --git a/polkadot/node/subsystem-bench/src/availability/mod.rs b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs
similarity index 94%
rename from polkadot/node/subsystem-bench/src/availability/mod.rs
rename to polkadot/node/subsystem-bench/src/lib/availability/mod.rs
index ad9a17ff8f47..f012a5a907ed 100644
--- a/polkadot/node/subsystem-bench/src/availability/mod.rs
+++ b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs
@@ -15,22 +15,18 @@
 // along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use crate::{
- core::{
- configuration::TestConfiguration,
- environment::{BenchmarkUsage, TestEnvironmentDependencies},
- mock::{
- av_store,
- av_store::MockAvailabilityStore,
- chain_api::{ChainApiState, MockChainApi},
- dummy_builder,
- network_bridge::{self, MockNetworkBridgeRx, MockNetworkBridgeTx},
- runtime_api,
- runtime_api::MockRuntimeApi,
- AlwaysSupportsParachains,
- },
- network::new_network,
+ configuration::TestConfiguration,
+ dummy_builder,
+ environment::{TestEnvironment, TestEnvironmentDependencies, GENESIS_HASH},
+ mock::{
+ av_store::{self, MockAvailabilityStore},
+ chain_api::{ChainApiState, MockChainApi},
+ network_bridge::{self, MockNetworkBridgeRx, MockNetworkBridgeTx},
+ runtime_api::{self, MockRuntimeApi},
+ AlwaysSupportsParachains,
},
- TestEnvironment, TestObjective, GENESIS_HASH,
+ network::new_network,
+ usage::BenchmarkUsage,
};
use av_store::NetworkAvailabilityState;
use av_store_helpers::new_av_store;
@@ -73,14 +69,30 @@ use sc_network::{
PeerId,
};
use sc_service::SpawnTaskHandle;
+use serde::{Deserialize, Serialize};
use sp_core::H256;
use std::{collections::HashMap, iter::Cycle, ops::Sub, sync::Arc, time::Instant};
mod av_store_helpers;
-pub(crate) mod cli;
const LOG_TARGET: &str = "subsystem-bench::availability";
+#[derive(Debug, Clone, Serialize, Deserialize, clap::Parser)]
+#[clap(rename_all = "kebab-case")]
+#[allow(missing_docs)]
+pub struct DataAvailabilityReadOptions {
+ #[clap(short, long, default_value_t = false)]
+	/// Turbo boost AD Read by fetching the full availability data from backers first. Saves CPU as
+	/// we don't need to re-construct from chunks. Typically this is only faster if nodes have
+	/// enough bandwidth.
+ pub fetch_from_backers: bool,
+}
+
+pub enum TestDataAvailability {
+ Read(DataAvailabilityReadOptions),
+ Write,
+}
+
fn build_overseer_for_availability_read(
spawn_task_handle: SpawnTaskHandle,
runtime_api: MockRuntimeApi,
@@ -141,14 +153,24 @@ fn build_overseer_for_availability_write(
pub fn prepare_test(
config: TestConfiguration,
state: &mut TestState,
+ mode: TestDataAvailability,
+ with_prometheus_endpoint: bool,
) -> (TestEnvironment, Vec) {
- prepare_test_inner(config, state, TestEnvironmentDependencies::default())
+ prepare_test_inner(
+ config,
+ state,
+ mode,
+ TestEnvironmentDependencies::default(),
+ with_prometheus_endpoint,
+ )
}
fn prepare_test_inner(
config: TestConfiguration,
state: &mut TestState,
+ mode: TestDataAvailability,
dependencies: TestEnvironmentDependencies,
+ with_prometheus_endpoint: bool,
) -> (TestEnvironment, Vec) {
// Generate test authorities.
let test_authorities = config.generate_authorities();
@@ -216,8 +238,8 @@ fn prepare_test_inner(
let network_bridge_rx =
network_bridge::MockNetworkBridgeRx::new(network_receiver, Some(chunk_req_cfg.clone()));
- let (overseer, overseer_handle) = match &state.config().objective {
- TestObjective::DataAvailabilityRead(options) => {
+ let (overseer, overseer_handle) = match &mode {
+ TestDataAvailability::Read(options) => {
let use_fast_path = options.fetch_from_backers;
let subsystem = if use_fast_path {
@@ -247,7 +269,7 @@ fn prepare_test_inner(
&dependencies,
)
},
- TestObjective::DataAvailabilityWrite => {
+ TestDataAvailability::Write => {
let availability_distribution = AvailabilityDistributionSubsystem::new(
test_authorities.keyring.keystore(),
IncomingRequestReceivers { pov_req_receiver, chunk_req_receiver },
@@ -284,9 +306,6 @@ fn prepare_test_inner(
&dependencies,
)
},
- _ => {
- unimplemented!("Invalid test objective")
- },
};
(
@@ -297,6 +316,7 @@ fn prepare_test_inner(
overseer,
overseer_handle,
test_authorities,
+ with_prometheus_endpoint,
),
req_cfgs,
)
@@ -326,10 +346,6 @@ pub struct TestState {
}
impl TestState {
- fn config(&self) -> &TestConfiguration {
- &self.config
- }
-
pub fn next_candidate(&mut self) -> Option {
let candidate = self.candidates.next();
let candidate_hash = candidate.as_ref().unwrap().hash();
diff --git a/polkadot/node/subsystem-bench/src/core/configuration.rs b/polkadot/node/subsystem-bench/src/lib/configuration.rs
similarity index 83%
rename from polkadot/node/subsystem-bench/src/core/configuration.rs
rename to polkadot/node/subsystem-bench/src/lib/configuration.rs
index 00be2a86b173..c76933085271 100644
--- a/polkadot/node/subsystem-bench/src/core/configuration.rs
+++ b/polkadot/node/subsystem-bench/src/lib/configuration.rs
@@ -16,7 +16,7 @@
//! Test configuration definition and helpers.
-use crate::{core::keyring::Keyring, TestObjective};
+use crate::keyring::Keyring;
use itertools::Itertools;
use polkadot_primitives::{AssignmentId, AuthorityDiscoveryId, ValidatorId};
use rand::thread_rng;
@@ -24,17 +24,7 @@ use rand_distr::{Distribution, Normal, Uniform};
use sc_network::PeerId;
use serde::{Deserialize, Serialize};
use sp_consensus_babe::AuthorityId;
-use std::{collections::HashMap, path::Path};
-
-pub fn random_pov_size(min_pov_size: usize, max_pov_size: usize) -> usize {
- random_uniform_sample(min_pov_size, max_pov_size)
-}
-
-fn random_uniform_sample<T: Into<usize> + From<usize>>(min_value: T, max_value: T) -> T {
- Uniform::from(min_value.into()..=max_value.into())
- .sample(&mut thread_rng())
- .into()
-}
+use std::collections::HashMap;
/// Peer networking latency configuration.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
@@ -87,8 +77,6 @@ fn default_no_show_slots() -> usize {
/// The test input parameters
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct TestConfiguration {
- /// The test objective
- pub objective: TestObjective,
/// Number of validators
pub n_validators: usize,
/// Number of cores
@@ -115,7 +103,7 @@ pub struct TestConfiguration {
pub max_pov_size: usize,
/// Randomly sampled pov_sizes
#[serde(skip)]
-	pov_sizes: Vec<usize>,
+	pub pov_sizes: Vec<usize>,
/// The amount of bandiwdth remote validators have.
#[serde(default = "default_bandwidth")]
pub peer_bandwidth: usize,
@@ -133,56 +121,32 @@ pub struct TestConfiguration {
pub num_blocks: usize,
}
-fn generate_pov_sizes(count: usize, min_kib: usize, max_kib: usize) -> Vec<usize> {
- (0..count).map(|_| random_pov_size(min_kib * 1024, max_kib * 1024)).collect()
-}
-
-#[derive(Serialize, Deserialize)]
-pub struct TestSequence {
- #[serde(rename(serialize = "TestConfiguration", deserialize = "TestConfiguration"))]
-	test_configurations: Vec<TestConfiguration>,
-}
-
-impl TestSequence {
-	pub fn into_vec(self) -> Vec<TestConfiguration> {
- self.test_configurations
- .into_iter()
- .map(|mut config| {
- config.pov_sizes =
- generate_pov_sizes(config.n_cores, config.min_pov_size, config.max_pov_size);
- config
- })
- .collect()
- }
-}
-
-impl TestSequence {
-	pub fn new_from_file(path: &Path) -> std::io::Result<TestSequence> {
- let string = String::from_utf8(std::fs::read(path)?).expect("File is valid UTF8");
- Ok(serde_yaml::from_str(&string).expect("File is valid test sequence YA"))
+impl Default for TestConfiguration {
+ fn default() -> Self {
+ Self {
+ n_validators: Default::default(),
+ n_cores: Default::default(),
+ needed_approvals: default_needed_approvals(),
+ zeroth_delay_tranche_width: default_zeroth_delay_tranche_width(),
+ relay_vrf_modulo_samples: default_relay_vrf_modulo_samples(),
+ n_delay_tranches: default_n_delay_tranches(),
+ no_show_slots: default_no_show_slots(),
+ max_validators_per_core: default_backing_group_size(),
+ min_pov_size: default_pov_size(),
+ max_pov_size: default_pov_size(),
+ pov_sizes: Default::default(),
+ peer_bandwidth: default_bandwidth(),
+ bandwidth: default_bandwidth(),
+ latency: Default::default(),
+ connectivity: default_connectivity(),
+ num_blocks: Default::default(),
+ }
}
}
-/// Helper struct for authority related state.
-#[derive(Clone)]
-pub struct TestAuthorities {
- pub keyring: Keyring,
-	pub validator_public: Vec<ValidatorId>,
-	pub validator_authority_id: Vec<AuthorityDiscoveryId>,
-	pub validator_babe_id: Vec<AuthorityId>,
-	pub validator_assignment_id: Vec<AssignmentId>,
-	pub key_seeds: Vec<String>,
-	pub peer_ids: Vec<PeerId>,
-	pub peer_id_to_authority: HashMap<PeerId, AuthorityDiscoveryId>,
-}
-
impl TestConfiguration {
- #[allow(unused)]
- pub fn write_to_disk(&self) {
- // Serialize a slice of configurations
- let yaml = serde_yaml::to_string(&TestSequence { test_configurations: vec![self.clone()] })
- .unwrap();
- std::fs::write("last_test.yaml", yaml).unwrap();
+ pub fn generate_pov_sizes(&mut self) {
+ self.pov_sizes = generate_pov_sizes(self.n_cores, self.min_pov_size, self.max_pov_size);
}
pub fn pov_sizes(&self) -> &[usize] {
@@ -239,6 +203,33 @@ impl TestConfiguration {
}
}
+fn random_uniform_sample<T: Into<usize> + From<usize>>(min_value: T, max_value: T) -> T {
+ Uniform::from(min_value.into()..=max_value.into())
+ .sample(&mut thread_rng())
+ .into()
+}
+
+fn random_pov_size(min_pov_size: usize, max_pov_size: usize) -> usize {
+ random_uniform_sample(min_pov_size, max_pov_size)
+}
+
+fn generate_pov_sizes(count: usize, min_kib: usize, max_kib: usize) -> Vec<usize> {
+ (0..count).map(|_| random_pov_size(min_kib * 1024, max_kib * 1024)).collect()
+}
+
+/// Helper struct for authority related state.
+#[derive(Clone)]
+pub struct TestAuthorities {
+ pub keyring: Keyring,
+	pub validator_public: Vec<ValidatorId>,
+	pub validator_authority_id: Vec<AuthorityDiscoveryId>,
+	pub validator_babe_id: Vec<AuthorityId>,
+	pub validator_assignment_id: Vec<AssignmentId>,
+	pub key_seeds: Vec<String>,
+	pub peer_ids: Vec<PeerId>,
+	pub peer_id_to_authority: HashMap<PeerId, AuthorityDiscoveryId>,
+}
+
/// Sample latency (in milliseconds) from a normal distribution with parameters
/// specified in `maybe_peer_latency`.
pub fn random_latency(maybe_peer_latency: Option<&PeerLatency>) -> usize {
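One detail of the relocated helpers above worth keeping in mind: `min_pov_size` and `max_pov_size` are interpreted as KiB and multiplied by 1024, and `generate_pov_sizes` samples exactly one size per core. A tiny sketch of that sampling under those assumptions (the helper name `pov_sizes_for` is illustrative):

```rust
use rand::thread_rng;
use rand_distr::{Distribution, Uniform};

// Mirrors the shape of `generate_pov_sizes`/`random_pov_size`: one uniformly
// sampled size per core, with the configured bounds given in KiB.
fn pov_sizes_for(n_cores: usize, min_kib: usize, max_kib: usize) -> Vec<usize> {
    let dist = Uniform::from(min_kib * 1024..=max_kib * 1024);
    (0..n_cores).map(|_| dist.sample(&mut thread_rng())).collect()
}

fn main() {
    // With min == max (as in the regression tests), every candidate gets a 5 MiB PoV.
    let sizes = pov_sizes_for(200, 5120, 5120);
    assert!(sizes.iter().all(|&size| size == 5120 * 1024));
}
```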
diff --git a/polkadot/node/subsystem-bench/src/core/display.rs b/polkadot/node/subsystem-bench/src/lib/display.rs
similarity index 89%
rename from polkadot/node/subsystem-bench/src/core/display.rs
rename to polkadot/node/subsystem-bench/src/lib/display.rs
index 13a349382e2f..b153d54a7c36 100644
--- a/polkadot/node/subsystem-bench/src/core/display.rs
+++ b/polkadot/node/subsystem-bench/src/lib/display.rs
@@ -19,7 +19,7 @@
//!
//! Currently histogram buckets are skipped.
-use crate::{TestConfiguration, LOG_TARGET};
+use crate::configuration::TestConfiguration;
use colored::Colorize;
use prometheus::{
proto::{MetricFamily, MetricType},
@@ -27,6 +27,8 @@ use prometheus::{
};
use std::fmt::Display;
+const LOG_TARGET: &str = "subsystem-bench::display";
+
#[derive(Default, Debug)]
pub struct MetricCollection(Vec);
@@ -85,6 +87,7 @@ impl Display for MetricCollection {
Ok(())
}
}
+
#[derive(Debug, Clone)]
pub struct TestMetric {
name: String,
@@ -184,15 +187,16 @@ pub fn parse_metrics(registry: &Registry) -> MetricCollection {
test_metrics.into()
}
-pub fn display_configuration(test_config: &TestConfiguration) {
- gum::info!(
- "[{}] {}, {}, {}, {}, {}",
- format!("objective = {:?}", test_config.objective).green(),
- format!("n_validators = {}", test_config.n_validators).blue(),
- format!("n_cores = {}", test_config.n_cores).blue(),
- format!("pov_size = {} - {}", test_config.min_pov_size, test_config.max_pov_size)
- .bright_black(),
- format!("connectivity = {}", test_config.connectivity).bright_black(),
- format!("latency = {:?}", test_config.latency).bright_black(),
- );
+impl Display for TestConfiguration {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(
+ f,
+ "{}, {}, {}, {}, {}",
+ format!("n_validators = {}", self.n_validators).blue(),
+ format!("n_cores = {}", self.n_cores).blue(),
+ format!("pov_size = {} - {}", self.min_pov_size, self.max_pov_size).bright_black(),
+ format!("connectivity = {}", self.connectivity).bright_black(),
+ format!("latency = {:?}", self.latency).bright_black(),
+ )
+ }
}
diff --git a/polkadot/node/subsystem-bench/src/core/environment.rs b/polkadot/node/subsystem-bench/src/lib/environment.rs
similarity index 88%
rename from polkadot/node/subsystem-bench/src/core/environment.rs
rename to polkadot/node/subsystem-bench/src/lib/environment.rs
index ca4c41cf45f9..958ed50d0894 100644
--- a/polkadot/node/subsystem-bench/src/core/environment.rs
+++ b/polkadot/node/subsystem-bench/src/lib/environment.rs
@@ -17,13 +17,11 @@
//! Test environment implementation
use crate::{
- core::{
- configuration::TestAuthorities, mock::AlwaysSupportsParachains,
- network::NetworkEmulatorHandle,
- },
- TestConfiguration,
+ configuration::{TestAuthorities, TestConfiguration},
+ mock::AlwaysSupportsParachains,
+ network::NetworkEmulatorHandle,
+ usage::{BenchmarkUsage, ResourceUsage},
};
-use colored::Colorize;
use core::time::Duration;
use futures::{Future, FutureExt};
use polkadot_node_subsystem::{messages::AllMessages, Overseer, SpawnGlue, TimeoutExt};
@@ -33,7 +31,6 @@ use polkadot_node_subsystem_util::metrics::prometheus::{
};
use polkadot_overseer::{BlockInfo, Handle as OverseerHandle};
use sc_service::{SpawnTaskHandle, TaskManager};
-use serde::{Deserialize, Serialize};
use std::net::{Ipv4Addr, SocketAddr};
use tokio::runtime::Handle;
@@ -204,6 +201,7 @@ impl TestEnvironment {
 	overseer: Overseer<SpawnGlue<SpawnTaskHandle>, AlwaysSupportsParachains>,
overseer_handle: OverseerHandle,
authorities: TestAuthorities,
+ with_prometheus_endpoint: bool,
) -> Self {
let metrics = TestEnvironmentMetrics::new(&dependencies.registry)
.expect("Metrics need to be registered");
@@ -211,19 +209,21 @@ impl TestEnvironment {
let spawn_handle = dependencies.task_manager.spawn_handle();
spawn_handle.spawn_blocking("overseer", "overseer", overseer.run().boxed());
- let registry_clone = dependencies.registry.clone();
- dependencies.task_manager.spawn_handle().spawn_blocking(
- "prometheus",
- "test-environment",
- async move {
- prometheus_endpoint::init_prometheus(
- SocketAddr::new(std::net::IpAddr::V4(Ipv4Addr::LOCALHOST), 9999),
- registry_clone,
- )
- .await
- .unwrap();
- },
- );
+ if with_prometheus_endpoint {
+ let registry_clone = dependencies.registry.clone();
+ dependencies.task_manager.spawn_handle().spawn_blocking(
+ "prometheus",
+ "test-environment",
+ async move {
+ prometheus_endpoint::init_prometheus(
+ SocketAddr::new(std::net::IpAddr::V4(Ipv4Addr::LOCALHOST), 9999),
+ registry_clone,
+ )
+ .await
+ .unwrap();
+ },
+ );
+ }
TestEnvironment {
runtime_handle: dependencies.runtime.handle().clone(),
@@ -411,41 +411,3 @@ impl TestEnvironment {
usage
}
}
-
-#[derive(Debug, Serialize, Deserialize)]
-pub struct BenchmarkUsage {
- benchmark_name: String,
- network_usage: Vec<ResourceUsage>,
- cpu_usage: Vec<ResourceUsage>,
-}
-
-impl std::fmt::Display for BenchmarkUsage {
- fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
- write!(
- f,
- "\n{}\n\n{}\n{}\n\n{}\n{}\n",
- self.benchmark_name.purple(),
- format!("{:<32}{:>12}{:>12}", "Network usage, KiB", "total", "per block").blue(),
- self.network_usage
- .iter()
- .map(|v| v.to_string())
- .collect::<Vec<String>>()
- .join("\n"),
- format!("{:<32}{:>12}{:>12}", "CPU usage in seconds", "total", "per block").blue(),
- self.cpu_usage.iter().map(|v| v.to_string()).collect::<Vec<String>>().join("\n")
- )
- }
-}
-
-#[derive(Debug, Serialize, Deserialize)]
-pub struct ResourceUsage {
- resource_name: String,
- total: f64,
- per_block: f64,
-}
-
-impl std::fmt::Display for ResourceUsage {
- fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
- write!(f, "{:<32}{:>12.3}{:>12.3}", self.resource_name.cyan(), self.total, self.per_block)
- }
-}
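Illustration only: the new `with_prometheus_endpoint` flag lets the CLI benchmark keep the Prometheus endpoint on the fixed port 9999 while the regression tests run under `cargo nextest` pass `false`, presumably so parallel test processes do not all try to bind the same port. The constructor arguments not shown in this hunk are elided.

    // Hypothetical call site; `...` stands for the remaining `TestEnvironment::new` arguments.
    // let env = TestEnvironment::new(..., overseer, overseer_handle, authorities,
    //     /* with_prometheus_endpoint */ false);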
diff --git a/polkadot/node/subsystem-bench/src/core/keyring.rs b/polkadot/node/subsystem-bench/src/lib/keyring.rs
similarity index 100%
rename from polkadot/node/subsystem-bench/src/core/keyring.rs
rename to polkadot/node/subsystem-bench/src/lib/keyring.rs
diff --git a/polkadot/node/subsystem-bench/src/core/mod.rs b/polkadot/node/subsystem-bench/src/lib/lib.rs
similarity index 88%
rename from polkadot/node/subsystem-bench/src/core/mod.rs
rename to polkadot/node/subsystem-bench/src/lib/lib.rs
index 764184c5b377..d06f2822a895 100644
--- a/polkadot/node/subsystem-bench/src/core/mod.rs
+++ b/polkadot/node/subsystem-bench/src/lib/lib.rs
@@ -15,11 +15,14 @@
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
// The validator index that represent the node that is under test.
-pub(crate) const NODE_UNDER_TEST: u32 = 0;
+pub const NODE_UNDER_TEST: u32 = 0;
-pub(crate) mod configuration;
+pub mod approval;
+pub mod availability;
+pub mod configuration;
pub(crate) mod display;
pub(crate) mod environment;
pub(crate) mod keyring;
pub(crate) mod mock;
pub(crate) mod network;
+pub mod usage;
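With `configuration`, `approval`, `availability`, and `usage` made public, the regression tests in the network crates can pull these types in through the library target. A minimal import sketch, assuming the crate keeps the `polkadot-subsystem-bench` package name:

    use polkadot_subsystem_bench::{
        configuration::TestConfiguration,
        usage::BenchmarkUsage,
    };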
diff --git a/polkadot/node/subsystem-bench/src/core/mock/av_store.rs b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs
similarity index 99%
rename from polkadot/node/subsystem-bench/src/core/mock/av_store.rs
rename to polkadot/node/subsystem-bench/src/lib/mock/av_store.rs
index 0a7725c91e04..41c4fe2cbadc 100644
--- a/polkadot/node/subsystem-bench/src/core/mock/av_store.rs
+++ b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs
@@ -16,7 +16,7 @@
//! A generic av store subsystem mockup suitable to be used in benchmarks.
-use crate::core::network::{HandleNetworkMessage, NetworkMessage};
+use crate::network::{HandleNetworkMessage, NetworkMessage};
use futures::{channel::oneshot, FutureExt};
use parity_scale_codec::Encode;
use polkadot_node_network_protocol::request_response::{
diff --git a/polkadot/node/subsystem-bench/src/core/mock/chain_api.rs b/polkadot/node/subsystem-bench/src/lib/mock/chain_api.rs
similarity index 100%
rename from polkadot/node/subsystem-bench/src/core/mock/chain_api.rs
rename to polkadot/node/subsystem-bench/src/lib/mock/chain_api.rs
diff --git a/polkadot/node/subsystem-bench/src/core/mock/dummy.rs b/polkadot/node/subsystem-bench/src/lib/mock/dummy.rs
similarity index 100%
rename from polkadot/node/subsystem-bench/src/core/mock/dummy.rs
rename to polkadot/node/subsystem-bench/src/lib/mock/dummy.rs
diff --git a/polkadot/node/subsystem-bench/src/core/mock/mod.rs b/polkadot/node/subsystem-bench/src/lib/mock/mod.rs
similarity index 97%
rename from polkadot/node/subsystem-bench/src/core/mock/mod.rs
rename to polkadot/node/subsystem-bench/src/lib/mock/mod.rs
index 46fdeb196c01..6dda9a47d398 100644
--- a/polkadot/node/subsystem-bench/src/core/mock/mod.rs
+++ b/polkadot/node/subsystem-bench/src/lib/mock/mod.rs
@@ -34,9 +34,10 @@ impl HeadSupportsParachains for AlwaysSupportsParachains {
}
// An orchestra with dummy subsystems
+#[macro_export]
macro_rules! dummy_builder {
($spawn_task_handle: ident, $metrics: ident) => {{
- use $crate::core::mock::dummy::*;
+ use $crate::mock::dummy::*;
// Initialize a mock overseer.
// All subsystem except approval_voting and approval_distribution are mock subsystems.
@@ -72,7 +73,6 @@ macro_rules! dummy_builder {
.spawner(SpawnGlue($spawn_task_handle))
}};
}
-pub(crate) use dummy_builder;
#[derive(Clone)]
pub struct TestSyncOracle {}
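Note on the `#[macro_export]` change above: `dummy_builder!` is now exported at the crate root rather than re-exported with `pub(crate) use`, so other modules invoke it by path. A hypothetical in-crate call site, using the two identifiers the macro expects:

    // let dummy_overseer_builder = crate::dummy_builder!(spawn_task_handle, metrics);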
diff --git a/polkadot/node/subsystem-bench/src/core/mock/network_bridge.rs b/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs
similarity index 99%
rename from polkadot/node/subsystem-bench/src/core/mock/network_bridge.rs
rename to polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs
index 4682c7ec79ae..d598f6447d3d 100644
--- a/polkadot/node/subsystem-bench/src/core/mock/network_bridge.rs
+++ b/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs
@@ -17,7 +17,7 @@
//! Mocked `network-bridge` subsystems that uses a `NetworkInterface` to access
//! the emulated network.
-use crate::core::{
+use crate::{
configuration::TestAuthorities,
network::{NetworkEmulatorHandle, NetworkInterfaceReceiver, NetworkMessage, RequestExt},
};
diff --git a/polkadot/node/subsystem-bench/src/core/mock/runtime_api.rs b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs
similarity index 99%
rename from polkadot/node/subsystem-bench/src/core/mock/runtime_api.rs
rename to polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs
index 0dd76efcbaf0..53faf562f03c 100644
--- a/polkadot/node/subsystem-bench/src/core/mock/runtime_api.rs
+++ b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs
@@ -16,7 +16,7 @@
//! A generic runtime api subsystem mockup suitable to be used in benchmarks.
-use crate::core::configuration::{TestAuthorities, TestConfiguration};
+use crate::configuration::{TestAuthorities, TestConfiguration};
use bitvec::prelude::BitVec;
use futures::FutureExt;
use itertools::Itertools;
diff --git a/polkadot/node/subsystem-bench/src/core/network.rs b/polkadot/node/subsystem-bench/src/lib/network.rs
similarity index 99%
rename from polkadot/node/subsystem-bench/src/core/network.rs
rename to polkadot/node/subsystem-bench/src/lib/network.rs
index e9124726d7c0..1e09441792d5 100644
--- a/polkadot/node/subsystem-bench/src/core/network.rs
+++ b/polkadot/node/subsystem-bench/src/lib/network.rs
@@ -33,7 +33,7 @@
// |
// Subsystems under test
-use crate::core::{
+use crate::{
configuration::{random_latency, TestAuthorities, TestConfiguration},
environment::TestEnvironmentDependencies,
NODE_UNDER_TEST,
diff --git a/polkadot/node/subsystem-bench/src/lib/usage.rs b/polkadot/node/subsystem-bench/src/lib/usage.rs
new file mode 100644
index 000000000000..b83ef7d98d91
--- /dev/null
+++ b/polkadot/node/subsystem-bench/src/lib/usage.rs
@@ -0,0 +1,146 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Test usage implementation
+
+use colored::Colorize;
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct BenchmarkUsage {
+ pub benchmark_name: String,
+ pub network_usage: Vec<ResourceUsage>,
+ pub cpu_usage: Vec<ResourceUsage>,
+}
+
+impl std::fmt::Display for BenchmarkUsage {
+ fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+ write!(
+ f,
+ "\n{}\n\n{}\n{}\n\n{}\n{}\n",
+ self.benchmark_name.purple(),
+ format!("{:<32}{:>12}{:>12}", "Network usage, KiB", "total", "per block").blue(),
+ self.network_usage
+ .iter()
+ .map(|v| v.to_string())
+ .collect::<Vec<String>>()
+ .join("\n"),
+ format!("{:<32}{:>12}{:>12}", "CPU usage, seconds", "total", "per block").blue(),
+ self.cpu_usage.iter().map(|v| v.to_string()).collect::<Vec<String>>().join("\n")
+ )
+ }
+}
+
+impl BenchmarkUsage {
+ pub fn average(usages: &[Self]) -> Self {
+ let all_network_usages: Vec<&ResourceUsage> =
+ usages.iter().flat_map(|v| &v.network_usage).collect();
+ let all_cpu_usage: Vec<&ResourceUsage> = usages.iter().flat_map(|v| &v.cpu_usage).collect();
+
+ Self {
+ benchmark_name: usages.first().map(|v| v.benchmark_name.clone()).unwrap_or_default(),
+ network_usage: ResourceUsage::average_by_resource_name(&all_network_usages),
+ cpu_usage: ResourceUsage::average_by_resource_name(&all_cpu_usage),
+ }
+ }
+
+ pub fn check_network_usage(&self, checks: &[ResourceUsageCheck]) -> Vec<String> {
+ check_usage(&self.benchmark_name, &self.network_usage, checks)
+ }
+
+ pub fn check_cpu_usage(&self, checks: &[ResourceUsageCheck]) -> Vec<String> {
+ check_usage(&self.benchmark_name, &self.cpu_usage, checks)
+ }
+
+ pub fn cpu_usage_diff(&self, other: &Self, resource_name: &str) -> Option<f64> {
+ let self_res = self.cpu_usage.iter().find(|v| v.resource_name == resource_name);
+ let other_res = other.cpu_usage.iter().find(|v| v.resource_name == resource_name);
+
+ match (self_res, other_res) {
+ (Some(self_res), Some(other_res)) => Some(self_res.diff(other_res)),
+ _ => None,
+ }
+ }
+}
+
+fn check_usage(
+ benchmark_name: &str,
+ usage: &[ResourceUsage],
+ checks: &[ResourceUsageCheck],
+) -> Vec<String> {
+ checks
+ .iter()
+ .filter_map(|check| {
+ check_resource_usage(usage, check)
+ .map(|message| format!("{}: {}", benchmark_name, message))
+ })
+ .collect()
+}
+
+fn check_resource_usage(
+ usage: &[ResourceUsage],
+ (resource_name, base, precision): &ResourceUsageCheck,
+) -> Option<String> {
+ if let Some(usage) = usage.iter().find(|v| v.resource_name == *resource_name) {
+ let diff = (base - usage.per_block).abs() / base;
+ if diff < *precision {
+ None
+ } else {
+ Some(format!(
+ "The resource `{}` is expected to be equal to {} with a precision {}, but the current value is {}",
+ resource_name, base, precision, usage.per_block
+ ))
+ }
+ } else {
+ Some(format!("The resource `{}` is not found", resource_name))
+ }
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct ResourceUsage {
+ pub resource_name: String,
+ pub total: f64,
+ pub per_block: f64,
+}
+
+impl std::fmt::Display for ResourceUsage {
+ fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+ write!(f, "{:<32}{:>12.3}{:>12.3}", self.resource_name.cyan(), self.total, self.per_block)
+ }
+}
+
+impl ResourceUsage {
+ fn average_by_resource_name(usages: &[&Self]) -> Vec {
+ let mut by_name: HashMap<String, Vec<&Self>> = Default::default();
+ for usage in usages {
+ by_name.entry(usage.resource_name.clone()).or_default().push(usage);
+ }
+ let mut average = vec![];
+ for (resource_name, values) in by_name {
+ let total = values.iter().map(|v| v.total).sum::<f64>() / values.len() as f64;
+ let per_block = values.iter().map(|v| v.per_block).sum::<f64>() / values.len() as f64;
+ average.push(Self { resource_name, total, per_block });
+ }
+ average
+ }
+
+ fn diff(&self, other: &Self) -> f64 {
+ (self.per_block - other.per_block).abs() / self.per_block
+ }
+}
+
+type ResourceUsageCheck<'a> = (&'a str, f64, f64);
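Illustration only (not part of the patch): a `ResourceUsageCheck` is `(resource_name, base, precision)`, and `check_resource_usage` passes when `|base - per_block| / base < precision`. For example, with `base = 0.100` seconds per block and `precision = 0.05`, an observed `per_block` of 0.103 passes (relative difference 0.03), while 0.110 fails (relative difference 0.10) and yields a message. The resource name and numbers below are hypothetical.

    // Hypothetical regression assertion built on the API above.
    fn assert_cpu_usage_within_tolerance(usage: &BenchmarkUsage) {
        let messages = usage.check_cpu_usage(&[("availability-recovery", 0.100, 0.05)]);
        assert!(messages.is_empty(), "{}", messages.join("\n"));
    }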