From a1303ff507db4fbd6468e6bee5572279aba0e377 Mon Sep 17 00:00:00 2001
From: Christopher Berner
Date: Tue, 21 Nov 2023 07:26:14 -0800
Subject: [PATCH] Tune fuzzer for better throughput

This does not affect coverage metrics and makes it ~3x faster
---
 fuzz/fuzz_targets/fuzz_redb.rs | 2 +-
 justfile                       | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/fuzz/fuzz_targets/fuzz_redb.rs b/fuzz/fuzz_targets/fuzz_redb.rs
index a52509da..aaf285ce 100644
--- a/fuzz/fuzz_targets/fuzz_redb.rs
+++ b/fuzz/fuzz_targets/fuzz_redb.rs
@@ -15,7 +15,7 @@ use redb::backends::FileBackend;
 use crate::FuzzerSavepoint::{Ephemeral, NotYetDurablePersistent, Persistent};
 
 // These slow down the fuzzer, so don't create too many
-const MAX_PERSISTENT_SAVEPOINTS: usize = 20;
+const MAX_PERSISTENT_SAVEPOINTS: usize = 10;
 // Table to count which transactions have been successfully committed so that the reference BtreeMap can be kept in sync
 const COUNTER_TABLE: TableDefinition<(), u64> = TableDefinition::new("transaction_counter");
 const TABLE_DEF: TableDefinition = TableDefinition::new("fuzz_table");
diff --git a/justfile b/justfile
index 8a674775..28a2fc6c 100644
--- a/justfile
+++ b/justfile
@@ -37,13 +37,13 @@ watch +args='test':
     cargo watch --clear --exec "{{args}}"
 
 fuzz: pre
-    cargo fuzz run --sanitizer=none fuzz_redb -- -max_len=100000
+    cargo fuzz run --sanitizer=none fuzz_redb -- -max_len=10000
 
 fuzz_cmin:
-    cargo fuzz cmin --sanitizer=none fuzz_redb -- -max_len=100000
+    cargo fuzz cmin --sanitizer=none fuzz_redb -- -max_len=10000
 
 fuzz_ci: pre
-    cargo fuzz run --sanitizer=none fuzz_redb -- -max_len=100000 -max_total_time=60
+    cargo fuzz run --sanitizer=none fuzz_redb -- -max_len=10000 -max_total_time=60
 
 fuzz_coverage: pre
     #!/usr/bin/env bash