From a4353ead1c3bc791d5a16b47e212367f155df337 Mon Sep 17 00:00:00 2001 From: igor-aptos <110557261+igor-aptos@users.noreply.github.com> Date: Mon, 18 Mar 2024 19:27:07 -0700 Subject: [PATCH] [quorum store] constrain txn pull size to sender_max_total_txns (#12532) (#12550) ## Description We were not constraining the transaction pull size on the sender side, even though we check it on the receiver side; this was an oversight. We had not run into it previously because the backpressure configs meant we never tried to pull more than the sender/receiver-side limit. --- consensus/src/quorum_store/batch_generator.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/consensus/src/quorum_store/batch_generator.rs b/consensus/src/quorum_store/batch_generator.rs index 11a8373d69dc8..6a868578d9f41 100644 --- a/consensus/src/quorum_store/batch_generator.rs +++ b/consensus/src/quorum_store/batch_generator.rs @@ -405,7 +405,11 @@ impl BatchGenerator { let dynamic_pull_max_txn = std::cmp::max( (since_last_non_empty_pull_ms as f64 / 1000.0 * dynamic_pull_txn_per_s as f64) as u64, 1); - let batches = self.handle_scheduled_pull(dynamic_pull_max_txn).await; + let pull_max_txn = std::cmp::min( + dynamic_pull_max_txn, + self.config.sender_max_total_txns as u64, + ); + let batches = self.handle_scheduled_pull(pull_max_txn).await; if !batches.is_empty() { last_non_empty_pull = tick_start;