From 0c99de7c4b9931989824f66dab83cc644578a75c Mon Sep 17 00:00:00 2001 From: Innokentii Sennovskii Date: Tue, 27 Feb 2024 17:42:00 +0000 Subject: [PATCH] fix: Fix races in slab allocator and lookup tables and add prepending for op_queues (#4754) 1. Fixes a race condition in lookup multitable initialisation 2. Fixes a race condition in slab allocator 3. Adds an option to prepend the EccOpQueue to another one (useful, if we ever need to execute circuits in parallel) The first 2 issues appeared when I tried to run goblin_ultra circuit construction in parallel. It led to race conditions. ECCOpQueue holds instructions emitted by goblin_ultra when we "promise" that a particular non-native operation (addition/multiplication) is performed correctly, but we don't want to prove it right now. We aggregate these instructions and then send them to Translator and ECCVM. However, nothing is really stopping us from precomputing the next function circuit, while we are doing the recursive kernel circuit construction apart from joining these queues together. So this PR adds two functions: one for prepending a queue to the queue emitted by the circuit (so we can insert previous instructions) and one for swapping two queues (so we can easily take the new queue out of the circuit and store it in IVC without copying data). 
--- .../barretenberg/common/slab_allocator.cpp | 11 ++- .../eccvm/eccvm_builder_types.hpp | 1 + .../proof_system/op_queue/ecc_op_queue.hpp | 67 +++++++++++++++++++ .../op_queue/ecc_op_queue.test.cpp | 46 +++++++++++++ .../plookup_tables/plookup_tables.cpp | 21 ++++-- 5 files changed, 140 insertions(+), 6 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/common/slab_allocator.cpp b/barretenberg/cpp/src/barretenberg/common/slab_allocator.cpp index 545cfaf3f11..fb787c918d6 100644 --- a/barretenberg/cpp/src/barretenberg/common/slab_allocator.cpp +++ b/barretenberg/cpp/src/barretenberg/common/slab_allocator.cpp @@ -22,7 +22,10 @@ bool allocator_destroyed = false; // Slabs that are being manually managed by the user. // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) std::unordered_map> manual_slabs; - +#ifndef NO_MULTITHREADING +// The manual slabs unordered map is not thread-safe, so we need to manage access to it when multithreaded. +std::mutex manual_slabs_mutex; +#endif template inline void dbg_info(Args... 
args) { #if LOGGING == 1 @@ -219,6 +222,9 @@ std::shared_ptr get_mem_slab(size_t size) void* get_mem_slab_raw(size_t size) { auto slab = get_mem_slab(size); +#ifndef NO_MULTITHREADING + std::unique_lock lock(manual_slabs_mutex); +#endif manual_slabs[slab.get()] = slab; return slab.get(); } @@ -229,6 +235,9 @@ void free_mem_slab_raw(void* p) aligned_free(p); return; } +#ifndef NO_MULTITHREADING + std::unique_lock lock(manual_slabs_mutex); +#endif manual_slabs.erase(p); } } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/eccvm/eccvm_builder_types.hpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/eccvm/eccvm_builder_types.hpp index 99f7103afcf..96873b6fd92 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/eccvm/eccvm_builder_types.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/eccvm/eccvm_builder_types.hpp @@ -32,6 +32,7 @@ template struct VMOperation { res += static_cast(reset); return res; } + bool operator==(const VMOperation& other) const = default; }; template struct ScalarMul { uint32_t pc; diff --git a/barretenberg/cpp/src/barretenberg/proof_system/op_queue/ecc_op_queue.hpp b/barretenberg/cpp/src/barretenberg/proof_system/op_queue/ecc_op_queue.hpp index a4d5d3642eb..c264cf8e1ea 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/op_queue/ecc_op_queue.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/op_queue/ecc_op_queue.hpp @@ -38,6 +38,73 @@ class ECCOpQueue { Point get_accumulator() { return accumulator; } + /** + * @brief Prepend the information from the previous queue (used before accumulation/merge proof to be able to run + * circuit construction separately) + * + * @param previous + */ + void prepend_previous_queue(const ECCOpQueue& previous) + { + // Allocate enough space + std::vector raw_ops_updated(raw_ops.size() + previous.raw_ops.size()); + // Copy the previous raw ops to the beginning of the new vector + 
std::copy(previous.raw_ops.begin(), previous.raw_ops.end(), raw_ops_updated.begin()); + // Copy the raw ops from current queue after the ones from the previous queue (concatenate them) + std::copy(raw_ops.begin(), + raw_ops.end(), + std::next(raw_ops_updated.begin(), static_cast(previous.raw_ops.size()))); + + // Swap raw_ops underlying storage + raw_ops.swap(raw_ops_updated); + // Do the same 3 operations for ultra_ops + for (size_t i = 0; i < 4; i++) { + // Allocate new vector + std::vector current_ultra_op(ultra_ops[i].size() + previous.ultra_ops[i].size()); + // Copy the previous ultra ops to the beginning of the new vector + std::copy(previous.ultra_ops[i].begin(), previous.ultra_ops[i].end(), current_ultra_op.begin()); + // Copy the ultra ops from current queue after the ones from the previous queue (concatenate them) + std::copy(ultra_ops[i].begin(), + ultra_ops[i].end(), + std::next(current_ultra_op.begin(), static_cast(previous.ultra_ops[i].size()))); + // Swap storage + ultra_ops[i].swap(current_ultra_op); + } + // Update sizes + current_ultra_ops_size += previous.ultra_ops[0].size(); + previous_ultra_ops_size += previous.ultra_ops[0].size(); + // Update commitments + ultra_ops_commitments = previous.ultra_ops_commitments; + previous_ultra_ops_commitments = previous.previous_ultra_ops_commitments; + } + + /** + * @brief Enable using std::swap on queues + * + */ + friend void swap(ECCOpQueue& lhs, ECCOpQueue& rhs) + { + // Swap vectors + lhs.raw_ops.swap(rhs.raw_ops); + for (size_t i = 0; i < 4; i++) { + lhs.ultra_ops[i].swap(rhs.ultra_ops[i]); + } + // Swap sizes + size_t temp = lhs.current_ultra_ops_size; + lhs.current_ultra_ops_size = rhs.current_ultra_ops_size; + rhs.current_ultra_ops_size = temp; + temp = lhs.previous_ultra_ops_size; + lhs.previous_ultra_ops_size = rhs.previous_ultra_ops_size; + rhs.previous_ultra_ops_size = temp; + // Swap commitments + auto commit_temp = lhs.previous_ultra_ops_commitments; + lhs.previous_ultra_ops_commitments = 
rhs.previous_ultra_ops_commitments; + rhs.previous_ultra_ops_commitments = commit_temp; + commit_temp = lhs.ultra_ops_commitments; + lhs.ultra_ops_commitments = rhs.ultra_ops_commitments; + rhs.ultra_ops_commitments = commit_temp; + } + /** * @brief Set the current and previous size of the ultra_ops transcript * diff --git a/barretenberg/cpp/src/barretenberg/proof_system/op_queue/ecc_op_queue.test.cpp b/barretenberg/cpp/src/barretenberg/proof_system/op_queue/ecc_op_queue.test.cpp index 7b13cce43f3..d7a69547f1b 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/op_queue/ecc_op_queue.test.cpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/op_queue/ecc_op_queue.test.cpp @@ -37,3 +37,49 @@ TEST(ECCOpQueueTest, InternalAccumulatorCorrectness) // Adding an equality op should reset the accumulator to zero (the point at infinity) EXPECT_TRUE(op_queue.get_accumulator().is_point_at_infinity()); } + +TEST(ECCOpQueueTest, PrependAndSwapTests) +{ + using point = g1::affine_element; + using scalar = fr; + + // Compute a simple point accumulation natively + auto P1 = point::random_element(); + auto P2 = point::random_element(); + auto z = scalar::random_element(); + + // Add operations to a + ECCOpQueue op_queue_a; + op_queue_a.add_accumulate(P1 + P1); + op_queue_a.mul_accumulate(P2, z + z); + + // Add different operations to b + ECCOpQueue op_queue_b; + op_queue_b.mul_accumulate(P2, z); + op_queue_b.add_accumulate(P1); + + // Add same operations as to a + ECCOpQueue op_queue_c; + op_queue_c.add_accumulate(P1 + P1); + op_queue_c.mul_accumulate(P2, z + z); + + // Swap b with a + std::swap(op_queue_b, op_queue_a); + + // Check b==c + for (size_t i = 0; i < op_queue_c.raw_ops.size(); i++) { + EXPECT_EQ(op_queue_b.raw_ops[i], op_queue_c.raw_ops[i]); + } + + // Prepend b to a + op_queue_a.prepend_previous_queue(op_queue_b); + + // Append same operations as now in a to c + op_queue_c.mul_accumulate(P2, z); + op_queue_c.add_accumulate(P1); + + // Check a==c + for (size_t 
i = 0; i < op_queue_c.raw_ops.size(); i++) { + EXPECT_EQ(op_queue_a.raw_ops[i], op_queue_c.raw_ops[i]); + } +} \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/plookup_tables.cpp b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/plookup_tables.cpp index 6a2257bf102..649eee60317 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/plookup_tables.cpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/plookup_tables.cpp @@ -1,6 +1,6 @@ #include "plookup_tables.hpp" #include "barretenberg/common/constexpr_utils.hpp" - +#include namespace bb::plookup { using namespace bb; @@ -10,10 +10,21 @@ namespace { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) std::array MULTI_TABLES; // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) -bool inited = false; +bool initialised = false; +#ifndef NO_MULTITHREADING +// The multitables initialisation procedure is not thread-safe, so we need to make sure only 1 thread gets to initialize +// them. +std::mutex multi_table_mutex; +#endif void init_multi_tables() { +#ifndef NO_MULTITHREADING + std::unique_lock lock(multi_table_mutex); +#endif + if (initialised) { + return; + } MULTI_TABLES[MultiTableId::SHA256_CH_INPUT] = sha256_tables::get_choose_input_table(MultiTableId::SHA256_CH_INPUT); MULTI_TABLES[MultiTableId::SHA256_MAJ_INPUT] = sha256_tables::get_majority_input_table(MultiTableId::SHA256_MAJ_INPUT); @@ -96,14 +107,14 @@ void init_multi_tables() keccak_tables::Rho<8, i>::get_rho_output_table(MultiTableId::KECCAK_NORMALIZE_AND_ROTATE); }); MULTI_TABLES[MultiTableId::HONK_DUMMY_MULTI] = dummy_tables::get_honk_dummy_multitable(); + initialised = true; } } // namespace - const MultiTable& create_table(const MultiTableId id) { - if (!inited) { + if (!initialised) { init_multi_tables(); - inited = true; + initialised = true; } return MULTI_TABLES[id]; }