From 37d7cd784bc6dfe366d1eabc2b7be8cca4359f7b Mon Sep 17 00:00:00 2001 From: Lucas Xia Date: Thu, 14 Nov 2024 12:40:27 -0500 Subject: [PATCH 1/5] feat: split up eccvm proof into two proofs (#9914) Splits the IPA proof from the rest of the ECCVM proof. We want the IPA proof to be separate from the rest of the ECCVM proof so we don't have to run IPA accumulation in the tube and base rollup circuits. --- .../benchmark/goblin_bench/eccvm.bench.cpp | 2 +- .../commitment_schemes/ipa/ipa.hpp | 1 + .../eccvm/eccvm_composer.test.cpp | 4 +- .../src/barretenberg/eccvm/eccvm_flavor.hpp | 65 +++++++++++++------ .../src/barretenberg/eccvm/eccvm_prover.cpp | 14 ++-- .../src/barretenberg/eccvm/eccvm_prover.hpp | 11 ++-- .../eccvm/eccvm_transcript.test.cpp | 42 +++++++++--- .../src/barretenberg/eccvm/eccvm_verifier.cpp | 7 +- .../src/barretenberg/eccvm/eccvm_verifier.hpp | 3 +- .../cpp/src/barretenberg/goblin/types.hpp | 5 +- .../honk/proof_system/types/proof.hpp | 6 ++ .../eccvm_recursive_verifier.cpp | 8 ++- .../eccvm_recursive_verifier.hpp | 3 +- .../eccvm_recursive_verifier.test.cpp | 4 +- .../goblin_recursive_verifier.test.cpp | 2 +- 15 files changed, 121 insertions(+), 56 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/benchmark/goblin_bench/eccvm.bench.cpp b/barretenberg/cpp/src/barretenberg/benchmark/goblin_bench/eccvm.bench.cpp index f796d893068..af7ca5ea43d 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/goblin_bench/eccvm.bench.cpp +++ b/barretenberg/cpp/src/barretenberg/benchmark/goblin_bench/eccvm.bench.cpp @@ -61,7 +61,7 @@ void eccvm_prove(State& state) noexcept Builder builder = generate_trace(target_num_gates); ECCVMProver prover(builder); for (auto _ : state) { - auto proof = prover.construct_proof(); + ECCVMProof proof = prover.construct_proof(); }; } diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp index 4ba2091dccf..3a0afa05a06 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp @@ -142,6 +142,7 @@ template class IPA { size_t poly_length = polynomial.size(); + // TODO(https://github.com/AztecProtocol/barretenberg/issues/1150): Hash more things here. // Step 1. 
// Send polynomial degree + 1 = d to the verifier transcript->send_to_verifier("IPA:poly_degree_plus_1", static_cast(poly_length)); diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_composer.test.cpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_composer.test.cpp index 4fdd99e5610..364c602b7d2 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_composer.test.cpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_composer.test.cpp @@ -63,7 +63,7 @@ TEST_F(ECCVMTests, BaseCase) { ECCVMCircuitBuilder builder = generate_circuit(&engine); ECCVMProver prover(builder); - HonkProof proof = prover.construct_proof(); + ECCVMProof proof = prover.construct_proof(); ECCVMVerifier verifier(prover.key); bool verified = verifier.verify_proof(proof); @@ -79,7 +79,7 @@ TEST_F(ECCVMTests, EqFails) builder.op_queue->num_transcript_rows++; ECCVMProver prover(builder); - auto proof = prover.construct_proof(); + ECCVMProof proof = prover.construct_proof(); ECCVMVerifier verifier(prover.key); bool verified = verifier.verify_proof(proof); ASSERT_FALSE(verified); diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_flavor.hpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_flavor.hpp index 778b43551f4..a636fbb5452 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_flavor.hpp @@ -951,11 +951,6 @@ class ECCVMFlavor { FF translation_eval_z1; FF translation_eval_z2; Commitment shplonk_q2_comm; - uint32_t ipa_poly_degree; - std::vector ipa_l_comms; - std::vector ipa_r_comms; - Commitment ipa_G_0_eval; - FF ipa_a_0_eval; Transcript() = default; @@ -1188,20 +1183,6 @@ class ECCVMFlavor { NativeTranscript::template deserialize_from_buffer(NativeTranscript::proof_data, num_frs_read); shplonk_q2_comm = NativeTranscript::template deserialize_from_buffer(proof_data, num_frs_read); - - ipa_poly_degree = NativeTranscript::template deserialize_from_buffer(NativeTranscript::proof_data, - num_frs_read); - - for (size_t i = 0; i < CONST_ECCVM_LOG_N; ++i) { - ipa_l_comms.emplace_back(NativeTranscript::template deserialize_from_buffer( - NativeTranscript::proof_data, num_frs_read)); - ipa_r_comms.emplace_back(NativeTranscript::template deserialize_from_buffer( - NativeTranscript::proof_data, num_frs_read)); - } - ipa_G_0_eval = NativeTranscript::template deserialize_from_buffer(NativeTranscript::proof_data, - num_frs_read); - ipa_a_0_eval = - NativeTranscript::template deserialize_from_buffer(NativeTranscript::proof_data, num_frs_read); } void serialize_full_transcript() @@ -1344,6 +1325,52 @@ class ECCVMFlavor { NativeTranscript::template serialize_to_buffer(shplonk_q2_comm, NativeTranscript::proof_data); + ASSERT(NativeTranscript::proof_data.size() == old_proof_length); + } + }; + + /** + * @brief Derived class that defines proof structure for ECCVM IPA proof, as well as supporting functions. 
+ * + */ + class IPATranscript : public NativeTranscript { + public: + uint32_t ipa_poly_degree; + std::vector ipa_l_comms; + std::vector ipa_r_comms; + Commitment ipa_G_0_eval; + FF ipa_a_0_eval; + + IPATranscript() = default; + + IPATranscript(const HonkProof& proof) + : NativeTranscript(proof) + {} + + void deserialize_full_transcript() + { + // take current proof and put them into the struct + size_t num_frs_read = 0; + ipa_poly_degree = NativeTranscript::template deserialize_from_buffer(NativeTranscript::proof_data, + num_frs_read); + + for (size_t i = 0; i < CONST_ECCVM_LOG_N; ++i) { + ipa_l_comms.emplace_back(NativeTranscript::template deserialize_from_buffer( + NativeTranscript::proof_data, num_frs_read)); + ipa_r_comms.emplace_back(NativeTranscript::template deserialize_from_buffer( + NativeTranscript::proof_data, num_frs_read)); + } + ipa_G_0_eval = NativeTranscript::template deserialize_from_buffer(NativeTranscript::proof_data, + num_frs_read); + ipa_a_0_eval = + NativeTranscript::template deserialize_from_buffer(NativeTranscript::proof_data, num_frs_read); + } + + void serialize_full_transcript() + { + size_t old_proof_length = NativeTranscript::proof_data.size(); + NativeTranscript::proof_data.clear(); + NativeTranscript::template serialize_to_buffer(ipa_poly_degree, NativeTranscript::proof_data); for (size_t i = 0; i < CONST_ECCVM_LOG_N; ++i) { NativeTranscript::template serialize_to_buffer(ipa_l_comms[i], NativeTranscript::proof_data); diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.cpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.cpp index f16243fbb1c..99f1350f920 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.cpp @@ -12,8 +12,11 @@ namespace bb { -ECCVMProver::ECCVMProver(CircuitBuilder& builder, const std::shared_ptr& transcript) +ECCVMProver::ECCVMProver(CircuitBuilder& builder, + const std::shared_ptr& transcript, + const std::shared_ptr& ipa_transcript) : transcript(transcript) + , ipa_transcript(ipa_transcript) { PROFILE_THIS_NAME("ECCVMProver(CircuitBuilder&)"); @@ -183,7 +186,7 @@ void ECCVMProver::execute_pcs_rounds() const OpeningClaim batch_opening_claim = Shplonk::prove(key->commitment_key, opening_claims, transcript); // Compute the opening proof for the batched opening claim with the univariate PCS - PCS::compute_opening_proof(key->commitment_key, batch_opening_claim, transcript); + PCS::compute_opening_proof(key->commitment_key, batch_opening_claim, ipa_transcript); // Produce another challenge passed as input to the translator verifier translation_batching_challenge_v = transcript->template get_challenge("Translation:batching_challenge"); @@ -191,13 +194,12 @@ void ECCVMProver::execute_pcs_rounds() vinfo("computed opening proof"); } -HonkProof ECCVMProver::export_proof() +ECCVMProof ECCVMProver::export_proof() { - proof = transcript->export_proof(); - return proof; + return { transcript->export_proof(), ipa_transcript->export_proof() }; } -HonkProof ECCVMProver::construct_proof() +ECCVMProof ECCVMProver::construct_proof() { PROFILE_THIS_NAME("ECCVMProver::construct_proof"); diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.hpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.hpp index 88b58903803..dab9d015ab2 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.hpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.hpp @@ -27,7 +27,8 @@ class ECCVMProver { using CircuitBuilder = typename 
Flavor::CircuitBuilder; explicit ECCVMProver(CircuitBuilder& builder, - const std::shared_ptr& transcript = std::make_shared()); + const std::shared_ptr& transcript = std::make_shared(), + const std::shared_ptr& ipa_transcript = std::make_shared()); BB_PROFILE void execute_preamble_round(); BB_PROFILE void execute_wire_commitments_round(); @@ -37,10 +38,11 @@ class ECCVMProver { BB_PROFILE void execute_pcs_rounds(); BB_PROFILE void execute_transcript_consistency_univariate_opening_round(); - HonkProof export_proof(); - HonkProof construct_proof(); + ECCVMProof export_proof(); + ECCVMProof construct_proof(); std::shared_ptr transcript; + std::shared_ptr ipa_transcript; TranslationEvaluations translation_evaluations; @@ -62,9 +64,6 @@ class ECCVMProver { FF translation_batching_challenge_v; // to be rederived by the translator verifier SumcheckOutput sumcheck_output; - - private: - HonkProof proof; }; } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_transcript.test.cpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_transcript.test.cpp index 79d85d60929..f562f36c6f8 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_transcript.test.cpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_transcript.test.cpp @@ -205,6 +205,19 @@ class ECCVMTranscriptTests : public ::testing::Test { manifest_expected.add_challenge(round, "Shplonk:z"); round++; + manifest_expected.add_challenge(round, "Translation:batching_challenge"); + + return manifest_expected; + } + + TranscriptManifest construct_eccvm_ipa_manifest() + { + TranscriptManifest manifest_expected; + // Size of types is number of bb::frs needed to represent the type + size_t frs_per_Fr = bb::field_conversion::calc_num_bn254_frs(); + size_t frs_per_G = bb::field_conversion::calc_num_bn254_frs(); + size_t frs_per_uint32 = bb::field_conversion::calc_num_bn254_frs(); + size_t round = 0; manifest_expected.add_entry(round, "IPA:poly_degree_plus_1", frs_per_uint32); manifest_expected.add_challenge(round, "IPA:generator_challenge"); @@ -220,8 +233,6 @@ class ECCVMTranscriptTests : public ::testing::Test { round++; manifest_expected.add_entry(round, "IPA:G_0", frs_per_G); manifest_expected.add_entry(round, "IPA:a_0", frs_per_Fr); - manifest_expected.add_challenge(round, "Translation:batching_challenge"); - return manifest_expected; } @@ -271,7 +282,7 @@ TEST_F(ECCVMTranscriptTests, ProverManifestConsistency) // Automatically generate a transcript manifest by constructing a proof ECCVMProver prover(builder); - auto proof = prover.construct_proof(); + ECCVMProof proof = prover.construct_proof(); // Check that the prover generated manifest agrees with the manifest hard coded in this suite auto manifest_expected = this->construct_eccvm_honk_manifest(prover.key->circuit_size); @@ -281,6 +292,15 @@ TEST_F(ECCVMTranscriptTests, ProverManifestConsistency) for (size_t round = 0; round < manifest_expected.size(); ++round) { ASSERT_EQ(prover_manifest[round], manifest_expected[round]) << "Prover manifest discrepency in round " << round; } + + auto ipa_manifest_expected = this->construct_eccvm_ipa_manifest(); + auto prover_ipa_manifest = prover.ipa_transcript->get_manifest(); + + // Note: a manifest can be printed using manifest.print() + for (size_t round = 0; round < ipa_manifest_expected.size(); ++round) { + ASSERT_EQ(prover_ipa_manifest[round], ipa_manifest_expected[round]) + << "IPA prover manifest discrepency in round " << round; + } } /** @@ -295,7 +315,7 @@ TEST_F(ECCVMTranscriptTests, VerifierManifestConsistency) // 
Automatically generate a transcript manifest in the prover by constructing a proof ECCVMProver prover(builder); - auto proof = prover.construct_proof(); + ECCVMProof proof = prover.construct_proof(); // Automatically generate a transcript manifest in the verifier by verifying a proof ECCVMVerifier verifier(prover.key); @@ -347,24 +367,28 @@ TEST_F(ECCVMTranscriptTests, StructureTest) // Automatically generate a transcript manifest by constructing a proof ECCVMProver prover(builder); - auto proof = prover.construct_proof(); + ECCVMProof proof = prover.construct_proof(); ECCVMVerifier verifier(prover.key); EXPECT_TRUE(verifier.verify_proof(proof)); // try deserializing and serializing with no changes and check proof is still valid prover.transcript->deserialize_full_transcript(); prover.transcript->serialize_full_transcript(); - EXPECT_TRUE( - verifier.verify_proof(prover.transcript->proof_data)); // we have changed nothing so proof is still valid + EXPECT_TRUE(verifier.verify_proof( + { prover.transcript->proof_data, + prover.ipa_transcript->proof_data })); // we have changed nothing so proof is still valid typename Flavor::Commitment one_group_val = Flavor::Commitment::one(); auto rand_val = Flavor::FF::random_element(); prover.transcript->transcript_Px_comm = one_group_val * rand_val; // choose random object to modify EXPECT_TRUE(verifier.verify_proof( - prover.transcript->proof_data)); // we have not serialized it back to the proof so it should still be fine + { prover.transcript->proof_data, prover.ipa_transcript->proof_data })); // we have not serialized it back to the + // proof so it should still be fine prover.transcript->serialize_full_transcript(); - EXPECT_FALSE(verifier.verify_proof(prover.transcript->proof_data)); // the proof is now wrong after serializing it + EXPECT_FALSE( + verifier.verify_proof({ prover.transcript->proof_data, + prover.ipa_transcript->proof_data })); // the proof is now wrong after serializing it prover.transcript->deserialize_full_transcript(); EXPECT_EQ(static_cast(prover.transcript->transcript_Px_comm), diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_verifier.cpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_verifier.cpp index 7924f578387..210b9ee6ac4 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_verifier.cpp @@ -8,7 +8,7 @@ namespace bb { /** * @brief This function verifies an ECCVM Honk proof for given program settings. 
*/ -bool ECCVMVerifier::verify_proof(const HonkProof& proof) +bool ECCVMVerifier::verify_proof(const ECCVMProof& proof) { using Curve = typename Flavor::Curve; using Shplemini = ShpleminiVerifier_; @@ -16,7 +16,8 @@ bool ECCVMVerifier::verify_proof(const HonkProof& proof) using OpeningClaim = OpeningClaim; RelationParameters relation_parameters; - transcript = std::make_shared(proof); + transcript = std::make_shared(proof.pre_ipa_proof); + ipa_transcript = std::make_shared(proof.ipa_proof); VerifierCommitments commitments{ key }; CommitmentLabels commitment_labels; @@ -127,7 +128,7 @@ bool ECCVMVerifier::verify_proof(const HonkProof& proof) Shplonk::reduce_verification(key->pcs_verification_key->get_g1_identity(), opening_claims, transcript); const bool batched_opening_verified = - PCS::reduce_verify(key->pcs_verification_key, batch_opening_claim, transcript); + PCS::reduce_verify(key->pcs_verification_key, batch_opening_claim, ipa_transcript); vinfo("eccvm sumcheck verified?: ", sumcheck_verified.value()); vinfo("batch opening verified?: ", batched_opening_verified); return sumcheck_verified.value() && batched_opening_verified; diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_verifier.hpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_verifier.hpp index 10e49d08a76..c772d99b517 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_verifier.hpp @@ -22,10 +22,11 @@ class ECCVMVerifier { explicit ECCVMVerifier(const std::shared_ptr& proving_key) : ECCVMVerifier(std::make_shared(proving_key)){}; - bool verify_proof(const HonkProof& proof); + bool verify_proof(const ECCVMProof& proof); std::shared_ptr key; std::map commitments; std::shared_ptr transcript; + std::shared_ptr ipa_transcript; }; } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/goblin/types.hpp b/barretenberg/cpp/src/barretenberg/goblin/types.hpp index c9c13d81e93..ab3f772d17d 100644 --- a/barretenberg/cpp/src/barretenberg/goblin/types.hpp +++ b/barretenberg/cpp/src/barretenberg/goblin/types.hpp @@ -15,13 +15,14 @@ struct GoblinProof { using FF = MegaFlavor::FF; HonkProof merge_proof; - HonkProof eccvm_proof; + ECCVMProof eccvm_proof; HonkProof translator_proof; ECCVMProver::TranslationEvaluations translation_evaluations; size_t size() const { - return merge_proof.size() + eccvm_proof.size() + translator_proof.size() + TranslationEvaluations::size(); + return merge_proof.size() + eccvm_proof.pre_ipa_proof.size() + eccvm_proof.ipa_proof.size() + + translator_proof.size() + TranslationEvaluations::size(); }; MSGPACK_FIELDS(merge_proof, eccvm_proof, translator_proof, translation_evaluations); diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/types/proof.hpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/types/proof.hpp index 297c5c2abbc..2f1777adb90 100644 --- a/barretenberg/cpp/src/barretenberg/honk/proof_system/types/proof.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/types/proof.hpp @@ -12,6 +12,12 @@ static constexpr size_t HONK_PROOF_PUBLIC_INPUT_OFFSET = 3; static constexpr size_t PUBLIC_INPUTS_SIZE_INDEX = 1; using HonkProof = std::vector; // this can be fr? 
+struct ECCVMProof { + HonkProof pre_ipa_proof; + HonkProof ipa_proof; + + MSGPACK_FIELDS(pre_ipa_proof, ipa_proof); +}; template using StdlibProof = std::vector>; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.cpp b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.cpp index e6f677aeb3b..869e11b4920 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.cpp @@ -16,7 +16,7 @@ ECCVMRecursiveVerifier_::ECCVMRecursiveVerifier_( /** * @brief This function verifies an ECCVM Honk proof for given program settings up to sumcheck. */ -template void ECCVMRecursiveVerifier_::verify_proof(const HonkProof& proof) +template void ECCVMRecursiveVerifier_::verify_proof(const ECCVMProof& proof) { using Curve = typename Flavor::Curve; using Shplemini = ShpleminiVerifier_; @@ -25,8 +25,10 @@ template void ECCVMRecursiveVerifier_::verify_proof(co RelationParameters relation_parameters; - StdlibProof stdlib_proof = bb::convert_proof_to_witness(builder, proof); + StdlibProof stdlib_proof = bb::convert_proof_to_witness(builder, proof.pre_ipa_proof); + StdlibProof stdlib_ipa_proof = bb::convert_proof_to_witness(builder, proof.ipa_proof); transcript = std::make_shared(stdlib_proof); + ipa_transcript = std::make_shared(stdlib_ipa_proof); VerifierCommitments commitments{ key }; CommitmentLabels commitment_labels; @@ -140,7 +142,7 @@ template void ECCVMRecursiveVerifier_::verify_proof(co // TODO(https://github.com/AztecProtocol/barretenberg/issues/1142): Handle this return value correctly. const typename PCS::VerifierAccumulator batched_opening_accumulator = - PCS::reduce_verify(batch_opening_claim, transcript); + PCS::reduce_verify(batch_opening_claim, ipa_transcript); ASSERT(sumcheck_verified); } diff --git a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.hpp b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.hpp index 6a590ebba40..58103e9644b 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.hpp @@ -21,11 +21,12 @@ template class ECCVMRecursiveVerifier_ { const std::shared_ptr& native_verifier_key); // TODO(https://github.com/AztecProtocol/barretenberg/issues/991): switch recursive verifiers to StdlibProof - void verify_proof(const HonkProof& proof); + void verify_proof(const ECCVMProof& proof); std::shared_ptr key; Builder* builder; std::shared_ptr transcript; + std::shared_ptr ipa_transcript; }; } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.test.cpp index 1c2d82719aa..0aae8567845 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.test.cpp @@ -77,7 +77,7 @@ template class ECCVMRecursiveTests : public ::testing { InnerBuilder builder = generate_circuit(&engine); InnerProver prover(builder); - auto proof = prover.construct_proof(); + ECCVMProof proof = prover.construct_proof(); auto verification_key = std::make_shared(prover.key); info("ECCVM Recursive Verifier"); @@ -128,7 +128,7 @@ template class 
ECCVMRecursiveTests : public ::testing InnerBuilder builder = generate_circuit(&engine); builder.op_queue->add_erroneous_equality_op_for_testing(); InnerProver prover(builder); - auto proof = prover.construct_proof(); + ECCVMProof proof = prover.construct_proof(); auto verification_key = std::make_shared(prover.key); OuterBuilder outer_circuit; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.test.cpp index 1449b0695b4..80ee773f2df 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/goblin_verifier/goblin_recursive_verifier.test.cpp @@ -112,7 +112,7 @@ TEST_F(GoblinRecursiveVerifierTests, ECCVMFailure) auto [proof, verifier_input] = create_goblin_prover_output(); // Tamper with the ECCVM proof - for (auto& val : proof.eccvm_proof) { + for (auto& val : proof.eccvm_proof.pre_ipa_proof) { if (val > 0) { // tamper by finding the tenth non-zero value and incrementing it by 1 // tamper by finding the first non-zero value // and incrementing it by 1 From e07cac7fee501a752d98ebf749f6cf31a3ff74af Mon Sep 17 00:00:00 2001 From: iakovenkos <105737703+iakovenkos@users.noreply.github.com> Date: Thu, 14 Nov 2024 18:45:51 +0100 Subject: [PATCH 2/5] feat: removed redundant scalar muls from the verifiers using shplemini (#9392) * Reduced the number of scalar multiplications to be performed by the native and recursive verifiers running shplemini * Slightly re-shuffled the entities in Translator and ECCVM, so that entitied to be shifted and their shifts form contiguous ranges * This is useful for amortizing the verification costs in the case of ZK sumcheck * The Translator recursive verifier circuit is now around 820K gates as opposed to 1700K. For other Flavors, the numbers are not as dramatic, but there's still around -10% in scalar muls and the sizes of recursive verifiers. 
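
The saving comes from the fact that a shifted polynomial has the same commitment as its unshifted counterpart, so the verifier can fold the two batching scalars together before the final multi-scalar multiplication instead of multiplying the same group element twice. The sketch below is only an illustration of that idea (the hypothetical `merge_repeated_commitments` helper and its signature are not the PR's API; the real logic lives in `ShpleminiVerifier_::remove_repeated_commitments` further down in this patch):

```cpp
// Illustrative sketch, not the actual barretenberg implementation.
// Entries [shifted_start, shifted_start + size) of `commitments` duplicate the
// entries at [to_be_shifted_start, to_be_shifted_start + size), because a shift
// shares its commitment with the original polynomial. Fold the duplicated
// scalars into the originals and drop the duplicates, saving one scalar
// multiplication per shifted polynomial.
#include <cstddef>
#include <vector>

template <typename Commitment, typename Scalar>
void merge_repeated_commitments(std::vector<Commitment>& commitments,
                                std::vector<Scalar>& scalars,
                                size_t to_be_shifted_start,
                                size_t shifted_start,
                                size_t size)
{
    // Combine the scalar of each shifted entry with the scalar of its unshifted twin.
    for (size_t i = 0; i < size; ++i) {
        scalars[to_be_shifted_start + i] = scalars[to_be_shifted_start + i] + scalars[shifted_start + i];
    }
    // Remove the now-redundant shifted entries. (The PR notes in issue 1151 that
    // erasing vector elements should eventually be avoided; kept simple here.)
    commitments.erase(commitments.begin() + static_cast<std::ptrdiff_t>(shifted_start),
                      commitments.begin() + static_cast<std::ptrdiff_t>(shifted_start + size));
    scalars.erase(scalars.begin() + static_cast<std::ptrdiff_t>(shifted_start),
                  scalars.begin() + static_cast<std::ptrdiff_t>(shifted_start + size));
}
```

Reordering the witness entities so that to-be-shifted polynomials and their shifts form contiguous ranges (see the flavor changes below) is what makes this single-pass merge possible.
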
--- .../commitment_schemes/ipa/ipa.test.cpp | 82 +++++++ .../commitment_schemes/kzg/kzg.test.cpp | 95 ++++++++ .../commitment_schemes/shplonk/shplemini.hpp | 94 +++++++- .../shplonk/shplemini.test.cpp | 1 + .../shplemini.test.cpp | 16 +- .../src/barretenberg/eccvm/eccvm_flavor.hpp | 184 ++++++++------- .../eccvm/eccvm_transcript.test.cpp | 50 ++-- .../src/barretenberg/eccvm/eccvm_verifier.cpp | 1 + .../flavor/repeated_commitments_data.hpp | 36 +++ .../eccvm_verifier/eccvm_recursive_flavor.hpp | 2 +- .../eccvm_recursive_verifier.cpp | 2 + .../decider_recursive_verifier.cpp | 17 +- .../ultra_recursive_verifier.cpp | 2 + .../translator_recursive_flavor.hpp | 2 + .../translator_recursive_verifier.cpp | 4 + .../stdlib_circuit_builders/mega_flavor.hpp | 13 +- .../mega_recursive_flavor.hpp | 3 + .../stdlib_circuit_builders/ultra_flavor.hpp | 17 +- .../ultra_recursive_flavor.hpp | 2 + .../translator_circuit_builder.hpp | 31 +-- .../translator_vm/translator_flavor.hpp | 218 ++++++++++-------- .../translator_vm/translator_verifier.cpp | 2 + .../ultra_honk/decider_verifier.cpp | 1 + 23 files changed, 634 insertions(+), 241 deletions(-) create mode 100644 barretenberg/cpp/src/barretenberg/flavor/repeated_commitments_data.hpp diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.test.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.test.cpp index 95d3ac76d03..3e40157661a 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.test.cpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.test.cpp @@ -330,3 +330,85 @@ TEST_F(IPATest, ShpleminiIPAWithShift) EXPECT_EQ(result, true); } +/** + * @brief Test the behaviour of the method ShpleminiVerifier::remove_shifted_commitments + * + */ +TEST_F(IPATest, ShpleminiIPAShiftsRemoval) +{ + using IPA = IPA; + using ShplonkProver = ShplonkProver_; + using ShpleminiVerifier = ShpleminiVerifier_; + using GeminiProver = GeminiProver_; + + const size_t n = 8; + const size_t log_n = 3; + + // Generate multilinear polynomials, their commitments (genuine and mocked) and evaluations (genuine) at a random + // point. 
+ auto mle_opening_point = this->random_evaluation_point(log_n); // sometimes denoted 'u' + auto poly1 = Polynomial::random(n); + auto poly2 = Polynomial::random(n, /*shiftable*/ 1); + auto poly3 = Polynomial::random(n, /*shiftable*/ 1); + auto poly4 = Polynomial::random(n); + + Commitment commitment1 = this->commit(poly1); + Commitment commitment2 = this->commit(poly2); + Commitment commitment3 = this->commit(poly3); + Commitment commitment4 = this->commit(poly4); + + std::vector unshifted_commitments = { commitment1, commitment2, commitment3, commitment4 }; + std::vector shifted_commitments = { commitment2, commitment3 }; + auto eval1 = poly1.evaluate_mle(mle_opening_point); + auto eval2 = poly2.evaluate_mle(mle_opening_point); + auto eval3 = poly3.evaluate_mle(mle_opening_point); + auto eval4 = poly4.evaluate_mle(mle_opening_point); + + auto eval2_shift = poly2.evaluate_mle(mle_opening_point, true); + auto eval3_shift = poly3.evaluate_mle(mle_opening_point, true); + + auto prover_transcript = NativeTranscript::prover_init_empty(); + + // Run the full prover PCS protocol: + + // Compute: + // - (d+1) opening pairs: {r, \hat{a}_0}, {-r^{2^i}, a_i}, i = 0, ..., d-1 + // - (d+1) Fold polynomials Fold_{r}^(0), Fold_{-r}^(0), and Fold^(i), i = 0, ..., d-1 + auto prover_opening_claims = GeminiProver::prove(n, + RefArray{ poly1, poly2, poly3, poly4 }, + RefArray{ poly2, poly3 }, + mle_opening_point, + this->ck(), + prover_transcript); + + const auto opening_claim = ShplonkProver::prove(this->ck(), prover_opening_claims, prover_transcript); + IPA::compute_opening_proof(this->ck(), opening_claim, prover_transcript); + + // the index of the first commitment to a polynomial to be shifted in the union of unshifted_commitments and + // shifted_commitments. in our case, it is poly2 + const size_t to_be_shifted_commitments_start = 1; + // the index of the first commitment to a shifted polynomial in the union of unshifted_commitments and + // shifted_commitments. 
in our case, it is the second occurence of poly2 + const size_t shifted_commitments_start = 4; + // number of shifted polynomials + const size_t num_shifted_commitments = 2; + const RepeatedCommitmentsData repeated_commitments = + RepeatedCommitmentsData(to_be_shifted_commitments_start, shifted_commitments_start, num_shifted_commitments); + // since commitments to poly2, poly3 and their shifts are the same group elements, we simply combine the scalar + // multipliers of commitment2 and commitment3 in one place and remove the entries of the commitments and scalars + // vectors corresponding to the "shifted" commitment + auto verifier_transcript = NativeTranscript::verifier_init_empty(prover_transcript); + + auto batch_opening_claim = ShpleminiVerifier::compute_batch_opening_claim(n, + RefVector(unshifted_commitments), + RefVector(shifted_commitments), + RefArray{ eval1, eval2, eval3, eval4 }, + RefArray{ eval2_shift, eval3_shift }, + mle_opening_point, + this->vk()->get_g1_identity(), + verifier_transcript, + repeated_commitments); + + auto result = IPA::reduce_verify_batch_opening_claim(batch_opening_claim, this->vk(), verifier_transcript); + EXPECT_EQ(result, true); +} diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/kzg/kzg.test.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/kzg/kzg.test.cpp index 38d685c3a4a..a50c0a8919a 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/kzg/kzg.test.cpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/kzg/kzg.test.cpp @@ -318,6 +318,7 @@ TYPED_TEST(KZGTest, ShpleminiKzgWithShiftAndConcatenation) mle_opening_point, this->vk()->get_g1_identity(), verifier_transcript, + {}, /* libra commitments = */ {}, /* libra evaluations = */ {}, to_vector_of_ref_vectors(concatenation_groups_commitments), @@ -327,5 +328,99 @@ TYPED_TEST(KZGTest, ShpleminiKzgWithShiftAndConcatenation) EXPECT_EQ(this->vk()->pairing_check(pairing_points[0], pairing_points[1]), true); } +TYPED_TEST(KZGTest, ShpleminiKzgShiftsRemoval) +{ + using ShplonkProver = ShplonkProver_; + using GeminiProver = GeminiProver_; + using ShpleminiVerifier = ShpleminiVerifier_; + using KZG = KZG; + using Fr = typename TypeParam::ScalarField; + using Commitment = typename TypeParam::AffineElement; + using Polynomial = typename bb::Polynomial; + + const size_t n = 16; + const size_t log_n = 4; + // Generate multilinear polynomials, their commitments (genuine and mocked) and evaluations (genuine) at a random + // point. 
+ auto mle_opening_point = this->random_evaluation_point(log_n); // sometimes denoted 'u' + auto poly1 = Polynomial::random(n); + auto poly2 = Polynomial::random(n, 1); + auto poly3 = Polynomial::random(n, 1); + auto poly4 = Polynomial::random(n); + + Commitment commitment1 = this->commit(poly1); + Commitment commitment2 = this->commit(poly2); + Commitment commitment3 = this->commit(poly3); + Commitment commitment4 = this->commit(poly4); + std::vector unshifted_commitments = { commitment1, commitment2, commitment3, commitment4 }; + std::vector shifted_commitments = { commitment2, commitment3 }; + auto eval1 = poly1.evaluate_mle(mle_opening_point); + auto eval2 = poly2.evaluate_mle(mle_opening_point); + auto eval3 = poly3.evaluate_mle(mle_opening_point); + auto eval4 = poly4.evaluate_mle(mle_opening_point); + auto eval2_shift = poly2.evaluate_mle(mle_opening_point, true); + auto eval3_shift = poly3.evaluate_mle(mle_opening_point, true); + + // Collect multilinear evaluations for input to prover + // std::vector multilinear_evaluations = { eval1, eval2, eval3, eval4, eval2_shift, eval3_shift }; + + auto prover_transcript = NativeTranscript::prover_init_empty(); + + // Run the full prover PCS protocol: + + // Compute: + // - (d+1) opening pairs: {r, \hat{a}_0}, {-r^{2^i}, a_i}, i = 0, ..., d-1 + // - (d+1) Fold polynomials Fold_{r}^(0), Fold_{-r}^(0), and Fold^(i), i = 0, ..., d-1 + auto prover_opening_claims = GeminiProver::prove(n, + RefArray{ poly1, poly2, poly3, poly4 }, + RefArray{ poly2, poly3 }, + mle_opening_point, + this->ck(), + prover_transcript); + + // Shplonk prover output: + // - opening pair: (z_challenge, 0) + // - witness: polynomial Q - Q_z + const auto opening_claim = ShplonkProver::prove(this->ck(), prover_opening_claims, prover_transcript); + + // KZG prover: + // - Adds commitment [W] to transcript + KZG::compute_opening_proof(this->ck(), opening_claim, prover_transcript); + + // Run the full verifier PCS protocol with genuine opening claims (genuine commitment, genuine evaluation) + + auto verifier_transcript = NativeTranscript::verifier_init_empty(prover_transcript); + // the index of the first commitment to a polynomial to be shifted in the union of unshifted_commitments and + // shifted_commitments. in our case, it is poly2 + const size_t to_be_shifted_commitments_start = 1; + // the index of the first commitment to a shifted polynomial in the union of unshifted_commitments and + // shifted_commitments. 
in our case, it is the second occurence of poly2 + const size_t shifted_commitments_start = 4; + // number of shifted polynomials + const size_t num_shifted_commitments = 2; + // since commitments to poly2, poly3 and their shifts are the same group elements, we simply combine the scalar + // multipliers of commitment2 and commitment3 in one place and remove the entries of the commitments and scalars + // vectors corresponding to the "shifted" commitment + const RepeatedCommitmentsData repeated_commitments = + RepeatedCommitmentsData(to_be_shifted_commitments_start, shifted_commitments_start, num_shifted_commitments); + + // Gemini verifier output: + // - claim: d+1 commitments to Fold_{r}^(0), Fold_{-r}^(0), Fold^(l), d+1 evaluations a_0_pos, a_l, l = 0:d-1 + const auto batch_opening_claim = + ShpleminiVerifier::compute_batch_opening_claim(n, + RefVector(unshifted_commitments), + RefVector(shifted_commitments), + RefArray{ eval1, eval2, eval3, eval4 }, + RefArray{ eval2_shift, eval3_shift }, + mle_opening_point, + this->vk()->get_g1_identity(), + verifier_transcript, + repeated_commitments); + + const auto pairing_points = KZG::reduce_verify_batch_opening_claim(batch_opening_claim, verifier_transcript); + + // Final pairing check: e([Q] - [Q_z] + z[W], [1]_2) = e([W], [x]_2) + EXPECT_EQ(this->vk()->pairing_check(pairing_points[0], pairing_points[1]), true); +} } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplemini.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplemini.hpp index 2045714d9c2..83ced38c1f4 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplemini.hpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplemini.hpp @@ -4,6 +4,7 @@ #include "barretenberg/commitment_schemes/gemini/gemini_impl.hpp" #include "barretenberg/commitment_schemes/shplonk/shplonk.hpp" #include "barretenberg/commitment_schemes/verification_key.hpp" +#include "barretenberg/flavor/repeated_commitments_data.hpp" #include "barretenberg/transcript/transcript.hpp" namespace bb { @@ -132,6 +133,7 @@ template class ShpleminiVerifier_ { const std::vector& multivariate_challenge, const Commitment& g1_identity, const std::shared_ptr& transcript, + const RepeatedCommitmentsData& repeated_commitments = {}, RefSpan libra_univariate_commitments = {}, const std::vector& libra_univariate_evaluations = {}, const std::vector>& concatenation_group_commitments = {}, @@ -288,6 +290,8 @@ template class ShpleminiVerifier_ { commitments.emplace_back(g1_identity); scalars.emplace_back(constant_term_accumulator); + remove_repeated_commitments(commitments, scalars, repeated_commitments, has_zk); + // For ZK flavors, the sumcheck output contains the evaluations of Libra univariates that submitted to the // ShpleminiVerifier, otherwise this argument is set to be empty if (has_zk) { @@ -493,13 +497,93 @@ template class ShpleminiVerifier_ { } } + /** + * @brief Combines scalars of repeating commitments to reduce the number of scalar multiplications performed by the + * verifier. + * + * @details The Shplemini verifier gets the access to multiple groups of commitments, some of which are duplicated + * because they correspond to polynomials whose shifts also evaluated or used in concatenation groups in + * Translator. This method combines the scalars associated with these repeating commitments, reducing the total + * number of scalar multiplications required during the verification. 
+ * + * More specifically, the Shplemini verifier receives two or three groups of commitments: get_unshifted() and + * get_to_be_shifted() in the case of Ultra, Mega, and ECCVM Flavors; and get_unshifted_without_concatenated(), + * get_to_be_shifted(), and get_groups_to_be_concatenated() in the case of the TranslatorFlavor. The commitments are + * then placed in this specific order in a BatchOpeningClaim object containing a vector of commitments and a vector + * of scalars. The ranges with repeated commitments belong to the Flavors. This method iterates over these ranges + * and sums the scalar multipliers corresponding to the same group element. After combining the scalars, we erase + * corresponding entries in both vectors. + * + */ + // TODO(https://github.com/AztecProtocol/barretenberg/issues/1151) Avoid erasing vector elements. + static void remove_repeated_commitments(std::vector& commitments, + std::vector& scalars, + const RepeatedCommitmentsData& repeated_commitments, + bool has_zk) + { + // We started populating commitments and scalars by adding Shplonk:Q commitmment and the corresponding scalar + // factor 1. In the case of ZK, we also added Gemini:masking_poly_comm before populating the vector with + // commitments to prover polynomials + const size_t offset = has_zk ? 2 : 1; + + // Extract the indices from the container, which is normally created in a given Flavor + const size_t& first_range_to_be_shifted_start = repeated_commitments.first_range_to_be_shifted_start + offset; + const size_t& first_range_shifted_start = repeated_commitments.first_range_shifted_start + offset; + const size_t& first_range_size = repeated_commitments.first_range_size; + + const size_t& second_range_to_be_shifted_start = repeated_commitments.second_range_to_be_shifted_start + offset; + const size_t& second_range_shifted_start = repeated_commitments.second_range_shifted_start + offset; + const size_t& second_range_size = repeated_commitments.second_range_size; + + // Iterate over the first range of to-be-shifted scalars and their shifted counterparts + for (size_t i = 0; i < first_range_size; i++) { + size_t idx_to_be_shifted = i + first_range_to_be_shifted_start; + size_t idx_shifted = i + first_range_shifted_start; + scalars[idx_to_be_shifted] = scalars[idx_to_be_shifted] + scalars[idx_shifted]; + } + + // Iterate over the second range of to-be-shifted precomputed scalars and their shifted counterparts (if + // provided) + for (size_t i = 0; i < second_range_size; i++) { + size_t idx_to_be_shifted = i + second_range_to_be_shifted_start; + size_t idx_shifted = i + second_range_shifted_start; + scalars[idx_to_be_shifted] = scalars[idx_to_be_shifted] + scalars[idx_shifted]; + } + + if (second_range_shifted_start > first_range_shifted_start) { + // Erase the shifted scalars and commitments from the second range (if provided) + for (size_t i = 0; i < second_range_size; ++i) { + scalars.erase(scalars.begin() + static_cast(second_range_shifted_start)); + commitments.erase(commitments.begin() + static_cast(second_range_shifted_start)); + } + + // Erase the shifted scalars and commitments from the first range + for (size_t i = 0; i < first_range_size; ++i) { + scalars.erase(scalars.begin() + static_cast(first_range_shifted_start)); + commitments.erase(commitments.begin() + static_cast(first_range_shifted_start)); + } + } else { + // Erase the shifted scalars and commitments from the first range + for (size_t i = 0; i < first_range_size; ++i) { + scalars.erase(scalars.begin() + 
static_cast(first_range_shifted_start)); + commitments.erase(commitments.begin() + static_cast(first_range_shifted_start)); + } + // Erase the shifted scalars and commitments from the second range (if provided) + for (size_t i = 0; i < second_range_size; ++i) { + scalars.erase(scalars.begin() + static_cast(second_range_shifted_start)); + commitments.erase(commitments.begin() + static_cast(second_range_shifted_start)); + } + } + } + /** * @brief Add the opening data corresponding to Libra masking univariates to the batched opening claim * * @details After verifying ZK Sumcheck, the verifier has to validate the claims about the evaluations of Libra - * univariates used to mask Sumcheck round univariates. To minimize the overhead of such openings, we continue the - * Shplonk batching started in Gemini, i.e. we add new claims multiplied by a suitable power of the Shplonk batching - * challenge and re-use the evaluation challenge sampled to prove the evaluations of Gemini polynomials. + * univariates used to mask Sumcheck round univariates. To minimize the overhead of such openings, we continue + * the Shplonk batching started in Gemini, i.e. we add new claims multiplied by a suitable power of the Shplonk + * batching challenge and re-use the evaluation challenge sampled to prove the evaluations of Gemini + * polynomials. * * @param commitments * @param scalars @@ -541,8 +625,8 @@ template class ShpleminiVerifier_ { if constexpr (!Curve::is_stdlib_type) { Fr::batch_invert(denominators); } - // add Libra commitments to the vector of commitments; compute corresponding scalars and the correction to the - // constant term + // add Libra commitments to the vector of commitments; compute corresponding scalars and the correction to + // the constant term for (const auto [libra_univariate_commitment, denominator, libra_univariate_evaluation] : zip_view(libra_univariate_commitments, denominators, libra_univariate_evaluations)) { commitments.push_back(std::move(libra_univariate_commitment)); diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplemini.test.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplemini.test.cpp index 3fab01e75c5..e3537c00a75 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplemini.test.cpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplemini.test.cpp @@ -324,6 +324,7 @@ TYPED_TEST(ShpleminiTest, ShpleminiWithMaskingLibraUnivariates) mle_opening_point, this->vk()->get_g1_identity(), verifier_transcript, + {}, RefVector(libra_commitments), libra_evaluations); diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/shplemini.test.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/shplemini.test.cpp index 02e2de4e920..b94b0c95085 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/shplemini.test.cpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes_recursion/shplemini.test.cpp @@ -130,14 +130,14 @@ TEST(ShpleminiRecursionTest, ProveAndVerifySingle) return zero; }); - auto opening_claim = ShpleminiVerifier::compute_batch_opening_claim(Fr::from_witness(&builder, N), - RefVector(stdlib_f_commitments), - RefVector(stdlib_g_commitments), - RefVector(stdlib_v_evaluations), - RefVector(stdlib_w_evaluations), - u_challenge_in_circuit, - Commitment::one(&builder), - stdlib_verifier_transcript); + const auto opening_claim = ShpleminiVerifier::compute_batch_opening_claim(Fr::from_witness(&builder, N), + 
RefVector(stdlib_f_commitments), + RefVector(stdlib_g_commitments), + RefVector(stdlib_v_evaluations), + RefVector(stdlib_w_evaluations), + u_challenge_in_circuit, + Commitment::one(&builder), + stdlib_verifier_transcript); auto pairing_points = KZG::reduce_verify_batch_opening_claim(opening_claim, stdlib_verifier_transcript); EXPECT_TRUE(CircuitChecker::check(builder)); diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_flavor.hpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_flavor.hpp index a636fbb5452..a44e827d90e 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_flavor.hpp @@ -7,6 +7,7 @@ #include "barretenberg/flavor/flavor.hpp" #include "barretenberg/flavor/flavor_macros.hpp" #include "barretenberg/flavor/relation_definitions.hpp" +#include "barretenberg/flavor/repeated_commitments_data.hpp" #include "barretenberg/polynomials/polynomial.hpp" #include "barretenberg/polynomials/univariate.hpp" #include "barretenberg/relations/ecc_vm/ecc_bools_relation.hpp" @@ -52,8 +53,19 @@ class ECCVMFlavor { static constexpr size_t NUM_PRECOMPUTED_ENTITIES = 3; // The total number of witness entities not including shifts. static constexpr size_t NUM_WITNESS_ENTITIES = 87; + // The number of entities in ShiftedEntities. + static constexpr size_t NUM_SHIFTED_ENTITIES = 26; + // The number of entities in DerivedWitnessEntities that are not going to be shifted. + static constexpr size_t NUM_DERIVED_WITNESS_ENTITIES_NON_SHIFTED = 1; // The total number of witnesses including shifts and derived entities. - static constexpr size_t NUM_ALL_WITNESS_ENTITIES = 113; + static constexpr size_t NUM_ALL_WITNESS_ENTITIES = NUM_WITNESS_ENTITIES + NUM_SHIFTED_ENTITIES; + // A container to be fed to ShpleminiVerifier to avoid redundant scalar muls, the first number is the index of the + // first witness to be shifted. 
+ static constexpr RepeatedCommitmentsData REPEATED_COMMITMENTS = + RepeatedCommitmentsData(NUM_PRECOMPUTED_ENTITIES + NUM_WITNESS_ENTITIES - + NUM_DERIVED_WITNESS_ENTITIES_NON_SHIFTED - NUM_SHIFTED_ENTITIES, + NUM_PRECOMPUTED_ENTITIES + NUM_WITNESS_ENTITIES, + NUM_SHIFTED_ENTITIES); using GrandProductRelations = std::tuple>; // define the tuple of Relations that comprise the Sumcheck relation @@ -121,91 +133,91 @@ class ECCVMFlavor { template class WireEntities { public: DEFINE_FLAVOR_MEMBERS(DataType, - transcript_add, // column 0 - transcript_mul, // column 1 - transcript_eq, // column 2 - transcript_msm_transition, // column 3 - transcript_pc, // column 4 - transcript_msm_count, // column 5 - transcript_Px, // column 6 - transcript_Py, // column 7 - transcript_z1, // column 8 - transcript_z2, // column 9 - transcript_z1zero, // column 10 - transcript_z2zero, // column 11 - transcript_op, // column 12 - transcript_accumulator_x, // column 13 - transcript_accumulator_y, // column 14 - transcript_msm_x, // column 15 - transcript_msm_y, // column 16 - precompute_pc, // column 17 - precompute_point_transition, // column 18 - precompute_round, // column 19 - precompute_scalar_sum, // column 20 - precompute_s1hi, // column 21 - precompute_s1lo, // column 22 - precompute_s2hi, // column 23 - precompute_s2lo, // column 24 - precompute_s3hi, // column 25 - precompute_s3lo, // column 26 - precompute_s4hi, // column 27 - precompute_s4lo, // column 28 - precompute_skew, // column 29 - precompute_dx, // column 30 - precompute_dy, // column 31 - precompute_tx, // column 32 - precompute_ty, // column 33 - msm_transition, // column 34 - msm_add, // column 35 - msm_double, // column 36 - msm_skew, // column 37 - msm_accumulator_x, // column 38 - msm_accumulator_y, // column 39 - msm_pc, // column 40 - msm_size_of_msm, // column 41 - msm_count, // column 42 - msm_round, // column 43 - msm_add1, // column 44 - msm_add2, // column 45 - msm_add3, // column 46 - msm_add4, // column 47 - msm_x1, // column 48 - msm_y1, // column 49 - msm_x2, // column 50 - msm_y2, // column 51 - msm_x3, // column 52 - msm_y3, // column 53 - msm_x4, // column 54 - msm_y4, // column 55 - msm_collision_x1, // column 56 - msm_collision_x2, // column 57 - msm_collision_x3, // column 58 - msm_collision_x4, // column 59 - msm_lambda1, // column 60 - msm_lambda2, // column 61 - msm_lambda3, // column 62 - msm_lambda4, // column 63 - msm_slice1, // column 64 - msm_slice2, // column 65 - msm_slice3, // column 66 - msm_slice4, // column 67 - transcript_accumulator_empty, // column 68 - transcript_reset_accumulator, // column 69 - precompute_select, // column 70 - lookup_read_counts_0, // column 71 - lookup_read_counts_1, // column 72 - transcript_base_infinity, // column 73 - transcript_base_x_inverse, // column 74 - transcript_base_y_inverse, // column 75 - transcript_add_x_equal, // column 76 - transcript_add_y_equal, // column 77 - transcript_add_lambda, // column 78 - transcript_msm_intermediate_x, // column 79 - transcript_msm_intermediate_y, // column 80 - transcript_msm_infinity, // column 81 - transcript_msm_x_inverse, // column 82 - transcript_msm_count_zero_at_transition, // column 83 - transcript_msm_count_at_transition_inverse); // column 84 + transcript_add, // column 0 + transcript_eq, // column 1 + transcript_msm_transition, // column 2 + transcript_Px, // column 3 + transcript_Py, // column 4 + transcript_z1, // column 5 + transcript_z2, // column 6 + transcript_z1zero, // column 7 + transcript_z2zero, // column 8 + 
transcript_op, // column 9 + transcript_msm_x, // column 10 + transcript_msm_y, // column 11 + precompute_point_transition, // column 12 + precompute_s1lo, // column 13 + precompute_s2hi, // column 14 + precompute_s2lo, // column 15 + precompute_s3hi, // column 16 + precompute_s3lo, // column 17 + precompute_s4hi, // column 18 + precompute_s4lo, // column 19 + precompute_skew, // column 20 + msm_size_of_msm, // column 21 + msm_add2, // column 22 + msm_add3, // column 23 + msm_add4, // column 24 + msm_x1, // column 25 + msm_y1, // column 26 + msm_x2, // column 27 + msm_y2, // column 28 + msm_x3, // column 29 + msm_y3, // column 30 + msm_x4, // column 31 + msm_y4, // column 32 + msm_collision_x1, // column 33 + msm_collision_x2, // column 34 + msm_collision_x3, // column 35 + msm_collision_x4, // column 36 + msm_lambda1, // column 37 + msm_lambda2, // column 38 + msm_lambda3, // column 39 + msm_lambda4, // column 40 + msm_slice1, // column 41 + msm_slice2, // column 42 + msm_slice3, // column 43 + msm_slice4, // column 44 + transcript_reset_accumulator, // column 45 + lookup_read_counts_0, // column 46 + lookup_read_counts_1, // column 47 + transcript_base_infinity, // column 48 + transcript_base_x_inverse, // column 49 + transcript_base_y_inverse, // column 50 + transcript_add_x_equal, // column 51 + transcript_add_y_equal, // column 52 + transcript_add_lambda, // column 53 + transcript_msm_intermediate_x, // column 54 + transcript_msm_intermediate_y, // column 55 + transcript_msm_infinity, // column 56 + transcript_msm_x_inverse, // column 57 + transcript_msm_count_zero_at_transition, // column 58 + transcript_msm_count_at_transition_inverse, // column 59 + transcript_mul, // column 60 + transcript_msm_count, // column 61 + transcript_accumulator_x, // column 62 + transcript_accumulator_y, // column 63 + precompute_scalar_sum, // column 64 + precompute_s1hi, // column 65 + precompute_dx, // column 66 + precompute_dy, // column 67 + precompute_tx, // column 68 + precompute_ty, // column 69 + msm_transition, // column 70 + msm_add, // column 71 + msm_double, // column 72 + msm_skew, // column 73 + msm_accumulator_x, // column 74 + msm_accumulator_y, // column 75 + msm_count, // column 76 + msm_round, // column 77 + msm_add1, // column 78 + msm_pc, // column 79 + precompute_pc, // column 80 + transcript_pc, // column 81 + precompute_round, // column 82 + transcript_accumulator_empty, // column 83 + precompute_select) // column 84 }; /** diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_transcript.test.cpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_transcript.test.cpp index f562f36c6f8..92d3ffc5f13 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_transcript.test.cpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_transcript.test.cpp @@ -41,11 +41,8 @@ class ECCVMTranscriptTests : public ::testing::Test { size_t round = 0; manifest_expected.add_entry(round, "circuit_size", frs_per_uint32); manifest_expected.add_entry(round, "TRANSCRIPT_ADD", frs_per_G); - manifest_expected.add_entry(round, "TRANSCRIPT_MUL", frs_per_G); manifest_expected.add_entry(round, "TRANSCRIPT_EQ", frs_per_G); manifest_expected.add_entry(round, "TRANSCRIPT_MSM_TRANSITION", frs_per_G); - manifest_expected.add_entry(round, "TRANSCRIPT_PC", frs_per_G); - manifest_expected.add_entry(round, "TRANSCRIPT_MSM_COUNT", frs_per_G); manifest_expected.add_entry(round, "TRANSCRIPT_PX", frs_per_G); manifest_expected.add_entry(round, "TRANSCRIPT_PY", frs_per_G); manifest_expected.add_entry(round, 
"TRANSCRIPT_Z1", frs_per_G); @@ -53,15 +50,9 @@ class ECCVMTranscriptTests : public ::testing::Test { manifest_expected.add_entry(round, "TRANSCRIPT_Z1ZERO", frs_per_G); manifest_expected.add_entry(round, "TRANSCRIPT_Z2ZERO", frs_per_G); manifest_expected.add_entry(round, "TRANSCRIPT_OP", frs_per_G); - manifest_expected.add_entry(round, "TRANSCRIPT_ACCUMULATOR_X", frs_per_G); - manifest_expected.add_entry(round, "TRANSCRIPT_ACCUMULATOR_Y", frs_per_G); manifest_expected.add_entry(round, "TRANSCRIPT_MSM_X", frs_per_G); manifest_expected.add_entry(round, "TRANSCRIPT_MSM_Y", frs_per_G); - manifest_expected.add_entry(round, "PRECOMPUTE_PC", frs_per_G); manifest_expected.add_entry(round, "PRECOMPUTE_POINT_TRANSITION", frs_per_G); - manifest_expected.add_entry(round, "PRECOMPUTE_ROUND", frs_per_G); - manifest_expected.add_entry(round, "PRECOMPUTE_SCALAR_SUM", frs_per_G); - manifest_expected.add_entry(round, "PRECOMPUTE_S1HI", frs_per_G); manifest_expected.add_entry(round, "PRECOMPUTE_S1LO", frs_per_G); manifest_expected.add_entry(round, "PRECOMPUTE_S2HI", frs_per_G); manifest_expected.add_entry(round, "PRECOMPUTE_S2LO", frs_per_G); @@ -70,21 +61,7 @@ class ECCVMTranscriptTests : public ::testing::Test { manifest_expected.add_entry(round, "PRECOMPUTE_S4HI", frs_per_G); manifest_expected.add_entry(round, "PRECOMPUTE_S4LO", frs_per_G); manifest_expected.add_entry(round, "PRECOMPUTE_SKEW", frs_per_G); - manifest_expected.add_entry(round, "PRECOMPUTE_DX", frs_per_G); - manifest_expected.add_entry(round, "PRECOMPUTE_DY", frs_per_G); - manifest_expected.add_entry(round, "PRECOMPUTE_TX", frs_per_G); - manifest_expected.add_entry(round, "PRECOMPUTE_TY", frs_per_G); - manifest_expected.add_entry(round, "MSM_TRANSITION", frs_per_G); - manifest_expected.add_entry(round, "MSM_ADD", frs_per_G); - manifest_expected.add_entry(round, "MSM_DOUBLE", frs_per_G); - manifest_expected.add_entry(round, "MSM_SKEW", frs_per_G); - manifest_expected.add_entry(round, "MSM_ACCUMULATOR_X", frs_per_G); - manifest_expected.add_entry(round, "MSM_ACCUMULATOR_Y", frs_per_G); - manifest_expected.add_entry(round, "MSM_PC", frs_per_G); manifest_expected.add_entry(round, "MSM_SIZE_OF_MSM", frs_per_G); - manifest_expected.add_entry(round, "MSM_COUNT", frs_per_G); - manifest_expected.add_entry(round, "MSM_ROUND", frs_per_G); - manifest_expected.add_entry(round, "MSM_ADD1", frs_per_G); manifest_expected.add_entry(round, "MSM_ADD2", frs_per_G); manifest_expected.add_entry(round, "MSM_ADD3", frs_per_G); manifest_expected.add_entry(round, "MSM_ADD4", frs_per_G); @@ -108,9 +85,7 @@ class ECCVMTranscriptTests : public ::testing::Test { manifest_expected.add_entry(round, "MSM_SLICE2", frs_per_G); manifest_expected.add_entry(round, "MSM_SLICE3", frs_per_G); manifest_expected.add_entry(round, "MSM_SLICE4", frs_per_G); - manifest_expected.add_entry(round, "TRANSCRIPT_ACCUMULATOR_EMPTY", frs_per_G); manifest_expected.add_entry(round, "TRANSCRIPT_RESET_ACCUMULATOR", frs_per_G); - manifest_expected.add_entry(round, "PRECOMPUTE_SELECT", frs_per_G); manifest_expected.add_entry(round, "LOOKUP_READ_COUNTS_0", frs_per_G); manifest_expected.add_entry(round, "LOOKUP_READ_COUNTS_1", frs_per_G); manifest_expected.add_entry(round, "TRANSCRIPT_BASE_INFINITY", frs_per_G); @@ -125,6 +100,31 @@ class ECCVMTranscriptTests : public ::testing::Test { manifest_expected.add_entry(round, "TRANSCRIPT_MSM_X_INVERSE", frs_per_G); manifest_expected.add_entry(round, "TRANSCRIPT_MSM_COUNT_ZERO_AT_TRANSITION", frs_per_G); manifest_expected.add_entry(round, 
"TRANSCRIPT_MSM_COUNT_AT_TRANSITION_INVERSE", frs_per_G); + manifest_expected.add_entry(round, "TRANSCRIPT_MUL", frs_per_G); + manifest_expected.add_entry(round, "TRANSCRIPT_MSM_COUNT", frs_per_G); + manifest_expected.add_entry(round, "TRANSCRIPT_ACCUMULATOR_X", frs_per_G); + manifest_expected.add_entry(round, "TRANSCRIPT_ACCUMULATOR_Y", frs_per_G); + manifest_expected.add_entry(round, "PRECOMPUTE_SCALAR_SUM", frs_per_G); + manifest_expected.add_entry(round, "PRECOMPUTE_S1HI", frs_per_G); + manifest_expected.add_entry(round, "PRECOMPUTE_DX", frs_per_G); + manifest_expected.add_entry(round, "PRECOMPUTE_DY", frs_per_G); + manifest_expected.add_entry(round, "PRECOMPUTE_TX", frs_per_G); + manifest_expected.add_entry(round, "PRECOMPUTE_TY", frs_per_G); + manifest_expected.add_entry(round, "MSM_TRANSITION", frs_per_G); + manifest_expected.add_entry(round, "MSM_ADD", frs_per_G); + manifest_expected.add_entry(round, "MSM_DOUBLE", frs_per_G); + manifest_expected.add_entry(round, "MSM_SKEW", frs_per_G); + manifest_expected.add_entry(round, "MSM_ACCUMULATOR_X", frs_per_G); + manifest_expected.add_entry(round, "MSM_ACCUMULATOR_Y", frs_per_G); + manifest_expected.add_entry(round, "MSM_COUNT", frs_per_G); + manifest_expected.add_entry(round, "MSM_ROUND", frs_per_G); + manifest_expected.add_entry(round, "MSM_ADD1", frs_per_G); + manifest_expected.add_entry(round, "MSM_PC", frs_per_G); + manifest_expected.add_entry(round, "PRECOMPUTE_PC", frs_per_G); + manifest_expected.add_entry(round, "TRANSCRIPT_PC", frs_per_G); + manifest_expected.add_entry(round, "PRECOMPUTE_ROUND", frs_per_G); + manifest_expected.add_entry(round, "TRANSCRIPT_ACCUMULATOR_EMPTY", frs_per_G); + manifest_expected.add_entry(round, "PRECOMPUTE_SELECT", frs_per_G); manifest_expected.add_challenge(round, "beta", "gamma"); round++; diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_verifier.cpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_verifier.cpp index 210b9ee6ac4..f4a10683335 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_verifier.cpp @@ -80,6 +80,7 @@ bool ECCVMVerifier::verify_proof(const ECCVMProof& proof) multivariate_challenge, key->pcs_verification_key->get_g1_identity(), transcript, + Flavor::REPEATED_COMMITMENTS, RefVector(libra_commitments), libra_evaluations); diff --git a/barretenberg/cpp/src/barretenberg/flavor/repeated_commitments_data.hpp b/barretenberg/cpp/src/barretenberg/flavor/repeated_commitments_data.hpp new file mode 100644 index 00000000000..c5b697ded49 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/flavor/repeated_commitments_data.hpp @@ -0,0 +1,36 @@ +#pragma once +#include + +namespace bb { +struct RepeatedCommitmentsData { + size_t first_range_to_be_shifted_start = 0; + size_t first_range_shifted_start = 0; + size_t first_range_size = 0; + size_t second_range_to_be_shifted_start = 0; + size_t second_range_shifted_start = 0; + size_t second_range_size = 0; + + RepeatedCommitmentsData() = default; + // Constructor for a single range + constexpr RepeatedCommitmentsData(size_t first_to_be_shifted_start, size_t first_shifted_start, size_t first_size) + : first_range_to_be_shifted_start(first_to_be_shifted_start) + , first_range_shifted_start(first_shifted_start) + , first_range_size(first_size) + {} + + // Constructor for both ranges + constexpr RepeatedCommitmentsData(size_t first_to_be_shifted_start, + size_t first_shifted_start, + size_t first_size, + size_t second_to_be_shifted_start, + size_t second_shifted_start, + 
size_t second_size) + : first_range_to_be_shifted_start(first_to_be_shifted_start) + , first_range_shifted_start(first_shifted_start) + , first_range_size(first_size) + , second_range_to_be_shifted_start(second_to_be_shifted_start) + , second_range_shifted_start(second_shifted_start) + , second_range_size(second_size) + {} +}; +} // namespace bb \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_flavor.hpp b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_flavor.hpp index f486752a9cb..637fe115c07 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_flavor.hpp @@ -45,8 +45,8 @@ template class ECCVMRecursiveFlavor_ { static constexpr size_t NUM_PRECOMPUTED_ENTITIES = ECCVMFlavor::NUM_PRECOMPUTED_ENTITIES; // The total number of witness entities not including shifts. static constexpr size_t NUM_WITNESS_ENTITIES = ECCVMFlavor::NUM_WITNESS_ENTITIES; - static constexpr size_t NUM_ALL_WITNESS_ENTITIES = ECCVMFlavor::NUM_ALL_WITNESS_ENTITIES; + static constexpr RepeatedCommitmentsData REPEATED_COMMITMENTS = ECCVMFlavor::REPEATED_COMMITMENTS; // define the tuple of Relations that comprise the Sumcheck relation // Reuse the Relations from ECCVM using Relations = ECCVMFlavor::Relations_; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.cpp b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.cpp index 869e11b4920..dd1868f2cc6 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_verifier.cpp @@ -94,8 +94,10 @@ template void ECCVMRecursiveVerifier_::verify_proof(co multivariate_challenge, key->pcs_verification_key->get_g1_identity(), transcript, + Flavor::REPEATED_COMMITMENTS, RefVector(libra_commitments), libra_evaluations); + // Reduce the accumulator to a single opening claim const OpeningClaim multivariate_to_univariate_opening_claim = PCS::reduce_batch_opening_claim(sumcheck_batch_opening_claims); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/decider_recursive_verifier.cpp b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/decider_recursive_verifier.cpp index a47470cf577..3bbb2611cfb 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/decider_recursive_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/decider_recursive_verifier.cpp @@ -31,14 +31,15 @@ std::array DeciderRecursiveVerifier_:: sumcheck.verify(accumulator->relation_parameters, accumulator->alphas, accumulator->gate_challenges); // Execute Shplemini rounds. 
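The `RepeatedCommitmentsData` ranges above record where each to-be-shifted commitment and its shifted counterpart sit in the verifier's flattened commitment list. Because a shifted polynomial is opened against the same commitment as its unshifted source, the Shplemini verifier can fold the two batching scalars into a single slot instead of paying a second scalar multiplication, which is the "avoid redundant scalar muls" the new struct comment refers to. A minimal sketch of that folding under illustrative names (`fold_repeated_scalars` and the flat `scalars` vector are assumptions for exposition, not the actual ShpleminiVerifier internals):

```cpp
#include <cstddef>
#include <vector>

#include "barretenberg/flavor/repeated_commitments_data.hpp"

// Sketch only: fold the batching scalar of each shifted claim into the slot of
// the commitment it duplicates, so every repeated commitment contributes to the
// final batch_mul exactly once. Layout and names are illustrative assumptions.
template <typename FF>
void fold_repeated_scalars(std::vector<FF>& scalars, const bb::RepeatedCommitmentsData& data)
{
    for (size_t i = 0; i < data.first_range_size; ++i) {
        scalars[data.first_range_to_be_shifted_start + i] += scalars[data.first_range_shifted_start + i];
        scalars[data.first_range_shifted_start + i] = FF(0); // a real verifier would simply omit the duplicate
    }
    // Second range is only populated for flavors that pass the six-argument constructor.
    for (size_t i = 0; i < data.second_range_size; ++i) {
        scalars[data.second_range_to_be_shifted_start + i] += scalars[data.second_range_shifted_start + i];
        scalars[data.second_range_shifted_start + i] = FF(0);
    }
}
```

In practice the duplicated commitments would never be pushed into the batch multiplication at all; zeroing the slot above is just the simplest way to show the fold.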
- auto opening_claim = Shplemini::compute_batch_opening_claim(accumulator->verification_key->circuit_size, - commitments.get_unshifted(), - commitments.get_to_be_shifted(), - claimed_evaluations.get_unshifted(), - claimed_evaluations.get_shifted(), - multivariate_challenge, - Commitment::one(builder), - transcript); + const auto opening_claim = Shplemini::compute_batch_opening_claim(accumulator->verification_key->circuit_size, + commitments.get_unshifted(), + commitments.get_to_be_shifted(), + claimed_evaluations.get_unshifted(), + claimed_evaluations.get_shifted(), + multivariate_challenge, + Commitment::one(builder), + transcript, + Flavor::REPEATED_COMMITMENTS); auto pairing_points = PCS::reduce_verify_batch_opening_claim(opening_claim, transcript); return pairing_points; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.cpp b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.cpp index 5928b8a0bff..26c2297b41d 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.cpp @@ -117,8 +117,10 @@ UltraRecursiveVerifier_::AggregationObject UltraRecursiveVerifier_ class TranslatorRecursiveFlavor_ { // The total number of witness entities not including shifts. static constexpr size_t NUM_WITNESS_ENTITIES = NativeFlavor::NUM_WITNESS_ENTITIES; + static constexpr RepeatedCommitmentsData REPEATED_COMMITMENTS = NativeFlavor::REPEATED_COMMITMENTS; + using Relations = TranslatorFlavor::Relations_; static constexpr size_t MAX_PARTIAL_RELATION_LENGTH = compute_max_partial_relation_length(); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.cpp b/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.cpp index 1dbae562d7d..2849fdd48b5 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.cpp @@ -120,6 +120,9 @@ std::array TranslatorRecursiveVerifier_ opening_claim = Shplemini::compute_batch_opening_claim(circuit_size, commitments.get_unshifted_without_concatenated(), @@ -129,6 +132,7 @@ std::array TranslatorRecursiveVerifier_; // The total number of witnesses including shifts and derived entities. - static constexpr size_t NUM_ALL_WITNESS_ENTITIES = 23; + static constexpr size_t NUM_ALL_WITNESS_ENTITIES = NUM_WITNESS_ENTITIES + NUM_SHIFTED_WITNESSES; // For instances of this flavour, used in folding, we need a unique sumcheck batching challenges for each // subrelation. This @@ -272,6 +282,7 @@ class MegaFlavor { /** * @brief Class for ShiftedEntities, containing shifted witness and table polynomials. + * TODO: Remove NUM_SHIFTED_TABLES once these entities are deprecated. */ template class ShiftedTables { public: diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_recursive_flavor.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_recursive_flavor.hpp index 94ef17d6c8d..9fc58872fab 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_recursive_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_recursive_flavor.hpp @@ -68,6 +68,9 @@ template class MegaRecursiveFlavor_ { // random polynomial e.g. 
For \sum(x) [A(x) * B(x) + C(x)] * PowZeta(X), relation length = 2 and random relation // length = 3 static constexpr size_t BATCHED_RELATION_PARTIAL_LENGTH = MAX_PARTIAL_RELATION_LENGTH + 1; + + static constexpr RepeatedCommitmentsData REPEATED_COMMITMENTS = MegaFlavor::REPEATED_COMMITMENTS; + static constexpr size_t NUM_RELATIONS = std::tuple_size_v; // For instances of this flavour, used in folding, we need a unique sumcheck batching challenge for each diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_flavor.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_flavor.hpp index 4df8941fd53..d8be56ad096 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_flavor.hpp @@ -3,6 +3,7 @@ #include "barretenberg/ecc/curves/bn254/g1.hpp" #include "barretenberg/flavor/flavor.hpp" #include "barretenberg/flavor/flavor_macros.hpp" +#include "barretenberg/flavor/repeated_commitments_data.hpp" #include "barretenberg/plonk_honk_shared/library/grand_product_delta.hpp" #include "barretenberg/plonk_honk_shared/library/grand_product_library.hpp" #include "barretenberg/polynomials/barycentric.hpp" @@ -47,10 +48,21 @@ class UltraFlavor { static constexpr size_t NUM_PRECOMPUTED_ENTITIES = 27; // The total number of witness entities not including shifts. static constexpr size_t NUM_WITNESS_ENTITIES = 8; - // The total number of witnesses including shifts and derived entities. - static constexpr size_t NUM_ALL_WITNESS_ENTITIES = 13; // Total number of folded polynomials, which is just all polynomials except the shifts static constexpr size_t NUM_FOLDED_ENTITIES = NUM_PRECOMPUTED_ENTITIES + NUM_WITNESS_ENTITIES; + // The number of shifted witness entities including derived witness entities + static constexpr size_t NUM_SHIFTED_WITNESSES = 5; + // The number of shifted tables + static constexpr size_t NUM_SHIFTED_TABLES = 4; + + // A container to be fed to ShpleminiVerifier to avoid redundant scalar muls + static constexpr RepeatedCommitmentsData REPEATED_COMMITMENTS = + RepeatedCommitmentsData(NUM_PRECOMPUTED_ENTITIES, + NUM_PRECOMPUTED_ENTITIES + NUM_WITNESS_ENTITIES + NUM_SHIFTED_TABLES, + NUM_SHIFTED_WITNESSES); + + // The total number of witnesses including shifts and derived entities. + static constexpr size_t NUM_ALL_WITNESS_ENTITIES = NUM_WITNESS_ENTITIES + NUM_SHIFTED_WITNESSES; // define the tuple of Relations that comprise the Sumcheck relation // Note: made generic for use in MegaRecursive. @@ -193,6 +205,7 @@ class UltraFlavor { /** * @brief Class for ShiftedEntities, containing shifted witness and table polynomials. + * TODO: Remove NUM_SHIFTED_TABLES once these entities are deprecated. */ template class ShiftedTables { public: diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_recursive_flavor.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_recursive_flavor.hpp index 972e1bf909c..9b28f0de7d9 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_recursive_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_recursive_flavor.hpp @@ -69,6 +69,8 @@ template class UltraRecursiveFlavor_ { // The total number of witness entities not including shifts. 
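To make the Ultra constants above concrete, here is a short illustrative check of what they imply; the static_asserts are a sketch for orientation, restating only the arithmetic of the values defined in this hunk (27 precomputed entities, 8 witnesses, 4 shifted tables, 5 shifted witnesses):

```cpp
#include "barretenberg/stdlib_circuit_builders/ultra_flavor.hpp"

// In the flattened entity ordering implied by these constants, UltraFlavor's
// five to-be-shifted witnesses occupy slots 27..31 and their shifts slots
// 39..43, so Shplemini folds five scalar pairs rather than doing five extra
// scalar multiplications.
static_assert(bb::UltraFlavor::REPEATED_COMMITMENTS.first_range_to_be_shifted_start == 27);
static_assert(bb::UltraFlavor::REPEATED_COMMITMENTS.first_range_shifted_start == 27 + 8 + 4);
static_assert(bb::UltraFlavor::REPEATED_COMMITMENTS.first_range_size == 5);
```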
static constexpr size_t NUM_WITNESS_ENTITIES = UltraFlavor::NUM_WITNESS_ENTITIES; + static constexpr RepeatedCommitmentsData REPEATED_COMMITMENTS = UltraFlavor::REPEATED_COMMITMENTS; + // define the tuple of Relations that comprise the Sumcheck relation using Relations = UltraFlavor::Relations_; diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.hpp b/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.hpp index 2c8e0d2c9c4..6820125e050 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.hpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/translator_circuit_builder.hpp @@ -96,53 +96,58 @@ class TranslatorCircuitBuilder : public CircuitBuilderBase { X_LOW_Y_HI, X_HIGH_Z_1, Y_LOW_Z_2, - P_X_LOW_LIMBS, // P.xₗₒ split into 2 68 bit limbs + P_X_LOW_LIMBS, // P.xₗₒ split into 2 68 bit limbs + P_X_HIGH_LIMBS, // P.xₕᵢ split into a 68 and a 50 bit limb + P_Y_LOW_LIMBS, // P.yₗₒ split into 2 68 bit limbs + P_Y_HIGH_LIMBS, // P.yₕᵢ split into a 68 and a 50 bit limb + Z_LOW_LIMBS, // Low limbs of z_1 and z_2 (68 bits each) + Z_HIGH_LIMBS, // High Limbs of z_1 and z_2 (60 bits each) + ACCUMULATORS_BINARY_LIMBS_0, // Contain 68-bit limbs of current and previous accumulator (previous at higher + // indices because of the nuances of KZG commitment). + ACCUMULATORS_BINARY_LIMBS_1, + ACCUMULATORS_BINARY_LIMBS_2, + ACCUMULATORS_BINARY_LIMBS_3, // Highest limb is 50 bits (254 mod 68) P_X_LOW_LIMBS_RANGE_CONSTRAINT_0, // Low + // limbs split further into smaller chunks for range constraints + QUOTIENT_LOW_BINARY_LIMBS, // Quotient limbs + QUOTIENT_HIGH_BINARY_LIMBS, + RELATION_WIDE_LIMBS, // Limbs for checking the correctness of mod 2²⁷² relations. P_X_LOW_LIMBS_RANGE_CONSTRAINT_0, // Low limbs split further into smaller chunks for range constraints P_X_LOW_LIMBS_RANGE_CONSTRAINT_1, P_X_LOW_LIMBS_RANGE_CONSTRAINT_2, P_X_LOW_LIMBS_RANGE_CONSTRAINT_3, P_X_LOW_LIMBS_RANGE_CONSTRAINT_4, P_X_LOW_LIMBS_RANGE_CONSTRAINT_TAIL, - P_X_HIGH_LIMBS, // P.xₕᵢ split into a 68 and a 50 bit limb P_X_HIGH_LIMBS_RANGE_CONSTRAINT_0, // High limbs split into chunks for range constraints P_X_HIGH_LIMBS_RANGE_CONSTRAINT_1, P_X_HIGH_LIMBS_RANGE_CONSTRAINT_2, P_X_HIGH_LIMBS_RANGE_CONSTRAINT_3, P_X_HIGH_LIMBS_RANGE_CONSTRAINT_4, P_X_HIGH_LIMBS_RANGE_CONSTRAINT_TAIL, - P_Y_LOW_LIMBS, // P.yₗₒ split into 2 68 bit limbs P_Y_LOW_LIMBS_RANGE_CONSTRAINT_0, // Low limbs split into chunks for range constraints P_Y_LOW_LIMBS_RANGE_CONSTRAINT_1, P_Y_LOW_LIMBS_RANGE_CONSTRAINT_2, P_Y_LOW_LIMBS_RANGE_CONSTRAINT_3, P_Y_LOW_LIMBS_RANGE_CONSTRAINT_4, P_Y_LOW_LIMBS_RANGE_CONSTRAINT_TAIL, - P_Y_HIGH_LIMBS, // P.yₕᵢ split into a 68 and a 50 bit limb P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_0, // High limbs split into chunks for range constraints P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_1, P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_2, P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_3, P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_4, P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_TAIL, - Z_LOW_LIMBS, // Low limbs of z_1 and z_2 (68 bits each) Z_LOW_LIMBS_RANGE_CONSTRAINT_0, // Range constraints for low limbs of z_1 and z_2 Z_LOW_LIMBS_RANGE_CONSTRAINT_1, Z_LOW_LIMBS_RANGE_CONSTRAINT_2, Z_LOW_LIMBS_RANGE_CONSTRAINT_3, Z_LOW_LIMBS_RANGE_CONSTRAINT_4, Z_LOW_LIMBS_RANGE_CONSTRAINT_TAIL, - Z_HIGH_LIMBS, // High Limbs of z_1 and z_2 (60 bits each) Z_HIGH_LIMBS_RANGE_CONSTRAINT_0, // Range constraints for high limbs of z_1 and z_2 Z_HIGH_LIMBS_RANGE_CONSTRAINT_1, Z_HIGH_LIMBS_RANGE_CONSTRAINT_2, Z_HIGH_LIMBS_RANGE_CONSTRAINT_3, 
Z_HIGH_LIMBS_RANGE_CONSTRAINT_4, Z_HIGH_LIMBS_RANGE_CONSTRAINT_TAIL, - ACCUMULATORS_BINARY_LIMBS_0, // Contain 68-bit limbs of current and previous accumulator (previous at higher - // indices because of the nuances of KZG commitment). - ACCUMULATORS_BINARY_LIMBS_1, - ACCUMULATORS_BINARY_LIMBS_2, - ACCUMULATORS_BINARY_LIMBS_3, // Highest limb is 50 bits (254 mod 68) + ACCUMULATOR_LOW_LIMBS_RANGE_CONSTRAINT_0, // Range constraints for the current accumulator limbs (no need to // redo previous accumulator) ACCUMULATOR_LOW_LIMBS_RANGE_CONSTRAINT_1, @@ -156,8 +161,7 @@ class TranslatorCircuitBuilder : public CircuitBuilderBase { ACCUMULATOR_HIGH_LIMBS_RANGE_CONSTRAINT_3, ACCUMULATOR_HIGH_LIMBS_RANGE_CONSTRAINT_4, ACCUMULATOR_HIGH_LIMBS_RANGE_CONSTRAINT_TAIL, - QUOTIENT_LOW_BINARY_LIMBS, // Quotient limbs - QUOTIENT_HIGH_BINARY_LIMBS, + QUOTIENT_LOW_LIMBS_RANGE_CONSTRAIN_0, // Range constraints for quotient QUOTIENT_LOW_LIMBS_RANGE_CONSTRAIN_1, QUOTIENT_LOW_LIMBS_RANGE_CONSTRAIN_2, @@ -170,7 +174,6 @@ class TranslatorCircuitBuilder : public CircuitBuilderBase { QUOTIENT_HIGH_LIMBS_RANGE_CONSTRAIN_3, QUOTIENT_HIGH_LIMBS_RANGE_CONSTRAIN_4, QUOTIENT_HIGH_LIMBS_RANGE_CONSTRAIN_TAIL, - RELATION_WIDE_LIMBS, // Limbs for checking the correctness of mod 2²⁷² relations. RELATION_WIDE_LIMBS_RANGE_CONSTRAINT_0, RELATION_WIDE_LIMBS_RANGE_CONSTRAINT_1, RELATION_WIDE_LIMBS_RANGE_CONSTRAINT_2, diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/translator_flavor.hpp b/barretenberg/cpp/src/barretenberg/translator_vm/translator_flavor.hpp index dced2b66d4f..7167437d4e3 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/translator_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/translator_flavor.hpp @@ -6,6 +6,7 @@ #include "barretenberg/flavor/flavor.hpp" #include "barretenberg/flavor/flavor_macros.hpp" #include "barretenberg/flavor/relation_definitions.hpp" +#include "barretenberg/flavor/repeated_commitments_data.hpp" #include "barretenberg/honk/proof_system/permutation_library.hpp" #include "barretenberg/polynomials/polynomial.hpp" #include "barretenberg/polynomials/univariate.hpp" @@ -81,7 +82,30 @@ class TranslatorFlavor { static constexpr size_t NUM_WITNESS_ENTITIES = 91; // The total number of witnesses including shifts and derived entities. 
static constexpr size_t NUM_ALL_WITNESS_ENTITIES = 177; - + static constexpr size_t NUM_WIRES_NON_SHIFTED = 1; + static constexpr size_t NUM_SHIFTED_WITNESSES = 86; + static constexpr size_t NUM_CONCATENATED = NUM_CONCATENATED_WIRES * CONCATENATION_GROUP_SIZE; + // Number of elements in WireToBeShiftedWithoutConcatenated + static constexpr size_t NUM_WIRES_TO_BE_SHIFTED_WITHOUT_CONCATENATED = 16; + // The index of the first unshifted witness that is going to be shifted when AllEntities are partitioned into + // get_unshifted_without_concatenated(), get_to_be_shifted(), and get_groups_to_be_concatenated() + static constexpr size_t TO_BE_SHIFTED_WITNESSES_START = NUM_PRECOMPUTED_ENTITIES + NUM_WIRES_NON_SHIFTED; + // The index of the shift of the first to be shifted witness + static constexpr size_t SHIFTED_WITNESSES_START = NUM_SHIFTED_WITNESSES + TO_BE_SHIFTED_WITNESSES_START; + // The index of the first unshifted witness that is contained in the groups to be concatenated, when AllEntities are + // partitioned into get_unshifted_without_concatenated(), get_to_be_shifted(), and get_groups_to_be_concatenated() + static constexpr size_t TO_BE_CONCATENATED_START = + NUM_PRECOMPUTED_ENTITIES + NUM_WIRES_NON_SHIFTED + NUM_WIRES_TO_BE_SHIFTED_WITHOUT_CONCATENATED; + // The index of the first concatenation groups element inside AllEntities + static constexpr size_t CONCATENATED_START = NUM_SHIFTED_WITNESSES + SHIFTED_WITNESSES_START; + // A container to be fed to ShpleminiVerifier to avoid redundant scalar muls + static constexpr RepeatedCommitmentsData REPEATED_COMMITMENTS = + RepeatedCommitmentsData(NUM_PRECOMPUTED_ENTITIES + NUM_WIRES_NON_SHIFTED, + NUM_PRECOMPUTED_ENTITIES + NUM_WIRES_NON_SHIFTED + NUM_SHIFTED_WITNESSES, + NUM_SHIFTED_WITNESSES, + TO_BE_CONCATENATED_START, + CONCATENATED_START, + NUM_CONCATENATED); using GrandProductRelations = std::tuple>; // define the tuple of Relations that comprise the Sumcheck relation template @@ -141,91 +165,103 @@ class TranslatorFlavor { concatenated_range_constraints_3) // column 3 }; // TODO(https://github.com/AztecProtocol/barretenberg/issues/790) dedupe with shifted? 
- template class WireToBeShiftedEntities { + template class WireToBeShiftedWithoutConcatenated { + public: + DEFINE_FLAVOR_MEMBERS(DataType, + x_lo_y_hi, // column 0 + x_hi_z_1, // column 1 + y_lo_z_2, // column 2 + p_x_low_limbs, // column 3 + p_x_high_limbs, // column 4 + p_y_low_limbs, // column 5 + p_y_high_limbs, // column 6 + z_low_limbs, // column 7 + z_high_limbs, // column 8 + accumulators_binary_limbs_0, // column 9 + accumulators_binary_limbs_1, // column 10 + accumulators_binary_limbs_2, // column 11 + accumulators_binary_limbs_3, // column 12 + quotient_low_binary_limbs, // column 13 + quotient_high_binary_limbs, // column 14 + relation_wide_limbs); // column 15 + }; + + template class WireToBeShiftedAndConcatenated { public: DEFINE_FLAVOR_MEMBERS(DataType, - x_lo_y_hi, // column 0 - x_hi_z_1, // column 1 - y_lo_z_2, // column 2 - p_x_low_limbs, // column 3 - p_x_low_limbs_range_constraint_0, // column 4 - p_x_low_limbs_range_constraint_1, // column 5 - p_x_low_limbs_range_constraint_2, // column 6 - p_x_low_limbs_range_constraint_3, // column 7 - p_x_low_limbs_range_constraint_4, // column 8 - p_x_low_limbs_range_constraint_tail, // column 9 - p_x_high_limbs, // column 10 - p_x_high_limbs_range_constraint_0, // column 11 - p_x_high_limbs_range_constraint_1, // column 12 - p_x_high_limbs_range_constraint_2, // column 13 - p_x_high_limbs_range_constraint_3, // column 14 - p_x_high_limbs_range_constraint_4, // column 15 - p_x_high_limbs_range_constraint_tail, // column 16 - p_y_low_limbs, // column 17 - p_y_low_limbs_range_constraint_0, // column 18 - p_y_low_limbs_range_constraint_1, // column 19 - p_y_low_limbs_range_constraint_2, // column 20 - p_y_low_limbs_range_constraint_3, // column 21 - p_y_low_limbs_range_constraint_4, // column 22 - p_y_low_limbs_range_constraint_tail, // column 23 - p_y_high_limbs, // column 24 - p_y_high_limbs_range_constraint_0, // column 25 - p_y_high_limbs_range_constraint_1, // column 26 - p_y_high_limbs_range_constraint_2, // column 27 - p_y_high_limbs_range_constraint_3, // column 28 - p_y_high_limbs_range_constraint_4, // column 29 - p_y_high_limbs_range_constraint_tail, // column 30 - z_low_limbs, // column 31 - z_low_limbs_range_constraint_0, // column 32 - z_low_limbs_range_constraint_1, // column 33 - z_low_limbs_range_constraint_2, // column 34 - z_low_limbs_range_constraint_3, // column 35 - z_low_limbs_range_constraint_4, // column 36 - z_low_limbs_range_constraint_tail, // column 37 - z_high_limbs, // column 38 - z_high_limbs_range_constraint_0, // column 39 - z_high_limbs_range_constraint_1, // column 40 - z_high_limbs_range_constraint_2, // column 41 - z_high_limbs_range_constraint_3, // column 42 - z_high_limbs_range_constraint_4, // column 43 - z_high_limbs_range_constraint_tail, // column 44 - accumulators_binary_limbs_0, // column 45 - accumulators_binary_limbs_1, // column 46 - accumulators_binary_limbs_2, // column 47 - accumulators_binary_limbs_3, // column 48 - accumulator_low_limbs_range_constraint_0, // column 49 - accumulator_low_limbs_range_constraint_1, // column 50 - accumulator_low_limbs_range_constraint_2, // column 51 - accumulator_low_limbs_range_constraint_3, // column 52 - accumulator_low_limbs_range_constraint_4, // column 53 - accumulator_low_limbs_range_constraint_tail, // column 54 - accumulator_high_limbs_range_constraint_0, // column 55 - accumulator_high_limbs_range_constraint_1, // column 56 - accumulator_high_limbs_range_constraint_2, // column 57 - accumulator_high_limbs_range_constraint_3, // column 58 
- accumulator_high_limbs_range_constraint_4, // column 59 - accumulator_high_limbs_range_constraint_tail, // column 60 - quotient_low_binary_limbs, // column 61 - quotient_high_binary_limbs, // column 62 - quotient_low_limbs_range_constraint_0, // column 63 - quotient_low_limbs_range_constraint_1, // column 64 - quotient_low_limbs_range_constraint_2, // column 65 - quotient_low_limbs_range_constraint_3, // column 66 - quotient_low_limbs_range_constraint_4, // column 67 - quotient_low_limbs_range_constraint_tail, // column 68 - quotient_high_limbs_range_constraint_0, // column 69 - quotient_high_limbs_range_constraint_1, // column 70 - quotient_high_limbs_range_constraint_2, // column 71 - quotient_high_limbs_range_constraint_3, // column 72 - quotient_high_limbs_range_constraint_4, // column 73 - quotient_high_limbs_range_constraint_tail, // column 74 - relation_wide_limbs, // column 75 + p_x_low_limbs_range_constraint_0, // column 16 + p_x_low_limbs_range_constraint_1, // column 17 + p_x_low_limbs_range_constraint_2, // column 18 + p_x_low_limbs_range_constraint_3, // column 19 + p_x_low_limbs_range_constraint_4, // column 20 + p_x_low_limbs_range_constraint_tail, // column 21 + p_x_high_limbs_range_constraint_0, // column 22 + p_x_high_limbs_range_constraint_1, // column 23 + p_x_high_limbs_range_constraint_2, // column 24 + p_x_high_limbs_range_constraint_3, // column 25 + p_x_high_limbs_range_constraint_4, // column 26 + p_x_high_limbs_range_constraint_tail, // column 27 + p_y_low_limbs_range_constraint_0, // column 28 + p_y_low_limbs_range_constraint_1, // column 29 + p_y_low_limbs_range_constraint_2, // column 30 + p_y_low_limbs_range_constraint_3, // column 31 + p_y_low_limbs_range_constraint_4, // column 32 + p_y_low_limbs_range_constraint_tail, // column 33 + p_y_high_limbs_range_constraint_0, // column 34 + p_y_high_limbs_range_constraint_1, // column 35 + p_y_high_limbs_range_constraint_2, // column 36 + p_y_high_limbs_range_constraint_3, // column 37 + p_y_high_limbs_range_constraint_4, // column 38 + p_y_high_limbs_range_constraint_tail, // column 39 + z_low_limbs_range_constraint_0, // column 40 + z_low_limbs_range_constraint_1, // column 41 + z_low_limbs_range_constraint_2, // column 42 + z_low_limbs_range_constraint_3, // column 43 + z_low_limbs_range_constraint_4, // column 44 + z_low_limbs_range_constraint_tail, // column 45 + z_high_limbs_range_constraint_0, // column 46 + z_high_limbs_range_constraint_1, // column 47 + z_high_limbs_range_constraint_2, // column 48 + z_high_limbs_range_constraint_3, // column 49 + z_high_limbs_range_constraint_4, // column 50 + z_high_limbs_range_constraint_tail, // column 51 + accumulator_low_limbs_range_constraint_0, // column 52 + accumulator_low_limbs_range_constraint_1, // column 53 + accumulator_low_limbs_range_constraint_2, // column 54 + accumulator_low_limbs_range_constraint_3, // column 55 + accumulator_low_limbs_range_constraint_4, // column 56 + accumulator_low_limbs_range_constraint_tail, // column 57 + accumulator_high_limbs_range_constraint_0, // column 58 + accumulator_high_limbs_range_constraint_1, // column 59 + accumulator_high_limbs_range_constraint_2, // column 60 + accumulator_high_limbs_range_constraint_3, // column 61 + accumulator_high_limbs_range_constraint_4, // column 62 + accumulator_high_limbs_range_constraint_tail, // column 63 + quotient_low_limbs_range_constraint_0, // column 64 + quotient_low_limbs_range_constraint_1, // column 65 + quotient_low_limbs_range_constraint_2, // column 66 + 
quotient_low_limbs_range_constraint_3, // column 67 + quotient_low_limbs_range_constraint_4, // column 68 + quotient_low_limbs_range_constraint_tail, // column 69 + quotient_high_limbs_range_constraint_0, // column 70 + quotient_high_limbs_range_constraint_1, // column 71 + quotient_high_limbs_range_constraint_2, // column 72 + quotient_high_limbs_range_constraint_3, // column 73 + quotient_high_limbs_range_constraint_4, // column 74 + quotient_high_limbs_range_constraint_tail, // column 75 relation_wide_limbs_range_constraint_0, // column 76 relation_wide_limbs_range_constraint_1, // column 77 relation_wide_limbs_range_constraint_2, // column 78 relation_wide_limbs_range_constraint_3); // column 79 }; + template + class WireToBeShiftedEntities : public WireToBeShiftedWithoutConcatenated, + public WireToBeShiftedAndConcatenated { + public: + DEFINE_COMPOUND_GET_ALL(WireToBeShiftedWithoutConcatenated, WireToBeShiftedAndConcatenated) + }; + // TODO(https://github.com/AztecProtocol/barretenberg/issues/907) // Note: These are technically derived from wires but do not depend on challenges (like z_perm). They are committed // to in the wires commitment round. @@ -407,51 +443,54 @@ class TranslatorFlavor { x_hi_z_1_shift, // column 1 y_lo_z_2_shift, // column 2 p_x_low_limbs_shift, // column 3 + p_x_high_limbs_shift, // column 10 + p_y_low_limbs_shift, // column 17 + p_y_high_limbs_shift, // column 24 + z_low_limbs_shift, // column 31 + z_high_limbs_shift, // column 38 + accumulators_binary_limbs_0_shift, // column 45 + accumulators_binary_limbs_1_shift, // column 46 + accumulators_binary_limbs_2_shift, // column 47 + accumulators_binary_limbs_3_shift, // column 48 + quotient_low_binary_limbs_shift, // column 61 + quotient_high_binary_limbs_shift, // column 62 + relation_wide_limbs_shift, // column 75 p_x_low_limbs_range_constraint_0_shift, // column 4 p_x_low_limbs_range_constraint_1_shift, // column 5 p_x_low_limbs_range_constraint_2_shift, // column 6 p_x_low_limbs_range_constraint_3_shift, // column 7 p_x_low_limbs_range_constraint_4_shift, // column 8 p_x_low_limbs_range_constraint_tail_shift, // column 9 - p_x_high_limbs_shift, // column 10 p_x_high_limbs_range_constraint_0_shift, // column 11 p_x_high_limbs_range_constraint_1_shift, // column 12 p_x_high_limbs_range_constraint_2_shift, // column 13 p_x_high_limbs_range_constraint_3_shift, // column 14 p_x_high_limbs_range_constraint_4_shift, // column 15 p_x_high_limbs_range_constraint_tail_shift, // column 16 - p_y_low_limbs_shift, // column 17 p_y_low_limbs_range_constraint_0_shift, // column 18 p_y_low_limbs_range_constraint_1_shift, // column 19 p_y_low_limbs_range_constraint_2_shift, // column 20 p_y_low_limbs_range_constraint_3_shift, // column 21 p_y_low_limbs_range_constraint_4_shift, // column 22 p_y_low_limbs_range_constraint_tail_shift, // column 23 - p_y_high_limbs_shift, // column 24 p_y_high_limbs_range_constraint_0_shift, // column 25 p_y_high_limbs_range_constraint_1_shift, // column 26 p_y_high_limbs_range_constraint_2_shift, // column 27 p_y_high_limbs_range_constraint_3_shift, // column 28 p_y_high_limbs_range_constraint_4_shift, // column 29 p_y_high_limbs_range_constraint_tail_shift, // column 30 - z_low_limbs_shift, // column 31 z_low_limbs_range_constraint_0_shift, // column 32 z_low_limbs_range_constraint_1_shift, // column 33 z_low_limbs_range_constraint_2_shift, // column 34 z_low_limbs_range_constraint_3_shift, // column 35 z_low_limbs_range_constraint_4_shift, // column 36 
z_low_limbs_range_constraint_tail_shift, // column 37 - z_high_limbs_shift, // column 38 z_high_limbs_range_constraint_0_shift, // column 39 z_high_limbs_range_constraint_1_shift, // column 40 z_high_limbs_range_constraint_2_shift, // column 41 z_high_limbs_range_constraint_3_shift, // column 42 z_high_limbs_range_constraint_4_shift, // column 43 z_high_limbs_range_constraint_tail_shift, // column 44 - accumulators_binary_limbs_0_shift, // column 45 - accumulators_binary_limbs_1_shift, // column 46 - accumulators_binary_limbs_2_shift, // column 47 - accumulators_binary_limbs_3_shift, // column 48 accumulator_low_limbs_range_constraint_0_shift, // column 49 accumulator_low_limbs_range_constraint_1_shift, // column 50 accumulator_low_limbs_range_constraint_2_shift, // column 51 @@ -464,8 +503,6 @@ class TranslatorFlavor { accumulator_high_limbs_range_constraint_3_shift, // column 58 accumulator_high_limbs_range_constraint_4_shift, // column 59 accumulator_high_limbs_range_constraint_tail_shift, // column 60 - quotient_low_binary_limbs_shift, // column 61 - quotient_high_binary_limbs_shift, // column 62 quotient_low_limbs_range_constraint_0_shift, // column 63 quotient_low_limbs_range_constraint_1_shift, // column 64 quotient_low_limbs_range_constraint_2_shift, // column 65 @@ -478,7 +515,6 @@ class TranslatorFlavor { quotient_high_limbs_range_constraint_3_shift, // column 72 quotient_high_limbs_range_constraint_4_shift, // column 73 quotient_high_limbs_range_constraint_tail_shift, // column 74 - relation_wide_limbs_shift, // column 75 relation_wide_limbs_range_constraint_0_shift, // column 76 relation_wide_limbs_range_constraint_1_shift, // column 77 relation_wide_limbs_range_constraint_2_shift, // column 78 @@ -839,13 +875,13 @@ class TranslatorFlavor { this->x_hi_z_1 = "X_HI_Z_1"; this->y_lo_z_2 = "Y_LO_Z_2"; this->p_x_low_limbs = "P_X_LOW_LIMBS"; + this->p_x_high_limbs = "P_X_HIGH_LIMBS"; this->p_x_low_limbs_range_constraint_0 = "P_X_LOW_LIMBS_RANGE_CONSTRAINT_0"; this->p_x_low_limbs_range_constraint_1 = "P_X_LOW_LIMBS_RANGE_CONSTRAINT_1"; this->p_x_low_limbs_range_constraint_2 = "P_X_LOW_LIMBS_RANGE_CONSTRAINT_2"; this->p_x_low_limbs_range_constraint_3 = "P_X_LOW_LIMBS_RANGE_CONSTRAINT_3"; this->p_x_low_limbs_range_constraint_4 = "P_X_LOW_LIMBS_RANGE_CONSTRAINT_4"; this->p_x_low_limbs_range_constraint_tail = "P_X_LOW_LIMBS_RANGE_CONSTRAINT_TAIL"; - this->p_x_high_limbs = "P_X_HIGH_LIMBS"; this->p_x_high_limbs_range_constraint_0 = "P_X_HIGH_LIMBS_RANGE_CONSTRAINT_0"; this->p_x_high_limbs_range_constraint_1 = "P_X_HIGH_LIMBS_RANGE_CONSTRAINT_1"; this->p_x_high_limbs_range_constraint_2 = "P_X_HIGH_LIMBS_RANGE_CONSTRAINT_2"; diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/translator_verifier.cpp b/barretenberg/cpp/src/barretenberg/translator_vm/translator_verifier.cpp index 1aabba2e607..628d3b0aab1 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/translator_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/translator_verifier.cpp @@ -117,6 +117,7 @@ bool TranslatorVerifier::verify_proof(const HonkProof& proof) if (sumcheck_verified.has_value() && !sumcheck_verified.value()) { return false; } + // Execute Shplemini const BatchOpeningClaim opening_claim = Shplemini::compute_batch_opening_claim(circuit_size, @@ -127,6 +128,7 @@ bool TranslatorVerifier::verify_proof(const HonkProof& proof) multivariate_challenge, Commitment::one(), transcript, + Flavor::REPEATED_COMMITMENTS, RefVector(libra_commitments), libra_evaluations, 
commitments.get_groups_to_be_concatenated(), diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_verifier.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_verifier.cpp index 540a74e6f13..996df320f95 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_verifier.cpp @@ -79,6 +79,7 @@ template bool DeciderVerifier_::verify() sumcheck_output.challenge, Commitment::one(), transcript, + Flavor::REPEATED_COMMITMENTS, RefVector(libra_commitments), libra_evaluations); const auto pairing_points = PCS::reduce_verify_batch_opening_claim(opening_claim, transcript); From 7e587d6d43cc28174d807c255f5270212a0b1c98 Mon Sep 17 00:00:00 2001 From: Cody Gunton Date: Thu, 14 Nov 2024 12:55:05 -0500 Subject: [PATCH 3/5] feat: Mega memory benchmarks (#9858) It would be better to actually use Google Bench's memory manager functionality and count allocations. We already have something similar implemented for Tracy. After striking out with that approach for a bit I reverted to just manually counting the size of the biggest vectors. The PR uncovered this issue: some trace structures have unusable capacity, not just due to using fewer than a dyadic number of gates, but also because of coupling of certain gate types https://github.com/AztecProtocol/barretenberg/issues/1149 See https://github.com/AztecProtocol/aztec-packages/pull/9858 for logs of benchmarks. --- .../src/barretenberg/benchmark/CMakeLists.txt | 1 + .../mega_memory_bench/CMakeLists.txt | 5 + .../mega_memory_bench/mega_memory.bench.cpp | 344 ++++++++++++++++++ .../arithmetization/mega_arithmetization.hpp | 15 +- .../mega_circuit_builder.hpp | 38 ++ .../stdlib_circuit_builders/mega_flavor.hpp | 18 + .../plookup_tables/types.hpp | 1 - .../ultra_honk/decider_proving_key.hpp | 4 +- 8 files changed, 422 insertions(+), 4 deletions(-) create mode 100644 barretenberg/cpp/src/barretenberg/benchmark/mega_memory_bench/CMakeLists.txt create mode 100644 barretenberg/cpp/src/barretenberg/benchmark/mega_memory_bench/mega_memory.bench.cpp diff --git a/barretenberg/cpp/src/barretenberg/benchmark/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/benchmark/CMakeLists.txt index e76758ca571..31db5c02858 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/CMakeLists.txt +++ b/barretenberg/cpp/src/barretenberg/benchmark/CMakeLists.txt @@ -17,3 +17,4 @@ add_subdirectory(append_only_tree_bench) add_subdirectory(ultra_bench) add_subdirectory(stdlib_hash) add_subdirectory(circuit_construction_bench) +add_subdirectory(mega_memory_bench) diff --git a/barretenberg/cpp/src/barretenberg/benchmark/mega_memory_bench/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/benchmark/mega_memory_bench/CMakeLists.txt new file mode 100644 index 00000000000..f35647268d7 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/benchmark/mega_memory_bench/CMakeLists.txt @@ -0,0 +1,5 @@ +barretenberg_module( + mega_memory_bench + ultra_honk + stdlib_primitives +) \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/benchmark/mega_memory_bench/mega_memory.bench.cpp b/barretenberg/cpp/src/barretenberg/benchmark/mega_memory_bench/mega_memory.bench.cpp new file mode 100644 index 00000000000..29d58f16606 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/benchmark/mega_memory_bench/mega_memory.bench.cpp @@ -0,0 +1,344 @@ +#include "barretenberg/stdlib/primitives/field/field.hpp" +#include "barretenberg/stdlib/primitives/plookup/plookup.hpp" +#include 
"barretenberg/stdlib_circuit_builders/plookup_tables/fixed_base/fixed_base.hpp" +#include "barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp" +#include "barretenberg/ultra_honk/decider_proving_key.hpp" + +#include + +using namespace benchmark; +using namespace bb; +using namespace bb::plookup; + +namespace { +auto& engine = numeric::get_debug_randomness(); +} + +using DeciderProvingKey = DeciderProvingKey_; +using Builder = MegaCircuitBuilder; +using field_ct = stdlib::field_t; +using witness_ct = stdlib::witness_t; +using plookup_read = stdlib::plookup_read; + +static constexpr size_t NUM_SHORT = 10; + +void fill_ecc_op_block(Builder& builder) +{ + const auto point = g1::affine_element::random_element(); + const auto scalar = fr::random_element(); + const size_t num_to_add((builder.blocks.ecc_op.get_fixed_size() - NUM_SHORT) >> 1); // each accum call adds two rows + for (size_t idx = 0; idx < num_to_add; idx++) { + builder.queue_ecc_mul_accum(point, scalar); + } +} + +void fill_pub_inputs_block(Builder& builder) +{ + for (size_t idx = 0; idx < builder.blocks.pub_inputs.get_fixed_size() - NUM_SHORT; idx++) { + builder.add_public_variable(fr::random_element()); + } +} + +void fill_databus_blocks(Builder& builder) +{ + static constexpr size_t NUM_BUS_IDS(3); + const size_t num_gates_per_bus_id((builder.blocks.busread.get_fixed_size() - NUM_SHORT) / NUM_BUS_IDS); + for (size_t idx = 1; idx < num_gates_per_bus_id + 1; idx++) { // start at 1 to avoid / by zero below + const uint32_t idx_1_1 = builder.add_variable(fr::random_element()); + const uint32_t idx_1_2 = builder.add_variable(static_cast(fr::random_element()) % idx); + builder.add_public_calldata(idx_1_1); + builder.read_calldata(idx_1_2); + const uint32_t idx_2_1 = builder.add_variable(fr::random_element()); + const uint32_t idx_2_2 = builder.add_variable(static_cast(fr::random_element()) % idx); + builder.add_public_secondary_calldata(idx_2_1); + builder.read_secondary_calldata(idx_2_2); + const uint32_t idx_3_1 = builder.add_variable(fr::random_element()); + const uint32_t idx_3_2 = builder.add_variable(static_cast(fr::random_element()) % idx); + builder.add_public_return_data(idx_3_1); + builder.read_return_data(idx_3_2); + } +} + +void fill_delta_range_block(Builder& builder) +{ + // At the moment the trace has space for 90k delta range gates but I don't think it's possible to use them all + // because there is not enough capacity in the arithmetic block! 
+ + const uint32_t idx_1 = builder.add_variable(1 << 0); + builder.create_range_constraint(idx_1, 1, "whoops"); + const uint32_t idx_2 = builder.add_variable(1 << 1); + builder.create_range_constraint(idx_2, 2, "whoops"); + const uint32_t idx_3 = builder.add_variable(1 << 2); + builder.create_range_constraint(idx_3, 3, "whoops"); + const uint32_t idx_4 = builder.add_variable(1 << 3); + builder.create_range_constraint(idx_4, 4, "whoops"); + const uint32_t idx_5 = builder.add_variable(1 << 4); + builder.create_range_constraint(idx_5, 5, "whoops"); + const uint32_t idx_6 = builder.add_variable(1 << 5); + builder.create_range_constraint(idx_6, 6, "whoops"); + const uint32_t idx_7 = builder.add_variable(1 << 6); + builder.create_range_constraint(idx_7, 7, "whoops"); + const uint32_t idx_8 = builder.add_variable(1 << 7); + builder.create_range_constraint(idx_8, 8, "whoops"); + const uint32_t idx_9 = builder.add_variable(1 << 8); + builder.create_range_constraint(idx_9, 9, "whoops"); + const uint32_t idx_10 = builder.add_variable(1 << 9); + builder.create_range_constraint(idx_10, 10, "whoops"); + const uint32_t idx_11 = builder.add_variable(1 << 10); + builder.create_range_constraint(idx_11, 11, "whoops"); + const uint32_t idx_12 = builder.add_variable(1 << 11); + builder.create_range_constraint(idx_12, 12, "whoops"); + const uint32_t idx_13 = builder.add_variable(1 << 12); + builder.create_range_constraint(idx_13, 13, "whoops"); + const uint32_t idx_14 = builder.add_variable(1 << 13); + builder.create_range_constraint(idx_14, 14, "whoops"); + // the above range constraints as 2759 gates + static constexpr size_t NUM_GATES_ADDED_FOR_ALL_DEFAULT_RANGES = 2759; + + size_t num_range_constraints = 14; + + auto& range_block = builder.blocks.delta_range; + auto& arith_block = builder.blocks.arithmetic; + + const auto range_block_has_space = [&range_block, &num_range_constraints]() { + return num_range_constraints < + 4 * (range_block.get_fixed_size() - NUM_GATES_ADDED_FOR_ALL_DEFAULT_RANGES - NUM_SHORT); + }; + + const auto arith_block_has_space = [&arith_block]() { + return arith_block.size() < arith_block.get_fixed_size() - 100; + }; + + while (range_block_has_space() && arith_block_has_space()) { + const uint32_t w_idx = builder.add_variable(1023); + builder.create_range_constraint(w_idx, 10, "failed to create range constraint"); + num_range_constraints++; + } +} + +void fill_arithmetic_block(Builder& builder) +{ + const uint32_t idx_1 = builder.add_variable(fr::random_element()); + const uint32_t idx_2 = builder.add_variable(fr::random_element()); + const uint32_t idx_3 = builder.add_variable(fr::random_element()); + const uint32_t idx_4 = builder.add_variable(fr::random_element()); + while (builder.blocks.arithmetic.size() < builder.blocks.arithmetic.get_fixed_size() - 10 * NUM_SHORT) { + builder.create_big_add_gate({ idx_1, idx_2, idx_3, idx_4, 1, 1, 1, 1, 1 }); + } +} + +void fill_elliptic_block(Builder& builder) +{ + const uint32_t x1_idx = builder.add_variable(fr::random_element()); + const uint32_t y1_idx = builder.add_variable(fr::random_element()); + const uint32_t x2_idx = builder.add_variable(fr::random_element()); + const uint32_t y2_idx = builder.add_variable(fr::random_element()); + const uint32_t x3_idx = builder.add_variable(fr::random_element()); + const uint32_t y3_idx = builder.add_variable(fr::random_element()); + while (builder.blocks.elliptic.size() < builder.blocks.elliptic.get_fixed_size() - 10 * NUM_SHORT) { + builder.create_ecc_add_gate({ x1_idx, y1_idx, x2_idx, y2_idx, 
x3_idx, y3_idx, 1 }); + } +} + +void fill_aux_block(Builder& builder) +{ + auto& block = builder.blocks.aux; + + const uint32_t idx_1 = builder.add_variable(fr::random_element()); + const uint32_t idx_2 = builder.add_variable(fr::random_element()); + const uint32_t idx_3 = builder.add_variable(fr::random_element()); + const uint32_t idx_4 = builder.add_variable(fr::random_element()); + while (block.size() < block.get_fixed_size() - 10 * NUM_SHORT) { + builder.apply_aux_selectors(Builder::AUX_SELECTORS::ROM_READ); + block.populate_wires(idx_1, idx_2, idx_3, idx_4); + builder.apply_aux_selectors(Builder::AUX_SELECTORS::LIMB_ACCUMULATE_1); + block.populate_wires(idx_1, idx_2, idx_3, idx_4); + builder.apply_aux_selectors(Builder::AUX_SELECTORS::LIMB_ACCUMULATE_2); + block.populate_wires(idx_1, idx_2, idx_3, idx_4); + builder.apply_aux_selectors(Builder::AUX_SELECTORS::NON_NATIVE_FIELD_1); + block.populate_wires(idx_1, idx_2, idx_3, idx_4); + builder.apply_aux_selectors(Builder::AUX_SELECTORS::NON_NATIVE_FIELD_2); + block.populate_wires(idx_1, idx_2, idx_3, idx_4); + builder.apply_aux_selectors(Builder::AUX_SELECTORS::NON_NATIVE_FIELD_3); + block.populate_wires(idx_1, idx_2, idx_3, idx_4); + builder.apply_aux_selectors(Builder::AUX_SELECTORS::RAM_CONSISTENCY_CHECK); + block.populate_wires(idx_1, idx_2, idx_3, idx_4); + builder.apply_aux_selectors(Builder::AUX_SELECTORS::ROM_CONSISTENCY_CHECK); + block.populate_wires(idx_1, idx_2, idx_3, idx_4); + builder.apply_aux_selectors(Builder::AUX_SELECTORS::RAM_TIMESTAMP_CHECK); + block.populate_wires(idx_1, idx_2, idx_3, idx_4); + builder.apply_aux_selectors(Builder::AUX_SELECTORS::ROM_READ); + block.populate_wires(idx_1, idx_2, idx_3, idx_4); + builder.apply_aux_selectors(Builder::AUX_SELECTORS::RAM_READ); + block.populate_wires(idx_1, idx_2, idx_3, idx_4); + builder.apply_aux_selectors(Builder::AUX_SELECTORS::RAM_WRITE); + block.populate_wires(idx_1, idx_2, idx_3, idx_4); + } +} + +void fill_poseidon2_internal_block(Builder& builder) +{ + auto& block = builder.blocks.poseidon2_internal; + const uint32_t idx_1 = builder.add_variable(fr::random_element()); + const uint32_t idx_2 = builder.add_variable(fr::random_element()); + const uint32_t idx_3 = builder.add_variable(fr::random_element()); + const uint32_t idx_4 = builder.add_variable(fr::random_element()); + + while (block.size() < block.get_fixed_size() - NUM_SHORT) { + builder.create_poseidon2_internal_gate({ idx_1, idx_2, idx_3, idx_4, 1 }); + } +} + +void fill_poseidon2_external_block(Builder& builder) +{ + auto& block = builder.blocks.poseidon2_external; + const uint32_t idx_1 = builder.add_variable(fr::random_element()); + const uint32_t idx_2 = builder.add_variable(fr::random_element()); + const uint32_t idx_3 = builder.add_variable(fr::random_element()); + const uint32_t idx_4 = builder.add_variable(fr::random_element()); + + while (block.size() < block.get_fixed_size() - NUM_SHORT) { + builder.create_poseidon2_external_gate({ idx_1, idx_2, idx_3, idx_4, 1 }); + } +} + +void fill_lookup_block(Builder& builder) +{ + auto& block = builder.blocks.lookup; + + // static constexpr size_t NUM_LOOKUP_TYPES_USED(15); + + while (block.size() < (block.get_fixed_size() - 20 * NUM_SHORT)) { + // SHA + uint256_t left_value = (engine.get_random_uint256() & 0xffffffffULL); + uint256_t right_value = (engine.get_random_uint256() & 0xffffffffULL); + field_ct left = witness_ct(&builder, bb::fr(left_value)); + field_ct right = witness_ct(&builder, bb::fr(right_value)); + 
plookup_read::get_lookup_accumulators(MultiTableId::SHA256_CH_INPUT, left, right, true); + plookup_read::get_lookup_accumulators(MultiTableId::SHA256_CH_OUTPUT, left, right, true); + plookup_read::get_lookup_accumulators(MultiTableId::SHA256_MAJ_INPUT, left, right, true); + plookup_read::get_lookup_accumulators(MultiTableId::SHA256_MAJ_OUTPUT, left, right, true); + plookup_read::get_lookup_accumulators(MultiTableId::SHA256_WITNESS_INPUT, left, right, true); + plookup_read::get_lookup_accumulators(MultiTableId::SHA256_WITNESS_OUTPUT, left, right, true); + + // AES tables not actually used anywhere... + + // fixed base + auto pedersen_input_value = fr::random_element(); + const auto input_hi = + uint256_t(pedersen_input_value) + .slice(plookup::fixed_base::table::BITS_PER_LO_SCALAR, + plookup::fixed_base::table::BITS_PER_LO_SCALAR + plookup::fixed_base::table::BITS_PER_HI_SCALAR); + const auto input_lo = + uint256_t(pedersen_input_value).slice(0, bb::plookup::fixed_base::table::BITS_PER_LO_SCALAR); + plookup::get_lookup_accumulators(bb::plookup::MultiTableId::FIXED_BASE_LEFT_HI, input_hi); + plookup::get_lookup_accumulators(bb::plookup::MultiTableId::FIXED_BASE_LEFT_LO, input_lo); + plookup::get_lookup_accumulators(bb::plookup::MultiTableId::FIXED_BASE_RIGHT_HI, input_hi); + plookup::get_lookup_accumulators(bb::plookup::MultiTableId::FIXED_BASE_RIGHT_LO, input_lo); + + // bit ops + plookup_read::get_lookup_accumulators(MultiTableId::UINT32_XOR, left, right, true); + plookup_read::get_lookup_accumulators(MultiTableId::UINT32_AND, left, right, true); + + // bn254 generator slices + auto byte = field_ct::from_witness(&builder, engine.get_random_uint256() & 0xffULL); + plookup_read::get_lookup_accumulators(MultiTableId::BN254_XLO, byte, 0, false); + plookup_read::get_lookup_accumulators(MultiTableId::BN254_XHI, byte, 0, false); + plookup_read::get_lookup_accumulators(MultiTableId::BN254_YLO, byte, 0, false); + plookup_read::get_lookup_accumulators(MultiTableId::BN254_YHI, byte, 0, false); + plookup_read::get_lookup_accumulators(MultiTableId::BN254_XYPRIME, byte, 0, false); + plookup_read::get_lookup_accumulators(MultiTableId::BN254_XLO_ENDO, byte, 0, false); + plookup_read::get_lookup_accumulators(MultiTableId::BN254_XHI_ENDO, byte, 0, false); + plookup_read::get_lookup_accumulators(MultiTableId::BN254_XYPRIME_ENDO, byte, 0, false); + + // secp256k1 generator slices + plookup_read::get_lookup_accumulators(MultiTableId::SECP256K1_XLO, byte, 0, false); + plookup_read::get_lookup_accumulators(MultiTableId::SECP256K1_XHI, byte, 0, false); + plookup_read::get_lookup_accumulators(MultiTableId::SECP256K1_YLO, byte, 0, false); + plookup_read::get_lookup_accumulators(MultiTableId::SECP256K1_YHI, byte, 0, false); + plookup_read::get_lookup_accumulators(MultiTableId::SECP256K1_XYPRIME, byte, 0, false); + plookup_read::get_lookup_accumulators(MultiTableId::SECP256K1_XLO_ENDO, byte, 0, false); + plookup_read::get_lookup_accumulators(MultiTableId::SECP256K1_XHI_ENDO, byte, 0, false); + plookup_read::get_lookup_accumulators(MultiTableId::SECP256K1_XYPRIME_ENDO, byte, 0, false); + + // blake xor + plookup_read::get_lookup_accumulators(MultiTableId::BLAKE_XOR, left, right, true); + plookup_read::get_lookup_accumulators(MultiTableId::BLAKE_XOR_ROTATE_16, left, right, true); + plookup_read::get_lookup_accumulators(MultiTableId::BLAKE_XOR_ROTATE_8, left, right, true); + plookup_read::get_lookup_accumulators(MultiTableId::BLAKE_XOR_ROTATE_7, left, right, true); + + // keccak tests trigger + // 
SharedShiftedVirtualZeroesArray ... Assertion `(index >= start_ && index < end_)' failed. + // plookup_read::get_lookup_accumulators(MultiTableId::KECCAK_THETA_OUTPUT, left, right, true); + // plookup_read::get_lookup_accumulators(MultiTableId::KECCAK_CHI_OUTPUT, left, right, true); + // plookup_read::get_lookup_accumulators(MultiTableId::KECCAK_FORMAT_INPUT, left, right, true); + // plookup_read::get_lookup_accumulators(MultiTableId::KECCAK_FORMAT_OUTPUT, left, right, true); + // plookup_read::get_lookup_accumulators(MultiTableId::KECCAK_NORMALIZE_AND_ROTATE, left, right, true); + } +} + +void fill_trace(State& state, TraceSettings settings) +{ + Builder builder; + builder.blocks.set_fixed_block_sizes(settings); + + fill_ecc_op_block(builder); + fill_pub_inputs_block(builder); + fill_databus_blocks(builder); + fill_delta_range_block(builder); + fill_arithmetic_block(builder); // must come after fill_delta_range_block + fill_elliptic_block(builder); + fill_aux_block(builder); + fill_poseidon2_external_block(builder); + fill_poseidon2_internal_block(builder); + fill_lookup_block(builder); + + { + // finalize doesn't populate public inputs block, so copy to verify that the block is being filled well. + // otherwise the pk construction will overflow the block + // alternative: add to finalize or add a flag to check whether PIs have already been populated + auto builder_copy = builder; + builder_copy.finalize_circuit(/* ensure_nonzero */ false); + DeciderProvingKey::Trace::populate_public_inputs_block(builder_copy); + + for (const auto [label, block] : zip_view(builder_copy.blocks.get_labels(), builder_copy.blocks.get())) { + bool overfilled = block.size() >= block.get_fixed_size(); + if (overfilled) { + vinfo(label, " overfilled"); + } + ASSERT(!overfilled); + vinfo(label, ": ", block.size(), " / ", block.get_fixed_size()); + } + } + + builder.finalize_circuit(/* ensure_nonzero */ true); + uint64_t builder_estimate = builder.estimate_memory(); + for (auto _ : state) { + DeciderProvingKey proving_key(builder, settings); + uint64_t memory_estimate = proving_key.proving_key.estimate_memory(); + state.counters["poly_mem_est"] = static_cast(memory_estimate); + state.counters["builder_mem_est"] = static_cast(builder_estimate); + benchmark::DoNotOptimize(proving_key); + } +} + +void fill_trace_client_ivc_bench(State& state) +{ + fill_trace(state, { TraceStructure::CLIENT_IVC_BENCH, /*overflow_capacity=*/0 }); +} + +void fill_trace_e2e_full_test(State& state) +{ + fill_trace(state, { TraceStructure::E2E_FULL_TEST, /*overflow_capacity=*/0 }); +} + +static void pk_mem(State& state, void (*test_circuit_function)(State&)) noexcept +{ + test_circuit_function(state); +} + +BENCHMARK_CAPTURE(pk_mem, E2E_FULL_TEST, &fill_trace_e2e_full_test)->Unit(kMillisecond)->Iterations(1); + +BENCHMARK_CAPTURE(pk_mem, CLIENT_IVC_BENCH, &fill_trace_client_ivc_bench)->Unit(kMillisecond)->Iterations(1); + +BENCHMARK_MAIN(); \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/arithmetization/mega_arithmetization.hpp b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/arithmetization/mega_arithmetization.hpp index 8a19c73cc43..dffe7d5d25a 100644 --- a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/arithmetization/mega_arithmetization.hpp +++ b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/arithmetization/mega_arithmetization.hpp @@ -38,6 +38,14 @@ template class MegaArith { T lookup; T overflow; // block gates of arbitrary type that overflow their designated block + 
std::vector get_labels() const + { + return { "ecc_op", "pub_inputs", "busread", + "arithmetic", "delta_range", "elliptic", + "aux", "poseidon2_external", "poseidon2_internal", + "lookup" }; + } + auto get() { return RefArray{ ecc_op, @@ -52,6 +60,7 @@ template class MegaArith { lookup, overflow }; } + auto get() const { return RefArray{ ecc_op, @@ -297,7 +306,11 @@ template class MegaArith { { info("Gate blocks summary: (actual gates / fixed capacity)"); info("goblin ecc op :\t", this->ecc_op.size(), "/", this->ecc_op.get_fixed_size()); - info("pub inputs :\t", this->pub_inputs.size(), "/", this->pub_inputs.get_fixed_size()); + info("pub inputs :\t", + this->pub_inputs.size(), + "/", + this->pub_inputs.get_fixed_size(), + " (populated in decider pk constructor)"); info("busread :\t", this->busread.size(), "/", this->busread.get_fixed_size()); info("arithmetic :\t", this->arithmetic.size(), "/", this->arithmetic.get_fixed_size()); info("delta range :\t", this->delta_range.size(), "/", this->delta_range.get_fixed_size()); diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_circuit_builder.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_circuit_builder.hpp index dd51a5424f9..a9f51823547 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_circuit_builder.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_circuit_builder.hpp @@ -236,6 +236,44 @@ template class MegaCircuitBuilder_ : public UltraCircuitBuilder_(BusId::CALLDATA)]; } const BusVector& get_secondary_calldata() const { return databus[static_cast(BusId::SECONDARY_CALLDATA)]; } const BusVector& get_return_data() const { return databus[static_cast(BusId::RETURNDATA)]; } + uint64_t estimate_memory() const + { + vinfo("++Estimating builder memory++"); + uint64_t result{ 0 }; + + // gates: + for (auto [block, label] : zip_view(this->blocks.get(), this->blocks.get_labels())) { + uint64_t size{ 0 }; + for (const auto& wire : block.wires) { + size += wire.capacity() * sizeof(uint32_t); + } + for (const auto& selector : block.selectors) { + size += selector.capacity() * sizeof(FF); + } + vinfo(label, " size ", size >> 10, " KiB"); + result += size; + } + + // variables + size_t to_add{ this->variables.capacity() * sizeof(FF) }; + result += to_add; + vinfo("variables: ", to_add); + + // public inputs + to_add = this->public_inputs.capacity() * sizeof(uint32_t); + result += to_add; + vinfo("public inputs: ", to_add); + + // other variable indices + to_add = this->next_var_index.capacity() * sizeof(uint32_t); + to_add += this->prev_var_index.capacity() * sizeof(uint32_t); + to_add += this->real_variable_index.capacity() * sizeof(uint32_t); + to_add += this->real_variable_tags.capacity() * sizeof(uint32_t); + result += to_add; + vinfo("variable indices: ", to_add); + + return result; + } }; using MegaCircuitBuilder = MegaCircuitBuilder_; } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_flavor.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_flavor.hpp index 3b113b52270..5df52d1997e 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_flavor.hpp @@ -520,6 +520,24 @@ class MegaFlavor { compute_grand_product>( this->polynomials, relation_parameters, size_override); } + + uint64_t estimate_memory() + { + vinfo("++Estimating proving key memory++"); + for (auto [polynomial, label] : 
zip_view(polynomials.get_all(), polynomials.get_labels())) { + uint64_t size = polynomial.size(); + vinfo(label, " num: ", size, " size: ", (size * sizeof(FF)) >> 10, " KiB"); + } + + uint64_t result(0); + for (auto& polynomial : polynomials.get_unshifted()) { + result += polynomial.size() * sizeof(FF); + } + + result += public_inputs.capacity() * sizeof(FF); + + return result; + } }; /** diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/plookup_tables/types.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/plookup_tables/types.hpp index 259082820eb..9ba18ab891f 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/plookup_tables/types.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/plookup_tables/types.hpp @@ -111,7 +111,6 @@ enum MultiTableId { BLAKE_XOR_ROTATE_16, BLAKE_XOR_ROTATE_8, BLAKE_XOR_ROTATE_7, - PEDERSEN_IV, HONK_DUMMY_MULTI, KECCAK_THETA_OUTPUT, KECCAK_CHI_OUTPUT, diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_proving_key.hpp b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_proving_key.hpp index 14f399503f8..e09628038e3 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_proving_key.hpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_proving_key.hpp @@ -30,12 +30,12 @@ template class DeciderProvingKey_ { using Polynomial = typename Flavor::Polynomial; using RelationSeparator = typename Flavor::RelationSeparator; - using Trace = ExecutionTrace_; - // Flag indicating whether the polynomials will be constructed with fixed block sizes for each gate type bool is_structured; public: + using Trace = ExecutionTrace_; + ProvingKey proving_key; bool is_accumulator = false; From 9370c91dbc9c7b8eb38236e2fa3637e92d7f3786 Mon Sep 17 00:00:00 2001 From: Alex Gherghisan Date: Thu, 14 Nov 2024 18:04:01 +0000 Subject: [PATCH 4/5] fix: wait for tx to be re-included (#9964) Fix #9635 --------- Co-authored-by: Santiago Palladino --- .../prover-coordination/e2e_prover_coordination.test.ts | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/yarn-project/end-to-end/src/prover-coordination/e2e_prover_coordination.test.ts b/yarn-project/end-to-end/src/prover-coordination/e2e_prover_coordination.test.ts index e53a6327309..a6ea11a011b 100644 --- a/yarn-project/end-to-end/src/prover-coordination/e2e_prover_coordination.test.ts +++ b/yarn-project/end-to-end/src/prover-coordination/e2e_prover_coordination.test.ts @@ -7,6 +7,7 @@ import { EpochProofQuotePayload, TxStatus, createDebugLogger, + retryUntil, sleep, } from '@aztec/aztec.js'; import { type AztecAddress, EthAddress } from '@aztec/circuits.js'; @@ -402,6 +403,12 @@ describe('e2e_prover_coordination', () => { // Wait a bit for the sequencer / node to notice a re-org await sleep(2000); + await retryUntil( + async () => (await ctx.aztecNode.getTxReceipt(tx2BeforeReorg.txHash)).status === TxStatus.SUCCESS, + 'wait for re-inclusion', + 60, + 1, + ); // the sequencer will add valid txs again but in a new block const tx2AfterReorg = await ctx.aztecNode.getTxReceipt(tx2BeforeReorg.txHash); From bf4176f9fc2ae13ddc3f3ca534bc0611f85d7aa7 Mon Sep 17 00:00:00 2001 From: Tom French <15848336+TomAFrench@users.noreply.github.com> Date: Thu, 14 Nov 2024 21:07:50 +0000 Subject: [PATCH 5/5] chore: pull changes out of sync PR (#9966) Please read [contributing guidelines](CONTRIBUTING.md) and remove this line. 
--- noir/noir-repo/Cargo.lock | 11 + .../src/brillig/brillig_ir/procedures/mod.rs | 5 +- .../compiler/noirc_evaluator/src/lib.rs | 1 - .../src/ssa/acir_gen/acir_ir/acir_variable.rs | 22 +- .../noirc_evaluator/src/ssa/acir_gen/mod.rs | 3 +- .../noirc_evaluator/src/ssa/opt/mod.rs | 4 +- .../noirc_evaluator/src/ssa/parser.rs | 9 +- .../noirc_evaluator/src/ssa/parser/lexer.rs | 1 + .../noirc_evaluator/src/ssa/parser/tests.rs | 11 + .../noirc_evaluator/src/ssa/parser/token.rs | 3 + .../noirc_frontend/src/elaborator/comptime.rs | 9 +- .../src/hir/comptime/interpreter/builtin.rs | 3 +- .../src/hir/comptime/interpreter/foreign.rs | 26 ++- .../src/hir/def_collector/dc_mod.rs | 38 +++- .../src/hir/def_collector/errors.rs | 8 + .../compiler/noirc_frontend/src/tests.rs | 48 +++++ .../Nargo.toml | 6 + .../src/main.nr | 5 + .../attribute_args/src/main.nr | 2 +- .../Nargo.toml | 5 + .../src/main.nr | 5 + noir/noir-repo/tooling/nargo_cli/Cargo.toml | 1 + noir/noir-repo/tooling/nargo_cli/build.rs | 201 ++++++++++++++---- 23 files changed, 356 insertions(+), 71 deletions(-) create mode 100644 noir/noir-repo/test_programs/compile_failure/comptime_apply_failing_range_constraint/Nargo.toml create mode 100644 noir/noir-repo/test_programs/compile_failure/comptime_apply_failing_range_constraint/src/main.nr create mode 100644 noir/noir-repo/test_programs/compile_success_empty/comptime_apply_range_constraint/Nargo.toml create mode 100644 noir/noir-repo/test_programs/compile_success_empty/comptime_apply_range_constraint/src/main.nr diff --git a/noir/noir-repo/Cargo.lock b/noir/noir-repo/Cargo.lock index 35ff97f55e3..15ad7806a17 100644 --- a/noir/noir-repo/Cargo.lock +++ b/noir/noir-repo/Cargo.lock @@ -1510,6 +1510,16 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "file-lock" +version = "2.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "040b48f80a749da50292d0f47a1e2d5bf1d772f52836c07f64bfccc62ba6e664" +dependencies = [ + "cc", + "libc", +] + [[package]] name = "filetime" version = "0.2.25" @@ -2777,6 +2787,7 @@ dependencies = [ "criterion", "dap", "dirs", + "file-lock", "fm", "iai", "iter-extended", diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_ir/procedures/mod.rs b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_ir/procedures/mod.rs index 2fa51f3db59..0955142e414 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_ir/procedures/mod.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_ir/procedures/mod.rs @@ -1,3 +1,6 @@ +use noirc_errors::debug_info::ProcedureDebugId; +use serde::{Deserialize, Serialize}; + mod array_copy; mod array_reverse; mod check_max_stack_depth; @@ -14,11 +17,9 @@ use array_copy::compile_array_copy_procedure; use array_reverse::compile_array_reverse_procedure; use check_max_stack_depth::compile_check_max_stack_depth_procedure; use mem_copy::compile_mem_copy_procedure; -use noirc_errors::debug_info::ProcedureDebugId; use prepare_vector_insert::compile_prepare_vector_insert_procedure; use prepare_vector_push::compile_prepare_vector_push_procedure; use revert_with_string::compile_revert_with_string_procedure; -use serde::{Deserialize, Serialize}; use vector_copy::compile_vector_copy_procedure; use vector_pop_back::compile_vector_pop_back_procedure; use vector_pop_front::compile_vector_pop_front_procedure; diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/lib.rs b/noir/noir-repo/compiler/noirc_evaluator/src/lib.rs index b2b4762723f..5f0c7a5bbb8 100644 
--- a/noir/noir-repo/compiler/noirc_evaluator/src/lib.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/lib.rs @@ -12,7 +12,6 @@ pub mod ssa; pub mod brillig; pub use ssa::create_program; - pub use ssa::ir::instruction::ErrorType; /// Trims leading whitespace from each line of the input string, according to diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs index e7d298558c4..c6e4a261897 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs @@ -13,6 +13,7 @@ use acvm::acir::circuit::opcodes::{ }; use acvm::acir::circuit::{AssertionPayload, ExpressionOrMemory, ExpressionWidth, Opcode}; use acvm::brillig_vm::{MemoryValue, VMStatus, VM}; +use acvm::BlackBoxFunctionSolver; use acvm::{ acir::AcirField, acir::{ @@ -107,7 +108,9 @@ impl From for AcirType { /// Context object which holds the relationship between /// `Variables`(AcirVar) and types such as `Expression` and `Witness` /// which are placed into ACIR. -pub(crate) struct AcirContext { +pub(crate) struct AcirContext> { + blackbox_solver: B, + /// Two-way map that links `AcirVar` to `AcirVarData`. /// /// The vars object is an instance of the `TwoWayMap`, which provides a bidirectional mapping between `AcirVar` and `AcirVarData`. @@ -132,7 +135,7 @@ pub(crate) struct AcirContext { pub(crate) warnings: Vec, } -impl AcirContext { +impl> AcirContext { pub(crate) fn set_expression_width(&mut self, expression_width: ExpressionWidth) { self.expression_width = expression_width; } @@ -1758,8 +1761,8 @@ impl AcirContext { brillig_stdlib_func, ); - fn range_constraint_value( - context: &mut AcirContext, + fn range_constraint_value>( + context: &mut AcirContext, value: &AcirValue, ) -> Result<(), RuntimeError> { match value { @@ -1878,7 +1881,7 @@ impl AcirContext { inputs: &[BrilligInputs], outputs_types: &[AcirType], ) -> Option> { - let mut memory = (execute_brillig(code, inputs)?).into_iter(); + let mut memory = (execute_brillig(code, &self.blackbox_solver, inputs)?).into_iter(); let outputs_var = vecmap(outputs_types.iter(), |output| match output { AcirType::NumericType(_) => { @@ -2171,8 +2174,9 @@ pub(crate) struct AcirVar(usize); /// Returns the finished state of the Brillig VM if execution can complete. /// /// Returns `None` if complete execution of the Brillig bytecode is not possible. -fn execute_brillig( +fn execute_brillig>( code: &[BrilligOpcode], + blackbox_solver: &B, inputs: &[BrilligInputs], ) -> Option>> { // Set input values @@ -2198,12 +2202,8 @@ fn execute_brillig( } // Instantiate a Brillig VM given the solved input registers and memory, along with the Brillig bytecode. - // - // We pass a stubbed solver here as a concrete solver implies a field choice which conflicts with this function - // being generic. - let solver = acvm::blackbox_solver::StubbedBlackBoxSolver; let profiling_active = false; - let mut vm = VM::new(calldata, code, Vec::new(), &solver, profiling_active); + let mut vm = VM::new(calldata, code, Vec::new(), blackbox_solver, profiling_active); // Run the Brillig VM on these inputs, bytecode, etc! 
let vm_status = vm.process_opcodes(); diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs index ecf7561321f..33fdf2abc82 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs @@ -30,6 +30,7 @@ use crate::brillig::{brillig_gen::brillig_fn::FunctionContext as BrilligFunction use crate::errors::{InternalError, InternalWarning, RuntimeError, SsaReport}; pub(crate) use acir_ir::generated_acir::GeneratedAcir; use acvm::acir::circuit::opcodes::{AcirFunctionId, BlockType}; +use bn254_blackbox_solver::Bn254BlackBoxSolver; use noirc_frontend::monomorphization::ast::InlineType; use acvm::acir::circuit::brillig::{BrilligBytecode, BrilligFunctionId}; @@ -157,7 +158,7 @@ struct Context<'a> { current_side_effects_enabled_var: AcirVar, /// Manages and builds the `AcirVar`s to which the converted SSA values refer. - acir_context: AcirContext, + acir_context: AcirContext, /// Track initialized acir dynamic arrays /// diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/mod.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/mod.rs index 5576b494570..098f62bceba 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/mod.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/mod.rs @@ -44,7 +44,7 @@ pub(crate) fn assert_normalized_ssa_equals(mut ssa: super::Ssa, expected: &str) let expected = trim_leading_whitespace_from_lines(expected); if ssa != expected { - println!("Got:\n~~~\n{}\n~~~\nExpected:\n~~~\n{}\n~~~", ssa, expected); - similar_asserts::assert_eq!(ssa, expected); + println!("Expected:\n~~~\n{expected}\n~~~\nGot:\n~~~\n{ssa}\n~~~"); + similar_asserts::assert_eq!(expected, ssa); } } diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser.rs index 717d2691b2f..11d43284786 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser.rs @@ -735,10 +735,17 @@ impl<'a> Parser<'a> { } fn eat_int(&mut self) -> ParseResult> { + let negative = self.eat(Token::Dash)?; + if matches!(self.token.token(), Token::Int(..)) { let token = self.bump()?; match token.into_token() { - Token::Int(int) => Ok(Some(int)), + Token::Int(mut int) => { + if negative { + int = -int; + } + Ok(Some(int)) + } _ => unreachable!(), } } else { diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/lexer.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/lexer.rs index ac4c3b77205..4c90475be74 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/lexer.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/lexer.rs @@ -60,6 +60,7 @@ impl<'a> Lexer<'a> { Some(']') => self.single_char_token(Token::RightBracket), Some('&') => self.single_char_token(Token::Ampersand), Some('-') if self.peek_char() == Some('>') => self.double_char_token(Token::Arrow), + Some('-') => self.single_char_token(Token::Dash), Some(ch) if ch.is_ascii_alphanumeric() || ch == '_' => self.eat_alpha_numeric(ch), Some(char) => Err(LexerError::UnexpectedCharacter { char, diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/tests.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/tests.rs index 3ed6be57b5e..9205353151e 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/tests.rs +++ 
b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/tests.rs @@ -425,3 +425,14 @@ fn test_slice() { "; assert_ssa_roundtrip(src); } + +#[test] +fn test_negative() { + let src = " + acir(inline) fn main f0 { + b0(): + return Field -1 + } + "; + assert_ssa_roundtrip(src); +} diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/token.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/token.rs index 41c4f9ca164..d648f58de41 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/token.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/parser/token.rs @@ -57,6 +57,8 @@ pub(crate) enum Token { Equal, /// & Ampersand, + /// - + Dash, Eof, } @@ -90,6 +92,7 @@ impl Display for Token { Token::Arrow => write!(f, "->"), Token::Equal => write!(f, "=="), Token::Ampersand => write!(f, "&"), + Token::Dash => write!(f, "-"), Token::Eof => write!(f, "(end of stream)"), } } diff --git a/noir/noir-repo/compiler/noirc_frontend/src/elaborator/comptime.rs b/noir/noir-repo/compiler/noirc_frontend/src/elaborator/comptime.rs index 279adc331ea..a27e2bf0163 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/elaborator/comptime.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/elaborator/comptime.rs @@ -452,7 +452,14 @@ impl<'context> Elaborator<'context> { } ItemKind::Impl(r#impl) => { let module = self.module_id(); - dc_mod::collect_impl(self.interner, generated_items, r#impl, self.file, module); + dc_mod::collect_impl( + self.interner, + generated_items, + r#impl, + self.file, + module, + &mut self.errors, + ); } ItemKind::ModuleDecl(_) diff --git a/noir/noir-repo/compiler/noirc_frontend/src/hir/comptime/interpreter/builtin.rs b/noir/noir-repo/compiler/noirc_frontend/src/hir/comptime/interpreter/builtin.rs index 8a6c46ca50c..80c1ee217c2 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/hir/comptime/interpreter/builtin.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/hir/comptime/interpreter/builtin.rs @@ -42,7 +42,7 @@ use crate::{ }; use self::builtin_helpers::{eq_item, get_array, get_ctstring, get_str, get_u8, hash_item, lex}; -use super::Interpreter; +use super::{foreign, Interpreter}; pub(crate) mod builtin_helpers; @@ -57,6 +57,7 @@ impl<'local, 'context> Interpreter<'local, 'context> { let interner = &mut self.elaborator.interner; let call_stack = &self.elaborator.interpreter_call_stack; match name { + "apply_range_constraint" => foreign::apply_range_constraint(arguments, location), "array_as_str_unchecked" => array_as_str_unchecked(interner, arguments, location), "array_len" => array_len(interner, arguments, location), "assert_constant" => Ok(Value::Bool(true)), diff --git a/noir/noir-repo/compiler/noirc_frontend/src/hir/comptime/interpreter/foreign.rs b/noir/noir-repo/compiler/noirc_frontend/src/hir/comptime/interpreter/foreign.rs index d1ab6a1dabd..3de72969cab 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/hir/comptime/interpreter/foreign.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/hir/comptime/interpreter/foreign.rs @@ -1,4 +1,6 @@ -use acvm::blackbox_solver::BlackBoxFunctionSolver; +use acvm::{ + acir::BlackBoxFunc, blackbox_solver::BlackBoxFunctionSolver, AcirField, BlackBoxResolutionError, +}; use bn254_blackbox_solver::Bn254BlackBoxSolver; use im::Vector; use iter_extended::try_vecmap; @@ -29,6 +31,28 @@ pub(super) fn call_foreign( } } +pub(super) fn apply_range_constraint( + arguments: Vec<(Value, Location)>, + location: Location, +) -> IResult { + let (value, num_bits) = check_two_arguments(arguments, location)?; + + let 
input = get_field(value)?; + let num_bits = get_u32(num_bits)?; + + if input.num_bits() < num_bits { + Ok(Value::Unit) + } else { + Err(InterpreterError::BlackBoxError( + BlackBoxResolutionError::Failed( + BlackBoxFunc::RANGE, + "value exceeds range check bounds".to_owned(), + ), + location, + )) + } +} + // poseidon2_permutation(_input: [Field; N], _state_length: u32) -> [Field; N] fn poseidon2_permutation( interner: &mut NodeInterner, diff --git a/noir/noir-repo/compiler/noirc_frontend/src/hir/def_collector/dc_mod.rs b/noir/noir-repo/compiler/noirc_frontend/src/hir/def_collector/dc_mod.rs index a373441b4e0..bae57daae15 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/hir/def_collector/dc_mod.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/hir/def_collector/dc_mod.rs @@ -97,9 +97,9 @@ pub fn collect_defs( errors.extend(collector.collect_functions(context, ast.functions, crate_id)); - collector.collect_trait_impls(context, ast.trait_impls, crate_id); + errors.extend(collector.collect_trait_impls(context, ast.trait_impls, crate_id)); - collector.collect_impls(context, ast.impls, crate_id); + errors.extend(collector.collect_impls(context, ast.impls, crate_id)); collector.collect_attributes( ast.inner_attributes, @@ -163,7 +163,13 @@ impl<'a> ModCollector<'a> { errors } - fn collect_impls(&mut self, context: &mut Context, impls: Vec, krate: CrateId) { + fn collect_impls( + &mut self, + context: &mut Context, + impls: Vec, + krate: CrateId, + ) -> Vec<(CompilationError, FileId)> { + let mut errors = Vec::new(); let module_id = ModuleId { krate, local_id: self.module_id }; for r#impl in impls { @@ -173,8 +179,11 @@ impl<'a> ModCollector<'a> { r#impl, self.file_id, module_id, + &mut errors, ); } + + errors } fn collect_trait_impls( @@ -182,7 +191,9 @@ impl<'a> ModCollector<'a> { context: &mut Context, impls: Vec, krate: CrateId, - ) { + ) -> Vec<(CompilationError, FileId)> { + let mut errors = Vec::new(); + for mut trait_impl in impls { let trait_name = trait_impl.trait_name.clone(); @@ -198,6 +209,13 @@ impl<'a> ModCollector<'a> { let module = ModuleId { krate, local_id: self.module_id }; for (_, func_id, noir_function) in &mut unresolved_functions.functions { + if noir_function.def.attributes.is_test_function() { + let error = DefCollectorErrorKind::TestOnAssociatedFunction { + span: noir_function.name_ident().span(), + }; + errors.push((error.into(), self.file_id)); + } + let location = Location::new(noir_function.def.span, self.file_id); context.def_interner.push_function(*func_id, &noir_function.def, module, location); } @@ -224,6 +242,8 @@ impl<'a> ModCollector<'a> { self.def_collector.items.trait_impls.push(unresolved_trait_impl); } + + errors } fn collect_functions( @@ -1051,6 +1071,7 @@ pub fn collect_impl( r#impl: TypeImpl, file_id: FileId, module_id: ModuleId, + errors: &mut Vec<(CompilationError, FileId)>, ) { let mut unresolved_functions = UnresolvedFunctions { file_id, functions: Vec::new(), trait_id: None, self_type: None }; @@ -1058,6 +1079,15 @@ pub fn collect_impl( for (method, _) in r#impl.methods { let doc_comments = method.doc_comments; let mut method = method.item; + + if method.def.attributes.is_test_function() { + let error = DefCollectorErrorKind::TestOnAssociatedFunction { + span: method.name_ident().span(), + }; + errors.push((error.into(), file_id)); + continue; + } + let func_id = interner.push_empty_fn(); method.def.where_clause.extend(r#impl.where_clause.clone()); let location = Location::new(method.span(), file_id); diff --git 
a/noir/noir-repo/compiler/noirc_frontend/src/hir/def_collector/errors.rs b/noir/noir-repo/compiler/noirc_frontend/src/hir/def_collector/errors.rs index d72f493092d..c08b4ff2062 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/hir/def_collector/errors.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/hir/def_collector/errors.rs @@ -82,6 +82,8 @@ pub enum DefCollectorErrorKind { }, #[error("{0}")] UnsupportedNumericGenericType(#[from] UnsupportedNumericGenericType), + #[error("The `#[test]` attribute may only be used on a non-associated function")] + TestOnAssociatedFunction { span: Span }, } impl DefCollectorErrorKind { @@ -291,6 +293,12 @@ impl<'a> From<&'a DefCollectorErrorKind> for Diagnostic { diag } DefCollectorErrorKind::UnsupportedNumericGenericType(err) => err.into(), + DefCollectorErrorKind::TestOnAssociatedFunction { span } => Diagnostic::simple_error( + "The `#[test]` attribute is disallowed on `impl` methods".into(), + String::new(), + *span, + ), + } } } diff --git a/noir/noir-repo/compiler/noirc_frontend/src/tests.rs b/noir/noir-repo/compiler/noirc_frontend/src/tests.rs index f72741721d8..20a5bac49f6 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/tests.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/tests.rs @@ -3692,3 +3692,51 @@ fn allows_struct_with_generic_infix_type_as_main_input_3() { "#; assert_no_errors(src); } + +#[test] +fn disallows_test_attribute_on_impl_method() { + let src = r#" + pub struct Foo {} + impl Foo { + #[test] + fn foo() {} + } + + fn main() {} + "#; + let errors = get_program_errors(src); + assert_eq!(errors.len(), 1); + + assert!(matches!( + errors[0].0, + CompilationError::DefinitionError(DefCollectorErrorKind::TestOnAssociatedFunction { + span: _ + }) + )); +} + +#[test] +fn disallows_test_attribute_on_trait_impl_method() { + let src = r#" + pub trait Trait { + fn foo() {} + } + + pub struct Foo {} + impl Trait for Foo { + #[test] + fn foo() {} + } + + fn main() {} + "#; + let errors = get_program_errors(src); + assert_eq!(errors.len(), 1); + + assert!(matches!( + errors[0].0, + CompilationError::DefinitionError(DefCollectorErrorKind::TestOnAssociatedFunction { + span: _ + }) + )); +} diff --git a/noir/noir-repo/test_programs/compile_failure/comptime_apply_failing_range_constraint/Nargo.toml b/noir/noir-repo/test_programs/compile_failure/comptime_apply_failing_range_constraint/Nargo.toml new file mode 100644 index 00000000000..e14cfeae5d6 --- /dev/null +++ b/noir/noir-repo/test_programs/compile_failure/comptime_apply_failing_range_constraint/Nargo.toml @@ -0,0 +1,6 @@ +[package] +name = "comptime_apply_failing_range_constraint" +type = "bin" +authors = [""] + +[dependencies] diff --git a/noir/noir-repo/test_programs/compile_failure/comptime_apply_failing_range_constraint/src/main.nr b/noir/noir-repo/test_programs/compile_failure/comptime_apply_failing_range_constraint/src/main.nr new file mode 100644 index 00000000000..752f13a2e51 --- /dev/null +++ b/noir/noir-repo/test_programs/compile_failure/comptime_apply_failing_range_constraint/src/main.nr @@ -0,0 +1,5 @@ +fn main() { + comptime { + 256.assert_max_bit_size::<8>() + } +} diff --git a/noir/noir-repo/test_programs/compile_success_empty/attribute_args/src/main.nr b/noir/noir-repo/test_programs/compile_success_empty/attribute_args/src/main.nr index 5fc193150db..29690ba36c7 100644 --- a/noir/noir-repo/test_programs/compile_success_empty/attribute_args/src/main.nr +++ b/noir/noir-repo/test_programs/compile_success_empty/attribute_args/src/main.nr @@ -13,7 +13,7 @@ comptime fn 
attr_with_args(s: StructDefinition, a: Field, b: Field) { #[varargs] comptime fn attr_with_varargs(s: StructDefinition, t: [Field]) { - let _: StructDefinition = s; + let _ = s; for _ in t {} assert(t.len() < 5); } diff --git a/noir/noir-repo/test_programs/compile_success_empty/comptime_apply_range_constraint/Nargo.toml b/noir/noir-repo/test_programs/compile_success_empty/comptime_apply_range_constraint/Nargo.toml new file mode 100644 index 00000000000..bfd6fa75728 --- /dev/null +++ b/noir/noir-repo/test_programs/compile_success_empty/comptime_apply_range_constraint/Nargo.toml @@ -0,0 +1,5 @@ +[package] +name = "comptime_apply_range_constraint" +type = "bin" +authors = [""] +[dependencies] diff --git a/noir/noir-repo/test_programs/compile_success_empty/comptime_apply_range_constraint/src/main.nr b/noir/noir-repo/test_programs/compile_success_empty/comptime_apply_range_constraint/src/main.nr new file mode 100644 index 00000000000..ff5e0ba9511 --- /dev/null +++ b/noir/noir-repo/test_programs/compile_success_empty/comptime_apply_range_constraint/src/main.nr @@ -0,0 +1,5 @@ +fn main() { + comptime { + 2.assert_max_bit_size::<8>() + } +} diff --git a/noir/noir-repo/tooling/nargo_cli/Cargo.toml b/noir/noir-repo/tooling/nargo_cli/Cargo.toml index 317706bb237..02e669f5c68 100644 --- a/noir/noir-repo/tooling/nargo_cli/Cargo.toml +++ b/noir/noir-repo/tooling/nargo_cli/Cargo.toml @@ -78,6 +78,7 @@ dirs.workspace = true assert_cmd = "2.0.8" assert_fs = "1.0.10" predicates = "2.1.5" +file-lock = "2.1.11" fm.workspace = true criterion.workspace = true pprof.workspace = true diff --git a/noir/noir-repo/tooling/nargo_cli/build.rs b/noir/noir-repo/tooling/nargo_cli/build.rs index 438eef687b8..ce46a717113 100644 --- a/noir/noir-repo/tooling/nargo_cli/build.rs +++ b/noir/noir-repo/tooling/nargo_cli/build.rs @@ -59,6 +59,12 @@ const IGNORED_BRILLIG_TESTS: [&str; 11] = [ "is_unconstrained", ]; +/// Tests which aren't expected to work with the default inliner cases. +const INLINER_MIN_OVERRIDES: [(&str, i64); 1] = [ + // 0 works if PoseidonHasher::write is tagged as `inline_always`, otherwise 22. + ("eddsa", 0), +]; + /// Some tests are expected to have warnings /// These should be fixed and removed from this list. const TESTS_WITH_EXPECTED_WARNINGS: [&str; 2] = [ @@ -88,22 +94,119 @@ fn read_test_cases( }) } -fn generate_test_case( +#[derive(Default)] +struct MatrixConfig { + // Only used with execution, and only on selected tests. + vary_brillig: bool, + // Only seems to have an effect on the `execute_success` cases. + vary_inliner: bool, + // If there is a non-default minimum inliner aggressiveness to use with the brillig tests. + min_inliner: i64, +} + +// Enum to be able to preserve readable test labels and also compare to numbers. +enum Inliner { + Min, + Default, + Max, + Custom(i64), +} + +impl Inliner { + fn value(&self) -> i64 { + match self { + Inliner::Min => i64::MIN, + Inliner::Default => 0, + Inliner::Max => i64::MAX, + Inliner::Custom(i) => *i, + } + } + fn label(&self) -> String { + match self { + Inliner::Min => "i64::MIN".to_string(), + Inliner::Default => "0".to_string(), + Inliner::Max => "i64::MAX".to_string(), + Inliner::Custom(i) => i.to_string(), + } + } +} + +/// Generate all test cases for a given test name (expected to be unique for the test directory), +/// based on the matrix configuration. These will be executed serially, but concurrently with +/// other test directories. 
Running multiple tests on the same directory would risk overwriting +/// each other's compilation artifacts, which is why this method injects a mutex shared by +/// all cases in the test matrix, as long as the test name and directory have a 1-to-1 relationship. +fn generate_test_cases( test_file: &mut File, test_name: &str, test_dir: &std::path::Display, + test_command: &str, test_content: &str, + matrix_config: &MatrixConfig, ) { + let brillig_cases = if matrix_config.vary_brillig { vec![false, true] } else { vec![false] }; + let inliner_cases = if matrix_config.vary_inliner { + let mut cases = vec![Inliner::Min, Inliner::Default, Inliner::Max]; + if !cases.iter().any(|c| c.value() == matrix_config.min_inliner) { + cases.push(Inliner::Custom(matrix_config.min_inliner)); + } + cases + } else { + vec![Inliner::Max] + }; + + // We can't use a `#[test_matrix(brillig_cases, inliner_cases)]` if we only want to limit the + // aggressiveness range for the brillig tests, and let them go full range on the ACIR case. + let mut test_cases = Vec::new(); + for brillig in &brillig_cases { + for inliner in &inliner_cases { + if *brillig && inliner.value() < matrix_config.min_inliner { + continue; + } + test_cases.push(format!("#[test_case::test_case({brillig}, {})]", inliner.label())); + } + } + let test_cases = test_cases.join("\n"); + + // We need to isolate test cases in the same group, otherwise they overwrite each other's artifacts. + // On CI we use `cargo nextest`, which runs tests in different processes; for this we use a file lock. + // Locally we might be using `cargo test`, which runs tests in the same process; in this case the file lock + // wouldn't work, because the process itself has the lock, and it looks like it can have N instances without + // any problems; for this reason we also use a `Mutex`. + let mutex_name = format! {"TEST_MUTEX_{}", test_name.to_uppercase()}; write!( test_file, r#" +lazy_static::lazy_static! {{ + /// Prevent concurrent tests in the matrix from overwriting the compilation artifacts in {test_dir} + static ref {mutex_name}: std::sync::Mutex<()> = std::sync::Mutex::new(()); +}} + +{test_cases} +fn test_{test_name}(force_brillig: bool, inliner_aggressiveness: i64) {{ let test_program_dir = PathBuf::from("{test_dir}"); + // Ignore poisoning errors if some of the matrix cases failed.
+ let mutex_guard = {mutex_name}.lock().unwrap_or_else(|e| e.into_inner()); + + let file_guard = file_lock::FileLock::lock( + test_program_dir.join("Nargo.toml"), + true, + file_lock::FileOptions::new().read(true).write(true).append(true) + ).expect("failed to lock Nargo.toml"); + let mut nargo = Command::cargo_bin("nargo").unwrap(); nargo.arg("--program-dir").arg(test_program_dir); + nargo.arg("{test_command}").arg("--force"); + nargo.arg("--inliner-aggressiveness").arg(inliner_aggressiveness.to_string()); + if force_brillig {{ + nargo.arg("--force-brillig"); + }} + {test_content} + + drop(file_guard); + drop(mutex_guard); }} "# ) @@ -124,27 +227,24 @@ fn generate_execution_success_tests(test_file: &mut File, test_data_dir: &Path) for (test_name, test_dir) in test_cases { let test_dir = test_dir.display(); - generate_test_case( + generate_test_cases( test_file, &test_name, &test_dir, + "execute", r#" - nargo.arg("execute").arg("--force"); - - nargo.assert().success();"#, + nargo.assert().success(); + "#, + &MatrixConfig { + vary_brillig: !IGNORED_BRILLIG_TESTS.contains(&test_name.as_str()), + vary_inliner: false, + min_inliner: INLINER_MIN_OVERRIDES + .iter() + .find(|(n, _)| *n == test_name.as_str()) + .map(|(_, i)| *i) + .unwrap_or(i64::MIN), + }, ); - - if !IGNORED_BRILLIG_TESTS.contains(&test_name.as_str()) { - generate_test_case( - test_file, - &format!("{test_name}_brillig"), - &test_dir, - r#" - nargo.arg("execute").arg("--force").arg("--force-brillig"); - - nargo.assert().success();"#, - ); - } } writeln!(test_file, "}}").unwrap(); } @@ -163,14 +263,15 @@ fn generate_execution_failure_tests(test_file: &mut File, test_data_dir: &Path) for (test_name, test_dir) in test_cases { let test_dir = test_dir.display(); - generate_test_case( + generate_test_cases( test_file, &test_name, &test_dir, + "execute", r#" - nargo.arg("execute").arg("--force"); - - nargo.assert().failure().stderr(predicate::str::contains("The application panicked (crashed).").not());"#, + nargo.assert().failure().stderr(predicate::str::contains("The application panicked (crashed).").not()); + "#, + &MatrixConfig::default(), ); } writeln!(test_file, "}}").unwrap(); @@ -190,14 +291,15 @@ fn generate_noir_test_success_tests(test_file: &mut File, test_data_dir: &Path) for (test_name, test_dir) in test_cases { let test_dir = test_dir.display(); - generate_test_case( + generate_test_cases( test_file, &test_name, &test_dir, + "test", r#" - nargo.arg("test"); - - nargo.assert().success();"#, + nargo.assert().success(); + "#, + &MatrixConfig::default(), ); } writeln!(test_file, "}}").unwrap(); @@ -216,14 +318,15 @@ fn generate_noir_test_failure_tests(test_file: &mut File, test_data_dir: &Path) .unwrap(); for (test_name, test_dir) in test_cases { let test_dir = test_dir.display(); - generate_test_case( + generate_test_cases( test_file, &test_name, &test_dir, + "test", r#" - nargo.arg("test"); - - nargo.assert().failure();"#, + nargo.assert().failure(); + "#, + &MatrixConfig::default(), ); } writeln!(test_file, "}}").unwrap(); @@ -266,16 +369,18 @@ fn generate_compile_success_empty_tests(test_file: &mut File, test_data_dir: &Pa assert_eq!(num_opcodes.as_u64().expect("number of opcodes should fit in a u64"), 0); "#; - generate_test_case( + generate_test_cases( test_file, &test_name, &test_dir, + "info", &format!( r#" - nargo.arg("info").arg("--json").arg("--force"); - - {assert_zero_opcodes}"#, + nargo.arg("--json"); + {assert_zero_opcodes} + "#, ), + &MatrixConfig::default(), ); } writeln!(test_file, "}}").unwrap(); @@ -295,13 
+400,15 @@ fn generate_compile_success_contract_tests(test_file: &mut File, test_data_dir: for (test_name, test_dir) in test_cases { let test_dir = test_dir.display(); - generate_test_case( + generate_test_cases( test_file, &test_name, &test_dir, + "compile", r#" - nargo.arg("compile").arg("--force"); - nargo.assert().success().stderr(predicate::str::contains("warning:").not());"#, + nargo.assert().success().stderr(predicate::str::contains("warning:").not()); + "#, + &MatrixConfig::default(), ); } writeln!(test_file, "}}").unwrap(); @@ -322,13 +429,15 @@ fn generate_compile_success_no_bug_tests(test_file: &mut File, test_data_dir: &P for (test_name, test_dir) in test_cases { let test_dir = test_dir.display(); - generate_test_case( + generate_test_cases( test_file, &test_name, &test_dir, + "compile", r#" - nargo.arg("compile").arg("--force"); - nargo.assert().success().stderr(predicate::str::contains("bug:").not());"#, + nargo.assert().success().stderr(predicate::str::contains("bug:").not()); + "#, + &MatrixConfig::default(), ); } writeln!(test_file, "}}").unwrap(); @@ -348,13 +457,15 @@ fn generate_compile_failure_tests(test_file: &mut File, test_data_dir: &Path) { for (test_name, test_dir) in test_cases { let test_dir = test_dir.display(); - generate_test_case( + generate_test_cases( test_file, &test_name, &test_dir, - r#"nargo.arg("compile").arg("--force"); - - nargo.assert().failure().stderr(predicate::str::contains("The application panicked (crashed).").not());"#, + "compile", + r#" + nargo.assert().failure().stderr(predicate::str::contains("The application panicked (crashed).").not()); + "#, + &MatrixConfig::default(), ); } writeln!(test_file, "}}").unwrap();
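For reference, the harness that the updated build.rs emits for a single hypothetical `execution_success` directory named `foo` would look roughly like the sketch below. The test name, program path, and exact attribute list are illustrative assumptions filled in from the template above, not literal generated output; with `vary_brillig` on and `vary_inliner` off, only the `i64::MAX` inliner case is produced for both the ACIR and brillig runs.

    // Illustrative sketch only: approximate expansion of the generated test for a
    // hypothetical `foo` program; names and paths are assumed, not taken from the patch.
    lazy_static::lazy_static! {
        /// Prevent concurrent matrix cases from overwriting foo's compilation artifacts.
        static ref TEST_MUTEX_FOO: std::sync::Mutex<()> = std::sync::Mutex::new(());
    }

    #[test_case::test_case(false, i64::MAX)]
    #[test_case::test_case(true, i64::MAX)]
    fn test_foo(force_brillig: bool, inliner_aggressiveness: i64) {
        let test_program_dir = std::path::PathBuf::from("test_programs/execution_success/foo");

        // Ignore poisoning errors if some of the matrix cases failed.
        let mutex_guard = TEST_MUTEX_FOO.lock().unwrap_or_else(|e| e.into_inner());

        // Cross-process isolation for `cargo nextest`; the mutex above covers `cargo test`.
        let file_guard = file_lock::FileLock::lock(
            test_program_dir.join("Nargo.toml"),
            true,
            file_lock::FileOptions::new().read(true).write(true).append(true),
        )
        .expect("failed to lock Nargo.toml");

        let mut nargo = assert_cmd::Command::cargo_bin("nargo").unwrap();
        nargo.arg("--program-dir").arg(test_program_dir);
        nargo.arg("execute").arg("--force");
        nargo.arg("--inliner-aggressiveness").arg(inliner_aggressiveness.to_string());
        if force_brillig {
            nargo.arg("--force-brillig");
        }

        nargo.assert().success();

        drop(file_guard);
        drop(mutex_guard);
    }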