diff --git a/.github/workflows/ci-arm.yml b/.github/workflows/ci-arm.yml index d763bfbf395a..77b4c150bdfa 100644 --- a/.github/workflows/ci-arm.yml +++ b/.github/workflows/ci-arm.yml @@ -85,9 +85,8 @@ jobs: ./bootstrap.sh image-e2e - name: "Test" timeout-minutes: 40 - continue-on-error: true run: | - ./bootstrap.sh e2e-test uniswap_trade_on_l1_from_l2 + ./bootstrap.sh test-e2e uniswap_trade_on_l1_from_l2 rerun-check: runs-on: ubuntu-20.04 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 19d5d3b2bffe..746dcf7fbc7d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -418,10 +418,10 @@ jobs: fail-fast: false matrix: config: - - test: reorg.test.ts - values: ci - runner_type: 16core-tester-x86-high-memory - timeout: 60 + # - test: reorg.test.ts + # values: ci + # runner_type: 16core-tester-x86-high-memory + # timeout: 60 - test: 4epochs.test.ts values: ci runner_type: 16core-tester-x86 diff --git a/.noir-sync-commit b/.noir-sync-commit index 76a4a97e19eb..acaab867910c 100644 --- a/.noir-sync-commit +++ b/.noir-sync-commit @@ -1 +1 @@ -3c488f4b272f460383341c51270b87bfe2b94468 +db28cb9ffb710c286b54dbfcf57292ae3dffb03d diff --git a/Dockerfile.boxes b/Dockerfile.boxes index 869395adf01c..1949c745dee1 100644 --- a/Dockerfile.boxes +++ b/Dockerfile.boxes @@ -1,5 +1,5 @@ -FROM aztecprotocol/ci:2.0 +FROM aztecprotocol/ci:2.2 COPY /usr/src /usr/src WORKDIR /usr/src/boxes RUN ls /usr/src/boxes -RUN /usr/local/share/docker-init.sh &> /dev/null && ./bootstrap.sh test \ No newline at end of file +RUN /usr/local/share/docker-init.sh &> /dev/null && ./bootstrap.sh test diff --git a/aztec-nargo/README.md b/aztec-nargo/README.md new file mode 100644 index 000000000000..79e3187a3754 --- /dev/null +++ b/aztec-nargo/README.md @@ -0,0 +1,12 @@ +## `aztec-nargo` + +The `aztec-nargo` utility is packaged with Docker and does the following: +1. If the first argument to `aztec-nargo` is not `compile`, it just forwards args to `nargo` and exits. +2. If the first argument _is_ `compile`, it forwards args to `nargo` with some added options (like `--inliner-aggressiveness 0 --show-artifact-paths`). +3. Extracts all artifacts modified by `nargo`. +4. Transpiles each artifact using the `avm-transpiler`. +5. Generates verification keys for each artifact using `bb` (`barretenberg`'s binary). + +Example usage: `aztec-nargo compile` + +Note: uses versions of each tool from this repository (`nargo` version is from `../noir`). diff --git a/aztec-up/README.md b/aztec-up/README.md index d3cc08d04fdd..26f5e97ae6f4 100644 --- a/aztec-up/README.md +++ b/aztec-up/README.md @@ -11,7 +11,7 @@ the user's `PATH` variable in their shell startup script so they can be found. - `aztec` - The infrastructure container. - `aztec-cli` - A command-line tool for interacting with infrastructure. -- `aztec-nargo` - A build of `nargo` from `noir` that is guaranteed to be version-aligned. Provides compiler, lsp and more. +- `aztec-nargo` - A build of `nargo` from `noir` that is guaranteed to be version-aligned. Provides compiler, lsp and more. On `aztec-nargo compile <...>`, automatically transpiles artifacts using `avm-transpiler` and generates verification keys using `bb`. - `aztec-sandbox` - A wrapper around docker-compose that launches services needed for sandbox testing. - `aztec-up` - A tool to upgrade the aztec toolchain to the latest, or specific versions. - `aztec-builder` - A useful tool for projects to generate ABIs and update their dependencies. 
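The new `aztec-nargo` README above describes the `compile` flow only in words: forward to `nargo` with extra flags, transpile the produced artifacts with `avm-transpiler`, then generate verification keys with `bb`. A minimal shell sketch of that flow follows; the script shape, the artifact-path handling, and the exact `avm-transpiler` / `bb write_vk` invocations are assumptions for illustration and are not taken from this PR.

```
#!/usr/bin/env bash
# Hypothetical sketch of the aztec-nargo compile flow described in the README above.
# The concrete tool invocations below are assumptions; the packaged wrapper is authoritative.
set -euo pipefail

if [ "${1:-}" != "compile" ]; then
  exec nargo "$@"   # anything other than `compile` is forwarded to nargo unchanged
fi
shift

# Compile with the pinned nargo, adding the extra options mentioned in the README,
# and collect the artifact paths it reports (assumed to be printed one per line).
artifacts=$(nargo compile --inliner-aggressiveness 0 --show-artifact-paths "$@" | grep '\.json$' || true)

for artifact in $artifacts; do
  avm-transpiler "$artifact" "$artifact"                # assumed CLI shape: <input> <output>
  bb write_vk -b "$artifact" -o "${artifact%.json}.vk"  # assumed flags for VK generation
done
```

In the released tooling this logic lives inside the Docker-packaged wrapper, so users only ever run `aztec-nargo compile` as shown in the README.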
diff --git a/barretenberg/.gitrepo b/barretenberg/.gitrepo index b47c65d1977f..60ad98bf5e32 100644 --- a/barretenberg/.gitrepo +++ b/barretenberg/.gitrepo @@ -6,7 +6,7 @@ [subrepo] remote = https://github.com/AztecProtocol/barretenberg branch = master - commit = 560058aa5ec26a23436f09d89a8b80633fbee785 - parent = 36b640aed54fd4da0f9899300bf7b0d05faf5b8d + commit = 37aa7c0fb0224521c00673584b0adc9be11640d8 + parent = 231f017d14c3d261b28ab19dcbdf368c561d0cc7 method = merge cmdver = 0.4.6 diff --git a/barretenberg/cpp/pil/vm2/README.md b/barretenberg/cpp/pil/vm2/README.md new file mode 100644 index 000000000000..4194d164843e --- /dev/null +++ b/barretenberg/cpp/pil/vm2/README.md @@ -0,0 +1,7 @@ +Compile with: + +``` +~/aztec-packages/bb-pilcom/target/release/bb_pil pil/vm2/execution.pil --name Avm2 -y -o src/barretenberg/vm2/generated && ./format.sh changed +``` + +while on the `barretenberg/cpp` directory. diff --git a/barretenberg/cpp/pil/vm2/addressing.pil b/barretenberg/cpp/pil/vm2/addressing.pil new file mode 100644 index 000000000000..19947f5f6612 --- /dev/null +++ b/barretenberg/cpp/pil/vm2/addressing.pil @@ -0,0 +1,21 @@ +// This is a virtual gadget, which is part of the execution trace. +namespace execution(256); + +pol commit stack_pointer_val; +pol commit stack_pointer_tag; +pol commit sel_addressing_error; // true if any error type +pol commit addressing_error_kind; // TODO: might need to be selectors +pol commit addressing_error_idx; // operand index for error, if any + +// whether each operand is an address for the given opcode. +// retrieved from the instruction spec. +pol commit sel_op1_is_address; +pol commit sel_op2_is_address; +pol commit sel_op3_is_address; +pol commit sel_op4_is_address; +// operands after relative resolution +pol commit op1_after_relative; +pol commit op2_after_relative; +pol commit op3_after_relative; +pol commit op4_after_relative; +// operands after indirect resolution are the resolved_operands rop1, ... diff --git a/barretenberg/cpp/pil/vm2/alu.pil b/barretenberg/cpp/pil/vm2/alu.pil new file mode 100644 index 000000000000..ee0de13f9ab7 --- /dev/null +++ b/barretenberg/cpp/pil/vm2/alu.pil @@ -0,0 +1,16 @@ +namespace alu(256); + +pol commit sel_op_add; +pol commit ia; +pol commit ib; +pol commit ic; +pol commit op; +pol commit ia_addr; +pol commit ib_addr; +pol commit dst_addr; + +#[SEL_ADD_BINARY] +sel_op_add * (1 - sel_op_add) = 0; + +#[ALU_ADD] +ia + ib = ic; \ No newline at end of file diff --git a/barretenberg/cpp/pil/vm2/execution.pil b/barretenberg/cpp/pil/vm2/execution.pil new file mode 100644 index 000000000000..5718674edb50 --- /dev/null +++ b/barretenberg/cpp/pil/vm2/execution.pil @@ -0,0 +1,55 @@ +include "alu.pil"; +include "addressing.pil"; +include "precomputed.pil"; + +namespace execution(256); + +pol commit sel; // subtrace selector + +pol commit ex_opcode; +pol commit indirect; +// operands +pol commit op1; +pol commit op2; +pol commit op3; +pol commit op4; +// resolved operands +pol commit rop1; +pol commit rop2; +pol commit rop3; +pol commit rop4; + +pol commit pc; +pol commit clk; +pol commit last; + +// Selector constraints +sel * (1 - sel) = 0; +last * (1 - last) = 0; + +// If the current row is an execution row, then either +// the next row is an execution row, or the current row is marked as the last row. 
+// sel => (sel' v last) = 1 iff +// ¬sel v (sel' v last) = 1 iff +// ¬(¬sel v (sel' v last)) = 0 iff +// sel ^ (¬sel' ^ ¬last) = 0 iff +// sel * (1 - sel') * (1 - last) = 0 +#[TRACE_CONTINUITY_1] +sel * (1 - sel') * (1 - last) = 0; +// If the current row is not an execution row, then there are no more execution rows after that. +// (not enforced for the first row) +#[TRACE_CONTINUITY_2] +(1 - precomputed.first_row) * (1 - sel) * sel' = 0; +// If the current row is the last row, then the next row is not an execution row. +#[LAST_IS_LAST] +last * sel' = 0; + +// These are needed to have a non-empty set of columns for each type. +pol public input; +#[LOOKUP_DUMMY_PRECOMPUTED] +sel {/*will be 1=OR*/ sel, clk, clk, clk} in +precomputed.sel_bitwise {precomputed.bitwise_op_id, precomputed.bitwise_input_a, precomputed.bitwise_input_b, precomputed.bitwise_output}; +#[LOOKUP_DUMMY_DYNAMIC] // Just a self-lookup for now, for testing. +sel {op1, op2, op3, op4} in sel {op1, op2, op3, op4}; +#[PERM_DUMMY_DYNAMIC] // Just a self-permutation for now, for testing. +sel {op1, op2, op3, op4} is sel {op1, op2, op3, op4}; \ No newline at end of file diff --git a/barretenberg/cpp/pil/vm2/precomputed.pil b/barretenberg/cpp/pil/vm2/precomputed.pil new file mode 100644 index 000000000000..f5dc8ff26359 --- /dev/null +++ b/barretenberg/cpp/pil/vm2/precomputed.pil @@ -0,0 +1,17 @@ +// General/shared precomputed columns. +namespace precomputed(256); + +// From 0 and incrementing up to the size of the circuit (2^21). +pol constant clk; + +// 1 only at row 0. +pol constant first_row; + +// AND/OR/XOR of all 8-bit numbers. +// The tables are "stacked". First AND, then OR, then XOR. +// Note: think if we can avoid the selector. +pol constant sel_bitwise; // 1 in the first 3 * 256 rows. +pol constant bitwise_op_id; // identifies if operation is AND/OR/XOR. +pol constant bitwise_input_a; // column of all 8-bit numbers. +pol constant bitwise_input_b; // column of all 8-bit numbers. +pol constant bitwise_output; // output = a AND/OR/XOR b. 
\ No newline at end of file diff --git a/barretenberg/cpp/src/CMakeLists.txt b/barretenberg/cpp/src/CMakeLists.txt index fd9925660826..cd0965babb97 100644 --- a/barretenberg/cpp/src/CMakeLists.txt +++ b/barretenberg/cpp/src/CMakeLists.txt @@ -95,6 +95,7 @@ add_subdirectory(barretenberg/transcript) add_subdirectory(barretenberg/translator_vm) add_subdirectory(barretenberg/ultra_honk) add_subdirectory(barretenberg/vm) +add_subdirectory(barretenberg/vm2) add_subdirectory(barretenberg/wasi) add_subdirectory(barretenberg/world_state) @@ -171,6 +172,7 @@ set(BARRETENBERG_TARGET_OBJECTS if(NOT DISABLE_AZTEC_VM) # enable AVM list(APPEND BARRETENBERG_TARGET_OBJECTS $) + list(APPEND BARRETENBERG_TARGET_OBJECTS $) endif() if(NOT WASM) diff --git a/barretenberg/cpp/src/barretenberg/bb/main.cpp b/barretenberg/cpp/src/barretenberg/bb/main.cpp index cf6f7fefba12..465d9732ac1c 100644 --- a/barretenberg/cpp/src/barretenberg/bb/main.cpp +++ b/barretenberg/cpp/src/barretenberg/bb/main.cpp @@ -3,6 +3,7 @@ #include "barretenberg/bb/file_io.hpp" #include "barretenberg/client_ivc/client_ivc.hpp" #include "barretenberg/common/benchmark.hpp" +#include "barretenberg/common/log.hpp" #include "barretenberg/common/map.hpp" #include "barretenberg/common/serialize.hpp" #include "barretenberg/common/timer.hpp" @@ -13,7 +14,6 @@ #include "barretenberg/dsl/acir_format/proof_surgeon.hpp" #include "barretenberg/dsl/acir_proofs/acir_composer.hpp" #include "barretenberg/dsl/acir_proofs/honk_contract.hpp" -#include "barretenberg/flavor/flavor.hpp" #include "barretenberg/honk/proof_system/types/proof.hpp" #include "barretenberg/numeric/bitop/get_msb.hpp" #include "barretenberg/plonk/proof_system/proving_key/serialize.hpp" @@ -24,15 +24,17 @@ #include "barretenberg/stdlib_circuit_builders/ultra_flavor.hpp" #include "barretenberg/stdlib_circuit_builders/ultra_keccak_flavor.hpp" #include "barretenberg/stdlib_circuit_builders/ultra_rollup_flavor.hpp" -#include "barretenberg/vm/avm/trace/public_inputs.hpp" -#include #ifndef DISABLE_AZTEC_VM #include "barretenberg/vm/avm/generated/flavor.hpp" #include "barretenberg/vm/avm/trace/common.hpp" #include "barretenberg/vm/avm/trace/execution.hpp" +#include "barretenberg/vm/avm/trace/public_inputs.hpp" #include "barretenberg/vm/aztec_constants.hpp" #include "barretenberg/vm/stats.hpp" +#include "barretenberg/vm2/avm_api.hpp" +#include "barretenberg/vm2/common/aztec_types.hpp" +#include "barretenberg/vm2/common/constants.hpp" #endif using namespace bb; @@ -671,6 +673,16 @@ void vk_as_fields(const std::string& vk_path, const std::string& output_path) } #ifndef DISABLE_AZTEC_VM +void print_avm_stats() +{ +#ifdef AVM_TRACK_STATS + info("------- STATS -------"); + const auto& stats = avm_trace::Stats::get(); + const int levels = std::getenv("AVM_STATS_DEPTH") != nullptr ? std::stoi(std::getenv("AVM_STATS_DEPTH")) : 2; + info(stats.to_string(levels)); +#endif +} + /** * @brief Writes an avm proof and corresponding (incomplete) verification key to files. * @@ -726,12 +738,34 @@ void avm_prove(const std::filesystem::path& public_inputs_path, write_file(vk_fields_path, { vk_json.begin(), vk_json.end() }); vinfo("vk as fields written to: ", vk_fields_path); -#ifdef AVM_TRACK_STATS - info("------- STATS -------"); - const auto& stats = avm_trace::Stats::get(); - const int levels = std::getenv("AVM_STATS_DEPTH") != nullptr ? 
std::stoi(std::getenv("AVM_STATS_DEPTH")) : 2; - info(stats.to_string(levels)); -#endif + print_avm_stats(); +} + +void avm2_prove(const std::filesystem::path& inputs_path, const std::filesystem::path& output_path) +{ + avm2::AvmAPI avm; + auto inputs = avm2::AvmAPI::ProvingInputs::from(read_file(inputs_path)); + + // This is bigger than CIRCUIT_SUBGROUP_SIZE because of BB inefficiencies. + init_bn254_crs(avm2::CIRCUIT_SUBGROUP_SIZE * 2); + auto [proof, vk] = avm.prove(inputs); + + // NOTE: As opposed to Avm1 and other proof systems, the public inputs are NOT part of the proof. + write_file(output_path / "proof", to_buffer(proof)); + write_file(output_path / "vk", vk); + + print_avm_stats(); +} + +void avm2_check_circuit(const std::filesystem::path& inputs_path) +{ + avm2::AvmAPI avm; + auto inputs = avm2::AvmAPI::ProvingInputs::from(read_file(inputs_path)); + + bool res = avm.check_circuit(inputs); + info("circuit check: ", res ? "success" : "failure"); + + print_avm_stats(); } /** @@ -783,8 +817,28 @@ bool avm_verify(const std::filesystem::path& proof_path, const std::filesystem:: const bool verified = AVM_TRACK_TIME_V("verify/all", avm_trace::Execution::verify(vk, proof)); vinfo("verified: ", verified); + + print_avm_stats(); return verified; } + +// NOTE: The proof should NOT include the public inputs. +bool avm2_verify(const std::filesystem::path& proof_path, + const std::filesystem::path& public_inputs_path, + const std::filesystem::path& vk_path) +{ + const auto proof = many_from_buffer(read_file(proof_path)); + std::vector vk_bytes = read_file(vk_path); + auto public_inputs = avm2::PublicInputs::from(read_file(public_inputs_path)); + + init_bn254_crs(1); + avm2::AvmAPI avm; + bool res = avm.verify(proof, public_inputs, vk_bytes); + info("verification: ", res ? "success" : "failure"); + + print_avm_stats(); + return res; +} #endif /** @@ -1382,6 +1436,18 @@ int main(int argc, char* argv[]) std::string output_path = get_option(args, "-o", "./target"); write_recursion_inputs_honk(bytecode_path, witness_path, output_path, recursive); #ifndef DISABLE_AZTEC_VM + } else if (command == "avm2_prove") { + std::filesystem::path inputs_path = get_option(args, "--avm-inputs", "./target/avm_inputs.bin"); + // This outputs both files: proof and vk, under the given directory. + std::filesystem::path output_path = get_option(args, "-o", "./proofs"); + avm2_prove(inputs_path, output_path); + } else if (command == "avm2_check_circuit") { + std::filesystem::path inputs_path = get_option(args, "--avm-inputs", "./target/avm_inputs.bin"); + avm2_check_circuit(inputs_path); + } else if (command == "avm2_verify") { + std::filesystem::path public_inputs_path = + get_option(args, "--avm-public-inputs", "./target/avm_public_inputs.bin"); + return avm2_verify(proof_path, public_inputs_path, vk_path) ? 
0 : 1; } else if (command == "avm_prove") { std::filesystem::path avm_public_inputs_path = get_option(args, "--avm-public-inputs", "./target/avm_public_inputs.bin"); diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/commitment_key.test.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/commitment_key.test.hpp index adafef90c7c4..de417753927b 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/commitment_key.test.hpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/commitment_key.test.hpp @@ -14,44 +14,54 @@ namespace bb { constexpr size_t COMMITMENT_TEST_NUM_BN254_POINTS = 4096; constexpr size_t COMMITMENT_TEST_NUM_GRUMPKIN_POINTS = 1 << CONST_ECCVM_LOG_N; -template inline std::shared_ptr CreateCommitmentKey(); +template inline std::shared_ptr create_commitment_key(const size_t num_points = 0); -template <> inline std::shared_ptr> CreateCommitmentKey>() +template <> +inline std::shared_ptr> create_commitment_key>( + const size_t num_points) { srs::init_crs_factory(bb::srs::get_ignition_crs_path()); + if (num_points != 0) { + return std::make_shared>(num_points); + }; return std::make_shared>(COMMITMENT_TEST_NUM_BN254_POINTS); } // For IPA -template <> inline std::shared_ptr> CreateCommitmentKey>() +template <> +inline std::shared_ptr> create_commitment_key>( + const size_t num_points) { srs::init_grumpkin_crs_factory(bb::srs::get_grumpkin_crs_path()); + if (num_points != 0) { + return std::make_shared>(num_points); + } return std::make_shared>(COMMITMENT_TEST_NUM_GRUMPKIN_POINTS); } -template inline std::shared_ptr CreateCommitmentKey() +template inline std::shared_ptr create_commitment_key(size_t num_points) // requires std::default_initializable { - return std::make_shared(); + return std::make_shared(num_points); } -template inline std::shared_ptr CreateVerifierCommitmentKey(); +template inline std::shared_ptr create_verifier_commitment_key(); template <> -inline std::shared_ptr> CreateVerifierCommitmentKey< +inline std::shared_ptr> create_verifier_commitment_key< VerifierCommitmentKey>() { return std::make_shared>(); } // For IPA template <> -inline std::shared_ptr> CreateVerifierCommitmentKey< +inline std::shared_ptr> create_verifier_commitment_key< VerifierCommitmentKey>() { auto crs_factory = std::make_shared>( bb::srs::get_grumpkin_crs_path(), COMMITMENT_TEST_NUM_GRUMPKIN_POINTS); return std::make_shared>(COMMITMENT_TEST_NUM_GRUMPKIN_POINTS, crs_factory); } -template inline std::shared_ptr CreateVerifierCommitmentKey() +template inline std::shared_ptr create_verifier_commitment_key() // requires std::default_initializable { return std::make_shared(); @@ -149,10 +159,10 @@ template class CommitmentTest : public ::testing::Test { { // Avoid reallocating static objects if called in subclasses of FooTest. 
if (commitment_key == nullptr) { - commitment_key = CreateCommitmentKey(); + commitment_key = create_commitment_key(); } if (verification_key == nullptr) { - verification_key = CreateVerifierCommitmentKey(); + verification_key = create_verifier_commitment_key(); } } diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/gemini/gemini_impl.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/gemini/gemini_impl.hpp index a66387474d10..3828a009b6b2 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/gemini/gemini_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/gemini/gemini_impl.hpp @@ -124,6 +124,15 @@ std::vector::Claim> GeminiProver_::prove( } const Fr r_challenge = transcript->template get_challenge("Gemini:r"); + const bool gemini_challenge_in_small_subgroup = (has_zk) && (r_challenge.pow(Curve::SUBGROUP_SIZE) == Fr(1)); + + // If Gemini evaluation challenge lands in the multiplicative subgroup used by SmallSubgroupIPA protocol, the + // evaluations of prover polynomials at this challenge would leak witness data. + // TODO(https://github.com/AztecProtocol/barretenberg/issues/1194). Handle edge cases in PCS + if (gemini_challenge_in_small_subgroup) { + throw_or_abort("Gemini evaluation challenge is in the SmallSubgroup."); + } + std::vector claims = compute_fold_polynomial_evaluations(log_n, std::move(fold_polynomials), r_challenge, std::move(batched_group)); diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplemini.test.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplemini.test.cpp index bfb9fd931694..2bcc54538b24 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplemini.test.cpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplemini.test.cpp @@ -5,8 +5,10 @@ #include "../kzg/kzg.hpp" #include "../shplonk/shplonk.hpp" #include "../utils/batch_mul_native.hpp" -#include "barretenberg/commitment_schemes/claim.hpp" #include "barretenberg/commitment_schemes/ipa/ipa.hpp" +#include "barretenberg/commitment_schemes/small_subgroup_ipa/small_subgroup_ipa.hpp" +#include "barretenberg/commitment_schemes/utils/instance_witness_generator.hpp" +#include "barretenberg/commitment_schemes/utils/test_settings.hpp" #include "barretenberg/ecc/curves/bn254/g1.hpp" #include @@ -14,29 +16,29 @@ namespace bb { -template class ShpleminiTest : public CommitmentTest { +template class ShpleminiTest : public CommitmentTest { public: - using Fr = typename Curve::ScalarField; - using Commitment = typename Curve::AffineElement; - using GroupElement = typename Curve::Element; - using Polynomial = bb::Polynomial; + static constexpr size_t n = 32; + static constexpr size_t log_n = 5; + static constexpr size_t num_polynomials = 5; + static constexpr size_t num_shiftable = 2; }; -using CurveTypes = ::testing::Types; +using TestSettings = ::testing::Types; -TYPED_TEST_SUITE(ShpleminiTest, CurveTypes); +TYPED_TEST_SUITE(ShpleminiTest, TestSettings); // This test checks that batch_multivariate_opening_claims method operates correctly TYPED_TEST(ShpleminiTest, CorrectnessOfMultivariateClaimBatching) { - using ShpleminiVerifier = ShpleminiVerifier_; - using Fr = typename TypeParam::ScalarField; - using GroupElement = typename TypeParam::Element; - using Commitment = typename TypeParam::AffineElement; - using Polynomial = typename bb::Polynomial; + using Curve = typename TypeParam::Curve; + using ShpleminiVerifier = ShpleminiVerifier_; + using Fr = typename Curve::ScalarField; 
+ using GroupElement = typename Curve::Element; + using Commitment = typename Curve::AffineElement; + using CK = typename TypeParam::CommitmentKey; - const size_t n = 16; - const size_t log_n = 4; + std::shared_ptr ck = create_commitment_key(this->n); // Generate mock challenges Fr rho = Fr::random_element(); @@ -45,37 +47,40 @@ TYPED_TEST(ShpleminiTest, CorrectnessOfMultivariateClaimBatching) Fr shplonk_eval_challenge = Fr::random_element(); // Generate multilinear polynomials and compute their commitments - auto mle_opening_point = this->random_evaluation_point(log_n); - auto poly1 = Polynomial::random(n); - auto poly2 = Polynomial::random(n, /*shiftable*/ 1); - Polynomial poly3(n); - - Commitment commitment1 = this->commit(poly1); - Commitment commitment2 = this->commit(poly2); - Commitment commitment3 = this->commit(poly3); - EXPECT_TRUE(commitment3.is_point_at_infinity()); - - std::vector unshifted_commitments = { commitment1, commitment2, commitment3 }; - std::vector shifted_commitments = { commitment2, commitment3 }; - - // Evaluate the polynomials at the multivariate challenge, poly3 is not evaluated, because it is 0. - auto eval1 = poly1.evaluate_mle(mle_opening_point); - auto eval2 = poly2.evaluate_mle(mle_opening_point); - Fr eval3{ 0 }; - Fr eval3_shift{ 0 }; - auto eval2_shift = poly2.evaluate_mle(mle_opening_point, true); + auto mle_opening_point = this->random_evaluation_point(this->log_n); + + auto pcs_instance_witness = + InstanceWitnessGenerator(this->n, this->num_polynomials, this->num_shiftable, mle_opening_point, ck); // Collect multilinear evaluations - std::vector multilinear_evaluations = { eval1, eval2, eval3, eval2_shift, eval3_shift }; - std::vector rhos = gemini::powers_of_rho(rho, multilinear_evaluations.size()); + std::vector rhos = gemini::powers_of_rho(rho, this->num_polynomials + this->num_shiftable); // Compute batched multivariate evaluation - Fr batched_evaluation = - std::inner_product(multilinear_evaluations.begin(), multilinear_evaluations.end(), rhos.begin(), Fr::zero()); + Fr batched_evaluation = Fr(0); + size_t idx = 0; + for (auto& eval : pcs_instance_witness.unshifted_evals) { + batched_evaluation += eval * rhos[idx]; + idx++; + } + + for (auto& eval : pcs_instance_witness.shifted_evals) { + batched_evaluation += eval * rhos[idx]; + idx++; + } // Compute batched commitments manually - GroupElement batched_commitment_unshifted = commitment1 * rhos[0] + commitment2 * rhos[1] + commitment3 * rhos[2]; - GroupElement batched_commitment_to_be_shifted = commitment2 * rhos[3] + commitment3 * rhos[4]; + idx = 0; + GroupElement batched_commitment_unshifted = GroupElement::zero(); + for (auto& comm : pcs_instance_witness.unshifted_commitments) { + batched_commitment_unshifted += comm * rhos[idx]; + idx++; + } + + GroupElement batched_commitment_to_be_shifted = GroupElement::zero(); + for (auto& comm : pcs_instance_witness.to_be_shifted_commitments) { + batched_commitment_to_be_shifted += comm * rhos[idx]; + idx++; + } // Compute expected result manually GroupElement commitment_to_univariate = @@ -93,17 +98,17 @@ TYPED_TEST(ShpleminiTest, CorrectnessOfMultivariateClaimBatching) std::vector scalars; Fr verifier_batched_evaluation{ 0 }; - Fr unshifted_scalar = (shplonk_eval_challenge - gemini_eval_challenge).invert() + - shplonk_batching_challenge * (shplonk_eval_challenge + gemini_eval_challenge).invert(); + const Fr unshifted_scalar = (shplonk_eval_challenge - gemini_eval_challenge).invert() + + shplonk_batching_challenge * (shplonk_eval_challenge + 
gemini_eval_challenge).invert(); - Fr shifted_scalar = gemini_eval_challenge.invert() * - ((shplonk_eval_challenge - gemini_eval_challenge).invert() - - shplonk_batching_challenge * (shplonk_eval_challenge + gemini_eval_challenge).invert()); + const Fr shifted_scalar = gemini_eval_challenge.invert() * + ((shplonk_eval_challenge - gemini_eval_challenge).invert() - + shplonk_batching_challenge * (shplonk_eval_challenge + gemini_eval_challenge).invert()); - ShpleminiVerifier::batch_multivariate_opening_claims(RefVector(unshifted_commitments), - RefVector(shifted_commitments), - RefArray{ eval1, eval2, eval3 }, - RefArray{ eval2_shift, eval3_shift }, + ShpleminiVerifier::batch_multivariate_opening_claims(RefVector(pcs_instance_witness.unshifted_commitments), + RefVector(pcs_instance_witness.to_be_shifted_commitments), + RefVector(pcs_instance_witness.unshifted_evals), + RefVector(pcs_instance_witness.shifted_evals), rho, unshifted_scalar, shifted_scalar, @@ -114,22 +119,25 @@ TYPED_TEST(ShpleminiTest, CorrectnessOfMultivariateClaimBatching) // Final pairing check GroupElement shplemini_result = batch_mul_native(commitments, scalars); - EXPECT_EQ(commitments.size(), unshifted_commitments.size() + shifted_commitments.size()); + EXPECT_EQ(commitments.size(), + pcs_instance_witness.unshifted_commitments.size() + + pcs_instance_witness.to_be_shifted_commitments.size()); EXPECT_EQ(batched_evaluation, verifier_batched_evaluation); EXPECT_EQ(-expected_result, shplemini_result); } TYPED_TEST(ShpleminiTest, CorrectnessOfGeminiClaimBatching) { - using GeminiProver = GeminiProver_; - using ShpleminiVerifier = ShpleminiVerifier_; - using ShplonkVerifier = ShplonkVerifier_; - using Fr = typename TypeParam::ScalarField; - using GroupElement = typename TypeParam::Element; - using Commitment = typename TypeParam::AffineElement; + using Curve = TypeParam::Curve; + using GeminiProver = GeminiProver_; + using ShpleminiVerifier = ShpleminiVerifier_; + using ShplonkVerifier = ShplonkVerifier_; + using Fr = typename Curve::ScalarField; + using GroupElement = typename Curve::Element; + using Commitment = typename Curve::AffineElement; using Polynomial = typename bb::Polynomial; + using CK = typename TypeParam::CommitmentKey; - const size_t n = 16; - const size_t log_n = 4; + std::shared_ptr ck = create_commitment_key(this->n); // Generate mock challenges Fr rho = Fr::random_element(); @@ -137,62 +145,58 @@ TYPED_TEST(ShpleminiTest, CorrectnessOfGeminiClaimBatching) Fr shplonk_batching_challenge = Fr::random_element(); Fr shplonk_eval_challenge = Fr::random_element(); - // Generate multilinear polynomials and compute their commitments - auto mle_opening_point = this->random_evaluation_point(log_n); - auto poly1 = Polynomial::random(n); - auto poly2 = Polynomial::random(n, /*shiftable*/ 1); - Polynomial poly3 = Polynomial::shiftable(n); - - // Evaluate the polynomials at the multivariate challenge, poly3 is not evaluated, because it is 0. 
- auto eval1 = poly1.evaluate_mle(mle_opening_point); - auto eval2 = poly2.evaluate_mle(mle_opening_point); - Fr eval3{ 0 }; - Fr eval3_shift{ 0 }; - auto eval2_shift = poly2.evaluate_mle(mle_opening_point, true); + std::vector mle_opening_point = this->random_evaluation_point(this->log_n); + + auto pcs_instance_witness = + InstanceWitnessGenerator(this->n, this->num_polynomials, this->num_shiftable, mle_opening_point, ck); // Collect multilinear evaluations - std::vector multilinear_evaluations = { eval1, eval2, eval3, eval2_shift, eval3_shift }; - std::vector rhos = gemini::powers_of_rho(rho, multilinear_evaluations.size()); + std::vector rhos = gemini::powers_of_rho(rho, this->num_polynomials + this->num_shiftable); + + Polynomial batched_unshifted(this->n); + Polynomial batched_to_be_shifted = Polynomial::shiftable(this->n); - Polynomial batched_unshifted(n); - Polynomial batched_to_be_shifted = Polynomial::shiftable(n); - batched_unshifted.add_scaled(poly1, rhos[0]); - batched_unshifted.add_scaled(poly2, rhos[1]); - batched_unshifted.add_scaled(poly3, rhos[2]); - batched_to_be_shifted.add_scaled(poly2, rhos[3]); - batched_to_be_shifted.add_scaled(poly3, rhos[4]); + size_t idx = 0; + for (auto& poly : pcs_instance_witness.unshifted_polynomials) { + batched_unshifted.add_scaled(poly, rhos[idx]); + idx++; + } + + for (auto& poly : pcs_instance_witness.to_be_shifted_polynomials) { + batched_unshifted.add_scaled(poly, rhos[idx]); + idx++; + } // Compute: // - (d+1) opening pairs: {r, \hat{a}_0}, {-r^{2^i}, a_i}, i = 0, ..., d-1 // - (d+1) Fold polynomials Fold_{r}^(0), Fold_{-r}^(0), and Fold^(i), i = 0, ..., d-1 auto fold_polynomials = GeminiProver::compute_fold_polynomials( - log_n, mle_opening_point, std::move(batched_unshifted), std::move(batched_to_be_shifted)); + this->log_n, mle_opening_point, std::move(batched_unshifted), std::move(batched_to_be_shifted)); std::vector prover_commitments; - for (size_t l = 0; l < log_n - 1; ++l) { - auto commitment = this->ck()->commit(fold_polynomials[l + 2]); + for (size_t l = 0; l < this->log_n - 1; ++l) { + auto commitment = ck->commit(fold_polynomials[l + 2]); prover_commitments.emplace_back(commitment); } - const auto opening_claims = - GeminiProver::compute_fold_polynomial_evaluations(log_n, std::move(fold_polynomials), gemini_eval_challenge); + const auto opening_claims = GeminiProver::compute_fold_polynomial_evaluations( + this->log_n, std::move(fold_polynomials), gemini_eval_challenge); std::vector prover_evaluations; - for (size_t l = 0; l < log_n; ++l) { + for (size_t l = 0; l < this->log_n; ++l) { const auto& evaluation = opening_claims[l + 1].opening_pair.evaluation; prover_evaluations.emplace_back(evaluation); } - std::vector r_squares = gemini::powers_of_evaluation_challenge(gemini_eval_challenge, log_n); + std::vector r_squares = gemini::powers_of_evaluation_challenge(gemini_eval_challenge, this->log_n); GroupElement expected_result = GroupElement::zero(); - std::vector expected_inverse_vanishing_evals(log_n + 1); + std::vector expected_inverse_vanishing_evals(this->log_n + 1); // Compute expected inverses expected_inverse_vanishing_evals[0] = (shplonk_eval_challenge - r_squares[0]).invert(); - expected_inverse_vanishing_evals[1] = (shplonk_eval_challenge + r_squares[0]).invert(); - expected_inverse_vanishing_evals[2] = (shplonk_eval_challenge + r_squares[1]).invert(); - expected_inverse_vanishing_evals[3] = (shplonk_eval_challenge + r_squares[2]).invert(); - expected_inverse_vanishing_evals[4] = (shplonk_eval_challenge + 
r_squares[3]).invert(); + for (size_t idx = 1; idx < this->log_n + 1; idx++) { + expected_inverse_vanishing_evals[idx] = (shplonk_eval_challenge + r_squares[idx - 1]).invert(); + } Fr current_challenge{ shplonk_batching_challenge * shplonk_batching_challenge }; for (size_t idx = 0; idx < prover_commitments.size(); ++idx) { @@ -202,13 +206,13 @@ TYPED_TEST(ShpleminiTest, CorrectnessOfGeminiClaimBatching) // Run the ShepliminiVerifier batching method std::vector inverse_vanishing_evals = - ShplonkVerifier::compute_inverted_gemini_denominators(log_n + 1, shplonk_eval_challenge, r_squares); + ShplonkVerifier::compute_inverted_gemini_denominators(this->log_n + 1, shplonk_eval_challenge, r_squares); std::vector commitments; std::vector scalars; Fr expected_constant_term_accumulator{ 0 }; - ShpleminiVerifier::batch_gemini_claims_received_from_prover(log_n, + ShpleminiVerifier::batch_gemini_claims_received_from_prover(this->log_n, prover_commitments, prover_evaluations, inverse_vanishing_evals, @@ -223,4 +227,118 @@ TYPED_TEST(ShpleminiTest, CorrectnessOfGeminiClaimBatching) EXPECT_EQ(shplemini_result, expected_result); } -} // namespace bb +/** + * @brief Test Shplemini with ZK data consisting of a hiding polynomial generated by GeminiProver and Libra polynomials + * used to mask Sumcheck Round Univariates. + * + */ +TYPED_TEST(ShpleminiTest, ShpleminiWithZK) +{ + using ZKData = ZKSumcheckData; + using Curve = TypeParam::Curve; + using ShpleminiProver = ShpleminiProver_; + using ShpleminiVerifier = ShpleminiVerifier_; + using Fr = typename Curve::ScalarField; + using Commitment = typename Curve::AffineElement; + using CK = typename TypeParam::CommitmentKey; + + // Initialize transcript and commitment key + auto prover_transcript = TypeParam::Transcript::prover_init_empty(); + + // SmallSubgroupIPAProver requires at least CURVE::SUBGROUP_SIZE + 3 elements in the ck. 
+ static constexpr size_t log_subgroup_size = static_cast(numeric::get_msb(Curve::SUBGROUP_SIZE)); + std::shared_ptr ck = create_commitment_key(std::max(this->n, 1ULL << (log_subgroup_size + 1))); + + // Generate Libra polynomials, compute masked concatenated Libra polynomial, commit to it + ZKData zk_sumcheck_data(this->log_n, prover_transcript, ck); + + // Generate multivariate challenge of size CONST_PROOF_SIZE_LOG_N + std::vector const_size_mle_opening_point = this->random_evaluation_point(CONST_PROOF_SIZE_LOG_N); + // Truncate the multivariate challenge to evaluate prover polynomials (As in Sumcheck) + const std::vector mle_opening_point(const_size_mle_opening_point.begin(), + const_size_mle_opening_point.begin() + this->log_n); + + // Generate random prover polynomials, compute their evaluations and commitments + auto pcs_instance_witness = + InstanceWitnessGenerator(this->n, this->num_polynomials, this->num_shiftable, mle_opening_point, ck); + + // Compute the sum of the Libra constant term and Libra univariates evaluated at Sumcheck challenges + const Fr claimed_inner_product = SmallSubgroupIPAProver::compute_claimed_inner_product( + zk_sumcheck_data, const_size_mle_opening_point, this->log_n); + + prover_transcript->template send_to_verifier("Libra:claimed_evaluation", claimed_inner_product); + + // Instantiate SmallSubgroupIPAProver, this prover sends commitments to Big Sum and Quotient polynomials + auto small_subgroup_ipa_prover = SmallSubgroupIPAProver( + zk_sumcheck_data, const_size_mle_opening_point, claimed_inner_product, prover_transcript, ck); + + // Reduce to KZG or IPA based on the curve used in the test Flavor + const auto opening_claim = ShpleminiProver::prove(this->n, + RefVector(pcs_instance_witness.unshifted_polynomials), + RefVector(pcs_instance_witness.to_be_shifted_polynomials), + const_size_mle_opening_point, + ck, + prover_transcript, + small_subgroup_ipa_prover.get_witness_polynomials()); + + if constexpr (std::is_same_v) { + IPA::compute_opening_proof(this->ck(), opening_claim, prover_transcript); + } else { + KZG::compute_opening_proof(this->ck(), opening_claim, prover_transcript); + } + + // Initialize verifier's transcript + auto verifier_transcript = NativeTranscript::verifier_init_empty(prover_transcript); + + // Start populating Verifier's array of Libra commitments + std::array libra_commitments = {}; + libra_commitments[0] = + verifier_transcript->template receive_from_prover("Libra:concatenation_commitment"); + + // Place Libra data to the transcript + const Fr libra_total_sum = verifier_transcript->template receive_from_prover("Libra:Sum"); + const Fr libra_challenge = verifier_transcript->template get_challenge("Libra:Challenge"); + const Fr libra_evaluation = verifier_transcript->template receive_from_prover("Libra:claimed_evaluation"); + + // Check that transcript is consistent + EXPECT_EQ(libra_total_sum, zk_sumcheck_data.libra_total_sum); + EXPECT_EQ(libra_challenge, zk_sumcheck_data.libra_challenge); + EXPECT_EQ(libra_evaluation, claimed_inner_product); + + // Finalize the array of Libra/SmallSubgroupIpa commitments + libra_commitments[1] = verifier_transcript->template receive_from_prover("Libra:big_sum_commitment"); + libra_commitments[2] = verifier_transcript->template receive_from_prover("Libra:quotient_commitment"); + + // Used to verify the consistency of the evaluations of the concatenated libra polynomial, big sum polynomial, and + // the quotient polynomial computed by SmallSubgroupIPAProver + bool consistency_checked = true; + + // 
Run Shplemini + const auto batch_opening_claim = + ShpleminiVerifier::compute_batch_opening_claim(this->n, + RefVector(pcs_instance_witness.unshifted_commitments), + RefVector(pcs_instance_witness.to_be_shifted_commitments), + RefVector(pcs_instance_witness.unshifted_evals), + RefVector(pcs_instance_witness.shifted_evals), + const_size_mle_opening_point, + this->vk()->get_g1_identity(), + verifier_transcript, + {}, + true, + &consistency_checked, + libra_commitments, + libra_evaluation); + // Verify claim using KZG or IPA + if constexpr (std::is_same_v) { + auto result = + IPA::reduce_verify_batch_opening_claim(batch_opening_claim, this->vk(), verifier_transcript); + EXPECT_EQ(result, true); + } else { + const auto pairing_points = + KZG::reduce_verify_batch_opening_claim(batch_opening_claim, verifier_transcript); + // Final pairing check: e([Q] - [Q_z] + z[W], [1]_2) = e([W], [x]_2) + EXPECT_EQ(this->vk()->pairing_check(pairing_points[0], pairing_points[1]), true); + } +} + +} // namespace bb \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/small_subgroup_ipa/small_subgroup_ipa.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/small_subgroup_ipa/small_subgroup_ipa.hpp index 39f9b27b6253..54af7999a2ab 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/small_subgroup_ipa/small_subgroup_ipa.hpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/small_subgroup_ipa/small_subgroup_ipa.hpp @@ -374,6 +374,32 @@ template class SmallSubgroupIPAProver { remainder.at(idx - SUBGROUP_SIZE) += remainder.at(idx); } } + + /** + * @brief For test purposes: Compute the sum of the Libra constant term and Libra univariates evaluated at Sumcheck + * challenges. + * + * @param zk_sumcheck_data Contains Libra constant term and scaled Libra univariates + * @param multivariate_challenge Sumcheck challenge + * @param log_circuit_size + */ + static FF compute_claimed_inner_product(ZKSumcheckData& zk_sumcheck_data, + const std::vector& multivariate_challenge, + const size_t& log_circuit_size) + { + const FF libra_challenge_inv = zk_sumcheck_data.libra_challenge.invert(); + // Compute claimed inner product similarly to the SumcheckProver + FF claimed_inner_product = FF{ 0 }; + size_t idx = 0; + for (const auto& univariate : zk_sumcheck_data.libra_univariates) { + claimed_inner_product += univariate.evaluate(multivariate_challenge[idx]); + idx++; + } + // Libra Univariates are mutiplied by the Libra challenge in setup_auxiliary_data(), needs to be undone + claimed_inner_product *= libra_challenge_inv / FF(1 << (log_circuit_size - 1)); + claimed_inner_product += zk_sumcheck_data.constant_term; + return claimed_inner_product; + } }; /** @@ -426,6 +452,18 @@ template class SmallSubgroupIPAVerifier { // Compute the evaluation of the vanishing polynomia Z_H(X) at X = gemini_evaluation_challenge const FF vanishing_poly_eval = gemini_evaluation_challenge.pow(SUBGROUP_SIZE) - FF(1); + // TODO(https://github.com/AztecProtocol/barretenberg/issues/1194). Handle edge cases in PCS + // TODO(https://github.com/AztecProtocol/barretenberg/issues/1186). Insecure pattern. 
+ bool gemini_challenge_in_small_subgroup = false; + if constexpr (Curve::is_stdlib_type) { + gemini_challenge_in_small_subgroup = (vanishing_poly_eval.get_value() == FF(0).get_value()); + } else { + gemini_challenge_in_small_subgroup = (vanishing_poly_eval == FF(0)); + } + // The probability of this event is negligible but it has to be processed correctly + if (gemini_challenge_in_small_subgroup) { + throw_or_abort("Gemini evaluation challenge is in the SmallSubgroup."); + } // Construct the challenge polynomial from the sumcheck challenge, the verifier has to evaluate it on its own const std::vector challenge_polynomial_lagrange = compute_challenge_polynomial(multilinear_challenge); @@ -542,4 +580,4 @@ template class SmallSubgroupIPAVerifier { return result; } }; -} // namespace bb +} // namespace bb \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/small_subgroup_ipa/small_subgroup_ipa.test.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/small_subgroup_ipa/small_subgroup_ipa.test.cpp new file mode 100644 index 000000000000..cc445d3a3944 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/small_subgroup_ipa/small_subgroup_ipa.test.cpp @@ -0,0 +1,248 @@ +#include "barretenberg/commitment_schemes/small_subgroup_ipa/small_subgroup_ipa.hpp" +#include "../commitment_key.test.hpp" +#include "barretenberg/commitment_schemes/shplonk/shplemini.hpp" +#include "barretenberg/commitment_schemes/utils/test_settings.hpp" + +#include +#include +#include + +namespace bb { +template class SmallSubgroupIPATest : public ::testing::Test { + public: + using Curve = typename Flavor::Curve; + using Transcript = typename Flavor::Transcript; + using FF = typename Curve::ScalarField; + + static constexpr size_t log_circuit_size = 7; + static constexpr size_t circuit_size = 1ULL << log_circuit_size; + + FF evaluation_challenge; + + void SetUp() override { evaluation_challenge = FF::random_element(); } + + static std::vector generate_random_vector(const size_t size) + { + std::vector multivariate_challenge(size); + for (auto& challenge : multivariate_challenge) { + challenge = FF::random_element(); + } + return multivariate_challenge; + } +}; + +using TestFlavors = ::testing::Types; +TYPED_TEST_SUITE(SmallSubgroupIPATest, TestFlavors); + +// Check the correctness of the computation of the claimed inner product and various polynomials needed for the +// SmallSubgroupIPA. +TYPED_TEST(SmallSubgroupIPATest, ProverComputationsCorrectness) +{ + using ZKData = ZKSumcheckData; + using SmallSubgroupIPA = SmallSubgroupIPAProver; + using FF = typename TypeParam::FF; + static constexpr size_t SUBGROUP_SIZE = TypeParam::SUBGROUP_SIZE; + + using CK = typename TypeParam::CommitmentKey; + + // SmallSubgroupIPAProver requires at least CURVE::SUBGROUP_SIZE + 3 elements in the ck. 
+ static constexpr size_t log_subgroup_size = static_cast(numeric::get_msb(SUBGROUP_SIZE)); + std::shared_ptr ck = + create_commitment_key(std::max(this->circuit_size, 1ULL << (log_subgroup_size + 1))); + + auto prover_transcript = TypeParam::Transcript::prover_init_empty(); + + ZKData zk_sumcheck_data(this->log_circuit_size, prover_transcript, ck); + std::vector multivariate_challenge = this->generate_random_vector(this->log_circuit_size); + + const FF claimed_inner_product = SmallSubgroupIPA::compute_claimed_inner_product( + zk_sumcheck_data, multivariate_challenge, this->log_circuit_size); + + SmallSubgroupIPA small_subgroup_ipa_prover = + SmallSubgroupIPA(zk_sumcheck_data, multivariate_challenge, claimed_inner_product, prover_transcript, ck); + + const Polynomial batched_polynomial = small_subgroup_ipa_prover.get_batched_polynomial(); + const Polynomial libra_concatenated_polynomial = small_subgroup_ipa_prover.get_witness_polynomials()[0]; + const Polynomial batched_quotient = small_subgroup_ipa_prover.get_witness_polynomials()[3]; + const Polynomial challenge_polynomial = small_subgroup_ipa_prover.get_challenge_polynomial(); + + // Check that claimed inner product coincides with the inner product of libra_concatenated_polynomial and + // challenge_polynomial. Since libra_concatenated_polynomial is masked, we also check that masking does not affect + // the evaluations over H + FF inner_product = FF(0); + const std::array domain = zk_sumcheck_data.interpolation_domain; + for (size_t idx = 0; idx < SUBGROUP_SIZE; idx++) { + inner_product += + challenge_polynomial.evaluate(domain[idx]) * libra_concatenated_polynomial.evaluate(domain[idx]); + } + EXPECT_TRUE(inner_product == claimed_inner_product); + + // Check that batched polynomial is divisible by Z_H(X) + bool ipa_claim_consistency = true; + for (size_t idx = 0; idx < SUBGROUP_SIZE; idx++) { + ipa_claim_consistency = (batched_polynomial.evaluate(zk_sumcheck_data.interpolation_domain[idx]) == FF{ 0 }) && + ipa_claim_consistency; + } + EXPECT_EQ(ipa_claim_consistency, true); + + // Check that Z_H(X) * Q(X) = batched_polynomial + std::vector Z_H(SUBGROUP_SIZE + 1); + Z_H[0] = -FF(1); + Z_H[SUBGROUP_SIZE] = FF(1); + Polynomial product(batched_polynomial.size()); + + for (size_t i = 0; i < Z_H.size(); i++) { + for (size_t j = 0; j < batched_quotient.size(); j++) { + product.at(i + j) += Z_H[i] * batched_quotient.at(j); + } + } + bool quotient_is_correct = true; + for (const auto& [coeff_expected, coeff] : zip_view(product.coeffs(), batched_polynomial.coeffs())) { + quotient_is_correct = (coeff_expected == coeff) && quotient_is_correct; + } + EXPECT_EQ(quotient_is_correct, true); +} + +// Check the correctness of the evaluations of the challenge_polynomial, Lagrange first, and Lagrange last that the +// verifier has to compute on its own. Compare the values against the evaluations obtaned by applying Lagrange +// interpolation method used by Polynomial class constructor. 
+TYPED_TEST(SmallSubgroupIPATest, VerifierEvaluations) +{ + using FF = typename TypeParam::FF; + using Curve = typename TypeParam::Curve; + using SmallSubgroupIPA = SmallSubgroupIPAVerifier; + + // Extract the constants + static constexpr size_t SUBGROUP_SIZE = TypeParam::SUBGROUP_SIZE; + const FF subgroup_generator_inverse = Curve::subgroup_generator_inverse; + const FF subgroup_generator = subgroup_generator_inverse.invert(); + + // Sample random Lagrange coefficients over H + std::vector challenge_poly_lagrange = this->generate_random_vector(SUBGROUP_SIZE); + + // Evaluate Verifier's polynomials at the challenge + const FF vanishing_poly_eval = this->evaluation_challenge.pow(SUBGROUP_SIZE) - 1; + + // Compute required evaluations using efficient batch evaluation + const auto [challenge_poly_eval, lagrange_first, lagrange_last] = + SmallSubgroupIPA::compute_batched_barycentric_evaluations( + challenge_poly_lagrange, this->evaluation_challenge, subgroup_generator_inverse, vanishing_poly_eval); + + // Compute the evaluations differently, namely, using Lagrange interpolation + std::array interpolation_domain; + interpolation_domain[0] = FF(1); + for (size_t idx = 1; idx < SUBGROUP_SIZE; idx++) { + interpolation_domain[idx] = interpolation_domain[idx - 1] * subgroup_generator; + } + Polynomial challenge_poly_monomial = + Polynomial(interpolation_domain, challenge_poly_lagrange, SUBGROUP_SIZE); + + // Evaluate at the challenge + const FF challenge_poly_expected_eval = challenge_poly_monomial.evaluate(this->evaluation_challenge); + + EXPECT_EQ(challenge_poly_eval, challenge_poly_expected_eval); + + // Compute Lagrange polynomials using interpolation + std::vector lagrange_poly(SUBGROUP_SIZE); + lagrange_poly.at(0) = FF(1); + Polynomial lagrange_first_monomial = Polynomial(interpolation_domain, lagrange_poly, SUBGROUP_SIZE); + EXPECT_EQ(lagrange_first, lagrange_first_monomial.evaluate(this->evaluation_challenge)); + + lagrange_poly.at(0) = FF(0); + lagrange_poly.at(SUBGROUP_SIZE - 1) = FF(1); + Polynomial lagrange_last_monomial = Polynomial(interpolation_domain, lagrange_poly, SUBGROUP_SIZE); + EXPECT_EQ(lagrange_last, lagrange_last_monomial.evaluate(this->evaluation_challenge)); +} + +// Simulate the interaction between the prover and the verifier leading to the consistency check performed by the +// verifier. +TYPED_TEST(SmallSubgroupIPATest, ProverAndVerifierSimple) +{ + using FF = typename TypeParam::FF; + using Curve = typename TypeParam::Curve; + using Verifier = SmallSubgroupIPAVerifier; + using Prover = SmallSubgroupIPAProver; + using ZKData = ZKSumcheckData; + using CK = typename TypeParam::CommitmentKey; + + auto prover_transcript = TypeParam::Transcript::prover_init_empty(); + + // SmallSubgroupIPAProver requires at least CURVE::SUBGROUP_SIZE + 3 elements in the ck. 
+ static constexpr size_t log_subgroup_size = static_cast(numeric::get_msb(Curve::SUBGROUP_SIZE)); + std::shared_ptr ck = + create_commitment_key(std::max(this->circuit_size, 1ULL << (log_subgroup_size + 1))); + + ZKData zk_sumcheck_data(this->log_circuit_size, prover_transcript, ck); + + std::vector multivariate_challenge = this->generate_random_vector(CONST_PROOF_SIZE_LOG_N); + + const FF claimed_inner_product = + Prover::compute_claimed_inner_product(zk_sumcheck_data, multivariate_challenge, this->log_circuit_size); + + Prover small_subgroup_ipa_prover = + Prover(zk_sumcheck_data, multivariate_challenge, claimed_inner_product, prover_transcript, ck); + + const std::array, NUM_LIBRA_EVALUATIONS> witness_polynomials = + small_subgroup_ipa_prover.get_witness_polynomials(); + + std::array libra_evaluations = { + witness_polynomials[0].evaluate(this->evaluation_challenge), + witness_polynomials[1].evaluate(this->evaluation_challenge * Curve::subgroup_generator), + witness_polynomials[2].evaluate(this->evaluation_challenge), + witness_polynomials[3].evaluate(this->evaluation_challenge) + }; + + bool consistency_checked = Verifier::check_evaluations_consistency( + libra_evaluations, this->evaluation_challenge, multivariate_challenge, claimed_inner_product); + + EXPECT_TRUE(consistency_checked); +} + +// Check that consistency check fails when some of the prover's data is corrupted. +TYPED_TEST(SmallSubgroupIPATest, ProverAndVerifierSimpleFailure) +{ + using FF = typename TypeParam::FF; + using Curve = typename TypeParam::Curve; + using Verifier = SmallSubgroupIPAVerifier; + using Prover = SmallSubgroupIPAProver; + using ZKData = ZKSumcheckData; + using CK = typename TypeParam::CommitmentKey; + + auto prover_transcript = TypeParam::Transcript::prover_init_empty(); + + // SmallSubgroupIPAProver requires at least CURVE::SUBGROUP_SIZE + 3 elements in the ck. 
+ static constexpr size_t log_subgroup_size = static_cast(numeric::get_msb(Curve::SUBGROUP_SIZE)); + std::shared_ptr ck = + create_commitment_key(std::max(this->circuit_size, 1ULL << (log_subgroup_size + 1))); + + ZKData zk_sumcheck_data(this->log_circuit_size, prover_transcript, ck); + + std::vector multivariate_challenge = this->generate_random_vector(CONST_PROOF_SIZE_LOG_N); + + const FF claimed_inner_product = + Prover::compute_claimed_inner_product(zk_sumcheck_data, multivariate_challenge, this->log_circuit_size); + + Prover small_subgroup_ipa_prover = + Prover(zk_sumcheck_data, multivariate_challenge, claimed_inner_product, prover_transcript, ck); + + std::array, NUM_LIBRA_EVALUATIONS> witness_polynomials = + small_subgroup_ipa_prover.get_witness_polynomials(); + + // Tamper with witness polynomials + witness_polynomials[0].at(0) = FF::random_element(); + + std::array libra_evaluations = { + witness_polynomials[0].evaluate(this->evaluation_challenge), + witness_polynomials[1].evaluate(this->evaluation_challenge * Curve::subgroup_generator), + witness_polynomials[2].evaluate(this->evaluation_challenge), + witness_polynomials[3].evaluate(this->evaluation_challenge) + }; + + bool consistency_checked = Verifier::check_evaluations_consistency( + libra_evaluations, this->evaluation_challenge, multivariate_challenge, claimed_inner_product); + + // Since witness polynomials were modified, the consistency check must fail + EXPECT_FALSE(consistency_checked); +} + +} // namespace bb \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/utils/instance_witness_generator.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/utils/instance_witness_generator.hpp new file mode 100644 index 000000000000..f3b99924b9f8 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/utils/instance_witness_generator.hpp @@ -0,0 +1,70 @@ +#pragma once + +#include "barretenberg/commitment_schemes/commitment_key.hpp" +#include "barretenberg/ecc/curves/bn254/bn254.hpp" +#include "barretenberg/polynomials/polynomial.hpp" +#include "barretenberg/transcript/transcript.hpp" + +namespace bb { +/** + * @brief Constructs random polynomials, computes commitments and corresponding evaluations. 
+ * + * @tparam Curve + */ +template struct InstanceWitnessGenerator { + public: + using CommitmentKey = bb::CommitmentKey; + using Fr = typename Curve::ScalarField; + using Commitment = typename Curve::AffineElement; + using Polynomial = bb::Polynomial; + + std::shared_ptr ck; + std::vector unshifted_polynomials; + std::vector to_be_shifted_polynomials; + std::vector const_size_mle_opening_point; + std::vector unshifted_commitments; + std::vector to_be_shifted_commitments; + std::vector unshifted_evals; + std::vector shifted_evals; + + InstanceWitnessGenerator(const size_t n, + const size_t num_polynomials, + const size_t num_shiftable, + const std::vector& mle_opening_point, + std::shared_ptr& commitment_key) + : ck(commitment_key) // Initialize the commitment key + , unshifted_polynomials(num_polynomials) + , to_be_shifted_polynomials(num_shiftable) + + { + construct_instance_and_witnesses(n, mle_opening_point); + } + + void construct_instance_and_witnesses(size_t n, const std::vector& mle_opening_point) + { + + const size_t num_unshifted = unshifted_polynomials.size() - to_be_shifted_polynomials.size(); + + // Constructs polynomials that are not shifted + for (size_t idx = 0; idx < num_unshifted; idx++) { + unshifted_polynomials[idx] = Polynomial::random(n); + unshifted_commitments.push_back(ck->commit(unshifted_polynomials[idx])); + unshifted_evals.push_back(unshifted_polynomials[idx].evaluate_mle(mle_opening_point)); + } + + // Constructs polynomials that are being shifted + size_t idx = num_unshifted; + for (auto& poly : to_be_shifted_polynomials) { + poly = Polynomial::random(n, /*shiftable*/ 1); + unshifted_polynomials[idx] = poly; + const Commitment comm = this->ck->commit(poly); + unshifted_commitments.push_back(comm); + to_be_shifted_commitments.push_back(comm); + unshifted_evals.push_back(poly.evaluate_mle(mle_opening_point)); + shifted_evals.push_back(poly.evaluate_mle(mle_opening_point, true)); + idx++; + } + } +}; + +} // namespace bb \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/utils/test_settings.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/utils/test_settings.hpp new file mode 100644 index 000000000000..5023d02a8f72 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/utils/test_settings.hpp @@ -0,0 +1,29 @@ +#pragma once + +#include "barretenberg/commitment_schemes/commitment_key.hpp" +#include "barretenberg/ecc/curves/bn254/bn254.hpp" +#include "barretenberg/transcript/transcript.hpp" + +namespace bb { +/** + * @brief Mock Flavors to use ZKSumcheckData and SmallSubgroupIPAProver in the PCS tests. 
+ * + */ +class BN254Settings { + public: + using Curve = curve::BN254; + using CommitmentKey = bb::CommitmentKey; + using Transcript = NativeTranscript; + using FF = typename Curve::ScalarField; + static constexpr size_t SUBGROUP_SIZE = Curve::SUBGROUP_SIZE; +}; + +class GrumpkinSettings { + public: + using Curve = curve::Grumpkin; + using CommitmentKey = bb::CommitmentKey; + using Transcript = NativeTranscript; + using FF = typename Curve::ScalarField; + static constexpr size_t SUBGROUP_SIZE = Curve::SUBGROUP_SIZE; +}; +} // namespace bb \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp b/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp index 883f8bcb017b..3372899d730d 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp @@ -170,6 +170,9 @@ template class alignas(64) affine_ } Fq x; Fq y; + + // Note: this serialization from typescript does not support infinity. + MSGPACK_FIELDS(x, y); }; template diff --git a/barretenberg/cpp/src/barretenberg/vm/avm/trace/execution.cpp b/barretenberg/cpp/src/barretenberg/vm/avm/trace/execution.cpp index 7855c9915137..a06cbfd8fb42 100644 --- a/barretenberg/cpp/src/barretenberg/vm/avm/trace/execution.cpp +++ b/barretenberg/cpp/src/barretenberg/vm/avm/trace/execution.cpp @@ -1,5 +1,6 @@ #include "barretenberg/vm/avm/trace/execution.hpp" #include "barretenberg/bb/log.hpp" +#include "barretenberg/common/log.hpp" #include "barretenberg/common/serialize.hpp" #include "barretenberg/common/thread.hpp" #include "barretenberg/common/throw_or_abort.hpp" @@ -31,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -195,7 +197,7 @@ void show_trace_info(const auto& trace) std::string fullnesses; for (size_t j = i; j < i + 10 && j < column_stats.size(); j++) { const auto& stat = column_stats.at(j); - fullnesses += std::format("{:3}: {:3}% ", stat.column_number, stat.fullness); + fullnesses += format(std::setw(3), stat.column_number, ": ", std::setw(3), stat.fullness, "% "); } vinfo(fullnesses); } diff --git a/barretenberg/cpp/src/barretenberg/vm/avm/trace/trace.cpp b/barretenberg/cpp/src/barretenberg/vm/avm/trace/trace.cpp index cd7e26908bd5..346ac3b059dc 100644 --- a/barretenberg/cpp/src/barretenberg/vm/avm/trace/trace.cpp +++ b/barretenberg/cpp/src/barretenberg/vm/avm/trace/trace.cpp @@ -288,7 +288,8 @@ void AvmTraceBuilder::insert_private_revertible_state(const std::vector& sil for (size_t i = 0; i < siloed_note_hashes.size(); i++) { size_t note_index_in_tx = i + get_inserted_note_hashes_count(); - FF nonce = AvmMerkleTreeTraceBuilder::unconstrained_compute_note_hash_nonce(get_tx_hash(), note_index_in_tx); + FF nonce = + AvmMerkleTreeTraceBuilder::unconstrained_compute_note_hash_nonce(get_first_nullifier(), note_index_in_tx); unique_note_hashes.push_back( AvmMerkleTreeTraceBuilder::unconstrained_compute_unique_note_hash(nonce, siloed_note_hashes.at(i))); } @@ -3101,8 +3102,8 @@ AvmError AvmTraceBuilder::op_emit_note_hash(uint8_t indirect, uint32_t note_hash AppendTreeHint note_hash_write_hint = execution_hints.note_hash_write_hints.at(note_hash_write_counter++); FF siloed_note_hash = AvmMerkleTreeTraceBuilder::unconstrained_silo_note_hash( current_public_call_request.contract_address, row.main_ia); - FF nonce = - AvmMerkleTreeTraceBuilder::unconstrained_compute_note_hash_nonce(get_tx_hash(), inserted_note_hashes_count); + FF nonce = 
diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp b/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp
index 883f8bcb017b..3372899d730d 100644
--- a/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp
+++ b/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp
@@ -170,6 +170,9 @@ template class alignas(64) affine_
     }
     Fq x;
     Fq y;
+
+    // Note: this serialization from typescript does not support infinity.
+    MSGPACK_FIELDS(x, y);
 };
 
 template
diff --git a/barretenberg/cpp/src/barretenberg/vm/avm/trace/execution.cpp b/barretenberg/cpp/src/barretenberg/vm/avm/trace/execution.cpp
index 7855c9915137..a06cbfd8fb42 100644
--- a/barretenberg/cpp/src/barretenberg/vm/avm/trace/execution.cpp
+++ b/barretenberg/cpp/src/barretenberg/vm/avm/trace/execution.cpp
@@ -1,5 +1,6 @@
 #include "barretenberg/vm/avm/trace/execution.hpp"
 #include "barretenberg/bb/log.hpp"
+#include "barretenberg/common/log.hpp"
 #include "barretenberg/common/serialize.hpp"
 #include "barretenberg/common/thread.hpp"
 #include "barretenberg/common/throw_or_abort.hpp"
@@ -31,6 +32,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -195,7 +197,7 @@ void show_trace_info(const auto& trace)
         std::string fullnesses;
         for (size_t j = i; j < i + 10 && j < column_stats.size(); j++) {
             const auto& stat = column_stats.at(j);
-            fullnesses += std::format("{:3}: {:3}% ", stat.column_number, stat.fullness);
+            fullnesses += format(std::setw(3), stat.column_number, ": ", std::setw(3), stat.fullness, "% ");
         }
         vinfo(fullnesses);
     }
diff --git a/barretenberg/cpp/src/barretenberg/vm/avm/trace/trace.cpp b/barretenberg/cpp/src/barretenberg/vm/avm/trace/trace.cpp
index cd7e26908bd5..346ac3b059dc 100644
--- a/barretenberg/cpp/src/barretenberg/vm/avm/trace/trace.cpp
+++ b/barretenberg/cpp/src/barretenberg/vm/avm/trace/trace.cpp
@@ -288,7 +288,8 @@ void AvmTraceBuilder::insert_private_revertible_state(const std::vector& sil
     for (size_t i = 0; i < siloed_note_hashes.size(); i++) {
         size_t note_index_in_tx = i + get_inserted_note_hashes_count();
 
-        FF nonce = AvmMerkleTreeTraceBuilder::unconstrained_compute_note_hash_nonce(get_tx_hash(), note_index_in_tx);
+        FF nonce =
+            AvmMerkleTreeTraceBuilder::unconstrained_compute_note_hash_nonce(get_first_nullifier(), note_index_in_tx);
         unique_note_hashes.push_back(
             AvmMerkleTreeTraceBuilder::unconstrained_compute_unique_note_hash(nonce, siloed_note_hashes.at(i)));
     }
@@ -3101,8 +3102,8 @@ AvmError AvmTraceBuilder::op_emit_note_hash(uint8_t indirect, uint32_t note_hash
     AppendTreeHint note_hash_write_hint = execution_hints.note_hash_write_hints.at(note_hash_write_counter++);
     FF siloed_note_hash = AvmMerkleTreeTraceBuilder::unconstrained_silo_note_hash(
         current_public_call_request.contract_address, row.main_ia);
-    FF nonce =
-        AvmMerkleTreeTraceBuilder::unconstrained_compute_note_hash_nonce(get_tx_hash(), inserted_note_hashes_count);
+    FF nonce =
+        AvmMerkleTreeTraceBuilder::unconstrained_compute_note_hash_nonce(get_first_nullifier(),
+                                                                         inserted_note_hashes_count);
     FF unique_note_hash = AvmMerkleTreeTraceBuilder::unconstrained_compute_unique_note_hash(nonce, siloed_note_hash);
 
     ASSERT(unique_note_hash == note_hash_write_hint.leaf_value);
diff --git a/barretenberg/cpp/src/barretenberg/vm/avm/trace/trace.hpp b/barretenberg/cpp/src/barretenberg/vm/avm/trace/trace.hpp
index 0ae4e06d904f..a2c750d633e6 100644
--- a/barretenberg/cpp/src/barretenberg/vm/avm/trace/trace.hpp
+++ b/barretenberg/cpp/src/barretenberg/vm/avm/trace/trace.hpp
@@ -395,7 +395,7 @@ class AvmTraceBuilder {
     uint32_t get_inserted_note_hashes_count();
     uint32_t get_inserted_nullifiers_count();
     uint32_t get_public_data_writes_count();
-    FF get_tx_hash() const { return public_inputs.previous_non_revertible_accumulated_data.nullifiers[0]; }
+    FF get_first_nullifier() const { return public_inputs.previous_non_revertible_accumulated_data.nullifiers[0]; }
 
     // TODO: remove these once everything is constrained.
     AvmMemoryTag unconstrained_get_memory_tag(AddressWithMode addr);
diff --git a/barretenberg/cpp/src/barretenberg/vm2/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/vm2/CMakeLists.txt
new file mode 100644
index 000000000000..8084b7b8306e
--- /dev/null
+++ b/barretenberg/cpp/src/barretenberg/vm2/CMakeLists.txt
@@ -0,0 +1,3 @@
+if(NOT DISABLE_AZTEC_VM)
+  barretenberg_module(vm2 sumcheck stdlib_honk_verifier)
+endif()
\ No newline at end of file
diff --git a/barretenberg/cpp/src/barretenberg/vm2/avm_api.cpp b/barretenberg/cpp/src/barretenberg/vm2/avm_api.cpp
new file mode 100644
index 000000000000..8dddc62058d6
--- /dev/null
+++ b/barretenberg/cpp/src/barretenberg/vm2/avm_api.cpp
@@ -0,0 +1,58 @@
+#include "barretenberg/vm2/avm_api.hpp"
+
+#include "barretenberg/vm/stats.hpp"
+#include "barretenberg/vm2/proving_helper.hpp"
+#include "barretenberg/vm2/simulation_helper.hpp"
+#include "barretenberg/vm2/tracegen_helper.hpp"
+
+namespace bb::avm2 {
+
+using namespace bb::avm2::simulation;
+
+std::pair<AvmAPI::AvmProof, AvmAPI::AvmVerificationKey> AvmAPI::prove(const AvmAPI::ProvingInputs& inputs)
+{
+    // Simulate.
+    info("Simulating...");
+    AvmSimulationHelper simulation_helper(inputs);
+    auto events = AVM_TRACK_TIME_V("simulation/all", simulation_helper.simulate());
+
+    // Generate trace.
+    info("Generating trace...");
+    AvmTraceGenHelper tracegen_helper;
+    auto trace = AVM_TRACK_TIME_V("tracegen/all", tracegen_helper.generate_trace(std::move(events)));
+
+    // Prove.
+    info("Proving...");
+    AvmProvingHelper proving_helper;
+    auto [proof, vk] = AVM_TRACK_TIME_V("proving/all", proving_helper.prove(std::move(trace)));
+
+    info("Done!");
+    return { std::move(proof), std::move(vk) };
+}
+
+bool AvmAPI::check_circuit(const AvmAPI::ProvingInputs& inputs)
+{
+    // Simulate.
+    info("Simulating...");
+    AvmSimulationHelper simulation_helper(inputs);
+    auto events = AVM_TRACK_TIME_V("simulation/all", simulation_helper.simulate());
+
+    // Generate trace.
+    info("Generating trace...");
+    AvmTraceGenHelper tracegen_helper;
+    auto trace = AVM_TRACK_TIME_V("tracegen/all", tracegen_helper.generate_trace(std::move(events)));
+
+    // Check circuit.
+    info("Checking circuit...");
+    AvmProvingHelper proving_helper;
+    return proving_helper.check_circuit(std::move(trace));
+}
+
+bool AvmAPI::verify(const AvmProof& proof, const PublicInputs& pi, const AvmVerificationKey& vk_data)
+{
+    info("Verifying...");
+    AvmProvingHelper proving_helper;
+    return AVM_TRACK_TIME_V("verifing/all", proving_helper.verify(proof, pi, vk_data));
+}
+
+} // namespace bb::avm2
\ No newline at end of file
diff --git a/barretenberg/cpp/src/barretenberg/vm2/avm_api.hpp b/barretenberg/cpp/src/barretenberg/vm2/avm_api.hpp
new file mode 100644
index 000000000000..9e94e922c5a9
--- /dev/null
+++ b/barretenberg/cpp/src/barretenberg/vm2/avm_api.hpp
@@ -0,0 +1,24 @@
+#pragma once
+
+#include
+
+#include "barretenberg/vm2/common/avm_inputs.hpp"
+#include "barretenberg/vm2/proving_helper.hpp"
+
+namespace bb::avm2 {
+
+class AvmAPI {
+  public:
+    using AvmProof = AvmProvingHelper::Proof;
+    using AvmVerificationKey = std::vector;
+    using ProvingInputs = AvmProvingInputs;
+
+    AvmAPI() = default;
+
+    // NOTE: The public inputs are NOT part of the proof.
+    std::pair<AvmProof, AvmVerificationKey> prove(const ProvingInputs& inputs);
+    bool check_circuit(const ProvingInputs& inputs);
+    bool verify(const AvmProof& proof, const PublicInputs& pi, const AvmVerificationKey& vk_data);
+};
+
+} // namespace bb::avm2
\ No newline at end of file
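Not part of the diff: an illustrative end-to-end use of `AvmAPI`, showing the simulate → tracegen → prove → verify flow the implementation above follows. The driver function name is hypothetical, and the proving inputs and matching public inputs are assumed to be produced elsewhere (typically deserialised from msgpack).

```cpp
#include <stdexcept>

#include "barretenberg/vm2/avm_api.hpp"

using namespace bb::avm2;

// Hypothetical driver; `inputs` and `public_inputs` are supplied by the caller.
void prove_and_verify(const AvmAPI::ProvingInputs& inputs, const PublicInputs& public_inputs)
{
    AvmAPI api;

    // Cheap witness sanity check before committing to a full proof.
    if (!api.check_circuit(inputs)) {
        throw std::runtime_error("AVM circuit check failed");
    }

    auto [proof, vk] = api.prove(inputs);

    // The public inputs are not embedded in the proof, so the verifier takes them explicitly.
    if (!api.verify(proof, public_inputs, vk)) {
        throw std::runtime_error("AVM proof did not verify");
    }
}
```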
+ info("Checking circuit..."); + AvmProvingHelper proving_helper; + return proving_helper.check_circuit(std::move(trace)); +} + +bool AvmAPI::verify(const AvmProof& proof, const PublicInputs& pi, const AvmVerificationKey& vk_data) +{ + info("Verifying..."); + AvmProvingHelper proving_helper; + return AVM_TRACK_TIME_V("verifing/all", proving_helper.verify(proof, pi, vk_data)); +} + +} // namespace bb::avm2 \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/vm2/avm_api.hpp b/barretenberg/cpp/src/barretenberg/vm2/avm_api.hpp new file mode 100644 index 000000000000..9e94e922c5a9 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/vm2/avm_api.hpp @@ -0,0 +1,24 @@ +#pragma once + +#include + +#include "barretenberg/vm2/common/avm_inputs.hpp" +#include "barretenberg/vm2/proving_helper.hpp" + +namespace bb::avm2 { + +class AvmAPI { + public: + using AvmProof = AvmProvingHelper::Proof; + using AvmVerificationKey = std::vector; + using ProvingInputs = AvmProvingInputs; + + AvmAPI() = default; + + // NOTE: The public inputs are NOT part of the proof. + std::pair prove(const ProvingInputs& inputs); + bool check_circuit(const ProvingInputs& inputs); + bool verify(const AvmProof& proof, const PublicInputs& pi, const AvmVerificationKey& vk_data); +}; + +} // namespace bb::avm2 \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/vm2/common/ankerl_map.hpp b/barretenberg/cpp/src/barretenberg/vm2/common/ankerl_map.hpp new file mode 100644 index 000000000000..6342ce0171ad --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/vm2/common/ankerl_map.hpp @@ -0,0 +1,2126 @@ +///////////////////////// ankerl::unordered_dense::{map, set} ///////////////////////// + +// A fast & densely stored hashmap and hashset based on robin-hood backward shift deletion. +// Version 4.5.0 +// https://github.com/martinus/unordered_dense +// +// Licensed under the MIT License . +// SPDX-License-Identifier: MIT +// Copyright (c) 2022-2024 Martin Leitner-Ankerl +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ +#ifndef ANKERL_UNORDERED_DENSE_H +#define ANKERL_UNORDERED_DENSE_H + +// see https://semver.org/spec/v2.0.0.html +#define ANKERL_UNORDERED_DENSE_VERSION_MAJOR 4 // NOLINT(cppcoreguidelines-macro-usage) incompatible API changes +#define ANKERL_UNORDERED_DENSE_VERSION_MINOR \ + 5 // NOLINT(cppcoreguidelines-macro-usage) backwards compatible functionality +#define ANKERL_UNORDERED_DENSE_VERSION_PATCH 0 // NOLINT(cppcoreguidelines-macro-usage) backwards compatible bug fixes + +// API versioning with inline namespace, see https://www.foonathan.net/2018/11/inline-namespaces/ + +// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) +#define ANKERL_UNORDERED_DENSE_VERSION_CONCAT1(major, minor, patch) v##major##_##minor##_##patch +// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) +#define ANKERL_UNORDERED_DENSE_VERSION_CONCAT(major, minor, patch) \ + ANKERL_UNORDERED_DENSE_VERSION_CONCAT1(major, minor, patch) +#define ANKERL_UNORDERED_DENSE_NAMESPACE \ + ANKERL_UNORDERED_DENSE_VERSION_CONCAT(ANKERL_UNORDERED_DENSE_VERSION_MAJOR, \ + ANKERL_UNORDERED_DENSE_VERSION_MINOR, \ + ANKERL_UNORDERED_DENSE_VERSION_PATCH) + +#if defined(_MSVC_LANG) +#define ANKERL_UNORDERED_DENSE_CPP_VERSION _MSVC_LANG +#else +#define ANKERL_UNORDERED_DENSE_CPP_VERSION __cplusplus +#endif + +#if defined(__GNUC__) +// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) +#define ANKERL_UNORDERED_DENSE_PACK(decl) decl __attribute__((__packed__)) +#elif defined(_MSC_VER) +// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) +#define ANKERL_UNORDERED_DENSE_PACK(decl) __pragma(pack(push, 1)) decl __pragma(pack(pop)) +#endif + +// exceptions +#if defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND) +#define ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() 1 // NOLINT(cppcoreguidelines-macro-usage) +#else +#define ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() 0 // NOLINT(cppcoreguidelines-macro-usage) +#endif +#ifdef _MSC_VER +#define ANKERL_UNORDERED_DENSE_NOINLINE __declspec(noinline) +#else +#define ANKERL_UNORDERED_DENSE_NOINLINE __attribute__((noinline)) +#endif + +// defined in unordered_dense.cpp +#if !defined(ANKERL_UNORDERED_DENSE_EXPORT) +#define ANKERL_UNORDERED_DENSE_EXPORT +#endif + +#if ANKERL_UNORDERED_DENSE_CPP_VERSION < 201703L +#error ankerl::unordered_dense requires C++17 or higher +#else +#include // for array +#include // for uint64_t, uint32_t, uint8_t, UINT64_C +#include // for size_t, memcpy, memset +#include // for equal_to, hash +#include // for initializer_list +#include // for pair, distance +#include // for numeric_limits +#include // for allocator, allocator_traits, shared_ptr +#include // for optional +#include // for out_of_range +#include // for basic_string +#include // for basic_string_view, hash +#include // for forward_as_tuple +#include // for enable_if_t, declval, conditional_t, ena... +#include // for forward, exchange, pair, as_const, piece... 
+#include // for vector +#if ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() == 0 +#include // for abort +#endif + +#if defined(__has_include) && !defined(ANKERL_UNORDERED_DENSE_DISABLE_PMR) +#if __has_include() +#define ANKERL_UNORDERED_DENSE_PMR std::pmr // NOLINT(cppcoreguidelines-macro-usage) +#include // for polymorphic_allocator +#elif __has_include() +#define ANKERL_UNORDERED_DENSE_PMR std::experimental::pmr // NOLINT(cppcoreguidelines-macro-usage) +#include // for polymorphic_allocator +#endif +#endif + +#if defined(_MSC_VER) && defined(_M_X64) +#include +#pragma intrinsic(_umul128) +#endif + +#if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__) +#define ANKERL_UNORDERED_DENSE_LIKELY(x) __builtin_expect(x, 1) // NOLINT(cppcoreguidelines-macro-usage) +#define ANKERL_UNORDERED_DENSE_UNLIKELY(x) __builtin_expect(x, 0) // NOLINT(cppcoreguidelines-macro-usage) +#else +#define ANKERL_UNORDERED_DENSE_LIKELY(x) (x) // NOLINT(cppcoreguidelines-macro-usage) +#define ANKERL_UNORDERED_DENSE_UNLIKELY(x) (x) // NOLINT(cppcoreguidelines-macro-usage) +#endif + +namespace ankerl::unordered_dense { +inline namespace ANKERL_UNORDERED_DENSE_NAMESPACE { + +namespace detail { + +#if ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() + +// make sure this is not inlined as it is slow and dramatically enlarges code, thus making other +// inlinings more difficult. Throws are also generally the slow path. +[[noreturn]] inline ANKERL_UNORDERED_DENSE_NOINLINE void on_error_key_not_found() +{ + throw std::out_of_range("ankerl::unordered_dense::map::at(): key not found"); +} +[[noreturn]] inline ANKERL_UNORDERED_DENSE_NOINLINE void on_error_bucket_overflow() +{ + throw std::overflow_error("ankerl::unordered_dense: reached max bucket size, cannot increase size"); +} +[[noreturn]] inline ANKERL_UNORDERED_DENSE_NOINLINE void on_error_too_many_elements() +{ + throw std::out_of_range("ankerl::unordered_dense::map::replace(): too many elements"); +} + +#else + +[[noreturn]] inline void on_error_key_not_found() +{ + abort(); +} +[[noreturn]] inline void on_error_bucket_overflow() +{ + abort(); +} +[[noreturn]] inline void on_error_too_many_elements() +{ + abort(); +} + +#endif + +} // namespace detail + +// hash /////////////////////////////////////////////////////////////////////// + +// This is a stripped-down implementation of wyhash: https://github.com/wangyi-fudan/wyhash +// No big-endian support (because different values on different machines don't matter), +// hardcodes seed and the secret, reformats the code, and clang-tidy fixes. +namespace detail::wyhash { + +inline void mum(uint64_t* a, uint64_t* b) +{ +#if defined(__SIZEOF_INT128__) + __uint128_t r = *a; + r *= *b; + *a = static_cast(r); + *b = static_cast(r >> 64U); +#elif defined(_MSC_VER) && defined(_M_X64) + *a = _umul128(*a, *b, b); +#else + uint64_t ha = *a >> 32U; + uint64_t hb = *b >> 32U; + uint64_t la = static_cast(*a); + uint64_t lb = static_cast(*b); + uint64_t hi{}; + uint64_t lo{}; + uint64_t rh = ha * hb; + uint64_t rm0 = ha * lb; + uint64_t rm1 = hb * la; + uint64_t rl = la * lb; + uint64_t t = rl + (rm0 << 32U); + auto c = static_cast(t < rl); + lo = t + (rm1 << 32U); + c += static_cast(lo < t); + hi = rh + (rm0 >> 32U) + (rm1 >> 32U) + c; + *a = lo; + *b = hi; +#endif +} + +// multiply and xor mix function, aka MUM +[[nodiscard]] inline auto mix(uint64_t a, uint64_t b) -> uint64_t +{ + mum(&a, &b); + return a ^ b; +} + +// read functions. WARNING: we don't care about endianness, so results are different on big endian! 
+[[nodiscard]] inline auto r8(const uint8_t* p) -> uint64_t +{ + uint64_t v{}; + std::memcpy(&v, p, 8U); + return v; +} + +[[nodiscard]] inline auto r4(const uint8_t* p) -> uint64_t +{ + uint32_t v{}; + std::memcpy(&v, p, 4); + return v; +} + +// reads 1, 2, or 3 bytes +[[nodiscard]] inline auto r3(const uint8_t* p, size_t k) -> uint64_t +{ + return (static_cast(p[0]) << 16U) | (static_cast(p[k >> 1U]) << 8U) | p[k - 1]; +} + +[[maybe_unused]] [[nodiscard]] inline auto hash(void const* key, size_t len) -> uint64_t +{ + static constexpr auto secret = std::array{ UINT64_C(0xa0761d6478bd642f), + UINT64_C(0xe7037ed1a0b428db), + UINT64_C(0x8ebc6af09c88c6e3), + UINT64_C(0x589965cc75374cc3) }; + + auto const* p = static_cast(key); + uint64_t seed = secret[0]; + uint64_t a{}; + uint64_t b{}; + if (ANKERL_UNORDERED_DENSE_LIKELY(len <= 16)) { + if (ANKERL_UNORDERED_DENSE_LIKELY(len >= 4)) { + a = (r4(p) << 32U) | r4(p + ((len >> 3U) << 2U)); + b = (r4(p + len - 4) << 32U) | r4(p + len - 4 - ((len >> 3U) << 2U)); + } else if (ANKERL_UNORDERED_DENSE_LIKELY(len > 0)) { + a = r3(p, len); + b = 0; + } else { + a = 0; + b = 0; + } + } else { + size_t i = len; + if (ANKERL_UNORDERED_DENSE_UNLIKELY(i > 48)) { + uint64_t see1 = seed; + uint64_t see2 = seed; + do { + seed = mix(r8(p) ^ secret[1], r8(p + 8) ^ seed); + see1 = mix(r8(p + 16) ^ secret[2], r8(p + 24) ^ see1); + see2 = mix(r8(p + 32) ^ secret[3], r8(p + 40) ^ see2); + p += 48; + i -= 48; + } while (ANKERL_UNORDERED_DENSE_LIKELY(i > 48)); + seed ^= see1 ^ see2; + } + while (ANKERL_UNORDERED_DENSE_UNLIKELY(i > 16)) { + seed = mix(r8(p) ^ secret[1], r8(p + 8) ^ seed); + i -= 16; + p += 16; + } + a = r8(p + i - 16); + b = r8(p + i - 8); + } + + return mix(secret[1] ^ len, mix(a ^ secret[1], b ^ seed)); +} + +[[nodiscard]] inline auto hash(uint64_t x) -> uint64_t +{ + return detail::wyhash::mix(x, UINT64_C(0x9E3779B97F4A7C15)); +} + +} // namespace detail::wyhash + +ANKERL_UNORDERED_DENSE_EXPORT template struct hash { + auto operator()(T const& obj) const + noexcept(noexcept(std::declval>().operator()(std::declval()))) -> uint64_t + { + return std::hash{}(obj); + } +}; + +template struct hash::is_avalanching> { + using is_avalanching = void; + auto operator()(T const& obj) const + noexcept(noexcept(std::declval>().operator()(std::declval()))) -> uint64_t + { + return std::hash{}(obj); + } +}; + +template struct hash> { + using is_avalanching = void; + auto operator()(std::basic_string const& str) const noexcept -> uint64_t + { + return detail::wyhash::hash(str.data(), sizeof(CharT) * str.size()); + } +}; + +template struct hash> { + using is_avalanching = void; + auto operator()(std::basic_string_view const& sv) const noexcept -> uint64_t + { + return detail::wyhash::hash(sv.data(), sizeof(CharT) * sv.size()); + } +}; + +template struct hash { + using is_avalanching = void; + auto operator()(T* ptr) const noexcept -> uint64_t + { + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast) + return detail::wyhash::hash(reinterpret_cast(ptr)); + } +}; + +template struct hash> { + using is_avalanching = void; + auto operator()(std::unique_ptr const& ptr) const noexcept -> uint64_t + { + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast) + return detail::wyhash::hash(reinterpret_cast(ptr.get())); + } +}; + +template struct hash> { + using is_avalanching = void; + auto operator()(std::shared_ptr const& ptr) const noexcept -> uint64_t + { + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast) + return 
detail::wyhash::hash(reinterpret_cast(ptr.get())); + } +}; + +template struct hash::value>::type> { + using is_avalanching = void; + auto operator()(Enum e) const noexcept -> uint64_t + { + using underlying = typename std::underlying_type_t; + return detail::wyhash::hash(static_cast(e)); + } +}; + +template struct tuple_hash_helper { + // Converts the value into 64bit. If it is an integral type, just cast it. Mixing is doing the rest. + // If it isn't an integral we need to hash it. + template [[nodiscard]] constexpr static auto to64(Arg const& arg) -> uint64_t + { + if constexpr (std::is_integral_v || std::is_enum_v) { + return static_cast(arg); + } else { + return hash{}(arg); + } + } + + [[nodiscard]] static auto mix64(uint64_t state, uint64_t v) -> uint64_t + { + return detail::wyhash::mix(state + v, uint64_t{ 0x9ddfea08eb382d69 }); + } + + // Creates a buffer that holds all the data from each element of the tuple. If possible we memcpy the data directly. + // If not, we hash the object and use this for the array. Size of the array is known at compile time, and memcpy is + // optimized away, so filling the buffer is highly efficient. Finally, call wyhash with this buffer. + template + [[nodiscard]] static auto calc_hash(T const& t, std::index_sequence) noexcept -> uint64_t + { + auto h = uint64_t{}; + ((h = mix64(h, to64(std::get(t)))), ...); + return h; + } +}; + +template struct hash> : tuple_hash_helper { + using is_avalanching = void; + auto operator()(std::tuple const& t) const noexcept -> uint64_t + { + return tuple_hash_helper::calc_hash(t, std::index_sequence_for{}); + } +}; + +template struct hash> : tuple_hash_helper { + using is_avalanching = void; + auto operator()(std::pair const& t) const noexcept -> uint64_t + { + return tuple_hash_helper::calc_hash(t, std::index_sequence_for{}); + } +}; + +// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) +#define ANKERL_UNORDERED_DENSE_HASH_STATICCAST(T) \ + template <> struct hash { \ + using is_avalanching = void; \ + auto operator()(T const& obj) const noexcept -> uint64_t \ + { \ + return detail::wyhash::hash(static_cast(obj)); \ + } \ + } + +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wuseless-cast" +#endif +// see https://en.cppreference.com/w/cpp/utility/hash +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(bool); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(signed char); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned char); +#if ANKERL_UNORDERED_DENSE_CPP_VERSION >= 202002L && defined(__cpp_char8_t) +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char8_t); +#endif +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char16_t); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char32_t); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(wchar_t); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(short); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned short); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(int); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned int); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(long); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(long long); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned long); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned long long); + +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC diagnostic pop +#endif + +// bucket_type ////////////////////////////////////////////////////////// + +namespace bucket_type { + +struct standard { + static constexpr uint32_t dist_inc = 1U << 8U; // skip 1 byte fingerprint + static constexpr uint32_t 
fingerprint_mask = dist_inc - 1; // mask for 1 byte of fingerprint + + uint32_t m_dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash + uint32_t m_value_idx; // index into the m_values vector. +}; + +ANKERL_UNORDERED_DENSE_PACK(struct big { + static constexpr uint32_t dist_inc = 1U << 8U; // skip 1 byte fingerprint + static constexpr uint32_t fingerprint_mask = dist_inc - 1; // mask for 1 byte of fingerprint + + uint32_t m_dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash + size_t m_value_idx; // index into the m_values vector. +}); + +} // namespace bucket_type + +namespace detail { + +struct nonesuch {}; +struct default_container_t {}; + +template class Op, class... Args> struct detector { + using value_t = std::false_type; + using type = Default; +}; + +template class Op, class... Args> +struct detector>, Op, Args...> { + using value_t = std::true_type; + using type = Op; +}; + +template