diff --git a/.release-please-manifest.json b/.release-please-manifest.json index b87359aec88..69b197376db 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,7 +1,7 @@ { - ".": "0.65.0", + ".": "0.65.1", "yarn-project/cli": "0.35.1", - "yarn-project/aztec": "0.65.0", - "barretenberg": "0.65.0", - "barretenberg/ts": "0.65.0" + "yarn-project/aztec": "0.65.1", + "barretenberg": "0.65.1", + "barretenberg/ts": "0.65.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index d195810fc47..19379f1aa57 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,47 @@ # Changelog +## [0.65.1](https://github.com/AztecProtocol/aztec-packages/compare/aztec-packages-v0.65.0...aztec-packages-v0.65.1) (2024-11-27) + + +### Features + +* Add total mana used to header ([#9868](https://github.com/AztecProtocol/aztec-packages/issues/9868)) ([2478d19](https://github.com/AztecProtocol/aztec-packages/commit/2478d1909db2d79cc0cdd3063dc2ac4e1eaedce3)) +* Assert metrics in network tests ([#10215](https://github.com/AztecProtocol/aztec-packages/issues/10215)) ([9380c0f](https://github.com/AztecProtocol/aztec-packages/commit/9380c0f68bc01722b60539034a9f064606e1b119)) +* Avm inserts nullifiers from private ([#10129](https://github.com/AztecProtocol/aztec-packages/issues/10129)) ([3fc0c7c](https://github.com/AztecProtocol/aztec-packages/commit/3fc0c7c7d4b6b4052d185dbb795a7fe3d724f09f)) +* Burn congestion fee ([#10231](https://github.com/AztecProtocol/aztec-packages/issues/10231)) ([20a33f2](https://github.com/AztecProtocol/aztec-packages/commit/20a33f2d097d7fd3bd67eabf2d2254b43d5723d0)) +* Configure world state block history ([#10216](https://github.com/AztecProtocol/aztec-packages/issues/10216)) ([01eb392](https://github.com/AztecProtocol/aztec-packages/commit/01eb392f15995f344e40aa8f8e41a28f6f5b825d)) +* Full IPA Recursive Verifier ([#10189](https://github.com/AztecProtocol/aztec-packages/issues/10189)) ([b5783d3](https://github.com/AztecProtocol/aztec-packages/commit/b5783d3945959056d24aa3d988e9ca9efd3ec224)) +* Integrate fee into rollup ([#10176](https://github.com/AztecProtocol/aztec-packages/issues/10176)) ([12744d6](https://github.com/AztecProtocol/aztec-packages/commit/12744d6bd9ca6f4c4c1ef43ddd919e81cffb7a17)) +* Speed up transaction execution ([#10172](https://github.com/AztecProtocol/aztec-packages/issues/10172)) ([da265b6](https://github.com/AztecProtocol/aztec-packages/commit/da265b6b7d61a0d991fa23bd044f711513a0e86c)) +* Using current gas prices in cli-wallet ([#10105](https://github.com/AztecProtocol/aztec-packages/issues/10105)) ([15ffeea](https://github.com/AztecProtocol/aztec-packages/commit/15ffeea8ef47b619f9922793be7e3380964297a3)) + + +### Bug Fixes + +* Add pako as a dependency in bb.js ([#10186](https://github.com/AztecProtocol/aztec-packages/issues/10186)) ([b773c14](https://github.com/AztecProtocol/aztec-packages/commit/b773c14a8fe8bf425dc755b3a156e500e9924c1e)) +* **avm:** Execution test ordering ([#10226](https://github.com/AztecProtocol/aztec-packages/issues/10226)) ([49b4a6c](https://github.com/AztecProtocol/aztec-packages/commit/49b4a6c07f39711ad2a0477e1fad11e11b8ee23c)) +* Deploy preview master ([#10227](https://github.com/AztecProtocol/aztec-packages/issues/10227)) 
([321a175](https://github.com/AztecProtocol/aztec-packages/commit/321a17531eb5d440f2726ff32bc6e157a732a8ed)) +* Docs preview on master ([#10254](https://github.com/AztecProtocol/aztec-packages/issues/10254)) ([37684cc](https://github.com/AztecProtocol/aztec-packages/commit/37684ccc686c04c4f2e069eee9e4c356e891a864)) +* Flamegraph script (and enable > 1 circuit) ([#10065](https://github.com/AztecProtocol/aztec-packages/issues/10065)) ([0c3b7ef](https://github.com/AztecProtocol/aztec-packages/commit/0c3b7ef956774056d3ff51a52117b6656036d21b)) +* Use current base fee for public fee payment ([#10230](https://github.com/AztecProtocol/aztec-packages/issues/10230)) ([f081d80](https://github.com/AztecProtocol/aztec-packages/commit/f081d8013ce37a2109750424d1ed615411d9056a)) + + +### Miscellaneous + +* Add traces and histograms to avm simulator ([#10233](https://github.com/AztecProtocol/aztec-packages/issues/10233)) ([e83726d](https://github.com/AztecProtocol/aztec-packages/commit/e83726dddbc7ea98c86b99a7439e39f076a63b25)), closes [#10146](https://github.com/AztecProtocol/aztec-packages/issues/10146) +* Avm-proving and avm-integration tests do not require simulator to export function with jest mocks ([#10228](https://github.com/AztecProtocol/aztec-packages/issues/10228)) ([f28fcdb](https://github.com/AztecProtocol/aztec-packages/commit/f28fcdb1e41aa353f0fdc2233ea66ae51ef745a4)) +* **avm:** Handle parsing error ([#10203](https://github.com/AztecProtocol/aztec-packages/issues/10203)) ([3c623fc](https://github.com/AztecProtocol/aztec-packages/commit/3c623fc2d857d6792b557dc7d1ccb929274046bb)), closes [#9770](https://github.com/AztecProtocol/aztec-packages/issues/9770) +* **avm:** Zero initialization in avm public inputs and execution test fixes ([#10238](https://github.com/AztecProtocol/aztec-packages/issues/10238)) ([0c7c4c9](https://github.com/AztecProtocol/aztec-packages/commit/0c7c4c9bb0c01067abe57ccd06962d71c7279aa0)) +* Bump timeout for after-hook for data store test again ([#10240](https://github.com/AztecProtocol/aztec-packages/issues/10240)) ([52047f0](https://github.com/AztecProtocol/aztec-packages/commit/52047f05495ef95a778e8669fc4e115cacb590a0)) +* CIVC VK ([#10223](https://github.com/AztecProtocol/aztec-packages/issues/10223)) ([089c34c](https://github.com/AztecProtocol/aztec-packages/commit/089c34cc3e9fb5cb493096246525c2205e646204)) +* Declare global types ([#10206](https://github.com/AztecProtocol/aztec-packages/issues/10206)) ([7b2e343](https://github.com/AztecProtocol/aztec-packages/commit/7b2e343a61eb9c74f365758530deca87b40891d0)) +* Delete old serialization methods ([#9951](https://github.com/AztecProtocol/aztec-packages/issues/9951)) ([10d3f6f](https://github.com/AztecProtocol/aztec-packages/commit/10d3f6fe851dc73f5f12edec26b028fe526f0be6)) +* Fix migration notes ([#10252](https://github.com/AztecProtocol/aztec-packages/issues/10252)) ([05bdcd5](https://github.com/AztecProtocol/aztec-packages/commit/05bdcd51d45f35a3ed683c1a90bb8e9370533fb0)) +* Pull out some sync changes ([#10245](https://github.com/AztecProtocol/aztec-packages/issues/10245)) ([1bfc15e](https://github.com/AztecProtocol/aztec-packages/commit/1bfc15e08873a1f0f3743e259f418b70426b3f25)) +* Remove docs from sync ([#10241](https://github.com/AztecProtocol/aztec-packages/issues/10241)) 
([eeea0aa](https://github.com/AztecProtocol/aztec-packages/commit/eeea0aade045bfba73ee1e6458d5815163f55dd6)) +* Replace relative paths to noir-protocol-circuits ([e7690ca](https://github.com/AztecProtocol/aztec-packages/commit/e7690ca2e441ca71f8a02d39ed5fb2c7e9ba533d)) +* Stop tracing and limiting read requests in avm ([#10220](https://github.com/AztecProtocol/aztec-packages/issues/10220)) ([7d5c33d](https://github.com/AztecProtocol/aztec-packages/commit/7d5c33d1f046e1b8b3f367ff1682b9fd6272e2fd)) + ## [0.65.0](https://github.com/AztecProtocol/aztec-packages/compare/aztec-packages-v0.64.0...aztec-packages-v0.65.0) (2024-11-26) diff --git a/barretenberg/.gitrepo b/barretenberg/.gitrepo index ac728011d0f..0072026244a 100644 --- a/barretenberg/.gitrepo +++ b/barretenberg/.gitrepo @@ -6,7 +6,7 @@ [subrepo] remote = https://github.com/AztecProtocol/barretenberg branch = master - commit = 515858041cc07a84ffd7dd15c8ae6eea502914d7 - parent = f081d8013ce37a2109750424d1ed615411d9056a + commit = f61941ed102bef94fd78470362b4345ecd535bab + parent = 3392629818e6d51c01ca4c75c1ad916bb4b4fdb1 method = merge cmdver = 0.4.6 diff --git a/barretenberg/CHANGELOG.md b/barretenberg/CHANGELOG.md index 31983109118..50162ad8d1a 100644 --- a/barretenberg/CHANGELOG.md +++ b/barretenberg/CHANGELOG.md @@ -1,5 +1,28 @@ # Changelog +## [0.65.1](https://github.com/AztecProtocol/aztec-packages/compare/barretenberg-v0.65.0...barretenberg-v0.65.1) (2024-11-27) + + +### Features + +* Add total mana used to header ([#9868](https://github.com/AztecProtocol/aztec-packages/issues/9868)) ([2478d19](https://github.com/AztecProtocol/aztec-packages/commit/2478d1909db2d79cc0cdd3063dc2ac4e1eaedce3)) +* Configure world state block history ([#10216](https://github.com/AztecProtocol/aztec-packages/issues/10216)) ([01eb392](https://github.com/AztecProtocol/aztec-packages/commit/01eb392f15995f344e40aa8f8e41a28f6f5b825d)) +* Full IPA Recursive Verifier ([#10189](https://github.com/AztecProtocol/aztec-packages/issues/10189)) ([b5783d3](https://github.com/AztecProtocol/aztec-packages/commit/b5783d3945959056d24aa3d988e9ca9efd3ec224)) +* Speed up transaction execution ([#10172](https://github.com/AztecProtocol/aztec-packages/issues/10172)) ([da265b6](https://github.com/AztecProtocol/aztec-packages/commit/da265b6b7d61a0d991fa23bd044f711513a0e86c)) + + +### Bug Fixes + +* **avm:** Execution test ordering ([#10226](https://github.com/AztecProtocol/aztec-packages/issues/10226)) ([49b4a6c](https://github.com/AztecProtocol/aztec-packages/commit/49b4a6c07f39711ad2a0477e1fad11e11b8ee23c)) + + +### Miscellaneous + +* **avm:** Handle parsing error ([#10203](https://github.com/AztecProtocol/aztec-packages/issues/10203)) ([3c623fc](https://github.com/AztecProtocol/aztec-packages/commit/3c623fc2d857d6792b557dc7d1ccb929274046bb)), closes [#9770](https://github.com/AztecProtocol/aztec-packages/issues/9770) +* **avm:** Zero initialization in avm public inputs and execution test fixes ([#10238](https://github.com/AztecProtocol/aztec-packages/issues/10238)) ([0c7c4c9](https://github.com/AztecProtocol/aztec-packages/commit/0c7c4c9bb0c01067abe57ccd06962d71c7279aa0)) +* CIVC VK ([#10223](https://github.com/AztecProtocol/aztec-packages/issues/10223)) ([089c34c](https://github.com/AztecProtocol/aztec-packages/commit/089c34cc3e9fb5cb493096246525c2205e646204)) +* Pull out some sync 
changes ([#10245](https://github.com/AztecProtocol/aztec-packages/issues/10245)) ([1bfc15e](https://github.com/AztecProtocol/aztec-packages/commit/1bfc15e08873a1f0f3743e259f418b70426b3f25)) + ## [0.65.0](https://github.com/AztecProtocol/aztec-packages/compare/barretenberg-v0.64.0...barretenberg-v0.65.0) (2024-11-26) diff --git a/barretenberg/cpp/CMakeLists.txt b/barretenberg/cpp/CMakeLists.txt index bb9dcc227c7..06a8497d636 100644 --- a/barretenberg/cpp/CMakeLists.txt +++ b/barretenberg/cpp/CMakeLists.txt @@ -6,7 +6,7 @@ cmake_minimum_required(VERSION 3.24 FATAL_ERROR) project( Barretenberg DESCRIPTION "BN254 elliptic curve library, and PLONK SNARK prover" - VERSION 0.65.0 # x-release-please-version + VERSION 0.65.1 # x-release-please-version LANGUAGES CXX C ) # Insert version into `bb` config file diff --git a/barretenberg/cpp/src/barretenberg/bb/main.cpp b/barretenberg/cpp/src/barretenberg/bb/main.cpp index 45e9afdcc0d..72c6c44fff0 100644 --- a/barretenberg/cpp/src/barretenberg/bb/main.cpp +++ b/barretenberg/cpp/src/barretenberg/bb/main.cpp @@ -136,7 +136,7 @@ std::string honk_vk_to_json(std::vector& data) } /** - * @brief Proves and Verifies an ACIR circuit + * @brief Proves and verifies an ACIR circuit * * Communication: * - proc_exit: A boolean value is returned indicating whether the proof is valid. @@ -332,7 +332,6 @@ void client_ivc_prove_output_all_msgpack(const std::string& bytecodePath, using Program = acir_format::AcirProgram; using ECCVMVK = ECCVMFlavor::VerificationKey; using TranslatorVK = TranslatorFlavor::VerificationKey; - using DeciderVerificationKey = ClientIVC::DeciderVerificationKey; using namespace acir_format; @@ -378,23 +377,17 @@ void client_ivc_prove_output_all_msgpack(const std::string& bytecodePath, // Write the proof and verification keys into the working directory in 'binary' format (in practice it seems this // directory is passed by bb.js) - std::string vkPath = outputDir + "/mega_vk"; // the vk of the last circuit in the stack + std::string vkPath = outputDir + "/client_ivc_vk"; // the vk of the last circuit in the stack std::string proofPath = outputDir + "/client_ivc_proof"; - std::string translatorVkPath = outputDir + "/translator_vk"; - std::string eccVkPath = outputDir + "/ecc_vk"; auto proof = ivc.prove(); auto eccvm_vk = std::make_shared(ivc.goblin.get_eccvm_proving_key()); auto translator_vk = std::make_shared(ivc.goblin.get_translator_proving_key()); - - auto last_vk = std::make_shared(ivc.honk_vk); vinfo("ensure valid proof: ", ivc.verify(proof)); vinfo("write proof and vk data to files.."); write_file(proofPath, to_buffer(proof)); - write_file(vkPath, to_buffer(ivc.honk_vk)); - write_file(translatorVkPath, to_buffer(translator_vk)); - write_file(eccVkPath, to_buffer(eccvm_vk)); + write_file(vkPath, to_buffer(ClientIVC::VerificationKey{ ivc.honk_vk, eccvm_vk, translator_vk })); } template std::shared_ptr read_to_shared_ptr(const std::filesystem::path& path) @@ -414,24 +407,20 @@ template std::shared_ptr read_to_shared_ptr(const std::filesyste * @param accumulator_path Path to the file containing the serialized protogalaxy accumulator * @return true (resp., false) if the proof is valid (resp., invalid).
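 *
 * Round-trip sketch of the consolidated key (illustrative only; to_buffer/from_buffer, write_file and
 * read_file as used elsewhere in this file):
 * @code
 *   ClientIVC::VerificationKey vk{ ivc.honk_vk, eccvm_vk, translator_vk };
 *   write_file(vk_path, to_buffer(vk));                                              // prover side
 *   const auto loaded = from_buffer<ClientIVC::VerificationKey>(read_file(vk_path)); // verifier side
 * @endcode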
*/ -bool verify_client_ivc(const std::filesystem::path& proof_path, - const std::filesystem::path& mega_vk, - const std::filesystem::path& eccvm_vk_path, - const std::filesystem::path& translator_vk_path) +bool verify_client_ivc(const std::filesystem::path& proof_path, const std::filesystem::path& vk_path) { init_bn254_crs(1); init_grumpkin_crs(1 << 15); const auto proof = from_buffer(read_file(proof_path)); - const auto final_vk = read_to_shared_ptr(mega_vk); - final_vk->pcs_verification_key = std::make_shared>(); - - const auto eccvm_vk = read_to_shared_ptr(eccvm_vk_path); - eccvm_vk->pcs_verification_key = - std::make_shared>(eccvm_vk->circuit_size + 1); - const auto translator_vk = read_to_shared_ptr(translator_vk_path); - translator_vk->pcs_verification_key = std::make_shared>(); - const bool verified = ClientIVC::verify(proof, final_vk, eccvm_vk, translator_vk); + const auto vk = from_buffer(read_file(vk_path)); + + vk.mega->pcs_verification_key = std::make_shared>(); + vk.eccvm->pcs_verification_key = + std::make_shared>(vk.eccvm->circuit_size + 1); + vk.translator->pcs_verification_key = std::make_shared>(); + + const bool verified = ClientIVC::verify(proof, vk); vinfo("verified: ", verified); return verified; } @@ -519,10 +508,8 @@ void client_ivc_prove_output_all(const std::string& bytecodePath, // Write the proof and verification keys into the working directory in 'binary' format (in practice it seems this // directory is passed by bb.js) - std::string vkPath = outputPath + "/mega_vk"; // the vk of the last circuit in the stack + std::string vkPath = outputPath + "/client_ivc_vk"; // the vk of the last circuit in the stack std::string proofPath = outputPath + "/client_ivc_proof"; - std::string translatorVkPath = outputPath + "/translator_vk"; - std::string eccVkPath = outputPath + "/ecc_vk"; auto proof = ivc.prove(); auto eccvm_vk = std::make_shared(ivc.goblin.get_eccvm_proving_key()); @@ -531,9 +518,7 @@ void client_ivc_prove_output_all(const std::string& bytecodePath, vinfo("write proof and vk data to files.."); write_file(proofPath, to_buffer(proof)); - write_file(vkPath, to_buffer(ivc.honk_vk)); // maybe dereference - write_file(translatorVkPath, to_buffer(translator_vk)); - write_file(eccVkPath, to_buffer(eccvm_vk)); + write_file(vkPath, to_buffer(ClientIVC::VerificationKey{ ivc.honk_vk, eccvm_vk, translator_vk })); } /** @@ -544,19 +529,15 @@ void client_ivc_prove_output_all(const std::string& bytecodePath, */ void prove_tube(const std::string& output_path) { - using ClientIVC = stdlib::recursion::honk::ClientIVCRecursiveVerifier; - using StackHonkVK = typename MegaFlavor::VerificationKey; - using ECCVMVk = ECCVMFlavor::VerificationKey; - using TranslatorVk = TranslatorFlavor::VerificationKey; - using GoblinVerifierInput = ClientIVC::GoblinVerifierInput; - using VerifierInput = ClientIVC::VerifierInput; + using namespace stdlib::recursion::honk; + + using GoblinVerifierInput = ClientIVCRecursiveVerifier::GoblinVerifierInput; + using VerifierInput = ClientIVCRecursiveVerifier::VerifierInput; using Builder = UltraCircuitBuilder; using GrumpkinVk = bb::VerifierCommitmentKey; - std::string vkPath = output_path + "/mega_vk"; // the vk of the last circuit in the stack + std::string vkPath = output_path + "/client_ivc_vk"; // the vk of the last circuit in the stack std::string proofPath = output_path + "/client_ivc_proof"; - std::string translatorVkPath = output_path + "/translator_vk"; - std::string eccVkPath = output_path + "/ecc_vk"; // Note: this could be decreased once we 
optimise the size of the ClientIVC recursive verifier init_bn254_crs(1 << 25); @@ -564,17 +545,15 @@ void prove_tube(const std::string& output_path) // Read the proof and verification data from given files auto proof = from_buffer(read_file(proofPath)); - std::shared_ptr mega_vk = std::make_shared(from_buffer(read_file(vkPath))); - std::shared_ptr translator_vk = - std::make_shared(from_buffer(read_file(translatorVkPath))); - std::shared_ptr eccvm_vk = std::make_shared(from_buffer(read_file(eccVkPath))); + auto vk = from_buffer(read_file(vkPath)); + // We don't serialise and deserialise the Grumpkin SRS so initialise with circuit_size + 1 to be able to recursively // IPA. The + 1 is to satisfy IPA verification key requirements. // TODO(https://github.com/AztecProtocol/barretenberg/issues/1025) - eccvm_vk->pcs_verification_key = std::make_shared(eccvm_vk->circuit_size + 1); + vk.eccvm->pcs_verification_key = std::make_shared(vk.eccvm->circuit_size + 1); - GoblinVerifierInput goblin_verifier_input{ eccvm_vk, translator_vk }; - VerifierInput input{ mega_vk, goblin_verifier_input }; + GoblinVerifierInput goblin_verifier_input{ vk.eccvm, vk.translator }; + VerifierInput input{ vk.mega, goblin_verifier_input }; auto builder = std::make_shared(); // Preserve the public inputs that should be passed to the base rollup by making them public inputs to the tube @@ -588,9 +567,9 @@ auto offset = bb::HONK_PROOF_PUBLIC_INPUT_OFFSET; builder->add_public_variable(proof.mega_proof[i + offset]); } - ClientIVC verifier{ builder, input }; + ClientIVCRecursiveVerifier verifier{ builder, input }; - ClientIVC::Output client_ivc_rec_verifier_output = verifier.verify(proof); + ClientIVCRecursiveVerifier::Output client_ivc_rec_verifier_output = verifier.verify(proof); PairingPointAccumulatorIndices current_aggregation_object = stdlib::recursion::init_default_agg_obj_indices(*builder); @@ -1468,12 +1447,10 @@ int main(int argc, char* argv[]) } if (command == "verify_client_ivc") { std::filesystem::path output_dir = get_option(args, "-o", "./target"); - std::filesystem::path client_ivc_proof_path = output_dir / "client_ivc_proof"; - std::filesystem::path mega_vk_path = output_dir / "mega_vk"; - std::filesystem::path eccvm_vk_path = output_dir / "ecc_vk"; - std::filesystem::path translator_vk_path = output_dir / "translator_vk"; + std::filesystem::path proof_path = output_dir / "client_ivc_proof"; + std::filesystem::path vk_path = output_dir / "client_ivc_vk"; - return verify_client_ivc(client_ivc_proof_path, mega_vk_path, eccvm_vk_path, translator_vk_path) ? 0 : 1; + return verify_client_ivc(proof_path, vk_path) ? 0 : 1; } if (command == "fold_and_verify_program") { return foldAndVerifyProgram(bytecode_path, witness_path) ? 
0 : 1; diff --git a/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.cpp b/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.cpp index 8f889fc8b2c..6412b711c94 100644 --- a/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.cpp +++ b/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.cpp @@ -156,7 +156,9 @@ void ClientIVC::complete_kernel_circuit_logic(ClientCircuit& circuit) * @param circuit * @param precomputed_vk */ -void ClientIVC::accumulate(ClientCircuit& circuit, const std::shared_ptr& precomputed_vk, bool mock_vk) +void ClientIVC::accumulate(ClientCircuit& circuit, + const std::shared_ptr& precomputed_vk, + bool mock_vk) { if (auto_verify_mode && circuit.databus_propagation_data.is_kernel) { complete_kernel_circuit_logic(circuit); @@ -171,14 +173,14 @@ void ClientIVC::accumulate(ClientCircuit& circuit, const std::shared_ptr(circuit)); // Construct the proving key for circuit - std::shared_ptr proving_key; - if (!initialized) { - proving_key = std::make_shared(circuit, trace_settings); - trace_usage_tracker = ExecutionTraceUsageTracker(trace_settings); - } else { - proving_key = std::make_shared(circuit, trace_settings); - } + std::shared_ptr proving_key = std::make_shared(circuit, trace_settings); + // The commitment key is initialised with the number of points determined by the trace_settings' dyadic size. If a + // circuit overflows past the dyadic size the commitment key will not have enough points so we need to increase it + if (proving_key->proving_key.circuit_size > trace_settings.dyadic_size()) { + bn254_commitment_key = std::make_shared>(proving_key->proving_key.circuit_size); + goblin.commitment_key = bn254_commitment_key; + } proving_key->proving_key.commitment_key = bn254_commitment_key; vinfo("getting honk vk... precomputed?: ", precomputed_vk); @@ -186,13 +188,14 @@ void ClientIVC::accumulate(ClientCircuit& circuit, const std::shared_ptr(proving_key->proving_key); + honk_vk = precomputed_vk ? 
precomputed_vk : std::make_shared<MegaVerificationKey>(proving_key->proving_key); if (mock_vk) { honk_vk->set_metadata(proving_key->proving_key); } vinfo("set honk vk metadata"); - // If this is the first circuit in the IVC, use oink to complete the decider proving key and generate an oink proof + // If this is the first circuit in the IVC, use oink to complete the decider proving key and generate an oink + // proof if (!initialized) { OinkProver oink_prover{ proving_key }; vinfo("computing oink proof..."); @@ -210,8 +213,8 @@ void ClientIVC::accumulate(ClientCircuit& circuit, const std::shared_ptr(builder, TraceSettings(), bn254_commitment_key); - honk_vk = std::make_shared<VerificationKey>(decider_pk->proving_key); + honk_vk = std::make_shared<MegaVerificationKey>(decider_pk->proving_key); MegaProver prover(decider_pk); HonkProof proof = prover.construct_proof(); @@ -301,18 +304,15 @@ ClientIVC::Proof ClientIVC::prove() return { mega_proof, goblin.prove(merge_proof) }; }; -bool ClientIVC::verify(const Proof& proof, - const std::shared_ptr& mega_vk, - const std::shared_ptr& eccvm_vk, - const std::shared_ptr& translator_vk) +bool ClientIVC::verify(const Proof& proof, const VerificationKey& vk) { // Verify the hiding circuit proof - MegaVerifier verifier{ mega_vk }; + MegaVerifier verifier{ vk.mega }; bool mega_verified = verifier.verify_proof(proof.mega_proof); vinfo("Mega verified: ", mega_verified); // Goblin verification (final merge, eccvm, translator) - GoblinVerifier goblin_verifier{ eccvm_vk, translator_vk }; + GoblinVerifier goblin_verifier{ vk.eccvm, vk.translator }; bool goblin_verified = goblin_verifier.verify(proof.goblin_proof); vinfo("Goblin verified: ", goblin_verified); return goblin_verified && mega_verified; @@ -328,7 +328,7 @@ bool ClientIVC::verify(const Proof& proof) { auto eccvm_vk = std::make_shared(goblin.get_eccvm_proving_key()); auto translator_vk = std::make_shared(goblin.get_translator_proving_key()); - return verify(proof, honk_vk, eccvm_vk, translator_vk); + return verify(proof, { honk_vk, eccvm_vk, translator_vk }); } /** @@ -379,12 +379,12 @@ bool ClientIVC::prove_and_verify() * (albeit inefficient) way of separating out the cost of computing VKs from a benchmark. 
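 *
 * Usage sketch (illustrative; a separate ClientIVC instance is used for key generation because this
 * method itself accumulates the copied circuits):
 * @code
 *   ClientIVC key_gen_ivc{ trace_settings };
 *   auto vkeys = key_gen_ivc.precompute_folding_verification_keys(circuits); // circuits passed by value
 *   ClientIVC ivc{ trace_settings };
 *   for (auto [circuit, vk] : zip_view(circuits, vkeys)) {
 *       ivc.accumulate(circuit, vk); // benchmark path: VK computation cost excluded
 *   }
 * @endcode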
* * @param circuits A copy of the circuits to be accumulated (passing by reference would alter the original circuits) - * @return std::vector<std::shared_ptr<VerificationKey>> + * @return std::vector<std::shared_ptr<MegaVerificationKey>> */ -std::vector<std::shared_ptr<VerificationKey>> ClientIVC::precompute_folding_verification_keys( +std::vector<std::shared_ptr<MegaVerificationKey>> ClientIVC::precompute_folding_verification_keys( std::vector circuits) { - std::vector<std::shared_ptr<VerificationKey>> vkeys; + std::vector<std::shared_ptr<MegaVerificationKey>> vkeys; for (auto& circuit : circuits) { accumulate(circuit); diff --git a/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.hpp b/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.hpp index c0028a791ff..5fcf187a35d 100644 --- a/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.hpp +++ b/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.hpp @@ -27,7 +27,7 @@ class ClientIVC { public: using Flavor = MegaFlavor; - using VerificationKey = Flavor::VerificationKey; + using MegaVerificationKey = Flavor::VerificationKey; using FF = Flavor::FF; using FoldProof = std::vector; using MergeProof = std::vector; @@ -73,12 +73,20 @@ class ClientIVC { MSGPACK_FIELDS(mega_proof, goblin_proof); }; + struct VerificationKey { + std::shared_ptr mega; + std::shared_ptr eccvm; + std::shared_ptr translator; + + MSGPACK_FIELDS(mega, eccvm, translator); + }; + enum class QUEUE_TYPE { OINK, PG }; // for specifying type of proof in the verification queue // An entry in the native verification queue struct VerifierInputs { std::vector proof; // oink or PG - std::shared_ptr<VerificationKey> honk_verification_key; + std::shared_ptr<MegaVerificationKey> honk_verification_key; QUEUE_TYPE type; }; using VerificationQueue = std::vector; @@ -89,6 +97,7 @@ class ClientIVC { std::shared_ptr honk_verification_key; QUEUE_TYPE type; }; + using StdlibVerificationQueue = std::vector; // Utility for tracking the max size of each block across the full IVC @@ -101,7 +110,7 @@ class ClientIVC { ProverFoldOutput fold_output; // prover accumulator and fold proof std::shared_ptr verifier_accumulator; // verifier accumulator - std::shared_ptr<VerificationKey> honk_vk; // honk vk to be completed and folded into the accumulator + std::shared_ptr<MegaVerificationKey> honk_vk; // honk vk to be completed and folded into the accumulator // Set of tuples {proof, verification_key, type} to be recursively verified VerificationQueue verification_queue; @@ -127,7 +136,8 @@ class ClientIVC { bool initialized = false; // Is the IVC accumulator initialized ClientIVC(TraceSettings trace_settings = {}, bool auto_verify_mode = false) - : trace_settings(trace_settings) + : trace_usage_tracker(trace_settings) + , trace_settings(trace_settings) , auto_verify_mode(auto_verify_mode) , bn254_commitment_key(trace_settings.structure.has_value() ? std::make_shared>(trace_settings.dyadic_size()) : nullptr) @@ -158,17 +168,14 @@ class ClientIVC { * @param mock_vk A boolean to say whether the precomputed vk should have its metadata set. 
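 *
 * Minimal accumulation-loop sketch (illustrative; MockCircuitProducer is the test helper used in
 * client_ivc.test.cpp):
 * @code
 *   ClientIVC ivc{ { SMALL_TEST_STRUCTURE } };
 *   MockCircuitProducer circuit_producer;
 *   for (size_t idx = 0; idx < NUM_CIRCUITS; ++idx) {
 *       auto circuit = circuit_producer.create_next_circuit(ivc);
 *       ivc.accumulate(circuit); // no precomputed vk: one is built from the proving key
 *   }
 *   EXPECT_TRUE(ivc.prove_and_verify());
 * @endcode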
*/ void accumulate(ClientCircuit& circuit, - const std::shared_ptr<VerificationKey>& precomputed_vk = nullptr, + const std::shared_ptr<MegaVerificationKey>& precomputed_vk = nullptr, bool mock_vk = false); Proof prove(); HonkProof construct_and_prove_hiding_circuit(); - static bool verify(const Proof& proof, - const std::shared_ptr& mega_vk, - const std::shared_ptr& eccvm_vk, - const std::shared_ptr& translator_vk); + static bool verify(const Proof& proof, const VerificationKey& vk); bool verify(const Proof& proof); @@ -176,7 +183,7 @@ class ClientIVC { HonkProof decider_prove() const; - std::vector<std::shared_ptr<VerificationKey>> precompute_folding_verification_keys( + std::vector<std::shared_ptr<MegaVerificationKey>> precompute_folding_verification_keys( std::vector circuits); }; } // namespace bb \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.test.cpp b/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.test.cpp index 8cc6d540c6a..ec0d253ce93 100644 --- a/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.test.cpp +++ b/barretenberg/cpp/src/barretenberg/client_ivc/client_ivc.test.cpp @@ -2,9 +2,9 @@ #include "barretenberg/client_ivc/test_bench_shared.hpp" #include "barretenberg/goblin/goblin.hpp" #include "barretenberg/goblin/mock_circuits.hpp" +#include "barretenberg/protogalaxy/folding_test_utils.hpp" #include "barretenberg/stdlib_circuit_builders/mega_circuit_builder.hpp" #include "barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp" - #include using namespace bb; @@ -403,5 +403,62 @@ TEST_F(ClientIVCTests, StructuredTraceOverflow) log2_num_gates += 1; } + EXPECT_TRUE(ivc.prove_and_verify()); +}; + +/** + * @brief Test dynamic structured trace overflow block mechanism + * @details Tests the case where the required overflow capacity is not known until runtime. Accumulates two circuits, + * the second of which overflows the trace but not enough to change the dyadic circuit size and thus there is no need + * for a virtual size increase of the first key. + * + */ +TEST_F(ClientIVCTests, DynamicOverflow) +{ + // Define trace settings with zero overflow capacity + ClientIVC ivc{ { SMALL_TEST_STRUCTURE_FOR_OVERFLOWS, /*overflow_capacity=*/0 } }; + + MockCircuitProducer circuit_producer; + + const size_t NUM_CIRCUITS = 2; + + // define parameters for two circuits; the first fits within the structured trace, the second overflows + const std::vector log2_num_arith_gates = { 14, 16 }; + // Accumulate + for (size_t idx = 0; idx < NUM_CIRCUITS; ++idx) { + auto circuit = circuit_producer.create_next_circuit(ivc, log2_num_arith_gates[idx]); + ivc.accumulate(circuit); + } + + EXPECT_EQ(check_accumulator_target_sum_manual(ivc.fold_output.accumulator), true); + EXPECT_TRUE(ivc.prove_and_verify()); +}; + +/** + * @brief Test dynamic trace overflow where the dyadic circuit size also increases + * @details Accumulates two circuits, the second of which overflows the trace structure and leads to an increased dyadic + * circuit size. This requires the virtual size of the polynomials in the first key to be increased accordingly, which + * should be handled automatically in PG/ClientIVC. 
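 + *
 + * (The mechanism exercised here is the virtual-size bump; see increase_virtual_size in
 + * polynomial.hpp / shared_shifted_virtual_zeroes_array.hpp below. Conceptually, and not the
 + * literal PG internals:)
 + * @code
 + *   for (auto& poly : accumulator->proving_key.polynomials.get_all()) {
 + *       poly.increase_virtual_size(new_dyadic_size); // pads with virtual zeroes; stored data untouched
 + *   }
 + * @endcode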
+ * + */ +TEST_F(ClientIVCTests, DynamicOverflowCircuitSizeChange) +{ + uint32_t overflow_capacity = 0; + // uint32_t overflow_capacity = 1 << 1; + ClientIVC ivc{ { SMALL_TEST_STRUCTURE_FOR_OVERFLOWS, overflow_capacity } }; + + MockCircuitProducer circuit_producer; + + const size_t NUM_CIRCUITS = 2; + + // define parameters for two circuits; the first fits within the structured trace, the second overflows + const std::vector log2_num_arith_gates = { 14, 18 }; + // Accumulate + for (size_t idx = 0; idx < NUM_CIRCUITS; ++idx) { + auto circuit = circuit_producer.create_next_circuit(ivc, log2_num_arith_gates[idx]); + ivc.accumulate(circuit); + } + + EXPECT_EQ(check_accumulator_target_sum_manual(ivc.fold_output.accumulator), true); EXPECT_TRUE(ivc.prove_and_verify()); }; \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp index c3e5bae6705..c104f84a7d7 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp @@ -429,7 +429,7 @@ template class IPA { return (C_zero.normalize() == right_hand_side.normalize()); } /** - * @brief Recursively verify the correctness of an IPA proof. Unlike native verification, there is no + * @brief Recursively verify the correctness of an IPA proof, without computing G_zero. Unlike native verification, there is no * parallelisation in this function as our circuit construction does not currently support parallelisation. * * @details batch_mul is used instead of pippenger as pippenger is not implemented to be used in stdlib context for @@ -591,6 +591,144 @@ template class IPA { { return reduce_verify_internal_recursive(opening_claim, transcript); } + + /** + * @brief Fully recursively verify the correctness of an IPA proof, including computing G_zero. Unlike native verification, there is no + * parallelisation in this function as our circuit construction does not currently support parallelisation. + * + * @details batch_mul is used instead of pippenger as pippenger is not implemented to be used in stdlib context for + * now and under the hood we perform bigfield to cycle_scalar conversions for the batch_mul. That is because + * cycle_scalar has very reduced functionality at the moment and doesn't support basic arithmetic operations between + * two cycle_scalar operands (just for one cycle_group and one cycle_scalar to enable batch_mul). + * @param vk + * @param opening_claim + * @param transcript + * @return VerifierAccumulator + * @todo (https://github.com/AztecProtocol/barretenberg/issues/1018): simulator should use the native verify + * function with parallelisation + */ + static bool full_verify_recursive(const std::shared_ptr& vk, + const OpeningClaim& opening_claim, + auto& transcript) + requires Curve::is_stdlib_type + { + // Step 1. + // Receive polynomial_degree + 1 = d from the prover + auto poly_length_var = transcript->template receive_from_prover( + "IPA:poly_degree_plus_1"); // note this is base field because this is a uint32_t, which should map + // to a bb::fr, not a grumpkin::fr, which is a BaseField element for + // Grumpkin + + // TODO(https://github.com/AztecProtocol/barretenberg/issues/1144): need checks here on poly_length. + const auto poly_length = static_cast(poly_length_var.get_value()); + info("poly_length = ", poly_length); + // Step 2. 
+ // Receive generator challenge u and compute auxiliary generator + const Fr generator_challenge = transcript->template get_challenge("IPA:generator_challenge"); + typename Curve::Builder* builder = generator_challenge.get_context(); + + const auto log_poly_length = numeric::get_msb(static_cast(poly_length)); + if (log_poly_length > CONST_ECCVM_LOG_N) { + throw_or_abort("IPA log_poly_length is too large"); + } + auto pippenger_size = 2 * CONST_ECCVM_LOG_N; + std::vector round_challenges(CONST_ECCVM_LOG_N); + std::vector round_challenges_inv(CONST_ECCVM_LOG_N); + std::vector msm_elements(pippenger_size); + std::vector msm_scalars(pippenger_size); + + + // Step 3. + // Receive all L_i and R_i and prepare for MSM + for (size_t i = 0; i < CONST_ECCVM_LOG_N; i++) { + // TODO(https://github.com/AztecProtocol/barretenberg/issues/1114): insecure dummy_round derivation! + stdlib::bool_t dummy_round = stdlib::witness_t(builder, i >= log_poly_length); + + std::string index = std::to_string(CONST_ECCVM_LOG_N - i - 1); + auto element_L = transcript->template receive_from_prover("IPA:L_" + index); + auto element_R = transcript->template receive_from_prover("IPA:R_" + index); + round_challenges[i] = transcript->template get_challenge("IPA:round_challenge_" + index); + round_challenges_inv[i] = round_challenges[i].invert(); + + msm_elements[2 * i] = element_L; + msm_elements[2 * i + 1] = element_R; + msm_scalars[2 * i] = Fr::conditional_assign(dummy_round, Fr(0), round_challenges_inv[i]); + msm_scalars[2 * i + 1] = Fr::conditional_assign(dummy_round, Fr(0), round_challenges[i]); + } + + // Step 4. + // Compute b_zero where b_zero can be computed using the polynomial: + // g(X) = ∏_{i ∈ [k]} (1 + u_{i-1}^{-1}.X^{2^{i-1}}). + // b_zero = g(evaluation) = ∏_{i ∈ [k]} (1 + u_{i-1}^{-1}. (evaluation)^{2^{i-1}}) + + Fr b_zero = Fr(1); + Fr challenge = opening_claim.opening_pair.challenge; + for (size_t i = 0; i < CONST_ECCVM_LOG_N; i++) { + stdlib::bool_t dummy_round = stdlib::witness_t(builder, i < CONST_ECCVM_LOG_N - log_poly_length); + + Fr monomial = Fr::conditional_assign(dummy_round, Fr(0), round_challenges_inv[CONST_ECCVM_LOG_N - 1 - i] * challenge); + b_zero *= Fr(1) + monomial; + if (i != CONST_ECCVM_LOG_N - 1) // this if statement is fine because the number of iterations is constant + { + challenge = Fr::conditional_assign(dummy_round, challenge, challenge * challenge); + } + } + + // Step 5. 
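 + // (Closed form of what follows: s_m = prod_i round_challenges_inv[i]^{bit_i(m)}, i.e. entry m picks
 + // up the inverse challenge of round i exactly when bit i of m is set; the loop realises this by
 + // doubling the table once per round.)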
+ // Construct vector s + // We implement a linear-time algorithm to optimally compute this vector + // Note: currently requires an extra vector of size `poly_length / 2` to cache temporaries + // this might be able to be optimized if we care enough, but the size of this poly shouldn't be large relative to the builder polynomial sizes + std::vector s_vec_temporaries(poly_length / 2); + std::vector s_vec(poly_length); + + Fr* previous_round_s = &s_vec_temporaries[0]; + Fr* current_round_s = &s_vec[0]; + // if number of rounds is even we need to swap these so that s_vec always contains the result + if ((log_poly_length & 1) == 0) + { + std::swap(previous_round_s, current_round_s); + } + previous_round_s[0] = Fr(1); + for (size_t i = 0; i < log_poly_length; ++i) + { + const size_t round_size = 1 << (i + 1); + const Fr round_challenge = round_challenges_inv[i]; + for (size_t j = 0; j < round_size / 2; ++j) + { + current_round_s[j * 2] = previous_round_s[j]; + current_round_s[j * 2 + 1] = previous_round_s[j] * round_challenge; + } + std::swap(current_round_s, previous_round_s); + } + // Receive G₀ from the prover + Commitment transcript_G_zero = transcript->template receive_from_prover("IPA:G_0"); + // Compute G₀ + // Unlike the native verification function, the verifier commitment key only contains the SRS so we can apply + // batch_mul directly on it. + const std::vector srs_elements = vk->get_monomial_points(); + Commitment G_zero = Commitment::batch_mul(srs_elements, s_vec); + ASSERT(G_zero.get_value() == transcript_G_zero.get_value() && "Computed G_zero does not match received G_zero."); + + // Step 6. + // Receive a₀ from the prover + const auto a_zero = transcript->template receive_from_prover("IPA:a_0"); + + // Step 7. + // Compute R = C' + ∑_{j ∈ [k]} u_j^{-1}L_j + ∑_{j ∈ [k]} u_jR_j - G₀ * a₀ - (f(\beta) + a₀ * b₀) ⋅ U + // This is a combination of several IPA relations into a large batch mul + // which should be equal to -C + msm_elements.emplace_back(-G_zero); + msm_elements.emplace_back(-Commitment::one(builder)); + msm_scalars.emplace_back(a_zero); + msm_scalars.emplace_back(generator_challenge * a_zero.madd(b_zero, {-opening_claim.opening_pair.evaluation})); + GroupElement ipa_relation = GroupElement::batch_mul(msm_elements, msm_scalars); + ipa_relation.assert_equal(-opening_claim.commitment); + + ASSERT(ipa_relation.get_value() == -opening_claim.commitment.get_value() && "IPA relation failed."); + return (ipa_relation.get_value() == -opening_claim.commitment.get_value()); + } + /** + * @brief A method that produces an IPA opening claim from Shplemini accumulator containing vectors of commitments + * and scalars and a Shplonk evaluation challenge. 
@@ -742,8 +880,11 @@ template class IPA { * @return Polynomial */ static Polynomial create_challenge_poly(const size_t log_poly_length_1, const std::vector& u_challenges_inv_1, const size_t log_poly_length_2, const std::vector& u_challenges_inv_2, bb::fq alpha) { - Polynomial challenge_poly = construct_poly_from_u_challenges_inv(log_poly_length_1, u_challenges_inv_1); + // Always extend each to 1< challenge_poly(1<>(&builder, POLY_LENGTH, this->vk()); + auto result = RecursiveIPA::full_verify_recursive(stdlib_pcs_vkey, stdlib_claim, stdlib_transcript); + EXPECT_TRUE(result); + builder.finalize_circuit(/*ensure_nonzero=*/true); + info("Full IPA Recursive Verifier num finalized gates for length ", + POLY_LENGTH, + " = ", + builder.get_num_finalized_gates()); + EXPECT_TRUE(CircuitChecker::check(builder)); +} + +TEST_F(IPARecursiveTests, AccumulationAndFullRecursiveVerifier) +{ + const size_t POLY_LENGTH = 1024; + + // We create a circuit that does two IPA verifications. However, we don't do the full verifications and instead + // accumulate the claims into one claim. This accumulation is done in circuit. Create two accumulators, which + // contain the commitment and an opening claim. + Builder builder; + + auto [transcript_1, claim_1] = create_ipa_claim(builder, POLY_LENGTH); + auto [transcript_2, claim_2] = create_ipa_claim(builder, POLY_LENGTH); + + // Creates two IPA accumulators and accumulators from the two claims. Also constructs the accumulated h + // polynomial. + auto [output_claim, ipa_proof] = RecursiveIPA::accumulate(this->ck(), transcript_1, claim_1, transcript_2, claim_2); + builder.finalize_circuit(/*ensure_nonzero=*/false); + info("Circuit with 2 IPA Recursive Verifiers and IPA Accumulation num finalized gates = ", + builder.get_num_finalized_gates()); + + EXPECT_TRUE(CircuitChecker::check(builder)); + + Builder root_rollup; + // Fully recursively verify this proof to check it. + auto stdlib_pcs_vkey = + std::make_shared>(&root_rollup, 1 << CONST_ECCVM_LOG_N, this->vk()); + auto stdlib_verifier_transcript = + std::make_shared(convert_native_proof_to_stdlib(&root_rollup, ipa_proof)); + OpeningClaim ipa_claim; + ipa_claim.opening_pair.challenge = + Curve::ScalarField::create_from_u512_as_witness(&root_rollup, output_claim.opening_pair.challenge.get_value()); + ipa_claim.opening_pair.evaluation = + Curve::ScalarField::create_from_u512_as_witness(&root_rollup, output_claim.opening_pair.evaluation.get_value()); + ipa_claim.commitment = Curve::AffineElement::from_witness(&root_rollup, output_claim.commitment.get_value()); + auto result = RecursiveIPA::full_verify_recursive(stdlib_pcs_vkey, ipa_claim, stdlib_verifier_transcript); + root_rollup.finalize_circuit(/*ensure_nonzero=*/true); + EXPECT_TRUE(result); + info("Full IPA Recursive Verifier num finalized gates for length ", + 1 << CONST_ECCVM_LOG_N, + " = ", + root_rollup.get_num_finalized_gates()); +} + +/** + * @brief Test accumulation of IPA claims with different polynomial lengths + * + */ +TEST_F(IPARecursiveTests, AccumulationWithDifferentSizes) +{ + // We create a circuit that does two IPA verifications of different sizes. However, we don't do the full + // verifications and instead accumulate the claims into one claim. This accumulation is done in circuit. Create two + // accumulators, which contain the commitment and an opening claim. 
+ const size_t POLY_LENGTH_1 = 16; + const size_t POLY_LENGTH_2 = 32; + Builder builder; + + auto [transcript_1, claim_1] = create_ipa_claim(builder, POLY_LENGTH_1); + auto [transcript_2, claim_2] = create_ipa_claim(builder, POLY_LENGTH_2); + + // Creates two IPA accumulators and accumulators from the two claims. Also constructs the accumulated h + // polynomial. + auto [output_claim, ipa_proof] = RecursiveIPA::accumulate(this->ck(), transcript_1, claim_1, transcript_2, claim_2); + builder.finalize_circuit(/*ensure_nonzero=*/false); + info("Circuit with 2 IPA Recursive Verifiers and IPA Accumulation num finalized gates = ", + builder.get_num_finalized_gates()); + + EXPECT_TRUE(CircuitChecker::check(builder)); + + const OpeningPair opening_pair{ bb::fq(output_claim.opening_pair.challenge.get_value()), + bb::fq(output_claim.opening_pair.evaluation.get_value()) }; + Commitment native_comm = output_claim.commitment.get_value(); + const OpeningClaim opening_claim{ opening_pair, native_comm }; + + // Natively verify this proof to check it. + auto verifier_transcript = std::make_shared(ipa_proof); + + auto result = NativeIPA::reduce_verify(this->vk(), opening_claim, verifier_transcript); + EXPECT_TRUE(result); } \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/constants.hpp b/barretenberg/cpp/src/barretenberg/constants.hpp index 696eecc0aa5..7ff2b6b002c 100644 --- a/barretenberg/cpp/src/barretenberg/constants.hpp +++ b/barretenberg/cpp/src/barretenberg/constants.hpp @@ -11,7 +11,7 @@ static constexpr uint32_t CONST_PROOF_SIZE_LOG_N = 28; // circuits being folded. static constexpr uint32_t CONST_PG_LOG_N = 20; -static constexpr uint32_t CONST_ECCVM_LOG_N = 16; +static constexpr uint32_t CONST_ECCVM_LOG_N = 15; static constexpr uint32_t MAX_LOOKUP_TABLES_SIZE = 70000; diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ivc_recursion_constraint.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ivc_recursion_constraint.cpp index 7543b933f9e..4bd58eff722 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ivc_recursion_constraint.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ivc_recursion_constraint.cpp @@ -55,7 +55,6 @@ ClientIVC::VerifierInputs create_dummy_vkey_and_proof_oink(const TraceSettings& const size_t num_public_inputs = 0) { using Flavor = MegaFlavor; - using VerificationKey = ClientIVC::VerificationKey; using FF = bb::fr; MegaExecutionTraceBlocks blocks; @@ -89,7 +88,7 @@ ClientIVC::VerifierInputs create_dummy_vkey_and_proof_oink(const TraceSettings& } // Set relevant VK metadata and commitments - verifier_inputs.honk_verification_key = std::make_shared(); + verifier_inputs.honk_verification_key = std::make_shared(); verifier_inputs.honk_verification_key->circuit_size = structured_dyadic_size; verifier_inputs.honk_verification_key->num_public_inputs = total_num_public_inputs; verifier_inputs.honk_verification_key->pub_inputs_offset = blocks.pub_inputs.trace_offset; // must be set correctly @@ -146,7 +145,7 @@ ClientIVC::MergeProof create_dummy_merge_proof() * @param key_witness_indices */ void populate_dummy_vk_in_constraint(MegaCircuitBuilder& builder, - const std::shared_ptr& mock_verification_key, + const std::shared_ptr& mock_verification_key, std::vector& key_witness_indices) { using Flavor = MegaFlavor; diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ivc_recursion_constraint.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ivc_recursion_constraint.hpp index 5ab74ca80e6..7709bee73c0 100644 --- 
a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ivc_recursion_constraint.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ivc_recursion_constraint.hpp @@ -21,7 +21,7 @@ ClientIVC::VerifierInputs create_dummy_vkey_and_proof_oink(const TraceSettings& ClientIVC::MergeProof create_dummy_merge_proof(); void populate_dummy_vk_in_constraint(MegaCircuitBuilder& builder, - const std::shared_ptr& mock_verification_key, + const std::shared_ptr& mock_verification_key, std::vector& key_witness_indices); } // namespace acir_format diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ivc_recursion_constraint.test.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ivc_recursion_constraint.test.cpp index cdbd234948d..4ea3df6a72e 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ivc_recursion_constraint.test.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ivc_recursion_constraint.test.cpp @@ -186,7 +186,7 @@ TEST_F(IvcRecursionConstraintTest, GenerateVK) const TraceSettings trace_settings{ SMALL_TEST_STRUCTURE }; // First, construct the kernel VK by running the full IVC (accumulate one app and one kernel) - std::shared_ptr expected_kernel_vk; + std::shared_ptr expected_kernel_vk; size_t num_app_public_inputs = 0; { ClientIVC ivc{ trace_settings }; @@ -204,7 +204,7 @@ TEST_F(IvcRecursionConstraintTest, GenerateVK) } // Now, construct the kernel VK by mocking the post app accumulation state of the IVC - std::shared_ptr kernel_vk; + std::shared_ptr kernel_vk; { ClientIVC ivc{ trace_settings }; @@ -218,7 +218,7 @@ TEST_F(IvcRecursionConstraintTest, GenerateVK) auto proving_key = std::make_shared>(kernel, trace_settings); MegaProver prover(proving_key); - kernel_vk = std::make_shared(prover.proving_key->proving_key); + kernel_vk = std::make_shared(prover.proving_key->proving_key); } // PCS verification keys will not match so set to null before comparing @@ -234,7 +234,7 @@ TEST_F(IvcRecursionConstraintTest, GenerateVKFromConstraints) const TraceSettings trace_settings{ SMALL_TEST_STRUCTURE }; // First, construct the kernel VK by running the full IVC (accumulate one app and one kernel) - std::shared_ptr expected_kernel_vk; + std::shared_ptr expected_kernel_vk; size_t num_app_public_inputs = 0; { ClientIVC ivc{ trace_settings }; @@ -253,7 +253,7 @@ TEST_F(IvcRecursionConstraintTest, GenerateVKFromConstraints) } // Now, construct the kernel VK by mocking the post app accumulation state of the IVC - std::shared_ptr kernel_vk; + std::shared_ptr kernel_vk; { ClientIVC ivc{ trace_settings }; @@ -273,7 +273,7 @@ TEST_F(IvcRecursionConstraintTest, GenerateVKFromConstraints) // Manually construct the VK for the kernel circuit auto proving_key = std::make_shared>(kernel, ivc.trace_settings); MegaProver prover(proving_key); - kernel_vk = std::make_shared(prover.proving_key->proving_key); + kernel_vk = std::make_shared(prover.proving_key->proving_key); } // PCS verification keys will not match so set to null before comparing diff --git a/barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp b/barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp index e2d1c598799..03c86a55b08 100644 --- a/barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp +++ b/barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp @@ -18,6 +18,22 @@ namespace bb { +/** + * @brief An arbitrary but small-ish structuring that can be used for testing with non-trivial circuits in cases when + * they overflow + */ +static constexpr TraceStructure SMALL_TEST_STRUCTURE_FOR_OVERFLOWS{ 
.ecc_op = 1 << 14, + .pub_inputs = 1 << 14, + .busread = 1 << 14, + .arithmetic = 1 << 15, + .delta_range = 1 << 14, + .elliptic = 1 << 14, + .aux = 1 << 14, + .poseidon2_external = 1 << 14, + .poseidon2_internal = 1 << 15, + .lookup = 1 << 14, + .overflow = 0 }; + class GoblinMockCircuits { public: using Curve = curve::BN254; diff --git a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/execution_trace/execution_trace_usage_tracker.hpp b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/execution_trace/execution_trace_usage_tracker.hpp index 7b9b1fadb6b..683a20a3d63 100644 --- a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/execution_trace/execution_trace_usage_tracker.hpp +++ b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/execution_trace/execution_trace_usage_tracker.hpp @@ -31,6 +31,21 @@ struct ExecutionTraceUsageTracker { size_t max_databus_size = 0; size_t max_tables_size = 0; + // For printing only. Must match the order of the members in the arithmetization + static constexpr std::array block_labels{ "ecc_op", + "pub_inputs", + "busread", + "arithmetic", + "delta_range", + "elliptic", + "aux", + "poseidon2_external", + "poseidon2_internal", + "lookup", + "overflow", + "databus_table_data", + "lookup_table_data" }; + TraceSettings trace_settings; ExecutionTraceUsageTracker(const TraceSettings& trace_settings = TraceSettings{}) @@ -72,8 +87,12 @@ struct ExecutionTraceUsageTracker { } // The active ranges must also include the rows where the actual databus and lookup table data are stored. - // (Note: lookup tables are constructed at the end of the trace; databus data is constructed at the start). - size_t dyadic_circuit_size = fixed_sizes.get_structured_dyadic_size(); + // (Note: lookup tables are constructed at the end of the trace; databus data is constructed at the start) so we + // need to determine the dyadic size for this. We call the size function on the current circuit which will have + // the same fixed block sizes but might also have an overflow block potentially influencing the dyadic circuit + // size. + // TODO(https://github.com/AztecProtocol/barretenberg/issues/1160) + const size_t dyadic_circuit_size = circuit.blocks.get_structured_dyadic_size(); // TODO(https://github.com/AztecProtocol/barretenberg/issues/1152): should be able to use simply Range{ 0, // max_databus_size } but this breaks for certain choices of num_threads. @@ -98,24 +117,13 @@ struct ExecutionTraceUsageTracker { }); } - // For printing only. 
Must match the order of the members in the arithmetization - std::vector block_labels{ "ecc_op", - "pub_inputs", - "busread", - "arithmetic", - "delta_range", - "elliptic", - "aux", - "poseidon2_external", - "poseidon2_internal", - "lookup", - "overflow" }; - void print() { info("Minimum required block sizes for structured trace: "); - for (auto [label, max_size] : zip_view(block_labels, max_sizes.get())) { - std::cout << std::left << std::setw(20) << (label + ":") << max_size << std::endl; + size_t idx = 0; + for (auto max_size : max_sizes.get()) { + std::cout << std::left << std::setw(20) << block_labels[idx] << ": " << max_size << std::endl; + idx++; } info(""); } @@ -124,8 +132,18 @@ struct ExecutionTraceUsageTracker { { info("Active regions of accumulator: "); for (auto [label, range] : zip_view(block_labels, active_ranges)) { - std::cout << std::left << std::setw(20) << (label + ":") << "(" << range.first << ", " << range.second - << ")" << std::endl; + std::cout << std::left << std::setw(20) << label << ": (" << range.first << ", " << range.second << ")" + << std::endl; + } + info(""); + } + + void print_previous_active_ranges() + { + info("Active regions of previous accumulator: "); + for (auto [label, range] : zip_view(block_labels, previous_active_ranges)) { + std::cout << std::left << std::setw(20) << label << ": (" << range.first << ", " << range.second << ")" + << std::endl; } info(""); } diff --git a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/execution_trace/mega_execution_trace.hpp b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/execution_trace/mega_execution_trace.hpp index 44d6dce4945..a24d4131a93 100644 --- a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/execution_trace/mega_execution_trace.hpp +++ b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/execution_trace/mega_execution_trace.hpp @@ -30,10 +30,17 @@ template struct MegaTraceBlockData { std::vector get_labels() const { - return { "ecc_op", "pub_inputs", "busread", - "arithmetic", "delta_range", "elliptic", - "aux", "poseidon2_external", "poseidon2_internal", - "lookup" }; + return { "ecc_op", + "pub_inputs", + "busread", + "arithmetic", + "delta_range", + "elliptic", + "aux", + "poseidon2_external", + "poseidon2_internal", + "lookup", + "overflow" }; } auto get() @@ -220,10 +227,10 @@ class MegaExecutionTraceBlocks : public MegaTraceBlockData { info(""); } - size_t get_structured_dyadic_size() + size_t get_structured_dyadic_size() const { size_t total_size = 1; // start at 1 because the 0th row is unused for selectors for Honk - for (auto block : this->get()) { + for (const auto& block : this->get()) { total_size += block.get_fixed_size(); } diff --git a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/library/grand_product_delta.hpp b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/library/grand_product_delta.hpp index b967dc0e93a..49aeb0edd77 100644 --- a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/library/grand_product_delta.hpp +++ b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/library/grand_product_delta.hpp @@ -48,6 +48,9 @@ typename Flavor::FF compute_public_input_delta(std::span class RelationChecker { + public: + /** + * @brief Check that the provided polynomials satisfy all relations for a given Flavor + */ + static void check_all([[maybe_unused]] const auto& polynomials, [[maybe_unused]] const auto& params) + { + // default; do nothing + } + + /** + * @brief Check that a single specified relation is satisfied for a set of polynomials + * + * @tparam Relation 
a linearly independent Relation to be checked + * @param polynomials prover polynomials + * @param params a RelationParameters instance + */ + template + static void check(const auto& polynomials, + const auto& params, + bool is_linearly_independent, + std::string label = "Relation") + { + // Define the appropriate accumulator type for the relation and initialize to zero + typename Relation::SumcheckArrayOfValuesOverSubrelations result; + for (auto& element : result) { + element = 0; + } + + for (size_t i = 0; i < polynomials.w_l.virtual_size(); i++) { + if (is_linearly_independent) { + // Evaluate each constraint in the relation and check that each is satisfied + Relation::accumulate(result, polynomials.get_row(i), params, 1); + size_t subrelation_idx = 0; + for (auto& element : result) { + if (element != 0) { + info("RelationChecker: ", + label, + " relation (subrelation idx: ", + subrelation_idx, + ") failed at row idx: ", + i, + "."); + ASSERT(false); + } + subrelation_idx++; + } + } + } + + if (!is_linearly_independent) { + // Result accumulated across entire execution trace should be zero + for (auto& element : result) { + if (element != 0) { + info("RelationChecker: ", label, " relation (linearly indep.) failed."); + ASSERT(false); + } + } + } + } +}; + +// Specialization for Ultra +template <> class RelationChecker : public RelationChecker { + using Base = RelationChecker; + + public: + static void check_all(const auto& polynomials, const auto& params) + { + using FF = UltraFlavor::FF; + + // Linearly independent relations (must be satisfied at each row) + Base::check>(polynomials, params, true, "UltraArithmetic"); + Base::check>(polynomials, params, true, "UltraPermutation"); + Base::check>(polynomials, params, true, "DeltaRangeConstraint"); + Base::check>(polynomials, params, true, "Elliptic"); + Base::check>(polynomials, params, true, "Auxiliary"); + Base::check>(polynomials, params, true, "Poseidon2External"); + Base::check>(polynomials, params, true, "Poseidon2Internal"); + + // Linearly dependent relations (must be satisfied as a sum across all rows) + Base::check>(polynomials, params, false, "LogDerivLookup"); + } +}; + +// Specialization for Mega +template <> class RelationChecker : public RelationChecker { + using Base = RelationChecker; + + public: + static void check_all(const auto& polynomials, const auto& params) + { + // Check relations that are shared with Ultra + RelationChecker::check_all(polynomials, params); + + using FF = MegaFlavor::FF; + + // Linearly independent relations (must be satisfied at each row) + Base::check>(polynomials, params, true, "EccOpQueue"); + + // Linearly dependent relations (must be satisfied as a sum across all rows) + Base::check>(polynomials, params, false, "DatabusLookup"); + } +}; + +} // namespace bb \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/polynomials/polynomial.hpp b/barretenberg/cpp/src/barretenberg/polynomials/polynomial.hpp index 3747b1bf2c7..79ee95fe7ef 100644 --- a/barretenberg/cpp/src/barretenberg/polynomials/polynomial.hpp +++ b/barretenberg/cpp/src/barretenberg/polynomials/polynomial.hpp @@ -249,6 +249,7 @@ template class Polynomial { std::size_t size() const { return coefficients_.size(); } std::size_t virtual_size() const { return coefficients_.virtual_size(); } + void increase_virtual_size(const size_t size_in) { coefficients_.increase_virtual_size(size_in); }; Fr* data() { return coefficients_.data(); } const Fr* data() const { return coefficients_.data(); } diff --git 
a/barretenberg/cpp/src/barretenberg/polynomials/shared_shifted_virtual_zeroes_array.hpp b/barretenberg/cpp/src/barretenberg/polynomials/shared_shifted_virtual_zeroes_array.hpp index 7dd50a99c96..191080edbe8 100644 --- a/barretenberg/cpp/src/barretenberg/polynomials/shared_shifted_virtual_zeroes_array.hpp +++ b/barretenberg/cpp/src/barretenberg/polynomials/shared_shifted_virtual_zeroes_array.hpp @@ -1,6 +1,7 @@ #pragma once #include "barretenberg/common/assert.hpp" +#include "barretenberg/common/log.hpp" #include #include @@ -47,6 +48,14 @@ template struct SharedShiftedVirtualZeroesArray { const T& get(size_t index, size_t virtual_padding = 0) const { static const T zero{}; + if (index >= virtual_size_ + virtual_padding) { + info("BAD GET(): index = ", + index, + ", virtual_size_ = ", + virtual_size_, + ", virtual_padding = ", + virtual_padding); + } ASSERT(index < virtual_size_ + virtual_padding); if (index >= start_ && index < end_) { return data()[index - start_]; @@ -68,6 +77,12 @@ template struct SharedShiftedVirtualZeroesArray { // Getter for consistency with size(); size_t virtual_size() const { return virtual_size_; } + void increase_virtual_size(const size_t new_virtual_size) + { + ASSERT(new_virtual_size >= virtual_size_); // shrinking is not allowed + virtual_size_ = new_virtual_size; + } + T& operator[](size_t index) { ASSERT(index >= start_ && index < end_); diff --git a/barretenberg/cpp/src/barretenberg/protogalaxy/folding_test_utils.hpp b/barretenberg/cpp/src/barretenberg/protogalaxy/folding_test_utils.hpp new file mode 100644 index 00000000000..cd478da264f --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/folding_test_utils.hpp @@ -0,0 +1,37 @@ +#include "barretenberg/polynomials/gate_separator.hpp" +#include "barretenberg/protogalaxy/protogalaxy_prover.hpp" +#include "barretenberg/protogalaxy/protogalaxy_prover_internal.hpp" +#include "barretenberg/protogalaxy/protogalaxy_verifier.hpp" +#include "barretenberg/ultra_honk/decider_prover.hpp" +#include "barretenberg/ultra_honk/decider_verifier.hpp" + +namespace bb { + +/** + * @brief Utility to manually compute the target sum of an accumulator and compare it to the one produced in Protogalaxy + * to check correctness. + * + * @details Since we create a ProtogalaxyProverInternal object with an empty execution trace tracker and no active_ranges + * set, compute_row_evaluations will operate on all rows.
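+ * + * In symbols, the check below recomputes target_sum = \sum_{i=0}^{n-1} e_i * pow_{\vec{betas*}}(i), where e_i is the + * full Honk relation evaluation at row i and pow_{\vec{betas*}} is the gate separator polynomial built from the gate + * challenges.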
+ */ +template +static bool check_accumulator_target_sum_manual(const std::shared_ptr>& accumulator) +{ + using DeciderProvingKeys = DeciderProvingKeys_; + using PGInternal = ProtogalaxyProverInternal; + + const size_t accumulator_size = accumulator->proving_key.circuit_size; + PGInternal pg_internal; + const auto expected_honk_evals = pg_internal.compute_row_evaluations( + accumulator->proving_key.polynomials, accumulator->alphas, accumulator->relation_parameters); + // Construct pow(\vec{betas*}) as in the paper + GateSeparatorPolynomial expected_gate_separators(accumulator->gate_challenges, accumulator->gate_challenges.size()); + + // Compute the corresponding target sum + typename Flavor::FF expected_target_sum{ 0 }; + for (size_t idx = 0; idx < accumulator_size; idx++) { + expected_target_sum += expected_honk_evals[idx] * expected_gate_separators[idx]; + } + return accumulator->target_sum == expected_target_sum; +} +} // namespace bb \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy.test.cpp b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy.test.cpp index 29f80b807f0..bcbd70090d6 100644 --- a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy.test.cpp +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy.test.cpp @@ -1,5 +1,6 @@ #include "barretenberg/goblin/mock_circuits.hpp" #include "barretenberg/polynomials/gate_separator.hpp" +#include "barretenberg/protogalaxy/folding_test_utils.hpp" #include "barretenberg/protogalaxy/protogalaxy_prover.hpp" #include "barretenberg/protogalaxy/protogalaxy_prover_internal.hpp" #include "barretenberg/protogalaxy/protogalaxy_verifier.hpp" @@ -80,9 +81,10 @@ template class ProtogalaxyTests : public testing::Test { static std::tuple, std::shared_ptr> fold_and_verify( const std::vector>& proving_keys, - const std::vector>& verification_keys) + const std::vector>& verification_keys, + ExecutionTraceUsageTracker trace_usage_tracker = ExecutionTraceUsageTracker{}) { - FoldingProver folding_prover(proving_keys); + FoldingProver folding_prover(proving_keys, trace_usage_tracker); FoldingVerifier folding_verifier(verification_keys); auto [prover_accumulator, folding_proof] = folding_prover.prove(); @@ -90,27 +92,8 @@ template class ProtogalaxyTests : public testing::Test { return { prover_accumulator, verifier_accumulator }; } - static void check_accumulator_target_sum_manual(std::shared_ptr& accumulator, - bool expected_result) - { - size_t accumulator_size = accumulator->proving_key.circuit_size; - PGInternal pg_internal; - auto expected_honk_evals = pg_internal.compute_row_evaluations( - accumulator->proving_key.polynomials, accumulator->alphas, accumulator->relation_parameters); - // Construct pow(\vec{betas*}) as in the paper - GateSeparatorPolynomial expected_gate_separators(accumulator->gate_challenges, - accumulator->gate_challenges.size()); - - // Compute the corresponding target sum and create a dummy accumulator - FF expected_target_sum{ 0 }; - for (size_t idx = 0; idx < accumulator_size; idx++) { - expected_target_sum += expected_honk_evals[idx] * expected_gate_separators[idx]; - } - EXPECT_EQ(accumulator->target_sum == expected_target_sum, expected_result); - } - - static void decide_and_verify(std::shared_ptr& prover_accumulator, - std::shared_ptr& verifier_accumulator, + static void decide_and_verify(const std::shared_ptr& prover_accumulator, + const std::shared_ptr& verifier_accumulator, bool expected_result) { DeciderProver
decider_prover(prover_accumulator); @@ -331,7 +314,7 @@ template class ProtogalaxyTests : public testing::Test { // Perform prover and verifier folding auto [prover_accumulator, verifier_accumulator] = fold_and_verify(get<0>(keys), get<1>(keys)); - check_accumulator_target_sum_manual(prover_accumulator, true); + EXPECT_TRUE(check_accumulator_target_sum_manual(prover_accumulator)); // Run decider decide_and_verify(prover_accumulator, verifier_accumulator, true); @@ -414,9 +397,8 @@ template class ProtogalaxyTests : public testing::Test { auto [prover_accumulator, verifier_accumulator] = fold_and_verify(get<0>(keys), get<1>(keys)); // Expect failure in manual target sum check and decider - bool expected_result = false; - check_accumulator_target_sum_manual(prover_accumulator, expected_result); - decide_and_verify(prover_accumulator, verifier_accumulator, expected_result); + EXPECT_FALSE(check_accumulator_target_sum_manual(prover_accumulator)); + decide_and_verify(prover_accumulator, verifier_accumulator, false); } /** @@ -427,12 +409,12 @@ template class ProtogalaxyTests : public testing::Test { { TupleOfKeys insts = construct_keys(2); auto [prover_accumulator, verifier_accumulator] = fold_and_verify(get<0>(insts), get<1>(insts)); - check_accumulator_target_sum_manual(prover_accumulator, true); + EXPECT_TRUE(check_accumulator_target_sum_manual(prover_accumulator)); TupleOfKeys insts_2 = construct_keys(1); // just one key pair auto [prover_accumulator_2, verifier_accumulator_2] = fold_and_verify({ prover_accumulator, get<0>(insts_2)[0] }, { verifier_accumulator, get<1>(insts_2)[0] }); - check_accumulator_target_sum_manual(prover_accumulator_2, true); + EXPECT_TRUE(check_accumulator_target_sum_manual(prover_accumulator_2)); decide_and_verify(prover_accumulator_2, verifier_accumulator_2, true); } @@ -443,21 +425,62 @@ template class ProtogalaxyTests : public testing::Test { */ static void test_full_protogalaxy_structured_trace() { - TraceSettings trace_settings{ SMALL_TEST_STRUCTURE }; + TraceSettings trace_settings{ SMALL_TEST_STRUCTURE_FOR_OVERFLOWS }; TupleOfKeys keys_1 = construct_keys(2, trace_settings); auto [prover_accumulator, verifier_accumulator] = fold_and_verify(get<0>(keys_1), get<1>(keys_1)); - check_accumulator_target_sum_manual(prover_accumulator, true); + EXPECT_TRUE(check_accumulator_target_sum_manual(prover_accumulator)); TupleOfKeys keys_2 = construct_keys(1, trace_settings); // just one key pair auto [prover_accumulator_2, verifier_accumulator_2] = fold_and_verify({ prover_accumulator, get<0>(keys_2)[0] }, { verifier_accumulator, get<1>(keys_2)[0] }); - check_accumulator_target_sum_manual(prover_accumulator_2, true); + EXPECT_TRUE(check_accumulator_target_sum_manual(prover_accumulator_2)); info(prover_accumulator_2->proving_key.circuit_size); decide_and_verify(prover_accumulator_2, verifier_accumulator_2, true); } + /** + * @brief Testing folding a larger circuit into a smaller one by increasing the virtual size of the first. + * @details Fold two circuits using a structured trace, where the second overflows the trace such that the dyadic + * size is doubled. The virtual size of the polynomials in the first key is increased internally in the PG prover to + * match the size of the second. 
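+ * This exercises the general mechanism in ProtogalaxyProver_::prove(): on detecting a circuit size mismatch, the + * prover calls increase_polynomials_virtual_size(max_circuit_size) on each smaller key (see + * protogalaxy_prover_impl.hpp in this diff) before folding.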
+ * + */ + static void test_fold_with_virtual_size_expansion() + { + uint32_t overflow_capacity = 0; // consider the case where the overflow is not known until runtime + TraceSettings trace_settings{ SMALL_TEST_STRUCTURE_FOR_OVERFLOWS, overflow_capacity }; + ExecutionTraceUsageTracker trace_usage_tracker = ExecutionTraceUsageTracker(trace_settings); + + std::vector> decider_pks; + std::vector> decider_vks; + + // Define parameters for two circuits; the first fits within the structured trace, the second overflows + const std::vector log2_num_gates = { 14, 18 }; + for (size_t i = 0; i < 2; ++i) { + MegaCircuitBuilder builder; + + MockCircuits::add_arithmetic_gates(builder, 1 << log2_num_gates[i]); + + auto decider_proving_key = std::make_shared(builder, trace_settings); + trace_usage_tracker.update(builder); + auto verification_key = std::make_shared(decider_proving_key->proving_key); + auto decider_verification_key = std::make_shared(verification_key); + decider_pks.push_back(decider_proving_key); + decider_vks.push_back(decider_verification_key); + } + + // Ensure the dyadic size of the first key is strictly less than that of the second + EXPECT_TRUE(decider_pks[0]->proving_key.circuit_size < decider_pks[1]->proving_key.circuit_size); + + // The size discrepancy should be automatically handled by the PG prover via a virtual size increase + const auto [prover_accumulator, verifier_accumulator] = + fold_and_verify(decider_pks, decider_vks, trace_usage_tracker); + EXPECT_TRUE(check_accumulator_target_sum_manual(prover_accumulator)); + decide_and_verify(prover_accumulator, verifier_accumulator, true); + } + /** * @brief Testing two valid rounds of folding followed by the decider for a structured trace. * @details Here we're interested in folding inhomogeneous circuits, i.e.
circuits with different numbers of @@ -488,7 +511,7 @@ template class ProtogalaxyTests : public testing::Test { // Fold the first two pairs auto [prover_accumulator, verifier_accumulator] = fold_and_verify(get<0>(keys_1), get<1>(keys_1)); - check_accumulator_target_sum_manual(prover_accumulator, true); + EXPECT_TRUE(check_accumulator_target_sum_manual(prover_accumulator)); // Construct the decider key pair for the third circuit TupleOfKeys keys_2; @@ -497,7 +520,7 @@ template class ProtogalaxyTests : public testing::Test { // Fold 3rd pair of keys into their respective accumulators auto [prover_accumulator_2, verifier_accumulator_2] = fold_and_verify({ prover_accumulator, get<0>(keys_2)[0] }, { verifier_accumulator, get<1>(keys_2)[0] }); - check_accumulator_target_sum_manual(prover_accumulator_2, true); + EXPECT_TRUE(check_accumulator_target_sum_manual(prover_accumulator_2)); info(prover_accumulator_2->proving_key.circuit_size); // Decide on final accumulator @@ -512,7 +535,7 @@ template class ProtogalaxyTests : public testing::Test { { TupleOfKeys insts = construct_keys(2); auto [prover_accumulator, verifier_accumulator] = fold_and_verify(get<0>(insts), get<1>(insts)); - check_accumulator_target_sum_manual(prover_accumulator, true); + EXPECT_TRUE(check_accumulator_target_sum_manual(prover_accumulator)); // Tamper with a commitment verifier_accumulator->witness_commitments.w_l = Projective(Affine::random_element()); @@ -520,7 +543,7 @@ template class ProtogalaxyTests : public testing::Test { TupleOfKeys insts_2 = construct_keys(1); // just one decider key pair auto [prover_accumulator_2, verifier_accumulator_2] = fold_and_verify({ prover_accumulator, get<0>(insts_2)[0] }, { verifier_accumulator, get<1>(insts_2)[0] }); - check_accumulator_target_sum_manual(prover_accumulator_2, true); + EXPECT_TRUE(check_accumulator_target_sum_manual(prover_accumulator_2)); decide_and_verify(prover_accumulator_2, verifier_accumulator_2, false); } @@ -534,11 +557,11 @@ template class ProtogalaxyTests : public testing::Test { { TupleOfKeys insts = construct_keys(2); auto [prover_accumulator, verifier_accumulator] = fold_and_verify(get<0>(insts), get<1>(insts)); - check_accumulator_target_sum_manual(prover_accumulator, true); + EXPECT_TRUE(check_accumulator_target_sum_manual(prover_accumulator)); // Tamper with an accumulator polynomial prover_accumulator->proving_key.polynomials.w_l.at(1) = FF::random_element(); - check_accumulator_target_sum_manual(prover_accumulator, false); + EXPECT_FALSE(check_accumulator_target_sum_manual(prover_accumulator)); TupleOfKeys insts_2 = construct_keys(1); // just one decider key pair auto [prover_accumulator_2, verifier_accumulator_2] = @@ -558,8 +581,7 @@ template class ProtogalaxyTests : public testing::Test { auto [prover_accumulator, folding_proof] = folding_prover.prove(); auto verifier_accumulator = folding_verifier.verify_folding_proof(folding_proof); - check_accumulator_target_sum_manual(prover_accumulator, true); - + EXPECT_TRUE(check_accumulator_target_sum_manual(prover_accumulator)); decide_and_verify(prover_accumulator, verifier_accumulator, true); } }; @@ -612,6 +634,12 @@ TYPED_TEST(ProtogalaxyTests, FullProtogalaxyStructuredTrace) { TestFixture::test_full_protogalaxy_structured_trace(); } + +TYPED_TEST(ProtogalaxyTests, VirtualSizeExpansion) +{ + TestFixture::test_fold_with_virtual_size_expansion(); +} + TYPED_TEST(ProtogalaxyTests, FullProtogalaxyStructuredTraceInhomogeneous) { TestFixture::test_full_protogalaxy_structured_trace_inhomogeneous_circuits(); diff 
--git a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover_impl.hpp b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover_impl.hpp index bd7c0ee1701..6fbd2e47dc0 100644 --- a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover_impl.hpp @@ -1,5 +1,6 @@ #pragma once #include "barretenberg/common/op_count.hpp" +#include "barretenberg/plonk_honk_shared/relation_checker.hpp" #include "barretenberg/protogalaxy/protogalaxy_prover_internal.hpp" #include "barretenberg/protogalaxy/prover_verifier_shared.hpp" #include "barretenberg/relations/relation_parameters.hpp" @@ -128,6 +129,21 @@ FoldingResult ProtogalaxyProver_target_sum = perturbator_evaluation * lagranges[0] + vanishing_polynomial_at_challenge * combiner_quotient.evaluate(combiner_challenge); + // Check whether the incoming key has a larger trace overflow than the accumulator. If so, the memory structure of + // the accumulator polynomials will not be sufficient to contain the contribution from the incoming polynomials. The + // solution is to simply reverse the order of the terms in the linear combination by swapping the polynomials and + // the lagrange coefficients between the accumulator and the incoming key. + if (keys[1]->overflow_size > result.accumulator->overflow_size) { + ASSERT(DeciderProvingKeys::NUM == 2); // this mechanism is not supported for the folding of multiple keys + // DEBUG: At this point the virtual sizes of the polynomials should already agree + ASSERT(result.accumulator->proving_key.polynomials.w_l.virtual_size() == + keys[1]->proving_key.polynomials.w_l.virtual_size()); + std::swap(result.accumulator->proving_key.polynomials, keys[1]->proving_key.polynomials); // swap the polys + std::swap(lagranges[0], lagranges[1]); // swap the lagrange coefficients so the sum is unchanged + std::swap(result.accumulator->proving_key.circuit_size, keys[1]->proving_key.circuit_size); // swap circuit size + std::swap(result.accumulator->proving_key.log_circuit_size, keys[1]->proving_key.log_circuit_size); + } + // Fold the proving key polynomials for (auto& poly : result.accumulator->proving_key.polynomials.get_unshifted()) { poly *= lagranges[0]; @@ -161,14 +177,22 @@ FoldingResult ProtogalaxyProver_proving_key.circuit_size != keys_to_fold[idx + 1]->proving_key.circuit_size) { - info("ProtogalaxyProver: circuit size mismatch!"); - info("DeciderPK ", idx, " size = ", keys_to_fold[idx]->proving_key.circuit_size); - info("DeciderPK ", idx + 1, " size = ", keys_to_fold[idx + 1]->proving_key.circuit_size); - ASSERT(false); + size_t max_circuit_size = 0; + for (size_t idx = 0; idx < DeciderProvingKeys::NUM; ++idx) { + max_circuit_size = std::max(max_circuit_size, keys_to_fold[idx]->proving_key.circuit_size); + } + for (size_t idx = 0; idx < DeciderProvingKeys::NUM; ++idx) { + if (keys_to_fold[idx]->proving_key.circuit_size != max_circuit_size) { + info("ProtogalaxyProver: circuit size mismatch - increasing virtual size of key ", + idx, + " from ", + keys_to_fold[idx]->proving_key.circuit_size, + " to ", + max_circuit_size); + keys_to_fold[idx]->proving_key.polynomials.increase_polynomials_virtual_size(max_circuit_size); } } + run_oink_prover_on_each_incomplete_key(); vinfo("oink prover on each incomplete key"); diff --git a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover_internal.hpp b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover_internal.hpp index 4c5d6a9a57a..efad2d0e072 100644
--- a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover_internal.hpp +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover_internal.hpp @@ -135,7 +135,8 @@ template class ProtogalaxyProverInternal { std::vector linearly_dependent_contribution_accumulators(num_threads); // Distribute the execution trace rows across threads so that each handles an equal number of active rows - trace_usage_tracker.construct_thread_ranges(num_threads, polynomial_size, /*use_prev_accumulator=*/true); + trace_usage_tracker.construct_thread_ranges( + num_threads, polynomial_size, /*use_prev_accumulator_tracker=*/true); parallel_for(num_threads, [&](size_t thread_idx) { const size_t start = trace_usage_tracker.thread_ranges[thread_idx].first; @@ -143,7 +144,7 @@ template class ProtogalaxyProverInternal { for (size_t idx = start; idx < end; idx++) { // The contribution is only non-trivial at a given row if the accumulator is active at that row - if (trace_usage_tracker.check_is_active(idx, /*use_prev_accumulator=*/true)) { + if (trace_usage_tracker.check_is_active(idx, true)) { const AllValues row = polynomials.get_row(idx); // Evaluate all subrelations on given row. Separator is 1 since we are not summing across rows here. const RelationEvaluations evals = @@ -343,7 +344,9 @@ template class ProtogalaxyProverInternal { constexpr bool skip_zero_computations = std::same_as; // Determine the number of threads over which to distribute the work - const size_t common_polynomial_size = keys[0]->proving_key.circuit_size; + // The polynomial size is given by the virtual size since the computation includes + // the incoming key which could have nontrivial values on the larger domain in case of overflow. + const size_t common_polynomial_size = keys[0]->proving_key.polynomials.w_l.virtual_size(); const size_t num_threads = compute_num_threads(common_polynomial_size); // Univariates are optimised for usual PG, but we need the unoptimised version for tests (it's a version that diff --git a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_verifier.cpp b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_verifier.cpp index b0a8a27a6e8..3d7cbec843d 100644 --- a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_verifier.cpp @@ -107,6 +107,11 @@ std::shared_ptr ProtogalaxyVerifier next_accumulator->verification_key = std::make_shared(*accumulator->verification_key); next_accumulator->is_accumulator = true; + // Set the accumulator circuit size data based on the max of the keys being accumulated + const size_t accumulator_log_circuit_size = keys_to_fold.get_max_log_circuit_size(); + next_accumulator->verification_key->log_circuit_size = accumulator_log_circuit_size; + next_accumulator->verification_key->circuit_size = 1 << accumulator_log_circuit_size; + // Compute next folding parameters const auto [vanishing_polynomial_at_challenge, lagranges] = compute_vanishing_polynomial_and_lagrange_evaluations(combiner_challenge); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup.test.cpp index fd54166a9a9..037a470649b 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup.test.cpp @@ -60,7 +60,6 @@ template class stdlib_biggroup : public testing::Test { { Builder builder; affine_element 
input_a(element::random_element()); - affine_element input_b(element::random_element()); element_ct a = element_ct::from_witness(&builder, input_a); a.set_origin_tag(next_submitted_value_origin_tag); @@ -76,6 +75,7 @@ template class stdlib_biggroup : public testing::Test { EXPECT_EQ(a.get_origin_tag(), first_second_third_merged_tag); #ifndef NDEBUG + affine_element input_b(element::random_element()); // Working with instant death tagged element causes an exception element_ct b = element_ct::from_witness(&builder, input_b); b.set_origin_tag(instant_death_tag); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/protogalaxy_verifier/protogalaxy_recursive_verifier.cpp b/barretenberg/cpp/src/barretenberg/stdlib/protogalaxy_verifier/protogalaxy_recursive_verifier.cpp index 566a8d83397..10739d2aa45 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/protogalaxy_verifier/protogalaxy_recursive_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/protogalaxy_verifier/protogalaxy_recursive_verifier.cpp @@ -169,6 +169,11 @@ std::shared_ptr ProtogalaxyRecursiv perturbator_evaluation * lagranges[0] + vanishing_polynomial_at_challenge * combiner_quotient_at_challenge; accumulator->gate_challenges = update_gate_challenges(perturbator_challenge, accumulator->gate_challenges, deltas); + // Set the accumulator circuit size data based on the max of the keys being accumulated + const size_t accumulator_log_circuit_size = keys_to_fold.get_max_log_circuit_size(); + accumulator->verification_key->log_circuit_size = accumulator_log_circuit_size; + accumulator->verification_key->circuit_size = 1 << accumulator_log_circuit_size; + // Fold the relation parameters for (auto [combination, to_combine] : zip_view(accumulator->alphas, keys_to_fold.get_alphas())) { combination = linear_combination(to_combine, lagranges); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/protogalaxy_verifier/recursive_decider_verification_keys.hpp b/barretenberg/cpp/src/barretenberg/stdlib/protogalaxy_verifier/recursive_decider_verification_keys.hpp index 6e75211df15..9a2361b7a9b 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/protogalaxy_verifier/recursive_decider_verification_keys.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/protogalaxy_verifier/recursive_decider_verification_keys.hpp @@ -35,6 +35,22 @@ template struct RecursiveDeciderVerific idx++; } } + + /** + * @brief Get the max log circuit size from the set of decider verification keys + * + * @return size_t + */ + size_t get_max_log_circuit_size() const + { + size_t max_log_circuit_size{ 0 }; + for (auto key : _data) { + max_log_circuit_size = + std::max(max_log_circuit_size, static_cast(key->verification_key->log_circuit_size)); + } + return max_log_circuit_size; + } + /** * @brief Get the precomputed commitments grouped by commitment index * @example If the commitments are grouped as in diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_flavor.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_flavor.hpp index aaac39d7a00..f7110df3ee0 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mega_flavor.hpp @@ -421,6 +421,13 @@ class MegaFlavor { shifted = to_be_shifted.shifted(); } } + + void increase_polynomials_virtual_size(const size_t size_in) + { + for (auto& polynomial : this->get_all()) { + polynomial.increase_virtual_size(size_in); + } + } }; /** @@ -1037,4 +1044,4 @@ class MegaFlavor { using Transcript = 
Transcript_; }; -} // namespace bb +} // namespace bb \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_flavor.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_flavor.hpp index 8d69028950f..6ff45fb338d 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_flavor.hpp @@ -343,6 +343,13 @@ class UltraFlavor { shifted = to_be_shifted.shifted(); } } + + void increase_polynomials_virtual_size(const size_t size_in) + { + for (auto& polynomial : this->get_all()) { + polynomial.increase_virtual_size(size_in); + } + } }; /** * @brief The proving key is responsible for storing the polynomials used by the prover. diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_keys.hpp b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_keys.hpp index 49b72bbd0e2..770cede8297 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_keys.hpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_keys.hpp @@ -108,6 +108,21 @@ template struct DeciderVerificationKeys_ { } }; + /** + * @brief Get the max log circuit size from the set of decider verification keys + * + * @return size_t + */ + size_t get_max_log_circuit_size() const + { + size_t max_log_circuit_size{ 0 }; + for (auto key : _data) { + max_log_circuit_size = + std::max(max_log_circuit_size, static_cast(key->verification_key->log_circuit_size)); + } + return max_log_circuit_size; + } + /** * @brief Get the precomputed commitments grouped by commitment index * @example If the commitments are grouped as in diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_proving_key.hpp b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_proving_key.hpp index c706a8e69ad..7e114c40a3f 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_proving_key.hpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_proving_key.hpp @@ -48,6 +48,8 @@ template class DeciderProvingKey_ { size_t final_active_wire_idx{ 0 }; // idx of last non-trivial wire value in the trace size_t dyadic_circuit_size{ 0 }; // final power-of-2 circuit size + size_t overflow_size{ 0 }; // size of the structured execution trace overflow + DeciderProvingKey_(Circuit& circuit, TraceSettings trace_settings = {}, std::shared_ptr commitment_key = nullptr) @@ -67,6 +69,7 @@ template class DeciderProvingKey_ { circuit.blocks.set_fixed_block_sizes(trace_settings); // The structuring is set circuit.blocks.summarize(); move_structured_trace_overflow_to_overflow_block(circuit); + overflow_size = circuit.blocks.overflow.size(); dyadic_circuit_size = compute_structured_dyadic_size(circuit); // set the dyadic size accordingly } else { dyadic_circuit_size = compute_dyadic_size(circuit); // set dyadic size directly from circuit block sizes @@ -102,6 +105,6 @@ template class DeciderProvingKey_ { proving_key = ProvingKey(dyadic_circuit_size, circuit.public_inputs.size(), commitment_key); // If not using structured trace OR if using structured trace but overflow has occurred (overflow block in // use), allocate full size polys if ((IsMegaFlavor && !is_structured) || (is_structured && circuit.blocks.has_overflow)) { // Allocate full size polynomials proving_key.polynomials = typename Flavor::ProverPolynomials(dyadic_circuit_size); @@ -256,7 +260,7 @@ template class DeciderProvingKey_ { proving_key.polynomials.lagrange_first = Polynomial( /* size=*/1, /*virtual
size=*/dyadic_circuit_size, /*start_idx=*/0); - // Even though lagrange_last has a singe non-zero element, we cannot set its size to 0 as different + // Even though lagrange_last has a single non-zero element, we cannot set its size to 0 as different // keys being folded might have lagrange_last set at different indexes and folding does not work // correctly unless the polynomial is allocated in the correct range to accommodate this proving_key.polynomials.lagrange_last = Polynomial( diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/mega_honk.test.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/mega_honk.test.cpp index 132b84f5aad..0804bb3c97b 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/mega_honk.test.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/mega_honk.test.cpp @@ -5,6 +5,7 @@ #include "barretenberg/circuit_checker/circuit_checker.hpp" #include "barretenberg/common/log.hpp" #include "barretenberg/goblin/mock_circuits.hpp" +#include "barretenberg/plonk_honk_shared/relation_checker.hpp" #include "barretenberg/stdlib_circuit_builders/mega_circuit_builder.hpp" #include "barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp" #include "barretenberg/ultra_honk/merge_prover.hpp" @@ -128,6 +129,56 @@ TYPED_TEST(MegaHonkTests, BasicStructured) EXPECT_TRUE(verifier.verify_proof(proof)); } +/** + * @brief Test that increasing the virtual size of a valid set of prover polynomials still results in a valid MegaHonk + * proof + * + */ +TYPED_TEST(MegaHonkTests, DynamicVirtualSizeIncrease) +{ + using Flavor = TypeParam; + typename Flavor::CircuitBuilder builder; + using Prover = UltraProver_; + using Verifier = UltraVerifier_; + + GoblinMockCircuits::construct_simple_circuit(builder); + + auto builder_copy = builder; + + // Construct and verify Honk proof using a structured trace + TraceSettings trace_settings{ SMALL_TEST_STRUCTURE_FOR_OVERFLOWS }; + auto proving_key = std::make_shared>(builder, trace_settings); + auto proving_key_copy = std::make_shared>(builder_copy, trace_settings); + auto circuit_size = proving_key->proving_key.circuit_size; + + auto doubled_circuit_size = 2 * circuit_size; + proving_key_copy->proving_key.polynomials.increase_polynomials_virtual_size(doubled_circuit_size); + // TODO(https://github.com/AztecProtocol/barretenberg/issues/1158) + // proving_key_copy->proving_key.circuit_size = doubled_circuit_size; + + Prover prover(proving_key); + auto verification_key = std::make_shared(proving_key->proving_key); + + Prover prover_copy(proving_key_copy); + auto verification_key_copy = std::make_shared(proving_key_copy->proving_key); + + for (auto [entry, entry_copy] : zip_view(verification_key->get_all(), verification_key_copy->get_all())) { + EXPECT_EQ(entry, entry_copy); + } + + Verifier verifier(verification_key); + auto proof = prover.construct_proof(); + + RelationChecker::check_all(proving_key->proving_key.polynomials, proving_key->relation_parameters); + EXPECT_TRUE(verifier.verify_proof(proof)); + + Verifier verifier_copy(verification_key_copy); + auto proof_copy = prover_copy.construct_proof(); + + RelationChecker::check_all(proving_key_copy->proving_key.polynomials, proving_key_copy->relation_parameters); + EXPECT_TRUE(verifier_copy.verify_proof(proof_copy)); +} + /** * @brief Test proof construction/verification for a circuit with ECC op gates, public inputs, and basic arithmetic * gates @@ -314,3 +365,49 @@ TYPED_TEST(MegaHonkTests, StructuredTraceOverflow) EXPECT_TRUE(builder.blocks.has_overflow); } } + +/** + * @brief A sanity
check that a simple std::swap on a ProverPolynomials object works as expected + * @details Construct two valid proving keys. Tamper with the prover_polynomials of one key then swap the + * prover_polynomials of the two keys. The key that received the tampered polys leads to a failed verification while the + * other succeeds. + * + */ +TYPED_TEST(MegaHonkTests, PolySwap) +{ + using Flavor = TypeParam; + using Builder = Flavor::CircuitBuilder; + + TraceSettings trace_settings{ SMALL_TEST_STRUCTURE_FOR_OVERFLOWS }; + + // Construct a simple circuit and make a copy of it + Builder builder; + GoblinMockCircuits::construct_simple_circuit(builder); + auto builder_copy = builder; + + // Construct two identical proving keys + auto proving_key_1 = std::make_shared(builder, trace_settings); + auto proving_key_2 = std::make_shared(builder_copy, trace_settings); + + // Tamper with the polys of pkey 1 in such a way that verification should fail + proving_key_1->proving_key.polynomials.w_l.at(5) = 10; + + // Swap the polys of the two proving keys; the result should be that pkey 1 is valid and pkey 2 fails + std::swap(proving_key_1->proving_key.polynomials, proving_key_2->proving_key.polynomials); + + { // Verification based on pkey 1 should succeed + typename TestFixture::Prover prover(proving_key_1); + auto verification_key = std::make_shared(proving_key_1->proving_key); + typename TestFixture::Verifier verifier(verification_key); + auto proof = prover.construct_proof(); + EXPECT_TRUE(verifier.verify_proof(proof)); + } + + { // Verification based on pkey 2 should fail + typename TestFixture::Prover prover(proving_key_2); + auto verification_key = std::make_shared(proving_key_2->proving_key); + typename TestFixture::Verifier verifier(verification_key); + auto proof = prover.construct_proof(); + EXPECT_FALSE(verifier.verify_proof(proof)); + } +} diff --git a/barretenberg/ts/CHANGELOG.md b/barretenberg/ts/CHANGELOG.md index 424f385b656..f25e4b03415 100644 --- a/barretenberg/ts/CHANGELOG.md +++ b/barretenberg/ts/CHANGELOG.md @@ -1,5 +1,17 @@ # Changelog +## [0.65.1](https://github.com/AztecProtocol/aztec-packages/compare/barretenberg.js-v0.65.0...barretenberg.js-v0.65.1) (2024-11-27) + + +### Features + +* Speed up transaction execution ([#10172](https://github.com/AztecProtocol/aztec-packages/issues/10172)) ([da265b6](https://github.com/AztecProtocol/aztec-packages/commit/da265b6b7d61a0d991fa23bd044f711513a0e86c)) + + +### Bug Fixes + +* Add pako as a dependency in bb.js ([#10186](https://github.com/AztecProtocol/aztec-packages/issues/10186)) ([b773c14](https://github.com/AztecProtocol/aztec-packages/commit/b773c14a8fe8bf425dc755b3a156e500e9924c1e)) + ## [0.65.0](https://github.com/AztecProtocol/aztec-packages/compare/barretenberg.js-v0.64.0...barretenberg.js-v0.65.0) (2024-11-26) diff --git a/barretenberg/ts/package.json b/barretenberg/ts/package.json index 3f8ed54e7b3..edc70e8efec 100644 --- a/barretenberg/ts/package.json +++ b/barretenberg/ts/package.json @@ -1,7 +1,7 @@ { "name": "@aztec/bb.js", "packageManager": "yarn@1.22.22", - "version": "0.65.0", + "version": "0.65.1", "homepage": "https://github.com/AztecProtocol/aztec-packages/tree/master/barretenberg/ts", "license": "MIT", "type": "module", @@ -56,6 +56,7 @@ "commander": "^10.0.1", "debug": "^4.3.4", "fflate": "^0.8.0", + "pako": "^2.1.0", "tslib": "^2.4.0" }, "devDependencies": { @@ -76,7 +77,6 @@ "html-webpack-plugin": "^5.5.1", "idb-keyval": "^6.2.1", "jest": "^29.5.0", - "pako": "^2.1.0",
"prettier": "^2.8.4", "resolve-typescript-plugin": "^2.0.1", "ts-jest": "^29.1.0", diff --git a/docs/deploy_preview.sh b/docs/deploy_preview.sh index af544278342..82fec611e31 100755 --- a/docs/deploy_preview.sh +++ b/docs/deploy_preview.sh @@ -4,20 +4,22 @@ set -eu PR_NUMBER=$1 AZTEC_BOT_COMMENTER_GITHUB_TOKEN="$2" -API_URL="https://api.github.com/repos/AztecProtocol/aztec-packages/pulls/${PR_NUMBER}/files" - -echo "API URL: $API_URL" - -DOCS_CHANGED=$(curl -L \ - -H "Authorization: Bearer $AZTEC_BOT_COMMENTER_GITHUB_TOKEN" \ - "${API_URL}" | \ - jq '[.[] | select(.filename | startswith("docs/"))] | length > 0') - -echo "Docs changed: $DOCS_CHANGED" - -if [ "$DOCS_CHANGED" = "false" ]; then - echo "No docs changed, not deploying" - exit 0 +if [ -n "$PR_NUMBER" ] ; then + API_URL="https://api.github.com/repos/AztecProtocol/aztec-packages/pulls/${PR_NUMBER}/files" + + echo "API URL: $API_URL" + + DOCS_CHANGED=$(curl -L \ + -H "Authorization: Bearer $AZTEC_BOT_COMMENTER_GITHUB_TOKEN" \ + "${API_URL}" | \ + jq '[.[] | select(.filename | startswith("docs/"))] | length > 0') + + echo "Docs changed: $DOCS_CHANGED" + + if [ "$DOCS_CHANGED" = "false" ]; then + echo "No docs changed, not deploying" + exit 0 + fi fi # Regular deploy if the argument is not "master" and docs changed @@ -27,5 +29,5 @@ echo "Unique deploy URL: $DOCS_PREVIEW_URL" cd ../yarn-project/scripts if [ -n "$PR_NUMBER" ] ; then - AZTEC_BOT_COMMENTER_GITHUB_TOKEN=$AZTEC_BOT_COMMENTER_GITHUB_TOKEN PR_NUMBER=$PR_NUMBER DOCS_PREVIEW_URL=$DOCS_PREVIEW_URL yarn docs-preview-comment + AZTEC_BOT_COMMENTER_GITHUB_TOKEN=$AZTEC_BOT_COMMENTER_GITHUB_TOKEN PR_NUMBER=$PR_NUMBER DOCS_PREVIEW_URL=$DOCS_PREVIEW_URL yarn docs-preview-comment fi diff --git a/docs/docs/migration_notes.md b/docs/docs/migration_notes.md index 2d3c4822628..bb841a9952e 100644 --- a/docs/docs/migration_notes.md +++ b/docs/docs/migration_notes.md @@ -6,7 +6,7 @@ keywords: [sandbox, aztec, notes, migration, updating, upgrading] Aztec is in full-speed development. Literally every version breaks compatibility with the previous ones. This page attempts to target errors and difficulties you might encounter when upgrading, and how to resolve them. -## TBD +## 0.65 ### [aztec.nr] Removed SharedImmutable diff --git a/noir-projects/aztec-nr/.gitrepo b/noir-projects/aztec-nr/.gitrepo index 6166af52d94..9e7ccc9d1da 100644 --- a/noir-projects/aztec-nr/.gitrepo +++ b/noir-projects/aztec-nr/.gitrepo @@ -6,7 +6,7 @@ [subrepo] remote = https://github.com/AztecProtocol/aztec-nr branch = master - commit = 10eb08d76cc3bb7cc4cd4d3ef64a42b702f4c5b9 + commit = 9e8a380f05a851ae84c456b7debe658414971a20 method = merge cmdver = 0.4.6 - parent = c5a4ee5304750adab8295dd15fa1682d0e9d24a8 + parent = d157084819c626ca76e78a9f92bbfa7f41123227 diff --git a/noir-projects/noir-protocol-circuits/scripts/flamegraph.sh b/noir-projects/noir-protocol-circuits/scripts/flamegraph.sh index df6269bece9..4f0868e573a 100755 --- a/noir-projects/noir-protocol-circuits/scripts/flamegraph.sh +++ b/noir-projects/noir-protocol-circuits/scripts/flamegraph.sh @@ -1,34 +1,85 @@ #!/usr/bin/env bash set -eu -EXAMPLE_CMD="$0 private_kernel_init" +EXAMPLE_CMD="$0 private_kernel_init rollup_merge" -# First arg is the circuit name. -if [[ $# -eq 0 || ($1 == -* && $1 != "-h") ]]; then - echo "Please specify the name of the circuit." - echo "e.g.: $EXAMPLE_CMD" - exit 1 -fi - -CIRCUIT_NAME=$1 +# Parse global options. +CIRCUIT_NAMES=() SERVE=false PORT=5000 +ALLOW_NO_CIRCUIT_NAMES=false + +# Get the directory of the script. 
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +# and of the artifact +ARTIFACT_DIR="$SCRIPT_DIR/../target" + +# Function to get filenames from a directory +get_filenames() { + local dir="$1" + # Return filenames (without extensions) from the directory + for file in "$dir"/*; do + if [[ -f "$file" ]]; then + filename="$(basename "$file" .${file##*.})" + echo "$filename" + fi + done +} + +NAUGHTY_LIST=("empty_nested") # files with no opcodes, which break the flamegraph tool. + +get_valid_circuit_names() { + # Capture the output of the function call in an array: + ALL_CIRCUIT_NAMES=($(get_filenames "$ARTIFACT_DIR")) + for circuit_name in "${ALL_CIRCUIT_NAMES[@]}"; do + # Skip files that include the substring "simulated" + if [[ "$circuit_name" == *"simulated"* ]]; then + continue + fi + # Skip the file if it's on the naughty list: + if [[ " ${NAUGHTY_LIST[@]} " =~ " ${circuit_name} " ]]; then + continue + fi + CIRCUIT_NAMES+=("$circuit_name") + done +} + while [[ $# -gt 0 ]]; do case $1 in -h|--help) - echo "Generates a flamegraph for the specified protocol circuit." + echo "Generates flamegraphs for the specified protocol circuits." echo "" echo "Usage:" - echo " $0 " + echo " $0 [ ...] [options]" echo "" - echo " e.g.: $EXAMPLE_CMD" + echo " e.g.: $EXAMPLE_CMD -s -p 8080" echo "" - echo "Arguments:" - echo " -s Serve the file over http" + echo "Options:" + echo " -s Serve the file(s) over http" echo " -p Specify custom port. Default: ${PORT}" echo "" + echo "If you're feeling lazy, you can also just list available (compiled) circuit names with:" + echo " $0 -l" exit 0 + ;; + -l|--list) + echo "Available circuits (that have been compiled):" + get_valid_circuit_names + for circuit_name in "${CIRCUIT_NAMES[@]}"; do + echo "$circuit_name" + done exit 0 ;; + -a|--all) + echo "This will probably take a while..." + get_valid_circuit_names + shift + ;; + -n|--allow-no-circuit-names) + # Enables the existing flamegraphs to be served quickly. + ALLOW_NO_CIRCUIT_NAMES=true + shift + ;; -s|--serve) SERVE=true shift @@ -43,22 +94,23 @@ while [[ $# -gt 0 ]]; do shift 2 ;; *) + # Treat any argument not matching an option as a CIRCUIT_NAME. + CIRCUIT_NAMES+=("$1") shift - ;; + ;; esac done -# Get the directory of the script. -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" - -# Check if the artifact exists. -ARTIFACT="$SCRIPT_DIR/../target/$CIRCUIT_NAME.json" -if [[ ! -f $ARTIFACT ]]; then - echo "Cannot find artifact: ${ARTIFACT}" - exit 1 +# Ensure at least one CIRCUIT_NAME was specified. +# (ALLOW_NO_CIRCUIT_NAMES holds the literal string "true"/"false", so run it as a command like $SERVE below; +# `[[ ! $ALLOW_NO_CIRCUIT_NAMES ]]` would only test for emptiness and never fire.) +if ! $ALLOW_NO_CIRCUIT_NAMES; then + if [[ ${#CIRCUIT_NAMES[@]} -eq 0 ]]; then + echo "Please specify at least one circuit name." + echo "e.g.: $EXAMPLE_CMD" + exit 1 + fi fi -# Build profier if it's not available. +# Build profiler if it's not available. PROFILER="$SCRIPT_DIR/../../../noir/noir-repo/target/release/noir-profiler" if [ ! -f $PROFILER ]; then echo "Profiler not found, building profiler" @@ -67,33 +119,49 @@ if [ ! -f $PROFILER ]; then cd "$SCRIPT_DIR" fi -# We create dest directory and use it as an output for the generated main.svg file. +# Create the output directory. DEST="$SCRIPT_DIR/../dest" mkdir -p $DEST MEGA_HONK_CIRCUIT_PATTERNS=$(jq -r '.[]' "$SCRIPT_DIR/../../mega_honk_circuits.json") -# Check if the target circuit is a mega honk circuit. -ARTIFACT_FILE_NAME=$(basename -s .json "$ARTIFACT") +# Process each CIRCUIT_NAME. +for CIRCUIT_NAME in "${CIRCUIT_NAMES[@]}"; do + ( + echo "" + echo "Doing $CIRCUIT_NAME..." + # Check if the artifact exists.
+ ARTIFACT="$ARTIFACT_DIR/$CIRCUIT_NAME.json" + if [[ ! -f $ARTIFACT ]]; then + echo "Cannot find artifact: ${ARTIFACT}" + exit 1 # exits only this circuit's subshell, so the remaining circuits keep going + fi -IS_MEGA_HONK_CIRCUIT="false" -for pattern in $MEGA_HONK_CIRCUIT_PATTERNS; do - if echo "$ARTIFACT_FILE_NAME" | grep -qE "$pattern"; then - IS_MEGA_HONK_CIRCUIT="true" - break - fi -done + ARTIFACT_FILE_NAME=$(basename -s .json "$ARTIFACT") -# At last, generate the flamegraph. -# If it's a mega honk circuit, we need to set the backend_gates_command argument to "gates_mega_honk". -if [ "$IS_MEGA_HONK_CIRCUIT" = "true" ]; then - $PROFILER gates-flamegraph --artifact-path "${ARTIFACT}" --backend-path "$SCRIPT_DIR/../../../barretenberg/cpp/build/bin/bb" --output "$DEST" --backend-gates-command "gates_mega_honk" -- -h -else - $PROFILER gates-flamegraph --artifact-path "${ARTIFACT}" --backend-path "$SCRIPT_DIR/../../../barretenberg/cpp/build/bin/bb" --output "$DEST" -- -h -fi + # Determine if the circuit is a mega honk circuit. + IS_MEGA_HONK_CIRCUIT="false" + for pattern in $MEGA_HONK_CIRCUIT_PATTERNS; do + if echo "$ARTIFACT_FILE_NAME" | grep -qE "$pattern"; then + IS_MEGA_HONK_CIRCUIT="true" + break + fi + done + + # Generate the flamegraph. + if [ "$IS_MEGA_HONK_CIRCUIT" = "true" ]; then + $PROFILER gates --artifact-path "${ARTIFACT}" --backend-path "$SCRIPT_DIR/../../../barretenberg/cpp/build/bin/bb" --output "$DEST" --output-filename "$CIRCUIT_NAME" --backend-gates-command "gates_mega_honk" -- -h + else + $PROFILER gates --artifact-path "${ARTIFACT}" --backend-path "$SCRIPT_DIR/../../../barretenberg/cpp/build/bin/bb" --output "$DEST" --output-filename "$CIRCUIT_NAME" -- -h + fi -# Serve the file over http if -s is set. + echo "Flamegraph generated for circuit: $CIRCUIT_NAME" + ) & # These parentheses `( stuff ) &` mean "do all this in parallel" +done +wait # wait for parallel processes to finish + +# Serve the files over HTTP if -s is set.
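+# e.g. after `./flamegraph.sh private_kernel_init -s`, open http://0.0.0.0:5000/ and pick the svg for your circuit; +# the profiler names each file "<output-filename>::<function>::gates.svg", e.g. "private_kernel_init::main::gates.svg" +# ("main" assumed as the entry function here).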
if $SERVE; then - echo "Serving flamegraph at http://0.0.0.0:${PORT}/main.svg" - python3 -m http.server --directory "$SCRIPT_DIR/../dest" $PORT -fi \ No newline at end of file + echo "Serving flamegraphs at http://0.0.0.0:${PORT}/" + python3 -m http.server --directory "$DEST" $PORT +fi diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs index d98f20ce6e7..7e5f88bcf66 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs @@ -823,7 +823,7 @@ impl<'f> Context<'f> { #[cfg(test)] mod test { - use acvm::acir::AcirField; + use acvm::{acir::AcirField, FieldElement}; use crate::ssa::{ function_builder::FunctionBuilder, @@ -1018,123 +1018,84 @@ mod test { // b7 b8 // ↘ ↙ // b9 - let main_id = Id::test_new(0); - let mut builder = FunctionBuilder::new("main".into(), main_id); - - let b1 = builder.insert_block(); - let b2 = builder.insert_block(); - let b3 = builder.insert_block(); - let b4 = builder.insert_block(); - let b5 = builder.insert_block(); - let b6 = builder.insert_block(); - let b7 = builder.insert_block(); - let b8 = builder.insert_block(); - let b9 = builder.insert_block(); - - let c1 = builder.add_parameter(Type::bool()); - let c4 = builder.add_parameter(Type::bool()); - - let r1 = builder.insert_allocate(Type::field()); - - let store_value = |builder: &mut FunctionBuilder, value: u128| { - let value = builder.field_constant(value); - builder.insert_store(r1, value); - }; - - let test_function = Id::test_new(1); - - let call_test_function = |builder: &mut FunctionBuilder, block: u128| { - let block = builder.field_constant(block); - let load = builder.insert_load(r1, Type::field()); - builder.insert_call(test_function, vec![block, load], Vec::new()); - }; - - let switch_store_and_test_function = - |builder: &mut FunctionBuilder, block, block_number: u128| { - builder.switch_to_block(block); - store_value(builder, block_number); - call_test_function(builder, block_number); - }; - - let switch_and_test_function = - |builder: &mut FunctionBuilder, block, block_number: u128| { - builder.switch_to_block(block); - call_test_function(builder, block_number); - }; - - store_value(&mut builder, 0); - call_test_function(&mut builder, 0); - builder.terminate_with_jmp(b1, vec![]); - - switch_store_and_test_function(&mut builder, b1, 1); - builder.terminate_with_jmpif(c1, b2, b3); - - switch_store_and_test_function(&mut builder, b2, 2); - builder.terminate_with_jmp(b4, vec![]); - - switch_store_and_test_function(&mut builder, b3, 3); - builder.terminate_with_jmp(b8, vec![]); - - switch_and_test_function(&mut builder, b4, 4); - builder.terminate_with_jmpif(c4, b5, b6); - - switch_store_and_test_function(&mut builder, b5, 5); - builder.terminate_with_jmp(b7, vec![]); - - switch_store_and_test_function(&mut builder, b6, 6); - builder.terminate_with_jmp(b7, vec![]); - - switch_and_test_function(&mut builder, b7, 7); - builder.terminate_with_jmp(b9, vec![]); - - switch_and_test_function(&mut builder, b8, 8); - builder.terminate_with_jmp(b9, vec![]); - - switch_and_test_function(&mut builder, b9, 9); - let load = builder.insert_load(r1, Type::field()); - builder.terminate_with_return(vec![load]); + let src = " + acir(inline) fn main f0 { + b0(v0: u1, v1: u1): + v2 = allocate -> &mut Field + store Field 0 at v2 + v4 = load v2 -> Field + // call v1(Field 0, v4) + jmp b1() + b1(): + store Field 1 at v2 + v6 = load v2 
-> Field + // call v1(Field 1, v6) + jmpif v0 then: b2, else: b3 + b2(): + store Field 2 at v2 + v8 = load v2 -> Field + // call v1(Field 2, v8) + jmp b4() + b4(): + v12 = load v2 -> Field + // call v1(Field 4, v12) + jmpif v1 then: b5, else: b6 + b5(): + store Field 5 at v2 + v14 = load v2 -> Field + // call v1(Field 5, v14) + jmp b7() + b7(): + v18 = load v2 -> Field + // call v1(Field 7, v18) + jmp b9() + b9(): + v22 = load v2 -> Field + // call v1(Field 9, v22) + v23 = load v2 -> Field + return v23 + b6(): + store Field 6 at v2 + v16 = load v2 -> Field + // call v1(Field 6, v16) + jmp b7() + b3(): + store Field 3 at v2 + v10 = load v2 -> Field + // call v1(Field 3, v10) + jmp b8() + b8(): + v20 = load v2 -> Field + // call v1(Field 8, v20) + jmp b9() + } + "; - let ssa = builder.finish().flatten_cfg().mem2reg(); + let ssa = Ssa::from_str(src).unwrap(); + let ssa = ssa.flatten_cfg().mem2reg(); // Expected results after mem2reg removes the allocation and each load and store: - // - // fn main f0 { - // b0(v0: u1, v1: u1): - // call test_function(Field 0, Field 0) - // call test_function(Field 1, Field 1) - // enable_side_effects v0 - // call test_function(Field 2, Field 2) - // call test_function(Field 4, Field 2) - // v29 = and v0, v1 - // enable_side_effects v29 - // call test_function(Field 5, Field 5) - // v32 = not v1 - // v33 = and v0, v32 - // enable_side_effects v33 - // call test_function(Field 6, Field 6) - // enable_side_effects v0 - // v36 = mul v1, Field 5 - // v37 = mul v32, Field 2 - // v38 = add v36, v37 - // v39 = mul v1, Field 5 - // v40 = mul v32, Field 6 - // v41 = add v39, v40 - // call test_function(Field 7, v42) - // v43 = not v0 - // enable_side_effects v43 - // store Field 3 at v2 - // call test_function(Field 3, Field 3) - // call test_function(Field 8, Field 3) - // enable_side_effects Field 1 - // v47 = mul v0, v41 - // v48 = mul v43, Field 1 - // v49 = add v47, v48 - // v50 = mul v0, v44 - // v51 = mul v43, Field 3 - // v52 = add v50, v51 - // call test_function(Field 9, v53) - // return v54 - // } + let expected = " + acir(inline) fn main f0 { + b0(v0: u1, v1: u1): + v2 = allocate -> &mut Field + enable_side_effects v0 + v3 = mul v0, v1 + enable_side_effects v3 + v4 = not v1 + v5 = mul v0, v4 + enable_side_effects v0 + v6 = cast v3 as Field + v8 = mul v6, Field -1 + v10 = add Field 6, v8 + v11 = not v0 + enable_side_effects u1 1 + v13 = cast v0 as Field + v15 = sub v10, Field 3 + v16 = mul v13, v15 + v17 = add Field 3, v16 + return v17 + }"; let main = ssa.main(); let ret = match main.dfg[main.entry_block()].terminator() { @@ -1143,7 +1104,12 @@ mod test { }; let merged_values = get_all_constants_reachable_from_instruction(&main.dfg, ret); - assert_eq!(merged_values, vec![1, 3, 5, 6]); + assert_eq!( + merged_values, + vec![FieldElement::from(3u128), FieldElement::from(6u128), -FieldElement::from(1u128)] + ); + + assert_normalized_ssa_equals(ssa, expected); } #[test] @@ -1224,7 +1190,7 @@ mod test { fn get_all_constants_reachable_from_instruction( dfg: &DataFlowGraph, value: ValueId, - ) -> Vec { + ) -> Vec { match dfg[value] { Value::Instruction { instruction, .. } => { let mut values = vec![]; @@ -1242,7 +1208,7 @@ mod test { values.dedup(); values } - Value::NumericConstant { constant, .. } => vec![constant.to_u128()], + Value::NumericConstant { constant, .. 
} => vec![constant], _ => Vec::new(), } } diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/inlining.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/inlining.rs index f91487fd73e..6cf7070e65e 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/inlining.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/inlining.rs @@ -1089,6 +1089,7 @@ mod test { } #[test] + #[ignore] #[should_panic( expected = "Attempted to recur more than 1000 times during inlining function 'main': acir(inline) fn main f0 {" )] diff --git a/noir/noir-repo/tooling/profiler/src/cli/gates_flamegraph_cmd.rs b/noir/noir-repo/tooling/profiler/src/cli/gates_flamegraph_cmd.rs index c3ae29de058..e68a8cd5bd2 100644 --- a/noir/noir-repo/tooling/profiler/src/cli/gates_flamegraph_cmd.rs +++ b/noir/noir-repo/tooling/profiler/src/cli/gates_flamegraph_cmd.rs @@ -31,6 +31,10 @@ pub(crate) struct GatesFlamegraphCommand { /// The output folder for the flamegraph svg files #[clap(long, short)] output: String, + + /// The output name for the flamegraph svg files + #[clap(long, short = 'f')] + output_filename: Option, } pub(crate) fn run(args: GatesFlamegraphCommand) -> eyre::Result<()> { @@ -43,6 +47,7 @@ pub(crate) fn run(args: GatesFlamegraphCommand) -> eyre::Result<()> { }, &InfernoFlamegraphGenerator { count_name: "gates".to_string() }, &PathBuf::from(args.output), + args.output_filename, ) } @@ -51,6 +56,7 @@ fn run_with_provider( gates_provider: &Provider, flamegraph_generator: &Generator, output_path: &Path, + output_filename: Option, ) -> eyre::Result<()> { let mut program = read_program_from_file(artifact_path).context("Error reading program from file")?; @@ -91,13 +97,18 @@ fn run_with_provider( }) .collect(); + let output_filename = if let Some(output_filename) = &output_filename { + format!("{}::{}::gates.svg", output_filename, func_name) + } else { + format!("{}::gates.svg", func_name) + }; flamegraph_generator.generate_flamegraph( samples, &debug_artifact.debug_symbols[func_idx], &debug_artifact, artifact_path.to_str().unwrap(), &func_name, - &Path::new(&output_path).join(Path::new(&format!("{}_gates.svg", &func_name))), + &Path::new(&output_path).join(Path::new(&output_filename)), )?; } @@ -189,11 +200,17 @@ mod tests { }; let flamegraph_generator = TestFlamegraphGenerator::default(); - super::run_with_provider(&artifact_path, &provider, &flamegraph_generator, temp_dir.path()) - .expect("should run without errors"); + super::run_with_provider( + &artifact_path, + &provider, + &flamegraph_generator, + temp_dir.path(), + Some(String::from("test_filename")), + ) + .expect("should run without errors"); // Check that the output file was written to - let output_file = temp_dir.path().join("main_gates.svg"); + let output_file = temp_dir.path().join("test_filename::main::gates.svg"); assert!(output_file.exists()); } } diff --git a/noir/noir-repo/yarn.lock b/noir/noir-repo/yarn.lock index 03cea21026e..3c8df2b1772 100644 --- a/noir/noir-repo/yarn.lock +++ b/noir/noir-repo/yarn.lock @@ -229,6 +229,7 @@ __metadata: commander: ^10.0.1 debug: ^4.3.4 fflate: ^0.8.0 + pako: ^2.1.0 tslib: ^2.4.0 bin: bb.js: ./dest/node/main.js diff --git a/noir/scripts/test_native.sh b/noir/scripts/test_native.sh index 1f6d633935d..e0b3618f836 100755 --- a/noir/scripts/test_native.sh +++ b/noir/scripts/test_native.sh @@ -14,4 +14,6 @@ RUSTFLAGS=-Dwarnings cargo clippy --workspace --locked --release ./.github/scripts/cargo-binstall-install.sh cargo-binstall cargo-nextest --version 0.9.67 -y --secure +# See 
https://github.com/AztecProtocol/aztec-packages/pull/10080 +RUST_MIN_STACK=8388608 cargo nextest run --workspace --locked --release -E '!test(hello_world_example) & !test(simple_verifier_codegen)' diff --git a/yarn-project/aztec/CHANGELOG.md b/yarn-project/aztec/CHANGELOG.md index 2f8bfc7a242..63e3afde6aa 100644 --- a/yarn-project/aztec/CHANGELOG.md +++ b/yarn-project/aztec/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog +## [0.65.1](https://github.com/AztecProtocol/aztec-packages/compare/aztec-package-v0.65.0...aztec-package-v0.65.1) (2024-11-27) + + +### Miscellaneous + +* Delete old serialization methods ([#9951](https://github.com/AztecProtocol/aztec-packages/issues/9951)) ([10d3f6f](https://github.com/AztecProtocol/aztec-packages/commit/10d3f6fe851dc73f5f12edec26b028fe526f0be6)) + ## [0.65.0](https://github.com/AztecProtocol/aztec-packages/compare/aztec-package-v0.64.0...aztec-package-v0.65.0) (2024-11-26) diff --git a/yarn-project/aztec/package.json b/yarn-project/aztec/package.json index 5fbb4579579..4555701a969 100644 --- a/yarn-project/aztec/package.json +++ b/yarn-project/aztec/package.json @@ -1,6 +1,6 @@ { "name": "@aztec/aztec", - "version": "0.65.0", + "version": "0.65.1", "type": "module", "exports": { ".": "./dest/index.js" diff --git a/yarn-project/bb-prover/src/bb/execute.ts b/yarn-project/bb-prover/src/bb/execute.ts index a1f9af822e7..e73a210f3f9 100644 --- a/yarn-project/bb-prover/src/bb/execute.ts +++ b/yarn-project/bb-prover/src/bb/execute.ts @@ -431,10 +431,8 @@ export async function generateTubeProof( } // // Paths for the inputs - const vkPath = join(workingDirectory, 'mega_vk.bin'); + const vkPath = join(workingDirectory, 'client_ivc_vk.bin'); const proofPath = join(workingDirectory, 'client_ivc_proof.bin'); - const translatorVkPath = join(workingDirectory, 'translator_vk.bin'); - const eccVkPath = join(workingDirectory, 'ecc_vk.bin'); // The proof is written to e.g. 
/workingDirectory/proof const outputPath = workingDirectory; @@ -450,7 +448,7 @@ export async function generateTubeProof( } try { - if (!filePresent(vkPath) || !filePresent(proofPath) || !filePresent(translatorVkPath) || !filePresent(eccVkPath)) { + if (!filePresent(vkPath) || !filePresent(proofPath)) { return { status: BB_RESULT.FAILURE, reason: `Client IVC input files not present in ${workingDirectory}` }; } const args = ['-o', outputPath, '-v']; diff --git a/yarn-project/circuits.js/src/structs/client_ivc_proof.ts b/yarn-project/circuits.js/src/structs/client_ivc_proof.ts index a35b8f6c84f..9c47db7e845 100644 --- a/yarn-project/circuits.js/src/structs/client_ivc_proof.ts +++ b/yarn-project/circuits.js/src/structs/client_ivc_proof.ts @@ -13,10 +13,8 @@ export class ClientIvcProof { // produced by the sequencer when making the tube proof // TODO(https://github.com/AztecProtocol/aztec-packages/issues/7370): Need to precompute private kernel tail VK so we can verify this immediately in the tx pool // which parts of these are needed to quickly verify that we have a correct IVC proof - public megaVkBuffer: Buffer, public clientIvcProofBuffer: Buffer, - public translatorVkBuffer: Buffer, - public eccVkBuffer: Buffer, + public clientIvcVkBuffer: Buffer, ) {} public isEmpty() { @@ -24,7 +22,7 @@ export class ClientIvcProof { } static empty() { - return new ClientIvcProof(Buffer.from(''), Buffer.from(''), Buffer.from(''), Buffer.from('')); + return new ClientIvcProof(Buffer.from(''), Buffer.from('')); } /** @@ -34,12 +32,10 @@ export class ClientIvcProof { * @returns the encapsulated client ivc proof */ static async readFromOutputDirectory(directory: string) { - const [megaVkBuffer, clientIvcProofBuffer, translatorVkBuffer, eccVkBuffer] = await Promise.all( - ['mega_vk', 'client_ivc_proof', 'translator_vk', 'ecc_vk'].map(fileName => - fs.readFile(path.join(directory, fileName)), - ), + const [clientIvcVkBuffer, clientIvcProofBuffer] = await Promise.all( + ['client_ivc_vk', 'client_ivc_proof'].map(fileName => fs.readFile(path.join(directory, fileName))), ); - return new ClientIvcProof(megaVkBuffer, clientIvcProofBuffer, translatorVkBuffer, eccVkBuffer); + return new ClientIvcProof(clientIvcProofBuffer, clientIvcVkBuffer); } /** @@ -56,12 +52,10 @@ export class ClientIvcProof { * @param directory the directory of results */ async writeToOutputDirectory(directory: string) { - const { megaVkBuffer, clientIvcProofBuffer, translatorVkBuffer, eccVkBuffer } = this; + const { clientIvcProofBuffer, clientIvcVkBuffer } = this; const fileData = [ - ['mega_vk', megaVkBuffer], ['client_ivc_proof', clientIvcProofBuffer], - ['translator_vk', translatorVkBuffer], - ['ecc_vk', eccVkBuffer], + ['client_ivc_vk', clientIvcVkBuffer], ] as const; await Promise.all(fileData.map(([fileName, buffer]) => fs.writeFile(path.join(directory, fileName), buffer))); } @@ -76,19 +70,15 @@ export class ClientIvcProof { static fromBuffer(buffer: Buffer | BufferReader): ClientIvcProof { const reader = BufferReader.asReader(buffer); - return new ClientIvcProof(reader.readBuffer(), reader.readBuffer(), reader.readBuffer(), reader.readBuffer()); + return new ClientIvcProof(reader.readBuffer(), reader.readBuffer()); } public toBuffer() { return serializeToBuffer( - this.megaVkBuffer.length, - this.megaVkBuffer, this.clientIvcProofBuffer.length, this.clientIvcProofBuffer, - this.translatorVkBuffer.length, - this.translatorVkBuffer, - this.eccVkBuffer.length, - this.eccVkBuffer, + this.clientIvcVkBuffer.length, + 
this.clientIvcVkBuffer, ); } } diff --git a/yarn-project/end-to-end/src/e2e_private_voting_contract.test.ts b/yarn-project/end-to-end/src/e2e_private_voting_contract.test.ts index d4f3702c4ef..fb400a50510 100644 --- a/yarn-project/end-to-end/src/e2e_private_voting_contract.test.ts +++ b/yarn-project/end-to-end/src/e2e_private_voting_contract.test.ts @@ -31,9 +31,12 @@ describe('e2e_voting_contract', () => { expect(await votingContract.methods.get_vote(candidate).simulate()).toBe(1n); // We try voting again, but our TX is dropped due to trying to emit duplicate nullifiers - await expect(votingContract.methods.cast_vote(candidate).send().wait()).rejects.toThrow( - 'Reason: Tx dropped by P2P node.', - ); + // first confirm that it fails simulation + await expect(votingContract.methods.cast_vote(candidate).send().wait()).rejects.toThrow(/Nullifier collision/); + // if we skip simulation, tx is dropped + await expect( + votingContract.methods.cast_vote(candidate).send({ skipPublicSimulation: true }).wait(), + ).rejects.toThrow('Reason: Tx dropped by P2P node.'); }); }); }); diff --git a/yarn-project/end-to-end/src/fixtures/fixtures.ts b/yarn-project/end-to-end/src/fixtures/fixtures.ts index ccaa85f3516..79d10d0d2da 100644 --- a/yarn-project/end-to-end/src/fixtures/fixtures.ts +++ b/yarn-project/end-to-end/src/fixtures/fixtures.ts @@ -16,7 +16,7 @@ export const U128_UNDERFLOW_ERROR = "Assertion failed: attempt to subtract with export const U128_OVERFLOW_ERROR = "Assertion failed: attempt to add with overflow 'hi == high'"; export const BITSIZE_TOO_BIG_ERROR = "Assertion failed: call to assert_max_bit_size 'self.__assert_max_bit_size'"; // TODO(https://github.com/AztecProtocol/aztec-packages/issues/5818): Make these a fixed error after transition. 
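(Aside: the DUPLICATE_NULLIFIER_ERROR pattern just below gains a `Nullifier collision` alternative, which is what public simulation now throws, as exercised by the voting test above; the older alternatives cover P2P drops and reverts. A standalone sketch of what the broadened regex accepts, with hypothetical error messages:)

```typescript
const DUPLICATE_NULLIFIER_ERROR = /dropped|duplicate nullifier|reverted|Nullifier collision/;
// Each of these now counts as a duplicate-nullifier failure:
console.log(DUPLICATE_NULLIFIER_ERROR.test('Reason: Tx dropped by P2P node.')); // true
console.log(DUPLICATE_NULLIFIER_ERROR.test('Nullifier collision: 0x0123')); // true
console.log(DUPLICATE_NULLIFIER_ERROR.test('some unrelated failure')); // false
```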
-export const DUPLICATE_NULLIFIER_ERROR = /dropped|duplicate nullifier|reverted/; +export const DUPLICATE_NULLIFIER_ERROR = /dropped|duplicate nullifier|reverted|Nullifier collision/; export const NO_L1_TO_L2_MSG_ERROR = /No non-nullified L1 to L2 message found for message hash|Tried to consume nonexistent L1-to-L2 message/; export const STATIC_CALL_STATE_MODIFICATION_ERROR = diff --git a/yarn-project/end-to-end/src/prover-coordination/e2e_prover_coordination.test.ts b/yarn-project/end-to-end/src/prover-coordination/e2e_prover_coordination.test.ts index 6ec6832ff53..bf0cf8934e0 100644 --- a/yarn-project/end-to-end/src/prover-coordination/e2e_prover_coordination.test.ts +++ b/yarn-project/end-to-end/src/prover-coordination/e2e_prover_coordination.test.ts @@ -128,6 +128,10 @@ describe('e2e_prover_coordination', () => { await performEscrow(10000000n); }); + afterEach(async () => { + await snapshotManager.teardown(); + }); + const expectProofClaimOnL1 = async (expected: { epochToProve: bigint; basisPointFee: number; diff --git a/yarn-project/simulator/src/avm/avm_simulator.test.ts b/yarn-project/simulator/src/avm/avm_simulator.test.ts index 00366a84374..06811af1557 100644 --- a/yarn-project/simulator/src/avm/avm_simulator.test.ts +++ b/yarn-project/simulator/src/avm/avm_simulator.test.ts @@ -8,7 +8,7 @@ import { SerializableContractInstance, } from '@aztec/circuits.js'; import { Grumpkin } from '@aztec/circuits.js/barretenberg'; -import { computePublicDataTreeLeafSlot, computeVarArgsHash } from '@aztec/circuits.js/hash'; +import { computePublicDataTreeLeafSlot, computeVarArgsHash, siloNullifier } from '@aztec/circuits.js/hash'; import { makeContractClassPublic, makeContractInstanceFromClassId } from '@aztec/circuits.js/testing'; import { FunctionSelector } from '@aztec/foundation/abi'; import { AztecAddress } from '@aztec/foundation/aztec-address'; @@ -54,11 +54,7 @@ import { EmitUnencryptedLog, type Instruction, Jump, - L1ToL2MessageExists, - NoteHashExists, - NullifierExists, Return, - SLoad, SStore, SendL2ToL1Message, Set, @@ -534,6 +530,7 @@ describe('AVM simulator: transpiled Noir contracts', () => { const listSlot1 = new Fr(listSlotNumber1); const value0 = new Fr(420); const value1 = new Fr(69); + const siloedNullifier0 = siloNullifier(address, value0); let worldStateDB: WorldStateDB; let trace: PublicSideEffectTraceInterface; @@ -605,7 +602,7 @@ describe('AVM simulator: transpiled Noir contracts', () => { const isPending = false; // leafIndex is returned from DB call for nullifiers, so it is absent on DB miss const _tracedLeafIndex = exists && !isPending ? 
leafIndex : Fr.ZERO; - expect(trace.traceNullifierCheck).toHaveBeenCalledWith(address, /*nullifier=*/ value0, exists); + expect(trace.traceNullifierCheck).toHaveBeenCalledWith(siloedNullifier0, exists); }); }); @@ -671,7 +668,7 @@ describe('AVM simulator: transpiled Noir contracts', () => { expect(results.output).toEqual([]); expect(trace.traceNewNullifier).toHaveBeenCalledTimes(1); - expect(trace.traceNewNullifier).toHaveBeenCalledWith(expect.objectContaining(address), /*nullifier=*/ value0); + expect(trace.traceNewNullifier).toHaveBeenCalledWith(siloedNullifier0); }); describe('Cached nullifiers', () => { @@ -686,10 +683,10 @@ describe('AVM simulator: transpiled Noir contracts', () => { // New nullifier and nullifier existence check should be traced expect(trace.traceNewNullifier).toHaveBeenCalledTimes(1); - expect(trace.traceNewNullifier).toHaveBeenCalledWith(expect.objectContaining(address), /*nullifier=*/ value0); + expect(trace.traceNewNullifier).toHaveBeenCalledWith(siloedNullifier0); expect(trace.traceNullifierCheck).toHaveBeenCalledTimes(1); // leafIndex is returned from DB call for nullifiers, so it is absent on DB miss - expect(trace.traceNullifierCheck).toHaveBeenCalledWith(address, /*nullifier=*/ value0, /*exists=*/ true); + expect(trace.traceNullifierCheck).toHaveBeenCalledWith(siloedNullifier0, /*exists=*/ true); }); it(`Emits same nullifier twice (expect failure)`, async () => { const calldata = [value0]; @@ -703,7 +700,7 @@ describe('AVM simulator: transpiled Noir contracts', () => { // Nullifier should be traced exactly once expect(trace.traceNewNullifier).toHaveBeenCalledTimes(1); - expect(trace.traceNewNullifier).toHaveBeenCalledWith(expect.objectContaining(address), /*nullifier=*/ value0); + expect(trace.traceNewNullifier).toHaveBeenCalledWith(siloedNullifier0); }); }); @@ -1102,27 +1099,8 @@ describe('AVM simulator: transpiled Noir contracts', () => { it.each([ ['Public storage writes', () => new SStore(/*indirect=*/ 0, /*srcOffset=*/ 0, /*slotOffset=*/ 0)], - ['Public storage reads', () => new SLoad(/*indirect=*/ 0, /*slotOffset=*/ 0, /*dstOffset=*/ 0)], - [ - 'Note hash checks', - () => new NoteHashExists(/*indirect=*/ 0, /*noteHashOffset=*/ 0, /*leafIndexOffest=*/ 0, /*existsOffset=*/ 1), - ], ['New note hashes', () => new EmitNoteHash(/*indirect=*/ 0, /*noteHashOffset=*/ 0)], - [ - 'Nullifier checks', - () => new NullifierExists(/*indirect=*/ 0, /*nullifierOffset=*/ 0, /*addressOffest=*/ 0, /*existsOffset=*/ 1), - ], ['New nullifiers', () => new EmitNullifier(/*indirect=*/ 0, /*noteHashOffset=*/ 0)], - [ - 'L1 to L2 message checks', - () => - new L1ToL2MessageExists( - /*indirect=*/ 0, - /*msgHashOffset=*/ 0, - /*msgLeafIndexOffest=*/ 0, - /*existsOffset=*/ 1, - ), - ], ['New unencrypted logs', () => new EmitUnencryptedLog(/*indirect=*/ 0, /*logOffset=*/ 0, /*logSizeOffest=*/ 1)], [ 'New L1 to L2 messages', diff --git a/yarn-project/simulator/src/avm/avm_tree.test.ts b/yarn-project/simulator/src/avm/avm_tree.test.ts index 375ecd46c4c..aa0ab6bc07b 100644 --- a/yarn-project/simulator/src/avm/avm_tree.test.ts +++ b/yarn-project/simulator/src/avm/avm_tree.test.ts @@ -567,6 +567,7 @@ describe('Batch Insertion', () => { }); }); +// This benchmark also performs a convenient sanity check /* eslint no-console: ["error", { allow: ["time", "timeEnd"] }] */ describe('A basic benchmark', () => { it('Should benchmark writes', async () => { @@ -576,14 +577,30 @@ describe('A basic benchmark', () => { const slots = leaves.map((_, i) => new Fr(i + 128)); const container = await 
AvmEphemeralForest.create(copyState); + await publicDataInsertWorldState(new Fr(0), new Fr(128)); // Updating the first slot, triggers the index 0 to be added to the minimum stored key in the container await container.writePublicStorage(new Fr(0), new Fr(128)); + + // Check Roots before benchmarking + let wsRoot = await getWorldStateRoot(MerkleTreeId.PUBLIC_DATA_TREE); + let computedRoot = container.treeMap.get(MerkleTreeId.PUBLIC_DATA_TREE)!.getRoot(); + expect(computedRoot.toBuffer()).toEqual(wsRoot); + console.time('benchmark'); // These writes are all new leaves and should be impacted by the key sorted algorithm of the tree. for (let i = 0; i < leaves.length; i++) { await container.writePublicStorage(slots[i], leaves[i]); } console.timeEnd('benchmark'); + + // Update worldstate for sanity check + for (let i = 0; i < leaves.length; i++) { + await publicDataInsertWorldState(slots[i], leaves[i]); + } + // Check roots + wsRoot = await getWorldStateRoot(MerkleTreeId.PUBLIC_DATA_TREE); + computedRoot = container.treeMap.get(MerkleTreeId.PUBLIC_DATA_TREE)!.getRoot(); + expect(computedRoot.toBuffer()).toEqual(wsRoot); }); }); diff --git a/yarn-project/simulator/src/avm/avm_tree.ts b/yarn-project/simulator/src/avm/avm_tree.ts index d1c18a913a4..86c4b160d7d 100644 --- a/yarn-project/simulator/src/avm/avm_tree.ts +++ b/yarn-project/simulator/src/avm/avm_tree.ts @@ -4,6 +4,7 @@ import { poseidon2Hash } from '@aztec/foundation/crypto'; import { Fr } from '@aztec/foundation/fields'; import { type IndexedTreeLeafPreimage, type TreeLeafPreimage } from '@aztec/foundation/trees'; +import { strict as assert } from 'assert'; import cloneDeep from 'lodash.clonedeep'; /****************************************************/ @@ -147,11 +148,18 @@ export class AvmEphemeralForest { // This only works for the public data tree const treeId = MerkleTreeId.PUBLIC_DATA_TREE; const tree = this.treeMap.get(treeId)!; - const { preimage, index, update }: PreimageWitness = await this.getLeafOrLowLeafInfo( - treeId, - slot, - ); + const [leafOrLowLeafInfo, pathAbsentInEphemeralTree] = await this._getLeafOrLowLeafInfo< + typeof treeId, + PublicDataTreeLeafPreimage + >(treeId, slot); + const { preimage, index, update } = leafOrLowLeafInfo; const siblingPath = await this.getSiblingPath(treeId, index); + + if (pathAbsentInEphemeralTree) { + // Since we have never seen this before - we should insert it into our tree as it is about to be updated. 
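+ // Inserting just the sibling path gives the ephemeral tree this leaf's neighborhood, so the pending write below can be applied and the new root recomputed locally.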
+ this.treeMap.get(treeId)!.insertSiblingPath(index, siblingPath); + } + if (update) { const updatedPreimage = cloneDeep(preimage); const existingPublicDataSiblingPath = siblingPath; @@ -222,7 +230,7 @@ if (foundIndex === -1) { // New element, we splice it into the correct location const spliceIndex = - this.searchForKey( + indexOrNextLowestInArray( keys[i], existingKeyVector.map(x => x[0]), ) + 1; @@ -242,15 +250,20 @@ async appendNullifier(nullifier: Fr): Promise> { const treeId = MerkleTreeId.NULLIFIER_TREE; const tree = this.treeMap.get(treeId)!; - const { preimage, index, update }: PreimageWitness = await this.getLeafOrLowLeafInfo( - treeId, - nullifier, - ); + const [leafOrLowLeafInfo, pathAbsentInEphemeralTree] = await this._getLeafOrLowLeafInfo< + typeof treeId, + NullifierLeafPreimage + >(treeId, nullifier); + const { preimage, index, update } = leafOrLowLeafInfo; const siblingPath = await this.getSiblingPath(treeId, index); - if (update) { - throw new Error('Not allowed to update a nullifier'); + if (pathAbsentInEphemeralTree) { + // Since we have never seen this before - we should insert it into our tree as it is about to be updated. + this.treeMap.get(treeId)!.insertSiblingPath(index, siblingPath); } + + assert(!update, 'Nullifier already exists in the tree. Cannot update a nullifier!'); + // We are writing a new entry const insertionIndex = tree.leafCount; const updatedLowNullifier = cloneDeep(preimage); @@ -311,17 +324,17 @@ } /** - * This is wrapper around treeId to get values in the indexedUpdates map + * This is a wrapper around treeId to get values in the indexedUpdates map. + * Should only be called if we know the value exists. */ - private getIndexedUpdates(treeId: ID, index: bigint): T { + private getIndexedUpdate(treeId: ID, index: bigint): T { const updates = this.indexedUpdates.get(treeId); - if (updates === undefined) { - throw new Error('No updates found'); - } + assert(updates !== undefined, `No updates exist in the ephemeral ${MerkleTreeId[treeId]} tree.`); const preimage = updates.get(index); - if (preimage === undefined) { - throw new Error('No updates found'); - } + assert( + preimage !== undefined, + `No update exists in the ephemeral ${MerkleTreeId[treeId]} tree for leafIndex ${index}.`, + ); return preimage as T; } @@ -336,94 +349,165 @@ return updates.has(index); } - private searchForKey(key: Fr, arr: Fr[]): number { - // We are looking for the index of the largest element in the array that is less than the key - let start = 0; - let end = arr.length; - // Note that the easiest way is to increment the search key by 1 and then do a binary search - const searchKey = key.add(Fr.ONE); - while (start < end) { - const mid = Math.floor((start + end) / 2); - if (arr[mid].cmp(searchKey) < 0) { - // The key + 1 is greater than the arr element, so we can continue searching the top half - start = mid + 1; - } else { - // The key + 1 is LT or EQ the arr element, so we can continue searching the bottom half - end = mid; - } - } - // We either found key + 1 or start is now at the index of the largest element that we would have inserted key + 1 - // Therefore start - 1 is the index of the element just below - note it can be -1 if the first element in the array is - // greater than the key - return start - 1; - } - /** - * This gets the low leaf preimage and the index of the low leaf in the indexed tree given a value (slot or nullifier
value) - * If the value is not found in the tree, it does an external lookup to the merkleDB + * Get the leaf or low leaf preimage and its index in the indexed tree given a key (slot or nullifier value). + * If the key is not found in the tree, it does an external lookup to the underlying merkle DB. * @param treeId - The tree we are looking up in - * @param key - The key for which we are look up the low leaf for. + * @param key - The key for which we look up the leaf or low leaf. * @param T - The type of the preimage (PublicData or Nullifier) - * @returns The low leaf preimage and the index of the low leaf in the indexed tree + * @returns The leaf or low leaf info (preimage & leaf index). */ async getLeafOrLowLeafInfo( treeId: ID, key: Fr, ): Promise<PreimageWitness<T>> { + const [leafOrLowLeafInfo, _] = await this._getLeafOrLowLeafInfo(treeId, key); + return leafOrLowLeafInfo; + } + + /** + * Internal helper to get the leaf or low leaf preimage and its index in the indexed tree given a key (slot or nullifier value). + * If the key is not found in the tree, it does an external lookup to the underlying merkle DB. + * Also indicates whether the sibling path is absent in the ephemeral tree. + * @param treeId - The tree we are looking up in + * @param key - The key for which we look up the leaf or low leaf. + * @param T - The type of the preimage (PublicData or Nullifier) + * @returns [ + * preimageWitness - The leaf or low leaf info (preimage & leaf index), + * pathAbsentInEphemeralTree - whether its sibling path is absent in the ephemeral tree (useful during insertions) + * ] + */ + async _getLeafOrLowLeafInfo( + treeId: ID, + key: Fr, + ): Promise<[PreimageWitness<T>, /*pathAbsentInEphemeralTree=*/ boolean]> { + const bigIntKey = key.toBigInt(); + // In this function, "min" refers to the leaf with the + // largest key <= the specified key in the indexedUpdates. + // In other words, the leaf with the "next lowest" key in indexedUpdates. + + // First, search the indexed updates (no DB fallback) to find + // the leafIndex of the leaf with the largest key <= the specified key. + const minIndexedLeafIndex = this._getLeafIndexOrNextLowestInIndexedUpdates(treeId, key); + if (minIndexedLeafIndex === -1n) { + // No leaf is present in the indexed updates that is <= the key, + // so retrieve the leaf or low leaf from the underlying DB. + const leafOrLowLeafPreimage: PreimageWitness<T> = await this._getLeafOrLowLeafWitnessInExternalDb( + treeId, + bigIntKey, + ); + return [leafOrLowLeafPreimage, /*pathAbsentInEphemeralTree=*/ true]; + } else { + // A leaf was found in the indexed updates that is <= the key + const minPreimage: T = this.getIndexedUpdate(treeId, minIndexedLeafIndex); + if (minPreimage.getKey() === bigIntKey) { + // the index found is an exact match, no need to search further + const leafInfo = { preimage: minPreimage, index: minIndexedLeafIndex, update: true }; + return [leafInfo, /*pathAbsentInEphemeralTree=*/ false]; + } else { + // We are starting with the leaf with largest key <= the specified key + // Starting at that "min leaf", search for specified key in both the indexed updates + // and the underlying DB. If not found, return its low leaf. + const [leafOrLowLeafInfo, pathAbsentInEphemeralTree] = await this._searchForLeafOrLowLeaf( + treeId, + bigIntKey, + minPreimage, + minIndexedLeafIndex, + ); + // Not finding anything here would be unexpected - the leaf OR low leaf should always be present + assert(leafOrLowLeafInfo !== undefined, 'Could not find leaf or low leaf.
This should not happen!'); + return [leafOrLowLeafInfo, pathAbsentInEphemeralTree]; + } + } + } + + /** + * Helper to search for the leaf with the specified key in the indexedUpdates + * and return its leafIndex. + * If not present, return the leafIndex of the largest leaf <= the specified key + * (the leafIndex of the next lowest key). + * + * If no entry exists in indexedUpdates <= the specified key, return -1. + * @returns - The leafIndex of the leaf with the largest key <= the specified key. + */ + private _getLeafIndexOrNextLowestInIndexedUpdates(treeId: ID, key: Fr): bigint { const keyOrderedVector = this.indexedSortedKeys.get(treeId)!; - const vectorIndex = this.searchForKey( + const indexInVector = indexOrNextLowestInArray( key, keyOrderedVector.map(x => x[0]), ); - // We have a match in our local updates - let minPreimage = undefined; - - if (vectorIndex !== -1) { - const [_, leafIndex] = keyOrderedVector[vectorIndex]; - minPreimage = { - preimage: this.getIndexedUpdates(treeId, leafIndex) as T, - index: leafIndex, - }; - } - // This can probably be done better, we want to say if the minInfo is undefined (because this is our first operation) we do the external lookup - const start = minPreimage?.preimage; - const bigIntKey = key.toBigInt(); - // If we don't have a first element or if that first element is already greater than the target key, we need to do an external lookup - // The low public data witness is in the previous tree - if (start === undefined || start.getKey() > key.toBigInt()) { - // This function returns the leaf index to the actual element if it exists or the leaf index to the low leaf otherwise - const { index, alreadyPresent } = (await this.treeDb.getPreviousValueIndex(treeId, bigIntKey))!; - const preimage = await this.treeDb.getLeafPreimage(treeId, index); - - // Since we have never seen this before - we should insert it into our tree, as we know we will modify this leaf node - const siblingPath = await this.getSiblingPath(treeId, index); - // const siblingPath = (await this.treeDb.getSiblingPath(treeId, index)).toFields(); + if (indexInVector !== -1) { + const [_, leafIndex] = keyOrderedVector[indexInVector]; + return leafIndex; + } else { + // no leaf <= the specified key was found + return -1n; + } + } - // Is it enough to just insert the sibling path without inserting the leaf? 
- now probably since we will update this low nullifier index in append - this.treeMap.get(treeId)!.insertSiblingPath(index, siblingPath); - this.treeMap.get(treeId)!.updateLeaf(this.hashPreimage(preimage as T), index); + /** + * Query the external DB to get leaf if present, low leaf if absent + */ + private async _getLeafOrLowLeafWitnessInExternalDb( + treeId: ID, + key: bigint, + ): Promise<PreimageWitness<T>> { + // "key" is siloed slot (leafSlot) or siloed nullifier + const previousValueIndex = await this.treeDb.getPreviousValueIndex(treeId, key); + assert( + previousValueIndex !== undefined, + `${MerkleTreeId[treeId]} low leaf index should always be found (even if target leaf does not exist)`, + ); + const { index: leafIndex, alreadyPresent } = previousValueIndex; - const lowPublicDataPreimage = preimage as T; + const leafPreimage = await this.treeDb.getLeafPreimage(treeId, leafIndex); + assert( + leafPreimage !== undefined, + `${MerkleTreeId[treeId]} low leaf preimage should never be undefined (even if target leaf does not exist)`, + ); - return { preimage: lowPublicDataPreimage, index: index, update: alreadyPresent }; - } + return { preimage: leafPreimage as T, index: leafIndex, update: alreadyPresent }; + } - // We look for the low element by bouncing between our local indexedUpdates map or the external DB - // The conditions we are looking for are: - // (1) Exact Match: curr.nextKey == key (this is only valid for public data tree) - // (2) Sandwich Match: curr.nextKey > key and curr.key < key - // (3) Max Condition: curr.next_index == 0 and curr.key < key - // Note the min condition does not need to be handled since indexed trees are prefilled with at least the 0 element + /** + * Search for the leaf with the specified key. + * Some leaf with key <= the specified key is expected to be present in the ephemeral tree's "indexed updates". + * While searching, this function bounces between our local indexedUpdates and the external DB. + * + * @param key - The key for which we look up the leaf or low leaf. + * @param minPreimage - The leaf with the largest key <= the specified key. Expected to be present in local indexedUpdates. + * @param minIndex - The index of the leaf with the largest key <= the specified key.
+ * @param T - The type of the preimage (PublicData or Nullifier) + * @returns [ + * preimageWitness | undefined - The leaf or low leaf info (preimage & leaf index), + * pathAbsentInEphemeralTree - whether its sibling path is absent in the ephemeral tree (useful during insertions) + * ] + * + * @details We look for the low element by bouncing between our local indexedUpdates map and the external DB + * The conditions we are looking for are: + * (1) Exact Match: curr.nextKey == key (this is only valid for public data tree) + * (2) Sandwich Match: curr.nextKey > key and curr.key < key + * (3) Max Condition: curr.next_index == 0 and curr.key < key + * Note the min condition does not need to be handled since indexed trees are prefilled with at least the 0 element + */ + private async _searchForLeafOrLowLeaf( + treeId: ID, + key: bigint, + minPreimage: T, + minIndex: bigint, + ): Promise<[PreimageWitness<T> | undefined, /*pathAbsentInEphemeralTree=*/ boolean]> { let found = false; - let curr = minPreimage!.preimage as T; + let curr = minPreimage as T; let result: PreimageWitness<T> | undefined = undefined; // Temp to avoid infinite loops - the limit is the number of leaves we may have to read const LIMIT = 2n ** BigInt(getTreeHeight(treeId)) - 1n; let counter = 0n; - let lowPublicDataIndex = minPreimage!.index; + let lowPublicDataIndex = minIndex; + let pathAbsentInEphemeralTree = false; while (!found && counter < LIMIT) { + const bigIntKey = key; if (curr.getKey() === bigIntKey) { // We found an exact match - therefore this is an update found = true; @@ -437,30 +521,23 @@ else { lowPublicDataIndex = curr.getNextIndex(); if (this.hasLocalUpdates(treeId, lowPublicDataIndex)) { - curr = this.getIndexedUpdates(treeId, lowPublicDataIndex)!; + curr = this.getIndexedUpdate(treeId, lowPublicDataIndex)!; + pathAbsentInEphemeralTree = false; } else { const preimage: IndexedTreeLeafPreimage = (await this.treeDb.getLeafPreimage(treeId, lowPublicDataIndex))!; curr = preimage as T; + pathAbsentInEphemeralTree = true; } } counter++; } - // We did not find it - this is unexpected - if (result === undefined) { - throw new Error('No previous value found or ran out of iterations'); - } - return result; + return [result, pathAbsentInEphemeralTree]; } /** * This hashes the preimage to a field element */ hashPreimage(preimage: T): Fr { - // Watch for this edge-case, we are hashing the key=0 leaf to 0. - // This is for backward compatibility with the world state implementation - if (preimage.getKey() === 0n) { - return Fr.zero(); - } const input = preimage.toHashInputs().map(x => Fr.fromBuffer(x)); return poseidon2Hash(input); } @@ -808,3 +885,28 @@ } } } + +/** + * Return the index of the key in the array, or the index of the next-lowest key (-1 if none) when the key is not found.
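+ * Example (illustrative values): searching [2, 4, 7] for key 4 returns index 1; for key 5 it also returns 1 (4 is the next-lowest key); for key 1 it returns -1.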
+ */ +function indexOrNextLowestInArray(key: Fr, arr: Fr[]): number { + // We are looking for the index of the largest element in the array that is less than the key + let start = 0; + let end = arr.length; + // Note that the easiest way is to increment the search key by 1 and then do a binary search + const keyPlus1 = key.add(Fr.ONE); + while (start < end) { + const mid = Math.floor((start + end) / 2); + if (arr[mid].cmp(keyPlus1) < 0) { + // The key + 1 is greater than the midpoint, so we can continue searching the top half + start = mid + 1; + } else { + // The key + 1 is LT or EQ the arr element, so we can continue searching the bottom half + end = mid; + } + } + // We either found key + 1 or start is now at the index of the largest element that we would have inserted key + 1 + // Therefore start - 1 is the index of the element just below - note it can be -1 if the first element in the array is + // greater than the key + return start - 1; +} diff --git a/yarn-project/simulator/src/avm/journal/journal.test.ts b/yarn-project/simulator/src/avm/journal/journal.test.ts index 976f5f748ea..2665b1aec57 100644 --- a/yarn-project/simulator/src/avm/journal/journal.test.ts +++ b/yarn-project/simulator/src/avm/journal/journal.test.ts @@ -1,4 +1,5 @@ import { AztecAddress, SerializableContractInstance } from '@aztec/circuits.js'; +import { siloNullifier } from '@aztec/circuits.js/hash'; import { Fr } from '@aztec/foundation/fields'; import { mock } from 'jest-mock-extended'; @@ -84,22 +85,25 @@ describe('journal', () => { it('checkNullifierExists works for missing nullifiers', async () => { const exists = await persistableState.checkNullifierExists(address, utxo); expect(exists).toEqual(false); + const siloedNullifier = siloNullifier(address, utxo); expect(trace.traceNullifierCheck).toHaveBeenCalledTimes(1); - expect(trace.traceNullifierCheck).toHaveBeenCalledWith(address, utxo, exists); + expect(trace.traceNullifierCheck).toHaveBeenCalledWith(siloedNullifier, exists); }); it('checkNullifierExists works for existing nullifiers', async () => { mockNullifierExists(worldStateDB, leafIndex, utxo); const exists = await persistableState.checkNullifierExists(address, utxo); expect(exists).toEqual(true); + const siloedNullifier = siloNullifier(address, utxo); expect(trace.traceNullifierCheck).toHaveBeenCalledTimes(1); - expect(trace.traceNullifierCheck).toHaveBeenCalledWith(address, utxo, exists); + expect(trace.traceNullifierCheck).toHaveBeenCalledWith(siloedNullifier, exists); }); it('writeNullifier works', async () => { await persistableState.writeNullifier(address, utxo); - expect(trace.traceNewNullifier).toHaveBeenCalledWith(expect.objectContaining(address), /*nullifier=*/ utxo); - expect(trace.traceNewNullifier).toHaveBeenCalledWith(address, /*nullifier=*/ utxo); + const siloedNullifier = siloNullifier(address, utxo); + expect(trace.traceNewNullifier).toHaveBeenCalledTimes(1); + expect(trace.traceNewNullifier).toHaveBeenCalledWith(siloedNullifier); }); it('checkL1ToL2MessageExists works for missing message', async () => { diff --git a/yarn-project/simulator/src/avm/journal/journal.ts b/yarn-project/simulator/src/avm/journal/journal.ts index 90bb0e75b89..9a3ffa5273a 100644 --- a/yarn-project/simulator/src/avm/journal/journal.ts +++ b/yarn-project/simulator/src/avm/journal/journal.ts @@ -12,7 +12,7 @@ import { Fr } from '@aztec/foundation/fields'; import { jsonStringify } from '@aztec/foundation/json-rpc'; import { createDebugLogger } from '@aztec/foundation/log'; -import assert from 'assert'; +import 
{ strict as assert } from 'assert'; import { getPublicFunctionDebugName } from '../../common/debug_fn_name.js'; import { type WorldStateDB } from '../../public/public_db_sources.js'; @@ -50,7 +50,7 @@ export class AvmPersistableStateManager { private readonly nullifiers: NullifierManager = new NullifierManager(worldStateDB), private readonly doMerkleOperations: boolean = false, /** Ephemeral forest for merkle tree operations */ - public readonly merkleTrees: AvmEphemeralForest, + public merkleTrees: AvmEphemeralForest, ) {} /** @@ -77,14 +77,18 @@ /** * Create a new state manager */ - public static async create(worldStateDB: WorldStateDB, trace: PublicSideEffectTraceInterface) { + public static async create( + worldStateDB: WorldStateDB, + trace: PublicSideEffectTraceInterface, + doMerkleOperations: boolean = false, + ) { const ephemeralForest = await AvmEphemeralForest.create(worldStateDB.getMerkleInterface()); return new AvmPersistableStateManager( worldStateDB, trace, /*publicStorage=*/ new PublicStorage(worldStateDB), /*nullifiers=*/ new NullifierManager(worldStateDB), - /*doMerkleOperations=*/ true, + /*doMerkleOperations=*/ doMerkleOperations, ephemeralForest, ); } @@ -117,14 +121,6 @@ this._merge(forkedState, /*reverted=*/ true); } - /** - * Commit cached storage writes to the DB. - * Keeps public storage up to date from tx to tx within a block. - */ - public async commitStorageWritesToDB() { - await this.publicStorage.commitToDB(); - } - private _merge(forkedState: AvmPersistableStateManager, reverted: boolean) { // sanity check to avoid merging the same forked trace twice assert( @@ -135,6 +131,14 @@ this.publicStorage.acceptAndMerge(forkedState.publicStorage); this.nullifiers.acceptAndMerge(forkedState.nullifiers); this.trace.merge(forkedState.trace, reverted); + if (!reverted) { + this.merkleTrees = forkedState.merkleTrees; + if (this.doMerkleOperations) { + this.log.debug( + `Adopted forked nullifier tree with root ${this.merkleTrees.treeMap.get(MerkleTreeId.NULLIFIER_TREE)!.getRoot()}`, + ); + } + } } /** @@ -293,9 +297,10 @@ * @returns exists - whether the nullifier exists in the nullifier set */ public async checkNullifierExists(contractAddress: AztecAddress, nullifier: Fr): Promise<boolean> { - const [exists, isPending, _] = await this.nullifiers.checkExists(contractAddress, nullifier); - + this.log.debug(`Checking existence of nullifier (address=${contractAddress}, nullifier=${nullifier})`); const siloedNullifier = siloNullifier(contractAddress, nullifier); + const [exists, isPending, _] = await this.nullifiers.checkExists(siloedNullifier); + this.log.debug(`Checked siloed nullifier ${siloedNullifier} (exists=${exists}, pending=${isPending})`); if (this.doMerkleOperations) { // Get leaf if present, low leaf if absent @@ -310,11 +315,9 @@ assert(update == exists, 'WorldStateDB contains nullifier leaf, but merkle tree does not.... This is a bug!'); - this.log.debug( - `nullifiers(${contractAddress})@${nullifier} ??
leafIndex: ${leafIndex}, exists: ${exists}, pending: ${isPending}.`, - ); - - if (!exists) { + if (exists) { + this.log.debug(`Siloed nullifier ${siloedNullifier} exists at leafIndex=${leafIndex}`); + } else { // Sanity check that the leaf value is skipped by low leaf when it doesn't exist assert( siloedNullifier.toBigInt() > leafPreimage.nullifier.toBigInt() && @@ -323,20 +326,9 @@ ); } - this.trace.traceNullifierCheck( - contractAddress, - nullifier, // FIXME: Should this be siloed? - exists, - leafPreimage, - new Fr(leafIndex), - leafPath, - ); + this.trace.traceNullifierCheck(siloedNullifier, exists, leafPreimage, new Fr(leafIndex), leafPath); } else { - this.trace.traceNullifierCheck( - contractAddress, - nullifier, // FIXME: Should this be siloed? - exists, - ); + this.trace.traceNullifierCheck(siloedNullifier, exists); } return Promise.resolve(exists); } @@ -347,9 +339,17 @@ * @param nullifier - the unsiloed nullifier to write */ public async writeNullifier(contractAddress: AztecAddress, nullifier: Fr) { - this.log.debug(`nullifiers(${contractAddress}) += ${nullifier}.`); - + this.log.debug(`Inserting new nullifier (address=${contractAddress}, nullifier=${nullifier})`); const siloedNullifier = siloNullifier(contractAddress, nullifier); + await this.writeSiloedNullifier(siloedNullifier); + } + + /** + * Write a nullifier to the nullifier set, trace the write. + * @param siloedNullifier - the siloed nullifier to write + */ + public async writeSiloedNullifier(siloedNullifier: Fr) { + this.log.debug(`Inserting siloed nullifier=${siloedNullifier}`); if (this.doMerkleOperations) { // Maybe overkill, but we should check if the nullifier is already present in the tree before attempting to insert @@ -360,34 +360,35 @@ siloedNullifier, ); if (update) { - this.log.verbose(`Nullifier already present in tree: ${nullifier} at index ${index}.`); + this.log.verbose(`Siloed nullifier ${siloedNullifier} already present in tree at index ${index}!`); // If the nullifier is already present, we should not insert it again // instead we provide the direct membership path const path = await this.merkleTrees.getSiblingPath(MerkleTreeId.NULLIFIER_TREE, index); // This just becomes a nullifier read hint this.trace.traceNullifierCheck( - contractAddress, - nullifier, + siloedNullifier, /*exists=*/ update, preimage as NullifierLeafPreimage, new Fr(index), path, ); throw new NullifierCollisionError( - `Nullifier ${nullifier} at contract ${contractAddress} already exists in parent cache or host.`, + `Siloed nullifier ${siloedNullifier} already exists in parent cache or host.`, ); } else { // Cache pending nullifiers for later access - await this.nullifiers.append(contractAddress, nullifier); + await this.nullifiers.append(siloedNullifier); // We append the new nullifier const appendResult = await this.merkleTrees.appendNullifier(siloedNullifier); + this.log.debug( + `Nullifier tree root after insertion ${this.merkleTrees.treeMap.get(MerkleTreeId.NULLIFIER_TREE)!.getRoot()}`, + ); const lowLeafPreimage = appendResult.lowWitness.preimage as NullifierLeafPreimage; const lowLeafIndex = appendResult.lowWitness.index; const lowLeafPath = appendResult.lowWitness.siblingPath; const insertionPath = appendResult.insertionPath; this.trace.traceNewNullifier( - contractAddress, - nullifier, + siloedNullifier, lowLeafPreimage, new Fr(lowLeafIndex), lowLeafPath, @@ -396,8 +397,14 @@ export class
AvmPersistableStateManager { } } else { // Cache pending nullifiers for later access - await this.nullifiers.append(contractAddress, nullifier); - this.trace.traceNewNullifier(contractAddress, nullifier); + await this.nullifiers.append(siloedNullifier); + this.trace.traceNewNullifier(siloedNullifier); + } + } + + public async writeSiloedNullifiersFromPrivate(siloedNullifiers: Fr[]) { + for (const siloedNullifier of siloedNullifiers.filter(n => !n.isEmpty())) { + await this.writeSiloedNullifier(siloedNullifier); } } diff --git a/yarn-project/simulator/src/avm/journal/nullifiers.test.ts b/yarn-project/simulator/src/avm/journal/nullifiers.test.ts index 39b4c875f0e..02fc2a04495 100644 --- a/yarn-project/simulator/src/avm/journal/nullifiers.test.ts +++ b/yarn-project/simulator/src/avm/journal/nullifiers.test.ts @@ -1,4 +1,3 @@ -import { AztecAddress } from '@aztec/circuits.js'; import { Fr } from '@aztec/foundation/fields'; import { type MockProxy, mock } from 'jest-mock-extended'; @@ -17,64 +16,59 @@ describe('avm nullifier caching', () => { describe('Nullifier caching and existence checks', () => { it('Reading a non-existent nullifier works (gets zero & DNE)', async () => { - const contractAddress = AztecAddress.fromNumber(1); const nullifier = new Fr(2); // never written! - const [exists, isPending, gotIndex] = await nullifiers.checkExists(contractAddress, nullifier); + const [exists, isPending, gotIndex] = await nullifiers.checkExists(nullifier); // doesn't exist, not pending, index is zero (non-existent) expect(exists).toEqual(false); expect(isPending).toEqual(false); expect(gotIndex).toEqual(Fr.ZERO); }); it('Should cache nullifier, existence check works after creation', async () => { - const contractAddress = AztecAddress.fromNumber(1); const nullifier = new Fr(2); // Write to cache - await nullifiers.append(contractAddress, nullifier); - const [exists, isPending, gotIndex] = await nullifiers.checkExists(contractAddress, nullifier); + await nullifiers.append(nullifier); + const [exists, isPending, gotIndex] = await nullifiers.checkExists(nullifier); // exists (in cache), isPending, index is zero (not in tree) expect(exists).toEqual(true); expect(isPending).toEqual(true); expect(gotIndex).toEqual(Fr.ZERO); }); it('Existence check works on fallback to host (gets index, exists, not-pending)', async () => { - const contractAddress = AztecAddress.fromNumber(1); const nullifier = new Fr(2); const storedLeafIndex = BigInt(420); commitmentsDb.getNullifierIndex.mockResolvedValue(storedLeafIndex); - const [exists, isPending, gotIndex] = await nullifiers.checkExists(contractAddress, nullifier); + const [exists, isPending, gotIndex] = await nullifiers.checkExists(nullifier); // exists (in host), not pending, tree index retrieved from host expect(exists).toEqual(true); expect(isPending).toEqual(false); expect(gotIndex).toEqual(gotIndex); }); it('Existence check works on fallback to parent (gets value, exists, is pending)', async () => { - const contractAddress = AztecAddress.fromNumber(1); const nullifier = new Fr(2); const childNullifiers = nullifiers.fork(); // Write to parent cache - await nullifiers.append(contractAddress, nullifier); + await nullifiers.append(nullifier); // Get from child cache - const [exists, isPending, gotIndex] = await childNullifiers.checkExists(contractAddress, nullifier); + const [exists, isPending, gotIndex] = await childNullifiers.checkExists(nullifier); // exists (in parent), isPending, index is zero (not in tree) expect(exists).toEqual(true); 
expect(isPending).toEqual(true); expect(gotIndex).toEqual(Fr.ZERO); }); it('Existence check works on fallback to grandparent (gets value, exists, is pending)', async () => { - const contractAddress = AztecAddress.fromNumber(1); const nullifier = new Fr(2); const childNullifiers = nullifiers.fork(); const grandChildNullifiers = childNullifiers.fork(); // Write to parent cache - await nullifiers.append(contractAddress, nullifier); + await nullifiers.append(nullifier); // Get from child cache - const [exists, isPending, gotIndex] = await grandChildNullifiers.checkExists(contractAddress, nullifier); + const [exists, isPending, gotIndex] = await grandChildNullifiers.checkExists(nullifier); // exists (in parent), isPending, index is zero (not in tree) expect(exists).toEqual(true); expect(isPending).toEqual(true); @@ -84,79 +78,74 @@ describe('avm nullifier caching', () => { describe('Nullifier collision failures', () => { it('Cant append nullifier that already exists in cache', async () => { - const contractAddress = AztecAddress.fromNumber(1); const nullifier = new Fr(2); // same nullifier for both! // Append a nullifier to cache - await nullifiers.append(contractAddress, nullifier); + await nullifiers.append(nullifier); // Can't append again - await expect(nullifiers.append(contractAddress, nullifier)).rejects.toThrow( - `Nullifier ${nullifier} at contract ${contractAddress} already exists in parent cache or host.`, + await expect(nullifiers.append(nullifier)).rejects.toThrow( + `Siloed nullifier ${nullifier} already exists in parent cache or host.`, ); }); it('Cant append nullifier that already exists in parent cache', async () => { - const contractAddress = AztecAddress.fromNumber(1); const nullifier = new Fr(2); // same nullifier for both! // Append a nullifier to parent - await nullifiers.append(contractAddress, nullifier); + await nullifiers.append(nullifier); const childNullifiers = nullifiers.fork(); // Can't append again in child - await expect(childNullifiers.append(contractAddress, nullifier)).rejects.toThrow( - `Nullifier ${nullifier} at contract ${contractAddress} already exists in parent cache or host.`, + await expect(childNullifiers.append(nullifier)).rejects.toThrow( + `Siloed nullifier ${nullifier} already exists in parent cache or host.`, ); }); it('Cant append nullifier that already exist in host', async () => { - const contractAddress = AztecAddress.fromNumber(1); const nullifier = new Fr(2); // same nullifier for both! 
const storedLeafIndex = BigInt(420); // Nullifier exists in host commitmentsDb.getNullifierIndex.mockResolvedValue(storedLeafIndex); // Can't append to cache - await expect(nullifiers.append(contractAddress, nullifier)).rejects.toThrow( - `Nullifier ${nullifier} at contract ${contractAddress} already exists in parent cache or host.`, + await expect(nullifiers.append(nullifier)).rejects.toThrow( + `Siloed nullifier ${nullifier} already exists in parent cache or host.`, ); }); }); describe('Nullifier cache merging', () => { it('Should be able to merge two nullifier caches together', async () => { - const contractAddress = AztecAddress.fromNumber(1); const nullifier0 = new Fr(2); const nullifier1 = new Fr(3); // Append a nullifier to parent - await nullifiers.append(contractAddress, nullifier0); + await nullifiers.append(nullifier0); const childNullifiers = nullifiers.fork(); // Append a nullifier to child - await childNullifiers.append(contractAddress, nullifier1); + await childNullifiers.append(nullifier1); // Parent accepts child's nullifiers nullifiers.acceptAndMerge(childNullifiers); // After merge, parent has both nullifiers - const results0 = await nullifiers.checkExists(contractAddress, nullifier0); + const results0 = await nullifiers.checkExists(nullifier0); expect(results0).toEqual([/*exists=*/ true, /*isPending=*/ true, /*leafIndex=*/ Fr.ZERO]); - const results1 = await nullifiers.checkExists(contractAddress, nullifier1); + const results1 = await nullifiers.checkExists(nullifier1); expect(results1).toEqual([/*exists=*/ true, /*isPending=*/ true, /*leafIndex=*/ Fr.ZERO]); }); it('Cant merge two nullifier caches with colliding entries', async () => { - const contractAddress = AztecAddress.fromNumber(1); const nullifier = new Fr(2); // Append a nullifier to parent - await nullifiers.append(contractAddress, nullifier); + await nullifiers.append(nullifier); // Create child cache, don't derive from parent so we can concoct a collision on merge const childNullifiers = new NullifierManager(commitmentsDb); // Append a nullifier to child - await childNullifiers.append(contractAddress, nullifier); + await childNullifiers.append(nullifier); // Parent accepts child's nullifiers expect(() => nullifiers.acceptAndMerge(childNullifiers)).toThrow( - `Failed to accept child call's nullifiers. Nullifier ${nullifier.toBigInt()} already exists at contract ${contractAddress.toBigInt()}.`, + `Failed to merge in fork's cached nullifiers. Siloed nullifier ${nullifier.toBigInt()} already exists in parent cache.`, ); }); }); diff --git a/yarn-project/simulator/src/avm/journal/nullifiers.ts b/yarn-project/simulator/src/avm/journal/nullifiers.ts index 33e615349b3..1af35cc9e4c 100644 --- a/yarn-project/simulator/src/avm/journal/nullifiers.ts +++ b/yarn-project/simulator/src/avm/journal/nullifiers.ts @@ -1,20 +1,18 @@ -import { type AztecAddress } from '@aztec/circuits.js'; -import { siloNullifier } from '@aztec/circuits.js/hash'; import { Fr } from '@aztec/foundation/fields'; import type { CommitmentsDB } from '../../index.js'; /** * A class to manage new nullifier staging and existence checks during a contract call's AVM simulation. - * Maintains a nullifier cache, and ensures that existence checks fall back to the correct source. + * Maintains a siloed nullifier cache, and ensures that existence checks fall back to the correct source. * When a contract call completes, its cached nullifier set can be merged into its parent's. */ export class NullifierManager { constructor( /** Reference to node storage. 
Checked on parent cache-miss. */ private readonly hostNullifiers: CommitmentsDB, - /** Cached nullifiers. */ - private readonly cache: NullifierCache = new NullifierCache(), + /** Cache of siloed nullifiers. */ + private cache: Set = new Set(), /** Parent nullifier manager to fall back on */ private readonly parent?: NullifierManager, ) {} @@ -22,32 +20,34 @@ export class NullifierManager { /** * Create a new nullifiers manager with some preloaded pending siloed nullifiers */ - public static newWithPendingSiloedNullifiers(hostNullifiers: CommitmentsDB, pendingSiloedNullifiers: Fr[]) { - const cache = new NullifierCache(pendingSiloedNullifiers); - return new NullifierManager(hostNullifiers, cache); + public static newWithPendingSiloedNullifiers(hostNullifiers: CommitmentsDB, pendingSiloedNullifiers?: Fr[]) { + const cachedSiloedNullifiers = new Set(); + if (pendingSiloedNullifiers !== undefined) { + pendingSiloedNullifiers.forEach(nullifier => cachedSiloedNullifiers.add(nullifier.toBigInt())); + } + return new NullifierManager(hostNullifiers, cachedSiloedNullifiers); } /** * Create a new nullifiers manager forked from this one */ public fork() { - return new NullifierManager(this.hostNullifiers, new NullifierCache(), this); + return new NullifierManager(this.hostNullifiers, new Set(), this); } /** * Get a nullifier's existence in this' cache or parent's (recursively). * DOES NOT CHECK HOST STORAGE! - * @param contractAddress - the address of the contract whose storage is being read from - * @param nullifier - the nullifier to check for + * @param siloedNullifier - the nullifier to check for * @returns exists: whether the nullifier exists in cache here or in parent's */ - private checkExistsHereOrParent(contractAddress: AztecAddress, nullifier: Fr): boolean { + private checkExistsHereOrParent(siloedNullifier: Fr): boolean { // First check this cache - let existsAsPending = this.cache.exists(contractAddress, nullifier); + let existsAsPending = this.cache.has(siloedNullifier.toBigInt()); // Then try parent's nullifier cache if (!existsAsPending && this.parent) { // Note: this will recurse to grandparent/etc until a cache-hit is encountered. - existsAsPending = this.parent.checkExistsHereOrParent(contractAddress, nullifier); + existsAsPending = this.parent.checkExistsHereOrParent(siloedNullifier); } return existsAsPending; } @@ -59,24 +59,22 @@ export class NullifierManager { * 3. Fall back to the host state. * 4. Not found! Nullifier does not exist. * - * @param contractAddress - the address of the contract whose storage is being read from - * @param nullifier - the nullifier to check for + * @param siloedNullifier - the nullifier to check for * @returns exists: whether the nullifier exists at all, * isPending: whether the nullifier was found in a cache, * leafIndex: the nullifier's leaf index if it exists and is not pending (comes from host state). */ public async checkExists( - contractAddress: AztecAddress, - nullifier: Fr, + siloedNullifier: Fr, ): Promise<[/*exists=*/ boolean, /*isPending=*/ boolean, /*leafIndex=*/ Fr]> { // Check this cache and parent's (recursively) - const existsAsPending = this.checkExistsHereOrParent(contractAddress, nullifier); + const existsAsPending = this.checkExistsHereOrParent(siloedNullifier); // Finally try the host's Aztec state (a trip to the database) // If the value is found in the database, it will be associated with a leaf index! 
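// Lookup order: this cache, then ancestor caches (recursively), then the host DB, which alone can supply a real leaf index; cache hits stay pending and report Fr.ZERO.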
let leafIndex: bigint | undefined = undefined; if (!existsAsPending) { // silo the nullifier before checking for its existence in the host - leafIndex = await this.hostNullifiers.getNullifierIndex(siloNullifier(contractAddress, nullifier)); + leafIndex = await this.hostNullifiers.getNullifierIndex(siloedNullifier); } const exists = existsAsPending || leafIndex !== undefined; leafIndex = leafIndex === undefined ? BigInt(0) : leafIndex; @@ -86,17 +84,14 @@ export class NullifierManager { /** * Stage a new nullifier (append it to the cache). * - * @param contractAddress - the address of the contract that the nullifier is associated with - * @param nullifier - the nullifier to stage + * @param siloedNullifier - the nullifier to stage */ - public async append(contractAddress: AztecAddress, nullifier: Fr) { - const [exists, ,] = await this.checkExists(contractAddress, nullifier); + public async append(siloedNullifier: Fr) { + const [exists, ,] = await this.checkExists(siloedNullifier); if (exists) { - throw new NullifierCollisionError( - `Nullifier ${nullifier} at contract ${contractAddress} already exists in parent cache or host.`, - ); + throw new NullifierCollisionError(`Siloed nullifier ${siloedNullifier} already exists in parent cache or host.`); } - this.cache.append(contractAddress, nullifier); + this.cache.add(siloedNullifier.toBigInt()); } /** @@ -105,101 +100,14 @@ export class NullifierManager { * @param incomingNullifiers - the incoming cached nullifiers to merge into this instance's */ public acceptAndMerge(incomingNullifiers: NullifierManager) { - this.cache.acceptAndMerge(incomingNullifiers.cache); - } -} - -/** - * A class to cache nullifiers created during a contract call's AVM simulation. - * "append" updates a map, "exists" checks that map. - * An instance of this class can merge another instance's cached nullifiers into its own. - */ -export class NullifierCache { - /** - * Map for staging nullifiers. - * One inner-set per contract storage address, - * each entry being a nullifier. - */ - private cachePerContract: Map> = new Map(); - private siloedNullifiers: Set = new Set(); - - /** - * @parem siloedNullifierFrs: optional list of pending siloed nullifiers to initialize this cache with - */ - constructor(siloedNullifierFrs?: Fr[]) { - if (siloedNullifierFrs !== undefined) { - siloedNullifierFrs.forEach(nullifier => this.siloedNullifiers.add(nullifier.toBigInt())); - } - } - - /** - * Check whether a nullifier exists in the cache. - * - * @param contractAddress - the address of the contract that the nullifier is associated with - * @param nullifier - the nullifier to check existence of - * @returns whether the nullifier is found in the cache - */ - public exists(contractAddress: AztecAddress, nullifier: Fr): boolean { - const exists = - this.cachePerContract.get(contractAddress.toBigInt())?.has(nullifier.toBigInt()) || - this.siloedNullifiers.has(siloNullifier(contractAddress, nullifier).toBigInt()); - return !!exists; - } - - /** - * Stage a new nullifier (append it to the cache). 
- * - * @param contractAddress - the address of the contract that the nullifier is associated with - * @param nullifier - the nullifier to stage - */ - public append(contractAddress: AztecAddress, nullifier: Fr) { - if (this.exists(contractAddress, nullifier)) { - throw new NullifierCollisionError( - `Nullifier ${nullifier} at contract ${contractAddress} already exists in cache.`, - ); - } - - let nullifiersForContract = this.cachePerContract.get(contractAddress.toBigInt()); - // If this contract's nullifier set has no cached nullifiers, create a new Set to store them - if (!nullifiersForContract) { - nullifiersForContract = new Set(); - this.cachePerContract.set(contractAddress.toBigInt(), nullifiersForContract); - } - nullifiersForContract.add(nullifier.toBigInt()); - } - - /** - * Merge another cache's nullifiers into this instance's. - * - * Cached nullifiers in "incoming" must not collide with any present in "this". - * - * In practice, "this" is a parent call's pending nullifiers, and "incoming" is a nested call's. - * - * @param incomingNullifiers - the incoming cached nullifiers to merge into this instance's - */ - public acceptAndMerge(incomingNullifiers: NullifierCache) { - // Merge siloed nullifiers. - this.siloedNullifiers = new Set([...this.siloedNullifiers, ...incomingNullifiers.siloedNullifiers]); - // Iterate over all contracts with staged writes in the child. - for (const [incomingAddress, incomingCacheAtContract] of incomingNullifiers.cachePerContract) { - const thisCacheAtContract = this.cachePerContract.get(incomingAddress); - if (!thisCacheAtContract) { - // This contract has no nullifiers cached here - // so just accept incoming cache as-is for this contract. - this.cachePerContract.set(incomingAddress, incomingCacheAtContract); - } else { - // "Incoming" and "this" both have cached nullifiers for this contract. - // Merge in incoming nullifiers, erroring if there are any duplicates. - for (const nullifier of incomingCacheAtContract) { - if (thisCacheAtContract.has(nullifier)) { - throw new NullifierCollisionError( - `Failed to accept child call's nullifiers. Nullifier ${nullifier} already exists at contract ${incomingAddress}.`, - ); - } - thisCacheAtContract.add(nullifier); - } + for (const incomingNullifier of incomingNullifiers.cache) { + if (this.cache.has(incomingNullifier)) { + throw new NullifierCollisionError( + `Failed to merge in fork's cached nullifiers. Siloed nullifier ${incomingNullifier} already exists in parent cache.`, + ); } } + this.cache = new Set([...this.cache, ...incomingNullifiers.cache]); } } diff --git a/yarn-project/simulator/src/avm/opcodes/accrued_substate.test.ts b/yarn-project/simulator/src/avm/opcodes/accrued_substate.test.ts index 65210cf2157..f38a335ebd5 100644 --- a/yarn-project/simulator/src/avm/opcodes/accrued_substate.test.ts +++ b/yarn-project/simulator/src/avm/opcodes/accrued_substate.test.ts @@ -1,4 +1,5 @@ import { AztecAddress, Fr } from '@aztec/circuits.js'; +import { siloNullifier } from '@aztec/circuits.js/hash'; import { mock } from 'jest-mock-extended'; @@ -35,6 +36,7 @@ describe('Accrued Substate', () => { const leafIndex = new Fr(7); const leafIndexOffset = 1; const existsOffset = 2; + const siloedNullifier0 = siloNullifier(address, value0); beforeEach(() => { worldStateDB = mock(); @@ -170,7 +172,7 @@ describe('Accrued Substate', () => { const isPending = false; // leafIndex is returned from DB call for nullifiers, so it is absent on DB miss const _tracedLeafIndex = exists && !isPending ? 
leafIndex : Fr.ZERO; - expect(trace.traceNullifierCheck).toHaveBeenCalledWith(address, /*nullifier=*/ value0, exists); + expect(trace.traceNullifierCheck).toHaveBeenCalledWith(siloedNullifier0, exists); }); }); }); @@ -192,7 +194,7 @@ describe('Accrued Substate', () => { context.machineState.memory.set(value0Offset, new Field(value0)); await new EmitNullifier(/*indirect=*/ 0, /*offset=*/ value0Offset).execute(context); expect(trace.traceNewNullifier).toHaveBeenCalledTimes(1); - expect(trace.traceNewNullifier).toHaveBeenCalledWith(expect.objectContaining(address), /*nullifier=*/ value0); + expect(trace.traceNewNullifier).toHaveBeenCalledWith(siloedNullifier0); }); it('Nullifier collision reverts (same nullifier emitted twice)', async () => { @@ -204,7 +206,7 @@ describe('Accrued Substate', () => { ), ); expect(trace.traceNewNullifier).toHaveBeenCalledTimes(1); - expect(trace.traceNewNullifier).toHaveBeenCalledWith(expect.objectContaining(address), /*nullifier=*/ value0); + expect(trace.traceNewNullifier).toHaveBeenCalledWith(siloedNullifier0); }); it('Nullifier collision reverts (nullifier exists in host state)', async () => { diff --git a/yarn-project/simulator/src/public/dual_side_effect_trace.ts b/yarn-project/simulator/src/public/dual_side_effect_trace.ts index ff396d68496..f6285e0e355 100644 --- a/yarn-project/simulator/src/public/dual_side_effect_trace.ts +++ b/yarn-project/simulator/src/public/dual_side_effect_trace.ts @@ -94,50 +94,26 @@ export class DualSideEffectTrace implements PublicSideEffectTraceInterface { } public traceNullifierCheck( - contractAddress: AztecAddress, - nullifier: Fr, + siloedNullifier: Fr, exists: boolean, lowLeafPreimage: NullifierLeafPreimage, lowLeafIndex: Fr, lowLeafPath: Fr[], ) { - this.innerCallTrace.traceNullifierCheck( - contractAddress, - nullifier, - exists, - lowLeafPreimage, - lowLeafIndex, - lowLeafPath, - ); - this.enqueuedCallTrace.traceNullifierCheck( - contractAddress, - nullifier, - exists, - lowLeafPreimage, - lowLeafIndex, - lowLeafPath, - ); + this.innerCallTrace.traceNullifierCheck(siloedNullifier, exists, lowLeafPreimage, lowLeafIndex, lowLeafPath); + this.enqueuedCallTrace.traceNullifierCheck(siloedNullifier, exists, lowLeafPreimage, lowLeafIndex, lowLeafPath); } public traceNewNullifier( - contractAddress: AztecAddress, - nullifier: Fr, + siloedNullifier: Fr, lowLeafPreimage: NullifierLeafPreimage, lowLeafIndex: Fr, lowLeafPath: Fr[], insertionPath: Fr[], ) { - this.innerCallTrace.traceNewNullifier( - contractAddress, - nullifier, - lowLeafPreimage, - lowLeafIndex, - lowLeafPath, - insertionPath, - ); + this.innerCallTrace.traceNewNullifier(siloedNullifier, lowLeafPreimage, lowLeafIndex, lowLeafPath, insertionPath); this.enqueuedCallTrace.traceNewNullifier( - contractAddress, - nullifier, + siloedNullifier, lowLeafPreimage, lowLeafIndex, lowLeafPath, diff --git a/yarn-project/simulator/src/public/enqueued_call_side_effect_trace.test.ts b/yarn-project/simulator/src/public/enqueued_call_side_effect_trace.test.ts index 96b415a2e04..6f84f4de2ad 100644 --- a/yarn-project/simulator/src/public/enqueued_call_side_effect_trace.test.ts +++ b/yarn-project/simulator/src/public/enqueued_call_side_effect_trace.test.ts @@ -1,30 +1,27 @@ import { UnencryptedL2Log } from '@aztec/circuit-types'; import { + AvmAppendTreeHint, + AvmNullifierReadTreeHint, + AvmNullifierWriteTreeHint, + AvmPublicDataReadTreeHint, + AvmPublicDataWriteTreeHint, AztecAddress, EthAddress, L2ToL1Message, LogHash, - MAX_L1_TO_L2_MSG_READ_REQUESTS_PER_TX, 
MAX_L2_TO_L1_MSGS_PER_TX, MAX_NOTE_HASHES_PER_TX, - MAX_NOTE_HASH_READ_REQUESTS_PER_TX, MAX_NULLIFIERS_PER_TX, - MAX_NULLIFIER_NON_EXISTENT_READ_REQUESTS_PER_TX, - MAX_NULLIFIER_READ_REQUESTS_PER_TX, - MAX_PUBLIC_DATA_READS_PER_TX, MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX, MAX_UNENCRYPTED_LOGS_PER_TX, NoteHash, Nullifier, NullifierLeafPreimage, - PublicDataRead, PublicDataTreeLeafPreimage, PublicDataUpdateRequest, - ReadRequest, SerializableContractInstance, - TreeLeafReadRequest, } from '@aztec/circuits.js'; -import { computePublicDataTreeLeafSlot, siloNullifier } from '@aztec/circuits.js/hash'; +import { computePublicDataTreeLeafSlot } from '@aztec/circuits.js/hash'; import { Fr } from '@aztec/foundation/fields'; import { randomInt } from 'crypto'; @@ -36,118 +33,106 @@ describe('Enqueued-call Side Effect Trace', () => { const address = AztecAddress.random(); const utxo = Fr.random(); const leafIndex = Fr.random(); + const lowLeafIndex = Fr.random(); const slot = Fr.random(); const value = Fr.random(); const recipient = Fr.random(); const content = Fr.random(); const log = [Fr.random(), Fr.random(), Fr.random()]; const contractInstance = SerializableContractInstance.default(); + const siblingPath = [Fr.random(), Fr.random(), Fr.random(), Fr.random()]; + const lowLeafSiblingPath = [Fr.random(), Fr.random(), Fr.random()]; let startCounter: number; - let startCounterFr: Fr; let startCounterPlus1: number; let trace: PublicEnqueuedCallSideEffectTrace; beforeEach(() => { startCounter = randomInt(/*max=*/ 1000000); - startCounterFr = new Fr(startCounter); startCounterPlus1 = startCounter + 1; trace = new PublicEnqueuedCallSideEffectTrace(startCounter); }); it('Should trace storage reads', () => { const leafPreimage = new PublicDataTreeLeafPreimage(slot, value, Fr.ZERO, 0n); - trace.tracePublicStorageRead(address, slot, value, leafPreimage, Fr.ZERO, []); + trace.tracePublicStorageRead(address, slot, value, leafPreimage, leafIndex, siblingPath); expect(trace.getCounter()).toBe(startCounterPlus1); - const leafSlot = computePublicDataTreeLeafSlot(address, slot); - const expected = [new PublicDataRead(leafSlot, value, startCounter /*contractAddress*/)]; - expect(trace.getSideEffects().publicDataReads).toEqual(expected); - - expect(trace.getAvmCircuitHints().storageValues.items).toEqual([{ key: startCounterFr, value }]); + const expected = new AvmPublicDataReadTreeHint(leafPreimage, leafIndex, siblingPath); + expect(trace.getAvmCircuitHints().storageReadRequest.items).toEqual([expected]); }); it('Should trace storage writes', () => { const lowLeafPreimage = new PublicDataTreeLeafPreimage(slot, value, Fr.ZERO, 0n); const newLeafPreimage = new PublicDataTreeLeafPreimage(slot, value, Fr.ZERO, 0n); - trace.tracePublicStorageWrite(address, slot, value, lowLeafPreimage, Fr.ZERO, [], newLeafPreimage, []); + trace.tracePublicStorageWrite( + address, + slot, + value, + lowLeafPreimage, + lowLeafIndex, + lowLeafSiblingPath, + newLeafPreimage, + siblingPath, + ); expect(trace.getCounter()).toBe(startCounterPlus1); const leafSlot = computePublicDataTreeLeafSlot(address, slot); const expected = [new PublicDataUpdateRequest(leafSlot, value, startCounter /*contractAddress*/)]; expect(trace.getSideEffects().publicDataWrites).toEqual(expected); + + const readHint = new AvmPublicDataReadTreeHint(lowLeafPreimage, lowLeafIndex, lowLeafSiblingPath); + const expectedHint = new AvmPublicDataWriteTreeHint(readHint, newLeafPreimage, siblingPath); + 
expect(trace.getAvmCircuitHints().storageUpdateRequest.items).toEqual([expectedHint]); }); it('Should trace note hash checks', () => { const exists = true; - trace.traceNoteHashCheck(address, utxo, leafIndex, exists, []); - - const expected = [new TreeLeafReadRequest(utxo, leafIndex)]; - expect(trace.getSideEffects().noteHashReadRequests).toEqual(expected); - - expect(trace.getAvmCircuitHints().noteHashExists.items).toEqual([{ key: leafIndex, value: new Fr(exists) }]); + trace.traceNoteHashCheck(address, utxo, leafIndex, exists, siblingPath); + const expected = new AvmAppendTreeHint(leafIndex, utxo, siblingPath); + expect(trace.getAvmCircuitHints().noteHashReadRequest.items).toEqual([expected]); }); it('Should trace note hashes', () => { - trace.traceNewNoteHash(address, utxo, Fr.ZERO, []); + trace.traceNewNoteHash(address, utxo, leafIndex, siblingPath); expect(trace.getCounter()).toBe(startCounterPlus1); const expected = [new NoteHash(utxo, startCounter).scope(address)]; expect(trace.getSideEffects().noteHashes).toEqual(expected); + + const expectedHint = new AvmAppendTreeHint(leafIndex, utxo, siblingPath); + expect(trace.getAvmCircuitHints().noteHashWriteRequest.items).toEqual([expectedHint]); }); it('Should trace nullifier checks', () => { const exists = true; const lowLeafPreimage = new NullifierLeafPreimage(utxo, Fr.ZERO, 0n); - trace.traceNullifierCheck(address, utxo, exists, lowLeafPreimage, Fr.ZERO, []); - expect(trace.getCounter()).toBe(startCounterPlus1); - - const { nullifierReadRequests, nullifierNonExistentReadRequests } = trace.getSideEffects(); - const expected = [new ReadRequest(utxo, startCounter).scope(address)]; - expect(nullifierReadRequests).toEqual(expected); - expect(nullifierNonExistentReadRequests).toEqual([]); - - expect(trace.getAvmCircuitHints().nullifierExists.items).toEqual([{ key: startCounterFr, value: new Fr(exists) }]); - }); - - it('Should trace non-existent nullifier checks', () => { - const exists = false; - const lowLeafPreimage = new NullifierLeafPreimage(utxo, Fr.ZERO, 0n); - trace.traceNullifierCheck(address, utxo, exists, lowLeafPreimage, Fr.ZERO, []); + trace.traceNullifierCheck(utxo, exists, lowLeafPreimage, leafIndex, siblingPath); expect(trace.getCounter()).toBe(startCounterPlus1); - const { nullifierReadRequests, nullifierNonExistentReadRequests } = trace.getSideEffects(); - expect(nullifierReadRequests).toEqual([]); - - const expected = [new ReadRequest(utxo, startCounter).scope(address)]; - expect(nullifierNonExistentReadRequests).toEqual(expected); - - expect(trace.getAvmCircuitHints().nullifierExists.items).toEqual([{ key: startCounterFr, value: new Fr(exists) }]); + const expected = new AvmNullifierReadTreeHint(lowLeafPreimage, leafIndex, siblingPath); + expect(trace.getAvmCircuitHints().nullifierReadRequest.items).toEqual([expected]); }); it('Should trace nullifiers', () => { const lowLeafPreimage = new NullifierLeafPreimage(utxo, Fr.ZERO, 0n); - trace.traceNewNullifier(address, utxo, lowLeafPreimage, Fr.ZERO, [], []); + trace.traceNewNullifier(utxo, lowLeafPreimage, lowLeafIndex, lowLeafSiblingPath, siblingPath); expect(trace.getCounter()).toBe(startCounterPlus1); - const expected = [new Nullifier(siloNullifier(address, utxo), startCounter, Fr.ZERO)]; + const expected = [new Nullifier(utxo, startCounter, Fr.ZERO)]; expect(trace.getSideEffects().nullifiers).toEqual(expected); + + const readHint = new AvmNullifierReadTreeHint(lowLeafPreimage, lowLeafIndex, lowLeafSiblingPath); + const expectedHint = new AvmNullifierWriteTreeHint(readHint, 
siblingPath); + expect(trace.getAvmCircuitHints().nullifierWriteHints.items).toEqual([expectedHint]); }); it('Should trace L1ToL2 Message checks', () => { const exists = true; - trace.traceL1ToL2MessageCheck(address, utxo, leafIndex, exists, []); - - const expected = [new TreeLeafReadRequest(utxo, leafIndex)]; - expect(trace.getSideEffects().l1ToL2MsgReadRequests).toEqual(expected); - - expect(trace.getAvmCircuitHints().l1ToL2MessageExists.items).toEqual([ - { - key: leafIndex, - value: new Fr(exists), - }, - ]); + trace.traceL1ToL2MessageCheck(address, utxo, leafIndex, exists, siblingPath); + const expected = new AvmAppendTreeHint(leafIndex, utxo, siblingPath); + expect(trace.getAvmCircuitHints().l1ToL2MessageReadRequest.items).toEqual([expected]); }); it('Should trace new L2ToL1 messages', () => { @@ -187,17 +172,6 @@ describe('Enqueued-call Side Effect Trace', () => { ]); }); describe('Maximum accesses', () => { - it('Should enforce maximum number of public storage reads', () => { - for (let i = 0; i < MAX_PUBLIC_DATA_READS_PER_TX; i++) { - const leafPreimage = new PublicDataTreeLeafPreimage(new Fr(i), new Fr(i), Fr.ZERO, 0n); - trace.tracePublicStorageRead(address, slot, value, leafPreimage, Fr.ZERO, []); - } - const leafPreimage = new PublicDataTreeLeafPreimage(new Fr(42), new Fr(42), Fr.ZERO, 0n); - expect(() => trace.tracePublicStorageRead(address, slot, value, leafPreimage, Fr.ZERO, [])).toThrow( - SideEffectLimitReachedError, - ); - }); - it('Should enforce maximum number of public storage writes', () => { for (let i = 0; i < MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX; i++) { const lowLeafPreimage = new PublicDataTreeLeafPreimage(new Fr(i), new Fr(i), Fr.ZERO, 0n); @@ -219,15 +193,6 @@ describe('Enqueued-call Side Effect Trace', () => { ).toThrow(SideEffectLimitReachedError); }); - it('Should enforce maximum number of note hash checks', () => { - for (let i = 0; i < MAX_NOTE_HASH_READ_REQUESTS_PER_TX; i++) { - trace.traceNoteHashCheck(AztecAddress.fromNumber(i), new Fr(i), new Fr(i), true, []); - } - expect(() => trace.traceNoteHashCheck(AztecAddress.fromNumber(42), new Fr(42), new Fr(42), true, [])).toThrow( - SideEffectLimitReachedError, - ); - }); - it('Should enforce maximum number of new note hashes', () => { for (let i = 0; i < MAX_NOTE_HASHES_PER_TX; i++) { trace.traceNewNoteHash(AztecAddress.fromNumber(i), new Fr(i), Fr.ZERO, []); @@ -237,54 +202,15 @@ describe('Enqueued-call Side Effect Trace', () => { ); }); - it('Should enforce maximum number of nullifier checks', () => { - for (let i = 0; i < MAX_NULLIFIER_READ_REQUESTS_PER_TX; i++) { - const lowLeafPreimage = new NullifierLeafPreimage(new Fr(i), Fr.ZERO, 0n); - trace.traceNullifierCheck(AztecAddress.fromNumber(i), new Fr(i + 1), true, lowLeafPreimage, Fr.ZERO, []); - } - const lowLeafPreimage = new NullifierLeafPreimage(new Fr(41), Fr.ZERO, 0n); - expect(() => - trace.traceNullifierCheck(AztecAddress.fromNumber(42), new Fr(42), true, lowLeafPreimage, Fr.ZERO, []), - ).toThrow(SideEffectLimitReachedError); - // NOTE: also cannot do a non-existent check once existent checks have filled up - expect(() => - trace.traceNullifierCheck(AztecAddress.fromNumber(42), new Fr(42), false, lowLeafPreimage, Fr.ZERO, []), - ).toThrow(SideEffectLimitReachedError); - }); - - it('Should enforce maximum number of nullifier non-existent checks', () => { - for (let i = 0; i < MAX_NULLIFIER_NON_EXISTENT_READ_REQUESTS_PER_TX; i++) { - const lowLeafPreimage = new NullifierLeafPreimage(new Fr(i), Fr.ZERO, 0n); - 
trace.traceNullifierCheck(AztecAddress.fromNumber(i), new Fr(i + 1), true, lowLeafPreimage, Fr.ZERO, []); - } - const lowLeafPreimage = new NullifierLeafPreimage(new Fr(41), Fr.ZERO, 0n); - expect(() => - trace.traceNullifierCheck(AztecAddress.fromNumber(42), new Fr(42), false, lowLeafPreimage, Fr.ZERO, []), - ).toThrow(SideEffectLimitReachedError); - // NOTE: also cannot do a existent check once non-existent checks have filled up - expect(() => - trace.traceNullifierCheck(AztecAddress.fromNumber(42), new Fr(42), true, lowLeafPreimage, Fr.ZERO, []), - ).toThrow(SideEffectLimitReachedError); - }); - it('Should enforce maximum number of new nullifiers', () => { for (let i = 0; i < MAX_NULLIFIERS_PER_TX; i++) { const lowLeafPreimage = new NullifierLeafPreimage(new Fr(i + 1), Fr.ZERO, 0n); - trace.traceNewNullifier(AztecAddress.fromNumber(i), new Fr(i), lowLeafPreimage, Fr.ZERO, [], []); + trace.traceNewNullifier(new Fr(i), lowLeafPreimage, Fr.ZERO, [], []); } const lowLeafPreimage = new NullifierLeafPreimage(new Fr(41), Fr.ZERO, 0n); - expect(() => - trace.traceNewNullifier(AztecAddress.fromNumber(42), new Fr(42), lowLeafPreimage, Fr.ZERO, [], []), - ).toThrow(SideEffectLimitReachedError); - }); - - it('Should enforce maximum number of L1 to L2 message checks', () => { - for (let i = 0; i < MAX_L1_TO_L2_MSG_READ_REQUESTS_PER_TX; i++) { - trace.traceL1ToL2MessageCheck(AztecAddress.fromNumber(i), new Fr(i), new Fr(i), true, []); - } - expect(() => - trace.traceL1ToL2MessageCheck(AztecAddress.fromNumber(42), new Fr(42), new Fr(42), true, []), - ).toThrow(SideEffectLimitReachedError); + expect(() => trace.traceNewNullifier(new Fr(42), lowLeafPreimage, Fr.ZERO, [], [])).toThrow( + SideEffectLimitReachedError, + ); }); it('Should enforce maximum number of new l2 to l1 messages', () => { @@ -305,86 +231,30 @@ describe('Enqueued-call Side Effect Trace', () => { ); }); - it('Should enforce maximum number of nullifier checks for GETCONTRACTINSTANCE', () => { - for (let i = 0; i < MAX_NULLIFIER_READ_REQUESTS_PER_TX; i++) { - const lowLeafPreimage = new NullifierLeafPreimage(new Fr(i), Fr.ZERO, 0n); - trace.traceNullifierCheck(AztecAddress.fromNumber(i), new Fr(i + 1), true, lowLeafPreimage, Fr.ZERO, []); - } - expect(() => trace.traceGetContractInstance(address, /*exists=*/ true, contractInstance)).toThrow( - SideEffectLimitReachedError, - ); - // NOTE: also cannot do a existent check once non-existent checks have filled up - expect(() => trace.traceGetContractInstance(address, /*exists=*/ false, contractInstance)).toThrow( - SideEffectLimitReachedError, - ); - }); - - it('Should enforce maximum number of nullifier non-existent checks for GETCONTRACTINSTANCE', () => { - for (let i = 0; i < MAX_NULLIFIER_NON_EXISTENT_READ_REQUESTS_PER_TX; i++) { - const lowLeafPreimage = new NullifierLeafPreimage(new Fr(i), Fr.ZERO, 0n); - trace.traceNullifierCheck(AztecAddress.fromNumber(i), new Fr(i + 1), true, lowLeafPreimage, Fr.ZERO, []); - } - expect(() => trace.traceGetContractInstance(address, /*exists=*/ false, contractInstance)).toThrow( - SideEffectLimitReachedError, - ); - // NOTE: also cannot do a existent check once non-existent checks have filled up - expect(() => trace.traceGetContractInstance(address, /*exists=*/ true, contractInstance)).toThrow( - SideEffectLimitReachedError, - ); - }); - it('PreviousValidationRequestArrayLengths and PreviousAccumulatedDataArrayLengths contribute to limits', () => { trace = new PublicEnqueuedCallSideEffectTrace( 0, new SideEffectArrayLengths( - 
MAX_PUBLIC_DATA_READS_PER_TX, MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX, - MAX_NOTE_HASH_READ_REQUESTS_PER_TX, MAX_NOTE_HASHES_PER_TX, - MAX_NULLIFIER_READ_REQUESTS_PER_TX, - MAX_NULLIFIER_NON_EXISTENT_READ_REQUESTS_PER_TX, MAX_NULLIFIERS_PER_TX, - MAX_L1_TO_L2_MSG_READ_REQUESTS_PER_TX, MAX_L2_TO_L1_MSGS_PER_TX, MAX_UNENCRYPTED_LOGS_PER_TX, ), ); - expect(() => trace.tracePublicStorageRead(AztecAddress.fromNumber(42), new Fr(42), new Fr(42))).toThrow( - SideEffectLimitReachedError, - ); expect(() => trace.tracePublicStorageWrite(AztecAddress.fromNumber(42), new Fr(42), new Fr(42))).toThrow( SideEffectLimitReachedError, ); - expect(() => trace.traceNoteHashCheck(AztecAddress.fromNumber(42), new Fr(42), new Fr(42), true)).toThrow( - SideEffectLimitReachedError, - ); expect(() => trace.traceNewNoteHash(AztecAddress.fromNumber(42), new Fr(42), new Fr(42))).toThrow( SideEffectLimitReachedError, ); - expect(() => trace.traceNullifierCheck(AztecAddress.fromNumber(42), new Fr(42), false)).toThrow( - SideEffectLimitReachedError, - ); - expect(() => trace.traceNullifierCheck(AztecAddress.fromNumber(42), new Fr(42), true)).toThrow( - SideEffectLimitReachedError, - ); - expect(() => trace.traceNewNullifier(AztecAddress.fromNumber(42), new Fr(42))).toThrow( - SideEffectLimitReachedError, - ); - expect(() => trace.traceL1ToL2MessageCheck(AztecAddress.fromNumber(42), new Fr(42), new Fr(42), true)).toThrow( - SideEffectLimitReachedError, - ); + expect(() => trace.traceNewNullifier(new Fr(42))).toThrow(SideEffectLimitReachedError); expect(() => trace.traceNewL2ToL1Message(AztecAddress.fromNumber(42), new Fr(42), new Fr(42))).toThrow( SideEffectLimitReachedError, ); expect(() => trace.traceUnencryptedLog(AztecAddress.fromNumber(42), [new Fr(42), new Fr(42)])).toThrow( SideEffectLimitReachedError, ); - expect(() => trace.traceGetContractInstance(address, /*exists=*/ false, contractInstance)).toThrow( - SideEffectLimitReachedError, - ); - expect(() => trace.traceGetContractInstance(address, /*exists=*/ true, contractInstance)).toThrow( - SideEffectLimitReachedError, - ); }); }); @@ -404,11 +274,11 @@ describe('Enqueued-call Side Effect Trace', () => { // counter does not increment for note hash checks nestedTrace.traceNewNoteHash(address, utxo, Fr.ZERO, []); testCounter++; - nestedTrace.traceNullifierCheck(address, utxo, true, lowLeafPreimage, Fr.ZERO, []); + nestedTrace.traceNullifierCheck(utxo, true, lowLeafPreimage, Fr.ZERO, []); testCounter++; - nestedTrace.traceNullifierCheck(address, utxo, true, lowLeafPreimage, Fr.ZERO, []); + nestedTrace.traceNullifierCheck(utxo, true, lowLeafPreimage, Fr.ZERO, []); testCounter++; - nestedTrace.traceNewNullifier(address, utxo, lowLeafPreimage, Fr.ZERO, [], []); + nestedTrace.traceNewNullifier(utxo, lowLeafPreimage, Fr.ZERO, [], []); testCounter++; nestedTrace.traceL1ToL2MessageCheck(address, utxo, leafIndex, existsDefault, []); // counter does not increment for l1tol2 message checks @@ -431,20 +301,33 @@ describe('Enqueued-call Side Effect Trace', () => { const childSideEffects = nestedTrace.getSideEffects(); // TODO(dbanks12): confirm that all hints were merged from child if (reverted) { - expect(parentSideEffects.publicDataReads).toEqual([]); expect(parentSideEffects.publicDataWrites).toEqual([]); - expect(parentSideEffects.noteHashReadRequests).toEqual([]); expect(parentSideEffects.noteHashes).toEqual([]); - expect(parentSideEffects.nullifierReadRequests).toEqual([]); - expect(parentSideEffects.nullifierNonExistentReadRequests).toEqual([]); 
expect(parentSideEffects.nullifiers).toEqual([]); - expect(parentSideEffects.l1ToL2MsgReadRequests).toEqual([]); expect(parentSideEffects.l2ToL1Msgs).toEqual([]); expect(parentSideEffects.unencryptedLogs).toEqual([]); expect(parentSideEffects.unencryptedLogsHashes).toEqual([]); } else { expect(parentSideEffects).toEqual(childSideEffects); } + + const parentHints = trace.getAvmCircuitHints(); + const childHints = nestedTrace.getAvmCircuitHints(); + expect(parentHints.enqueuedCalls.items).toEqual(childHints.enqueuedCalls.items); + expect(parentHints.storageValues.items).toEqual(childHints.storageValues.items); + expect(parentHints.noteHashExists.items).toEqual(childHints.noteHashExists.items); + expect(parentHints.nullifierExists.items).toEqual(childHints.nullifierExists.items); + expect(parentHints.l1ToL2MessageExists.items).toEqual(childHints.l1ToL2MessageExists.items); + expect(parentHints.externalCalls.items).toEqual(childHints.externalCalls.items); + expect(parentHints.contractInstances.items).toEqual(childHints.contractInstances.items); + expect(parentHints.contractBytecodeHints.items).toEqual(childHints.contractBytecodeHints.items); + expect(parentHints.storageReadRequest.items).toEqual(childHints.storageReadRequest.items); + expect(parentHints.storageUpdateRequest.items).toEqual(childHints.storageUpdateRequest.items); + expect(parentHints.nullifierReadRequest.items).toEqual(childHints.nullifierReadRequest.items); + expect(parentHints.nullifierWriteHints.items).toEqual(childHints.nullifierWriteHints.items); + expect(parentHints.noteHashReadRequest.items).toEqual(childHints.noteHashReadRequest.items); + expect(parentHints.noteHashWriteRequest.items).toEqual(childHints.noteHashWriteRequest.items); + expect(parentHints.l1ToL2MessageReadRequest.items).toEqual(childHints.l1ToL2MessageReadRequest.items); }); }); }); diff --git a/yarn-project/simulator/src/public/enqueued_call_side_effect_trace.ts b/yarn-project/simulator/src/public/enqueued_call_side_effect_trace.ts index 41298d80f63..84e85adcd64 100644 --- a/yarn-project/simulator/src/public/enqueued_call_side_effect_trace.ts +++ b/yarn-project/simulator/src/public/enqueued_call_side_effect_trace.ts @@ -8,7 +8,6 @@ import { AvmEnqueuedCallHint, AvmExecutionHints, AvmExternalCallHint, - AvmKeyValueHint, AvmNullifierReadTreeHint, AvmNullifierWriteTreeHint, AvmPublicDataReadTreeHint, @@ -23,14 +22,9 @@ import { L2ToL1Message, LogHash, MAX_ENQUEUED_CALLS_PER_TX, - MAX_L1_TO_L2_MSG_READ_REQUESTS_PER_TX, MAX_L2_TO_L1_MSGS_PER_TX, MAX_NOTE_HASHES_PER_TX, - MAX_NOTE_HASH_READ_REQUESTS_PER_TX, MAX_NULLIFIERS_PER_TX, - MAX_NULLIFIER_NON_EXISTENT_READ_REQUESTS_PER_TX, - MAX_NULLIFIER_READ_REQUESTS_PER_TX, - MAX_PUBLIC_DATA_READS_PER_TX, MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX, MAX_UNENCRYPTED_LOGS_PER_TX, NOTE_HASH_TREE_HEIGHT, @@ -42,20 +36,16 @@ import { PrivateToAvmAccumulatedData, PrivateToAvmAccumulatedDataArrayLengths, PublicCallRequest, - PublicDataRead, PublicDataTreeLeafPreimage, PublicDataUpdateRequest, PublicDataWrite, - ReadRequest, ScopedL2ToL1Message, ScopedLogHash, type ScopedNoteHash, - type ScopedReadRequest, SerializableContractInstance, - TreeLeafReadRequest, type TreeSnapshots, } from '@aztec/circuits.js'; -import { computePublicDataTreeLeafSlot, siloNullifier } from '@aztec/circuits.js/hash'; +import { computePublicDataTreeLeafSlot } from '@aztec/circuits.js/hash'; import { padArrayEnd } from '@aztec/foundation/collection'; import { Fr } from '@aztec/foundation/fields'; import { jsonStringify } from '@aztec/foundation/json-rpc'; 
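The test block above walks every hint vector and asserts that hints collected by the forked (child) trace survive the merge into the parent, in both the reverted and non-reverted branches. A minimal sketch of that append-style merge, using a hypothetical HintVector/TraceHints pair rather than the real containers from @aztec/circuits.js:

// Sketch only: HintVector and TraceHints stand in for the real hint
// containers; only two of the many hint vectors are modeled here.
class HintVector<T> {
  constructor(public items: T[] = []) {}
}

class TraceHints {
  nullifierReadRequest = new HintVector<string>();
  noteHashReadRequest = new HintVector<string>();

  // Append the fork's hints onto the parent. This runs even when the fork's
  // side effects are rejected, presumably because the AVM circuit still needs
  // witnesses for the reverted execution.
  mergeFrom(fork: TraceHints) {
    this.nullifierReadRequest.items.push(...fork.nullifierReadRequest.items);
    this.noteHashReadRequest.items.push(...fork.noteHashReadRequest.items);
  }
}

const parent = new TraceHints();
const fork = new TraceHints();
fork.nullifierReadRequest.items.push('low-leaf membership hint');
parent.mergeFrom(fork);
console.assert(parent.nullifierReadRequest.items.length === 1);

The implementation of this pattern is the mergeHints method added to PublicEnqueuedCallSideEffectTrace in the next hunk.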
@@ -82,17 +72,9 @@ const emptyL1ToL2MessagePath = () => new Array(L1_TO_L2_MSG_TREE_HEIGHT).fill(Fr export type SideEffects = { enqueuedCalls: PublicCallRequest[]; - publicDataReads: PublicDataRead[]; publicDataWrites: PublicDataUpdateRequest[]; - - noteHashReadRequests: TreeLeafReadRequest[]; noteHashes: ScopedNoteHash[]; - - nullifierReadRequests: ScopedReadRequest[]; - nullifierNonExistentReadRequests: ScopedReadRequest[]; nullifiers: Nullifier[]; - - l1ToL2MsgReadRequests: TreeLeafReadRequest[]; l2ToL1Msgs: ScopedL2ToL1Message[]; unencryptedLogs: UnencryptedL2Log[]; @@ -101,24 +83,15 @@ export type SideEffects = { export class SideEffectArrayLengths { constructor( - public readonly publicDataReads: number, public readonly publicDataWrites: number, - - public readonly noteHashReadRequests: number, public readonly noteHashes: number, - - public readonly nullifierReadRequests: number, - public readonly nullifierNonExistentReadRequests: number, public readonly nullifiers: number, - - public readonly l1ToL2MsgReadRequests: number, public readonly l2ToL1Msgs: number, - public readonly unencryptedLogs: number, ) {} static empty() { - return new this(0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + return new this(0, 0, 0, 0, 0); } } @@ -133,19 +106,10 @@ export class PublicEnqueuedCallSideEffectTrace implements PublicSideEffectTraceI private enqueuedCalls: PublicCallRequest[] = []; - private publicDataReads: PublicDataRead[] = []; private publicDataWrites: PublicDataUpdateRequest[] = []; - - private noteHashReadRequests: TreeLeafReadRequest[] = []; private noteHashes: ScopedNoteHash[] = []; - - private nullifierReadRequests: ScopedReadRequest[] = []; - private nullifierNonExistentReadRequests: ScopedReadRequest[] = []; private nullifiers: Nullifier[] = []; - - private l1ToL2MsgReadRequests: TreeLeafReadRequest[] = []; private l2ToL1Messages: ScopedL2ToL1Message[] = []; - private unencryptedLogs: UnencryptedL2Log[] = []; private unencryptedLogsHashes: ScopedLogHash[] = []; @@ -171,15 +135,9 @@ export class PublicEnqueuedCallSideEffectTrace implements PublicSideEffectTraceI return new PublicEnqueuedCallSideEffectTrace( this.sideEffectCounter, new SideEffectArrayLengths( - this.previousSideEffectArrayLengths.publicDataReads + this.publicDataReads.length, this.previousSideEffectArrayLengths.publicDataWrites + this.publicDataWrites.length, - this.previousSideEffectArrayLengths.noteHashReadRequests + this.noteHashReadRequests.length, this.previousSideEffectArrayLengths.noteHashes + this.noteHashes.length, - this.previousSideEffectArrayLengths.nullifierReadRequests + this.nullifierReadRequests.length, - this.previousSideEffectArrayLengths.nullifierNonExistentReadRequests + - this.nullifierNonExistentReadRequests.length, this.previousSideEffectArrayLengths.nullifiers + this.nullifiers.length, - this.previousSideEffectArrayLengths.l1ToL2MsgReadRequests + this.l1ToL2MsgReadRequests.length, this.previousSideEffectArrayLengths.l2ToL1Msgs + this.l2ToL1Messages.length, this.previousSideEffectArrayLengths.unencryptedLogs + this.unencryptedLogs.length, ), @@ -194,23 +152,42 @@ export class PublicEnqueuedCallSideEffectTrace implements PublicSideEffectTraceI ); forkedTrace.alreadyMergedIntoParent = true; - // TODO(dbanks12): accept & merge forked trace's hints! 
this.sideEffectCounter = forkedTrace.sideEffectCounter; this.enqueuedCalls.push(...forkedTrace.enqueuedCalls); if (!reverted) { - this.publicDataReads.push(...forkedTrace.publicDataReads); this.publicDataWrites.push(...forkedTrace.publicDataWrites); - this.noteHashReadRequests.push(...forkedTrace.noteHashReadRequests); this.noteHashes.push(...forkedTrace.noteHashes); - this.nullifierReadRequests.push(...forkedTrace.nullifierReadRequests); - this.nullifierNonExistentReadRequests.push(...forkedTrace.nullifierNonExistentReadRequests); this.nullifiers.push(...forkedTrace.nullifiers); - this.l1ToL2MsgReadRequests.push(...forkedTrace.l1ToL2MsgReadRequests); this.l2ToL1Messages.push(...forkedTrace.l2ToL1Messages); this.unencryptedLogs.push(...forkedTrace.unencryptedLogs); this.unencryptedLogsHashes.push(...forkedTrace.unencryptedLogsHashes); } + this.mergeHints(forkedTrace); + } + + private mergeHints(forkedTrace: this) { + this.avmCircuitHints.enqueuedCalls.items.push(...forkedTrace.avmCircuitHints.enqueuedCalls.items); + + this.avmCircuitHints.storageValues.items.push(...forkedTrace.avmCircuitHints.storageValues.items); + this.avmCircuitHints.noteHashExists.items.push(...forkedTrace.avmCircuitHints.noteHashExists.items); + this.avmCircuitHints.nullifierExists.items.push(...forkedTrace.avmCircuitHints.nullifierExists.items); + this.avmCircuitHints.l1ToL2MessageExists.items.push(...forkedTrace.avmCircuitHints.l1ToL2MessageExists.items); + + this.avmCircuitHints.externalCalls.items.push(...forkedTrace.avmCircuitHints.externalCalls.items); + + this.avmCircuitHints.contractInstances.items.push(...forkedTrace.avmCircuitHints.contractInstances.items); + this.avmCircuitHints.contractBytecodeHints.items.push(...forkedTrace.avmCircuitHints.contractBytecodeHints.items); + + this.avmCircuitHints.storageReadRequest.items.push(...forkedTrace.avmCircuitHints.storageReadRequest.items); + this.avmCircuitHints.storageUpdateRequest.items.push(...forkedTrace.avmCircuitHints.storageUpdateRequest.items); + this.avmCircuitHints.nullifierReadRequest.items.push(...forkedTrace.avmCircuitHints.nullifierReadRequest.items); + this.avmCircuitHints.nullifierWriteHints.items.push(...forkedTrace.avmCircuitHints.nullifierWriteHints.items); + this.avmCircuitHints.noteHashReadRequest.items.push(...forkedTrace.avmCircuitHints.noteHashReadRequest.items); + this.avmCircuitHints.noteHashWriteRequest.items.push(...forkedTrace.avmCircuitHints.noteHashWriteRequest.items); + this.avmCircuitHints.l1ToL2MessageReadRequest.items.push( + ...forkedTrace.avmCircuitHints.l1ToL2MessageReadRequest.items, + ); } public getCounter() { @@ -222,7 +199,7 @@ export class PublicEnqueuedCallSideEffectTrace implements PublicSideEffectTraceI } public tracePublicStorageRead( - contractAddress: AztecAddress, + _contractAddress: AztecAddress, slot: Fr, value: Fr, leafPreimage: PublicDataTreeLeafPreimage = PublicDataTreeLeafPreimage.empty(), @@ -233,21 +210,7 @@ export class PublicEnqueuedCallSideEffectTrace implements PublicSideEffectTraceI // if we have real merkle hint content, make sure the value matches the the provided preimage assert(leafPreimage.value.equals(value), 'Value mismatch when tracing in public data write'); } - // NOTE: exists and cached are unused for now but may be used for optimizations or kernel hints later - if ( - this.publicDataReads.length + this.previousSideEffectArrayLengths.publicDataReads >= - MAX_PUBLIC_DATA_READS_PER_TX - ) { - throw new SideEffectLimitReachedError('public data (contract storage) read', 
MAX_PUBLIC_DATA_READS_PER_TX); - } - const leafSlot = computePublicDataTreeLeafSlot(contractAddress, slot); - - this.publicDataReads.push(new PublicDataRead(leafSlot, value, this.sideEffectCounter)); - - this.avmCircuitHints.storageValues.items.push( - new AvmKeyValueHint(/*key=*/ new Fr(this.sideEffectCounter), /*value=*/ value), - ); this.avmCircuitHints.storageReadRequest.items.push(new AvmPublicDataReadTreeHint(leafPreimage, leafIndex, path)); this.log.debug(`SLOAD cnt: ${this.sideEffectCounter} val: ${value} slot: ${slot}`); this.incrementSideEffectCounter(); @@ -297,22 +260,9 @@ export class PublicEnqueuedCallSideEffectTrace implements PublicSideEffectTraceI _contractAddress: AztecAddress, noteHash: Fr, leafIndex: Fr, - exists: boolean, + _exists: boolean, path: Fr[] = emptyNoteHashPath(), ) { - // NOTE: contractAddress is unused because noteHash is an already-siloed leaf - if ( - this.noteHashReadRequests.length + this.previousSideEffectArrayLengths.noteHashReadRequests >= - MAX_NOTE_HASH_READ_REQUESTS_PER_TX - ) { - throw new SideEffectLimitReachedError('note hash read request', MAX_NOTE_HASH_READ_REQUESTS_PER_TX); - } - - // note hash is already siloed here - this.noteHashReadRequests.push(new TreeLeafReadRequest(noteHash, leafIndex)); - this.avmCircuitHints.noteHashExists.items.push( - new AvmKeyValueHint(/*key=*/ new Fr(leafIndex), /*value=*/ exists ? Fr.ONE : Fr.ZERO), - ); // New Hinting this.avmCircuitHints.noteHashReadRequest.items.push(new AvmAppendTreeHint(leafIndex, noteHash, path)); // NOTE: counter does not increment for note hash checks (because it doesn't rely on pending note hashes) @@ -337,29 +287,12 @@ export class PublicEnqueuedCallSideEffectTrace implements PublicSideEffectTraceI } public traceNullifierCheck( - contractAddress: AztecAddress, - nullifier: Fr, - exists: boolean, + _siloedNullifier: Fr, + _exists: boolean, lowLeafPreimage: NullifierLeafPreimage = NullifierLeafPreimage.empty(), lowLeafIndex: Fr = Fr.zero(), lowLeafPath: Fr[] = emptyNullifierPath(), ) { - // NOTE: isPending and leafIndex are unused for now but may be used for optimizations or kernel hints later - this.enforceLimitOnNullifierChecks(); - - // TODO(dbanks12): use siloed nullifier instead of scoped once public kernel stops siloing - // and once VM public inputs are meant to contain siloed nullifiers. - //const siloedNullifier = siloNullifier(contractAddress, nullifier); - const readRequest = new ReadRequest(nullifier, this.sideEffectCounter).scope(contractAddress); - if (exists) { - this.nullifierReadRequests.push(readRequest); - } else { - this.nullifierNonExistentReadRequests.push(readRequest); - } - this.avmCircuitHints.nullifierExists.items.push( - new AvmKeyValueHint(/*key=*/ new Fr(this.sideEffectCounter), /*value=*/ new Fr(exists ? 
1 : 0)), - ); - // New Hints this.avmCircuitHints.nullifierReadRequest.items.push( new AvmNullifierReadTreeHint(lowLeafPreimage, lowLeafIndex, lowLeafPath), ); @@ -368,8 +301,7 @@ export class PublicEnqueuedCallSideEffectTrace implements PublicSideEffectTraceI } public traceNewNullifier( - contractAddress: AztecAddress, - nullifier: Fr, + siloedNullifier: Fr, lowLeafPreimage: NullifierLeafPreimage = NullifierLeafPreimage.empty(), lowLeafIndex: Fr = Fr.zero(), lowLeafPath: Fr[] = emptyNullifierPath(), @@ -379,10 +311,8 @@ export class PublicEnqueuedCallSideEffectTrace implements PublicSideEffectTraceI throw new SideEffectLimitReachedError('nullifier', MAX_NULLIFIERS_PER_TX); } - const siloedNullifier = siloNullifier(contractAddress, nullifier); this.nullifiers.push(new Nullifier(siloedNullifier, this.sideEffectCounter, /*noteHash=*/ Fr.ZERO)); - // New hinting const lowLeafReadHint = new AvmNullifierReadTreeHint(lowLeafPreimage, lowLeafIndex, lowLeafPath); this.avmCircuitHints.nullifierWriteHints.items.push(new AvmNullifierWriteTreeHint(lowLeafReadHint, insertionPath)); this.log.debug(`NEW_NULLIFIER cnt: ${this.sideEffectCounter}`); @@ -394,22 +324,9 @@ export class PublicEnqueuedCallSideEffectTrace implements PublicSideEffectTraceI _contractAddress: AztecAddress, msgHash: Fr, msgLeafIndex: Fr, - exists: boolean, + _exists: boolean, path: Fr[] = emptyL1ToL2MessagePath(), ) { - // NOTE: contractAddress is unused because msgHash is an already-siloed leaf - if ( - this.l1ToL2MsgReadRequests.length + this.previousSideEffectArrayLengths.l1ToL2MsgReadRequests >= - MAX_L1_TO_L2_MSG_READ_REQUESTS_PER_TX - ) { - throw new SideEffectLimitReachedError('l1 to l2 message read request', MAX_L1_TO_L2_MSG_READ_REQUESTS_PER_TX); - } - - this.l1ToL2MsgReadRequests.push(new TreeLeafReadRequest(msgHash, msgLeafIndex)); - this.avmCircuitHints.l1ToL2MessageExists.items.push( - new AvmKeyValueHint(/*key=*/ new Fr(msgLeafIndex), /*value=*/ exists ? Fr.ONE : Fr.ZERO), - ); - // New Hinting this.avmCircuitHints.l1ToL2MessageReadRequest.items.push(new AvmAppendTreeHint(msgLeafIndex, msgHash, path)); } @@ -451,8 +368,6 @@ export class PublicEnqueuedCallSideEffectTrace implements PublicSideEffectTraceI exists: boolean, instance: SerializableContractInstance = SerializableContractInstance.default(), ) { - this.enforceLimitOnNullifierChecks('(contract address nullifier from GETCONTRACTINSTANCE)'); - this.avmCircuitHints.contractInstances.items.push( new AvmContractInstanceHint( contractAddress, @@ -560,14 +475,9 @@ export class PublicEnqueuedCallSideEffectTrace implements PublicSideEffectTraceI public getSideEffects(): SideEffects { return { enqueuedCalls: this.enqueuedCalls, - publicDataReads: this.publicDataReads, publicDataWrites: this.publicDataWrites, - noteHashReadRequests: this.noteHashReadRequests, noteHashes: this.noteHashes, - nullifierReadRequests: this.nullifierReadRequests, - nullifierNonExistentReadRequests: this.nullifierNonExistentReadRequests, nullifiers: this.nullifiers, - l1ToL2MsgReadRequests: this.l1ToL2MsgReadRequests, l2ToL1Msgs: this.l2ToL1Messages, unencryptedLogs: this.unencryptedLogs, unencryptedLogsHashes: this.unencryptedLogsHashes, @@ -689,32 +599,4 @@ export class PublicEnqueuedCallSideEffectTrace implements PublicSideEffectTraceI ), ); } - - private enforceLimitOnNullifierChecks(errorMsgOrigin: string = '') { - // NOTE: Why error if _either_ limit was reached? 
If user code emits either an existent or non-existent - // nullifier read request (NULLIFIEREXISTS, GETCONTRACTINSTANCE, *CALL), and one of the limits has been - // reached (MAX_NULLIFIER_NON_EXISTENT_RRS vs MAX_NULLIFIER_RRS), but not the other, we must prevent the - // sequencer from lying and saying "this nullifier exists, but MAX_NULLIFIER_RRS has been reached, so I'm - // going to skip the read request and just revert instead" when the nullifier actually doesn't exist - // (or vice versa). So, if either maximum has been reached, any nullifier-reading operation must error. - if ( - this.nullifierReadRequests.length + this.previousSideEffectArrayLengths.nullifierReadRequests >= - MAX_NULLIFIER_READ_REQUESTS_PER_TX - ) { - throw new SideEffectLimitReachedError( - `nullifier read request ${errorMsgOrigin}`, - MAX_NULLIFIER_READ_REQUESTS_PER_TX, - ); - } - if ( - this.nullifierNonExistentReadRequests.length + - this.previousSideEffectArrayLengths.nullifierNonExistentReadRequests >= - MAX_NULLIFIER_NON_EXISTENT_READ_REQUESTS_PER_TX - ) { - throw new SideEffectLimitReachedError( - `nullifier non-existent read request ${errorMsgOrigin}`, - MAX_NULLIFIER_NON_EXISTENT_READ_REQUESTS_PER_TX, - ); - } - } } diff --git a/yarn-project/simulator/src/public/fixtures/index.ts b/yarn-project/simulator/src/public/fixtures/index.ts index d4c69bfd9ae..512cbf93d30 100644 --- a/yarn-project/simulator/src/public/fixtures/index.ts +++ b/yarn-project/simulator/src/public/fixtures/index.ts @@ -5,7 +5,7 @@ import { type ContractClassPublic, type ContractInstanceWithAddress, DEFAULT_GAS_LIMIT, - type FunctionSelector, + FunctionSelector, Gas, GasFees, GasSettings, @@ -23,15 +23,16 @@ import { computePublicBytecodeCommitment, } from '@aztec/circuits.js'; import { makeContractClassPublic, makeContractInstanceFromClassId } from '@aztec/circuits.js/testing'; -import { type ContractArtifact } from '@aztec/foundation/abi'; +import { type ContractArtifact, type FunctionArtifact } from '@aztec/foundation/abi'; import { AztecAddress } from '@aztec/foundation/aztec-address'; import { Fr, Point } from '@aztec/foundation/fields'; import { openTmpStore } from '@aztec/kv-store/utils'; +import { AvmTestContractArtifact } from '@aztec/noir-contracts.js'; import { PublicTxSimulator, WorldStateDB } from '@aztec/simulator'; import { NoopTelemetryClient } from '@aztec/telemetry-client/noop'; import { MerkleTrees } from '@aztec/world-state'; -import { getAvmTestContractBytecode, getAvmTestContractFunctionSelector } from '../../avm/fixtures/index.js'; +import { strict as assert } from 'assert'; /** * If assertionErrString is set, we expect a (non exceptional halting) revert due to a failing assertion and @@ -210,3 +211,24 @@ class MockedAvmTestContractDataSource { return Promise.resolve(); } } + +function getAvmTestContractFunctionSelector(functionName: string): FunctionSelector { + const artifact = AvmTestContractArtifact.functions.find(f => f.name === functionName)!; + assert(!!artifact, `Function ${functionName} not found in AvmTestContractArtifact`); + const params = artifact.parameters; + return FunctionSelector.fromNameAndParameters(artifact.name, params); +} + +function getAvmTestContractArtifact(functionName: string): FunctionArtifact { + const artifact = AvmTestContractArtifact.functions.find(f => f.name === functionName)!; + assert( + !!artifact?.bytecode, + `No bytecode found for function ${functionName}. 
Try re-running bootstrap.sh on the repository root.`, + ); + return artifact; +} + +function getAvmTestContractBytecode(functionName: string): Buffer { + const artifact = getAvmTestContractArtifact(functionName); + return artifact.bytecode; +} diff --git a/yarn-project/simulator/src/public/public_tx_context.ts b/yarn-project/simulator/src/public/public_tx_context.ts index fd4c860a35c..f6fd8e7c9e1 100644 --- a/yarn-project/simulator/src/public/public_tx_context.ts +++ b/yarn-project/simulator/src/public/public_tx_context.ts @@ -1,5 +1,6 @@ import { type AvmProvingRequest, + MerkleTreeId, type MerkleTreeReadOperations, type PublicExecutionRequest, type SimulationError, @@ -67,10 +68,9 @@ export class PublicTxContext { private readonly setupExecutionRequests: PublicExecutionRequest[], private readonly appLogicExecutionRequests: PublicExecutionRequest[], private readonly teardownExecutionRequests: PublicExecutionRequest[], - private readonly nonRevertibleAccumulatedDataFromPrivate: PrivateToPublicAccumulatedData, - private readonly revertibleAccumulatedDataFromPrivate: PrivateToPublicAccumulatedData, + public readonly nonRevertibleAccumulatedDataFromPrivate: PrivateToPublicAccumulatedData, + public readonly revertibleAccumulatedDataFromPrivate: PrivateToPublicAccumulatedData, public trace: PublicEnqueuedCallSideEffectTrace, // FIXME(dbanks12): should be private - private doMerkleOperations: boolean, ) { this.log = createDebugLogger(`aztec:public_tx_context`); this.gasUsed = startGasUsed; @@ -84,23 +84,12 @@ export class PublicTxContext { doMerkleOperations: boolean, ) { const nonRevertibleAccumulatedDataFromPrivate = tx.data.forPublic!.nonRevertibleAccumulatedData; - const revertibleAccumulatedDataFromPrivate = tx.data.forPublic!.revertibleAccumulatedData; - const nonRevertibleNullifiersFromPrivate = nonRevertibleAccumulatedDataFromPrivate.nullifiers.filter( - n => !n.isEmpty(), - ); - const _revertibleNullifiersFromPrivate = revertibleAccumulatedDataFromPrivate.nullifiers.filter(n => !n.isEmpty()); const innerCallTrace = new PublicSideEffectTrace(); - const previousAccumulatedDataArrayLengths = new SideEffectArrayLengths( - /*publicDataReads*/ 0, /*publicDataWrites*/ 0, - /*noteHashReadRequests*/ 0, countAccumulatedItems(nonRevertibleAccumulatedDataFromPrivate.noteHashes), - /*nullifierReadRequests*/ 0, - /*nullifierNonExistentReadRequests*/ 0, countAccumulatedItems(nonRevertibleAccumulatedDataFromPrivate.nullifiers), - /*l1ToL2MsgReadRequests*/ 0, countAccumulatedItems(nonRevertibleAccumulatedDataFromPrivate.l2ToL1Msgs), /*unencryptedLogsHashes*/ 0, ); @@ -111,12 +100,7 @@ export class PublicTxContext { const trace = new DualSideEffectTrace(innerCallTrace, enqueuedCallTrace); // Transaction level state manager that will be forked for revertible phases. - const txStateManager = await AvmPersistableStateManager.newWithPendingSiloedNullifiers( - worldStateDB, - trace, - nonRevertibleNullifiersFromPrivate, - doMerkleOperations, - ); + const txStateManager = await AvmPersistableStateManager.create(worldStateDB, trace, doMerkleOperations); return new PublicTxContext( new PhaseStateManager(txStateManager), @@ -134,7 +118,6 @@ export class PublicTxContext { tx.data.forPublic!.nonRevertibleAccumulatedData, tx.data.forPublic!.revertibleAccumulatedData, enqueuedCallTrace, - doMerkleOperations, ); } @@ -144,6 +127,9 @@ export class PublicTxContext { * Actual transaction fee and actual total consumed gas can now be queried. 
*/ halt() { + if (this.state.isForked()) { + this.state.mergeForkedState(); + } this.halted = true; } @@ -315,6 +301,11 @@ export class PublicTxContext { */ private generateAvmCircuitPublicInputs(endStateReference: StateReference): AvmCircuitPublicInputs { assert(this.halted, 'Can only get AvmCircuitPublicInputs after tx execution ends'); + // TODO(dbanks12): use the state roots from ephemeral trees + endStateReference.partial.nullifierTree.root = this.state + .getActiveStateManager() + .merkleTrees.treeMap.get(MerkleTreeId.NULLIFIER_TREE)! + .getRoot(); return generateAvmCircuitPublicInputs( this.trace, this.globalVariables, @@ -379,16 +370,21 @@ export class PublicTxContext { * so that we can conditionally fork at the start of a phase. * * There is a state manager that lives at the level of the entire transaction, - * but for setup and teardown the active state manager will be a fork of the + * but for app logic and teardown the active state manager will be a fork of the * transaction level one. */ class PhaseStateManager { + private log: DebugLogger; + private currentlyActiveStateManager: AvmPersistableStateManager | undefined; - constructor(private readonly txStateManager: AvmPersistableStateManager) {} + constructor(private readonly txStateManager: AvmPersistableStateManager) { + this.log = createDebugLogger(`aztec:public_phase_state_manager`); + } fork() { assert(!this.currentlyActiveStateManager, 'Cannot fork when already forked'); + this.log.debug(`Forking phase state manager`); this.currentlyActiveStateManager = this.txStateManager.fork(); } @@ -402,12 +398,14 @@ class PhaseStateManager { mergeForkedState() { assert(this.currentlyActiveStateManager, 'No forked state to merge'); + this.log.debug(`Merging in forked state`); this.txStateManager.merge(this.currentlyActiveStateManager!); // Drop the forked state manager now that it is merged this.currentlyActiveStateManager = undefined; } discardForkedState() { + this.log.debug(`Discarding forked state`); assert(this.currentlyActiveStateManager, 'No forked state to discard'); this.txStateManager.reject(this.currentlyActiveStateManager!); // Drop the forked state manager. We don't want it! 
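Taken together, the public_tx_context.ts changes move the fork boundary: the transaction-level state manager now stays unforked through setup, is forked by insertRevertiblesFromPrivate (added to public_tx_simulator.ts further below) before revertible nullifiers are inserted, and any outstanding fork is merged by halt() before public inputs are generated. A minimal sketch of the fork/merge/discard contract, with a hypothetical StateManager interface standing in for AvmPersistableStateManager:

// Sketch only: StateManager stands in for AvmPersistableStateManager.
interface StateManager {
  fork(): StateManager;
  merge(fork: StateManager): void; // accept the fork's side effects
  reject(fork: StateManager): void; // drop the fork's side effects
}

// Simplified PhaseStateManager: at most one live fork, merged or discarded.
class PhaseState {
  private active: StateManager | undefined;

  constructor(private readonly tx: StateManager) {}

  fork() {
    if (this.active) throw new Error('Cannot fork when already forked');
    this.active = this.tx.fork();
  }

  isForked(): boolean {
    return this.active !== undefined;
  }

  getActiveStateManager(): StateManager {
    return this.active ?? this.tx;
  }

  mergeForkedState() {
    if (!this.active) throw new Error('No forked state to merge');
    this.tx.merge(this.active);
    this.active = undefined; // the fork is consumed once merged
  }

  discardForkedState() {
    if (!this.active) throw new Error('No forked state to discard');
    this.tx.reject(this.active);
    this.active = undefined; // roll back to the transaction-level state
  }
}

On this model, a revert in app logic or teardown discards the fork (rolling back to the end of setup), while a successful run leaves the fork for halt() to merge.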
diff --git a/yarn-project/simulator/src/public/public_tx_simulator.test.ts b/yarn-project/simulator/src/public/public_tx_simulator.test.ts index abd3f9e6671..c17d1d03bf5 100644 --- a/yarn-project/simulator/src/public/public_tx_simulator.test.ts +++ b/yarn-project/simulator/src/public/public_tx_simulator.test.ts @@ -1,13 +1,19 @@ -import { type MerkleTreeWriteOperations, SimulationError, TxExecutionPhase, mockTx } from '@aztec/circuit-types'; +import { + MerkleTreeId, + type MerkleTreeWriteOperations, + SimulationError, + TxExecutionPhase, + mockTx, +} from '@aztec/circuit-types'; import { AppendOnlyTreeSnapshot, - AztecAddress, Fr, Gas, GasFees, GasSettings, GlobalVariables, Header, + NULLIFIER_SUBTREE_HEIGHT, PUBLIC_DATA_TREE_HEIGHT, PartialStateReference, PublicDataWrite, @@ -15,7 +21,7 @@ import { StateReference, countAccumulatedItems, } from '@aztec/circuits.js'; -import { computePublicDataTreeLeafSlot, siloNullifier } from '@aztec/circuits.js/hash'; +import { computePublicDataTreeLeafSlot } from '@aztec/circuits.js/hash'; import { fr } from '@aztec/circuits.js/testing'; import { type AztecKVStore } from '@aztec/kv-store'; import { openTmpStore } from '@aztec/kv-store/utils'; @@ -30,9 +36,11 @@ import { AvmFinalizedCallResult } from '../avm/avm_contract_call_result.js'; import { type AvmPersistableStateManager } from '../avm/journal/journal.js'; import { type InstructionSet } from '../avm/serialization/bytecode_serialization.js'; import { type WorldStateDB } from './public_db_sources.js'; -import { PublicTxSimulator } from './public_tx_simulator.js'; +import { type PublicTxResult, PublicTxSimulator } from './public_tx_simulator.js'; describe('public_tx_simulator', () => { + // Nullifier must be >=128 since tree starts with 128 entries pre-filled + const MIN_NULLIFIER = 128; // Gas settings. const gasFees = GasFees.from({ feePerDaGas: new Fr(2), feePerL2Gas: new Fr(3) }); const gasLimits = Gas.from({ daGas: 100, l2Gas: 150 }); @@ -72,7 +80,8 @@ describe('public_tx_simulator', () => { numberOfAppLogicCalls?: number; hasPublicTeardownCall?: boolean; }) => { - const tx = mockTx(1, { + // seed with min nullifier to prevent insertion of a nullifier < min + const tx = mockTx(/*seed=*/ MIN_NULLIFIER, { numberOfNonRevertiblePublicCallRequests: numberOfSetupCalls, numberOfRevertiblePublicCallRequests: numberOfAppLogicCalls, hasPublicTeardownCallRequest: hasPublicTeardownCall, @@ -128,6 +137,28 @@ describe('public_tx_simulator', () => { } }; + const checkNullifierRoot = async (txResult: PublicTxResult) => { + const siloedNullifiers = txResult.avmProvingRequest.inputs.output.accumulatedData.nullifiers; + // Loop helpful for debugging so you can see root progression + //for (const nullifier of siloedNullifiers) { + // await db.batchInsert( + // MerkleTreeId.NULLIFIER_TREE, + // [nullifier.toBuffer()], + // NULLIFIER_SUBTREE_HEIGHT, + // ); + // console.log(`TESTING Nullifier tree root after insertion ${(await db.getStateReference()).partial.nullifierTree.root}`); + //} + // This is how the public processor inserts nullifiers. 
+ await db.batchInsert( + MerkleTreeId.NULLIFIER_TREE, + siloedNullifiers.map(n => n.toBuffer()), + NULLIFIER_SUBTREE_HEIGHT, + ); + const expectedRoot = (await db.getStateReference()).partial.nullifierTree.root; + const gotRoot = txResult.avmProvingRequest.inputs.output.endTreeSnapshots.nullifierTree.root; + expect(gotRoot).toEqual(expectedRoot); + }; + const expectAvailableGasForCalls = (availableGases: Gas[]) => { expect(simulateInternal).toHaveBeenCalledTimes(availableGases.length); availableGases.forEach((availableGas, i) => { @@ -179,6 +210,7 @@ describe('public_tx_simulator', () => { new NoopTelemetryClient(), GlobalVariables.from({ ...GlobalVariables.empty(), gasFees }), /*realAvmProvingRequest=*/ false, + /*doMerkleOperations=*/ true, ); // Mock the internal private function. Borrowed from https://stackoverflow.com/a/71033167 @@ -420,7 +452,8 @@ describe('public_tx_simulator', () => { }); it('fails a transaction that reverts in setup', async function () { - const tx = mockTx(1, { + // seed with min nullifier to prevent insertion of a nullifier < min + const tx = mockTx(/*seed=*/ MIN_NULLIFIER, { numberOfNonRevertiblePublicCallRequests: 1, numberOfRevertiblePublicCallRequests: 1, hasPublicTeardownCallRequest: true, @@ -451,24 +484,24 @@ describe('public_tx_simulator', () => { const appLogicFailure = new SimulationError('Simulation Failed in app logic', []); - const contractAddress = AztecAddress.fromBigInt(112233n); + const siloedNullifiers = [new Fr(10000), new Fr(20000), new Fr(30000), new Fr(40000), new Fr(50000)]; mockPublicExecutor([ // SETUP async (stateManager: AvmPersistableStateManager) => { - await stateManager.writeNullifier(contractAddress, new Fr(1)); + await stateManager.writeSiloedNullifier(siloedNullifiers[0]); }, // APP LOGIC async (stateManager: AvmPersistableStateManager) => { - await stateManager.writeNullifier(contractAddress, new Fr(2)); - await stateManager.writeNullifier(contractAddress, new Fr(3)); + await stateManager.writeSiloedNullifier(siloedNullifiers[1]); + await stateManager.writeSiloedNullifier(siloedNullifiers[2]); }, async (stateManager: AvmPersistableStateManager) => { - await stateManager.writeNullifier(contractAddress, new Fr(4)); + await stateManager.writeSiloedNullifier(siloedNullifiers[3]); return Promise.resolve(appLogicFailure); }, // TEARDOWN async (stateManager: AvmPersistableStateManager) => { - await stateManager.writeNullifier(contractAddress, new Fr(5)); + await stateManager.writeSiloedNullifier(siloedNullifiers[4]); }, ]); @@ -512,12 +545,17 @@ describe('public_tx_simulator', () => { // we keep the non-revertible data. 
       expect(countAccumulatedItems(output.accumulatedData.nullifiers)).toBe(4);
-      expect(output.accumulatedData.nullifiers.slice(0, 4)).toEqual([
-        new Fr(7777),
-        new Fr(8888),
-        siloNullifier(contractAddress, new Fr(1)),
-        siloNullifier(contractAddress, new Fr(5)),
-      ]);
+      const includedSiloedNullifiers = [
+        ...tx.data.forPublic!.nonRevertibleAccumulatedData.nullifiers.filter(n => !n.isZero()),
+        siloedNullifiers[0],
+        // dropped revertibles and app logic
+        //...tx.data.forPublic!.revertibleAccumulatedData.nullifiers.filter(n => !n.isZero()),
+        //...siloedNullifiers[1...3]
+        // teardown
+        siloedNullifiers[4],
+      ];
+      expect(output.accumulatedData.nullifiers.filter(n => !n.isZero())).toEqual(includedSiloedNullifiers);
+      await checkNullifierRoot(txResult);
     });

     it('includes a transaction that reverts in teardown only', async function () {
@@ -529,23 +567,23 @@ describe('public_tx_simulator', () => {
       const teardownFailure = new SimulationError('Simulation Failed in teardown', []);

-      const contractAddress = AztecAddress.fromBigInt(112233n);
+      const siloedNullifiers = [new Fr(10000), new Fr(20000), new Fr(30000), new Fr(40000), new Fr(50000)];
       mockPublicExecutor([
         // SETUP
         async (stateManager: AvmPersistableStateManager) => {
-          await stateManager.writeNullifier(contractAddress, new Fr(1));
+          await stateManager.writeSiloedNullifier(siloedNullifiers[0]);
         },
         // APP LOGIC
         async (stateManager: AvmPersistableStateManager) => {
-          await stateManager.writeNullifier(contractAddress, new Fr(2));
-          await stateManager.writeNullifier(contractAddress, new Fr(3));
+          await stateManager.writeSiloedNullifier(siloedNullifiers[1]);
+          await stateManager.writeSiloedNullifier(siloedNullifiers[2]);
         },
         async (stateManager: AvmPersistableStateManager) => {
-          await stateManager.writeNullifier(contractAddress, new Fr(4));
+          await stateManager.writeSiloedNullifier(siloedNullifiers[3]);
         },
         // TEARDOWN
         async (stateManager: AvmPersistableStateManager) => {
-          await stateManager.writeNullifier(contractAddress, new Fr(5));
+          await stateManager.writeSiloedNullifier(siloedNullifiers[4]);
           return Promise.resolve(teardownFailure);
         },
       ]);
@@ -589,16 +627,15 @@ describe('public_tx_simulator', () => {

       // We keep the non-revertible data.
       expect(countAccumulatedItems(output.accumulatedData.nullifiers)).toBe(3);
-      expect(output.accumulatedData.nullifiers.slice(0, 3)).toEqual([
-        new Fr(7777),
-        new Fr(8888),
-        // new Fr(9999), // TODO: Data in app logic should be kept if teardown reverts.
-        siloNullifier(contractAddress, new Fr(1)),
-        // siloNullifier(contractAddress, new Fr(2)),
-        // siloNullifier(contractAddress, new Fr(3)),
-        // siloNullifier(contractAddress, new Fr(4)),
-        // siloNullifier(contractAddress, new Fr(5)),
-      ]);
+      const includedSiloedNullifiers = [
+        ...tx.data.forPublic!.nonRevertibleAccumulatedData.nullifiers.filter(n => !n.isZero()),
+        siloedNullifiers[0],
+        // dropped
+        //...tx.data.forPublic!.revertibleAccumulatedData.nullifiers.filter(n => !n.isZero()),
+        //...siloedNullifiers[1...4]
+      ];
+      expect(output.accumulatedData.nullifiers.filter(n => !n.isZero())).toEqual(includedSiloedNullifiers);
+      await checkNullifierRoot(txResult);
     });

     it('includes a transaction that reverts in app logic and teardown', async function () {
@@ -610,24 +647,24 @@ describe('public_tx_simulator', () => {
       const appLogicFailure = new SimulationError('Simulation Failed in app logic', []);
       const teardownFailure = new SimulationError('Simulation Failed in teardown', []);

-      const contractAddress = AztecAddress.fromBigInt(112233n);
+      const siloedNullifiers = [new Fr(10000), new Fr(20000), new Fr(30000), new Fr(40000), new Fr(50000)];
       mockPublicExecutor([
         // SETUP
         async (stateManager: AvmPersistableStateManager) => {
-          await stateManager.writeNullifier(contractAddress, new Fr(1));
+          await stateManager.writeSiloedNullifier(siloedNullifiers[0]);
         },
         // APP LOGIC
         async (stateManager: AvmPersistableStateManager) => {
-          await stateManager.writeNullifier(contractAddress, new Fr(2));
-          await stateManager.writeNullifier(contractAddress, new Fr(3));
+          await stateManager.writeSiloedNullifier(siloedNullifiers[1]);
+          await stateManager.writeSiloedNullifier(siloedNullifiers[2]);
         },
         async (stateManager: AvmPersistableStateManager) => {
-          await stateManager.writeNullifier(contractAddress, new Fr(4));
+          await stateManager.writeSiloedNullifier(siloedNullifiers[3]);
           return Promise.resolve(appLogicFailure);
         },
         // TEARDOWN
         async (stateManager: AvmPersistableStateManager) => {
-          await stateManager.writeNullifier(contractAddress, new Fr(5));
+          await stateManager.writeSiloedNullifier(siloedNullifiers[4]);
           return Promise.resolve(teardownFailure);
         },
       ]);
@@ -673,10 +710,47 @@ describe('public_tx_simulator', () => {

       // we keep the non-revertible data
       expect(countAccumulatedItems(output.accumulatedData.nullifiers)).toBe(3);
-      expect(output.accumulatedData.nullifiers.slice(0, 3)).toEqual([
-        new Fr(7777),
-        new Fr(8888),
-        siloNullifier(contractAddress, new Fr(1)),
+      const includedSiloedNullifiers = [
+        ...tx.data.forPublic!.nonRevertibleAccumulatedData.nullifiers.filter(n => !n.isZero()),
+        siloedNullifiers[0],
+        // dropped revertibles and app logic
+        //...tx.data.forPublic!.revertibleAccumulatedData.nullifiers.filter(n => !n.isZero()),
+        //...siloedNullifiers[1...4]
+      ];
+      expect(output.accumulatedData.nullifiers.filter(n => !n.isZero())).toEqual(includedSiloedNullifiers);
+      await checkNullifierRoot(txResult);
+    });
+
+    it('nullifier tree root is right', async function () {
+      const tx = mockTxWithPublicCalls({
+        numberOfSetupCalls: 1,
+        numberOfAppLogicCalls: 2,
+        hasPublicTeardownCall: true,
+      });
+
+      const siloedNullifiers = [new Fr(10000), new Fr(20000), new Fr(30000), new Fr(40000), new Fr(50000)];
+
+      mockPublicExecutor([
+        // SETUP
+        async (stateManager: AvmPersistableStateManager) => {
+          await stateManager.writeSiloedNullifier(siloedNullifiers[0]);
+        },
+        // APP LOGIC
+        async (stateManager: AvmPersistableStateManager) => {
+          await stateManager.writeSiloedNullifier(siloedNullifiers[1]);
+          await stateManager.writeSiloedNullifier(siloedNullifiers[2]);
+        },
+        async (stateManager: AvmPersistableStateManager) => {
+          await stateManager.writeSiloedNullifier(siloedNullifiers[3]);
+        },
+        // TEARDOWN
+        async (stateManager: AvmPersistableStateManager) => {
+          await stateManager.writeSiloedNullifier(siloedNullifiers[4]);
+        },
       ]);
+
+      const txResult = await simulator.simulate(tx);
+
+      await checkNullifierRoot(txResult);
     });
   });
diff --git a/yarn-project/simulator/src/public/public_tx_simulator.ts b/yarn-project/simulator/src/public/public_tx_simulator.ts
index 37bf5245c42..7cf250cc1d5 100644
--- a/yarn-project/simulator/src/public/public_tx_simulator.ts
+++ b/yarn-project/simulator/src/public/public_tx_simulator.ts
@@ -22,8 +22,11 @@ import { type DebugLogger, createDebugLogger } from '@aztec/foundation/log';
 import { Timer } from '@aztec/foundation/timer';
 import { Attributes, type TelemetryClient, type Tracer, trackSpan } from '@aztec/telemetry-client';

+import { strict as assert } from 'assert';
+
 import { type AvmFinalizedCallResult } from '../avm/avm_contract_call_result.js';
 import { type AvmPersistableStateManager, AvmSimulator } from '../avm/index.js';
+import { NullifierCollisionError } from '../avm/journal/nullifiers.js';
 import { getPublicFunctionDebugName } from '../common/debug_fn_name.js';
 import { ExecutorMetrics } from './executor_metrics.js';
 import { type WorldStateDB } from './public_db_sources.js';
@@ -92,11 +95,14 @@ export class PublicTxSimulator {
     // FIXME: we shouldn't need to directly modify worldStateDb here!
     await this.worldStateDB.addNewContracts(tx);

+    await this.insertNonRevertiblesFromPrivate(context);
     const processedPhases: ProcessedPhase[] = [];
     if (context.hasPhase(TxExecutionPhase.SETUP)) {
       const setupResult: ProcessedPhase = await this.simulateSetupPhase(context);
       processedPhases.push(setupResult);
     }
+
+    await this.insertRevertiblesFromPrivate(context);
     if (context.hasPhase(TxExecutionPhase.APP_LOGIC)) {
       const appLogicResult: ProcessedPhase = await this.simulateAppLogicPhase(context);
       processedPhases.push(appLogicResult);
@@ -152,9 +158,7 @@ export class PublicTxSimulator {
    * @returns The phase result.
    */
   private async simulateAppLogicPhase(context: PublicTxContext): Promise<ProcessedPhase> {
-    // Fork the state manager so that we can rollback state if app logic or teardown reverts.
-    // Don't need to fork for setup since it's non-revertible (if setup fails, transaction is thrown out).
-    context.state.fork();
+    assert(context.state.isForked(), 'App logic phase should operate with forked state.');

     const result = await this.simulatePhase(TxExecutionPhase.APP_LOGIC, context);

@@ -178,7 +182,7 @@
    */
   private async simulateTeardownPhase(context: PublicTxContext): Promise<ProcessedPhase> {
     if (!context.state.isForked()) {
-      // If state isn't forked (app logic was empty or reverted), fork now
+      // If state isn't forked (app logic reverted), fork now
       // so we can rollback to the end of setup if teardown reverts.
       context.state.fork();
     }
@@ -376,4 +380,43 @@

     return result;
   }
+
+  /**
+   * Insert the non-revertible accumulated data from private into the public state.
+   */
+  public async insertNonRevertiblesFromPrivate(context: PublicTxContext) {
+    const stateManager = context.state.getActiveStateManager();
+    try {
+      await stateManager.writeSiloedNullifiersFromPrivate(context.nonRevertibleAccumulatedDataFromPrivate.nullifiers);
+    } catch (e) {
+      if (e instanceof NullifierCollisionError) {
+        throw new NullifierCollisionError(
+          `Nullifier collision encountered when inserting non-revertible nullifiers from private.\nDetails: ${e.message}\nStack: ${e.stack}`,
+        );
+      } else {
+        throw e;
+      }
+    }
+  }
+
+  /**
+   * Insert the revertible accumulated data from private into the public state.
+   * Start by forking state so we can roll back to the end of setup if app logic or teardown reverts.
+   */
+  public async insertRevertiblesFromPrivate(context: PublicTxContext) {
+    // Fork the state manager so we can roll back to the end of setup if app logic reverts.
+    context.state.fork();
+    const stateManager = context.state.getActiveStateManager();
+    try {
+      await stateManager.writeSiloedNullifiersFromPrivate(context.revertibleAccumulatedDataFromPrivate.nullifiers);
+    } catch (e) {
+      if (e instanceof NullifierCollisionError) {
+        throw new NullifierCollisionError(
+          `Nullifier collision encountered when inserting revertible nullifiers from private.\nDetails: ${e.message}\nStack: ${e.stack}`,
+        );
+      } else {
+        throw e;
+      }
+    }
+  }
 }
diff --git a/yarn-project/simulator/src/public/side_effect_trace.test.ts b/yarn-project/simulator/src/public/side_effect_trace.test.ts
index ccbcea0267c..7d7e024e967 100644
--- a/yarn-project/simulator/src/public/side_effect_trace.test.ts
+++ b/yarn-project/simulator/src/public/side_effect_trace.test.ts
@@ -140,7 +140,7 @@ describe('Side Effect Trace', () => {
   it('Should trace nullifier checks', () => {
     const exists = true;
     const lowLeafPreimage = new NullifierLeafPreimage(utxo, Fr.ZERO, 0n);
-    trace.traceNullifierCheck(address, utxo, exists, lowLeafPreimage, Fr.ZERO, []);
+    trace.traceNullifierCheck(utxo, exists, lowLeafPreimage, Fr.ZERO, []);
     expect(trace.getCounter()).toBe(startCounterPlus1);

     const pxResult = toPxResult(trace);
@@ -157,7 +157,7 @@ describe('Side Effect Trace', () => {
   it('Should trace non-existent nullifier checks', () => {
     const exists = false;
     const lowLeafPreimage = new NullifierLeafPreimage(utxo, Fr.ZERO, 0n);
-    trace.traceNullifierCheck(address, utxo, exists, lowLeafPreimage, Fr.ZERO, []);
+    trace.traceNullifierCheck(utxo, exists, lowLeafPreimage, Fr.ZERO, []);
     expect(trace.getCounter()).toBe(startCounterPlus1);

     const pxResult = toPxResult(trace);
@@ -173,7 +173,7 @@ describe('Side Effect Trace', () => {
   it('Should trace nullifiers', () => {
     const lowLeafPreimage = new NullifierLeafPreimage(utxo, Fr.ZERO, 0n);
-    trace.traceNewNullifier(address, utxo, lowLeafPreimage, Fr.ZERO, [], []);
+    trace.traceNewNullifier(utxo, lowLeafPreimage, Fr.ZERO, [], []);
     expect(trace.getCounter()).toBe(startCounterPlus1);

     const pxResult = toPxResult(trace);
@@ -301,42 +301,42 @@ describe('Side Effect Trace', () => {
   it('Should enforce maximum number of nullifier checks', () => {
     for (let i = 0; i < MAX_NULLIFIER_READ_REQUESTS_PER_TX; i++) {
       const lowLeafPreimage = new NullifierLeafPreimage(new Fr(i), Fr.ZERO, 0n);
-      trace.traceNullifierCheck(AztecAddress.fromNumber(i), new Fr(i + 1), true, lowLeafPreimage, Fr.ZERO, []);
+      trace.traceNullifierCheck(new Fr(i + 1), true, lowLeafPreimage, Fr.ZERO, []);
     }
     const lowLeafPreimage = new NullifierLeafPreimage(new Fr(41), Fr.ZERO, 0n);
-    expect(() =>
-      trace.traceNullifierCheck(AztecAddress.fromNumber(42), new Fr(42), true, lowLeafPreimage, Fr.ZERO, []),
-    ).toThrow(SideEffectLimitReachedError);
+    expect(() => trace.traceNullifierCheck(new Fr(42), true, lowLeafPreimage, Fr.ZERO, [])).toThrow(
+      SideEffectLimitReachedError,
+    );
     // NOTE: also cannot do a non-existent check once existent checks have filled up
-    expect(() =>
-      trace.traceNullifierCheck(AztecAddress.fromNumber(42), new Fr(42), false, lowLeafPreimage, Fr.ZERO, []),
-    ).toThrow(SideEffectLimitReachedError);
+    expect(() => trace.traceNullifierCheck(new Fr(42), false, lowLeafPreimage, Fr.ZERO, [])).toThrow(
+      SideEffectLimitReachedError,
+    );
   });

   it('Should enforce maximum number of nullifier non-existent checks', () => {
     for (let i = 0; i < MAX_NULLIFIER_NON_EXISTENT_READ_REQUESTS_PER_TX; i++) {
       const lowLeafPreimage = new NullifierLeafPreimage(new Fr(i), Fr.ZERO, 0n);
-      trace.traceNullifierCheck(AztecAddress.fromNumber(i), new Fr(i + 1), true, lowLeafPreimage, Fr.ZERO, []);
+      trace.traceNullifierCheck(new Fr(i + 1), true, lowLeafPreimage, Fr.ZERO, []);
     }
     const lowLeafPreimage = new NullifierLeafPreimage(new Fr(41), Fr.ZERO, 0n);
-    expect(() =>
-      trace.traceNullifierCheck(AztecAddress.fromNumber(42), new Fr(42), false, lowLeafPreimage, Fr.ZERO, []),
-    ).toThrow(SideEffectLimitReachedError);
+    expect(() => trace.traceNullifierCheck(new Fr(42), false, lowLeafPreimage, Fr.ZERO, [])).toThrow(
+      SideEffectLimitReachedError,
+    );
     // NOTE: also cannot do an existent check once non-existent checks have filled up
-    expect(() =>
-      trace.traceNullifierCheck(AztecAddress.fromNumber(42), new Fr(42), true, lowLeafPreimage, Fr.ZERO, []),
-    ).toThrow(SideEffectLimitReachedError);
+    expect(() => trace.traceNullifierCheck(new Fr(42), true, lowLeafPreimage, Fr.ZERO, [])).toThrow(
+      SideEffectLimitReachedError,
+    );
   });

   it('Should enforce maximum number of new nullifiers', () => {
     for (let i = 0; i < MAX_NULLIFIERS_PER_TX; i++) {
       const lowLeafPreimage = new NullifierLeafPreimage(new Fr(i + 1), Fr.ZERO, 0n);
-      trace.traceNewNullifier(AztecAddress.fromNumber(i), new Fr(i), lowLeafPreimage, Fr.ZERO, [], []);
+      trace.traceNewNullifier(new Fr(i), lowLeafPreimage, Fr.ZERO, [], []);
     }
     const lowLeafPreimage = new NullifierLeafPreimage(new Fr(41), Fr.ZERO, 0n);
-    expect(() =>
-      trace.traceNewNullifier(AztecAddress.fromNumber(42), new Fr(42), lowLeafPreimage, Fr.ZERO, [], []),
-    ).toThrow(SideEffectLimitReachedError);
+    expect(() => trace.traceNewNullifier(new Fr(42), lowLeafPreimage, Fr.ZERO, [], [])).toThrow(
+      SideEffectLimitReachedError,
+    );
   });

   it('Should enforce maximum number of L1 to L2 message checks', () => {
@@ -369,7 +369,7 @@ describe('Side Effect Trace', () => {
   it('Should enforce maximum number of nullifier checks for GETCONTRACTINSTANCE', () => {
     for (let i = 0; i < MAX_NULLIFIER_READ_REQUESTS_PER_TX; i++) {
       const lowLeafPreimage = new NullifierLeafPreimage(new Fr(i), Fr.ZERO, 0n);
-      trace.traceNullifierCheck(AztecAddress.fromNumber(i), new Fr(i + 1), true, lowLeafPreimage, Fr.ZERO, []);
+      trace.traceNullifierCheck(new Fr(i + 1), true, lowLeafPreimage, Fr.ZERO, []);
     }
     expect(() => trace.traceGetContractInstance(address, /*exists=*/ true, contractInstance)).toThrow(
       SideEffectLimitReachedError,
@@ -383,7 +383,7 @@ describe('Side Effect Trace', () => {
   it('Should enforce maximum number of nullifier non-existent checks for GETCONTRACTINSTANCE', () => {
     for (let i = 0; i < MAX_NULLIFIER_NON_EXISTENT_READ_REQUESTS_PER_TX; i++) {
       const lowLeafPreimage = new NullifierLeafPreimage(new Fr(i), Fr.ZERO, 0n);
-      trace.traceNullifierCheck(AztecAddress.fromNumber(i), new Fr(i + 1), true, lowLeafPreimage, Fr.ZERO, []);
+      trace.traceNullifierCheck(new Fr(i + 1), true, lowLeafPreimage, Fr.ZERO, []);
     }
     expect(() => trace.traceGetContractInstance(address, /*exists=*/ false, contractInstance)).toThrow(
       SideEffectLimitReachedError,
@@ -410,11 +410,11 @@ describe('Side Effect Trace', () => {
     // counter does not increment for note hash checks
     nestedTrace.traceNewNoteHash(address, utxo, Fr.ZERO, []);
     testCounter++;
-    nestedTrace.traceNullifierCheck(address, utxo, true, lowLeafPreimage, Fr.ZERO, []);
+    nestedTrace.traceNullifierCheck(utxo, true, lowLeafPreimage, Fr.ZERO, []);
     testCounter++;
-    nestedTrace.traceNullifierCheck(address, utxo, true, lowLeafPreimage, Fr.ZERO, []);
+    nestedTrace.traceNullifierCheck(utxo, true, lowLeafPreimage, Fr.ZERO, []);
     testCounter++;
-    nestedTrace.traceNewNullifier(address, utxo, lowLeafPreimage, Fr.ZERO, [], []);
+    nestedTrace.traceNewNullifier(utxo, lowLeafPreimage, Fr.ZERO, [], []);
     testCounter++;
     nestedTrace.traceL1ToL2MessageCheck(address, utxo, leafIndex, existsDefault, []);
     // counter does not increment for l1tol2 message checks
diff --git a/yarn-project/simulator/src/public/side_effect_trace.ts b/yarn-project/simulator/src/public/side_effect_trace.ts
index dcb86a1850d..474e3ff155d 100644
--- a/yarn-project/simulator/src/public/side_effect_trace.ts
+++ b/yarn-project/simulator/src/public/side_effect_trace.ts
@@ -215,8 +215,7 @@ export class PublicSideEffectTrace implements PublicSideEffectTraceInterface {
   }

   public traceNullifierCheck(
-    _contractAddress: AztecAddress,
-    nullifier: Fr,
+    siloedNullifier: Fr,
     exists: boolean,
     lowLeafPreimage: NullifierLeafPreimage = NullifierLeafPreimage.empty(),
     lowLeafIndex: Fr = Fr.zero(),
@@ -227,7 +226,7 @@ export class PublicSideEffectTrace implements PublicSideEffectTraceInterface {

     this.enforceLimitOnNullifierChecks();

-    const readRequest = new ReadRequest(nullifier, this.sideEffectCounter);
+    const readRequest = new ReadRequest(siloedNullifier, this.sideEffectCounter);
     if (exists) {
       this.nullifierReadRequests.push(readRequest);
     } else {
@@ -246,8 +245,7 @@ export class PublicSideEffectTrace implements PublicSideEffectTraceInterface {
   }

   public traceNewNullifier(
-    _contractAddress: AztecAddress,
-    nullifier: Fr,
+    siloedNullifier: Fr,
     lowLeafPreimage: NullifierLeafPreimage = NullifierLeafPreimage.empty(),
     lowLeafIndex: Fr = Fr.zero(),
     lowLeafPath: Fr[] = emptyNullifierPath(),
@@ -257,7 +255,8 @@ export class PublicSideEffectTrace implements PublicSideEffectTraceInterface {
     if (this.nullifiers.length >= MAX_NULLIFIERS_PER_TX) {
       throw new SideEffectLimitReachedError('nullifier', MAX_NULLIFIERS_PER_TX);
     }
-    this.nullifiers.push(new Nullifier(nullifier, this.sideEffectCounter, /*noteHash=*/ Fr.ZERO));
+    // this will be wrong for siloedNullifier
+    this.nullifiers.push(new Nullifier(siloedNullifier, this.sideEffectCounter, /*noteHash=*/ Fr.ZERO));
     // New hinting
     const lowLeafReadHint = new AvmNullifierReadTreeHint(lowLeafPreimage, lowLeafIndex, lowLeafPath);
     this.avmCircuitHints.nullifierWriteHints.items.push(new AvmNullifierWriteTreeHint(lowLeafReadHint, insertionPath));
diff --git a/yarn-project/simulator/src/public/side_effect_trace_interface.ts b/yarn-project/simulator/src/public/side_effect_trace_interface.ts
index 98bdd9f809e..06a1c6eb563 100644
--- a/yarn-project/simulator/src/public/side_effect_trace_interface.ts
+++ b/yarn-project/simulator/src/public/side_effect_trace_interface.ts
@@ -40,16 +40,14 @@ export interface PublicSideEffectTraceInterface {
   traceNoteHashCheck(contractAddress: AztecAddress, noteHash: Fr, leafIndex: Fr, exists: boolean, path?: Fr[]): void;
   traceNewNoteHash(contractAddress: AztecAddress, noteHash: Fr, leafIndex?: Fr, path?: Fr[]): void;
   traceNullifierCheck(
-    contractAddress: AztecAddress,
-    nullifier: Fr,
+    siloedNullifier: Fr,
     exists: boolean,
     lowLeafPreimage?: NullifierLeafPreimage,
     lowLeafIndex?: Fr,
     lowLeafPath?: Fr[],
   ): void;
   traceNewNullifier(
-    contractAddress: AztecAddress,
-    nullifier: Fr,
+    siloedNullifier: Fr,
     lowLeafPreimage?: NullifierLeafPreimage,
     lowLeafIndex?: Fr,
     lowLeafPath?: Fr[],
diff --git a/yarn-project/simulator/src/public/transitional_adapters.ts b/yarn-project/simulator/src/public/transitional_adapters.ts
index a088f5fa000..63470f6fc18 100644
--- a/yarn-project/simulator/src/public/transitional_adapters.ts
+++ b/yarn-project/simulator/src/public/transitional_adapters.ts
@@ -20,7 +20,6 @@ import {
   MAX_NOTE_HASHES_PER_TX,
   MAX_NOTE_HASH_READ_REQUESTS_PER_CALL,
   MAX_NULLIFIERS_PER_CALL,
-  MAX_NULLIFIERS_PER_TX,
   MAX_NULLIFIER_NON_EXISTENT_READ_REQUESTS_PER_CALL,
   MAX_NULLIFIER_READ_REQUESTS_PER_CALL,
   MAX_PUBLIC_DATA_READS_PER_CALL,
@@ -146,16 +145,6 @@ export function generateAvmCircuitPublicInputs(
     }
   }

-  const nullifiersFromPrivate = revertCode.isOK()
-    ? mergeAccumulatedData(
-        avmCircuitPublicInputs.previousNonRevertibleAccumulatedData.nullifiers,
-        avmCircuitPublicInputs.previousRevertibleAccumulatedData.nullifiers,
-      )
-    : avmCircuitPublicInputs.previousNonRevertibleAccumulatedData.nullifiers;
-  avmCircuitPublicInputs.accumulatedData.nullifiers = assertLength(
-    mergeAccumulatedData(nullifiersFromPrivate, avmCircuitPublicInputs.accumulatedData.nullifiers),
-    MAX_NULLIFIERS_PER_TX,
-  );
   const msgsFromPrivate = revertCode.isOK()
     ? mergeAccumulatedData(
         avmCircuitPublicInputs.previousNonRevertibleAccumulatedData.l2ToL1Msgs,
diff --git a/yarn-project/world-state/src/native/native_world_state.test.ts b/yarn-project/world-state/src/native/native_world_state.test.ts
index 97f9a929814..d18593fcbf0 100644
--- a/yarn-project/world-state/src/native/native_world_state.test.ts
+++ b/yarn-project/world-state/src/native/native_world_state.test.ts
@@ -173,7 +173,7 @@ describe('NativeWorldState', () => {
     beforeEach(async () => {
       ws = await NativeWorldStateService.new(EthAddress.random(), dataDir, defaultDBMapSize);
-    });
+    }, 30_000);

     afterEach(async () => {
       await ws.close();
diff --git a/yarn-project/world-state/src/test/integration.test.ts b/yarn-project/world-state/src/test/integration.test.ts
index 0d6afa61ede..52fde9a94ae 100644
--- a/yarn-project/world-state/src/test/integration.test.ts
+++ b/yarn-project/world-state/src/test/integration.test.ts
@@ -55,7 +55,7 @@ describe('world-state integration', () => {
     db = (await createWorldState(config)) as NativeWorldStateService;
     synchronizer = new TestWorldStateSynchronizer(db, archiver, config, new NoopTelemetryClient());
     log.info(`Created synchronizer`);
-  });
+  }, 30_000);

   afterEach(async () => {
     await synchronizer.stop();
diff --git a/yarn-project/yarn.lock b/yarn-project/yarn.lock
index d80df020866..0e29df8b664 100644
--- a/yarn-project/yarn.lock
+++ b/yarn-project/yarn.lock
@@ -323,6 +323,7 @@ __metadata:
     commander: ^10.0.1
     debug: ^4.3.4
     fflate: ^0.8.0
+    pako: ^2.1.0
     tslib: ^2.4.0
   bin:
     bb.js: ./dest/node/main.js
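Note on the public_tx_simulator.ts hunks: they reorder when private-produced nullifiers reach the world state. Non-revertibles are inserted before setup on unforked state, and revertibles are inserted by insertRevertiblesFromPrivate, which is also now the single place the state forks (app logic only asserts the fork). A condensed, self-contained sketch of that control flow, with hypothetical names mirroring the diff rather than the actual implementation:

// Sketch only: a trimmed-down mirror of the phase ordering introduced by this diff.
type Phase = 'SETUP' | 'APP_LOGIC' | 'TEARDOWN';

interface ForkableState {
  fork(): void;
  isForked(): boolean;
}

async function simulateSketch(
  state: ForkableState,
  hasPhase: (p: Phase) => boolean,
  insertNonRevertiblesFromPrivate: () => Promise<void>,
  insertRevertiblesFromPrivate: () => Promise<void>, // forks internally before writing
  simulatePhase: (p: Phase) => Promise<void>,
): Promise<void> {
  // 1. Non-revertible nullifiers from private land on the unforked state;
  //    a collision here is fatal for the whole tx.
  await insertNonRevertiblesFromPrivate();
  // 2. Setup is non-revertible, so it also runs unforked.
  if (hasPhase('SETUP')) {
    await simulatePhase('SETUP');
  }
  // 3. The fork happens inside insertRevertiblesFromPrivate, so revertible
  //    private nullifiers and everything after can be rolled back to end-of-setup.
  await insertRevertiblesFromPrivate();
  // 4. App logic now expects the state to already be forked instead of forking itself.
  if (hasPhase('APP_LOGIC')) {
    if (!state.isForked()) {
      throw new Error('App logic phase should operate with forked state.');
    }
    await simulatePhase('APP_LOGIC');
  }
  if (hasPhase('TEARDOWN')) {
    await simulatePhase('TEARDOWN');
  }
}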
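Note on the side_effect_trace changes: traceNullifierCheck and traceNewNullifier no longer take a contract address because callers are now expected to pass an already-siloed nullifier. A minimal sketch of what "siloed" means here, using the siloNullifier helper that the removed test code called (the import paths are assumptions based on the repo layout, not shown in this diff):

// Sketch only: the caller-side siloing that replaces trace-side siloing.
import { AztecAddress, Fr } from '@aztec/circuits.js';
import { siloNullifier } from '@aztec/circuits.js/hash';

const contractAddress = AztecAddress.fromBigInt(112233n);
const innerNullifier = new Fr(1);

// Siloing hashes the contract address into the nullifier so that two contracts
// emitting the same inner value cannot collide in the global nullifier tree.
const siloed: Fr = siloNullifier(contractAddress, innerNullifier);

// Old call shape: trace.traceNewNullifier(contractAddress, innerNullifier, ...)
// New call shape: trace.traceNewNullifier(siloed, ...)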
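Note on the world-state test changes: Jest hooks accept an optional per-hook timeout in milliseconds as a second argument, which is what the `}, 30_000);` edits use to override the 5-second default for slow native world state setup. For example:

// Jest's beforeEach(fn, timeout) form: the timeout applies to this hook only.
beforeEach(async () => {
  // ...expensive async setup, e.g. opening a native database...
}, 30_000);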