diff --git a/.circleci/config.yml b/.circleci/config.yml index cd068b43e2f2..2abd7304e4ff 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -438,11 +438,33 @@ jobs: - run: name: "Build and test" # We need to force not to use docker buildkit because for some reason on arm only, it ends up making a call - # out to eu-west2 despite the image being locally tagged, resulting in unauthorised 401. Weird docker bug? + # out to eu-west2 despite the image being locally tagged, resulting in unauthorized 401. Weird docker bug? command: | echo "export DOCKER_BUILDKIT=" > $BASH_ENV build aztec-sandbox false arm64 + aztec-p2p-bootstrap: + machine: + image: ubuntu-2204:2023.07.2 + resource_class: large + steps: + - *checkout + - *setup_env + - run: + name: "Build and test" + command: build p2p-bootstrap | add_timestamps + + aztec-node: + machine: + image: ubuntu-2204:2023.07.2 + resource_class: large + steps: + - *checkout + - *setup_env + - run: + name: "Build and test" + command: build aztec-node | add_timestamps + pxe-x86_64: machine: image: ubuntu-2204:2023.07.2 @@ -464,7 +486,7 @@ jobs: - run: name: "Build and test" # We need to force not to use docker buildkit because for some reason on arm only, it ends up making a call - # out to eu-west2 despite the image being locally tagged, resulting in unauthorised 401. Weird docker bug? + # out to eu-west2 despite the image being locally tagged, resulting in unauthorized 401. Weird docker bug? 
command: | echo "export DOCKER_BUILDKIT=" > $BASH_ENV build pxe false arm64 @@ -504,7 +526,7 @@ jobs: name: "Test via adhoc script" command: ./yarn-project/boxes/blank/run_tests - boxes-private-token: + boxes-token: machine: image: ubuntu-2204:2023.07.2 resource_class: large @@ -513,7 +535,7 @@ jobs: - *setup_env - run: name: "Test via adhoc script" - command: ./yarn-project/boxes/private-token/run_tests + command: ./yarn-project/boxes/token/run_tests canary: machine: @@ -609,18 +631,6 @@ jobs: command: cond_run_script end-to-end ./scripts/run_tests_local e2e_private_airdrop.test.ts environment: { DEBUG: "aztec:*" } - e2e-private-token-contract: - machine: - image: ubuntu-2204:2023.07.2 - resource_class: large - steps: - - *checkout - - *setup_env - - run: - name: "Test" - command: cond_run_script end-to-end ./scripts/run_tests_local e2e_private_token_contract.test.ts - environment: { DEBUG: "aztec:*" } - e2e-sandbox-example: machine: image: ubuntu-2204:2023.07.2 @@ -955,9 +965,20 @@ jobs: name: "Benchmark" command: cond_run_script end-to-end ./scripts/run_tests_local benchmarks/bench_publish_rollup.test.ts environment: - { - DEBUG: "aztec:benchmarks:*,aztec:sequencer,aztec:world_state,aztec:merkle_trees", - } + DEBUG: "aztec:benchmarks:*,aztec:sequencer,aztec:sequencer:*,aztec:world_state,aztec:merkle_trees" + + bench-process-history: + machine: + image: ubuntu-2204:2023.07.2 + resource_class: large + steps: + - *checkout + - *setup_env + - run: + name: "Benchmark" + command: cond_run_script end-to-end ./scripts/run_tests_local benchmarks/bench_process_history.test.ts + environment: + DEBUG: "aztec:benchmarks:*,aztec:sequencer,aztec:sequencer:*,aztec:world_state,aztec:merkle_trees" build-docs: machine: @@ -991,6 +1012,15 @@ jobs: name: "Noop" command: echo Noop + canary-end: + docker: + - image: cimg/base:2023.09 + resource_class: small + steps: + - run: + name: "Noop" + command: echo Noop + bench-summary: machine: image: ubuntu-2204:2023.07.2 @@ -1001,6 +1031,19 
@@ jobs: name: "Assemble benchmark summary from uploaded logs" command: ./scripts/ci/assemble_e2e_benchmark.sh + deploy-npm-canary: + # Deploys next version under 'canary' dist tag. + machine: + image: ubuntu-2204:2023.07.2 + resource_class: medium + steps: + - *checkout + - *setup_env + # Aztec.js and dependencies + - run: + name: "yarn-project" + command: yarn-project/deploy_npm.sh canary + deploy-npm: machine: image: ubuntu-2204:2023.07.2 @@ -1011,9 +1054,9 @@ jobs: # Aztec.js and dependencies - run: name: "yarn-project" - command: yarn-project/deploy_npm.sh + command: yarn-project/deploy_npm.sh latest - deploy-dockerhub: + deploy-dockerhub-canary: machine: image: ubuntu-2204:2023.07.2 resource_class: medium @@ -1022,16 +1065,29 @@ jobs: - *setup_env - run: name: "Deploy to dockerhub" - command: yarn-project/deploy_dockerhub.sh + command: yarn-project/deploy_dockerhub.sh canary - deploy-end: - docker: - - image: cimg/base:2023.09 - resource_class: small + deploy-ecr: + machine: + image: ubuntu-2204:2023.07.2 + resource_class: medium + steps: + - *checkout + - *setup_env + - run: + name: "yarn-project" + command: yarn-project/deploy_ecr.sh + + deploy-dockerhub: + machine: + image: ubuntu-2204:2023.07.2 + resource_class: medium steps: + - *checkout + - *setup_env - run: - name: "Noop" - command: echo Noop + name: "Deploy to dockerhub" + command: yarn-project/deploy_dockerhub.sh build-deployment-canary: machine: @@ -1044,7 +1100,7 @@ jobs: name: "Build" command: build canary true - run-deployment-canary-uniswap: + canary-uniswap-test: machine: image: ubuntu-2204:2023.07.2 resource_class: large @@ -1055,7 +1111,7 @@ jobs: name: "Test" command: run_script canary ./scripts/run_tests ./src/uniswap_trade_on_l1_from_l2.test.ts canary ./scripts/docker-compose.yml - run-deployment-canary-browser: + canary-browser-test: machine: image: ubuntu-2204:2023.07.2 resource_class: large @@ -1066,7 +1122,7 @@ jobs: name: "Test" command: run_script canary ./scripts/run_tests 
./src/aztec_js_browser.test.ts canary ./scripts/docker-compose-browser.yml - run-deployment-canary-cli: + canary-cli-test: machine: image: ubuntu-2204:2023.07.2 resource_class: large @@ -1233,6 +1289,16 @@ workflows: - yarn-project <<: *defaults + - aztec-p2p-bootstrap: + requires: + - yarn-project + <<: *defaults + + - aztec-node: + requires: + - yarn-project + <<: *defaults + - pxe-x86_64: requires: - yarn-project @@ -1260,7 +1326,7 @@ workflows: - aztec-sandbox-x86_64 <<: *defaults - - boxes-private-token: + - boxes-token: requires: - aztec-sandbox-x86_64 <<: *defaults @@ -1277,7 +1343,6 @@ workflows: - e2e-lending-contract: *e2e_test - e2e-token-contract: *e2e_test - e2e-private-airdrop: *e2e_test - - e2e-private-token-contract: *e2e_test - e2e-sandbox-example: *e2e_test - e2e-multi-transfer-contract: *e2e_test - e2e-block-building: *e2e_test @@ -1307,6 +1372,7 @@ workflows: - guides-sample-dapp: *e2e_test - guides-up-quick-start: *e2e_test - bench-publish-rollup: *e2e_test + - bench-process-history: *e2e_test - e2e-end: requires: @@ -1315,7 +1381,6 @@ workflows: - e2e-lending-contract - e2e-token-contract - e2e-private-airdrop - - e2e-private-token-contract - e2e-sandbox-example - e2e-multi-transfer-contract - e2e-block-building @@ -1344,46 +1409,64 @@ workflows: - guides-dapp-testing - guides-sample-dapp - guides-up-quick-start - - bench-publish-rollup <<: *defaults - bench-summary: requires: - e2e-end + - bench-publish-rollup + - bench-process-history <<: *defaults - # Deployment and Canary tests - - deploy-dockerhub: + # Deploy under canary tag + - deploy-npm-canary: requires: - e2e-end <<: *deploy_defaults - - deploy-npm: + - deploy-dockerhub-canary: requires: - e2e-end <<: *deploy_defaults - - - deploy-end: + - deploy-ecr: requires: - - deploy-dockerhub - - deploy-npm + - e2e-end <<: *deploy_defaults - build-deployment-canary: requires: - - deploy-end + - deploy-npm-canary + - deploy-dockerhub-canary <<: *deploy_defaults - - run-deployment-canary-uniswap: 
+ # Run canary tests + - canary-uniswap-test: requires: - build-deployment-canary <<: *deploy_defaults - - run-deployment-canary-browser: + - canary-browser-test: requires: - build-deployment-canary <<: *deploy_defaults - - run-deployment-canary-cli: + - canary-cli-test: requires: - build-deployment-canary <<: *deploy_defaults + + - canary-end: + requires: + - canary-uniswap-test + - canary-browser-test + - canary-cli-test + <<: *deploy_defaults + + # Production deployment + - deploy-dockerhub: + requires: + - canary-end + <<: *deploy_defaults + - deploy-npm: + requires: + - canary-end + <<: *deploy_defaults diff --git a/.gitignore b/.gitignore index 9115ec7782a8..46c867ea4814 100644 --- a/.gitignore +++ b/.gitignore @@ -6,5 +6,5 @@ node_modules build/ .idea cmake-build-debug -.terraform +.terraform* .bootstrapped diff --git a/.release-please-manifest.json b/.release-please-manifest.json index f06a978eb956..7f0596d898c6 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,5 +1,5 @@ { - ".": "0.8.7", - "barretenberg": "0.8.7", - "barretenberg/ts": "0.8.7" + ".": "0.8.14", + "barretenberg": "0.8.14", + "barretenberg/ts": "0.8.14" } diff --git a/.vscode/extensions.json b/.vscode/extensions.json index 1d9e5d4870bf..988ff79b4751 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -36,6 +36,8 @@ "IBM.output-colorizer", // Displays code coverage report information within vscode "ryanluker.vscode-coverage-gutters", + // Spell checking + "streetsidesoftware.code-spell-checker", // End C++/Circuits extensions /////////////////////////////////////// ], diff --git a/CHANGELOG.md b/CHANGELOG.md index 72265a128993..88fcf8572174 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,182 @@ # Changelog +## [0.8.14](https://github.com/AztecProtocol/aztec-packages/compare/aztec-packages-v0.8.13...aztec-packages-v0.8.14) (2023-10-13) + + +### Bug Fixes + +* Deploy_defaults for canary-end 
([#2854](https://github.com/AztecProtocol/aztec-packages/issues/2854)) ([7b189a8](https://github.com/AztecProtocol/aztec-packages/commit/7b189a83114a4206da425c375a77542af0b7df48)) +* **docker:** Use entrypoint for mult line commands in docker ([#2853](https://github.com/AztecProtocol/aztec-packages/issues/2853)) ([ab99cd0](https://github.com/AztecProtocol/aztec-packages/commit/ab99cd0f0731b7951286ae2a1667a73f1d406a1a)) + +## [0.8.13](https://github.com/AztecProtocol/aztec-packages/compare/aztec-packages-v0.8.12...aztec-packages-v0.8.13) (2023-10-13) + + +### Features + +* Add deployed contract to PXE from CLI ([#2850](https://github.com/AztecProtocol/aztec-packages/issues/2850)) ([5bad3e3](https://github.com/AztecProtocol/aztec-packages/commit/5bad3e344ee5842d86aebe443bb001e27d1e735b)) +* **docs_tutorials:** Token Portal & Uniswap Tutorial ([#2726](https://github.com/AztecProtocol/aztec-packages/issues/2726)) ([dbef55f](https://github.com/AztecProtocol/aztec-packages/commit/dbef55fc63a296e720e270616b8ae7bd642b8a28)) + + +### Bug Fixes + +* Added registry contract address to node terraform ([#2851](https://github.com/AztecProtocol/aztec-packages/issues/2851)) ([bfc5feb](https://github.com/AztecProtocol/aztec-packages/commit/bfc5feb1bad76a5a1a4c7deb5ecd674f9ab42a9b)) +* Create canary dockerhub manifest ([#2849](https://github.com/AztecProtocol/aztec-packages/issues/2849)) ([1d7bd26](https://github.com/AztecProtocol/aztec-packages/commit/1d7bd26874af4f3c608ce707c81b844e929cc742)) +* Fix check_circuit in goblin translator (resulted in flimsy test) ([#2827](https://github.com/AztecProtocol/aztec-packages/issues/2827)) ([98b1679](https://github.com/AztecProtocol/aztec-packages/commit/98b16793b0e84360af8dc70934636d11d7bc7e29)) + +## [0.8.12](https://github.com/AztecProtocol/aztec-packages/compare/aztec-packages-v0.8.11...aztec-packages-v0.8.12) 
(2023-10-13) + + +### Features + +* Private token box upgrade to new Token contract ([#2824](https://github.com/AztecProtocol/aztec-packages/issues/2824)) ([22794a5](https://github.com/AztecProtocol/aztec-packages/commit/22794a57bbf45cac72dd69bf9838c63b240e6e22)) +* Use privacy consistently ([#2833](https://github.com/AztecProtocol/aztec-packages/issues/2833)) ([89b9b6a](https://github.com/AztecProtocol/aztec-packages/commit/89b9b6ac6eeed10484a4c0892d43d9374864ee1d)) + + +### Bug Fixes + +* Copied box nargo toml trailing slash ([#2819](https://github.com/AztecProtocol/aztec-packages/issues/2819)) ([ecd2a64](https://github.com/AztecProtocol/aztec-packages/commit/ecd2a64a517e34ada4770d26e6d7f9c578ee82aa)) +* Fix rebuild pattern slashes. ([#2843](https://github.com/AztecProtocol/aztec-packages/issues/2843)) ([e32517e](https://github.com/AztecProtocol/aztec-packages/commit/e32517e9eae791b32f94b3816413392ccf0ba096)) +* Trigger yarn-project rebuild for .sh files ([#2846](https://github.com/AztecProtocol/aztec-packages/issues/2846)) ([c956254](https://github.com/AztecProtocol/aztec-packages/commit/c95625439e3c779568d4ddf2f0d0ed93519fb4ac)) + +## [0.8.11](https://github.com/AztecProtocol/aztec-packages/compare/aztec-packages-v0.8.10...aztec-packages-v0.8.11) (2023-10-13) + + +### Features + +* **archiver:** Use registry to fetch searchStartBlock ([#2830](https://github.com/AztecProtocol/aztec-packages/issues/2830)) ([e5bc067](https://github.com/AztecProtocol/aztec-packages/commit/e5bc0672b631f21debf96a85a206080e2d9a838c)) +* Configure sandbox for network ([#2818](https://github.com/AztecProtocol/aztec-packages/issues/2818)) ([d393a59](https://github.com/AztecProtocol/aztec-packages/commit/d393a5954bb5d80dddf602d1828ab9c9f6e092cb)) +* **docker-sandbox:** Allow forks in sandbox ([#2831](https://github.com/AztecProtocol/aztec-packages/issues/2831)) 
([ed8431c](https://github.com/AztecProtocol/aztec-packages/commit/ed8431c736ab67dc825316b8ea35ca5c7f078563)), closes [#2726](https://github.com/AztecProtocol/aztec-packages/issues/2726) +* Goblin Translator Decomposition relation (Goblin Translator part 4) ([#2802](https://github.com/AztecProtocol/aztec-packages/issues/2802)) ([3c3cd9f](https://github.com/AztecProtocol/aztec-packages/commit/3c3cd9f62640b505b55916648df6ccddf524cdfc)) +* Goblin Translator GenPermSort relation (Goblin Translator part 3) ([#2795](https://github.com/AztecProtocol/aztec-packages/issues/2795)) ([b36fdc4](https://github.com/AztecProtocol/aztec-packages/commit/b36fdc481d16e56fe244c5a10a5223199f9f2e6b)) +* Goblin translator opcode constraint and accumulator transfer relations (Goblin Translator part 5) ([#2805](https://github.com/AztecProtocol/aztec-packages/issues/2805)) ([b3d1f28](https://github.com/AztecProtocol/aztec-packages/commit/b3d1f280913494322baee369e6ee4f04353891b3)) +* Goblin Translator Permutation relation (Goblin Translator part 2) ([#2790](https://github.com/AztecProtocol/aztec-packages/issues/2790)) ([9a354c9](https://github.com/AztecProtocol/aztec-packages/commit/9a354c94c91f8f2927ca66d0de65b5b893066710)) +* Integrate ZeroMorph into Honk ([#2774](https://github.com/AztecProtocol/aztec-packages/issues/2774)) ([ea86869](https://github.com/AztecProtocol/aztec-packages/commit/ea86869e92da3fbf921314fdbca31fdb85a6e274)) +* NPM canary deployment ([#2731](https://github.com/AztecProtocol/aztec-packages/issues/2731)) ([7d48ed3](https://github.com/AztecProtocol/aztec-packages/commit/7d48ed3beb70f0ed183407e87dad0fb9310fcf13)) +* Purge non native token + reorder params in token portal ([#2723](https://github.com/AztecProtocol/aztec-packages/issues/2723)) 
([447dade](https://github.com/AztecProtocol/aztec-packages/commit/447dade3cc21bdd20a24b13fb5d958efea6fed08)) +* Throw compile error if read/write public state from private ([#2804](https://github.com/AztecProtocol/aztec-packages/issues/2804)) ([a3649df](https://github.com/AztecProtocol/aztec-packages/commit/a3649df0691e76e108388aebd780748e844ee8c5)) +* Unencrypted log filtering ([#2600](https://github.com/AztecProtocol/aztec-packages/issues/2600)) ([7ae554a](https://github.com/AztecProtocol/aztec-packages/commit/7ae554a7c4d725c1ae67b083a0286d15fb76ad0b)), closes [#1498](https://github.com/AztecProtocol/aztec-packages/issues/1498) [#1500](https://github.com/AztecProtocol/aztec-packages/issues/1500) +* Update goblin translator circuit builder (Goblin Translator part 1) ([#2764](https://github.com/AztecProtocol/aztec-packages/issues/2764)) ([32c69ae](https://github.com/AztecProtocol/aztec-packages/commit/32c69ae36ed431482d286e228fd830256e8bd1b5)) + + +### Bug Fixes + +* Outdated `noir:clean` ([#2821](https://github.com/AztecProtocol/aztec-packages/issues/2821)) ([2ea199f](https://github.com/AztecProtocol/aztec-packages/commit/2ea199fcd99db73ea2969af7ce0e99501d2cbb5d)) + + +### Miscellaneous + +* Benchmark tx sizes in p2p pool ([#2810](https://github.com/AztecProtocol/aztec-packages/issues/2810)) ([f63219c](https://github.com/AztecProtocol/aztec-packages/commit/f63219c91e076a96a49ed16a779a3124fef202c4)) +* Change acir_tests branch to point to master ([#2815](https://github.com/AztecProtocol/aztec-packages/issues/2815)) ([73f229d](https://github.com/AztecProtocol/aztec-packages/commit/73f229d3123301818262439a2a98767146a1a58c)) +* Fix typo ([#2839](https://github.com/AztecProtocol/aztec-packages/issues/2839)) ([5afdf91](https://github.com/AztecProtocol/aztec-packages/commit/5afdf9105f4980d3ed86ca5fb3a2d6b8e9c33f70)) +* From < genesis 
allowed in getBlocks ([#2816](https://github.com/AztecProtocol/aztec-packages/issues/2816)) ([5622b50](https://github.com/AztecProtocol/aztec-packages/commit/5622b506513f7f1fb491a6be011f90eca1ea96f3)) +* Remove Ultra Grumpkin flavor ([#2825](https://github.com/AztecProtocol/aztec-packages/issues/2825)) ([bde77b8](https://github.com/AztecProtocol/aztec-packages/commit/bde77b8e6e91fa734e06453e67a50597480b2ec1)) +* Remove work queue from honk ([#2814](https://github.com/AztecProtocol/aztec-packages/issues/2814)) ([bca7d12](https://github.com/AztecProtocol/aztec-packages/commit/bca7d126d2ec583977ee5bdf77a90263d059dc44)) +* Spell check ([#2817](https://github.com/AztecProtocol/aztec-packages/issues/2817)) ([4777a11](https://github.com/AztecProtocol/aztec-packages/commit/4777a113491c4c9901b4589a9a6cb1e1148c0288)) + + +### Documentation + +* Slight changes to update portal page ([#2799](https://github.com/AztecProtocol/aztec-packages/issues/2799)) ([eb65819](https://github.com/AztecProtocol/aztec-packages/commit/eb65819957a0e5e5c2240ad4f299222133a27edd)) +* Update aztec_connect_sunset.mdx ([#2808](https://github.com/AztecProtocol/aztec-packages/issues/2808)) ([5f659a7](https://github.com/AztecProtocol/aztec-packages/commit/5f659a708980c60d015d4292c05e5fd50e7c7f1f)) + +## [0.8.10](https://github.com/AztecProtocol/aztec-packages/compare/aztec-packages-v0.8.9...aztec-packages-v0.8.10) (2023-10-11) + + +### Features + +* Adding Fr back as a BB export (ts) ([#2770](https://github.com/AztecProtocol/aztec-packages/issues/2770)) ([d9ac808](https://github.com/AztecProtocol/aztec-packages/commit/d9ac8080a5525b9792b7b3f10c40583536bb256c)) +* Bb faster init ([#2776](https://github.com/AztecProtocol/aztec-packages/issues/2776)) ([c794533](https://github.com/AztecProtocol/aztec-packages/commit/c794533754a9706d362d0374209df9eb5b6bfdc7)) +* Deploy l1 
contracts npm pkg ([#2754](https://github.com/AztecProtocol/aztec-packages/issues/2754)) ([e317c47](https://github.com/AztecProtocol/aztec-packages/commit/e317c47471f0dc2ef9c95f917406dc1f85dd87e4)) +* Docs: Add foundational concepts, ACIR and Sequencer pages ([#2716](https://github.com/AztecProtocol/aztec-packages/issues/2716)) ([9d10326](https://github.com/AztecProtocol/aztec-packages/commit/9d103265e8cde02add16c1a920add5b290a8fc92)) +* Events in contract artifacts ([#2786](https://github.com/AztecProtocol/aztec-packages/issues/2786)) ([b8cb7df](https://github.com/AztecProtocol/aztec-packages/commit/b8cb7dfdb68784d60f29249fd49140bde1c8e581)), closes [#2324](https://github.com/AztecProtocol/aztec-packages/issues/2324) +* IAC for a prototype devnet ([#2720](https://github.com/AztecProtocol/aztec-packages/issues/2720)) ([b30839e](https://github.com/AztecProtocol/aztec-packages/commit/b30839e9e5b88124443d35140f84610bbc0a7855)) +* **l1-contracts:** Remove remappings of [@aztec](https://github.com/aztec) ([#2797](https://github.com/AztecProtocol/aztec-packages/issues/2797)) ([aac8b37](https://github.com/AztecProtocol/aztec-packages/commit/aac8b37431d4e69db60388cf72c114297977248a)) +* LLVM xray presets ([#2525](https://github.com/AztecProtocol/aztec-packages/issues/2525)) ([23a1ee9](https://github.com/AztecProtocol/aztec-packages/commit/23a1ee91da6003d1b5798640c8ccecbd226beef7)) +* Separate aggregation protocol ([#2736](https://github.com/AztecProtocol/aztec-packages/issues/2736)) ([ad16937](https://github.com/AztecProtocol/aztec-packages/commit/ad169374943ef49c32eabc66483a7be28a711565)) +* Simplify relation containers ([#2619](https://github.com/AztecProtocol/aztec-packages/issues/2619)) ([99c5127](https://github.com/AztecProtocol/aztec-packages/commit/99c5127ac5c10e6637534870a689a95238ae997c)) +* ZeroMorph 
([#2664](https://github.com/AztecProtocol/aztec-packages/issues/2664)) ([a006e5a](https://github.com/AztecProtocol/aztec-packages/commit/a006e5a0e0a30f8dfe992e3ac8a05f6c276f9300)) + + +### Miscellaneous + +* Acir format cleanup ([#2779](https://github.com/AztecProtocol/aztec-packages/issues/2779)) ([5ea373f](https://github.com/AztecProtocol/aztec-packages/commit/5ea373f7d653f7322a108297113a2deb379e1400)) +* Add md to rebuild patterns ([#2798](https://github.com/AztecProtocol/aztec-packages/issues/2798)) ([3f4297d](https://github.com/AztecProtocol/aztec-packages/commit/3f4297dbc924ca76fdfba44975c64316f2236deb)) +* Make canary uniswap test similar to e2e ([#2767](https://github.com/AztecProtocol/aztec-packages/issues/2767)) ([93d458b](https://github.com/AztecProtocol/aztec-packages/commit/93d458bbbf6c88861b72f00e8fe8beb753857765)) +* Measure block building times, history processing times, and db sizes ([#2733](https://github.com/AztecProtocol/aztec-packages/issues/2733)) ([0cc553a](https://github.com/AztecProtocol/aztec-packages/commit/0cc553ab7740c0479582674fce2626a30f3093a9)) +* Moved `AddressNote` to `aztec.nr` ([#2752](https://github.com/AztecProtocol/aztec-packages/issues/2752)) ([5f99066](https://github.com/AztecProtocol/aztec-packages/commit/5f99066113480292c8bc56247eca1adb4d49ad5c)) +* No calls to pedersen from TS ([#2724](https://github.com/AztecProtocol/aztec-packages/issues/2724)) ([78e44c3](https://github.com/AztecProtocol/aztec-packages/commit/78e44c33bb98fa405f104aafa74b44ce791f239f)) +* Remove stale comments ([#2788](https://github.com/AztecProtocol/aztec-packages/issues/2788)) ([d9c458d](https://github.com/AztecProtocol/aztec-packages/commit/d9c458d233d4c4a2ade50cdb6c1fc713e654cb55)) +* Renaming abi as artifact ([#2756](https://github.com/AztecProtocol/aztec-packages/issues/2756)) 
([c0abcfd](https://github.com/AztecProtocol/aztec-packages/commit/c0abcfd9dfcceb4a2c81561bd89beb9381d20461)) +* Rewrite benchmark scripts in ts ([#2765](https://github.com/AztecProtocol/aztec-packages/issues/2765)) ([8efa374](https://github.com/AztecProtocol/aztec-packages/commit/8efa3741ca7503cd38a7de75d5768f1b4d1be287)) +* Stop whinging about this ownership stuff. ([#2775](https://github.com/AztecProtocol/aztec-packages/issues/2775)) ([3dd6900](https://github.com/AztecProtocol/aztec-packages/commit/3dd6900f96a7dc855643be0e4aba0cfe9fa8a16e)) +* Update ACIR serialisation format ([#2771](https://github.com/AztecProtocol/aztec-packages/issues/2771)) ([6d85527](https://github.com/AztecProtocol/aztec-packages/commit/6d855270f8c069edac62536ccc391a0cab764323)) +* Use global crs in more places. Less pain. ([#2772](https://github.com/AztecProtocol/aztec-packages/issues/2772)) ([b819980](https://github.com/AztecProtocol/aztec-packages/commit/b8199802bad3c05ebe4d1ded5338a09a04e0ed7e)) + + +### Documentation + +* Add yellow-paper directory ([#2773](https://github.com/AztecProtocol/aztec-packages/issues/2773)) ([03de545](https://github.com/AztecProtocol/aztec-packages/commit/03de545b62ab8d6755fae27b6f2e2bce3575e40e)) +* Adding some authwit docs ([#2711](https://github.com/AztecProtocol/aztec-packages/issues/2711)) ([afc23f4](https://github.com/AztecProtocol/aztec-packages/commit/afc23f4652c478298e86f8895f41b21e727a89a6)) +* Update overview.mdx ([#2746](https://github.com/AztecProtocol/aztec-packages/issues/2746)) ([082ab56](https://github.com/AztecProtocol/aztec-packages/commit/082ab56d4735a8f08922e36a9897a17fb4fd2c3c)) +* Update site title and tagline ([#2769](https://github.com/AztecProtocol/aztec-packages/issues/2769)) ([bbb0b60](https://github.com/AztecProtocol/aztec-packages/commit/bbb0b60d07bc2efa6754b1ad3839735272eeb896)) + +## 
[0.8.9](https://github.com/AztecProtocol/aztec-packages/compare/aztec-packages-v0.8.8...aztec-packages-v0.8.9) (2023-10-10) + + +### Features + +* Auto-recompile the boxes and fix broken frontend CompleteAddress import ([#2727](https://github.com/AztecProtocol/aztec-packages/issues/2727)) ([4ec4ea0](https://github.com/AztecProtocol/aztec-packages/commit/4ec4ea061e2d003da905d6c2026608b41cdca044)) + + +### Bug Fixes + +* Default export in noir-version ([#2757](https://github.com/AztecProtocol/aztec-packages/issues/2757)) ([6ff7bed](https://github.com/AztecProtocol/aztec-packages/commit/6ff7bed1722f8e7afa4b4c495216ca20ea47f42a)) + + +### Documentation + +* Add preview image ([#2759](https://github.com/AztecProtocol/aztec-packages/issues/2759)) ([45597af](https://github.com/AztecProtocol/aztec-packages/commit/45597af2a75ffeb8ecd91028f30f159910821673)) + +## [0.8.8](https://github.com/AztecProtocol/aztec-packages/compare/aztec-packages-v0.8.7...aztec-packages-v0.8.8) (2023-10-09) + + +### Features + +* Actually compute selectors ([#2686](https://github.com/AztecProtocol/aztec-packages/issues/2686)) ([dcb65e1](https://github.com/AztecProtocol/aztec-packages/commit/dcb65e1286f8ac4c09e03f65bb3837e02b44ace2)) +* Add otterscan to sandbox ([#2648](https://github.com/AztecProtocol/aztec-packages/issues/2648)) ([6986649](https://github.com/AztecProtocol/aztec-packages/commit/69866498c77ba1b38ca88b766172d5dc46acc4f7)) +* **aztec.js:** Remove attach method ([#2715](https://github.com/AztecProtocol/aztec-packages/issues/2715)) ([c03c654](https://github.com/AztecProtocol/aztec-packages/commit/c03c654631d6e70bb3f2c6abcd9fa046ce8554b8)) +* Create .gitattributes in aztec-nr ([#2661](https://github.com/AztecProtocol/aztec-packages/issues/2661)) ([8084fc3](https://github.com/AztecProtocol/aztec-packages/commit/8084fc3a6a880517284d4aac78a355b9882e88a8)) +* GCC 
13 preset ([#2623](https://github.com/AztecProtocol/aztec-packages/issues/2623)) ([4881414](https://github.com/AztecProtocol/aztec-packages/commit/4881414ceb30590674c244ef9bc4c8416eacd6bc)) +* Update noir to v0.16 ([#2718](https://github.com/AztecProtocol/aztec-packages/issues/2718)) ([e8d0675](https://github.com/AztecProtocol/aztec-packages/commit/e8d0675bfb99369ce488943e127ed03d8ecbe9dc)) + + +### Bug Fixes + +* Avoid ambiguity on blank and blank-react (prefix issue) ([#2729](https://github.com/AztecProtocol/aztec-packages/issues/2729)) ([68cdb3f](https://github.com/AztecProtocol/aztec-packages/commit/68cdb3f82cad9b7274c7c4902c2f5919b0acb96b)) +* Block encoding ([#2719](https://github.com/AztecProtocol/aztec-packages/issues/2719)) ([c4796ac](https://github.com/AztecProtocol/aztec-packages/commit/c4796ac4ca6b1150cc1ac08fc44fba5a02e1bcf4)) +* Canary tests to use a fork ([#2739](https://github.com/AztecProtocol/aztec-packages/issues/2739)) ([4906142](https://github.com/AztecProtocol/aztec-packages/commit/4906142ec611ea82296bcccd7aeefcd929a8d006)) +* Challenge generation update ([#2628](https://github.com/AztecProtocol/aztec-packages/issues/2628)) ([68c1fab](https://github.com/AztecProtocol/aztec-packages/commit/68c1fab51e3a339032b719ce966ed34787f33dab)) +* Docs: Sandbox version numbers ([#2708](https://github.com/AztecProtocol/aztec-packages/issues/2708)) ([34b0209](https://github.com/AztecProtocol/aztec-packages/commit/34b020974f63f2486c55b821c3c48d583a5e54d0)) +* Docs: Update Sandbox page to use #include_aztec_version ([#2703](https://github.com/AztecProtocol/aztec-packages/issues/2703)) ([d5b78af](https://github.com/AztecProtocol/aztec-packages/commit/d5b78af731e4838ecd03a9267dab639681b06512)) +* Remove npx from extract_tag_version ([#2697](https://github.com/AztecProtocol/aztec-packages/issues/2697)) 
([fe4484a](https://github.com/AztecProtocol/aztec-packages/commit/fe4484a8b9eeb3c997650e94794b0db3b4f4e404)) +* Version in sandbox deployment ([#2730](https://github.com/AztecProtocol/aztec-packages/issues/2730)) ([b1d8efd](https://github.com/AztecProtocol/aztec-packages/commit/b1d8efd62e31a49498870cab4c447ace7d5cc1a1)) + + +### Miscellaneous + +* `foundation/src/serialization` tech debt ([#2722](https://github.com/AztecProtocol/aztec-packages/issues/2722)) ([e92154b](https://github.com/AztecProtocol/aztec-packages/commit/e92154b891ef6362cec511e1371f8d9ff3007e89)) +* Add node10 entrypoint to Foundation ([#2706](https://github.com/AztecProtocol/aztec-packages/issues/2706)) ([30c7935](https://github.com/AztecProtocol/aztec-packages/commit/30c793504951d4eb4f0a192a023fa42fc5d827d1)) +* Add storage slot to docs ([#2601](https://github.com/AztecProtocol/aztec-packages/issues/2601)) ([a7710f0](https://github.com/AztecProtocol/aztec-packages/commit/a7710f0849801a85e6907ac0072dd65140ae086a)) +* Add visibility modifiers ([#2728](https://github.com/AztecProtocol/aztec-packages/issues/2728)) ([d9ae189](https://github.com/AztecProtocol/aztec-packages/commit/d9ae189bcee43a193d262d2e819c55966494cce7)) +* **benchmark:** Measure time to decrypt notes in pxe ([#2714](https://github.com/AztecProtocol/aztec-packages/issues/2714)) ([33a230a](https://github.com/AztecProtocol/aztec-packages/commit/33a230a77488baedb7e93528e296ec47631803c7)) +* Build boxes as part of workspace ([#2725](https://github.com/AztecProtocol/aztec-packages/issues/2725)) ([d18349f](https://github.com/AztecProtocol/aztec-packages/commit/d18349f3435677200734a1db625ed80de35c469a)) +* Bump ACIR deserializer ([#2675](https://github.com/AztecProtocol/aztec-packages/issues/2675)) ([502ee87](https://github.com/AztecProtocol/aztec-packages/commit/502ee872d6360bf4bc5b83c672eeb64c58944073)) 
+* **circuits:** Delete old code that set a different generator index per vector entry in pedersen commitment ([#2700](https://github.com/AztecProtocol/aztec-packages/issues/2700)) ([4eabfd1](https://github.com/AztecProtocol/aztec-packages/commit/4eabfd1241cce2b2a0c230f600bda3af88f511dd)) +* **log:** Show log level in debug logs ([#2717](https://github.com/AztecProtocol/aztec-packages/issues/2717)) ([2b87381](https://github.com/AztecProtocol/aztec-packages/commit/2b873819ad5bade5104813c4ca2624727090ea9e)) +* Move { Fr } imports to foundation/fields ([#2712](https://github.com/AztecProtocol/aztec-packages/issues/2712)) ([f6fc7f2](https://github.com/AztecProtocol/aztec-packages/commit/f6fc7f20dfe94c7be9d791d369750234b94c1bbd)) +* **uniswap_tests:** Test edge cases around uniswap flow ([#2620](https://github.com/AztecProtocol/aztec-packages/issues/2620)) ([7a58fe9](https://github.com/AztecProtocol/aztec-packages/commit/7a58fe928b658f92afc6914672d64f8742db35bc)) +* Use `serialize` functions in `getInitialWitness` ([#2713](https://github.com/AztecProtocol/aztec-packages/issues/2713)) ([93cc668](https://github.com/AztecProtocol/aztec-packages/commit/93cc668d360ae1c599af5e347df7cd8341c59cda)) + ## [0.8.7](https://github.com/AztecProtocol/aztec-packages/compare/aztec-packages-v0.8.6...aztec-packages-v0.8.7) (2023-10-04) @@ -101,7 +278,7 @@ * Add boxes to CI ([#2456](https://github.com/AztecProtocol/aztec-packages/issues/2456)) ([a90a185](https://github.com/AztecProtocol/aztec-packages/commit/a90a185bb1d72658c7910366e593303607edf873)) * Add selector to call_context ([#2626](https://github.com/AztecProtocol/aztec-packages/issues/2626)) ([8e317be](https://github.com/AztecProtocol/aztec-packages/commit/8e317be9fafb1daa7bc0bdd08d603ce95d3be2f9)) * AddNote api ([#2535](https://github.com/AztecProtocol/aztec-packages/issues/2535)) 
([bb004f4](https://github.com/AztecProtocol/aztec-packages/commit/bb004f4419ca9dba9d8216eaba2e65d3a4a994f8)) -* **aztec_noir:** Abstract storage initialisation ([#2406](https://github.com/AztecProtocol/aztec-packages/issues/2406)) ([974b037](https://github.com/AztecProtocol/aztec-packages/commit/974b037650e7fac6fbc3721359daf5f1891b5a2a)) +* **aztec_noir:** Abstract storage initialization ([#2406](https://github.com/AztecProtocol/aztec-packages/issues/2406)) ([974b037](https://github.com/AztecProtocol/aztec-packages/commit/974b037650e7fac6fbc3721359daf5f1891b5a2a)) * **aztec.js:** Support AddressLike parameters ([#2430](https://github.com/AztecProtocol/aztec-packages/issues/2430)) ([5b5f139](https://github.com/AztecProtocol/aztec-packages/commit/5b5f139af2eb8ceb71e807c49be6c2b54e6e435b)) * Barretenberg/crypto/blake3s supports compile-time hashing ([#2556](https://github.com/AztecProtocol/aztec-packages/issues/2556)) ([da05dd7](https://github.com/AztecProtocol/aztec-packages/commit/da05dd7ea41208aea42efe0aeb838e4d76e2d34a)) * **bb:** Add `bb --version` command ([#2482](https://github.com/AztecProtocol/aztec-packages/issues/2482)) ([530676f](https://github.com/AztecProtocol/aztec-packages/commit/530676f8ec53e63ba24f6fabc9097ae8f5db5fc6)) @@ -129,7 +306,7 @@ * Log topic and contract address in unencrypted logs ([#2595](https://github.com/AztecProtocol/aztec-packages/issues/2595)) ([a5b763f](https://github.com/AztecProtocol/aztec-packages/commit/a5b763fb077b967f592ad4de9e391acf2790a094)), closes [#2580](https://github.com/AztecProtocol/aztec-packages/issues/2580) [#2581](https://github.com/AztecProtocol/aztec-packages/issues/2581) [#2586](https://github.com/AztecProtocol/aztec-packages/issues/2586) [#2587](https://github.com/AztecProtocol/aztec-packages/issues/2587) * Parallelization update for polynomials 
([#2311](https://github.com/AztecProtocol/aztec-packages/issues/2311)) ([922fc99](https://github.com/AztecProtocol/aztec-packages/commit/922fc9912a4a88a41eef42fe64ca2b59d859b5b1)) * Restore latest block number ([#2474](https://github.com/AztecProtocol/aztec-packages/issues/2474)) ([6dc2da7](https://github.com/AztecProtocol/aztec-packages/commit/6dc2da70584ed1f1f0f00b3dfeca11610e80cc5a)) -* Serialise L2Block to JSON ([#2496](https://github.com/AztecProtocol/aztec-packages/issues/2496)) ([714c727](https://github.com/AztecProtocol/aztec-packages/commit/714c727a88d4c07b76e456e462ab1cf43bcaea75)) +* Serialize L2Block to JSON ([#2496](https://github.com/AztecProtocol/aztec-packages/issues/2496)) ([714c727](https://github.com/AztecProtocol/aztec-packages/commit/714c727a88d4c07b76e456e462ab1cf43bcaea75)) * Standalone Aztec Node and RPC Server ([#2522](https://github.com/AztecProtocol/aztec-packages/issues/2522)) ([8e355bc](https://github.com/AztecProtocol/aztec-packages/commit/8e355bc8c905d2992678d4a2a3b49d354dfa5bf6)) * Unbox empty box ([#2387](https://github.com/AztecProtocol/aztec-packages/issues/2387)) ([3e3930c](https://github.com/AztecProtocol/aztec-packages/commit/3e3930c6487c3b2a264c7a93bccb25473baf0b22)) * Uniswap private flow ([#2559](https://github.com/AztecProtocol/aztec-packages/issues/2559)) ([39f3a91](https://github.com/AztecProtocol/aztec-packages/commit/39f3a917a3bb88f29d8d17ee6c9e1b2294a45937)) @@ -779,7 +956,7 @@ * Set correct version of RPC & Sandbox when deploying tagged commit ([#1914](https://github.com/AztecProtocol/aztec-packages/issues/1914)) ([898c50d](https://github.com/AztecProtocol/aztec-packages/commit/898c50d594b7515f6ca3b904d31ccf724b683ade)) * Set side effect counter on contract reads ([#1870](https://github.com/AztecProtocol/aztec-packages/issues/1870)) 
([1d8881e](https://github.com/AztecProtocol/aztec-packages/commit/1d8881e4872b39195ace523432c0e34bc9081f8d)), closes [#1588](https://github.com/AztecProtocol/aztec-packages/issues/1588) * **simulator:** Use nullifier.value in client's `pendingNullifier` set so `set.has()` works ([#1534](https://github.com/AztecProtocol/aztec-packages/issues/1534)) ([a78daf7](https://github.com/AztecProtocol/aztec-packages/commit/a78daf75e3171d9cfafecba5507d5ae215fdd0ef)) -* **synchroniser:** Store most recent globals hash in the synchroniser, rather than fetching from the latest block ([#1539](https://github.com/AztecProtocol/aztec-packages/issues/1539)) ([1dd6225](https://github.com/AztecProtocol/aztec-packages/commit/1dd62256cc323831418808689496f0506d402fc4)) +* **synchronizer:** Store most recent globals hash in the synchronizer, rather than fetching from the latest block ([#1539](https://github.com/AztecProtocol/aztec-packages/issues/1539)) ([1dd6225](https://github.com/AztecProtocol/aztec-packages/commit/1dd62256cc323831418808689496f0506d402fc4)) * **sync:** Sync latest globals within merkle tree ops ([#1612](https://github.com/AztecProtocol/aztec-packages/issues/1612)) ([03b4cf6](https://github.com/AztecProtocol/aztec-packages/commit/03b4cf67cbd4c1629c2937dfae1ea714248d6d3b)) * Truncate SRS size to the amount of points that we have downloaded ([#1862](https://github.com/AztecProtocol/aztec-packages/issues/1862)) ([0a7058c](https://github.com/AztecProtocol/aztec-packages/commit/0a7058cbda228c9baf378d69c906596e204d804f)) * Try to catch last undefined safety issues ([#2027](https://github.com/AztecProtocol/aztec-packages/issues/2027)) ([12e7486](https://github.com/AztecProtocol/aztec-packages/commit/12e7486c0750f648f51d2b43317df843a3c52bec)) diff --git a/VERSION b/VERSION index 4c89f3cde3ae..e7de65ca73e5 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v0.8.7 
x-release-please-version +v0.8.14 x-release-please-version diff --git a/barretenberg/.gitrepo b/barretenberg/.gitrepo index 5ace97a5953b..c1e8962b6a47 100644 --- a/barretenberg/.gitrepo +++ b/barretenberg/.gitrepo @@ -6,7 +6,7 @@ [subrepo] remote = https://github.com/AztecProtocol/barretenberg branch = master - commit = a635b1b79a02a6c57c67ce7ea9c94a3de274961f - parent = a91e9f18eb6615b4616bcc50d0b73cebde4a901e + commit = 9d92a4b81285f38b4195b05cafdd6bc887be23d5 + parent = bc4e1fccfbcc9df40168cb6cb8e395bc9a9093de method = merge cmdver = 0.4.6 diff --git a/barretenberg/CHANGELOG.md b/barretenberg/CHANGELOG.md index f99f081e38bf..f2d96d5a7bde 100644 --- a/barretenberg/CHANGELOG.md +++ b/barretenberg/CHANGELOG.md @@ -1,5 +1,89 @@ # Changelog +## [0.8.14](https://github.com/AztecProtocol/aztec-packages/compare/barretenberg-v0.8.13...barretenberg-v0.8.14) (2023-10-13) + + +### Miscellaneous + +* **barretenberg:** Synchronize aztec-packages versions + +## [0.8.13](https://github.com/AztecProtocol/aztec-packages/compare/barretenberg-v0.8.12...barretenberg-v0.8.13) (2023-10-13) + + +### Bug Fixes + +* Fix check_circuit in goblin translator (resulted in flimsy test) ([#2827](https://github.com/AztecProtocol/aztec-packages/issues/2827)) ([98b1679](https://github.com/AztecProtocol/aztec-packages/commit/98b16793b0e84360af8dc70934636d11d7bc7e29)) + +## [0.8.12](https://github.com/AztecProtocol/aztec-packages/compare/barretenberg-v0.8.11...barretenberg-v0.8.12) (2023-10-13) + + +### Bug Fixes + +* Fix rebuild pattern slashes. 
([#2843](https://github.com/AztecProtocol/aztec-packages/issues/2843)) ([e32517e](https://github.com/AztecProtocol/aztec-packages/commit/e32517e9eae791b32f94b3816413392ccf0ba096)) + +## [0.8.11](https://github.com/AztecProtocol/aztec-packages/compare/barretenberg-v0.8.10...barretenberg-v0.8.11) (2023-10-13) + + +### Features + +* Goblin Translator Decomposition relation (Goblin Translator part 4) ([#2802](https://github.com/AztecProtocol/aztec-packages/issues/2802)) ([3c3cd9f](https://github.com/AztecProtocol/aztec-packages/commit/3c3cd9f62640b505b55916648df6ccddf524cdfc)) +* Goblin Translator GenPermSort relation (Goblin Translator part 3) ([#2795](https://github.com/AztecProtocol/aztec-packages/issues/2795)) ([b36fdc4](https://github.com/AztecProtocol/aztec-packages/commit/b36fdc481d16e56fe244c5a10a5223199f9f2e6b)) +* Goblin translator opcode constraint and accumulator transfer relations (Goblin Translator part 5) ([#2805](https://github.com/AztecProtocol/aztec-packages/issues/2805)) ([b3d1f28](https://github.com/AztecProtocol/aztec-packages/commit/b3d1f280913494322baee369e6ee4f04353891b3)) +* Goblin Translator Permutation relation (Goblin Translator part 2) ([#2790](https://github.com/AztecProtocol/aztec-packages/issues/2790)) ([9a354c9](https://github.com/AztecProtocol/aztec-packages/commit/9a354c94c91f8f2927ca66d0de65b5b893066710)) +* Integrate ZeroMorph into Honk ([#2774](https://github.com/AztecProtocol/aztec-packages/issues/2774)) ([ea86869](https://github.com/AztecProtocol/aztec-packages/commit/ea86869e92da3fbf921314fdbca31fdb85a6e274)) +* Update goblin translator circuit builder (Goblin Translator part 1) ([#2764](https://github.com/AztecProtocol/aztec-packages/issues/2764)) ([32c69ae](https://github.com/AztecProtocol/aztec-packages/commit/32c69ae36ed431482d286e228fd830256e8bd1b5)) + + +### Miscellaneous + +* Change acir_tests branch to 
point to master ([#2815](https://github.com/AztecProtocol/aztec-packages/issues/2815)) ([73f229d](https://github.com/AztecProtocol/aztec-packages/commit/73f229d3123301818262439a2a98767146a1a58c)) +* Remove Ultra Grumpkin flavor ([#2825](https://github.com/AztecProtocol/aztec-packages/issues/2825)) ([bde77b8](https://github.com/AztecProtocol/aztec-packages/commit/bde77b8e6e91fa734e06453e67a50597480b2ec1)) +* Remove work queue from honk ([#2814](https://github.com/AztecProtocol/aztec-packages/issues/2814)) ([bca7d12](https://github.com/AztecProtocol/aztec-packages/commit/bca7d126d2ec583977ee5bdf77a90263d059dc44)) +* Spell check ([#2817](https://github.com/AztecProtocol/aztec-packages/issues/2817)) ([4777a11](https://github.com/AztecProtocol/aztec-packages/commit/4777a113491c4c9901b4589a9a6cb1e1148c0288)) + +## [0.8.10](https://github.com/AztecProtocol/aztec-packages/compare/barretenberg-v0.8.9...barretenberg-v0.8.10) (2023-10-11) + + +### Features + +* Bb faster init ([#2776](https://github.com/AztecProtocol/aztec-packages/issues/2776)) ([c794533](https://github.com/AztecProtocol/aztec-packages/commit/c794533754a9706d362d0374209df9eb5b6bfdc7)) +* LLVM xray presets ([#2525](https://github.com/AztecProtocol/aztec-packages/issues/2525)) ([23a1ee9](https://github.com/AztecProtocol/aztec-packages/commit/23a1ee91da6003d1b5798640c8ccecbd226beef7)) +* Separate aggregation protocol ([#2736](https://github.com/AztecProtocol/aztec-packages/issues/2736)) ([ad16937](https://github.com/AztecProtocol/aztec-packages/commit/ad169374943ef49c32eabc66483a7be28a711565)) +* Simplify relation containers ([#2619](https://github.com/AztecProtocol/aztec-packages/issues/2619)) ([99c5127](https://github.com/AztecProtocol/aztec-packages/commit/99c5127ac5c10e6637534870a689a95238ae997c)) +* ZeroMorph 
([#2664](https://github.com/AztecProtocol/aztec-packages/issues/2664)) ([a006e5a](https://github.com/AztecProtocol/aztec-packages/commit/a006e5a0e0a30f8dfe992e3ac8a05f6c276f9300)) + + +### Miscellaneous + +* Acir format cleanup ([#2779](https://github.com/AztecProtocol/aztec-packages/issues/2779)) ([5ea373f](https://github.com/AztecProtocol/aztec-packages/commit/5ea373f7d653f7322a108297113a2deb379e1400)) +* Stop whinging about this ownership stuff. ([#2775](https://github.com/AztecProtocol/aztec-packages/issues/2775)) ([3dd6900](https://github.com/AztecProtocol/aztec-packages/commit/3dd6900f96a7dc855643be0e4aba0cfe9fa8a16e)) +* Update ACIR serialisation format ([#2771](https://github.com/AztecProtocol/aztec-packages/issues/2771)) ([6d85527](https://github.com/AztecProtocol/aztec-packages/commit/6d855270f8c069edac62536ccc391a0cab764323)) +* Use global crs in more places. Less pain. ([#2772](https://github.com/AztecProtocol/aztec-packages/issues/2772)) ([b819980](https://github.com/AztecProtocol/aztec-packages/commit/b8199802bad3c05ebe4d1ded5338a09a04e0ed7e)) + +## [0.8.9](https://github.com/AztecProtocol/aztec-packages/compare/barretenberg-v0.8.8...barretenberg-v0.8.9) (2023-10-10) + + +### Miscellaneous + +* **barretenberg:** Synchronize aztec-packages versions + +## [0.8.8](https://github.com/AztecProtocol/aztec-packages/compare/barretenberg-v0.8.7...barretenberg-v0.8.8) (2023-10-09) + + +### Features + +* GCC 13 preset ([#2623](https://github.com/AztecProtocol/aztec-packages/issues/2623)) ([4881414](https://github.com/AztecProtocol/aztec-packages/commit/4881414ceb30590674c244ef9bc4c8416eacd6bc)) + + +### Bug Fixes + +* Challenge generation update ([#2628](https://github.com/AztecProtocol/aztec-packages/issues/2628)) ([68c1fab](https://github.com/AztecProtocol/aztec-packages/commit/68c1fab51e3a339032b719ce966ed34787f33dab)) + + +### 
Miscellaneous + +* Bump ACIR deserializer ([#2675](https://github.com/AztecProtocol/aztec-packages/issues/2675)) ([502ee87](https://github.com/AztecProtocol/aztec-packages/commit/502ee872d6360bf4bc5b83c672eeb64c58944073)) + ## [0.8.7](https://github.com/AztecProtocol/aztec-packages/compare/barretenberg-v0.8.6...barretenberg-v0.8.7) (2023-10-04) @@ -465,7 +549,7 @@ * Multithreaded Sumcheck ([#556](https://github.com/AztecProtocol/barretenberg/issues/556)) ([c4094b1](https://github.com/AztecProtocol/barretenberg/commit/c4094b155ba9d8e914c3e6a5b0d7808945b1eeed)) * **nullifier_tree:** make empty nullifier tree leaves hash be 0 ([#360](https://github.com/AztecProtocol/barretenberg/issues/360)) ([#382](https://github.com/AztecProtocol/barretenberg/issues/382)) ([b85ab8d](https://github.com/AztecProtocol/barretenberg/commit/b85ab8d587b3e93db2aa0f1c4f012e58e5d97915)) * Optimize memory consumption of pedersen generators ([#413](https://github.com/AztecProtocol/barretenberg/issues/413)) ([d60b16a](https://github.com/AztecProtocol/barretenberg/commit/d60b16a14219fd4bd130ce4537c3e94bfa10128f)) -* Parallelised folding in Gemini ([#550](https://github.com/AztecProtocol/barretenberg/issues/550)) ([3b962d3](https://github.com/AztecProtocol/barretenberg/commit/3b962d372491430871443fd1b95fd9e049e233c8)) +* Parallelized folding in Gemini ([#550](https://github.com/AztecProtocol/barretenberg/issues/550)) ([3b962d3](https://github.com/AztecProtocol/barretenberg/commit/3b962d372491430871443fd1b95fd9e049e233c8)) * **pkg-config:** Add a bindir variable ([#239](https://github.com/AztecProtocol/barretenberg/issues/239)) ([611bf34](https://github.com/AztecProtocol/barretenberg/commit/611bf34bcc6f82969a6fe546bf0a7cbecda6d36d)) * Remove TOOLCHAIN logic and replace with CMake presets ([#162](https://github.com/AztecProtocol/barretenberg/issues/162)) 
([09db0be](https://github.com/AztecProtocol/barretenberg/commit/09db0be3d09ee12b4b73b03abe8fa4565cdb6660)) * replace `MerkleMembershipConstraint` with`ComputeMerkleRootConstraint` ([#385](https://github.com/AztecProtocol/barretenberg/issues/385)) ([74dbce5](https://github.com/AztecProtocol/barretenberg/commit/74dbce5dfa126ecd6dbda7b758581752f7b6a389)) @@ -541,7 +625,7 @@ * Make the circuit constructors field agnostic so we can check circuits on grumpkin ([#534](https://github.com/AztecProtocol/barretenberg/issues/534)) ([656d794](https://github.com/AztecProtocol/barretenberg/commit/656d7944f94f3da88250f3140838f3e32e9d0174)) * Multithreaded Sumcheck ([#556](https://github.com/AztecProtocol/barretenberg/issues/556)) ([c4094b1](https://github.com/AztecProtocol/barretenberg/commit/c4094b155ba9d8e914c3e6a5b0d7808945b1eeed)) * Optimize memory consumption of pedersen generators ([#413](https://github.com/AztecProtocol/barretenberg/issues/413)) ([d60b16a](https://github.com/AztecProtocol/barretenberg/commit/d60b16a14219fd4bd130ce4537c3e94bfa10128f)) -* Parallelised folding in Gemini ([#550](https://github.com/AztecProtocol/barretenberg/issues/550)) ([3b962d3](https://github.com/AztecProtocol/barretenberg/commit/3b962d372491430871443fd1b95fd9e049e233c8)) +* Parallelized folding in Gemini ([#550](https://github.com/AztecProtocol/barretenberg/issues/550)) ([3b962d3](https://github.com/AztecProtocol/barretenberg/commit/3b962d372491430871443fd1b95fd9e049e233c8)) * Sort includes ([#571](https://github.com/AztecProtocol/barretenberg/issues/571)) ([dfa8736](https://github.com/AztecProtocol/barretenberg/commit/dfa8736136323e62a705066d25bef962a6a0b82d)) * Split plonk and honk tests ([#529](https://github.com/AztecProtocol/barretenberg/issues/529)) 
([ba583ff](https://github.com/AztecProtocol/barretenberg/commit/ba583ff00509f636feae7b78304b115e34fc2357)) diff --git a/barretenberg/VERSION b/barretenberg/VERSION index 4c89f3cde3ae..e7de65ca73e5 100644 --- a/barretenberg/VERSION +++ b/barretenberg/VERSION @@ -1 +1 @@ -v0.8.7 x-release-please-version +v0.8.14 x-release-please-version diff --git a/barretenberg/acir_tests/flows/all_cmds.sh b/barretenberg/acir_tests/flows/all_cmds.sh index dda4353fb6f0..c7ee147f6203 100755 --- a/barretenberg/acir_tests/flows/all_cmds.sh +++ b/barretenberg/acir_tests/flows/all_cmds.sh @@ -19,6 +19,8 @@ $BIN verify -k vk -p proof $FLAGS # Check supplemental functions. # Grep to determine success. $BIN contract -k vk $BFLAG -o - | grep "Verification Key Hash" > /dev/null -# Use jq to determine success. -$BIN proof_as_fields -k vk -p proof -o - | jq . > /dev/null -$BIN vk_as_fields -k vk -o - > vk_as_fields | jq . > /dev/null \ No newline at end of file +# Use jq to determine success, and also check result not empty. +OUTPUT=$($BIN proof_as_fields -k vk -p proof -o - | jq .) +[ -n "$OUTPUT" ] || exit 1 +OUTPUT=$($BIN vk_as_fields -k vk -o - | jq .) 
+[ -n "$OUTPUT" ] || exit 1 \ No newline at end of file diff --git a/barretenberg/acir_tests/run_acir_tests.sh b/barretenberg/acir_tests/run_acir_tests.sh index e495580dd971..d7f5a73aa7d8 100755 --- a/barretenberg/acir_tests/run_acir_tests.sh +++ b/barretenberg/acir_tests/run_acir_tests.sh @@ -7,7 +7,7 @@ set -eu BIN=${BIN:-../cpp/build/bin/bb} FLOW=${FLOW:-prove_and_verify} CRS_PATH=~/.bb-crs -BRANCH=master +BRANCH="master" VERBOSE=${VERBOSE:-} NAMED_TEST=${1:-} diff --git a/barretenberg/cpp/.clangd b/barretenberg/cpp/.clangd index 06f5d0d0590b..e09234d9e7a7 100644 --- a/barretenberg/cpp/.clangd +++ b/barretenberg/cpp/.clangd @@ -1,4 +1,4 @@ -CompileFlags: # Tweak the parse settings +CompileFlags: # Tweak the parse settings Remove: -fconstexpr-ops-limit=* --- # Applies all barretenberg source files @@ -42,7 +42,7 @@ Diagnostics: - misc-non-private-member-variables-in-classes - cppcoreguidelines-non-private-member-variables-in-classes # We have many `for` loops that violate this part of the bounds safety profile - - cppcoreguidelines-pro-bounds-constant-array-index + - cppcoreguidelines-pro-bounds-constant-array-index # Large diff; we often `use` an entire namespace. - google-build-using-namespace # Large diff @@ -59,6 +59,8 @@ Diagnostics: - readability-function-cognitive-complexity # It is often nicer to not be explicit - google-explicit-constructor + # Not honouring. 
+ - cppcoreguidelines-owning-memory --- # this divider is necessary # Disable some checks for Google Test/Bench @@ -69,5 +71,4 @@ Diagnostics: # these checks get triggered by the Google macros Remove: - cppcoreguidelines-avoid-non-const-global-variables - - cppcoreguidelines-owning-memory - - cppcoreguidelines-special-member-functions \ No newline at end of file + - cppcoreguidelines-special-member-functions diff --git a/barretenberg/cpp/.rebuild_patterns b/barretenberg/cpp/.rebuild_patterns index c5fea1fede51..6b4bc87e92c2 100644 --- a/barretenberg/cpp/.rebuild_patterns +++ b/barretenberg/cpp/.rebuild_patterns @@ -1,4 +1,4 @@ -^barretenberg/cpp/.*\\.(cpp|cc|cxx|c\\+\\+|h|hpp|hxx|h\\+\\+|c|h|inl|inc|ipp|tpp|cmake)$ -^barretenberg/cpp/.*CMakeLists\\.txt$ +^barretenberg/cpp/.*\.(cpp|cc|cxx|c\+\+|h|hpp|hxx|h\+\+|c|h|inl|inc|ipp|tpp|cmake)$ +^barretenberg/cpp/.*CMakeLists\.txt$ ^barretenberg/cpp/.*Dockerfile.*$ ^barretenberg/cpp/scripts/ diff --git a/barretenberg/cpp/CMakeLists.txt b/barretenberg/cpp/CMakeLists.txt index 4cd344386f54..f442073ff9d0 100644 --- a/barretenberg/cpp/CMakeLists.txt +++ b/barretenberg/cpp/CMakeLists.txt @@ -6,7 +6,7 @@ cmake_minimum_required(VERSION 3.24) project( Barretenberg DESCRIPTION "BN254 elliptic curve library, and PLONK SNARK prover" - VERSION 0.8.7 # x-release-please-version + VERSION 0.8.14 # x-release-please-version LANGUAGES CXX C ) # Insert version into `bb` config file @@ -124,7 +124,7 @@ if(COVERAGE) message(FATAL_ERROR "Couldn't find ${COV_EXECUTABLE_NAME}") endif() - # Add profiling compile options and disable optimisations + # Add profiling compile options and disable optimizations add_compile_options(-fprofile-instr-generate -fcoverage-mapping -O0) # Add a custom target for creating the report diff --git a/barretenberg/cpp/CMakePresets.json b/barretenberg/cpp/CMakePresets.json index 0b43346e94ac..6bafa8fed7aa 100644 --- a/barretenberg/cpp/CMakePresets.json +++ b/barretenberg/cpp/CMakePresets.json @@ -182,6 +182,35 @@ 
"cacheVariables": { "MULTITHREADING": "ON" } + }, + { + "name": "xray-1thread", + "displayName": "Build with single-threaded XRay Profiling", + "description": "Build with Clang and enable single-threaded LLVM XRay for profiling", + "generator": "Unix Makefiles", + "inherits": "clang16", + "environment": { + "CFLAGS": "-fxray-instrument -fxray-instruction-threshold=10", + "CXXFLAGS": "-fxray-instrument -fxray-instruction-threshold=10", + "LDFLAGS": "-fxray-instrument -fxray-instruction-threshold=10" + }, + "cacheVariables": { + "MULTITHREADING": "OFF" + }, + "binaryDir": "build-xray-1thread" + }, + { + "name": "xray", + "displayName": "Build with multi-threaded XRay Profiling", + "description": "Build with Clang and enable multi-threaded LLVM XRay for profiling", + "generator": "Unix Makefiles", + "inherits": "clang16", + "environment": { + "CFLAGS": "-fxray-instrument -fxray-instruction-threshold=10", + "CXXFLAGS": "-fxray-instrument -fxray-instruction-threshold=10", + "LDFLAGS": "-fxray-instrument -fxray-instruction-threshold=10" + }, + "binaryDir": "build-xray" } ], "buildPresets": [ @@ -273,6 +302,16 @@ "inheritConfigureEnvironment": true, "jobs": 0, "targets": ["barretenberg.wasm"] + }, + { + "name": "xray-1thread", + "configurePreset": "xray-1thread", + "inherits": "default" + }, + { + "name": "xray", + "configurePreset": "xray", + "inherits": "default" } ], "testPresets": [ diff --git a/barretenberg/cpp/cmake/module.cmake b/barretenberg/cpp/cmake/module.cmake index 27b94b9adc70..996e645e0912 100644 --- a/barretenberg/cpp/cmake/module.cmake +++ b/barretenberg/cpp/cmake/module.cmake @@ -6,11 +6,11 @@ # Scans for all .test.cpp files in a subdirectory, and creates a gtest binary named _tests. # Scans for all .bench.cpp files in a subdirectory, and creates a benchmark binary named _bench. 
# -# We have to get a bit complicated here, due to the fact CMake will not parallelise the building of object files +# We have to get a bit complicated here, due to the fact CMake will not parallelize the building of object files # between dependent targets, due to the potential of post-build code generation steps etc. # To work around this, we create "object libraries" containing the object files. # Then we declare executables/libraries that are to be built from these object files. -# These assets will only be linked as their dependencies complete, but we can parallelise the compilation at least. +# These assets will only be linked as their dependencies complete, but we can parallelize the compilation at least. # This is an interface library that can be used as an install target to include all header files # encountered by the `barretenberg_module` function. There is probably a better way to do this, diff --git a/barretenberg/cpp/scripts/collect_profile_information.sh b/barretenberg/cpp/scripts/collect_profile_information.sh new file mode 100755 index 000000000000..62757181ac35 --- /dev/null +++ b/barretenberg/cpp/scripts/collect_profile_information.sh @@ -0,0 +1,45 @@ +#!/bin/bash +set -eu + +PRESET=${1:-xray-1thread} # can also be 'xray' +ONLY_PROCESS=${2:-} + +# Move above script dir. +cd $(dirname $0)/.. + +# Configure and build with xray preset. +cmake --preset $PRESET +cmake --build --preset $PRESET + +cd build-$PRESET + +if [ -z "$ONLY_PROCESS" ]; then + # Clear old profile data. + rm -f xray-log.honk_bench_main_simple.* + + # Run benchmark with profiling. + XRAY_OPTIONS="patch_premain=true xray_mode=xray-basic verbosity=1" ./bin/honk_bench_main_simple +fi + +function shorten_cpp_names() { + NO_TEMP='s/<[^<>;]+>//g;' + sed -E '# Multiple rounds of template removal (crude but simple). + '"$NO_TEMP $NO_TEMP $NO_TEMP $NO_TEMP $NO_TEMP $NO_TEMP"' + # Remove problematic trailing const. + s/ const;/;/g; + # Parameter removal. 
+ s/\([^();]*\)/()/g; + # Return value removal. + s/;[^; ]+ /;/g; + # Remove namespaces. + s/[a-zA-Z_][a-zA-Z0-9_]*:://g; + ' +} + +# Process benchmark file. +llvm-xray-16 stack xray-log.honk_bench_main_simple.* \ + --instr_map=./bin/honk_bench_main_simple --stack-format=flame --aggregate-threads --aggregation-type=time --all-stacks \ + | node ../scripts/llvm_xray_stack_flame_corrector.js \ + | shorten_cpp_names \ + | ../scripts/flamegraph.pl > xray.svg +echo "Profiling complete, now you can do e.g. 'scp mainframe:`readlink -f xray.svg` .' on a local terminal and open the SVG in a browser." diff --git a/barretenberg/cpp/scripts/flamegraph.pl b/barretenberg/cpp/scripts/flamegraph.pl new file mode 100755 index 000000000000..d2172b616640 --- /dev/null +++ b/barretenberg/cpp/scripts/flamegraph.pl @@ -0,0 +1,1252 @@ +#!/usr/bin/perl -w +# +# flamegraph.pl flame stack grapher. +# +# This takes stack samples and renders a call graph, allowing hot functions +# and codepaths to be quickly identified. Stack samples can be generated using +# tools such as DTrace, perf, SystemTap, and Instruments. +# +# USAGE: ./flamegraph.pl [options] input.txt > graph.svg +# +# grep funcA input.txt | ./flamegraph.pl [options] > graph.svg +# +# Then open the resulting .svg in a web browser, for interactivity: mouse-over +# frames for info, click to zoom, and ctrl-F to search. +# +# Options are listed in the usage message (--help). +# +# The input is stack frames and sample counts formatted as single lines. Each +# frame in the stack is semicolon separated, with a space and count at the end +# of the line. These can be generated for Linux perf script output using +# stackcollapse-perf.pl, for DTrace using stackcollapse.pl, and for other tools +# using the other stackcollapse programs. 
Example input: +# +# swapper;start_kernel;rest_init;cpu_idle;default_idle;native_safe_halt 1 +# +# An optional extra column of counts can be provided to generate a differential +# flame graph of the counts, colored red for more, and blue for less. This +# can be useful when using flame graphs for non-regression testing. +# See the header comment in the difffolded.pl program for instructions. +# +# The input functions can optionally have annotations at the end of each +# function name, following a precedent by some tools (Linux perf's _[k]): +# _[k] for kernel +# _[i] for inlined +# _[j] for jit +# _[w] for waker +# Some of the stackcollapse programs support adding these annotations, eg, +# stackcollapse-perf.pl --kernel --jit. They are used merely for colors by +# some palettes, eg, flamegraph.pl --color=java. +# +# The output flame graph shows relative presence of functions in stack samples. +# The ordering on the x-axis has no meaning; since the data is samples, time +# order of events is not known. The order used sorts function names +# alphabetically. +# +# While intended to process stack samples, this can also process stack traces. +# For example, tracing stacks for memory allocation, or resource usage. You +# can use --title to set the title to reflect the content, and --countname +# to change "samples" to "bytes" etc. +# +# There are a few different palettes, selectable using --color. By default, +# the colors are selected at random (except for differentials). Functions +# called "-" will be printed gray, which can be used for stack separators (eg, +# between user and kernel stacks). +# +# HISTORY +# +# This was inspired by Neelakanth Nadgir's excellent function_call_graph.rb +# program, which visualized function entry and return trace events. As Neel +# wrote: "The output displayed is inspired by Roch's CallStackAnalyzer which +# was in turn inspired by the work on vftrace by Jan Boerhout". 
See: +# https://blogs.oracle.com/realneel/entry/visualizing_callstacks_via_dtrace_and +# +# Copyright 2016 Netflix, Inc. +# Copyright 2011 Joyent, Inc. All rights reserved. +# Copyright 2011 Brendan Gregg. All rights reserved. +# +# CDDL HEADER START +# +# The contents of this file are subject to the terms of the +# Common Development and Distribution License (the "License"). +# You may not use this file except in compliance with the License. +# +# You can obtain a copy of the license at docs/cddl1.txt or +# http://opensource.org/licenses/CDDL-1.0. +# See the License for the specific language governing permissions +# and limitations under the License. +# +# When distributing Covered Code, include this CDDL HEADER in each +# file and include the License file at docs/cddl1.txt. +# If applicable, add the following below this CDDL HEADER, with the +# fields enclosed by brackets "[]" replaced with your own identifying +# information: Portions Copyright [yyyy] [name of copyright owner] +# +# CDDL HEADER END +# +# 11-Oct-2014 Adrien Mahieux Added zoom. +# 21-Nov-2013 Shawn Sterling Added consistent palette file option +# 17-Mar-2013 Tim Bunce Added options and more tunables. +# 15-Dec-2011 Dave Pacheco Support for frames with whitespace. +# 10-Sep-2011 Brendan Gregg Created this. + +use strict; + +use Getopt::Long; + +use open qw(:std :utf8); + +# tunables +my $encoding; +my $fonttype = "Verdana"; +my $imagewidth = 1200; # max width, pixels +my $frameheight = 16; # max height is dynamic +my $fontsize = 12; # base text size +my $fontwidth = 0.59; # avg width relative to fontsize +my $minwidth = 0.1; # min function width, pixels +my $nametype = "Function:"; # what are the names in the data? +my $countname = "samples"; # what are the counts in the data? 
+my $colors = "hot"; # color theme +my $bgcolors = ""; # background color theme +my $nameattrfile; # file holding function attributes +my $timemax; # (override the) sum of the counts +my $factor = 1; # factor to scale counts by +my $hash = 0; # color by function name +my $palette = 0; # if we use consistent palettes (default off) +my %palette_map; # palette map hash +my $pal_file = "palette.map"; # palette map file name +my $stackreverse = 0; # reverse stack order, switching merge end +my $inverted = 0; # icicle graph +my $flamechart = 0; # produce a flame chart (sort by time, do not merge stacks) +my $negate = 0; # switch differential hues +my $titletext = ""; # centered heading +my $titledefault = "Flame Graph"; # overwritten by --title +my $titleinverted = "Icicle Graph"; # " " +my $searchcolor = "rgb(230,0,230)"; # color for search highlighting +my $notestext = ""; # embedded notes in SVG +my $subtitletext = ""; # second level title (optional) +my $help = 0; + +sub usage { + die < outfile.svg\n + --title TEXT # change title text + --subtitle TEXT # second level title (optional) + --width NUM # width of image (default 1200) + --height NUM # height of each frame (default 16) + --minwidth NUM # omit smaller functions (default 0.1 pixels) + --fonttype FONT # font type (default "Verdana") + --fontsize NUM # font size (default 12) + --countname TEXT # count type label (default "samples") + --nametype TEXT # name type label (default "Function:") + --colors PALETTE # set color palette. choices are: hot (default), mem, + # io, wakeup, chain, java, js, perl, red, green, blue, + # aqua, yellow, purple, orange + --bgcolors COLOR # set background colors. 
gradient choices are yellow + # (default), blue, green, grey; flat colors use "#rrggbb" + --hash # colors are keyed by function name hash + --cp # use consistent palette (palette.map) + --reverse # generate stack-reversed flame graph + --inverted # icicle graph + --flamechart # produce a flame chart (sort by time, do not merge stacks) + --negate # switch differential hues (blue<->red) + --notes TEXT # add notes comment in SVG (for debugging) + --help # this message + + eg, + $0 --title="Flame Graph: malloc()" trace.txt > graph.svg +USAGE_END +} + +GetOptions( + 'fonttype=s' => \$fonttype, + 'width=i' => \$imagewidth, + 'height=i' => \$frameheight, + 'encoding=s' => \$encoding, + 'fontsize=f' => \$fontsize, + 'fontwidth=f' => \$fontwidth, + 'minwidth=f' => \$minwidth, + 'title=s' => \$titletext, + 'subtitle=s' => \$subtitletext, + 'nametype=s' => \$nametype, + 'countname=s' => \$countname, + 'nameattr=s' => \$nameattrfile, + 'total=s' => \$timemax, + 'factor=f' => \$factor, + 'colors=s' => \$colors, + 'bgcolors=s' => \$bgcolors, + 'hash' => \$hash, + 'cp' => \$palette, + 'reverse' => \$stackreverse, + 'inverted' => \$inverted, + 'flamechart' => \$flamechart, + 'negate' => \$negate, + 'notes=s' => \$notestext, + 'help' => \$help, +) or usage(); +$help && usage(); + +# internals +my $ypad1 = $fontsize * 3; # pad top, include title +my $ypad2 = $fontsize * 2 + 10; # pad bottom, include labels +my $ypad3 = $fontsize * 2; # pad top, include subtitle (optional) +my $xpad = 10; # pad lefm and right +my $framepad = 1; # vertical padding for frames +my $depthmax = 0; +my %Events; +my %nameattr; + +if ($flamechart && $titletext eq "") { + $titletext = "Flame Chart"; +} + +if ($titletext eq "") { + unless ($inverted) { + $titletext = $titledefault; + } else { + $titletext = $titleinverted; + } +} + +if ($nameattrfile) { + # The name-attribute file format is a function name followed by a tab then + # a sequence of tab separated name=value pairs. 
+ open my $attrfh, $nameattrfile or die "Can't read $nameattrfile: $!\n"; + while (<$attrfh>) { + chomp; + my ($funcname, $attrstr) = split /\t/, $_, 2; + die "Invalid format in $nameattrfile" unless defined $attrstr; + $nameattr{$funcname} = { map { split /=/, $_, 2 } split /\t/, $attrstr }; + } +} + +if ($notestext =~ /[<>]/) { + die "Notes string can't contain < or >" +} + +# background colors: +# - yellow gradient: default (hot, java, js, perl) +# - green gradient: mem +# - blue gradient: io, wakeup, chain +# - gray gradient: flat colors (red, green, blue, ...) +if ($bgcolors eq "") { + # choose a default + if ($colors eq "mem") { + $bgcolors = "green"; + } elsif ($colors =~ /^(io|wakeup|chain)$/) { + $bgcolors = "blue"; + } elsif ($colors =~ /^(red|green|blue|aqua|yellow|purple|orange)$/) { + $bgcolors = "grey"; + } else { + $bgcolors = "yellow"; + } +} +my ($bgcolor1, $bgcolor2); +if ($bgcolors eq "yellow") { + $bgcolor1 = "#eeeeee"; # background color gradient start + $bgcolor2 = "#eeeeb0"; # background color gradient stop +} elsif ($bgcolors eq "blue") { + $bgcolor1 = "#eeeeee"; $bgcolor2 = "#e0e0ff"; +} elsif ($bgcolors eq "green") { + $bgcolor1 = "#eef2ee"; $bgcolor2 = "#e0ffe0"; +} elsif ($bgcolors eq "grey") { + $bgcolor1 = "#f8f8f8"; $bgcolor2 = "#e8e8e8"; +} elsif ($bgcolors =~ /^#......$/) { + $bgcolor1 = $bgcolor2 = $bgcolors; +} else { + die "Unrecognized bgcolor option \"$bgcolors\"" +} + +# SVG functions +{ package SVG; + sub new { + my $class = shift; + my $self = {}; + bless ($self, $class); + return $self; + } + + sub header { + my ($self, $w, $h) = @_; + my $enc_attr = ''; + if (defined $encoding) { + $enc_attr = qq{ encoding="$encoding"}; + } + $self->{svg} .= < + + + + +SVG + } + + sub include { + my ($self, $content) = @_; + $self->{svg} .= $content; + } + + sub colorAllocate { + my ($self, $r, $g, $b) = @_; + return "rgb($r,$g,$b)"; + } + + sub group_start { + my ($self, $attr) = @_; + + my @g_attr = map { + exists $attr->{$_} ? 
sprintf(qq/$_="%s"/, $attr->{$_}) : () + } qw(id class); + push @g_attr, $attr->{g_extra} if $attr->{g_extra}; + if ($attr->{href}) { + my @a_attr; + push @a_attr, sprintf qq/xlink:href="%s"/, $attr->{href} if $attr->{href}; + # default target=_top else links will open within SVG + push @a_attr, sprintf qq/target="%s"/, $attr->{target} || "_top"; + push @a_attr, $attr->{a_extra} if $attr->{a_extra}; + $self->{svg} .= sprintf qq/\n/, join(' ', (@a_attr, @g_attr)); + } else { + $self->{svg} .= sprintf qq/\n/, join(' ', @g_attr); + } + + $self->{svg} .= sprintf qq/%s<\/title>/, $attr->{title} + if $attr->{title}; # should be first element within g container + } + + sub group_end { + my ($self, $attr) = @_; + $self->{svg} .= $attr->{href} ? qq/<\/a>\n/ : qq/<\/g>\n/; + } + + sub filledRectangle { + my ($self, $x1, $y1, $x2, $y2, $fill, $extra) = @_; + $x1 = sprintf "%0.1f", $x1; + $x2 = sprintf "%0.1f", $x2; + my $w = sprintf "%0.1f", $x2 - $x1; + my $h = sprintf "%0.1f", $y2 - $y1; + $extra = defined $extra ? $extra : ""; + $self->{svg} .= qq/\n/; + } + + sub stringTTF { + my ($self, $id, $x, $y, $str, $extra) = @_; + $x = sprintf "%0.2f", $x; + $id = defined $id ? qq/id="$id"/ : ""; + $extra ||= ""; + $self->{svg} .= qq/$str<\/text>\n/; + } + + sub svg { + my $self = shift; + return "$self->{svg}\n"; + } + 1; +} + +sub namehash { + # Generate a vector hash for the name string, weighting early over + # later characters. We want to pick the same colors for function + # names across different flame graphs. 
+ my $name = shift; + my $vector = 0; + my $weight = 1; + my $max = 1; + my $mod = 10; + # if module name present, trunc to 1st char + $name =~ s/.(.*?)`//; + foreach my $c (split //, $name) { + my $i = (ord $c) % $mod; + $vector += ($i / ($mod++ - 1)) * $weight; + $max += 1 * $weight; + $weight *= 0.70; + last if $mod > 12; + } + return (1 - $vector / $max) +} + +sub color { + my ($type, $hash, $name) = @_; + my ($v1, $v2, $v3); + + if ($hash) { + $v1 = namehash($name); + $v2 = $v3 = namehash(scalar reverse $name); + } else { + $v1 = rand(1); + $v2 = rand(1); + $v3 = rand(1); + } + + # theme palettes + if (defined $type and $type eq "hot") { + my $r = 205 + int(50 * $v3); + my $g = 0 + int(230 * $v1); + my $b = 0 + int(55 * $v2); + return "rgb($r,$g,$b)"; + } + if (defined $type and $type eq "mem") { + my $r = 0; + my $g = 190 + int(50 * $v2); + my $b = 0 + int(210 * $v1); + return "rgb($r,$g,$b)"; + } + if (defined $type and $type eq "io") { + my $r = 80 + int(60 * $v1); + my $g = $r; + my $b = 190 + int(55 * $v2); + return "rgb($r,$g,$b)"; + } + + # multi palettes + if (defined $type and $type eq "java") { + # Handle both annotations (_[j], _[i], ...; which are + # accurate), as well as input that lacks any annotations, as + # best as possible. Without annotations, we get a little hacky + # and match on java|org|com, etc. 
+ if ($name =~ m:_\[j\]$:) { # jit annotation + $type = "green"; + } elsif ($name =~ m:_\[i\]$:) { # inline annotation + $type = "aqua"; + } elsif ($name =~ m:^L?(java|javax|jdk|net|org|com|io|sun)/:) { # Java + $type = "green"; + } elsif ($name =~ /:::/) { # Java, typical perf-map-agent method separator + $type = "green"; + } elsif ($name =~ /::/) { # C++ + $type = "yellow"; + } elsif ($name =~ m:_\[k\]$:) { # kernel annotation + $type = "orange"; + } elsif ($name =~ /::/) { # C++ + $type = "yellow"; + } else { # system + $type = "red"; + } + # fall-through to color palettes + } + if (defined $type and $type eq "perl") { + if ($name =~ /::/) { # C++ + $type = "yellow"; + } elsif ($name =~ m:Perl: or $name =~ m:\.pl:) { # Perl + $type = "green"; + } elsif ($name =~ m:_\[k\]$:) { # kernel + $type = "orange"; + } else { # system + $type = "red"; + } + # fall-through to color palettes + } + if (defined $type and $type eq "js") { + # Handle both annotations (_[j], _[i], ...; which are + # accurate), as well as input that lacks any annotations, as + # best as possible. Without annotations, we get a little hacky, + # and match on a "/" with a ".js", etc. 
+ if ($name =~ m:_\[j\]$:) { # jit annotation + if ($name =~ m:/:) { + $type = "green"; # source + } else { + $type = "aqua"; # builtin + } + } elsif ($name =~ /::/) { # C++ + $type = "yellow"; + } elsif ($name =~ m:/.*\.js:) { # JavaScript (match "/" in path) + $type = "green"; + } elsif ($name =~ m/:/) { # JavaScript (match ":" in builtin) + $type = "aqua"; + } elsif ($name =~ m/^ $/) { # Missing symbol + $type = "green"; + } elsif ($name =~ m:_\[k\]:) { # kernel + $type = "orange"; + } else { # system + $type = "red"; + } + # fall-through to color palettes + } + if (defined $type and $type eq "wakeup") { + $type = "aqua"; + # fall-through to color palettes + } + if (defined $type and $type eq "chain") { + if ($name =~ m:_\[w\]:) { # waker + $type = "aqua" + } else { # off-CPU + $type = "blue"; + } + # fall-through to color palettes + } + + # color palettes + if (defined $type and $type eq "red") { + my $r = 200 + int(55 * $v1); + my $x = 50 + int(80 * $v1); + return "rgb($r,$x,$x)"; + } + if (defined $type and $type eq "green") { + my $g = 200 + int(55 * $v1); + my $x = 50 + int(60 * $v1); + return "rgb($x,$g,$x)"; + } + if (defined $type and $type eq "blue") { + my $b = 205 + int(50 * $v1); + my $x = 80 + int(60 * $v1); + return "rgb($x,$x,$b)"; + } + if (defined $type and $type eq "yellow") { + my $x = 175 + int(55 * $v1); + my $b = 50 + int(20 * $v1); + return "rgb($x,$x,$b)"; + } + if (defined $type and $type eq "purple") { + my $x = 190 + int(65 * $v1); + my $g = 80 + int(60 * $v1); + return "rgb($x,$g,$x)"; + } + if (defined $type and $type eq "aqua") { + my $r = 50 + int(60 * $v1); + my $g = 165 + int(55 * $v1); + my $b = 165 + int(55 * $v1); + return "rgb($r,$g,$b)"; + } + if (defined $type and $type eq "orange") { + my $r = 190 + int(65 * $v1); + my $g = 90 + int(65 * $v1); + return "rgb($r,$g,0)"; + } + + return "rgb(0,0,0)"; +} + +sub color_scale { + my ($value, $max) = @_; + my ($r, $g, $b) = (255, 255, 255); + $value = -$value if $negate; + if 
($value > 0) { + $g = $b = int(210 * ($max - $value) / $max); + } elsif ($value < 0) { + $r = $g = int(210 * ($max + $value) / $max); + } + return "rgb($r,$g,$b)"; +} + +sub color_map { + my ($colors, $func) = @_; + if (exists $palette_map{$func}) { + return $palette_map{$func}; + } else { + $palette_map{$func} = color($colors, $hash, $func); + return $palette_map{$func}; + } +} + +sub write_palette { + open(FILE, ">$pal_file"); + foreach my $key (sort keys %palette_map) { + print FILE $key."->".$palette_map{$key}."\n"; + } + close(FILE); +} + +sub read_palette { + if (-e $pal_file) { + open(FILE, $pal_file) or die "can't open file $pal_file: $!"; + while ( my $line = ) { + chomp($line); + (my $key, my $value) = split("->",$line); + $palette_map{$key}=$value; + } + close(FILE) + } +} + +my %Node; # Hash of merged frame data +my %Tmp; + +# flow() merges two stacks, storing the merged frames and value data in %Node. +sub flow { + my ($last, $this, $v, $d) = @_; + + my $len_a = @$last - 1; + my $len_b = @$this - 1; + + my $i = 0; + my $len_same; + for (; $i <= $len_a; $i++) { + last if $i > $len_b; + last if $last->[$i] ne $this->[$i]; + } + $len_same = $i; + + for ($i = $len_a; $i >= $len_same; $i--) { + my $k = "$last->[$i];$i"; + # a unique ID is constructed from "func;depth;etime"; + # func-depth isn't unique, it may be repeated later. + $Node{"$k;$v"}->{stime} = delete $Tmp{$k}->{stime}; + if (defined $Tmp{$k}->{delta}) { + $Node{"$k;$v"}->{delta} = delete $Tmp{$k}->{delta}; + } + delete $Tmp{$k}; + } + + for ($i = $len_same; $i <= $len_b; $i++) { + my $k = "$this->[$i];$i"; + $Tmp{$k}->{stime} = $v; + if (defined $d) { + $Tmp{$k}->{delta} += $i == $len_b ? 
$d : 0; + } + } + + return $this; +} + +# parse input +my @Data; +my @SortedData; +my $last = []; +my $time = 0; +my $delta = undef; +my $ignored = 0; +my $line; +my $maxdelta = 1; + +# reverse if needed +foreach (<>) { + chomp; + $line = $_; + if ($stackreverse) { + # there may be an extra samples column for differentials + # XXX todo: redo these REs as one. It's repeated below. + my($stack, $samples) = (/^(.*)\s+?(\d+(?:\.\d*)?)$/); + my $samples2 = undef; + if ($stack =~ /^(.*)\s+?(\d+(?:\.\d*)?)$/) { + $samples2 = $samples; + ($stack, $samples) = $stack =~ (/^(.*)\s+?(\d+(?:\.\d*)?)$/); + unshift @Data, join(";", reverse split(";", $stack)) . " $samples $samples2"; + } else { + unshift @Data, join(";", reverse split(";", $stack)) . " $samples"; + } + } else { + unshift @Data, $line; + } +} + +if ($flamechart) { + # In flame chart mode, just reverse the data so time moves from left to right. + @SortedData = reverse @Data; +} else { + @SortedData = sort @Data; +} + +# process and merge frames +foreach (@SortedData) { + chomp; + # process: folded_stack count + # eg: func_a;func_b;func_c 31 + my ($stack, $samples) = (/^(.*)\s+?(\d+(?:\.\d*)?)$/); + unless (defined $samples and defined $stack) { + ++$ignored; + next; + } + + # there may be an extra samples column for differentials: + my $samples2 = undef; + if ($stack =~ /^(.*)\s+?(\d+(?:\.\d*)?)$/) { + $samples2 = $samples; + ($stack, $samples) = $stack =~ (/^(.*)\s+?(\d+(?:\.\d*)?)$/); + } + $delta = undef; + if (defined $samples2) { + $delta = $samples2 - $samples; + $maxdelta = abs($delta) if abs($delta) > $maxdelta; + } + + # for chain graphs, annotate waker frames with "_[w]", for later + # coloring. This is a hack, but has a precedent ("_[k]" from perf). 
+ if ($colors eq "chain") { + my @parts = split ";--;", $stack; + my @newparts = (); + $stack = shift @parts; + $stack .= ";--;"; + foreach my $part (@parts) { + $part =~ s/;/_[w];/g; + $part .= "_[w]"; + push @newparts, $part; + } + $stack .= join ";--;", @parts; + } + + # merge frames and populate %Node: + $last = flow($last, [ '', split ";", $stack ], $time, $delta); + + if (defined $samples2) { + $time += $samples2; + } else { + $time += $samples; + } +} +flow($last, [], $time, $delta); + +warn "Ignored $ignored lines with invalid format\n" if $ignored; +unless ($time) { + warn "ERROR: No stack counts found\n"; + my $im = SVG->new(); + # emit an error message SVG, for tools automating flamegraph use + my $imageheight = $fontsize * 5; + $im->header($imagewidth, $imageheight); + $im->stringTTF(undef, int($imagewidth / 2), $fontsize * 2, + "ERROR: No valid input provided to flamegraph.pl."); + print $im->svg; + exit 2; +} +if ($timemax and $timemax < $time) { + warn "Specified --total $timemax is less than actual total $time, so ignored\n" + if $timemax/$time > 0.02; # only warn is significant (e.g., not rounding etc) + undef $timemax; +} +$timemax ||= $time; + +my $widthpertime = ($imagewidth - 2 * $xpad) / $timemax; +my $minwidth_time = $minwidth / $widthpertime; + +# prune blocks that are too narrow and determine max depth +while (my ($id, $node) = each %Node) { + my ($func, $depth, $etime) = split ";", $id; + my $stime = $node->{stime}; + die "missing start for $id" if not defined $stime; + + if (($etime-$stime) < $minwidth_time) { + delete $Node{$id}; + next; + } + $depthmax = $depth if $depth > $depthmax; +} + +# draw canvas, and embed interactive JavaScript program +my $imageheight = (($depthmax + 1) * $frameheight) + $ypad1 + $ypad2; +$imageheight += $ypad3 if $subtitletext ne ""; +my $titlesize = $fontsize + 5; +my $im = SVG->new(); +my ($black, $vdgrey, $dgrey) = ( + $im->colorAllocate(0, 0, 0), + $im->colorAllocate(160, 160, 160), + 
$im->colorAllocate(200, 200, 200), + ); +$im->header($imagewidth, $imageheight); +my $inc = < + + + + + + + +INC +$im->include($inc); +$im->filledRectangle(0, 0, $imagewidth, $imageheight, 'url(#background)'); +$im->stringTTF("title", int($imagewidth / 2), $fontsize * 2, $titletext); +$im->stringTTF("subtitle", int($imagewidth / 2), $fontsize * 4, $subtitletext) if $subtitletext ne ""; +$im->stringTTF("details", $xpad, $imageheight - ($ypad2 / 2), " "); +$im->stringTTF("unzoom", $xpad, $fontsize * 2, "Reset Zoom", 'class="hide"'); +$im->stringTTF("search", $imagewidth - $xpad - 100, $fontsize * 2, "Search"); +$im->stringTTF("ignorecase", $imagewidth - $xpad - 16, $fontsize * 2, "ic"); +$im->stringTTF("matched", $imagewidth - $xpad - 100, $imageheight - ($ypad2 / 2), " "); + +if ($palette) { + read_palette(); +} + +# draw frames +$im->group_start({id => "frames"}); +while (my ($id, $node) = each %Node) { + my ($func, $depth, $etime) = split ";", $id; + my $stime = $node->{stime}; + my $delta = $node->{delta}; + + $etime = $timemax if $func eq "" and $depth == 0; + + my $x1 = $xpad + $stime * $widthpertime; + my $x2 = $xpad + $etime * $widthpertime; + my ($y1, $y2); + unless ($inverted) { + $y1 = $imageheight - $ypad2 - ($depth + 1) * $frameheight + $framepad; + $y2 = $imageheight - $ypad2 - $depth * $frameheight; + } else { + $y1 = $ypad1 + $depth * $frameheight; + $y2 = $ypad1 + ($depth + 1) * $frameheight - $framepad; + } + + my $samples = sprintf "%.0f", ($etime - $stime) * $factor; + (my $samples_txt = $samples) # add commas per perlfaq5 + =~ s/(^[-+]?\d+?(?=(?>(?:\d{3})+)(?!\d))|\G\d{3}(?=\d))/$1,/g; + + my $info; + if ($func eq "" and $depth == 0) { + $info = "all ($samples_txt $countname, 100%)"; + } else { + my $pct = sprintf "%.2f", ((100 * $samples) / ($timemax * $factor)); + my $escaped_func = $func; + # clean up SVG breaking characters: + $escaped_func =~ s/&/&/g; + $escaped_func =~ s//>/g; + $escaped_func =~ s/"/"/g; + $escaped_func =~ s/_\[[kwij]\]$//; 
# strip any annotation + unless (defined $delta) { + $info = "$escaped_func ($samples_txt $countname, $pct%)"; + } else { + my $d = $negate ? -$delta : $delta; + my $deltapct = sprintf "%.2f", ((100 * $d) / ($timemax * $factor)); + $deltapct = $d > 0 ? "+$deltapct" : $deltapct; + $info = "$escaped_func ($samples_txt $countname, $pct%; $deltapct%)"; + } + } + + my $nameattr = { %{ $nameattr{$func}||{} } }; # shallow clone + $nameattr->{title} ||= $info; + $im->group_start($nameattr); + + my $color; + if ($func eq "--") { + $color = $vdgrey; + } elsif ($func eq "-") { + $color = $dgrey; + } elsif (defined $delta) { + $color = color_scale($delta, $maxdelta); + } elsif ($palette) { + $color = color_map($colors, $func); + } else { + $color = color($colors, $hash, $func); + } + $im->filledRectangle($x1, $y1, $x2, $y2, $color, 'rx="2" ry="2"'); + + my $chars = int( ($x2 - $x1) / ($fontsize * $fontwidth)); + my $text = ""; + if ($chars >= 3) { # room for one char plus two dots + $func =~ s/_\[[kwij]\]$//; # strip any annotation + $text = substr $func, 0, $chars; + substr($text, -2, 2) = ".." if $chars < length $func; + $text =~ s/&/&/g; + $text =~ s//>/g; + } + $im->stringTTF(undef, $x1 + 3, 3 + ($y1 + $y2) / 2, $text); + + $im->group_end($nameattr); +} +$im->group_end(); + +print $im->svg; + +if ($palette) { + write_palette(); +} + +# vim: ts=8 sts=8 sw=8 noexpandtab diff --git a/barretenberg/cpp/scripts/llvm_xray_stack_flame_corrector.js b/barretenberg/cpp/scripts/llvm_xray_stack_flame_corrector.js new file mode 100644 index 000000000000..97323d4cb636 --- /dev/null +++ b/barretenberg/cpp/scripts/llvm_xray_stack_flame_corrector.js @@ -0,0 +1,183 @@ +// Corrects LLVM-XRAY stack traces to properly line up. +// Otherwise, there is a weird offset in each stack level that does not correspond to any function call. +// In the public domain. 
+// Conversion of public domain https://github.com/DerickEddington/corrector_of_llvm_xray_stack_flame +class Record { + constructor(node_path = [], attribute = BigInt(0)) { + this.node_path = node_path; + this.attribute = attribute; + } + + static fromString(line) { + const components = line.split(";"); + if (components.length >= 2) { + const attribute = BigInt(components.pop().trim()); + return new Record(components, attribute); + } else { + throw new Error("invalid flame format line"); + } + } + + toString() { + // To be reversed in-place + const np = this.node_path.map((x) => x); + return `${np.reverse().join(";")}; ${this.attribute}`; + } +} + +const Kind = { + LEAF: "Leaf", + BRANCH: "Branch", +}; + +class Node { + constructor() { + this.kind = Kind.LEAF; + this.attribute = null; + this.children = new Map(); + } + + correctForChild(child) { + if (this.attribute !== null) { + this.attribute -= child.attribute; + } + } + + child(name) { + if (this.kind === Kind.LEAF) { + this.kind = Kind.BRANCH; + this.children = new Map(); + return this.child(name); + } else { + if (!this.children.has(name)) { + this.children.set(name, new Node()); + } + return this.children.get(name); + } + } + + forEachChild(func) { + for (let child of this.children.values()) { + func(this, child); + } + } +} + +class Tree { + constructor() { + this.roots = new Node(); + this.original_order = []; + } +} + +class BadTree extends Tree { + constructor() { + super(); + } + + static fromIterator(records) { + let tree = new BadTree(); + for (let record of records) { + tree.extend(record); + } + return tree; + } + + extend(record) { + let parent = this.roots; + const [lastComponent, ...pathPrefix] = record.node_path.reverse(); + for (let component of pathPrefix.reverse()) { + parent = parent.child(component); + } + const lastNode = parent.child(lastComponent); + if (lastNode.attribute === null) { + lastNode.attribute = record.attribute; + this.original_order.push({ record, node: lastNode }); 
+ } else { + lastNode.attribute += record.attribute; + } + } + + correct() { + const recur = (parent, child) => { + parent.correctForChild(child); + child.forEachChild(recur); + }; + this.roots.forEachChild((_, root) => root.forEachChild(recur)); + return new GoodTree(this); + } +} + +class GoodTree extends Tree { + constructor(tree) { + super(); + this.roots = tree.roots; + this.original_order = tree.original_order; + } + + *iter() { + for (let ordRecord of this.original_order) { + const { record, node } = ordRecord; + const originalNodePath = record.node_path; + const possiblyCorrectedAttribute = node.attribute; + yield new Record(originalNodePath, possiblyCorrectedAttribute); + } + } + + async dump() { + let output = []; + for (let record of this.iter()) { + output.push(`${record.toString()}\n`); + } + return output; + } +} + +async function correctStackData(input) { + const inputRecords = input.map((line) => Record.fromString(line)); + + const badTree = BadTree.fromIterator(inputRecords); + const goodTree = badTree.correct(); + return await goodTree.dump(); +} + +async function test() { + const result = await correctStackData([ + "thread1;main; 5925054742", + "thread1;main;f2; 5925051360", + "thread1;main;f2;busy; 5925047168", + "thread1;main; 5941982261", + "thread1;main;f1; 5941978880", + "thread1;main;f1;busy; 5941971904", + "thread1;main; 5930717973", + "thread1;main;busy; 5930714592", + ]); + const expected = [ + "thread1;main; 10144\n", + "thread1;main;f2; 4192\n", + "thread1;main;f2;busy; 5925047168\n", + "thread1;main;f1; 6976\n", + "thread1;main;f1;busy; 5941971904\n", + "thread1;main;busy; 5930714592\n", + ]; + if (JSON.stringify(result) !== JSON.stringify(expected)) { + throw new Error("test fail"); + } + console.log("test pass"); +} + +async function main() { + // Read standard input + const inputLines = await new Promise((resolve) => { + let data = ""; + process.stdin + .on("data", (chunk) => (data += chunk)) + .on("end", () => 
resolve(data.split("\n").filter((line) => line))); + }); + for (const line of await correctStackData(inputLines)) { + process.stdout.write(line); + } +} + +// test(); +main(); diff --git a/barretenberg/cpp/src/CMakeLists.txt b/barretenberg/cpp/src/CMakeLists.txt index 51ecf4fabbd3..8addf06ba2ed 100644 --- a/barretenberg/cpp/src/CMakeLists.txt +++ b/barretenberg/cpp/src/CMakeLists.txt @@ -28,7 +28,7 @@ if(CMAKE_CXX_COMPILER_ID MATCHES "GNU") endif() # We enable -O1 level optimsations, even when compiling debug wasm, otherwise we get "local count too large" at runtime. -# We prioritise reducing size of final artefacts in release with -Oz. +# We prioritise reducing size of final artifacts in release with -Oz. if(WASM) set(CMAKE_CXX_FLAGS_DEBUG "-O1 -g") set(CMAKE_C_FLAGS_DEBUG "-O1 -g") @@ -83,7 +83,7 @@ endif() include(GNUInstallDirs) # For this library we include everything but the env and wasi modules, as it is the responsibility of the -# consumer of this library to define how and in what environment its artefact will run. +# consumer of this library to define how and in what environment its artifact will run. # libbarretenberg + libwasi = a wasi "reactor" that implements it's own env (e.g. logstr), e.g. barretenberg.wasm. # libbarretenberg + env = a wasi "command" that expects a full wasi runtime (e.g. wasmtime), e.g. test binaries. message(STATUS "Compiling all-in-one barretenberg archive") @@ -126,7 +126,7 @@ add_library( if(WASM) # With binaryen installed, it seems its wasm backend optimiser gets invoked automatically. # Due to either a bug in the optimiser, or non-standards compliant c++ in crypto/aes, tests start failing with - # -O3 level optimisations. We force down to -O2 for current workaround. + # -O3 level optimizations. We force down to -O2 for current workaround. # TODO: Time has passed, check if this is still needed. # UPDATE: Uninstall binaryen and any need downstream. 
set(CMAKE_CXX_FLAGS_RELEASE "-O2") diff --git a/barretenberg/cpp/src/barretenberg/bb/file_io.hpp b/barretenberg/cpp/src/barretenberg/bb/file_io.hpp index 09009ebbd2b2..31796e57ffa3 100644 --- a/barretenberg/cpp/src/barretenberg/bb/file_io.hpp +++ b/barretenberg/cpp/src/barretenberg/bb/file_io.hpp @@ -1,9 +1,10 @@ #pragma once #include #include +#include #include -inline std::vector read_file(const std::string& filename) +inline std::vector read_file(const std::string& filename, size_t bytes = 0) { // Open the file in binary mode and move to the end. std::ifstream file(filename, std::ios::binary | std::ios::ate); @@ -12,7 +13,7 @@ inline std::vector read_file(const std::string& filename) } // Get the file size. - std::streamsize size = file.tellg(); + std::streamsize size = bytes == 0 ? (std::streamsize)file.tellg() : (std::streamsize)bytes; if (size <= 0) { throw std::runtime_error("File is empty or there's an error reading it: " + filename); } diff --git a/barretenberg/cpp/src/barretenberg/bb/get_crs.hpp b/barretenberg/cpp/src/barretenberg/bb/get_crs.hpp index b4269d3ad3d3..1c205f2f3e88 100644 --- a/barretenberg/cpp/src/barretenberg/bb/get_crs.hpp +++ b/barretenberg/cpp/src/barretenberg/bb/get_crs.hpp @@ -65,7 +65,7 @@ inline std::vector get_g1_data(const std::file } if (size >= num_points) { vinfo("using cached crs at: ", path); - auto data = read_file(path / "g1.dat"); + auto data = read_file(path / "g1.dat", 28 + num_points * 64); auto points = std::vector(num_points); auto size_of_points_in_bytes = num_points * 64; barretenberg::srs::IO::read_affine_elements_from_buffer( diff --git a/barretenberg/cpp/src/barretenberg/bb/main.cpp b/barretenberg/cpp/src/barretenberg/bb/main.cpp index 81c4a2bfb49a..169dc6a10f49 100644 --- a/barretenberg/cpp/src/barretenberg/bb/main.cpp +++ b/barretenberg/cpp/src/barretenberg/bb/main.cpp @@ -13,17 +13,29 @@ #include using namespace barretenberg; -// Transcript downloading code only supports fetching and parsing the first 
transcript file. -const uint32_t MAX_CIRCUIT_SIZE = 1 << 22; std::string CRS_PATH = "./crs"; bool verbose = false; -void init() +acir_proofs::AcirComposer init(acir_format::acir_format& constraint_system) { + acir_proofs::AcirComposer acir_composer(0, verbose); + acir_composer.create_circuit(constraint_system); + auto subgroup_size = acir_composer.get_circuit_subgroup_size(); + // Must +1! - auto g1_data = get_g1_data(CRS_PATH, MAX_CIRCUIT_SIZE + 1); + auto g1_data = get_g1_data(CRS_PATH, subgroup_size + 1); auto g2_data = get_g2_data(CRS_PATH); srs::init_crs_factory(g1_data, g2_data); + + return acir_composer; +} + +acir_proofs::AcirComposer init() +{ + acir_proofs::AcirComposer acir_composer(0, verbose); + auto g2_data = get_g2_data(CRS_PATH); + srs::init_crs_factory({}, g2_data); + return acir_composer; } acir_format::WitnessVector get_witness(std::string const& witness_path) @@ -53,11 +65,12 @@ acir_format::acir_format get_constraint_system(std::string const& bytecode_path) */ bool proveAndVerify(const std::string& bytecodePath, const std::string& witnessPath, bool recursive) { - auto acir_composer = new acir_proofs::AcirComposer(MAX_CIRCUIT_SIZE, verbose); auto constraint_system = get_constraint_system(bytecodePath); auto witness = get_witness(witnessPath); - auto proof = acir_composer->create_proof(srs::get_crs_factory(), constraint_system, witness, recursive); - auto verified = acir_composer->verify_proof(proof, recursive); + auto acir_composer = init(constraint_system); + + auto proof = acir_composer.create_proof(constraint_system, witness, recursive); + auto verified = acir_composer.verify_proof(proof, recursive); vinfo("verified: ", verified); return verified; @@ -80,10 +93,10 @@ void prove(const std::string& bytecodePath, bool recursive, const std::string& outputPath) { - auto acir_composer = new acir_proofs::AcirComposer(MAX_CIRCUIT_SIZE, verbose); auto constraint_system = get_constraint_system(bytecodePath); auto witness = get_witness(witnessPath); - 
auto proof = acir_composer->create_proof(srs::get_crs_factory(), constraint_system, witness, recursive); + auto acir_composer = init(constraint_system); + auto proof = acir_composer.create_proof(constraint_system, witness, recursive); if (outputPath == "-") { writeRawBytesToStdout(proof); @@ -104,10 +117,9 @@ void prove(const std::string& bytecodePath, */ void gateCount(const std::string& bytecodePath) { - auto acir_composer = new acir_proofs::AcirComposer(MAX_CIRCUIT_SIZE, verbose); auto constraint_system = get_constraint_system(bytecodePath); - acir_composer->create_circuit(constraint_system); - auto gate_count = acir_composer->get_total_circuit_size(); + auto acir_composer = init(constraint_system); + auto gate_count = acir_composer.get_total_circuit_size(); writeUint64AsRawBytesToStdout(static_cast(gate_count)); vinfo("gate count: ", gate_count); @@ -131,10 +143,10 @@ void gateCount(const std::string& bytecodePath) */ bool verify(const std::string& proof_path, bool recursive, const std::string& vk_path) { - auto acir_composer = new acir_proofs::AcirComposer(MAX_CIRCUIT_SIZE, verbose); + auto acir_composer = init(); auto vk_data = from_buffer(read_file(vk_path)); - acir_composer->load_verification_key(barretenberg::srs::get_crs_factory(), std::move(vk_data)); - auto verified = acir_composer->verify_proof(read_file(proof_path), recursive); + acir_composer.load_verification_key(std::move(vk_data)); + auto verified = acir_composer.verify_proof(read_file(proof_path), recursive); vinfo("verified: ", verified); @@ -153,10 +165,10 @@ bool verify(const std::string& proof_path, bool recursive, const std::string& vk */ void writeVk(const std::string& bytecodePath, const std::string& outputPath) { - auto acir_composer = new acir_proofs::AcirComposer(MAX_CIRCUIT_SIZE, verbose); auto constraint_system = get_constraint_system(bytecodePath); - acir_composer->init_proving_key(srs::get_crs_factory(), constraint_system); - auto vk = acir_composer->init_verification_key(); + auto 
acir_composer = init(constraint_system); + acir_composer.init_proving_key(constraint_system); + auto vk = acir_composer.init_verification_key(); auto serialized_vk = to_buffer(*vk); if (outputPath == "-") { writeRawBytesToStdout(serialized_vk); @@ -182,10 +194,10 @@ void writeVk(const std::string& bytecodePath, const std::string& outputPath) */ void contract(const std::string& output_path, const std::string& vk_path) { - auto acir_composer = new acir_proofs::AcirComposer(MAX_CIRCUIT_SIZE, verbose); + auto acir_composer = init(); auto vk_data = from_buffer(read_file(vk_path)); - acir_composer->load_verification_key(barretenberg::srs::get_crs_factory(), std::move(vk_data)); - auto contract = acir_composer->get_solidity_verifier(); + acir_composer.load_verification_key(std::move(vk_data)); + auto contract = acir_composer.get_solidity_verifier(); if (output_path == "-") { writeStringToStdout(contract); @@ -223,9 +235,9 @@ void contract(const std::string& output_path, const std::string& vk_path) */ void proofAsFields(const std::string& proof_path, std::string const& vk_path, const std::string& output_path) { - auto acir_composer = new acir_proofs::AcirComposer(MAX_CIRCUIT_SIZE, verbose); + auto acir_composer = init(); auto vk_data = from_buffer(read_file(vk_path)); - auto data = acir_composer->serialize_proof_into_fields(read_file(proof_path), vk_data.num_public_inputs); + auto data = acir_composer.serialize_proof_into_fields(read_file(proof_path), vk_data.num_public_inputs); auto json = format("[", join(map(data, [](auto fr) { return format("\"", fr, "\""); })), "]"); if (output_path == "-") { @@ -252,10 +264,10 @@ void proofAsFields(const std::string& proof_path, std::string const& vk_path, co */ void vkAsFields(const std::string& vk_path, const std::string& output_path) { - auto acir_composer = new acir_proofs::AcirComposer(MAX_CIRCUIT_SIZE, verbose); + auto acir_composer = init(); auto vk_data = from_buffer(read_file(vk_path)); - 
acir_composer->load_verification_key(barretenberg::srs::get_crs_factory(), std::move(vk_data)); - auto data = acir_composer->serialize_verification_key_into_fields(); + acir_composer.load_verification_key(std::move(vk_data)); + auto data = acir_composer.serialize_verification_key_into_fields(); // We need to move vk_hash to the front... std::rotate(data.begin(), data.end() - 1, data.end()); @@ -338,17 +350,17 @@ int main(int argc, char* argv[]) if (command == "--version") { writeStringToStdout(BB_VERSION); return 0; - } else if (command == "info") { + } + if (command == "info") { std::string output_path = getOption(args, "-o", "info.json"); acvmInfo(output_path); return 0; } - init(); - if (command == "prove_and_verify") { return proveAndVerify(bytecode_path, witness_path, recursive) ? 0 : 1; - } else if (command == "prove") { + } + if (command == "prove") { std::string output_path = getOption(args, "-o", "./proofs/proof"); prove(bytecode_path, witness_path, recursive, output_path); } else if (command == "gates") { diff --git a/barretenberg/cpp/src/barretenberg/benchmark/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/benchmark/CMakeLists.txt index 16f375379bbb..d851d5af2855 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/CMakeLists.txt +++ b/barretenberg/cpp/src/barretenberg/benchmark/CMakeLists.txt @@ -2,4 +2,4 @@ add_subdirectory(decrypt_bench) add_subdirectory(pippenger_bench) add_subdirectory(plonk_bench) add_subdirectory(honk_bench) -add_subdirectory(relations_bench) \ No newline at end of file +add_subdirectory(relations_bench) diff --git a/barretenberg/cpp/src/barretenberg/benchmark/honk_bench/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/benchmark/honk_bench/CMakeLists.txt index 3234668be52c..38b08abcb342 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/honk_bench/CMakeLists.txt +++ b/barretenberg/cpp/src/barretenberg/benchmark/honk_bench/CMakeLists.txt @@ -19,4 +19,17 @@ foreach(BENCHMARK_SOURCE ${BENCHMARK_SOURCES}) 
add_executable(${BENCHMARK_NAME}_bench main.bench.cpp ${BENCHMARK_SOURCE} benchmark_utilities.hpp) target_link_libraries(${BENCHMARK_NAME}_bench ${LINKED_LIBRARIES}) add_custom_target(run_${BENCHMARK_NAME} COMMAND ${BENCHMARK_NAME} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -endforeach() \ No newline at end of file +endforeach() + +add_executable( + honk_bench_main_simple + main.simple.cpp +) + +target_link_libraries( + honk_bench_main_simple + PRIVATE + stdlib_sha256 + stdlib_keccak + stdlib_merkle_tree +) diff --git a/barretenberg/cpp/src/barretenberg/benchmark/honk_bench/benchmark_utilities.hpp b/barretenberg/cpp/src/barretenberg/benchmark/honk_bench/benchmark_utilities.hpp index 21e3513fd84c..288f9605c560 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/honk_bench/benchmark_utilities.hpp +++ b/barretenberg/cpp/src/barretenberg/benchmark/honk_bench/benchmark_utilities.hpp @@ -1,3 +1,4 @@ +#pragma once #include #include "barretenberg/honk/composer/ultra_composer.hpp" @@ -62,9 +63,6 @@ template void generate_sha256_test_circuit(Builder& builder, { std::string in; in.resize(32); - for (size_t i = 0; i < 32; ++i) { - in[i] = 0; - } proof_system::plonk::stdlib::packed_byte_array input(&builder, in); for (size_t i = 0; i < num_iterations; i++) { input = proof_system::plonk::stdlib::sha256(input); @@ -244,4 +242,4 @@ void construct_proof_with_specified_num_iterations(State& state, } } -} // namespace bench_utils \ No newline at end of file +} // namespace bench_utils diff --git a/barretenberg/cpp/src/barretenberg/benchmark/honk_bench/main.simple.cpp b/barretenberg/cpp/src/barretenberg/benchmark/honk_bench/main.simple.cpp new file mode 100644 index 000000000000..f33faf554d15 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/benchmark/honk_bench/main.simple.cpp @@ -0,0 +1,61 @@ +/* Entry point for profiling with e.g. LLVM xray. + * This provides a simple entrypoint to bypass artifacts with + * TODO(AD): Consider if we can directly profile the bench executables. 
+ */ +#include +#include +#include + +#include "barretenberg/honk/composer/ultra_composer.hpp" +#include "barretenberg/proof_system/circuit_builder/ultra_circuit_builder.hpp" +#include "barretenberg/proof_system/types/circuit_type.hpp" +#include "barretenberg/stdlib/encryption/ecdsa/ecdsa.hpp" +#include "barretenberg/stdlib/hash/keccak/keccak.hpp" +#include "barretenberg/stdlib/hash/sha256/sha256.hpp" +#include "barretenberg/stdlib/merkle_tree/membership.hpp" +#include "barretenberg/stdlib/merkle_tree/memory_store.hpp" +#include "barretenberg/stdlib/merkle_tree/memory_tree.hpp" +#include "barretenberg/stdlib/merkle_tree/merkle_tree.hpp" +#include "barretenberg/stdlib/primitives/bool/bool.hpp" +#include "barretenberg/stdlib/primitives/curves/secp256k1.hpp" +#include "barretenberg/stdlib/primitives/field/field.hpp" +#include "barretenberg/stdlib/primitives/packed_byte_array/packed_byte_array.hpp" +#include "barretenberg/stdlib/primitives/witness/witness.hpp" + +using namespace proof_system::plonk; + +using UltraBuilder = proof_system::UltraCircuitBuilder; +using UltraHonk = proof_system::honk::UltraComposer; + +template void generate_sha256_test_circuit(Builder& builder, size_t num_iterations) +{ + std::string in; + in.resize(32); + proof_system::plonk::stdlib::packed_byte_array input(&builder, in); + for (size_t i = 0; i < num_iterations; i++) { + input = proof_system::plonk::stdlib::sha256(input); + } +} + +/** + * @brief Benchmark: Construction of a Ultra Honk proof for a circuit determined by the provided circuit function + */ +void construct_proof_ultra() noexcept +{ + barretenberg::srs::init_crs_factory("../srs_db/ignition"); + // Constuct circuit and prover; don't include this part in measurement + auto builder = typename UltraHonk::CircuitBuilder(); + generate_sha256_test_circuit(builder, 1); + + auto composer = UltraHonk(); + auto instance = composer.create_instance(builder); + auto ext_prover = composer.create_prover(instance); + for (size_t i = 0; i < 10; 
i++) { + auto proof = ext_prover.construct_proof(); + } +} + +int main() +{ + construct_proof_ultra(); +} diff --git a/barretenberg/cpp/src/barretenberg/benchmark/relations_bench/relations.bench.cpp b/barretenberg/cpp/src/barretenberg/benchmark/relations_bench/relations.bench.cpp index d2f9d6110406..5b41002d2fb5 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/relations_bench/relations.bench.cpp +++ b/barretenberg/cpp/src/barretenberg/benchmark/relations_bench/relations.bench.cpp @@ -30,18 +30,17 @@ template void execute_relation(::benchmark: .public_input_delta = public_input_delta, }; - using ClaimedEvaluations = typename Flavor::ProverPolynomialsEvaluations; - using RelationValues = typename Relation::RelationValues; + using AllValues = typename Flavor::AllValues; + using ArrayOfValuesOverSubrelations = typename Relation::ArrayOfValuesOverSubrelations; // Extract an array containing all the polynomial evaluations at a given row i - ClaimedEvaluations new_value; - // Define the appropriate RelationValues type for this relation and initialize to zero - RelationValues accumulator; + AllValues new_value; + // Define the appropriate ArrayOfValuesOverSubrelations type for this relation and initialize to zero + ArrayOfValuesOverSubrelations accumulator; // Evaluate each constraint in the relation and check that each is satisfied - Relation relation; for (auto _ : state) { - relation.add_full_relation_value_contribution(accumulator, new_value, params); + Relation::accumulate(accumulator, new_value, params, 1); } } diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format.cpp index 250efe82ecd0..59b5192a6452 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format.cpp @@ -11,29 +11,30 @@ void read_witness(Builder& builder, WitnessVector const& witness) } } -void create_circuit(Builder& 
builder, acir_format const& constraint_system) +void add_public_vars(Builder& builder, acir_format const& constraint_system) { - if (constraint_system.public_inputs.size() > constraint_system.varnum) { - info("create_circuit: too many public inputs!"); - } - for (size_t i = 1; i < constraint_system.varnum; ++i) { // If the index is in the public inputs vector, then we add it as a public input if (std::find(constraint_system.public_inputs.begin(), constraint_system.public_inputs.end(), i) != constraint_system.public_inputs.end()) { + builder.add_public_variable(0); + } else { builder.add_variable(0); } } +} +void build_constraints(Builder& builder, acir_format const& constraint_system, bool has_valid_witness_assignments) +{ // Add arithmetic gates for (const auto& constraint : constraint_system.constraints) { builder.create_poly_gate(constraint); } - // Add and constraint + // Add logic constraint for (const auto& constraint : constraint_system.logic_constraints) { create_logic_gate( builder, constraint.a, constraint.b, constraint.result, constraint.num_bits, constraint.is_xor_gate); @@ -54,14 +55,14 @@ void create_circuit(Builder& builder, acir_format const& constraint_system) create_schnorr_verify_constraints(builder, constraint); } - // Add ECDSA K1 constraints + // Add ECDSA k1 constraints for (const auto& constraint : constraint_system.ecdsa_k1_constraints) { - create_ecdsa_k1_verify_constraints(builder, constraint, false); + create_ecdsa_k1_verify_constraints(builder, constraint, has_valid_witness_assignments); } - // Add ECDSA R1 constraints + // Add ECDSA r1 constraints for (const auto& constraint : constraint_system.ecdsa_r1_constraints) { - create_ecdsa_r1_verify_constraints(builder, constraint, false); + create_ecdsa_r1_verify_constraints(builder, constraint, has_valid_witness_assignments); } // Add blake2s constraints @@ -94,13 +95,13 @@ void create_circuit(Builder& builder, acir_format const& constraint_system) // Add block constraints for (const auto& 
constraint : constraint_system.block_constraints) { - create_block_constraints(builder, constraint, false); + create_block_constraints(builder, constraint, has_valid_witness_assignments); } // Add recursion constraints for (size_t i = 0; i < constraint_system.recursion_constraints.size(); ++i) { auto& constraint = constraint_system.recursion_constraints[i]; - create_recursion_constraints(builder, constraint); + create_recursion_constraints(builder, constraint, has_valid_witness_assignments); // make sure the verification key records the public input indices of the final recursion output // (N.B. up to the ACIR description to make sure that the final output aggregation object wires are public @@ -113,6 +114,16 @@ void create_circuit(Builder& builder, acir_format const& constraint_system) } } +void create_circuit(Builder& builder, acir_format const& constraint_system) +{ + if (constraint_system.public_inputs.size() > constraint_system.varnum) { + info("create_circuit: too many public inputs!"); + } + + add_public_vars(builder, constraint_system); + build_constraints(builder, constraint_system, false); +} + Builder create_circuit(const acir_format& constraint_system, size_t size_hint) { Builder builder(size_hint); @@ -135,104 +146,9 @@ void create_circuit_with_witness(Builder& builder, acir_format const& constraint info("create_circuit_with_witness: too many public inputs!"); } - for (size_t i = 1; i < constraint_system.varnum; ++i) { - // If the index is in the public inputs vector, then we add it as a public input - - if (std::find(constraint_system.public_inputs.begin(), constraint_system.public_inputs.end(), i) != - constraint_system.public_inputs.end()) { - - builder.add_public_variable(0); - - } else { - builder.add_variable(0); - } - } - + add_public_vars(builder, constraint_system); read_witness(builder, witness); - - // Add arithmetic gates - for (const auto& constraint : constraint_system.constraints) { - builder.create_poly_gate(constraint); - } - - // Add 
logic constraint - for (const auto& constraint : constraint_system.logic_constraints) { - create_logic_gate( - builder, constraint.a, constraint.b, constraint.result, constraint.num_bits, constraint.is_xor_gate); - } - - // Add range constraint - for (const auto& constraint : constraint_system.range_constraints) { - builder.create_range_constraint(constraint.witness, constraint.num_bits, ""); - } - - // Add sha256 constraints - for (const auto& constraint : constraint_system.sha256_constraints) { - create_sha256_constraints(builder, constraint); - } - - // Add schnorr constraints - for (const auto& constraint : constraint_system.schnorr_constraints) { - create_schnorr_verify_constraints(builder, constraint); - } - - // Add ECDSA k1 constraints - for (const auto& constraint : constraint_system.ecdsa_k1_constraints) { - create_ecdsa_k1_verify_constraints(builder, constraint); - } - - // Add ECDSA r1 constraints - for (const auto& constraint : constraint_system.ecdsa_r1_constraints) { - create_ecdsa_r1_verify_constraints(builder, constraint); - } - - // Add blake2s constraints - for (const auto& constraint : constraint_system.blake2s_constraints) { - create_blake2s_constraints(builder, constraint); - } - - // Add keccak constraints - for (const auto& constraint : constraint_system.keccak_constraints) { - create_keccak_constraints(builder, constraint); - } - for (const auto& constraint : constraint_system.keccak_var_constraints) { - create_keccak_var_constraints(builder, constraint); - } - - // Add pedersen constraints - for (const auto& constraint : constraint_system.pedersen_constraints) { - create_pedersen_constraint(builder, constraint); - } - - // Add fixed base scalar mul constraints - for (const auto& constraint : constraint_system.fixed_base_scalar_mul_constraints) { - create_fixed_base_constraint(builder, constraint); - } - - // Add hash to field constraints - for (const auto& constraint : constraint_system.hash_to_field_constraints) { - 
create_hash_to_field_constraints(builder, constraint); - } - - // Add block constraints - for (const auto& constraint : constraint_system.block_constraints) { - create_block_constraints(builder, constraint); - } - - // Add recursion constraints - for (size_t i = 0; i < constraint_system.recursion_constraints.size(); ++i) { - auto& constraint = constraint_system.recursion_constraints[i]; - create_recursion_constraints(builder, constraint, true); - - // make sure the verification key records the public input indices of the final recursion output - // (N.B. up to the ACIR description to make sure that the final output aggregation object wires are public - // inputs!) - if (i == constraint_system.recursion_constraints.size() - 1) { - std::vector proof_output_witness_indices(constraint.output_aggregation_object.begin(), - constraint.output_aggregation_object.end()); - builder.set_recursive_proof(proof_output_witness_indices); - } - } + build_constraints(builder, constraint_system, true); } } // namespace acir_format diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/block_constraint.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/block_constraint.cpp index 9eb908f0b00c..ff10f339f4b4 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/block_constraint.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/block_constraint.cpp @@ -35,7 +35,7 @@ void create_block_constraints(Builder& builder, const BlockConstraint constraint field_ct value = poly_to_field_ct(op.value, builder); field_ct index = poly_to_field_ct(op.index, builder); // For a ROM table, constant read should be optimised out: - // The rom_table won't work with a constant read because the table may not be initialised + // The rom_table won't work with a constant read because the table may not be initialized ASSERT(op.index.q_l != 0); // We create a new witness w to avoid issues with non-valid witness assignements: // if witness are not assigned, then w will be zero and 
table[w] will work @@ -55,7 +55,7 @@ void create_block_constraints(Builder& builder, const BlockConstraint constraint field_ct value = poly_to_field_ct(op.value, builder); field_ct index = poly_to_field_ct(op.index, builder); if (has_valid_witness_assignments == false) { - index = field_ct(0); + index = field_ct::from_witness(&builder, 0); } if (op.access_type == 0) { value.assert_equal(table.read(index)); diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/schnorr_verify.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/schnorr_verify.cpp index 5c05924b88a4..8b7048b52d71 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/schnorr_verify.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/schnorr_verify.cpp @@ -71,7 +71,7 @@ void create_schnorr_verify_constraints(Builder& builder, const SchnorrConstraint auto new_sig = convert_signature(builder, input.signature); // From ignorance, you will see me convert a bunch of witnesses from ByteArray -> BitArray // This may not be the most efficient way to do it. It is being used as it is known to work, - // optimisations are welcome! + // optimizations are welcome! 
// First convert the message of u8 witnesses into a byte_array // Do this by taking each element as a u8 and writing it to the byte array diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/serde/acir.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/serde/acir.hpp index 92839b90ec26..e59f786ce660 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/serde/acir.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/serde/acir.hpp @@ -712,43 +712,9 @@ struct BrilligOutputs { static BrilligOutputs bincodeDeserialize(std::vector); }; -struct ForeignCallOutput { - - struct Single { - Circuit::Value value; - - friend bool operator==(const Single&, const Single&); - std::vector bincodeSerialize() const; - static Single bincodeDeserialize(std::vector); - }; - - struct Array { - std::vector value; - - friend bool operator==(const Array&, const Array&); - std::vector bincodeSerialize() const; - static Array bincodeDeserialize(std::vector); - }; - - std::variant value; - - friend bool operator==(const ForeignCallOutput&, const ForeignCallOutput&); - std::vector bincodeSerialize() const; - static ForeignCallOutput bincodeDeserialize(std::vector); -}; - -struct ForeignCallResult { - std::vector values; - - friend bool operator==(const ForeignCallResult&, const ForeignCallResult&); - std::vector bincodeSerialize() const; - static ForeignCallResult bincodeDeserialize(std::vector); -}; - struct Brillig { std::vector inputs; std::vector outputs; - std::vector foreign_call_results; std::vector bytecode; std::optional predicate; @@ -3253,9 +3219,6 @@ inline bool operator==(const Brillig& lhs, const Brillig& rhs) if (!(lhs.outputs == rhs.outputs)) { return false; } - if (!(lhs.foreign_call_results == rhs.foreign_call_results)) { - return false; - } if (!(lhs.bytecode == rhs.bytecode)) { return false; } @@ -3291,7 +3254,6 @@ void serde::Serializable::serialize(const Circuit::Brillig& ob serializer.increase_container_depth(); 
serde::Serializable::serialize(obj.inputs, serializer); serde::Serializable::serialize(obj.outputs, serializer); - serde::Serializable::serialize(obj.foreign_call_results, serializer); serde::Serializable::serialize(obj.bytecode, serializer); serde::Serializable::serialize(obj.predicate, serializer); serializer.decrease_container_depth(); @@ -3305,7 +3267,6 @@ Circuit::Brillig serde::Deserializable::deserialize(Deserializ Circuit::Brillig obj; obj.inputs = serde::Deserializable::deserialize(deserializer); obj.outputs = serde::Deserializable::deserialize(deserializer); - obj.foreign_call_results = serde::Deserializable::deserialize(deserializer); obj.bytecode = serde::Deserializable::deserialize(deserializer); obj.predicate = serde::Deserializable::deserialize(deserializer); deserializer.decrease_container_depth(); @@ -4758,200 +4719,6 @@ Circuit::Expression serde::Deserializable::deserialize(Dese namespace Circuit { -inline bool operator==(const ForeignCallOutput& lhs, const ForeignCallOutput& rhs) -{ - if (!(lhs.value == rhs.value)) { - return false; - } - return true; -} - -inline std::vector ForeignCallOutput::bincodeSerialize() const -{ - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); -} - -inline ForeignCallOutput ForeignCallOutput::bincodeDeserialize(std::vector input) -{ - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw_or_abort("Some input bytes were not read"); - } - return value; -} - -} // end of namespace Circuit - -template <> -template -void serde::Serializable::serialize(const Circuit::ForeignCallOutput& obj, - Serializer& serializer) -{ - serializer.increase_container_depth(); - serde::Serializable::serialize(obj.value, serializer); - serializer.decrease_container_depth(); -} - -template <> -template 
-Circuit::ForeignCallOutput serde::Deserializable::deserialize(Deserializer& deserializer) -{ - deserializer.increase_container_depth(); - Circuit::ForeignCallOutput obj; - obj.value = serde::Deserializable::deserialize(deserializer); - deserializer.decrease_container_depth(); - return obj; -} - -namespace Circuit { - -inline bool operator==(const ForeignCallOutput::Single& lhs, const ForeignCallOutput::Single& rhs) -{ - if (!(lhs.value == rhs.value)) { - return false; - } - return true; -} - -inline std::vector ForeignCallOutput::Single::bincodeSerialize() const -{ - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); -} - -inline ForeignCallOutput::Single ForeignCallOutput::Single::bincodeDeserialize(std::vector input) -{ - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw_or_abort("Some input bytes were not read"); - } - return value; -} - -} // end of namespace Circuit - -template <> -template -void serde::Serializable::serialize(const Circuit::ForeignCallOutput::Single& obj, - Serializer& serializer) -{ - serde::Serializable::serialize(obj.value, serializer); -} - -template <> -template -Circuit::ForeignCallOutput::Single serde::Deserializable::deserialize( - Deserializer& deserializer) -{ - Circuit::ForeignCallOutput::Single obj; - obj.value = serde::Deserializable::deserialize(deserializer); - return obj; -} - -namespace Circuit { - -inline bool operator==(const ForeignCallOutput::Array& lhs, const ForeignCallOutput::Array& rhs) -{ - if (!(lhs.value == rhs.value)) { - return false; - } - return true; -} - -inline std::vector ForeignCallOutput::Array::bincodeSerialize() const -{ - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); -} - -inline 
ForeignCallOutput::Array ForeignCallOutput::Array::bincodeDeserialize(std::vector input) -{ - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw_or_abort("Some input bytes were not read"); - } - return value; -} - -} // end of namespace Circuit - -template <> -template -void serde::Serializable::serialize(const Circuit::ForeignCallOutput::Array& obj, - Serializer& serializer) -{ - serde::Serializable::serialize(obj.value, serializer); -} - -template <> -template -Circuit::ForeignCallOutput::Array serde::Deserializable::deserialize( - Deserializer& deserializer) -{ - Circuit::ForeignCallOutput::Array obj; - obj.value = serde::Deserializable::deserialize(deserializer); - return obj; -} - -namespace Circuit { - -inline bool operator==(const ForeignCallResult& lhs, const ForeignCallResult& rhs) -{ - if (!(lhs.values == rhs.values)) { - return false; - } - return true; -} - -inline std::vector ForeignCallResult::bincodeSerialize() const -{ - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); -} - -inline ForeignCallResult ForeignCallResult::bincodeDeserialize(std::vector input) -{ - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw_or_abort("Some input bytes were not read"); - } - return value; -} - -} // end of namespace Circuit - -template <> -template -void serde::Serializable::serialize(const Circuit::ForeignCallResult& obj, - Serializer& serializer) -{ - serializer.increase_container_depth(); - serde::Serializable::serialize(obj.values, serializer); - serializer.decrease_container_depth(); -} - -template <> -template -Circuit::ForeignCallResult serde::Deserializable::deserialize(Deserializer& deserializer) -{ - 
deserializer.increase_container_depth(); - Circuit::ForeignCallResult obj; - obj.values = serde::Deserializable::deserialize(deserializer); - deserializer.decrease_container_depth(); - return obj; -} - -namespace Circuit { - inline bool operator==(const FunctionInput& lhs, const FunctionInput& rhs) { if (!(lhs.witness == rhs.witness)) { diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/acir_composer.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/acir_composer.cpp index 29463a06db7f..c5afdc100462 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/acir_composer.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/acir_composer.cpp @@ -12,88 +12,60 @@ namespace acir_proofs { AcirComposer::AcirComposer(size_t size_hint, bool verbose) - : composer_(/*p_key=*/0, /*v_key=*/0) - , size_hint_(size_hint) + : size_hint_(size_hint) , verbose_(verbose) {} void AcirComposer::create_circuit(acir_format::acir_format& constraint_system) { + if (builder_.get_num_gates() > 1) { + return; + } + vinfo("building circuit..."); builder_ = acir_format::create_circuit(constraint_system, size_hint_); - - // We are done with the constraint system at this point, and we need the memory slab back. - constraint_system.constraints.clear(); - constraint_system.constraints.shrink_to_fit(); - exact_circuit_size_ = builder_.get_num_gates(); total_circuit_size_ = builder_.get_total_circuit_size(); circuit_subgroup_size_ = builder_.get_circuit_subgroup_size(total_circuit_size_); size_hint_ = circuit_subgroup_size_; + vinfo("gates: ", builder_.get_total_circuit_size()); } -void AcirComposer::init_proving_key( - std::shared_ptr> const& crs_factory, - acir_format::acir_format& constraint_system) +void AcirComposer::init_proving_key(acir_format::acir_format& constraint_system) { - vinfo("building circuit... 
", size_hint_); - builder_ = acir_format::Builder(size_hint_); - acir_format::create_circuit(builder_, constraint_system); - - // We are done with the constraint system at this point, and we need the memory slab back. - constraint_system.constraints.clear(); - constraint_system.constraints.shrink_to_fit(); - - exact_circuit_size_ = builder_.get_num_gates(); - total_circuit_size_ = builder_.get_total_circuit_size(); - circuit_subgroup_size_ = builder_.get_circuit_subgroup_size(total_circuit_size_); - - composer_ = acir_format::Composer(crs_factory); + create_circuit(constraint_system); + acir_format::Composer composer; vinfo("computing proving key..."); - proving_key_ = composer_.compute_proving_key(builder_); + proving_key_ = composer.compute_proving_key(builder_); } -std::vector AcirComposer::create_proof( - std::shared_ptr> const& crs_factory, - acir_format::acir_format& constraint_system, - acir_format::WitnessVector& witness, - bool is_recursive) +std::vector AcirComposer::create_proof(acir_format::acir_format& constraint_system, + acir_format::WitnessVector& witness, + bool is_recursive) { - // Release prior memory first. - composer_ = acir_format::Composer(/*p_key=*/0, /*v_key=*/0); - - vinfo("building circuit..."); + vinfo("building circuit with witness..."); + builder_ = acir_format::Builder(size_hint_); create_circuit_with_witness(builder_, constraint_system, witness); vinfo("gates: ", builder_.get_total_circuit_size()); - composer_ = [&]() { + auto composer = [&]() { if (proving_key_) { - auto composer = acir_format::Composer(proving_key_, verification_key_); - // You can't produce the verification key unless you manually set the crs. Which seems like a bug. 
- composer_.crs_factory_ = crs_factory; - return composer; - } else { - return acir_format::Composer(crs_factory); + return acir_format::Composer(proving_key_, nullptr); } - }(); - if (!proving_key_) { + + acir_format::Composer composer; vinfo("computing proving key..."); - proving_key_ = composer_.compute_proving_key(builder_); + proving_key_ = composer.compute_proving_key(builder_); vinfo("done."); - } - - // We are done with the constraint system at this point, and we need the memory slab back. - constraint_system.constraints.clear(); - constraint_system.constraints.shrink_to_fit(); - witness.clear(); - witness.shrink_to_fit(); + return composer; + }(); vinfo("creating proof..."); std::vector proof; if (is_recursive) { - auto prover = composer_.create_prover(builder_); + auto prover = composer.create_prover(builder_); proof = prover.construct_proof().proof_data; } else { - auto prover = composer_.create_ultra_with_keccak_prover(builder_); + auto prover = composer.create_ultra_with_keccak_prover(builder_); proof = prover.construct_proof().proof_data; } vinfo("done."); @@ -102,26 +74,29 @@ std::vector AcirComposer::create_proof( std::shared_ptr AcirComposer::init_verification_key() { + if (!proving_key_) { + throw_or_abort("Compute proving key first."); + } vinfo("computing verification key..."); - verification_key_ = composer_.compute_verification_key(builder_); + acir_format::Composer composer(proving_key_, nullptr); + verification_key_ = composer.compute_verification_key(builder_); vinfo("done."); return verification_key_; } -void AcirComposer::load_verification_key( - std::shared_ptr> const& crs_factory, - proof_system::plonk::verification_key_data&& data) +void AcirComposer::load_verification_key(proof_system::plonk::verification_key_data&& data) { - verification_key_ = - std::make_shared(std::move(data), crs_factory->get_verifier_crs()); - composer_ = acir_format::Composer(proving_key_, verification_key_); + verification_key_ = std::make_shared( + 
std::move(data), srs::get_crs_factory()->get_verifier_crs()); } bool AcirComposer::verify_proof(std::vector const& proof, bool is_recursive) { + acir_format::Composer composer(proving_key_, verification_key_); + if (!verification_key_) { vinfo("computing verification key..."); - verification_key_ = composer_.compute_verification_key(builder_); + verification_key_ = composer.compute_verification_key(builder_); vinfo("done."); } @@ -129,10 +104,10 @@ bool AcirComposer::verify_proof(std::vector const& proof, bool is_recur builder_.public_inputs.resize((proof.size() - 2144) / 32); if (is_recursive) { - auto verifier = composer_.create_verifier(builder_); + auto verifier = composer.create_verifier(builder_); return verifier.verify_proof({ proof }); } else { - auto verifier = composer_.create_ultra_with_keccak_verifier(builder_); + auto verifier = composer.create_ultra_with_keccak_verifier(builder_); return verifier.verify_proof({ proof }); } } diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/acir_composer.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/acir_composer.hpp index 25814e78d918..32b678268e38 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/acir_composer.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/acir_composer.hpp @@ -14,18 +14,13 @@ class AcirComposer { void create_circuit(acir_format::acir_format& constraint_system); - void init_proving_key(std::shared_ptr> const& crs_factory, - acir_format::acir_format& constraint_system); + void init_proving_key(acir_format::acir_format& constraint_system); - std::vector create_proof( - std::shared_ptr> const& crs_factory, - acir_format::acir_format& constraint_system, - acir_format::WitnessVector& witness, - bool is_recursive); + std::vector create_proof(acir_format::acir_format& constraint_system, + acir_format::WitnessVector& witness, + bool is_recursive); - void load_verification_key( - std::shared_ptr> const& crs_factory, - 
proof_system::plonk::verification_key_data&& data); + void load_verification_key(proof_system::plonk::verification_key_data&& data); std::shared_ptr init_verification_key(); @@ -34,6 +29,7 @@ class AcirComposer { std::string get_solidity_verifier(); size_t get_exact_circuit_size() { return exact_circuit_size_; }; size_t get_total_circuit_size() { return total_circuit_size_; }; + size_t get_circuit_subgroup_size() { return circuit_subgroup_size_; }; std::vector serialize_proof_into_fields(std::vector const& proof, size_t num_inner_public_inputs); @@ -42,7 +38,6 @@ class AcirComposer { private: acir_format::Builder builder_; - acir_format::Composer composer_; size_t size_hint_; size_t exact_circuit_size_; size_t total_circuit_size_; diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/c_bind.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/c_bind.cpp index 1af145e2978c..0bdfbb519d2f 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/c_bind.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_proofs/c_bind.cpp @@ -35,7 +35,7 @@ WASM_EXPORT void acir_init_proving_key(in_ptr acir_composer_ptr, uint8_t const* auto acir_composer = reinterpret_cast(*acir_composer_ptr); auto constraint_system = acir_format::circuit_buf_to_acir_format(from_buffer>(acir_vec)); - acir_composer->init_proving_key(barretenberg::srs::get_crs_factory(), constraint_system); + acir_composer->init_proving_key(constraint_system); } WASM_EXPORT void acir_create_proof(in_ptr acir_composer_ptr, @@ -48,8 +48,7 @@ WASM_EXPORT void acir_create_proof(in_ptr acir_composer_ptr, auto constraint_system = acir_format::circuit_buf_to_acir_format(from_buffer>(acir_vec)); auto witness = acir_format::witness_buf_to_witness_data(from_buffer>(witness_vec)); - auto proof_data = - acir_composer->create_proof(barretenberg::srs::get_crs_factory(), constraint_system, witness, *is_recursive); + auto proof_data = acir_composer->create_proof(constraint_system, witness, *is_recursive); *out = 
to_heap_buffer(proof_data); } @@ -57,7 +56,7 @@ WASM_EXPORT void acir_load_verification_key(in_ptr acir_composer_ptr, uint8_t co { auto acir_composer = reinterpret_cast(*acir_composer_ptr); auto vk_data = from_buffer(vk_buf); - acir_composer->load_verification_key(barretenberg::srs::get_crs_factory(), std::move(vk_data)); + acir_composer->load_verification_key(std::move(vk_data)); } WASM_EXPORT void acir_init_verification_key(in_ptr acir_composer_ptr) diff --git a/barretenberg/cpp/src/barretenberg/ecc/fields/field_declarations.hpp b/barretenberg/cpp/src/barretenberg/ecc/fields/field_declarations.hpp index 4e2b292fa8c3..799c202f7098 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/fields/field_declarations.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/fields/field_declarations.hpp @@ -23,6 +23,7 @@ namespace barretenberg { template struct alignas(32) field { public: + using View = field; using Params = Params_; using in_buf = const uint8_t*; using vec_in_buf = const uint8_t*; diff --git a/barretenberg/cpp/src/barretenberg/examples/c_bind.cpp b/barretenberg/cpp/src/barretenberg/examples/c_bind.cpp index 23129857262c..53f373658feb 100644 --- a/barretenberg/cpp/src/barretenberg/examples/c_bind.cpp +++ b/barretenberg/cpp/src/barretenberg/examples/c_bind.cpp @@ -6,7 +6,7 @@ using namespace proof_system::plonk::stdlib::types; WASM_EXPORT void examples_simple_create_and_verify_proof(bool* valid) { - auto ptrs = examples::simple::create_builder_and_composer(barretenberg::srs::get_crs_factory()); + auto ptrs = examples::simple::create_builder_and_composer(); auto proof = examples::simple::create_proof(ptrs); *valid = examples::simple::verify_proof(ptrs, proof); examples::simple::delete_builder_and_composer(ptrs); diff --git a/barretenberg/cpp/src/barretenberg/examples/simple/simple.cpp b/barretenberg/cpp/src/barretenberg/examples/simple/simple.cpp index 19af6aa0d691..ad35fa0ee950 100644 --- a/barretenberg/cpp/src/barretenberg/examples/simple/simple.cpp +++ 
b/barretenberg/cpp/src/barretenberg/examples/simple/simple.cpp @@ -19,8 +19,7 @@ void build_circuit(Builder& builder) } } -BuilderComposerPtrs create_builder_and_composer( - std::shared_ptr> const& crs_factory) +BuilderComposerPtrs create_builder_and_composer() { // WARNING: Size hint is essential to perform 512k circuits! auto builder = std::make_unique(CIRCUIT_SIZE); @@ -36,7 +35,7 @@ BuilderComposerPtrs create_builder_and_composer( info("composer gates: ", builder->get_num_gates()); info("computing proving key..."); - auto composer = std::make_unique(crs_factory); + auto composer = std::make_unique(); auto pk = composer->compute_proving_key(*builder); return { builder.release(), composer.release() }; diff --git a/barretenberg/cpp/src/barretenberg/examples/simple/simple.hpp b/barretenberg/cpp/src/barretenberg/examples/simple/simple.hpp index 0932708bf216..264d328d2d20 100644 --- a/barretenberg/cpp/src/barretenberg/examples/simple/simple.hpp +++ b/barretenberg/cpp/src/barretenberg/examples/simple/simple.hpp @@ -12,8 +12,7 @@ struct BuilderComposerPtrs { Composer* composer; }; -BuilderComposerPtrs create_builder_and_composer( - std::shared_ptr> const& crs_factory); +BuilderComposerPtrs create_builder_and_composer(); proof create_proof(BuilderComposerPtrs pair); diff --git a/barretenberg/cpp/src/barretenberg/examples/simple/simple.test.cpp b/barretenberg/cpp/src/barretenberg/examples/simple/simple.test.cpp index a43c3de45033..17689497a750 100644 --- a/barretenberg/cpp/src/barretenberg/examples/simple/simple.test.cpp +++ b/barretenberg/cpp/src/barretenberg/examples/simple/simple.test.cpp @@ -8,8 +8,8 @@ namespace examples::simple { TEST(examples_simple, create_proof) { auto srs_path = std::filesystem::absolute("../srs_db/ignition"); - auto crs_factory = std::make_shared>(srs_path); - auto ptrs = create_builder_and_composer(crs_factory); + srs::init_crs_factory(srs_path); + auto ptrs = create_builder_and_composer(); auto proof = create_proof(ptrs); bool valid = 
verify_proof(ptrs, proof); delete_builder_and_composer(ptrs); diff --git a/barretenberg/cpp/src/barretenberg/honk/composer/eccvm_composer.cpp b/barretenberg/cpp/src/barretenberg/honk/composer/eccvm_composer.cpp index 0e55849f1f21..6b388cd7183c 100644 --- a/barretenberg/cpp/src/barretenberg/honk/composer/eccvm_composer.cpp +++ b/barretenberg/cpp/src/barretenberg/honk/composer/eccvm_composer.cpp @@ -15,7 +15,7 @@ template void ECCVMComposer_::compute_witness(Circu return; } - auto polynomials = circuit_constructor.compute_full_polynomials(); + auto polynomials = circuit_constructor.compute_polynomials(); auto key_wires = proving_key->get_wires(); auto poly_wires = polynomials.get_wires(); diff --git a/barretenberg/cpp/src/barretenberg/honk/composer/eccvm_composer.hpp b/barretenberg/cpp/src/barretenberg/honk/composer/eccvm_composer.hpp index 93b3b9914618..7d8dd2c6ebba 100644 --- a/barretenberg/cpp/src/barretenberg/honk/composer/eccvm_composer.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/composer/eccvm_composer.hpp @@ -5,6 +5,7 @@ #include "barretenberg/proof_system/circuit_builder/eccvm/eccvm_circuit_builder.hpp" #include "barretenberg/proof_system/composer/composer_lib.hpp" #include "barretenberg/srs/factories/file_crs_factory.hpp" +#include "barretenberg/srs/global_crs.hpp" namespace proof_system::honk { template class ECCVMComposer_ { diff --git a/barretenberg/cpp/src/barretenberg/honk/composer/eccvm_composer.test.cpp b/barretenberg/cpp/src/barretenberg/honk/composer/eccvm_composer.test.cpp index 5fb5daafb66a..70af8587b2e1 100644 --- a/barretenberg/cpp/src/barretenberg/honk/composer/eccvm_composer.test.cpp +++ b/barretenberg/cpp/src/barretenberg/honk/composer/eccvm_composer.test.cpp @@ -18,7 +18,7 @@ namespace test_eccvm_composer { template class ECCVMComposerTests : public ::testing::Test { protected: - // TODO(640): The Standard Honk on Grumpkin test suite fails unless the SRS is initialised for every test. 
+ // TODO(640): The Standard Honk on Grumpkin test suite fails unless the SRS is initialized for every test. void SetUp() override { if constexpr (std::is_same::value) { diff --git a/barretenberg/cpp/src/barretenberg/honk/composer/goblin/full_goblin_composer.test.cpp b/barretenberg/cpp/src/barretenberg/honk/composer/goblin/full_goblin_composer.test.cpp index e5d1995fb892..2012cb1547a6 100644 --- a/barretenberg/cpp/src/barretenberg/honk/composer/goblin/full_goblin_composer.test.cpp +++ b/barretenberg/cpp/src/barretenberg/honk/composer/goblin/full_goblin_composer.test.cpp @@ -101,13 +101,56 @@ class FullGoblinComposerTests : public ::testing::Test { // Store the commitment data for use by the prover of the next circuit op_queue->set_commitment_data(op_queue_commitments); } + + /** + * @brief Construct and a verify a Honk proof + * + */ + bool construct_and_verify_honk_proof(auto& composer, auto& builder) + { + auto instance = composer.create_instance(builder); + auto prover = composer.create_prover(instance); + auto verifier = composer.create_verifier(instance); + auto proof = prover.construct_proof(); + bool verified = verifier.verify_proof(proof); + + return verified; + } + + /** + * @brief Construct and verify a Goblin ECC op queue merge proof + * + */ + bool construct_and_verify_merge_proof(auto& composer, auto& op_queue) + { + auto merge_prover = composer.create_merge_prover(op_queue); + auto merge_verifier = composer.create_merge_verifier(10); + auto merge_proof = merge_prover.construct_proof(); + bool verified = merge_verifier.verify_proof(merge_proof); + + return verified; + } + + /** + * @brief Construct and verify a Goblin ECC op queue merge proof + * + */ + bool construct_and_verify_eccvm_proof(auto& composer, auto& builder) + { + auto prover = composer.create_prover(builder); + auto proof = prover.construct_proof(); + auto verifier = composer.create_verifier(builder); + bool verified = verifier.verify_proof(proof); + + return verified; + } }; /** * @brief 
Test proof construction/verification for a circuit with ECC op gates, public inputs, and basic arithmetic * gates * @note We simulate op queue interactions with a previous circuit so the actual circuit under test utilizes an op queue - * with non-empty 'previous' data. This avoid complications with zero-commitments etc. + * with non-empty 'previous' data. This avoids complications with zero-commitments etc. * */ TEST_F(FullGoblinComposerTests, SimpleCircuit) @@ -124,13 +167,16 @@ TEST_F(FullGoblinComposerTests, SimpleCircuit) generate_test_circuit(builder); + // The same composer is used to manage Honk and Merge prover/verifier auto composer = GoblinUltraComposer(); - auto instance = composer.create_instance(builder); - auto prover = composer.create_prover(instance); - auto verifier = composer.create_verifier(instance); - auto proof = prover.construct_proof(); - bool verified = verifier.verify_proof(proof); - EXPECT_EQ(verified, true); + + // Construct and verify Ultra Goblin Honk proof + auto honk_verified = construct_and_verify_honk_proof(composer, builder); + EXPECT_TRUE(honk_verified); + + // Construct and verify op queue merge proof + auto merge_verified = construct_and_verify_merge_proof(composer, op_queue); + EXPECT_TRUE(merge_verified); } // Construct an ECCVM circuit then generate and verify its proof @@ -138,15 +184,10 @@ TEST_F(FullGoblinComposerTests, SimpleCircuit) // Instantiate an ECCVM builder with the vm ops stored in the op queue auto builder = ECCVMBuilder(op_queue->raw_ops); - // // Can fiddle with one of the operands to trigger a failure - // builder.vm_operations[0].z1 *= 2; - + // Construct and verify ECCVM proof auto composer = ECCVMComposer(); - auto prover = composer.create_prover(builder); - auto proof = prover.construct_proof(); - auto verifier = composer.create_verifier(builder); - bool verified = verifier.verify_proof(proof); - ASSERT_TRUE(verified); + auto eccvm_verified = construct_and_verify_eccvm_proof(composer, builder); + 
EXPECT_TRUE(eccvm_verified); } } @@ -168,13 +209,16 @@ TEST_F(FullGoblinComposerTests, SimpleCircuitFailureCase) generate_test_circuit(builder); + // The same composer is used to manage Honk and Merge prover/verifier auto composer = GoblinUltraComposer(); - auto instance = composer.create_instance(builder); - auto prover = composer.create_prover(instance); - auto verifier = composer.create_verifier(instance); - auto proof = prover.construct_proof(); - bool verified = verifier.verify_proof(proof); - EXPECT_EQ(verified, true); + + // Construct and verify Ultra Goblin Honk proof + auto honk_verified = construct_and_verify_honk_proof(composer, builder); + EXPECT_TRUE(honk_verified); + + // Construct and verify op queue merge proof + auto merge_verified = construct_and_verify_merge_proof(composer, op_queue); + EXPECT_TRUE(merge_verified); } // Construct an ECCVM circuit then generate and verify its proof @@ -185,12 +229,10 @@ TEST_F(FullGoblinComposerTests, SimpleCircuitFailureCase) // Fiddle with one of the operands to trigger a failure builder.vm_operations[0].z1 += 1; + // Construct and verify ECCVM proof auto composer = ECCVMComposer(); - auto prover = composer.create_prover(builder); - auto proof = prover.construct_proof(); - auto verifier = composer.create_verifier(builder); - bool verified = verifier.verify_proof(proof); - EXPECT_EQ(verified, false); + auto eccvm_verified = construct_and_verify_eccvm_proof(composer, builder); + EXPECT_FALSE(eccvm_verified); } } diff --git a/barretenberg/cpp/src/barretenberg/honk/composer/goblin_ultra_composer.test.cpp b/barretenberg/cpp/src/barretenberg/honk/composer/goblin_ultra_composer.test.cpp index 5d33532eb1bd..584fb41d7a2e 100644 --- a/barretenberg/cpp/src/barretenberg/honk/composer/goblin_ultra_composer.test.cpp +++ b/barretenberg/cpp/src/barretenberg/honk/composer/goblin_ultra_composer.test.cpp @@ -56,18 +56,11 @@ class GoblinUltraHonkComposerTests : public ::testing::Test { } /** - * @brief Construct a goblin ultra 
circuit then generate a verify its proof + * @brief Construct and a verify a Honk proof * - * @param op_queue - * @return auto */ - bool construct_test_circuit_then_generate_and_verify_proof(auto& op_queue) + bool construct_and_verify_honk_proof(auto& composer, auto& builder) { - auto builder = proof_system::GoblinUltraCircuitBuilder(op_queue); - - generate_test_circuit(builder); - - auto composer = GoblinUltraComposer(); auto instance = composer.create_instance(builder); auto prover = composer.create_prover(instance); auto verifier = composer.create_verifier(instance); @@ -76,6 +69,20 @@ class GoblinUltraHonkComposerTests : public ::testing::Test { return verified; } + + /** + * @brief Construct and verify a Goblin ECC op queue merge proof + * + */ + bool construct_and_verify_merge_proof(auto& composer, auto& op_queue) + { + auto merge_prover = composer.create_merge_prover(op_queue); + auto merge_verifier = composer.create_merge_verifier(10); + auto merge_proof = merge_prover.construct_proof(); + bool verified = merge_verifier.verify_proof(merge_proof); + + return verified; + } }; /** @@ -92,18 +99,27 @@ TEST_F(GoblinUltraHonkComposerTests, SingleCircuit) // Add mock data to op queue to simulate interaction with a previous circuit op_queue->populate_with_mock_initital_data(); - // Construct a test circuit then generate and verify its proof - auto verified = construct_test_circuit_then_generate_and_verify_proof(op_queue); + auto builder = proof_system::GoblinUltraCircuitBuilder(op_queue); + + generate_test_circuit(builder); - EXPECT_EQ(verified, true); + auto composer = GoblinUltraComposer(); + + // Construct and verify Honk proof + auto honk_verified = construct_and_verify_honk_proof(composer, builder); + EXPECT_TRUE(honk_verified); + + // Construct and verify Goblin ECC op queue Merge proof + auto merge_verified = construct_and_verify_merge_proof(composer, op_queue); + EXPECT_TRUE(merge_verified); } /** - * @brief Test proof construction/verification for a 
circuit with ECC op gates, public inputs, and basic arithmetic - * gates + * @brief Test Merge proof construction/verification for multiple circuits with ECC op gates, public inputs, and + * basic arithmetic gates * */ -TEST_F(GoblinUltraHonkComposerTests, MultipleCircuits) +TEST_F(GoblinUltraHonkComposerTests, MultipleCircuitsMergeOnly) { // Instantiate EccOpQueue. This will be shared across all circuits in the series auto op_queue = std::make_shared(); @@ -114,7 +130,75 @@ TEST_F(GoblinUltraHonkComposerTests, MultipleCircuits) // Construct multiple test circuits that share an ECC op queue. Generate and verify a proof for each. size_t NUM_CIRCUITS = 3; for (size_t i = 0; i < NUM_CIRCUITS; ++i) { - construct_test_circuit_then_generate_and_verify_proof(op_queue); + auto builder = proof_system::GoblinUltraCircuitBuilder(op_queue); + + generate_test_circuit(builder); + + auto composer = GoblinUltraComposer(); + + // Construct and verify Goblin ECC op queue Merge proof + auto merge_verified = construct_and_verify_merge_proof(composer, op_queue); + EXPECT_TRUE(merge_verified); + } +} + +/** + * @brief Test Honk proof construction/verification for multiple circuits with ECC op gates, public inputs, and + * basic arithmetic gates + * + */ +TEST_F(GoblinUltraHonkComposerTests, MultipleCircuitsHonkOnly) +{ + // Instantiate EccOpQueue. This will be shared across all circuits in the series + auto op_queue = std::make_shared(); + + // Add mock data to op queue to simulate interaction with a previous circuit + op_queue->populate_with_mock_initital_data(); + + // Construct multiple test circuits that share an ECC op queue. Generate and verify a proof for each. 
+ size_t NUM_CIRCUITS = 3; + for (size_t i = 0; i < NUM_CIRCUITS; ++i) { + auto builder = proof_system::GoblinUltraCircuitBuilder(op_queue); + + generate_test_circuit(builder); + + auto composer = GoblinUltraComposer(); + + // Construct and verify Honk proof + auto honk_verified = construct_and_verify_honk_proof(composer, builder); + EXPECT_TRUE(honk_verified); + } +} + +/** + * @brief Test Honk and Merge proof construction/verification for multiple circuits with ECC op gates, public inputs, + * and basic arithmetic gates + * + */ +TEST_F(GoblinUltraHonkComposerTests, MultipleCircuitsHonkAndMerge) +{ + // Instantiate EccOpQueue. This will be shared across all circuits in the series + auto op_queue = std::make_shared(); + + // Add mock data to op queue to simulate interaction with a previous circuit + op_queue->populate_with_mock_initital_data(); + + // Construct multiple test circuits that share an ECC op queue. Generate and verify a proof for each. + size_t NUM_CIRCUITS = 3; + for (size_t i = 0; i < NUM_CIRCUITS; ++i) { + auto builder = proof_system::GoblinUltraCircuitBuilder(op_queue); + + generate_test_circuit(builder); + + auto composer = GoblinUltraComposer(); + + // Construct and verify Honk proof + auto honk_verified = construct_and_verify_honk_proof(composer, builder); + EXPECT_TRUE(honk_verified); + + // Construct and verify Goblin ECC op queue Merge proof + auto merge_verified = construct_and_verify_merge_proof(composer, op_queue); + EXPECT_TRUE(merge_verified); } // Compute the commitments to the aggregate op queue directly and check that they match those that were computed diff --git a/barretenberg/cpp/src/barretenberg/honk/composer/ultra_composer.cpp b/barretenberg/cpp/src/barretenberg/honk/composer/ultra_composer.cpp index f01b4d3588d1..017d96b0b229 100644 --- a/barretenberg/cpp/src/barretenberg/honk/composer/ultra_composer.cpp +++ b/barretenberg/cpp/src/barretenberg/honk/composer/ultra_composer.cpp @@ -36,6 +36,5 @@ UltraVerifier_ 
UltraComposer_::create_verifier(std::shared_ptr; -template class UltraComposer_; template class UltraComposer_; } // namespace proof_system::honk diff --git a/barretenberg/cpp/src/barretenberg/honk/composer/ultra_composer.hpp b/barretenberg/cpp/src/barretenberg/honk/composer/ultra_composer.hpp index 8cf4c27c8aa6..24835a00b324 100644 --- a/barretenberg/cpp/src/barretenberg/honk/composer/ultra_composer.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/composer/ultra_composer.hpp @@ -1,5 +1,7 @@ #pragma once #include "barretenberg/honk/instance/prover_instance.hpp" +#include "barretenberg/honk/proof_system/goblin_merge/merge_prover.hpp" +#include "barretenberg/honk/proof_system/goblin_merge/merge_verifier.hpp" #include "barretenberg/honk/proof_system/protogalaxy_prover.hpp" #include "barretenberg/honk/proof_system/protogalaxy_verifier.hpp" #include "barretenberg/honk/proof_system/ultra_prover.hpp" @@ -72,6 +74,34 @@ template class UltraComposer_ { UltraProver_ create_prover(std::shared_ptr); UltraVerifier_ create_verifier(std::shared_ptr); + /** + * @brief Create Prover for Goblin ECC op queue merge protocol + * + * @param op_queue + * @return MergeProver_ + */ + MergeProver_ create_merge_prover(std::shared_ptr op_queue) + { + // Store the previous aggregate op queue size and update the current one + op_queue->set_size_data(); + // Merge requires a commitment key with size equal to that of the current op queue transcript T_i since the + // shift of the current contribution t_i will be of degree equal to deg(T_i) + auto commitment_key = compute_commitment_key(op_queue->get_current_size()); + return MergeProver_(commitment_key, op_queue); + } + + /** + * @brief Create Verifier for Goblin ECC op queue merge protocol + * + * @param size Size of commitment key required to commit to shifted op queue contribution t_i + * @return MergeVerifier_ + */ + MergeVerifier_ create_merge_verifier(size_t size) + { + auto pcs_verification_key = std::make_unique(size, crs_factory_); + 
return MergeVerifier_(std::move(pcs_verification_key)); + } + ProtoGalaxyProver_ create_folding_prover(std::vector> instances) { ProverInstances insts(instances); @@ -92,12 +122,8 @@ template class UltraComposer_ { }; }; extern template class UltraComposer_; -// TODO: the UltraGrumpkin flavor still works on BN254 because plookup needs to be templated to be able to construct -// Grumpkin circuits. -extern template class UltraComposer_; extern template class UltraComposer_; // TODO(#532): this pattern is weird; is this not instantiating the templates? using UltraComposer = UltraComposer_; -using UltraGrumpkinComposer = UltraComposer_; using GoblinUltraComposer = UltraComposer_; } // namespace proof_system::honk diff --git a/barretenberg/cpp/src/barretenberg/honk/composer/ultra_composer.test.cpp b/barretenberg/cpp/src/barretenberg/honk/composer/ultra_composer.test.cpp index 9b1775da7d5e..993bfa6728ef 100644 --- a/barretenberg/cpp/src/barretenberg/honk/composer/ultra_composer.test.cpp +++ b/barretenberg/cpp/src/barretenberg/honk/composer/ultra_composer.test.cpp @@ -922,35 +922,4 @@ TEST_F(UltraHonkComposerTests, range_constraint_small_variable) prove_and_verify(circuit_builder, composer, /*expected_result=*/true); } -TEST(UltraGrumpkinHonkComposer, XorConstraint) -{ - using fr = barretenberg::fr; - // NOTE: as a WIP, this test may not actually use the Grumpkin SRS (just the IPA PCS). 
- - auto circuit_builder = proof_system::UltraCircuitBuilder(); - - uint32_t left_value = engine.get_random_uint32(); - uint32_t right_value = engine.get_random_uint32(); - - fr left_witness_value = fr{ left_value, 0, 0, 0 }.to_montgomery_form(); - fr right_witness_value = fr{ right_value, 0, 0, 0 }.to_montgomery_form(); - - uint32_t left_witness_index = circuit_builder.add_variable(left_witness_value); - uint32_t right_witness_index = circuit_builder.add_variable(right_witness_value); - - uint32_t xor_result_expected = left_value ^ right_value; - - const auto lookup_accumulators = plookup::get_lookup_accumulators( - plookup::MultiTableId::UINT32_XOR, left_witness_value, right_witness_value, true); - auto xor_result = lookup_accumulators[plookup::ColumnIdx::C3] - [0]; // The zeroth index in the 3rd column is the fully accumulated xor - - EXPECT_EQ(xor_result, xor_result_expected); - circuit_builder.create_gates_from_plookup_accumulators( - plookup::MultiTableId::UINT32_XOR, lookup_accumulators, left_witness_index, right_witness_index); - - barretenberg::srs::init_crs_factory("../srs_db/ignition"); - auto composer = UltraGrumpkinComposer(); - prove_and_verify(circuit_builder, composer, /*expected_result=*/true); -} } // namespace test_ultra_honk_composer \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/honk/flavor/ecc_vm.hpp b/barretenberg/cpp/src/barretenberg/honk/flavor/ecc_vm.hpp index b061663e2705..9db2e22d30f4 100644 --- a/barretenberg/cpp/src/barretenberg/honk/flavor/ecc_vm.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/flavor/ecc_vm.hpp @@ -76,8 +76,8 @@ template class ECCVMBa // static_assert(instantiate_barycentric_utils()); // define the containers for storing the contributions from each relation in Sumcheck - using RelationUnivariates = decltype(create_relation_univariates_container()); - using RelationValues = decltype(create_relation_values_container()); + using TupleOfTuplesOfUnivariates = 
decltype(create_relation_univariates_container()); + using TupleOfArraysOfValues = decltype(create_relation_values_container()); private: /** @@ -105,82 +105,82 @@ template class ECCVMBa class WitnessEntities : public WitnessEntities_ { public: // clang-format off - DataType& transcript_add = std::get<0>(this->_data); - DataType& transcript_mul = std::get<1>(this->_data); - DataType& transcript_eq = std::get<2>(this->_data); - DataType& transcript_collision_check = std::get<3>(this->_data); - DataType& transcript_msm_transition = std::get<4>(this->_data); - DataType& transcript_pc = std::get<5>(this->_data); - DataType& transcript_msm_count = std::get<6>(this->_data); - DataType& transcript_x = std::get<7>(this->_data); - DataType& transcript_y = std::get<8>(this->_data); - DataType& transcript_z1 = std::get<9>(this->_data); - DataType& transcript_z2 = std::get<10>(this->_data); - DataType& transcript_z1zero = std::get<11>(this->_data); - DataType& transcript_z2zero = std::get<12>(this->_data); - DataType& transcript_op = std::get<13>(this->_data); - DataType& transcript_accumulator_x = std::get<14>(this->_data); - DataType& transcript_accumulator_y = std::get<15>(this->_data); - DataType& transcript_msm_x = std::get<16>(this->_data); - DataType& transcript_msm_y = std::get<17>(this->_data); - DataType& precompute_pc = std::get<18>(this->_data); - DataType& precompute_point_transition = std::get<19>(this->_data); - DataType& precompute_round = std::get<20>(this->_data); - DataType& precompute_scalar_sum = std::get<21>(this->_data); - DataType& precompute_s1hi = std::get<22>(this->_data); - DataType& precompute_s1lo = std::get<23>(this->_data); - DataType& precompute_s2hi = std::get<24>(this->_data); - DataType& precompute_s2lo = std::get<25>(this->_data); - DataType& precompute_s3hi = std::get<26>(this->_data); - DataType& precompute_s3lo = std::get<27>(this->_data); - DataType& precompute_s4hi = std::get<28>(this->_data); - DataType& precompute_s4lo = 
std::get<29>(this->_data); - DataType& precompute_skew = std::get<30>(this->_data); - DataType& precompute_dx = std::get<31>(this->_data); - DataType& precompute_dy = std::get<32>(this->_data); - DataType& precompute_tx = std::get<33>(this->_data); - DataType& precompute_ty = std::get<34>(this->_data); - DataType& msm_transition = std::get<35>(this->_data); - DataType& msm_add = std::get<36>(this->_data); - DataType& msm_double = std::get<37>(this->_data); - DataType& msm_skew = std::get<38>(this->_data); - DataType& msm_accumulator_x = std::get<39>(this->_data); - DataType& msm_accumulator_y = std::get<40>(this->_data); - DataType& msm_pc = std::get<41>(this->_data); - DataType& msm_size_of_msm = std::get<42>(this->_data); - DataType& msm_count = std::get<43>(this->_data); - DataType& msm_round = std::get<44>(this->_data); - DataType& msm_add1 = std::get<45>(this->_data); - DataType& msm_add2 = std::get<46>(this->_data); - DataType& msm_add3 = std::get<47>(this->_data); - DataType& msm_add4 = std::get<48>(this->_data); - DataType& msm_x1 = std::get<49>(this->_data); - DataType& msm_y1 = std::get<50>(this->_data); - DataType& msm_x2 = std::get<51>(this->_data); - DataType& msm_y2 = std::get<52>(this->_data); - DataType& msm_x3 = std::get<53>(this->_data); - DataType& msm_y3 = std::get<54>(this->_data); - DataType& msm_x4 = std::get<55>(this->_data); - DataType& msm_y4 = std::get<56>(this->_data); - DataType& msm_collision_x1 = std::get<57>(this->_data); - DataType& msm_collision_x2 = std::get<58>(this->_data); - DataType& msm_collision_x3 = std::get<59>(this->_data); - DataType& msm_collision_x4 = std::get<60>(this->_data); - DataType& msm_lambda1 = std::get<61>(this->_data); - DataType& msm_lambda2 = std::get<62>(this->_data); - DataType& msm_lambda3 = std::get<63>(this->_data); - DataType& msm_lambda4 = std::get<64>(this->_data); - DataType& msm_slice1 = std::get<65>(this->_data); - DataType& msm_slice2 = std::get<66>(this->_data); - DataType& msm_slice3 = 
std::get<67>(this->_data); - DataType& msm_slice4 = std::get<68>(this->_data); - DataType& transcript_accumulator_empty = std::get<69>(this->_data); - DataType& transcript_reset_accumulator = std::get<70>(this->_data); - DataType& precompute_select = std::get<71>(this->_data); - DataType& lookup_read_counts_0 = std::get<72>(this->_data); - DataType& lookup_read_counts_1 = std::get<73>(this->_data); - DataType& z_perm = std::get<74>(this->_data); - DataType& lookup_inverses = std::get<75>(this->_data); + DataType& transcript_add = std::get<0>(this->_data); + DataType& transcript_mul = std::get<1>(this->_data); + DataType& transcript_eq = std::get<2>(this->_data); + DataType& transcript_collision_check = std::get<3>(this->_data); + DataType& transcript_msm_transition = std::get<4>(this->_data); + DataType& transcript_pc = std::get<5>(this->_data); + DataType& transcript_msm_count = std::get<6>(this->_data); + DataType& transcript_x = std::get<7>(this->_data); + DataType& transcript_y = std::get<8>(this->_data); + DataType& transcript_z1 = std::get<9>(this->_data); + DataType& transcript_z2 = std::get<10>(this->_data); + DataType& transcript_z1zero = std::get<11>(this->_data); + DataType& transcript_z2zero = std::get<12>(this->_data); + DataType& transcript_op = std::get<13>(this->_data); + DataType& transcript_accumulator_x = std::get<14>(this->_data); + DataType& transcript_accumulator_y = std::get<15>(this->_data); + DataType& transcript_msm_x = std::get<16>(this->_data); + DataType& transcript_msm_y = std::get<17>(this->_data); + DataType& precompute_pc = std::get<18>(this->_data); + DataType& precompute_point_transition = std::get<19>(this->_data); + DataType& precompute_round = std::get<20>(this->_data); + DataType& precompute_scalar_sum = std::get<21>(this->_data); + DataType& precompute_s1hi = std::get<22>(this->_data); + DataType& precompute_s1lo = std::get<23>(this->_data); + DataType& precompute_s2hi = std::get<24>(this->_data); + DataType& precompute_s2lo 
= std::get<25>(this->_data); + DataType& precompute_s3hi = std::get<26>(this->_data); + DataType& precompute_s3lo = std::get<27>(this->_data); + DataType& precompute_s4hi = std::get<28>(this->_data); + DataType& precompute_s4lo = std::get<29>(this->_data); + DataType& precompute_skew = std::get<30>(this->_data); + DataType& precompute_dx = std::get<31>(this->_data); + DataType& precompute_dy = std::get<32>(this->_data); + DataType& precompute_tx = std::get<33>(this->_data); + DataType& precompute_ty = std::get<34>(this->_data); + DataType& msm_transition = std::get<35>(this->_data); + DataType& msm_add = std::get<36>(this->_data); + DataType& msm_double = std::get<37>(this->_data); + DataType& msm_skew = std::get<38>(this->_data); + DataType& msm_accumulator_x = std::get<39>(this->_data); + DataType& msm_accumulator_y = std::get<40>(this->_data); + DataType& msm_pc = std::get<41>(this->_data); + DataType& msm_size_of_msm = std::get<42>(this->_data); + DataType& msm_count = std::get<43>(this->_data); + DataType& msm_round = std::get<44>(this->_data); + DataType& msm_add1 = std::get<45>(this->_data); + DataType& msm_add2 = std::get<46>(this->_data); + DataType& msm_add3 = std::get<47>(this->_data); + DataType& msm_add4 = std::get<48>(this->_data); + DataType& msm_x1 = std::get<49>(this->_data); + DataType& msm_y1 = std::get<50>(this->_data); + DataType& msm_x2 = std::get<51>(this->_data); + DataType& msm_y2 = std::get<52>(this->_data); + DataType& msm_x3 = std::get<53>(this->_data); + DataType& msm_y3 = std::get<54>(this->_data); + DataType& msm_x4 = std::get<55>(this->_data); + DataType& msm_y4 = std::get<56>(this->_data); + DataType& msm_collision_x1 = std::get<57>(this->_data); + DataType& msm_collision_x2 = std::get<58>(this->_data); + DataType& msm_collision_x3 = std::get<59>(this->_data); + DataType& msm_collision_x4 = std::get<60>(this->_data); + DataType& msm_lambda1 = std::get<61>(this->_data); + DataType& msm_lambda2 = std::get<62>(this->_data); + DataType& 
msm_lambda3 = std::get<63>(this->_data); + DataType& msm_lambda4 = std::get<64>(this->_data); + DataType& msm_slice1 = std::get<65>(this->_data); + DataType& msm_slice2 = std::get<66>(this->_data); + DataType& msm_slice3 = std::get<67>(this->_data); + DataType& msm_slice4 = std::get<68>(this->_data); + DataType& transcript_accumulator_empty = std::get<69>(this->_data); + DataType& transcript_reset_accumulator = std::get<70>(this->_data); + DataType& precompute_select = std::get<71>(this->_data); + DataType& lookup_read_counts_0 = std::get<72>(this->_data); + DataType& lookup_read_counts_1 = std::get<73>(this->_data); + DataType& z_perm = std::get<74>(this->_data); + DataType& lookup_inverses = std::get<75>(this->_data); // clang-format on std::vector get_wires() override @@ -279,111 +279,111 @@ template class ECCVMBa class AllEntities : public AllEntities_ { public: // clang-format off - DataType& lagrange_first = std::get<0>(this->_data); - DataType& lagrange_second = std::get<1>(this->_data); - DataType& lagrange_last = std::get<2>(this->_data); - DataType& transcript_add = std::get<3>(this->_data); - DataType& transcript_mul = std::get<4>(this->_data); - DataType& transcript_eq = std::get<5>(this->_data); - DataType& transcript_collision_check = std::get<6>(this->_data); - DataType& transcript_msm_transition = std::get<7>(this->_data); - DataType& transcript_pc = std::get<8>(this->_data); - DataType& transcript_msm_count = std::get<9>(this->_data); - DataType& transcript_x = std::get<10>(this->_data); - DataType& transcript_y = std::get<11>(this->_data); - DataType& transcript_z1 = std::get<12>(this->_data); - DataType& transcript_z2 = std::get<13>(this->_data); - DataType& transcript_z1zero = std::get<14>(this->_data); - DataType& transcript_z2zero = std::get<15>(this->_data); - DataType& transcript_op = std::get<16>(this->_data); - DataType& transcript_accumulator_x = std::get<17>(this->_data); - DataType& transcript_accumulator_y = std::get<18>(this->_data); 
- DataType& transcript_msm_x = std::get<19>(this->_data); - DataType& transcript_msm_y = std::get<20>(this->_data); - DataType& precompute_pc = std::get<21>(this->_data); - DataType& precompute_point_transition = std::get<22>(this->_data); - DataType& precompute_round = std::get<23>(this->_data); - DataType& precompute_scalar_sum = std::get<24>(this->_data); - DataType& precompute_s1hi = std::get<25>(this->_data); - DataType& precompute_s1lo = std::get<26>(this->_data); - DataType& precompute_s2hi = std::get<27>(this->_data); - DataType& precompute_s2lo = std::get<28>(this->_data); - DataType& precompute_s3hi = std::get<29>(this->_data); - DataType& precompute_s3lo = std::get<30>(this->_data); - DataType& precompute_s4hi = std::get<31>(this->_data); - DataType& precompute_s4lo = std::get<32>(this->_data); - DataType& precompute_skew = std::get<33>(this->_data); - DataType& precompute_dx = std::get<34>(this->_data); - DataType& precompute_dy = std::get<35>(this->_data); - DataType& precompute_tx = std::get<36>(this->_data); - DataType& precompute_ty = std::get<37>(this->_data); - DataType& msm_transition = std::get<38>(this->_data); - DataType& msm_add = std::get<39>(this->_data); - DataType& msm_double = std::get<40>(this->_data); - DataType& msm_skew = std::get<41>(this->_data); - DataType& msm_accumulator_x = std::get<42>(this->_data); - DataType& msm_accumulator_y = std::get<43>(this->_data); - DataType& msm_pc = std::get<44>(this->_data); - DataType& msm_size_of_msm = std::get<45>(this->_data); - DataType& msm_count = std::get<46>(this->_data); - DataType& msm_round = std::get<47>(this->_data); - DataType& msm_add1 = std::get<48>(this->_data); - DataType& msm_add2 = std::get<49>(this->_data); - DataType& msm_add3 = std::get<50>(this->_data); - DataType& msm_add4 = std::get<51>(this->_data); - DataType& msm_x1 = std::get<52>(this->_data); - DataType& msm_y1 = std::get<53>(this->_data); - DataType& msm_x2 = std::get<54>(this->_data); - DataType& msm_y2 = 
std::get<55>(this->_data); - DataType& msm_x3 = std::get<56>(this->_data); - DataType& msm_y3 = std::get<57>(this->_data); - DataType& msm_x4 = std::get<58>(this->_data); - DataType& msm_y4 = std::get<59>(this->_data); - DataType& msm_collision_x1 = std::get<60>(this->_data); - DataType& msm_collision_x2 = std::get<61>(this->_data); - DataType& msm_collision_x3 = std::get<62>(this->_data); - DataType& msm_collision_x4 = std::get<63>(this->_data); - DataType& msm_lambda1 = std::get<64>(this->_data); - DataType& msm_lambda2 = std::get<65>(this->_data); - DataType& msm_lambda3 = std::get<66>(this->_data); - DataType& msm_lambda4 = std::get<67>(this->_data); - DataType& msm_slice1 = std::get<68>(this->_data); - DataType& msm_slice2 = std::get<69>(this->_data); - DataType& msm_slice3 = std::get<70>(this->_data); - DataType& msm_slice4 = std::get<71>(this->_data); - DataType& transcript_accumulator_empty = std::get<72>(this->_data); - DataType& transcript_reset_accumulator = std::get<73>(this->_data); - DataType& precompute_select = std::get<74>(this->_data); - DataType& lookup_read_counts_0 = std::get<75>(this->_data); - DataType& lookup_read_counts_1 = std::get<76>(this->_data); - DataType& z_perm = std::get<77>(this->_data); - DataType& lookup_inverses = std::get<78>(this->_data); - DataType& transcript_mul_shift = std::get<79>(this->_data); - DataType& transcript_msm_count_shift = std::get<80>(this->_data); - DataType& transcript_accumulator_x_shift = std::get<81>(this->_data); - DataType& transcript_accumulator_y_shift = std::get<82>(this->_data); - DataType& precompute_scalar_sum_shift = std::get<83>(this->_data); - DataType& precompute_s1hi_shift = std::get<84>(this->_data); - DataType& precompute_dx_shift = std::get<85>(this->_data); - DataType& precompute_dy_shift = std::get<86>(this->_data); - DataType& precompute_tx_shift = std::get<87>(this->_data); - DataType& precompute_ty_shift = std::get<88>(this->_data); - DataType& msm_transition_shift = 
std::get<89>(this->_data); - DataType& msm_add_shift = std::get<90>(this->_data); - DataType& msm_double_shift = std::get<91>(this->_data); - DataType& msm_skew_shift = std::get<92>(this->_data); - DataType& msm_accumulator_x_shift = std::get<93>(this->_data); - DataType& msm_accumulator_y_shift = std::get<94>(this->_data); - DataType& msm_count_shift = std::get<95>(this->_data); - DataType& msm_round_shift = std::get<96>(this->_data); - DataType& msm_add1_shift = std::get<97>(this->_data); - DataType& msm_pc_shift = std::get<98>(this->_data); - DataType& precompute_pc_shift = std::get<99>(this->_data); - DataType& transcript_pc_shift = std::get<100>(this->_data); - DataType& precompute_round_shift = std::get<101>(this->_data); - DataType& transcript_accumulator_empty_shift= std::get<102>(this->_data); - DataType& precompute_select_shift = std::get<103>(this->_data); - DataType& z_perm_shift = std::get<104>(this->_data); + DataType& lagrange_first = std::get<0>(this->_data); + DataType& lagrange_second = std::get<1>(this->_data); + DataType& lagrange_last = std::get<2>(this->_data); + DataType& transcript_add = std::get<3>(this->_data); + DataType& transcript_mul = std::get<4>(this->_data); + DataType& transcript_eq = std::get<5>(this->_data); + DataType& transcript_collision_check = std::get<6>(this->_data); + DataType& transcript_msm_transition = std::get<7>(this->_data); + DataType& transcript_pc = std::get<8>(this->_data); + DataType& transcript_msm_count = std::get<9>(this->_data); + DataType& transcript_x = std::get<10>(this->_data); + DataType& transcript_y = std::get<11>(this->_data); + DataType& transcript_z1 = std::get<12>(this->_data); + DataType& transcript_z2 = std::get<13>(this->_data); + DataType& transcript_z1zero = std::get<14>(this->_data); + DataType& transcript_z2zero = std::get<15>(this->_data); + DataType& transcript_op = std::get<16>(this->_data); + DataType& transcript_accumulator_x = std::get<17>(this->_data); + DataType& 
transcript_accumulator_y = std::get<18>(this->_data); + DataType& transcript_msm_x = std::get<19>(this->_data); + DataType& transcript_msm_y = std::get<20>(this->_data); + DataType& precompute_pc = std::get<21>(this->_data); + DataType& precompute_point_transition = std::get<22>(this->_data); + DataType& precompute_round = std::get<23>(this->_data); + DataType& precompute_scalar_sum = std::get<24>(this->_data); + DataType& precompute_s1hi = std::get<25>(this->_data); + DataType& precompute_s1lo = std::get<26>(this->_data); + DataType& precompute_s2hi = std::get<27>(this->_data); + DataType& precompute_s2lo = std::get<28>(this->_data); + DataType& precompute_s3hi = std::get<29>(this->_data); + DataType& precompute_s3lo = std::get<30>(this->_data); + DataType& precompute_s4hi = std::get<31>(this->_data); + DataType& precompute_s4lo = std::get<32>(this->_data); + DataType& precompute_skew = std::get<33>(this->_data); + DataType& precompute_dx = std::get<34>(this->_data); + DataType& precompute_dy = std::get<35>(this->_data); + DataType& precompute_tx = std::get<36>(this->_data); + DataType& precompute_ty = std::get<37>(this->_data); + DataType& msm_transition = std::get<38>(this->_data); + DataType& msm_add = std::get<39>(this->_data); + DataType& msm_double = std::get<40>(this->_data); + DataType& msm_skew = std::get<41>(this->_data); + DataType& msm_accumulator_x = std::get<42>(this->_data); + DataType& msm_accumulator_y = std::get<43>(this->_data); + DataType& msm_pc = std::get<44>(this->_data); + DataType& msm_size_of_msm = std::get<45>(this->_data); + DataType& msm_count = std::get<46>(this->_data); + DataType& msm_round = std::get<47>(this->_data); + DataType& msm_add1 = std::get<48>(this->_data); + DataType& msm_add2 = std::get<49>(this->_data); + DataType& msm_add3 = std::get<50>(this->_data); + DataType& msm_add4 = std::get<51>(this->_data); + DataType& msm_x1 = std::get<52>(this->_data); + DataType& msm_y1 = std::get<53>(this->_data); + DataType& msm_x2 = 
std::get<54>(this->_data); + DataType& msm_y2 = std::get<55>(this->_data); + DataType& msm_x3 = std::get<56>(this->_data); + DataType& msm_y3 = std::get<57>(this->_data); + DataType& msm_x4 = std::get<58>(this->_data); + DataType& msm_y4 = std::get<59>(this->_data); + DataType& msm_collision_x1 = std::get<60>(this->_data); + DataType& msm_collision_x2 = std::get<61>(this->_data); + DataType& msm_collision_x3 = std::get<62>(this->_data); + DataType& msm_collision_x4 = std::get<63>(this->_data); + DataType& msm_lambda1 = std::get<64>(this->_data); + DataType& msm_lambda2 = std::get<65>(this->_data); + DataType& msm_lambda3 = std::get<66>(this->_data); + DataType& msm_lambda4 = std::get<67>(this->_data); + DataType& msm_slice1 = std::get<68>(this->_data); + DataType& msm_slice2 = std::get<69>(this->_data); + DataType& msm_slice3 = std::get<70>(this->_data); + DataType& msm_slice4 = std::get<71>(this->_data); + DataType& transcript_accumulator_empty = std::get<72>(this->_data); + DataType& transcript_reset_accumulator = std::get<73>(this->_data); + DataType& precompute_select = std::get<74>(this->_data); + DataType& lookup_read_counts_0 = std::get<75>(this->_data); + DataType& lookup_read_counts_1 = std::get<76>(this->_data); + DataType& z_perm = std::get<77>(this->_data); + DataType& lookup_inverses = std::get<78>(this->_data); + DataType& transcript_mul_shift = std::get<79>(this->_data); + DataType& transcript_msm_count_shift = std::get<80>(this->_data); + DataType& transcript_accumulator_x_shift = std::get<81>(this->_data); + DataType& transcript_accumulator_y_shift = std::get<82>(this->_data); + DataType& precompute_scalar_sum_shift = std::get<83>(this->_data); + DataType& precompute_s1hi_shift = std::get<84>(this->_data); + DataType& precompute_dx_shift = std::get<85>(this->_data); + DataType& precompute_dy_shift = std::get<86>(this->_data); + DataType& precompute_tx_shift = std::get<87>(this->_data); + DataType& precompute_ty_shift = std::get<88>(this->_data); + 
DataType& msm_transition_shift = std::get<89>(this->_data); + DataType& msm_add_shift = std::get<90>(this->_data); + DataType& msm_double_shift = std::get<91>(this->_data); + DataType& msm_skew_shift = std::get<92>(this->_data); + DataType& msm_accumulator_x_shift = std::get<93>(this->_data); + DataType& msm_accumulator_y_shift = std::get<94>(this->_data); + DataType& msm_count_shift = std::get<95>(this->_data); + DataType& msm_round_shift = std::get<96>(this->_data); + DataType& msm_add1_shift = std::get<97>(this->_data); + DataType& msm_pc_shift = std::get<98>(this->_data); + DataType& precompute_pc_shift = std::get<99>(this->_data); + DataType& transcript_pc_shift = std::get<100>(this->_data); + DataType& precompute_round_shift = std::get<101>(this->_data); + DataType& transcript_accumulator_empty_shift = std::get<102>(this->_data); + DataType& precompute_select_shift = std::get<103>(this->_data); + DataType& z_perm_shift = std::get<104>(this->_data); template [[nodiscard]] const DataType& lookup_read_counts() const @@ -649,19 +649,46 @@ template class ECCVMBa */ using VerificationKey = VerificationKey_>; - /** - * @brief A container for polynomials handles; only stores spans. - */ - using ProverPolynomials = AllEntities; - /** * @brief A container for polynomials produced after the first round of sumcheck. * @todo TODO(#394) Use polynomial classes for guaranteed memory alignment. */ using FoldedPolynomials = AllEntities, PolynomialHandle>; - using RawPolynomials = AllEntities; + /** + * @brief A field element for each entity of the flavor. + */ + class AllValues : public AllEntities { + public: + using Base = AllEntities; + using Base::Base; + AllValues(std::array _data_in) { this->_data = _data_in; } + }; + /** + * @brief An owning container of polynomials. + * @warning When this was introduced it broke some of our design principles. 
+ * - Execution trace builders don't handle "polynomials" because the interpretation of the execution trace columns + * as polynomials is a detail of the proving system, and trace builders are (sometimes in practice, always in + * principle) reusable for different proving protocols (e.g., Plonk and Honk). + * - Polynomial storage is handled by key classes. Polynomials aren't moved, but are accessed elsewhere by + * std::spans. + * + * We will consider revising this data model: TODO(https://github.com/AztecProtocol/barretenberg/issues/743) + */ + class AllPolynomials : public AllEntities { + public: + AllValues get_row(const size_t row_idx) + { + AllValues result; + size_t column_idx = 0; // TODO(https://github.com/AztecProtocol/barretenberg/issues/391) zip + for (auto& column : this->_data) { + result[column_idx] = column[row_idx]; + column_idx++; + } + return result; + } + }; /** * @brief A container for polynomials produced after the first round of sumcheck. * @todo TODO(#394) Use polynomial classes for guaranteed memory alignment. @@ -693,18 +720,24 @@ template class ECCVMBa barretenberg::Univariate>; /** - * @brief A container storing evaluations of all prover polynomials at one point. - * In sumcheck, this data structure represents the evaluations produced during sumcheck, which are claimed to be the - * evaluations of prover polynomials commited in earilier rounds - * In ProtoGalaxy, it's used to store the evaluations for each point on the boolean hypercurbe, so one - * ProverPolynomailsEvaluation object represents evaluations of one row in the execution trace. - * + * @brief A container for polynomials handles of the prover; only stores spans. 
*/ - class ProverPolynomialsEvaluations : public AllEntities { + class ProverPolynomials : public AllEntities { public: - using Base = AllEntities; - using Base::Base; - ProverPolynomialsEvaluations(std::array _data_in) { this->_data = _data_in; } + /** + * @brief Returns the evaluations of all prover polynomials at one point, which represents one row in the + * execution trace. + */ + AllValues get_row(const size_t row_idx) + { + AllValues result; + size_t column_idx = 0; // TODO(https://github.com/AztecProtocol/barretenberg/issues/391) zip + for (auto& column : this->_data) { + result[column_idx] = column[row_idx]; + column_idx++; + } + return result; + } }; /** diff --git a/barretenberg/cpp/src/barretenberg/honk/flavor/flavor.test.cpp b/barretenberg/cpp/src/barretenberg/honk/flavor/flavor.test.cpp index d574dd9563b2..32395c746cc3 100644 --- a/barretenberg/cpp/src/barretenberg/honk/flavor/flavor.test.cpp +++ b/barretenberg/cpp/src/barretenberg/honk/flavor/flavor.test.cpp @@ -34,7 +34,7 @@ TEST(Flavor, Getters) Flavor::VerificationKey verification_key; Flavor::ProverPolynomials prover_polynomials; Flavor::ExtendedEdges edges; - Flavor::ProverPolynomialsEvaluations evals; + Flavor::AllValues evals; Flavor::CommitmentLabels commitment_labels; // Globals are also available through STL container sizes @@ -131,4 +131,23 @@ TEST(Flavor, AllEntitiesSpecialMemberFunctions) ASSERT_EQ(random_poly, polynomials_C.w_l); } +TEST(Flavor, GetRow) +{ + using Flavor = proof_system::honk::flavor::Ultra; + using FF = typename Flavor::FF; + std::array, Flavor::NUM_ALL_ENTITIES> data; + std::generate(data.begin(), data.end(), []() { + return std::vector({ FF::random_element(), FF::random_element() }); + }); + Flavor::ProverPolynomials prover_polynomials; + size_t poly_idx = 0; + for (auto& poly : prover_polynomials) { + poly = data[poly_idx]; + poly_idx++; + } + auto row0 = prover_polynomials.get_row(0); + auto row1 = prover_polynomials.get_row(1); + EXPECT_EQ(row0.q_elliptic, 
prover_polynomials.q_elliptic[0]); + EXPECT_EQ(row1.w_4_shift, prover_polynomials.w_4_shift[1]); +} } // namespace proof_system::test_flavor diff --git a/barretenberg/cpp/src/barretenberg/honk/flavor/goblin_ultra.hpp b/barretenberg/cpp/src/barretenberg/honk/flavor/goblin_ultra.hpp index 374fa33521a4..8bce02d0c965 100644 --- a/barretenberg/cpp/src/barretenberg/honk/flavor/goblin_ultra.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/flavor/goblin_ultra.hpp @@ -59,8 +59,8 @@ class GoblinUltra { static constexpr size_t NUM_RELATIONS = std::tuple_size::value; // define the container for storing the univariate contribution from each relation in Sumcheck - using RelationUnivariates = decltype(create_relation_univariates_container()); - using RelationValues = decltype(create_relation_values_container()); + using TupleOfTuplesOfUnivariates = decltype(create_relation_univariates_container()); + using TupleOfArraysOfValues = decltype(create_relation_values_container()); // Whether or not the first row of the execution trace is reserved for 0s to enable shifts static constexpr bool has_zero_row = true; @@ -288,8 +288,6 @@ class GoblinUltra { size_t num_ecc_op_gates; // needed to determine public input offset - std::shared_ptr op_queue; - // The plookup wires that store plookup read data. std::array get_table_column_wires() { return { w_l, w_r, w_o }; }; }; @@ -304,11 +302,6 @@ class GoblinUltra { */ using VerificationKey = VerificationKey_>; - /** - * @brief A container for polynomials handles; only stores spans. - */ - using ProverPolynomials = AllEntities; - /** * @brief A container for storing the partially evaluated multivariates produced by sumcheck. */ @@ -334,18 +327,31 @@ class GoblinUltra { barretenberg::Univariate>; /** - * @brief A container storing evaluations of all prover polynomials at one point. 
- * In sumcheck, this data structure represents the evaluations produced during sumcheck, which are claimed to be the - * evaluations of prover polynomials commited in earilier rounds - * In protogalaxy, it's used to store the evaluations for each point on the boolean hypercurbe, so one - * ProverPolynomailsEvaluation object represents evaluations of one row in the execution trace. - * + * @brief A field element for each entity of the flavor. These entities represent the prover polynomials evaluated + * at one point. */ - class ProverPolynomialsEvaluations : public AllEntities { + class AllValues : public AllEntities { public: using Base = AllEntities; using Base::Base; - ProverPolynomialsEvaluations(std::array _data_in) { this->_data = _data_in; } + AllValues(std::array _data_in) { this->_data = _data_in; } + }; + + /** + * @brief A container for polynomials handles; only stores spans. + */ + class ProverPolynomials : public AllEntities { + public: + AllValues get_row(const size_t row_idx) + { + AllValues result; + size_t column_idx = 0; // TODO(https://github.com/AztecProtocol/barretenberg/issues/391) zip + for (auto& column : this->_data) { + result[column_idx] = column[row_idx]; + column_idx++; + } + return result; + } }; /** diff --git a/barretenberg/cpp/src/barretenberg/honk/flavor/goblin_ultra_recursive.hpp b/barretenberg/cpp/src/barretenberg/honk/flavor/goblin_ultra_recursive.hpp index a657f08e0219..95d2a70be6b5 100644 --- a/barretenberg/cpp/src/barretenberg/honk/flavor/goblin_ultra_recursive.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/flavor/goblin_ultra_recursive.hpp @@ -85,8 +85,8 @@ template class GoblinUltraRecursive_ { static constexpr size_t NUM_RELATIONS = std::tuple_size::value; // define the container for storing the univariate contribution from each relation in Sumcheck - using RelationUnivariates = decltype(create_relation_univariates_container()); - using RelationValues = decltype(create_relation_values_container()); + using 
TupleOfTuplesOfUnivariates = decltype(create_relation_univariates_container()); + using TupleOfArraysOfValues = decltype(create_relation_values_container()); private: template @@ -343,18 +343,14 @@ template class GoblinUltraRecursive_ { }; /** - * @brief A container storing evaluations of all prover polynomials at one point. - * In sumcheck, this data structure represents the evaluations produced during sumcheck, which are claimed to be the - * evaluations of prover polynomials commited in earilier rounds - * In protogalaxy, it's used to store the evaluations for each point on the boolean hypercurbe, so one - * ProverPolynomailsEvaluation object represents evaluations of one row in the execution trace. - * + * @brief A field element for each entity of the flavor. These entities represent the prover polynomials evaluated + * at one point. */ - class ProverPolynomialsEvaluations : public AllEntities { + class AllValues : public AllEntities { public: using Base = AllEntities; using Base::Base; - ProverPolynomialsEvaluations(std::array _data_in) { this->_data = _data_in; } + AllValues(std::array _data_in) { this->_data = _data_in; } }; /** diff --git a/barretenberg/cpp/src/barretenberg/honk/flavor/ultra.hpp b/barretenberg/cpp/src/barretenberg/honk/flavor/ultra.hpp index 4ff13851d400..7330662bc85d 100644 --- a/barretenberg/cpp/src/barretenberg/honk/flavor/ultra.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/flavor/ultra.hpp @@ -61,8 +61,8 @@ class Ultra { static constexpr size_t NUM_RELATIONS = std::tuple_size::value; // define the container for storing the univariate contribution from each relation in Sumcheck - using RelationUnivariates = decltype(create_relation_univariates_container()); - using RelationValues = decltype(create_relation_values_container()); + using TupleOfTuplesOfUnivariates = decltype(create_relation_univariates_container()); + using TupleOfArraysOfValues = decltype(create_relation_values_container()); // Whether or not the first row of the 
execution trace is reserved for 0s to enable shifts static constexpr bool has_zero_row = true; @@ -271,10 +271,32 @@ class Ultra { */ using VerificationKey = VerificationKey_>; + /** + * @brief A field element for each entity of the flavor. + */ + class AllValues : public AllEntities { + public: + using Base = AllEntities; + using Base::Base; + AllValues(std::array _data_in) { this->_data = _data_in; } + }; + /** * @brief A container for polynomials handles; only stores spans. */ - using ProverPolynomials = AllEntities; + class ProverPolynomials : public AllEntities { + public: + AllValues get_row(const size_t row_idx) + { + AllValues result; + size_t column_idx = 0; // TODO(https://github.com/AztecProtocol/barretenberg/issues/391) zip + for (auto& column : this->_data) { + result[column_idx] = column[row_idx]; + column_idx++; + } + return result; + } + }; /** * @brief A container for storing the partially evaluated multivariates produced by sumcheck. @@ -300,21 +322,6 @@ class Ultra { using ExtendedEdges = AllEntities, barretenberg::Univariate>; - /** - * @brief A container storing evaluations of all prover polynomials at one point. - * In sumcheck, this data structure represents the evaluations produced during sumcheck, which are claimed to be the - * evaluations of prover polynomials commited in earilier rounds - * In protogalaxy, it's used to store the evaluations for each point on the boolean hypercurbe, so one - * ProverPolynomailsEvaluation object represents evaluations of one row in the execution trace. - * - */ - class ProverPolynomialsEvaluations : public AllEntities { - public: - using Base = AllEntities; - using Base::Base; - ProverPolynomialsEvaluations(std::array _data_in) { this->_data = _data_in; } - }; - /** * @brief A container for commitment labels. * @note It's debatable whether this should inherit from AllEntities. since most entries are not strictly needed. 
It diff --git a/barretenberg/cpp/src/barretenberg/honk/flavor/ultra_recursive.hpp b/barretenberg/cpp/src/barretenberg/honk/flavor/ultra_recursive.hpp index 63479fa06352..bbe9b187dca3 100644 --- a/barretenberg/cpp/src/barretenberg/honk/flavor/ultra_recursive.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/flavor/ultra_recursive.hpp @@ -84,8 +84,8 @@ template class UltraRecursive_ { static constexpr size_t NUM_RELATIONS = std::tuple_size::value; // define the container for storing the univariate contribution from each relation in Sumcheck - using RelationUnivariates = decltype(create_relation_univariates_container()); - using RelationValues = decltype(create_relation_values_container()); + using TupleOfTuplesOfUnivariates = decltype(create_relation_univariates_container()); + using TupleOfArraysOfValues = decltype(create_relation_values_container()); private: template @@ -308,18 +308,14 @@ template class UltraRecursive_ { }; /** - * @brief A container storing evaluations of all prover polynomials at one point. - * In sumcheck, this data structure represents the evaluations produced during sumcheck, which are claimed to be the - * evaluations of prover polynomials commited in earilier rounds - * In protogalaxy, it's used to store the evaluations for each point on the boolean hypercurbe, so one - * ProverPolynomailsEvaluation object represents evaluations of one row in the execution trace. - * + * @brief A field element for each entity of the flavor. These entities represent the prover polynomials evaluated + * at one point. 
*/ - class ProverPolynomialsEvaluations : public AllEntities { + class AllValues : public AllEntities { public: using Base = AllEntities; using Base::Base; - ProverPolynomialsEvaluations(std::array _data_in) { this->_data = _data_in; } + AllValues(std::array _data_in) { this->_data = _data_in; } }; /** diff --git a/barretenberg/cpp/src/barretenberg/honk/instance/prover_instance.cpp b/barretenberg/cpp/src/barretenberg/honk/instance/prover_instance.cpp index 961548373993..8efda159e461 100644 --- a/barretenberg/cpp/src/barretenberg/honk/instance/prover_instance.cpp +++ b/barretenberg/cpp/src/barretenberg/honk/instance/prover_instance.cpp @@ -249,7 +249,6 @@ std::shared_ptr ProverInstance_::compute_pr if constexpr (IsGoblinFlavor) { proving_key->num_ecc_op_gates = num_ecc_op_gates; - proving_key->op_queue = circuit.op_queue; } return proving_key; @@ -477,7 +476,6 @@ std::shared_ptr ProverInstance_::compu } template class ProverInstance_; -template class ProverInstance_; template class ProverInstance_; } // namespace proof_system::honk \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/honk/instance/prover_instance.hpp b/barretenberg/cpp/src/barretenberg/honk/instance/prover_instance.hpp index 85836331169a..0edfc45ff6ca 100644 --- a/barretenberg/cpp/src/barretenberg/honk/instance/prover_instance.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/instance/prover_instance.hpp @@ -1,7 +1,6 @@ #pragma once #include "barretenberg/honk/flavor/goblin_ultra.hpp" #include "barretenberg/honk/flavor/ultra.hpp" -#include "barretenberg/honk/flavor/ultra_grumpkin.hpp" #include "barretenberg/honk/proof_system/folding_result.hpp" #include "barretenberg/proof_system/composer/composer_lib.hpp" #include "barretenberg/proof_system/flavor/flavor.hpp" @@ -45,7 +44,7 @@ template class ProverInstance_ { proof_system::RelationParameters relation_parameters; std::vector recursive_proof_public_input_indices; // non-empty for the accumulated instances - FoldingParameters 
folding_params; + FoldingParameters folding_parameters; ProverInstance_(Circuit& circuit) { @@ -59,7 +58,7 @@ template class ProverInstance_ { , prover_polynomials(result.folded_prover_polynomials) , public_inputs(result.folded_public_inputs) , relation_parameters(result.folded_relation_parameters) - , folding_params(result.folding_parameters){}; + , folding_parameters(result.folding_parameters){}; ~ProverInstance_() = default; @@ -99,7 +98,6 @@ template class ProverInstance_ { }; extern template class ProverInstance_; -extern template class ProverInstance_; extern template class ProverInstance_; } // namespace proof_system::honk \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/honk/instance/prover_instance.test.cpp b/barretenberg/cpp/src/barretenberg/honk/instance/prover_instance.test.cpp index 593a166b885f..ff5d41102fb8 100644 --- a/barretenberg/cpp/src/barretenberg/honk/instance/prover_instance.test.cpp +++ b/barretenberg/cpp/src/barretenberg/honk/instance/prover_instance.test.cpp @@ -88,7 +88,7 @@ template class InstanceTests : public testing::Test { }; }; -using FlavorTypes = testing::Types; +using FlavorTypes = testing::Types; TYPED_TEST_SUITE(InstanceTests, FlavorTypes); TYPED_TEST(InstanceTests, SortedListAccumulator) diff --git a/barretenberg/cpp/src/barretenberg/honk/pcs/claim.hpp b/barretenberg/cpp/src/barretenberg/honk/pcs/claim.hpp index 9daeeb707460..05f405494f8b 100644 --- a/barretenberg/cpp/src/barretenberg/honk/pcs/claim.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/pcs/claim.hpp @@ -19,6 +19,20 @@ template class OpeningPair { bool operator==(const OpeningPair& other) const = default; }; +/** + * @brief Polynomial p and an opening pair (r,v) such that p(r) = v + * + * @tparam Params for the given commitment scheme + */ +template class ProverOpeningClaim { + using Fr = typename Curve::ScalarField; + using Polynomial = barretenberg::Polynomial; + + public: + Polynomial polynomial; // p + OpeningPair opening_pair; // 
(challenge r, evaluation v = p(r)) +}; + /** * @brief Unverified claim (C,r,v) for some witness polynomial p(X) such that * - C = Commit(p(X)) diff --git a/barretenberg/cpp/src/barretenberg/honk/pcs/zeromorph/zeromorph.hpp b/barretenberg/cpp/src/barretenberg/honk/pcs/zeromorph/zeromorph.hpp new file mode 100644 index 000000000000..ed3beccf79e2 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/honk/pcs/zeromorph/zeromorph.hpp @@ -0,0 +1,605 @@ +#pragma once +#include "barretenberg/polynomials/polynomial.hpp" + +namespace proof_system::honk::pcs::zeromorph { + +/** + * @brief Compute powers of a given challenge + * + * @tparam FF + * @param challenge + * @param num_powers + * @return std::vector + */ +template inline std::vector powers_of_challenge(const FF challenge, const size_t num_powers) +{ + std::vector challenge_powers = { FF(1), challenge }; + challenge_powers.reserve(num_powers); + for (size_t j = 2; j < num_powers; j++) { + challenge_powers.emplace_back(challenge_powers[j - 1] * challenge); + } + return challenge_powers; +}; + +/** + * @brief Prover for ZeroMorph multilinear PCS + * + * @tparam Curve + */ +template class ZeroMorphProver_ { + using FF = typename Curve::ScalarField; + using Commitment = typename Curve::AffineElement; + using Polynomial = barretenberg::Polynomial; + + // TODO(#742): Set this N_max to be the number of G1 elements in the mocked zeromorph SRS once it's in place. (Then, + // eventually, set it based on the real SRS). For now we set it to be large but more or less arbitrary. 
+ static const size_t N_max = 1 << 22; + + public: + /** + * @brief Compute multivariate quotients q_k(X_0, ..., X_{k-1}) for f(X_0, ..., X_{d-1}) + * @details Given multilinear polynomial f = f(X_0, ..., X_{d-1}) for which f(u) = v, compute q_k such that: + * + * f(X_0, ..., X_{d-1}) - v = \sum_{k=0}^{d-1} (X_k - u_k)q_k(X_0, ..., X_{k-1}) + * + * The polynomials q_k can be computed explicitly as the difference of the partial evaluation of f in the last + * (n - k) variables at, respectively, u'' = (u_k + 1, u_{k+1}, ..., u_{n-1}) and u' = (u_k, ..., u_{n-1}). I.e. + * + * q_k(X_0, ..., X_{k-1}) = f(X_0,...,X_{k-1}, u'') - f(X_0,...,X_{k-1}, u') + * + * @note In practice, 2^d is equal to the circuit size N + * + * TODO(#739): This method has been designed for clarity at the expense of efficiency. Implement the more efficient + * algorithm detailed in the latest versions of the ZeroMorph paper. + * @param polynomial Multilinear polynomial f(X_0, ..., X_{d-1}) + * @param u_challenge Multivariate challenge u = (u_0, ..., u_{d-1}) + * @return std::vector The quotients q_k + */ + static std::vector compute_multilinear_quotients(Polynomial polynomial, std::span u_challenge) + { + size_t log_N = numeric::get_msb(polynomial.size()); + // The size of the multilinear challenge must equal the log of the polynomial size + ASSERT(log_N == u_challenge.size()); + + // Define the vector of quotients q_k, k = 0, ..., log_N-1 + std::vector quotients; + for (size_t k = 0; k < log_N; ++k) { + size_t size = 1 << k; + quotients.emplace_back(Polynomial(size)); // degree 2^k - 1 + } + + // Compute the q_k in reverse order, i.e. 
q_{n-1}, ..., q_0 + for (size_t k = 0; k < log_N; ++k) { + // Define partial evaluation point u' = (u_k, ..., u_{n-1}) + auto evaluation_point_size = static_cast(k + 1); + std::vector u_partial(u_challenge.end() - evaluation_point_size, u_challenge.end()); + + // Compute f' = f(X_0,...,X_{k-1}, u') + auto f_1 = polynomial.partial_evaluate_mle(u_partial); + + // Increment first element to get altered partial evaluation point u'' = (u_k + 1, u_{k+1}, ..., u_{n-1}) + u_partial[0] += 1; + + // Compute f'' = f(X_0,...,X_{k-1}, u'') + auto f_2 = polynomial.partial_evaluate_mle(u_partial); + + // Compute q_k = f''(X_0,...,X_{k-1}) - f'(X_0,...,X_{k-1}) + auto q_k = f_2; + q_k -= f_1; + + quotients[log_N - k - 1] = q_k; + } + + return quotients; + } + + /** + * @brief Construct batched, lifted-degree univariate quotient \hat{q} = \sum_k y^k * X^{N - d_k - 1} * q_k + * @details The purpose of the batched lifted-degree quotient is to reduce the individual degree checks + * deg(q_k) <= 2^k - 1 to a single degree check on \hat{q}. This is done by first shifting each of the q_k to the + * right (i.e. multiplying by an appropriate power of X) so that each is degree N-1, then batching them all together + * using powers of the provided challenge. Note: In practice, we do not actually compute the shifted q_k, we simply + * accumulate them into \hat{q} at the appropriate offset. + * + * @param quotients Polynomials q_k, interpreted as univariates; deg(q_k) = 2^k - 1 + * @param N circuit size + * @return Polynomial + */ + static Polynomial compute_batched_lifted_degree_quotient(std::vector& quotients, + FF y_challenge, + size_t N) + { + // Batched lifted degree quotient polynomial + auto result = Polynomial(N); + + // Compute \hat{q} = \sum_k y^k * X^{N - d_k - 1} * q_k + size_t k = 0; + auto scalar = FF(1); // y^k + for (auto& quotient : quotients) { + // Rather than explicitly computing the shifts of q_k by N - d_k - 1 (i.e. 
multiplying q_k by X^{N - d_k - + // 1}) then accumulating them, we simply accumulate y^k*q_k into \hat{q} at the index offset N - d_k - 1 + auto deg_k = static_cast((1 << k) - 1); + size_t offset = N - deg_k - 1; + for (size_t idx = 0; idx < deg_k + 1; ++idx) { + result[offset + idx] += scalar * quotient[idx]; + } + scalar *= y_challenge; // update batching scalar y^k + k++; + } + + return result; + } + + /** + * @brief Compute partially evaluated degree check polynomial \zeta_x = q - \sum_k y^k * x^{N - d_k - 1} * q_k + * @details Compute \zeta_x, where + * + * \zeta_x = q - \sum_k y^k * x^{N - d_k - 1} * q_k + * + * @param batched_quotient + * @param quotients + * @param y_challenge + * @param x_challenge + * @return Polynomial Degree check polynomial \zeta_x such that \zeta_x(x) = 0 + */ + static Polynomial compute_partially_evaluated_degree_check_polynomial(Polynomial& batched_quotient, + std::vector& quotients, + FF y_challenge, + FF x_challenge) + { + size_t N = batched_quotient.size(); + size_t log_N = quotients.size(); + + // Initialize partially evaluated degree check polynomial \zeta_x to \hat{q} + auto result = batched_quotient; + + auto y_power = FF(1); // y^k + for (size_t k = 0; k < log_N; ++k) { + // Accumulate y^k * x^{N - d_k - 1} * q_k into \hat{q} + auto deg_k = static_cast((1 << k) - 1); + auto x_power = x_challenge.pow(N - deg_k - 1); // x^{N - d_k - 1} + + result.add_scaled(quotients[k], -y_power * x_power); + + y_power *= y_challenge; // update batching scalar y^k + } + + return result; + } + + /** + * @brief Compute partially evaluated zeromorph identity polynomial Z_x + * @details Compute Z_x, where + * + * Z_x = x * f_batched + g_batched - v * x * \Phi_n(x) + * - x * \sum_k (x^{2^k}\Phi_{n-k-1}(x^{2^{k-1}}) - u_k\Phi_{n-k}(x^{2^k})) * q_k + * + * where f_batched = \sum_{i=0}^{m-1}\rho^i*f_i, g_batched = \sum_{i=0}^{l-1}\rho^{m+i}*g_i + * + * @param input_polynomial + * @param quotients + * @param v_evaluation + * @param x_challenge + * 
@return Polynomial + */ + static Polynomial compute_partially_evaluated_zeromorph_identity_polynomial(Polynomial& f_batched, + Polynomial& g_batched, + std::vector& quotients, + FF v_evaluation, + std::span u_challenge, + FF x_challenge) + { + size_t N = f_batched.size(); + size_t log_N = quotients.size(); + + // Initialize Z_x with x * \sum_{i=0}^{m-1} f_i + \sum_{i=0}^{l-1} g_i + auto result = g_batched; + result.add_scaled(f_batched, x_challenge); + + // Compute Z_x -= v * x * \Phi_n(x) + auto phi_numerator = x_challenge.pow(N) - 1; // x^N - 1 + auto phi_n_x = phi_numerator / (x_challenge - 1); + result[0] -= v_evaluation * x_challenge * phi_n_x; + + // Add contribution from q_k polynomials + auto x_power = x_challenge; // x^{2^k} + for (size_t k = 0; k < log_N; ++k) { + x_power = x_challenge.pow(1 << k); // x^{2^k} + + // \Phi_{n-k-1}(x^{2^{k + 1}}) + auto phi_term_1 = phi_numerator / (x_challenge.pow(1 << (k + 1)) - 1); + + // \Phi_{n-k}(x^{2^k}) + auto phi_term_2 = phi_numerator / (x_challenge.pow(1 << k) - 1); + + // x^{2^k} * \Phi_{n-k-1}(x^{2^{k+1}}) - u_k * \Phi_{n-k}(x^{2^k}) + auto scalar = x_power * phi_term_1 - u_challenge[k] * phi_term_2; + + scalar *= x_challenge; + scalar *= FF(-1); + + result.add_scaled(quotients[k], scalar); + } + + return result; + } + + /** + * @brief Compute combined evaluation and degree-check quotient polynomial pi + * @details Compute univariate quotient pi, where + * + * pi = (q_\zeta + z*q_Z) X^{N_{max}-(N-1)}, with q_\zeta = \zeta_x/(X-x), q_Z = Z_x/(X-x) + * + * @param Z_x + * @param zeta_x + * @param x_challenge + * @param z_challenge + * @param N_max + * @return Polynomial + */ + static Polynomial compute_batched_evaluation_and_degree_check_quotient(Polynomial& zeta_x, + Polynomial& Z_x, + FF x_challenge, + FF z_challenge) + { + // We cannot commit to polynomials with size > N_max + size_t N = zeta_x.size(); + ASSERT(N <= N_max); + + // Compute q_{\zeta} and q_Z in place + zeta_x.factor_roots(x_challenge); + 
Z_x.factor_roots(x_challenge); + + // Compute batched quotient q_{\zeta} + z*q_Z + auto batched_quotient = zeta_x; + batched_quotient.add_scaled(Z_x, z_challenge); + + // TODO(#742): To complete the degree check, we need to commit to (q_{\zeta} + z*q_Z)*X^{N_max - N - 1}. + // Verification then requires a pairing check similar to the standard KZG check but with [1]_2 replaced by + // [X^{N_max - N -1}]_2. Two issues: A) we do not have an SRS with these G2 elements (so need to generate a fake + // setup until we can do the real thing), and B) its not clear to me how to update our pairing algorithms to do + // this type of pairing. For now, simply construct q_{\zeta} + z*q_Z without the shift and do a standard KZG + // pairing check. When we're ready, all we have to do to make this fully legit is commit to the shift here and + // update the pairing check accordingly. Note: When this is implemented properly, it doesnt make sense to store + // the (massive) shifted polynomial of size N_max. Ideally would only store the unshifted version and just + // compute the shifted commitment directly via a new method. 
+ auto batched_shifted_quotient = batched_quotient; + + return batched_shifted_quotient; + } + + /** + * @brief Prove a set of multilinear evaluation claims for unshifted polynomials f_i and to-be-shifted polynomials + * g_i + * + * @param f_polynomials Unshifted polynomials + * @param g_polynomials To-be-shifted polynomials (of which the shifts h_i were evaluated by sumcheck) + * @param evaluations Set of evaluations v_i = f_i(u), w_i = h_i(u) = g_i_shifted(u) + * @param multilinear_challenge Multilinear challenge point u + * @param commitment_key + * @param transcript + */ + static void prove(const auto& f_polynomials, + const auto& g_polynomials, + auto& evaluations, + auto& multilinear_challenge, + auto& commitment_key, + auto& transcript) + { + // Generate batching challenge \rho and powers 1,...,\rho^{m-1} + FF rho = transcript.get_challenge("rho"); + std::vector rhos = powers_of_challenge(rho, evaluations.size()); + + // Extract multilinear challenge u and claimed multilinear evaluations from Sumcheck output + std::span u_challenge = multilinear_challenge; + std::span claimed_evaluations = evaluations; + size_t log_N = u_challenge.size(); + size_t N = 1 << log_N; + + // Compute batching of unshifted polynomials f_i and to-be-shifted polynomials g_i: + // f_batched = sum_{i=0}^{m-1}\rho^i*f_i and g_batched = sum_{i=0}^{l-1}\rho^{m+i}*g_i, + // and also batched evaluation + // v = sum_{i=0}^{m-1}\rho^i*f_i(u) + sum_{i=0}^{l-1}\rho^{m+i}*h_i(u). + // Note: g_batched is formed from the to-be-shifted polynomials, but the batched evaluation incorporates the + // evaluations produced by sumcheck of h_i = g_i_shifted. 
+ auto batched_evaluation = FF(0); + Polynomial f_batched(N); // batched unshifted polynomials + size_t poly_idx = 0; // TODO(#391) zip + for (auto& f_poly : f_polynomials) { + f_batched.add_scaled(f_poly, rhos[poly_idx]); + batched_evaluation += rhos[poly_idx] * claimed_evaluations[poly_idx]; + ++poly_idx; + } + + Polynomial g_batched(N); // batched to-be-shifted polynomials + for (auto& g_poly : g_polynomials) { + g_batched.add_scaled(g_poly, rhos[poly_idx]); + batched_evaluation += rhos[poly_idx] * claimed_evaluations[poly_idx]; + ++poly_idx; + }; + + // Compute the full batched polynomial f = f_batched + g_batched.shifted() = f_batched + h_batched. This is the + // polynomial for which we compute the quotients q_k and prove f(u) = v_batched. + auto f_polynomial = f_batched; + f_polynomial += g_batched.shifted(); + + // Compute the multilinear quotients q_k = q_k(X_0, ..., X_{k-1}) + auto quotients = compute_multilinear_quotients(f_polynomial, u_challenge); + + // Compute and send commitments C_{q_k} = [q_k], k = 0,...,d-1 + std::vector q_k_commitments; + q_k_commitments.reserve(log_N); + for (size_t idx = 0; idx < log_N; ++idx) { + q_k_commitments[idx] = commitment_key->commit(quotients[idx]); + std::string label = "ZM:C_q_" + std::to_string(idx); + transcript.send_to_verifier(label, q_k_commitments[idx]); + } + + // Get challenge y + auto y_challenge = transcript.get_challenge("ZM:y"); + + // Compute the batched, lifted-degree quotient \hat{q} + auto batched_quotient = compute_batched_lifted_degree_quotient(quotients, y_challenge, N); + + // Compute and send the commitment C_q = [\hat{q}] + auto q_commitment = commitment_key->commit(batched_quotient); + transcript.send_to_verifier("ZM:C_q", q_commitment); + + // Get challenges x and z + auto [x_challenge, z_challenge] = transcript.get_challenges("ZM:x", "ZM:z"); + + // Compute degree check polynomial \zeta partially evaluated at x + auto zeta_x = + 
compute_partially_evaluated_degree_check_polynomial(batched_quotient, quotients, y_challenge, x_challenge); + + // Compute ZeroMorph identity polynomial Z partially evaluated at x + auto Z_x = compute_partially_evaluated_zeromorph_identity_polynomial( + f_batched, g_batched, quotients, batched_evaluation, u_challenge, x_challenge); + + // Compute batched degree-check and ZM-identity quotient polynomial pi + auto pi_polynomial = + compute_batched_evaluation_and_degree_check_quotient(zeta_x, Z_x, x_challenge, z_challenge); + + // Compute and send proof commitment pi + auto pi_commitment = commitment_key->commit(pi_polynomial); + transcript.send_to_verifier("ZM:PI", pi_commitment); + } +}; + +/** + * @brief Verifier for ZeroMorph multilinear PCS + * + * @tparam Curve + */ +template class ZeroMorphVerifier_ { + using FF = typename Curve::ScalarField; + using Commitment = typename Curve::AffineElement; + + public: + /** + * @brief Compute commitment to partially evaluated batched lifted degree quotient identity + * @details Compute commitment C_{\zeta_x} = [\zeta_x]_1 using homomorphicity: + * + * C_{\zeta_x} = [q]_1 - \sum_k y^k * x^{N - d_k - 1} * [q_k]_1 + * + * @param C_q Commitment to batched lifted degree quotient + * @param C_q_k Commitments to quotients q_k + * @param y_challenge + * @param x_challenge + * @return Commitment + */ + static Commitment compute_C_zeta_x(Commitment C_q, std::vector& C_q_k, FF y_challenge, FF x_challenge) + { + size_t log_N = C_q_k.size(); + size_t N = 1 << log_N; + + // Instantiate containers for input to batch mul + std::vector scalars; + std::vector commitments; + + // Contribution from C_q + if constexpr (Curve::is_stdlib_type) { + auto builder = x_challenge.get_context(); + scalars.emplace_back(FF(builder, 1)); + } else { + scalars.emplace_back(FF(1)); + } + commitments.emplace_back(C_q); + + // Contribution from C_q_k, k = 0,...,log_N + for (size_t k = 0; k < log_N; ++k) { + auto deg_k = static_cast((1 << k) - 1); + // Compute 
scalar y^k * x^{N - deg_k - 1} + auto scalar = y_challenge.pow(k); + scalar *= x_challenge.pow(N - deg_k - 1); + scalar *= FF(-1); + + scalars.emplace_back(scalar); + commitments.emplace_back(C_q_k[k]); + } + + // Compute batch mul to get the result + if constexpr (Curve::is_stdlib_type) { + return Commitment::batch_mul(commitments, scalars); + } else { + return batch_mul_native(commitments, scalars); + } + } + + /** + * @brief Compute commitment to partially evaluated ZeroMorph identity Z + * @details Compute commitment C_{Z_x} = [Z_x]_1 using homomorphicity: + * + * C_{Z_x} = x * \sum_{i=0}^{m-1}\rho^i*[f_i] + \sum_{i=0}^{l-1}\rho^{m+i}*[g_i] - v * x * \Phi_n(x) * [1]_1 + * - x * \sum_k (x^{2^k}\Phi_{n-k-1}(x^{2^{k-1}}) - u_k\Phi_{n-k}(x^{2^k})) * [q_k] + * + * @param f_commitments Commitments to unshifted polynomials [f_i] + * @param g_commitments Commitments to to-be-shifted polynomials [g_i] + * @param C_q_k Commitments to q_k + * @param rho + * @param batched_evaluation \sum_{i=0}^{m-1} \rho^i*f_i(u) + \sum_{i=0}^{l-1} \rho^{m+i}*h_i(u) + * @param x_challenge + * @param u_challenge multilinear challenge + * @return Commitment + */ + static Commitment compute_C_Z_x(std::vector f_commitments, + std::vector g_commitments, + std::vector& C_q_k, + FF rho, + FF batched_evaluation, + FF x_challenge, + std::vector u_challenge) + { + size_t log_N = C_q_k.size(); + size_t N = 1 << log_N; + + std::vector scalars; + std::vector commitments; + + // Phi_n(x) = (x^N - 1) / (x - 1) + auto phi_numerator = x_challenge.pow(N) - 1; // x^N - 1 + auto phi_n_x = phi_numerator / (x_challenge - 1); + + // Add contribution: -v * x * \Phi_n(x) * [1]_1 + if constexpr (Curve::is_stdlib_type) { + auto builder = x_challenge.get_context(); + scalars.emplace_back(FF(builder, -1) * batched_evaluation * x_challenge * phi_n_x); + commitments.emplace_back(Commitment::one(builder)); + } else { + scalars.emplace_back(FF(-1) * batched_evaluation * x_challenge * phi_n_x); + 
commitments.emplace_back(Commitment::one()); + } + + // Add contribution: x * \sum_{i=0}^{m-1} \rho^i*[f_i] + auto rho_pow = FF(1); + for (auto& commitment : f_commitments) { + scalars.emplace_back(x_challenge * rho_pow); + commitments.emplace_back(commitment); + rho_pow *= rho; + } + + // Add contribution: \sum_{i=0}^{l-1} \rho^{m+i}*[g_i] + for (auto& commitment : g_commitments) { + scalars.emplace_back(rho_pow); + commitments.emplace_back(commitment); + rho_pow *= rho; + } + + // Add contributions: scalar * [q_k], k = 0,...,log_N, where + // scalar = -x * (x^{2^k} * \Phi_{n-k-1}(x^{2^{k+1}}) - u_k * \Phi_{n-k}(x^{2^k})) + auto x_pow_2k = x_challenge; // x^{2^k} + auto x_pow_2kp1 = x_challenge * x_challenge; // x^{2^{k + 1}} + for (size_t k = 0; k < log_N; ++k) { + + auto phi_term_1 = phi_numerator / (x_pow_2kp1 - 1); // \Phi_{n-k-1}(x^{2^{k + 1}}) + auto phi_term_2 = phi_numerator / (x_pow_2k - 1); // \Phi_{n-k}(x^{2^k}) + + auto scalar = x_pow_2k * phi_term_1; + scalar -= u_challenge[k] * phi_term_2; + scalar *= x_challenge; + scalar *= FF(-1); + + scalars.emplace_back(scalar); + commitments.emplace_back(C_q_k[k]); + + // Update powers of challenge x + x_pow_2k = x_pow_2kp1; + x_pow_2kp1 *= x_pow_2kp1; + } + + if constexpr (Curve::is_stdlib_type) { + return Commitment::batch_mul(commitments, scalars); + } else { + return batch_mul_native(commitments, scalars); + } + } + + /** + * @brief Utility for native batch multiplication of group elements + * @note This is used only for native verification and is not optimized for efficiency + */ + static Commitment batch_mul_native(std::vector points, std::vector scalars) + { + auto result = points[0] * scalars[0]; + for (size_t idx = 1; idx < scalars.size(); ++idx) { + result = result + points[idx] * scalars[idx]; + } + return result; + } + + /** + * @brief Verify a set of multilinear evaluation claims for unshifted polynomials f_i and to-be-shifted polynomials + * g_i + * + * @param commitments Commitments to 
polynomials f_i and g_i (unshifted and to-be-shifted) + * @param claimed_evaluations Claimed evaluations v_i = f_i(u) and w_i = h_i(u) = g_i_shifted(u) + * @param multivariate_challenge Challenge point u + * @param transcript + * @return std::array Inputs to the final pairing check + */ + static std::array verify(auto& commitments, + auto& claimed_evaluations, + auto& multivariate_challenge, + auto& transcript) + { + size_t log_N = multivariate_challenge.size(); + FF rho = transcript.get_challenge("rho"); + + // Compute powers of batching challenge rho + std::vector rhos = pcs::zeromorph::powers_of_challenge(rho, claimed_evaluations.size()); + + // Construct batched evaluation v = sum_{i=0}^{m-1}\rho^i*f_i(u) + sum_{i=0}^{l-1}\rho^{m+i}*h_i(u) + FF batched_evaluation = FF(0); + size_t evaluation_idx = 0; + for (auto& value : claimed_evaluations.get_unshifted_then_shifted()) { + batched_evaluation += value * rhos[evaluation_idx]; + ++evaluation_idx; + } + + // Receive commitments [q_k] + std::vector C_q_k; + C_q_k.reserve(log_N); + for (size_t i = 0; i < log_N; ++i) { + C_q_k.emplace_back(transcript.template receive_from_prover("ZM:C_q_" + std::to_string(i))); + } + + // Challenge y + auto y_challenge = transcript.get_challenge("ZM:y"); + + // Receive commitment C_{q} + auto C_q = transcript.template receive_from_prover("ZM:C_q"); + + // Challenges x, z + auto [x_challenge, z_challenge] = transcript.get_challenges("ZM:x", "ZM:z"); + + // Compute commitment C_{\zeta_x} + auto C_zeta_x = compute_C_zeta_x(C_q, C_q_k, y_challenge, x_challenge); + + // Compute commitment C_{Z_x} + Commitment C_Z_x = compute_C_Z_x(commitments.get_unshifted(), + commitments.get_to_be_shifted(), + C_q_k, + rho, + batched_evaluation, + x_challenge, + multivariate_challenge); + + // Compute commitment C_{\zeta,Z} + auto C_zeta_Z = C_zeta_x + C_Z_x * z_challenge; + + // Receive proof commitment \pi + auto C_pi = transcript.template receive_from_prover("ZM:PI"); + + // Construct inputs and 
perform pairing check to verify claimed evaluation + // Note: The pairing check (without the degree check component X^{N_max-N-1}) can be expressed naturally as + // e(C_{\zeta,Z}, [1]_2) = e(pi, [X - x]_2). This can be rearranged (e.g. see the plonk paper) as + // e(C_{\zeta,Z} - x*pi, [1]_2) * e(-pi, [X]_2) = 1, or + // e(P_0, [1]_2) * e(P_1, [X]_2) = 1 + auto P0 = C_zeta_Z + C_pi * x_challenge; + auto P1 = -C_pi; + + return { P0, P1 }; + } +}; + +} // namespace proof_system::honk::pcs::zeromorph \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/honk/pcs/zeromorph/zeromorph.test.cpp b/barretenberg/cpp/src/barretenberg/honk/pcs/zeromorph/zeromorph.test.cpp new file mode 100644 index 000000000000..2dd12c429f9c --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/honk/pcs/zeromorph/zeromorph.test.cpp @@ -0,0 +1,479 @@ +#include "zeromorph.hpp" +#include "../commitment_key.test.hpp" +#include "barretenberg/honk/transcript/transcript.hpp" + +#include + +namespace proof_system::honk::pcs::zeromorph { + +template class ZeroMorphTest : public CommitmentTest { + public: + using Fr = typename Curve::ScalarField; + using Polynomial = barretenberg::Polynomial; + using Commitment = typename Curve::AffineElement; + using GroupElement = typename Curve::Element; + using ZeroMorphProver = ZeroMorphProver_; + using ZeroMorphVerifier = ZeroMorphVerifier_; + + // Evaluate Phi_k(x) = \sum_{i=0}^k x^i using the direct inefficent formula + Fr Phi(Fr challenge, size_t subscript) + { + size_t length = 1 << subscript; + auto result = Fr(0); + for (size_t idx = 0; idx < length; ++idx) { + result += challenge.pow(idx); + } + return result; + } + + /** + * @brief Construct and verify ZeroMorph proof of batched multilinear evaluation with shifts + * @details The goal is to construct and verify a single batched multilinear evaluation proof for m polynomials f_i + * and l polynomials h_i. 
It is assumed that the h_i are shifts of polynomials g_i (the "to-be-shifted" + * polynomials), which are a subset of the f_i. This is what is encountered in practice. We accomplish this using + * evaluations of h_i but commitments to only their unshifted counterparts g_i (which we get for "free" since + * commitments [g_i] are contained in the set of commitments [f_i]). + * + */ + bool execute_zeromorph_protocol(size_t NUM_UNSHIFTED, size_t NUM_SHIFTED) + { + bool verified = false; + + size_t N = 16; + size_t log_N = numeric::get_msb(N); + + auto u_challenge = this->random_evaluation_point(log_N); + + // Construct some random multilinear polynomials f_i and their evaluations v_i = f_i(u) + std::vector f_polynomials; // unshifted polynomials + std::vector v_evaluations; + for (size_t i = 0; i < NUM_UNSHIFTED; ++i) { + f_polynomials.emplace_back(this->random_polynomial(N)); + f_polynomials[i][0] = Fr(0); // ensure f is "shiftable" + v_evaluations.emplace_back(f_polynomials[i].evaluate_mle(u_challenge)); + } + + // Construct some "shifted" multilinear polynomials h_i as the left-shift-by-1 of f_i + std::vector g_polynomials; // to-be-shifted polynomials + std::vector h_polynomials; // shifts of the to-be-shifted polynomials + std::vector w_evaluations; + for (size_t i = 0; i < NUM_SHIFTED; ++i) { + g_polynomials.emplace_back(f_polynomials[i]); + h_polynomials.emplace_back(g_polynomials[i].shifted()); + w_evaluations.emplace_back(h_polynomials[i].evaluate_mle(u_challenge)); + // ASSERT_EQ(w_evaluations[i], g_polynomials[i].evaluate_mle(u_challenge, /* shift = */ true)); + } + + // Compute commitments [f_i] + std::vector f_commitments; + for (size_t i = 0; i < NUM_UNSHIFTED; ++i) { + f_commitments.emplace_back(this->commit(f_polynomials[i])); + } + + // Construct container of commitments of the "to-be-shifted" polynomials [g_i] (= [f_i]) + std::vector g_commitments; + for (size_t i = 0; i < NUM_SHIFTED; ++i) { + g_commitments.emplace_back(f_commitments[i]); + } + + // 
Initialize an empty ProverTranscript + auto prover_transcript = ProverTranscript::init_empty(); + + // Execute Prover protocol + { + auto rho = prover_transcript.get_challenge("ZM:rho"); + + // Compute batching of f_i and g_i polynomials: sum_{i=0}^{m-1}\rho^i*f_i and + // sum_{i=0}^{l-1}\rho^{m+i}*h_i, and also batched evaluation v = sum_{i=0}^{m-1}\rho^i*v_i + + // sum_{i=0}^{l-1}\rho^{m+i}*w_i. + auto f_batched = Polynomial(N); + auto g_batched = Polynomial(N); + auto v_evaluation = Fr(0); + auto rho_pow = Fr(1); + for (size_t i = 0; i < NUM_UNSHIFTED; ++i) { + f_batched.add_scaled(f_polynomials[i], rho_pow); + v_evaluation += rho_pow * v_evaluations[i]; + rho_pow *= rho; + } + for (size_t i = 0; i < NUM_SHIFTED; ++i) { + g_batched.add_scaled(g_polynomials[i], rho_pow); + v_evaluation += rho_pow * w_evaluations[i]; + rho_pow *= rho; + } + + // The new f is f_batched + g_batched.shifted() = f_batched + h_batched + auto f_polynomial = f_batched; + f_polynomial += g_batched.shifted(); + + // Compute the multilinear quotients q_k = q_k(X_0, ..., X_{k-1}) + auto quotients = ZeroMorphProver::compute_multilinear_quotients(f_polynomial, u_challenge); + + // Compute and send commitments C_{q_k} = [q_k], k = 0,...,d-1 + std::vector q_k_commitments; + q_k_commitments.reserve(log_N); + for (size_t idx = 0; idx < log_N; ++idx) { + q_k_commitments[idx] = this->commit(quotients[idx]); + std::string label = "ZM:C_q_" + std::to_string(idx); + prover_transcript.send_to_verifier(label, q_k_commitments[idx]); + } + + // Get challenge y + auto y_challenge = prover_transcript.get_challenge("ZM:y"); + + // Compute the batched, lifted-degree quotient \hat{q} + auto batched_quotient = ZeroMorphProver::compute_batched_lifted_degree_quotient(quotients, y_challenge, N); + + // Compute and send the commitment C_q = [\hat{q}] + auto q_commitment = this->commit(batched_quotient); + prover_transcript.send_to_verifier("ZM:C_q", q_commitment); + + // Get challenges x and z + auto [x_challenge, 
z_challenge] = prover_transcript.get_challenges("ZM:x", "ZM:z"); + + // Compute degree check polynomial \zeta partially evaluated at x + auto zeta_x = ZeroMorphProver::compute_partially_evaluated_degree_check_polynomial( + batched_quotient, quotients, y_challenge, x_challenge); + + // Compute ZeroMorph identity polynomial Z partially evaluated at x + auto Z_x = ZeroMorphProver::compute_partially_evaluated_zeromorph_identity_polynomial( + f_batched, g_batched, quotients, v_evaluation, u_challenge, x_challenge); + + // Compute batched degree and ZM-identity quotient polynomial pi + auto pi_polynomial = ZeroMorphProver::compute_batched_evaluation_and_degree_check_quotient( + zeta_x, Z_x, x_challenge, z_challenge); + + // Compute and send proof commitment pi + auto pi_commitment = this->commit(pi_polynomial); + prover_transcript.send_to_verifier("ZM:PI", pi_commitment); + } + + auto verifier_transcript = VerifierTranscript::init_empty(prover_transcript); + + // Execute Verifier protocol + { + // Challenge rho + auto rho = verifier_transcript.get_challenge("ZM:rho"); + + // Construct batched evaluation v = sum_{i=0}^{m-1}\rho^i*v_i + sum_{i=0}^{l-1}\rho^{m+i}*w_i + auto v_evaluation = Fr(0); + auto rho_pow = Fr(1); + for (size_t i = 0; i < NUM_UNSHIFTED; ++i) { + v_evaluation += rho_pow * v_evaluations[i]; + rho_pow *= rho; + } + for (size_t i = 0; i < NUM_SHIFTED; ++i) { + v_evaluation += rho_pow * w_evaluations[i]; + rho_pow *= rho; + } + + // Receive commitments [q_k] + std::vector C_q_k; + C_q_k.reserve(log_N); + for (size_t i = 0; i < log_N; ++i) { + C_q_k.emplace_back( + verifier_transcript.template receive_from_prover("ZM:C_q_" + std::to_string(i))); + } + + // Challenge y + auto y_challenge = verifier_transcript.get_challenge("ZM:y"); + + // Receive commitment C_{q} + auto C_q = verifier_transcript.template receive_from_prover("ZM:C_q"); + + // Challenges x, z + auto [x_challenge, z_challenge] = verifier_transcript.get_challenges("ZM:x", "ZM:z"); + + // Compute 
commitment C_{\zeta_x} + auto C_zeta_x = ZeroMorphVerifier::compute_C_zeta_x(C_q, C_q_k, y_challenge, x_challenge); + + // Compute commitment C_{Z_x} + Commitment C_Z_x = ZeroMorphVerifier::compute_C_Z_x( + f_commitments, g_commitments, C_q_k, rho, v_evaluation, x_challenge, u_challenge); + + // Compute commitment C_{\zeta,Z} + auto C_zeta_Z = C_zeta_x + C_Z_x * z_challenge; + + // Receive proof commitment \pi + auto C_pi = verifier_transcript.template receive_from_prover("ZM:PI"); + + // The prover and verifier manifests should agree + EXPECT_EQ(prover_transcript.get_manifest(), verifier_transcript.get_manifest()); + + // Construct inputs and perform pairing check to verify claimed evaluation + // Note: The pairing check (without the degree check component X^{N_max-N-1}) can be expressed naturally as + // e(C_{\zeta,Z}, [1]_2) = e(pi, [X - x]_2). This can be rearranged (e.g. see the plonk paper) as + // e(C_{\zeta,Z} - x*pi, [1]_2) * e(-pi, [X]_2) = 1, or + // e(P_0, [1]_2) * e(P_1, [X]_2) = 1 + auto P0 = C_zeta_Z + C_pi * x_challenge; + auto P1 = -C_pi; + verified = this->vk()->pairing_check(P0, P1); + // EXPECT_TRUE(verified); + } + return verified; + } +}; + +using CurveTypes = ::testing::Types; +TYPED_TEST_SUITE(ZeroMorphTest, CurveTypes); + +/** + * @brief Test method for computing q_k given multilinear f + * @details Given f = f(X_0, ..., X_{d-1}), and (u,v) such that f(u) = v, compute q_k = q_k(X_0, ..., X_{k-1}) such that + * the following identity holds: + * + * f(X_0, ..., X_{d-1}) - v = \sum_{k=0}^{d-1} (X_k - u_k)q_k(X_0, ..., X_{k-1}) + * + */ +TYPED_TEST(ZeroMorphTest, QuotientConstruction) +{ + // Define some useful type aliases + using ZeroMorphProver = ZeroMorphProver_; + using Fr = typename TypeParam::ScalarField; + using Polynomial = barretenberg::Polynomial; + + // Define size parameters + size_t N = 16; + size_t log_N = numeric::get_msb(N); + + // Construct a random multilinear polynomial f, and (u,v) such that f(u) = v. 
+ Polynomial multilinear_f = this->random_polynomial(N); + std::vector u_challenge = this->random_evaluation_point(log_N); + Fr v_evaluation = multilinear_f.evaluate_mle(u_challenge); + + // Compute the multilinear quotients q_k = q_k(X_0, ..., X_{k-1}) + std::vector quotients = ZeroMorphProver::compute_multilinear_quotients(multilinear_f, u_challenge); + + // Show that the q_k were properly constructed by showing that the identity holds at a random multilinear challenge + // z, i.e. f(z) - v - \sum_{k=0}^{d-1} (z_k - u_k)q_k(z) = 0 + std::vector z_challenge = this->random_evaluation_point(log_N); + + Fr result = multilinear_f.evaluate_mle(z_challenge); + result -= v_evaluation; + for (size_t k = 0; k < log_N; ++k) { + auto q_k_eval = Fr(0); + if (k == 0) { + // q_0 = a_0 is a constant polynomial so it's evaluation is simply its constant coefficient + q_k_eval = quotients[k][0]; + } else { + // Construct (u_0, ..., u_{k-1}) + auto subrange_size = static_cast(k); + std::vector z_partial(z_challenge.begin(), z_challenge.begin() + subrange_size); + q_k_eval = quotients[k].evaluate_mle(z_partial); + } + // result = result - (z_k - u_k) * q_k(u_0, ..., u_{k-1}) + result -= (z_challenge[k] - u_challenge[k]) * q_k_eval; + } + + EXPECT_EQ(result, 0); +} + +/** + * @brief Test function for constructing batched lifted degree quotient \hat{q} + * + */ +TYPED_TEST(ZeroMorphTest, BatchedLiftedDegreeQuotient) +{ + // Define some useful type aliases + using ZeroMorphProver = ZeroMorphProver_; + using Fr = typename TypeParam::ScalarField; + using Polynomial = barretenberg::Polynomial; + + const size_t N = 8; + + // Define some mock q_k with deg(q_k) = 2^k - 1 + std::vector data_0 = { 1 }; + std::vector data_1 = { 2, 3 }; + std::vector data_2 = { 4, 5, 6, 7 }; + Polynomial q_0(data_0); + Polynomial q_1(data_1); + Polynomial q_2(data_2); + std::vector quotients = { q_0, q_1, q_2 }; + + auto y_challenge = Fr::random_element(); + + // Compute batched quotient \hat{q} using the prover 
method + auto batched_quotient = ZeroMorphProver::compute_batched_lifted_degree_quotient(quotients, y_challenge, N); + + // Now explicitly define q_k_lifted = X^{N-2^k} * q_k and compute the expected batched result + std::array data_0_lifted = { 0, 0, 0, 0, 0, 0, 0, 1 }; + std::array data_1_lifted = { 0, 0, 0, 0, 0, 0, 2, 3 }; + std::array data_2_lifted = { 0, 0, 0, 0, 4, 5, 6, 7 }; + Polynomial q_0_lifted(data_0_lifted); + Polynomial q_1_lifted(data_1_lifted); + Polynomial q_2_lifted(data_2_lifted); + + // Explicitly compute \hat{q} + auto batched_quotient_expected = Polynomial(N); + batched_quotient_expected += q_0_lifted; + batched_quotient_expected.add_scaled(q_1_lifted, y_challenge); + batched_quotient_expected.add_scaled(q_2_lifted, y_challenge * y_challenge); + + EXPECT_EQ(batched_quotient, batched_quotient_expected); +} + +/** + * @brief Test function for constructing partially evaluated quotient \zeta_x + * + */ +TYPED_TEST(ZeroMorphTest, PartiallyEvaluatedQuotientZeta) +{ + // Define some useful type aliases + using ZeroMorphProver = ZeroMorphProver_; + using Fr = typename TypeParam::ScalarField; + using Polynomial = barretenberg::Polynomial; + + const size_t N = 8; + + // Define some mock q_k with deg(q_k) = 2^k - 1 + std::vector data_0 = { 1 }; + std::vector data_1 = { 2, 3 }; + std::vector data_2 = { 4, 5, 6, 7 }; + Polynomial q_0(data_0); + Polynomial q_1(data_1); + Polynomial q_2(data_2); + std::vector quotients = { q_0, q_1, q_2 }; + + auto y_challenge = Fr::random_element(); + + auto batched_quotient = ZeroMorphProver::compute_batched_lifted_degree_quotient(quotients, y_challenge, N); + + auto x_challenge = Fr::random_element(); + + // Contruct zeta_x using the prover method + auto zeta_x = ZeroMorphProver::compute_partially_evaluated_degree_check_polynomial( + batched_quotient, quotients, y_challenge, x_challenge); + + // Now construct zeta_x explicitly + auto zeta_x_expected = Polynomial(N); + zeta_x_expected += batched_quotient; + // q_batched - 
\sum_k q_k * y^k * x^{N - deg(q_k) - 1} + zeta_x_expected.add_scaled(q_0, -x_challenge.pow(N - 0 - 1)); + zeta_x_expected.add_scaled(q_1, -y_challenge * x_challenge.pow(N - 1 - 1)); + zeta_x_expected.add_scaled(q_2, -y_challenge * y_challenge * x_challenge.pow(N - 3 - 1)); + + EXPECT_EQ(zeta_x, zeta_x_expected); +} + +/** + * @brief Demonstrate formulas for efficiently computing \Phi_k(x) = \sum_{i=0}^{k-1}x^i + * @details \Phi_k(x) = \sum_{i=0}^{k-1}x^i = (x^{2^k} - 1) / (x - 1) + * + */ +TYPED_TEST(ZeroMorphTest, PhiEvaluation) +{ + using Fr = typename TypeParam::ScalarField; + const size_t N = 8; + size_t n = numeric::get_msb(N); + + // \Phi_n(x) + { + auto x_challenge = Fr::random_element(); + + auto efficient = (x_challenge.pow(1 << n) - 1) / (x_challenge - 1); + + auto expected = this->Phi(x_challenge, n); + + EXPECT_EQ(efficient, expected); + } + + // \Phi_{n-k-1}(x^{2^{k + 1}}) = (x^{2^n} - 1) / (x^{2^{k + 1}} - 1) + { + auto x_challenge = Fr::random_element(); + + size_t k = 2; + + // x^{2^{k+1}} + auto x_pow = x_challenge.pow(1 << (k + 1)); + + auto efficient = x_challenge.pow(1 << n) - 1; // x^N - 1 + efficient = efficient / (x_pow - 1); // (x^N - 1) / (x^{2^{k + 1}} - 1) + + auto expected = this->Phi(x_pow, n - k - 1); + EXPECT_EQ(efficient, expected); + } +} + +/** + * @brief Test function for constructing partially evaluated quotient Z_x + * + */ +TYPED_TEST(ZeroMorphTest, PartiallyEvaluatedQuotientZ) +{ + // Define some useful type aliases + using ZeroMorphProver = ZeroMorphProver_; + using Fr = typename TypeParam::ScalarField; + using Polynomial = barretenberg::Polynomial; + + const size_t N = 8; + size_t log_N = numeric::get_msb(N); + + // Construct a random multilinear polynomial f, and (u,v) such that f(u) = v. 
+ Polynomial multilinear_f = this->random_polynomial(N); + Polynomial multilinear_g = this->random_polynomial(N); + multilinear_g[0] = 0; + std::vector u_challenge = this->random_evaluation_point(log_N); + Fr v_evaluation = multilinear_f.evaluate_mle(u_challenge); + Fr w_evaluation = multilinear_g.evaluate_mle(u_challenge, /* shift = */ true); + + auto rho = Fr::random_element(); + + // compute batched polynomial and evaluation + auto f_batched = multilinear_f; + auto g_batched = multilinear_g; + g_batched *= rho; + auto v_batched = v_evaluation + rho * w_evaluation; + + // Define some mock q_k with deg(q_k) = 2^k - 1 + auto q_0 = this->random_polynomial(1 << 0); + auto q_1 = this->random_polynomial(1 << 1); + auto q_2 = this->random_polynomial(1 << 2); + std::vector quotients = { q_0, q_1, q_2 }; + + auto x_challenge = Fr::random_element(); + + // Construct Z_x using the prover method + auto Z_x = ZeroMorphProver::compute_partially_evaluated_zeromorph_identity_polynomial( + f_batched, g_batched, quotients, v_batched, u_challenge, x_challenge); + + // Compute Z_x directly + auto Z_x_expected = g_batched; + Z_x_expected.add_scaled(f_batched, x_challenge); + Z_x_expected[0] -= v_batched * x_challenge * this->Phi(x_challenge, log_N); + for (size_t k = 0; k < log_N; ++k) { + auto x_pow_2k = x_challenge.pow(1 << k); // x^{2^k} + auto x_pow_2kp1 = x_challenge.pow(1 << (k + 1)); // x^{2^{k+1}} + // x^{2^k} * \Phi_{n-k-1}(x^{2^{k+1}}) - u_k * \Phi_{n-k}(x^{2^k}) + auto scalar = x_pow_2k * this->Phi(x_pow_2kp1, log_N - k - 1) - u_challenge[k] * this->Phi(x_pow_2k, log_N - k); + scalar *= x_challenge; + scalar *= Fr(-1); + Z_x_expected.add_scaled(quotients[k], scalar); + } + + EXPECT_EQ(Z_x, Z_x_expected); +} + +/** + * @brief Test full Prover/Verifier protocol for proving single multilinear evaluation + * + */ +TYPED_TEST(ZeroMorphTest, ProveAndVerifySingle) +{ + size_t num_unshifted = 1; + size_t num_shifted = 0; + auto verified = 
this->execute_zeromorph_protocol(num_unshifted, num_shifted); + EXPECT_TRUE(verified); +} + +/** + * @brief Test full Prover/Verifier protocol for proving batched multilinear evaluation with shifts + * + */ +TYPED_TEST(ZeroMorphTest, ProveAndVerifyBatchedWithShifts) +{ + size_t num_unshifted = 3; + size_t num_shifted = 2; + auto verified = this->execute_zeromorph_protocol(num_unshifted, num_shifted); + EXPECT_TRUE(verified); +} + +} // namespace proof_system::honk::pcs::zeromorph diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/composer_lib.hpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/composer_lib.hpp index 557570d90d9c..1219b2a586b9 100644 --- a/barretenberg/cpp/src/barretenberg/honk/proof_system/composer_lib.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/composer_lib.hpp @@ -26,7 +26,7 @@ std::shared_ptr compute_verification_key_commo auto commitment_key = typename Flavor::CommitmentKey(proving_key->circuit_size, proving_key->crs); - size_t poly_idx = 0; // TODO(#391) zip + size_t poly_idx = 0; // TODO(https://github.com/AztecProtocol/barretenberg/issues/391) zip for (auto& polynomial : proving_key) { verification_key[poly_idx] = commitment_key.commit(polynomial); ++polynomial_idx; diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/eccvm_prover.cpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/eccvm_prover.cpp index c44589b05780..9f5143ba028e 100644 --- a/barretenberg/cpp/src/barretenberg/honk/proof_system/eccvm_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/eccvm_prover.cpp @@ -33,8 +33,7 @@ template ECCVMProver_::ECCVMProver_(std::shared_ptr input_key, std::shared_ptr commitment_key) : key(input_key) - , queue(commitment_key, transcript) - , pcs_commitment_key(commitment_key) + , commitment_key(commitment_key) { // TODO(@zac-williamson) Future work; is there a cleaner way of doing this? 
#2213 @@ -144,19 +143,7 @@ ECCVMProver_::ECCVMProver_(std::shared_ptr prover_polynomials.lookup_inverses = key->lookup_inverses; key->z_perm = Polynomial(key->circuit_size); prover_polynomials.z_perm = key->z_perm; -} - -/** - * @brief Commit to the first three wires only - * - */ -template void ECCVMProver_::compute_wire_commitments() -{ - auto wire_polys = key->get_wires(); - auto labels = commitment_labels.get_wires(); - for (size_t idx = 0; idx < wire_polys.size(); ++idx) { - queue.add_commitment(wire_polys[idx], labels[idx]); - } + prover_polynomials.z_perm_shift = key->z_perm; // this will be initialized properly later } /** @@ -179,7 +166,7 @@ template void ECCVMProver_::execute_wire_commitment auto wire_polys = key->get_wires(); auto labels = commitment_labels.get_wires(); for (size_t idx = 0; idx < wire_polys.size(); ++idx) { - queue.add_commitment(wire_polys[idx], labels[idx]); + transcript.send_to_verifier(labels[idx], commitment_key->commit(wire_polys[idx])); } } @@ -203,7 +190,7 @@ template void ECCVMProver_::execute_log_derivative_ // Compute inverse polynomial for our logarithmic-derivative lookup method lookup_library::compute_logderivative_inverse( prover_polynomials, relation_parameters, key->circuit_size); - queue.add_commitment(key->lookup_inverses, commitment_labels.lookup_inverses); + transcript.send_to_verifier(commitment_labels.lookup_inverses, commitment_key->commit(key->lookup_inverses)); prover_polynomials.lookup_inverses = key->lookup_inverses; } @@ -216,7 +203,7 @@ template void ECCVMProver_::execute_grand_product_c // Compute permutation grand product and their commitments permutation_library::compute_permutation_grand_products(key, prover_polynomials, relation_parameters); - queue.add_commitment(key->z_perm, commitment_labels.z_perm); + transcript.send_to_verifier(commitment_labels.z_perm, commitment_key->commit(key->z_perm)); } /** @@ -247,7 +234,7 @@ template void ECCVMProver_::execute_univariatizatio // Batch the unshifted 
polynomials and the to-be-shifted polynomials using ρ Polynomial batched_poly_unshifted(key->circuit_size); // batched unshifted polynomials - size_t poly_idx = 0; // TODO(#391) zip + size_t poly_idx = 0; // TODO(https://github.com/AztecProtocol/barretenberg/issues/391) zip for (auto& unshifted_poly : prover_polynomials.get_unshifted()) { batched_poly_unshifted.add_scaled(unshifted_poly, rhos[poly_idx]); ++poly_idx; @@ -265,7 +252,8 @@ template void ECCVMProver_::execute_univariatizatio // Compute and add to trasnscript the commitments [Fold^(i)], i = 1, ..., d-1 for (size_t l = 0; l < key->log_circuit_size - 1; ++l) { - queue.add_commitment(gemini_polynomials[l + 2], "Gemini:FOLD_" + std::to_string(l + 1)); + transcript.send_to_verifier("Gemini:FOLD_" + std::to_string(l + 1), + commitment_key->commit(gemini_polynomials[l + 2])); } } @@ -300,7 +288,7 @@ template void ECCVMProver_::execute_shplonk_batched Shplonk::compute_batched_quotient(gemini_output.opening_pairs, gemini_output.witnesses, nu_challenge); // commit to Q(X) and add [Q] to the transcript - queue.add_commitment(batched_quotient_Q, "Shplonk:Q"); + transcript.send_to_verifier("Shplonk:Q", commitment_key->commit(batched_quotient_Q)); } /** @@ -321,8 +309,7 @@ template void ECCVMProver_::execute_shplonk_partial * */ template void ECCVMProver_::execute_final_pcs_round() { - PCS::compute_opening_proof(pcs_commitment_key, shplonk_output.opening_pair, shplonk_output.witness, transcript); - // queue.add_commitment(quotient_W, "KZG:W"); + PCS::compute_opening_proof(commitment_key, shplonk_output.opening_pair, shplonk_output.witness, transcript); } template plonk::proof& ECCVMProver_::export_proof() @@ -338,16 +325,13 @@ template plonk::proof& ECCVMProver_::construct_proo // Compute first three wire commitments execute_wire_commitments_round(); - queue.process_queue(); // Compute sorted list accumulator and commitment execute_log_derivative_commitments_round(); - queue.process_queue(); // Fiat-Shamir: 
bbeta & gamma // Compute grand product(s) and commitments. execute_grand_product_computation_round(); - queue.process_queue(); // Fiat-Shamir: alpha // Run sumcheck subprotocol. @@ -356,7 +340,6 @@ template plonk::proof& ECCVMProver_::construct_proo // Fiat-Shamir: rho // Compute Fold polynomials and their commitments. execute_univariatization_round(); - queue.process_queue(); // Fiat-Shamir: r // Compute Fold evaluations @@ -365,7 +348,6 @@ template plonk::proof& ECCVMProver_::construct_proo // Fiat-Shamir: nu // Compute Shplonk batched quotient commitment Q execute_shplonk_batched_quotient_round(); - queue.process_queue(); // Fiat-Shamir: z // Compute partial evaluation Q_z diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/eccvm_prover.hpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/eccvm_prover.hpp index 0e5298b7f6a5..d781fcb86c26 100644 --- a/barretenberg/cpp/src/barretenberg/honk/proof_system/eccvm_prover.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/eccvm_prover.hpp @@ -2,7 +2,6 @@ #include "barretenberg/honk/flavor/ecc_vm.hpp" #include "barretenberg/honk/pcs/gemini/gemini.hpp" #include "barretenberg/honk/pcs/shplonk/shplonk.hpp" -#include "barretenberg/honk/proof_system/work_queue.hpp" #include "barretenberg/honk/sumcheck/sumcheck_output.hpp" #include "barretenberg/honk/transcript/transcript.hpp" #include "barretenberg/plonk/proof_system/types/proof.hpp" @@ -37,8 +36,6 @@ template class ECCVMProver_ { void execute_shplonk_partial_evaluation_round(); void execute_final_pcs_round(); - void compute_wire_commitments(); - plonk::proof& export_proof(); plonk::proof& construct_proof(); @@ -63,12 +60,10 @@ template class ECCVMProver_ { Polynomial quotient_W; - work_queue queue; - sumcheck::SumcheckOutput sumcheck_output; pcs::gemini::ProverOutput gemini_output; pcs::shplonk::ProverOutput shplonk_output; - std::shared_ptr pcs_commitment_key; + std::shared_ptr commitment_key; using Gemini = pcs::gemini::GeminiProver_; using 
Shplonk = pcs::shplonk::ShplonkProver_; diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/goblin_merge/merge_prover.cpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/goblin_merge/merge_prover.cpp new file mode 100644 index 000000000000..ba5ed3909a70 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/goblin_merge/merge_prover.cpp @@ -0,0 +1,119 @@ +#include "merge_prover.hpp" + +namespace proof_system::honk { + +/** + * Create MergeProver_ + * + */ +template +MergeProver_::MergeProver_(std::shared_ptr commitment_key, std::shared_ptr op_queue) + : op_queue(op_queue) + , pcs_commitment_key(commitment_key) +{} + +/** + * @brief Prove proper construction of the aggregate Goblin ECC op queue polynomials T_i^(j), j = 1,2,3,4. + * @details Let T_i^(j) be the jth column of the aggregate op queue after incorporating the contribution from the + * present circuit. T_{i-1}^(j) corresponds to the aggregate op queue at the previous stage and $t_i^(j)$ represents + * the contribution from the present circuit only. For each j, we have the relationship T_i = T_{i-1} + right_shift(t_i, + * M_{i-1}), where the shift magnitude M_{i-1} is the length of T_{i-1}. This protocol demonstrates that the aggregate + * op queue has been constructed correctly via a simple Schwartz-Zippel check. Evaluations are proven via batched KZG. + * + * TODO(#746): Prove connection between t_i^{shift}, committed to herein, and t_i, used in the main protocol. See issue + * for details (https://github.com/AztecProtocol/barretenberg/issues/746). + * + * @tparam Flavor + * @return plonk::proof& + */ +template plonk::proof& MergeProver_::construct_proof() +{ + size_t N = op_queue->get_current_size(); + + // Extract T_i, T_{i-1} + auto T_current = op_queue->get_aggregate_transcript(); + auto T_prev = op_queue->get_previous_aggregate_transcript(); + // TODO(#723): Cannot currently support an empty T_{i-1}. Need to be able to properly handle zero commitment. 
+ ASSERT(T_prev[0].size() > 0); + + // Construct t_i^{shift} as T_i - T_{i-1} + std::array t_shift; + for (size_t i = 0; i < Flavor::NUM_WIRES; ++i) { + t_shift[i] = Polynomial(T_current[i]); + t_shift[i] -= T_prev[i]; + } + + // Compute/get commitments [t_i^{shift}], [T_{i-1}], and [T_i] and add to transcript + std::array C_T_current; + for (size_t idx = 0; idx < t_shift.size(); ++idx) { + // Get previous transcript commitment [T_{i-1}] from op queue + auto C_T_prev = op_queue->ultra_ops_commitments[idx]; + // Compute commitment [t_i^{shift}] directly + auto C_t_shift = pcs_commitment_key->commit(t_shift[idx]); + // Compute updated aggregate transcript commitment as [T_i] = [T_{i-1}] + [t_i^{shift}] + C_T_current[idx] = C_T_prev + C_t_shift; + + std::string suffix = std::to_string(idx + 1); + transcript.send_to_verifier("T_PREV_" + suffix, C_T_prev); + transcript.send_to_verifier("t_SHIFT_" + suffix, C_t_shift); + transcript.send_to_verifier("T_CURRENT_" + suffix, C_T_current[idx]); + } + + // Store the commitments [T_{i}] (to be used later in subsequent iterations as [T_{i-1}]). + op_queue->set_commitment_data(C_T_current); + + // Compute evaluations T_i(\kappa), T_{i-1}(\kappa), t_i^{shift}(\kappa), add to transcript. For each polynomial + // we add a univariate opening claim {p(X), (\kappa, p(\kappa))} to the set of claims to be checked via batched KZG. + auto kappa = transcript.get_challenge("kappa"); + + // Add univariate opening claims for each polynomial. 
+ std::vector opening_claims; + // Compute evaluation T_{i-1}(\kappa) + for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { + auto polynomial = Polynomial(T_prev[idx]); + auto evaluation = polynomial.evaluate(kappa); + transcript.send_to_verifier("T_prev_eval_" + std::to_string(idx + 1), evaluation); + opening_claims.emplace_back(OpeningClaim{ polynomial, { kappa, evaluation } }); + } + // Compute evaluation t_i^{shift}(\kappa) + for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { + auto evaluation = t_shift[idx].evaluate(kappa); + transcript.send_to_verifier("t_shift_eval_" + std::to_string(idx + 1), evaluation); + opening_claims.emplace_back(OpeningClaim{ t_shift[idx], { kappa, evaluation } }); + } + // Compute evaluation T_i(\kappa) + for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { + auto polynomial = Polynomial(T_current[idx]); + auto evaluation = polynomial.evaluate(kappa); + transcript.send_to_verifier("T_current_eval_" + std::to_string(idx + 1), evaluation); + opening_claims.emplace_back(OpeningClaim{ polynomial, { kappa, evaluation } }); + } + + auto alpha = transcript.get_challenge("alpha"); + + // Constuct batched polynomial to opened via KZG + auto batched_polynomial = Polynomial(N); + auto batched_eval = FF(0); + auto alpha_pow = FF(1); + for (auto& claim : opening_claims) { + batched_polynomial.add_scaled(claim.polynomial, alpha_pow); + batched_eval += alpha_pow * claim.opening_pair.evaluation; + alpha_pow *= alpha; + } + + // Construct and commit to KZG quotient polynomial q = (f - v) / (X - kappa) + auto quotient = batched_polynomial; + quotient[0] -= batched_eval; + quotient.factor_roots(kappa); + + auto quotient_commitment = pcs_commitment_key->commit(quotient); + transcript.send_to_verifier("KZG:W", quotient_commitment); + + proof.proof_data = transcript.proof_data; + return proof; +} + +template class MergeProver_; +template class MergeProver_; + +} // namespace proof_system::honk \ No newline at end of file diff --git 
a/barretenberg/cpp/src/barretenberg/honk/proof_system/goblin_merge/merge_prover.hpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/goblin_merge/merge_prover.hpp new file mode 100644 index 000000000000..7462018ec51f --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/goblin_merge/merge_prover.hpp @@ -0,0 +1,42 @@ +#pragma once + +#include "barretenberg/honk/flavor/goblin_ultra.hpp" +#include "barretenberg/honk/flavor/ultra.hpp" +#include "barretenberg/honk/pcs/claim.hpp" +#include "barretenberg/honk/transcript/transcript.hpp" +#include "barretenberg/plonk/proof_system/types/proof.hpp" +#include "barretenberg/proof_system/op_queue/ecc_op_queue.hpp" + +namespace proof_system::honk { + +/** + * @brief Prover class for the Goblin ECC op queue transcript merge protocol + * + * @tparam Flavor + */ +template class MergeProver_ { + using FF = typename Flavor::FF; + using Polynomial = typename Flavor::Polynomial; + using CommitmentKey = typename Flavor::CommitmentKey; + using Commitment = typename Flavor::Commitment; + using PCS = typename Flavor::PCS; + using Curve = typename Flavor::Curve; + using OpeningClaim = typename pcs::ProverOpeningClaim; + using OpeningPair = typename pcs::OpeningPair; + + public: + ProverTranscript transcript; + std::shared_ptr op_queue; + std::shared_ptr pcs_commitment_key; + + explicit MergeProver_(std::shared_ptr, std::shared_ptr); + plonk::proof& construct_proof(); + + private: + plonk::proof proof; +}; + +extern template class MergeProver_; +extern template class MergeProver_; + +} // namespace proof_system::honk \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/goblin_merge/merge_verifier.cpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/goblin_merge/merge_verifier.cpp new file mode 100644 index 000000000000..ca477c49eb6f --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/goblin_merge/merge_verifier.cpp @@ -0,0 +1,83 @@ +#include 
"merge_verifier.hpp" + +namespace proof_system::honk { + +template +MergeVerifier_::MergeVerifier_(std::unique_ptr verification_key) + : pcs_verification_key(std::move(verification_key)){}; + +/** + * @brief Verify proper construction of the aggregate Goblin ECC op queue polynomials T_i^(j), j = 1,2,3,4. + * @details Let T_i^(j) be the jth column of the aggregate op queue after incorporating the contribution from the + * present circuit. T_{i-1}^(j) corresponds to the aggregate op queue at the previous stage and $t_i^(j)$ represents + * the contribution from the present circuit only. For each j, we have the relationship T_i = T_{i-1} + right_shift(t_i, + * M_{i-1}), where the shift magnitude M_{i-1} is the length of T_{i-1}. This protocol verfies that the aggregate op + * queue has been constructed correctly via a simple Schwartz-Zippel check. Evaluations are checked via batched KZG. + * + * @tparam Flavor + * @return plonk::proof& + */ +template bool MergeVerifier_::verify_proof(const plonk::proof& proof) +{ + transcript = VerifierTranscript{ proof.proof_data }; + + // Receive commitments [t_i^{shift}], [T_{i-1}], and [T_i] + std::array C_T_prev; + std::array C_t_shift; + std::array C_T_current; + for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { + C_T_prev[idx] = transcript.template receive_from_prover("T_PREV_" + std::to_string(idx + 1)); + C_t_shift[idx] = transcript.template receive_from_prover("t_SHIFT_" + std::to_string(idx + 1)); + C_T_current[idx] = transcript.template receive_from_prover("T_CURRENT_" + std::to_string(idx + 1)); + } + + FF kappa = transcript.get_challenge("kappa"); + + // Receive transcript poly evaluations and add corresponding univariate opening claims {(\kappa, p(\kappa), [p(X)]} + std::array T_prev_evals; + std::array t_shift_evals; + std::array T_current_evals; + std::vector opening_claims; + for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { + T_prev_evals[idx] = transcript.template receive_from_prover("T_prev_eval_" + 
std::to_string(idx + 1)); + opening_claims.emplace_back(pcs::OpeningClaim{ { kappa, T_prev_evals[idx] }, C_T_prev[idx] }); + } + for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { + t_shift_evals[idx] = transcript.template receive_from_prover("t_shift_eval_" + std::to_string(idx + 1)); + opening_claims.emplace_back(pcs::OpeningClaim{ { kappa, t_shift_evals[idx] }, C_t_shift[idx] }); + } + for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { + T_current_evals[idx] = transcript.template receive_from_prover("T_current_eval_" + std::to_string(idx + 1)); + opening_claims.emplace_back(pcs::OpeningClaim{ { kappa, T_current_evals[idx] }, C_T_current[idx] }); + } + + // Check the identity T_i(\kappa) = T_{i-1}(\kappa) + t_i^{shift}(\kappa). If it fails, return false + bool identity_checked = true; + for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { + identity_checked = identity_checked && (T_current_evals[idx] == T_prev_evals[idx] + t_shift_evals[idx]); + } + + auto alpha = transcript.get_challenge("alpha"); + + // Constuct batched commitment and evaluation from constituents + auto batched_commitment = opening_claims[0].commitment; + auto batched_eval = opening_claims[0].opening_pair.evaluation; + auto alpha_pow = alpha; + for (size_t idx = 1; idx < opening_claims.size(); ++idx) { + auto& claim = opening_claims[idx]; + batched_commitment = batched_commitment + (claim.commitment * alpha_pow); + batched_eval += alpha_pow * claim.opening_pair.evaluation; + alpha_pow *= alpha; + } + + OpeningClaim batched_claim = { { kappa, batched_eval }, batched_commitment }; + + auto verified = PCS::verify(pcs_verification_key, batched_claim, transcript); + + return identity_checked && verified; +} + +template class MergeVerifier_; +template class MergeVerifier_; + +} // namespace proof_system::honk \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/goblin_merge/merge_verifier.hpp 
b/barretenberg/cpp/src/barretenberg/honk/proof_system/goblin_merge/merge_verifier.hpp new file mode 100644 index 000000000000..b195cd92a1ed --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/goblin_merge/merge_verifier.hpp @@ -0,0 +1,40 @@ +#pragma once + +#include "barretenberg/honk/flavor/goblin_ultra.hpp" +#include "barretenberg/honk/flavor/ultra.hpp" +#include "barretenberg/honk/pcs/claim.hpp" +#include "barretenberg/honk/transcript/transcript.hpp" +#include "barretenberg/plonk/proof_system/types/proof.hpp" +#include "barretenberg/proof_system/op_queue/ecc_op_queue.hpp" + +namespace proof_system::honk { + +/** + * @brief Verifier class for the Goblin ECC op queue transcript merge protocol + * + * @tparam Flavor + */ +template class MergeVerifier_ { + using FF = typename Flavor::FF; + using Polynomial = typename Flavor::Polynomial; + using CommitmentKey = typename Flavor::CommitmentKey; + using Commitment = typename Flavor::Commitment; + using PCS = typename Flavor::PCS; + using Curve = typename Flavor::Curve; + using OpeningClaim = typename pcs::OpeningClaim; + using VerificationKey = typename Flavor::VerificationKey; + using VerifierCommitmentKey = typename Flavor::VerifierCommitmentKey; + + public: + VerifierTranscript transcript; + std::shared_ptr op_queue; + std::shared_ptr pcs_verification_key; + + explicit MergeVerifier_(std::unique_ptr verification_key); + bool verify_proof(const plonk::proof& proof); +}; + +extern template class MergeVerifier_; +extern template class MergeVerifier_; + +} // namespace proof_system::honk \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/grand_product_library.hpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/grand_product_library.hpp index cd15a36c1f48..fa6334e23032 100644 --- a/barretenberg/cpp/src/barretenberg/honk/proof_system/grand_product_library.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/grand_product_library.hpp @@ -47,12 
+47,12 @@ namespace proof_system::honk::grand_product_library { */ template void compute_grand_product(const size_t circuit_size, - auto& full_polynomials, + typename Flavor::ProverPolynomials& full_polynomials, proof_system::RelationParameters& relation_parameters) { using FF = typename Flavor::FF; using Polynomial = typename Flavor::Polynomial; - using ValueAccumulatorsAndViews = typename GrandProdRelation::ValueAccumulatorsAndViews; + using Accumulator = std::tuple_element_t<0, typename GrandProdRelation::ArrayOfValuesOverSubrelations>; // Allocate numerator/denominator polynomials that will serve as scratch space // TODO(zac) we can re-use the permutation polynomial as the numerator polynomial. Reduces readability @@ -67,15 +67,14 @@ void compute_grand_product(const size_t circuit_size, const size_t start = thread_idx * block_size; const size_t end = (thread_idx + 1) * block_size; for (size_t i = start; i < end; ++i) { - - typename Flavor::ProverPolynomialsEvaluations evaluations; + typename Flavor::AllValues evaluations; for (size_t k = 0; k < Flavor::NUM_ALL_ENTITIES; ++k) { evaluations[k] = full_polynomials[k].size() > i ? 
full_polynomials[k][i] : 0; } - numerator[i] = GrandProdRelation::template compute_grand_product_numerator( - evaluations, relation_parameters, i); - denominator[i] = GrandProdRelation::template compute_grand_product_denominator( - evaluations, relation_parameters, i); + numerator[i] = GrandProdRelation::template compute_grand_product_numerator( + evaluations, relation_parameters); + denominator[i] = GrandProdRelation::template compute_grand_product_denominator( + evaluations, relation_parameters); } }); diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/lookup_library.hpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/lookup_library.hpp index bb590b49a52c..a2c83cfc8d8a 100644 --- a/barretenberg/cpp/src/barretenberg/honk/proof_system/lookup_library.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/lookup_library.hpp @@ -23,33 +23,33 @@ namespace proof_system::honk::lookup_library { * The specific algebraic relations that define read terms and write terms are defined in Flavor::LookupRelation * */ -template -void compute_logderivative_inverse(auto& polynomials, +template +void compute_logderivative_inverse(Polynomials& polynomials, proof_system::RelationParameters& relation_parameters, const size_t circuit_size) { using FF = typename Flavor::FF; - using Accumulator = typename Relation::ValueAccumulatorsAndViews; + using Accumulator = typename Relation::ValueAccumulator0; constexpr size_t READ_TERMS = Relation::READ_TERMS; constexpr size_t WRITE_TERMS = Relation::WRITE_TERMS; auto& inverse_polynomial = polynomials.lookup_inverses; auto lookup_relation = Relation(); for (size_t i = 0; i < circuit_size; ++i) { - bool has_inverse = - lookup_relation.template lookup_exists_at_row_index(polynomials, relation_parameters, i); + auto row = polynomials.get_row(i); + bool has_inverse = lookup_relation.lookup_exists_at_row(row); if (!has_inverse) { continue; } FF denominator = 1; barretenberg::constexpr_for<0, READ_TERMS, 1>([&] { - auto 
denominator_term = lookup_relation.template compute_read_term( - polynomials, relation_parameters, i); + auto denominator_term = + lookup_relation.template compute_read_term(row, relation_parameters); denominator *= denominator_term; }); barretenberg::constexpr_for<0, WRITE_TERMS, 1>([&] { - auto denominator_term = lookup_relation.template compute_write_term( - polynomials, relation_parameters, i); + auto denominator_term = + lookup_relation.template compute_write_term(row, relation_parameters); denominator *= denominator_term; }); inverse_polynomial[i] = denominator; diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/permutation_library.hpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/permutation_library.hpp index 91c72d224b50..e61c4a311d23 100644 --- a/barretenberg/cpp/src/barretenberg/honk/proof_system/permutation_library.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/permutation_library.hpp @@ -42,14 +42,14 @@ namespace proof_system::honk::permutation_library { * * Note: Step (3) utilizes Montgomery batch inversion to replace n-many inversions with */ -template +template void compute_permutation_grand_product(const size_t circuit_size, auto& full_polynomials, proof_system::RelationParameters& relation_parameters) { using FF = typename Flavor::FF; using Polynomial = typename Flavor::Polynomial; - using ValueAccumulatorsAndViews = typename PermutationRelation::ValueAccumulatorsAndViews; + using Accumulator = std::tuple_element_t<0, typename GrandProdRelation::ArrayOfValuesOverSubrelations>; // Allocate numerator/denominator polynomials that will serve as scratch space // TODO(zac) we can re-use the permutation polynomial as the numerator polynomial. 
@@ -58,7 +58,7 @@ void compute_permutation_grand_product(const size_t circuit_size, Polynomial denominator = Polynomial{ circuit_size }; // Step (1) - // Populate `numerator` and `denominator` with the algebra described by PermutationRelation + // Populate `numerator` and `denominator` with the algebra described by GrandProdRelation static constexpr size_t MIN_CIRCUIT_SIZE_TO_MULTITHREAD = 64; const size_t num_threads = circuit_size >= MIN_CIRCUIT_SIZE_TO_MULTITHREAD ? (circuit_size >= get_num_cpus_pow2() ? get_num_cpus_pow2() : 1) @@ -69,14 +69,14 @@ void compute_permutation_grand_product(const size_t circuit_size, const size_t end = (thread_idx + 1) * block_size; for (size_t i = start; i < end; ++i) { - typename Flavor::ProverPolynomialsEvaluations evaluations; + typename Flavor::AllValues evaluations; for (size_t k = 0; k < Flavor::NUM_ALL_ENTITIES; ++k) { evaluations[k] = full_polynomials[k].size() > i ? full_polynomials[k][i] : 0; } - numerator[i] = PermutationRelation::template compute_permutation_numerator( - evaluations, relation_parameters, i); - denominator[i] = PermutationRelation::template compute_permutation_denominator( - evaluations, relation_parameters, i); + numerator[i] = GrandProdRelation::template compute_permutation_numerator(evaluations, + relation_parameters); + denominator[i] = GrandProdRelation::template compute_permutation_denominator( + evaluations, relation_parameters); } }); @@ -128,7 +128,7 @@ void compute_permutation_grand_product(const size_t circuit_size, }); // Step (3) Compute z_perm[i] = numerator[i] / denominator[i] - auto& grand_product_polynomial = PermutationRelation::get_grand_product_polynomial(full_polynomials); + auto& grand_product_polynomial = GrandProdRelation::get_grand_product_polynomial(full_polynomials); grand_product_polynomial[0] = 0; parallel_for(num_threads, [&](size_t thread_idx) { const size_t start = thread_idx * block_size; diff --git 
a/barretenberg/cpp/src/barretenberg/honk/proof_system/protogalaxy_prover.cpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/protogalaxy_prover.cpp index d14858bd80a3..ceea8fc7863c 100644 --- a/barretenberg/cpp/src/barretenberg/honk/proof_system/protogalaxy_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/protogalaxy_prover.cpp @@ -2,12 +2,6 @@ #include "barretenberg/proof_system/flavor/flavor.hpp" namespace proof_system::honk { -/** - * @brief Prior to folding we need to add all the public inputs to the transcript, labelled by their corresponding - * instance index, compute all the instance's polynomials and record the relation parameters involved in computing these - * polynomials in the transcript. - * - */ template void ProtoGalaxyProver_::prepare_for_folding() { // this doesnt work in the current format @@ -54,15 +48,11 @@ ProverFoldingResult ProtoGalaxyProver_ res; res.folding_data = transcript.proof_data; return res; } template class ProtoGalaxyProver_>; -template class ProtoGalaxyProver_>; template class ProtoGalaxyProver_>; } // namespace proof_system::honk \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/protogalaxy_prover.hpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/protogalaxy_prover.hpp index 95e36f659025..7dc2f192a33a 100644 --- a/barretenberg/cpp/src/barretenberg/honk/proof_system/protogalaxy_prover.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/protogalaxy_prover.hpp @@ -1,7 +1,6 @@ #pragma once #include "barretenberg/honk/flavor/goblin_ultra.hpp" #include "barretenberg/honk/flavor/ultra.hpp" -#include "barretenberg/honk/flavor/ultra_grumpkin.hpp" #include "barretenberg/honk/instance/instances.hpp" #include "barretenberg/honk/proof_system/folding_result.hpp" #include "barretenberg/proof_system/flavor/flavor.hpp" @@ -12,10 +11,10 @@ template class ProtoGalaxyProver_ { using Flavor = typename ProverInstances::Flavor; using Instance = typename 
ProverInstances::Instance; using Utils = barretenberg::RelationUtils; - using RowEvaluations = typename Flavor::ProverPolynomialsEvaluations; - using RelationEvaluations = typename Flavor::RelationValues; - using FF = typename Flavor::FF; + using RowEvaluations = typename Flavor::AllValues; using ProverPolynomials = typename Flavor::ProverPolynomials; + using FF = typename Flavor::FF; + using TupleOfArraysOfValues = typename Flavor::TupleOfArraysOfValues; ProverInstances instances; ProverTranscript transcript; @@ -24,6 +23,12 @@ template class ProtoGalaxyProver_ { : instances(insts){}; ~ProtoGalaxyProver_() = default; + /** + * @brief Prior to folding we need to add all the public inputs to the transcript, labelled by their corresponding + * instance index, compute all the instance's polynomials and record the relation parameters involved in computing + * these polynomials in the transcript. + * + */ void prepare_for_folding(); /** @@ -40,22 +45,12 @@ template class ProtoGalaxyProver_ { return pows; } + // Returns the accumulator, which is the first element in ProverInstances. The accumulator is assumed to have the + // FoldingParameters set and be the result of a previous round of folding. + // TODO(https://github.com/AztecProtocol/barretenberg/issues/740): handle the case when the accumulator is empty + // (i.e. we are in the first round of folding)/ std::shared_ptr get_accumulator() { return instances[0]; } - /** - * @brief Compute the value of the full Honk relation at a row in the execution trace. 
- */ - static RowEvaluations get_execution_row(ProverPolynomials instance_polynomials, size_t row) - { - RowEvaluations row_evals; - size_t idx = 0; - for (auto& poly : instance_polynomials) { - row_evals[idx] = poly[row]; - idx++; - } - return row_evals; - } - /** * @brief Compute the values of the full Honk relation at each row in the execution trace, f_i(ω) in the * ProtoGalaxy paper, given the evaluations of all the prover polynomials and α (the parameter that helps establish @@ -69,8 +64,8 @@ template class ProtoGalaxyProver_ { std::vector full_honk_evaluations(instance_size); for (size_t row = 0; row < instance_size; row++) { - auto row_evaluations = get_execution_row(instance_polynomials, row); - RelationEvaluations relation_evaluations; + auto row_evaluations = instance_polynomials.get_row(row); + TupleOfArraysOfValues relation_evaluations; Utils::zero_elements(relation_evaluations); // Note that the evaluations are accumulated with the gate separation challenge being 1 at this stage, as @@ -87,13 +82,14 @@ template class ProtoGalaxyProver_ { } /** - * @brief Compute the parent nodes at the current level. Note that the resulting parent nodes will be polynomials - * degree (level + 1) as at each level we multiply by an additional factor of X. + * @brief Recursively compute the parent nodes of each level in there, starting from the leaves. Note that at each + * level, the resulting parent nodes will be polynomials of degree (level + 1) because we multiply by an additional + * factor of X. 
*/ - static std::vector compute_level(size_t level, - std::vector betas, - std::vector deltas, - std::vector> prev_level_coeffs) + static std::vector compute_coefficients_tree(size_t level, + std::vector betas, + std::vector deltas, + std::vector> prev_level_coeffs) { // if we are at level t in the tree, where t = logn and n is the instance size, we have reached the root which // contains the coefficients of the perturbator polynomial @@ -104,16 +100,16 @@ template class ProtoGalaxyProver_ { auto degree = level + 1; auto prev_level_width = prev_level_coeffs.size(); // we need degree + 1 terms to represent the intermediate polynomials - std::vector> level_coeffs(prev_level_width / 2, std::vector(degree + 1, 0)); + std::vector> level_coeffs(prev_level_width >> 1, std::vector(degree + 1, 0)); for (size_t node = 0; node < prev_level_width; node += 2) { - auto parent = node / 2; + auto parent = node >> 1; std::copy(prev_level_coeffs[node].begin(), prev_level_coeffs[node].end(), level_coeffs[parent].begin()); for (size_t d = 0; d < degree; d++) { level_coeffs[parent][d] += prev_level_coeffs[node + 1][d] * betas[level]; level_coeffs[parent][d + 1] += prev_level_coeffs[node + 1][d] * deltas[level]; } } - return compute_level(level + 1, betas, deltas, level_coeffs); + return compute_coefficients_tree(level + 1, betas, deltas, level_coeffs); } /** @@ -132,13 +128,13 @@ template class ProtoGalaxyProver_ { { auto width = full_honk_evaluations.size(); - std::vector> first_level_coeffs(width / 2, std::vector(2, 0)); + std::vector> first_level_coeffs(width >> 1, std::vector(2, 0)); for (size_t node = 0; node < width; node += 2) { - auto parent = node / 2; + auto parent = node >> 1; first_level_coeffs[parent][0] = full_honk_evaluations[node] + full_honk_evaluations[node + 1] * betas[0]; first_level_coeffs[parent][1] = full_honk_evaluations[node + 1] * deltas[0]; } - return compute_level(1, betas, deltas, first_level_coeffs); + return compute_coefficients_tree(1, betas, deltas, 
first_level_coeffs); } /** @@ -149,7 +145,7 @@ template class ProtoGalaxyProver_ { { auto full_honk_evaluations = compute_full_honk_evaluations(accumulator->prover_polynomials, alpha, accumulator->relation_parameters); - auto betas = accumulator->folding_params.gate_separation_challenges; + auto betas = accumulator->folding_parameters.gate_separation_challenges; assert(betas.size() == deltas.size()); auto coeffs = construct_perturbator_coeffs(betas, deltas, full_honk_evaluations); return Polynomial(coeffs); @@ -159,6 +155,5 @@ template class ProtoGalaxyProver_ { }; extern template class ProtoGalaxyProver_>; -extern template class ProtoGalaxyProver_>; extern template class ProtoGalaxyProver_>; } // namespace proof_system::honk \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/protogalaxy_verifier.cpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/protogalaxy_verifier.cpp index b04cd7ad7d4a..57397318541e 100644 --- a/barretenberg/cpp/src/barretenberg/honk/proof_system/protogalaxy_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/protogalaxy_verifier.cpp @@ -46,6 +46,5 @@ VerifierFoldingResult ProtoGalaxyVerifier_< } template class ProtoGalaxyVerifier_>; -template class ProtoGalaxyVerifier_>; template class ProtoGalaxyVerifier_>; } // namespace proof_system::honk \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/protogalaxy_verifier.hpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/protogalaxy_verifier.hpp index 81b026acf032..309fcadcdd58 100644 --- a/barretenberg/cpp/src/barretenberg/honk/proof_system/protogalaxy_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/protogalaxy_verifier.hpp @@ -1,7 +1,6 @@ #pragma once #include "barretenberg/honk/flavor/goblin_ultra.hpp" #include "barretenberg/honk/flavor/ultra.hpp" -#include "barretenberg/honk/flavor/ultra_grumpkin.hpp" #include "barretenberg/honk/instance/instances.hpp" #include 
"barretenberg/honk/proof_system/folding_result.hpp" #include "barretenberg/honk/transcript/transcript.hpp" @@ -40,6 +39,5 @@ template class ProtoGalaxyVerifier_ { }; extern template class ProtoGalaxyVerifier_>; -extern template class ProtoGalaxyVerifier_>; extern template class ProtoGalaxyVerifier_>; } // namespace proof_system::honk \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/ultra_prover.cpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/ultra_prover.cpp index ad3151b45bda..fbfc5d5fc111 100644 --- a/barretenberg/cpp/src/barretenberg/honk/proof_system/ultra_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/ultra_prover.cpp @@ -1,9 +1,6 @@ #include "ultra_prover.hpp" -#include "barretenberg/honk/pcs/claim.hpp" #include "barretenberg/honk/sumcheck/sumcheck.hpp" #include "barretenberg/honk/utils/power_polynomial.hpp" -#include "barretenberg/polynomials/polynomial.hpp" -#include "barretenberg/transcript/transcript_wrappers.hpp" namespace proof_system::honk { @@ -16,9 +13,8 @@ namespace proof_system::honk { * */ template UltraProver_::UltraProver_(std::shared_ptr inst) - : queue(inst->commitment_key, transcript) - , instance(std::move(inst)) - , pcs_commitment_key(instance->commitment_key) + : instance(std::move(inst)) + , commitment_key(instance->commitment_key) { instance->initialise_prover_polynomials(); } @@ -54,14 +50,14 @@ template void UltraProver_::execute_wire_commitment auto wire_polys = instance->proving_key->get_wires(); auto labels = commitment_labels.get_wires(); for (size_t idx = 0; idx < 3; ++idx) { - queue.add_commitment(wire_polys[idx], labels[idx]); + transcript.send_to_verifier(labels[idx], commitment_key->commit(wire_polys[idx])); } if constexpr (IsGoblinFlavor) { auto op_wire_polys = instance->proving_key->get_ecc_op_wires(); auto labels = commitment_labels.get_ecc_op_wires(); for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - queue.add_commitment(op_wire_polys[idx], 
labels[idx]); + transcript.send_to_verifier(labels[idx], commitment_key->commit(op_wire_polys[idx])); } } } @@ -78,8 +74,10 @@ template void UltraProver_::execute_sorted_list_acc // Commit to the sorted withness-table accumulator and the finalised (i.e. with memory records) fourth wire // polynomial - queue.add_commitment(instance->proving_key->sorted_accum, commitment_labels.sorted_accum); - queue.add_commitment(instance->proving_key->w_4, commitment_labels.w_4); + auto sorted_accum_commitment = commitment_key->commit(instance->proving_key->sorted_accum); + auto w_4_commitment = commitment_key->commit(instance->proving_key->w_4); + transcript.send_to_verifier(commitment_labels.sorted_accum, sorted_accum_commitment); + transcript.send_to_verifier(commitment_labels.w_4, w_4_commitment); } /** @@ -93,8 +91,10 @@ template void UltraProver_::execute_grand_product_c instance->compute_grand_product_polynomials(beta, gamma); - queue.add_commitment(instance->proving_key->z_perm, commitment_labels.z_perm); - queue.add_commitment(instance->proving_key->z_lookup, commitment_labels.z_lookup); + auto z_perm_commitment = commitment_key->commit(instance->proving_key->z_perm); + auto z_lookup_commitment = commitment_key->commit(instance->proving_key->z_lookup); + transcript.send_to_verifier(commitment_labels.z_perm, z_perm_commitment); + transcript.send_to_verifier(commitment_labels.z_lookup, z_lookup_commitment); } /** @@ -111,202 +111,18 @@ template void UltraProver_::execute_relation_check_ } /** - * - Get rho challenge - * - Compute d+1 Fold polynomials and their evaluations. + * @brief Execute the ZeroMorph protocol to prove the multilinear evaluations produced by Sumcheck + * @details See https://hackmd.io/dlf9xEwhTQyE3hiGbq4FsA?view for a complete description of the unrolled protocol. 
* * */ -template void UltraProver_::execute_univariatization_round() +template void UltraProver_::execute_zeromorph_rounds() { - const size_t NUM_POLYNOMIALS = Flavor::NUM_ALL_ENTITIES; - - // Generate batching challenge ρ and powers 1,ρ,…,ρᵐ⁻¹ - FF rho = transcript.get_challenge("rho"); - std::vector rhos = pcs::gemini::powers_of_rho(rho, NUM_POLYNOMIALS); - - // Batch the unshifted polynomials and the to-be-shifted polynomials using ρ - Polynomial batched_poly_unshifted(instance->proving_key->circuit_size); // batched unshifted polynomials - size_t poly_idx = 0; // TODO(#391) zip - for (auto& unshifted_poly : instance->prover_polynomials.get_unshifted()) { - batched_poly_unshifted.add_scaled(unshifted_poly, rhos[poly_idx]); - ++poly_idx; - } - - Polynomial batched_poly_to_be_shifted(instance->proving_key->circuit_size); // batched to-be-shifted polynomials - for (auto& to_be_shifted_poly : instance->prover_polynomials.get_to_be_shifted()) { - batched_poly_to_be_shifted.add_scaled(to_be_shifted_poly, rhos[poly_idx]); - ++poly_idx; - }; - - // Compute d-1 polynomials Fold^(i), i = 1, ..., d-1. - gemini_polynomials = Gemini::compute_gemini_polynomials( - sumcheck_output.challenge, std::move(batched_poly_unshifted), std::move(batched_poly_to_be_shifted)); - - // Compute and add to trasnscript the commitments [Fold^(i)], i = 1, ..., d-1 - for (size_t l = 0; l < instance->proving_key->log_circuit_size - 1; ++l) { - queue.add_commitment(gemini_polynomials[l + 2], "Gemini:FOLD_" + std::to_string(l + 1)); - } -} - -/** - * - Do Fiat-Shamir to get "r" challenge - * - Compute remaining two partially evaluated Fold polynomials Fold_{r}^(0) and Fold_{-r}^(0). - * - Compute and aggregate opening pairs (challenge, evaluation) for each of d Fold polynomials. 
- * - Add d-many Fold evaluations a_i, i = 0, ..., d-1 to the transcript, excluding eval of Fold_{r}^(0) - * */ -template void UltraProver_::execute_pcs_evaluation_round() -{ - const FF r_challenge = transcript.get_challenge("Gemini:r"); - univariate_openings = Gemini::compute_fold_polynomial_evaluations( - sumcheck_output.challenge, std::move(gemini_polynomials), r_challenge); - - for (size_t l = 0; l < instance->proving_key->log_circuit_size; ++l) { - std::string label = "Gemini:a_" + std::to_string(l); - const auto& evaluation = univariate_openings.opening_pairs[l + 1].evaluation; - transcript.send_to_verifier(label, evaluation); - } -} - -/** - * @brief Prove proper construction of the aggregate Goblin ECC op queue polynomials T_i^(j), j = 1,2,3,4. - * @details Let T_i^(j) be the jth column of the aggregate op queue after incorporating the contribution from the - * present circuit. T_{i-1}^(j) corresponds to the aggregate op queue at the previous stage and $t_i^(j)$ represents - * the contribution from the present circuit only. For each j, we have the relationship T_i = T_{i-1} + right_shift(t_i, - * M_{i-1}), where the shift magnitude M_{i-1} is the length of T_{i-1}. This stage of the protocol demonstrates that - * the aggregate op queue has been constructed correctly. - * - */ -template void UltraProver_::execute_op_queue_transcript_aggregation_round() -{ - if constexpr (IsGoblinFlavor) { - // Extract size M_{i-1} of T_{i-1} from op_queue - size_t prev_op_queue_size = instance->proving_key->op_queue->get_previous_size(); // M_{i-1} - // TODO(#723): Cannot currently support an empty T_{i-1}. Need to be able to properly handle zero commitment. - ASSERT(prev_op_queue_size > 0); - - auto circuit_size = instance->proving_key->circuit_size; - - // TODO(#723): The below assert ensures that M_{i-1} + m_i < n, i.e. the right shifted result can be expressed - // as a size n polynomial. 
If this is not the case then we should still be able to proceed without increasing - // the circuit size but need to handle with care. - ASSERT(prev_op_queue_size + instance->proving_key->num_ecc_op_gates < circuit_size); // M_{i-1} + m_i < n - - // Construct right-shift of op wires t_i^{shift} so that T_i(X) = T_{i-1}(X) + t_i^{shift}(X). - // Note: The op_wire polynomials (like all others) have constant coefficient equal to zero. Thus to obtain - // t_i^{shift} we must left-shift by 1 then right-shift by M_{i-1}, or equivalently, right-shift by - // M_{i-1} - 1. - std::array right_shifted_op_wires; - auto op_wires = instance->proving_key->get_ecc_op_wires(); - for (size_t i = 0; i < op_wires.size(); ++i) { - // Right shift by M_{i-1} - 1. - right_shifted_op_wires[i].set_to_right_shifted(op_wires[i], prev_op_queue_size - 1); - } - - // Compute/get commitments [t_i^{shift}], [T_{i-1}], and [T_i] and add to transcript - std::array prev_aggregate_op_queue_commitments; - std::array shifted_op_wire_commitments; - std::array aggregate_op_queue_commitments; - for (size_t idx = 0; idx < right_shifted_op_wires.size(); ++idx) { - // Get previous transcript commitment [T_{i-1}] from op queue - prev_aggregate_op_queue_commitments[idx] = instance->proving_key->op_queue->ultra_ops_commitments[idx]; - // Compute commitment [t_i^{shift}] directly - shifted_op_wire_commitments[idx] = pcs_commitment_key->commit(right_shifted_op_wires[idx]); - // Compute updated aggregate transcript commitmen as [T_i] = [T_{i-1}] + [t_i^{shift}] - aggregate_op_queue_commitments[idx] = - prev_aggregate_op_queue_commitments[idx] + shifted_op_wire_commitments[idx]; - - std::string suffix = std::to_string(idx + 1); - transcript.send_to_verifier("PREV_AGG_OP_QUEUE_" + suffix, prev_aggregate_op_queue_commitments[idx]); - transcript.send_to_verifier("SHIFTED_OP_WIRE_" + suffix, shifted_op_wire_commitments[idx]); - transcript.send_to_verifier("AGG_OP_QUEUE_" + suffix, aggregate_op_queue_commitments[idx]); - 
} - - // Store the commitments [T_{i}] (to be used later in subsequent iterations as [T_{i-1}]). - instance->proving_key->op_queue->set_commitment_data(aggregate_op_queue_commitments); - - // Compute evaluations T_i(\kappa), T_{i-1}(\kappa), t_i^{shift}(\kappa), add to transcript. For each polynomial - // we add a univariate opening claim {(\kappa, p(\kappa)), p(X)} to the set of claims to be combined in the - // batch univariate polynomial Q in Shplonk. (The other univariate claims come from the output of Gemini). - // TODO(#729): It should be possible to reuse the opening challenge from Gemini rather than generate a new one. - auto kappa = transcript.get_challenge("kappa"); - auto prev_aggregate_ecc_op_transcript = instance->proving_key->op_queue->get_previous_aggregate_transcript(); - auto aggregate_ecc_op_transcript = instance->proving_key->op_queue->get_aggregate_transcript(); - std::array prev_agg_op_queue_evals; - std::array right_shifted_op_wire_evals; - std::array agg_op_queue_evals; - std::array prev_agg_op_queue_polynomials; - std::array agg_op_queue_polynomials; - for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - std::string suffix = std::to_string(idx + 1); - - // Compute evaluation T_{i-1}(\kappa) - prev_agg_op_queue_polynomials[idx] = Polynomial(prev_aggregate_ecc_op_transcript[idx]); - prev_agg_op_queue_evals[idx] = prev_agg_op_queue_polynomials[idx].evaluate(kappa); - transcript.send_to_verifier("prev_agg_op_queue_eval_" + suffix, prev_agg_op_queue_evals[idx]); - - // Compute evaluation t_i^{shift}(\kappa) - right_shifted_op_wire_evals[idx] = right_shifted_op_wires[idx].evaluate(kappa); - transcript.send_to_verifier("op_wire_eval_" + suffix, right_shifted_op_wire_evals[idx]); - - // Compute evaluation T_i(\kappa) - agg_op_queue_polynomials[idx] = Polynomial(aggregate_ecc_op_transcript[idx]); - agg_op_queue_evals[idx] = agg_op_queue_polynomials[idx].evaluate(kappa); - transcript.send_to_verifier("agg_op_queue_eval_" + suffix, 
agg_op_queue_evals[idx]); - } - - // Add univariate opening claims for each polynomial. - for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - univariate_openings.opening_pairs.emplace_back(OpenPair{ kappa, prev_agg_op_queue_evals[idx] }); - univariate_openings.witnesses.emplace_back(std::move(prev_agg_op_queue_polynomials[idx])); - } - for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - univariate_openings.opening_pairs.emplace_back(OpenPair{ kappa, right_shifted_op_wire_evals[idx] }); - univariate_openings.witnesses.emplace_back(std::move(right_shifted_op_wires[idx])); - } - for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - univariate_openings.opening_pairs.emplace_back(OpenPair{ kappa, agg_op_queue_evals[idx] }); - univariate_openings.witnesses.emplace_back(std::move(agg_op_queue_polynomials[idx])); - } - } -} - -/** - * - Do Fiat-Shamir to get "nu" challenge. - * - Compute commitment [Q]_1 - * */ -template void UltraProver_::execute_shplonk_batched_quotient_round() -{ - nu_challenge = transcript.get_challenge("Shplonk:nu"); - - batched_quotient_Q = Shplonk::compute_batched_quotient( - univariate_openings.opening_pairs, univariate_openings.witnesses, nu_challenge); - - // commit to Q(X) and add [Q] to the transcript - queue.add_commitment(batched_quotient_Q, "Shplonk:Q"); -} - -/** - * - Do Fiat-Shamir to get "z" challenge. 
- * - Compute polynomial Q(X) - Q_z(X) - * */ -template void UltraProver_::execute_shplonk_partial_evaluation_round() -{ - const FF z_challenge = transcript.get_challenge("Shplonk:z"); - - shplonk_output = Shplonk::compute_partially_evaluated_batched_quotient(univariate_openings.opening_pairs, - univariate_openings.witnesses, - std::move(batched_quotient_Q), - nu_challenge, - z_challenge); -} -/** - * - Compute final PCS opening proof: - * - For KZG, this is the quotient commitmecnt [W]_1 - * - For IPA, the vectors L and R - * */ -template void UltraProver_::execute_final_pcs_round() -{ - PCS::compute_opening_proof(pcs_commitment_key, shplonk_output.opening_pair, shplonk_output.witness, transcript); - // queue.add_commitment(quotient_W, "KZG:W"); + ZeroMorph::prove(instance->prover_polynomials.get_unshifted(), + instance->prover_polynomials.get_to_be_shifted(), + sumcheck_output.claimed_evaluations, + sumcheck_output.challenge, + commitment_key, + transcript); } template plonk::proof& UltraProver_::export_proof() @@ -322,51 +138,26 @@ template plonk::proof& UltraProver_::construct_proo // Compute first three wire commitments execute_wire_commitments_round(); - queue.process_queue(); // Compute sorted list accumulator and commitment execute_sorted_list_accumulator_round(); - queue.process_queue(); // Fiat-Shamir: beta & gamma // Compute grand product(s) and commitments. execute_grand_product_computation_round(); - queue.process_queue(); // Fiat-Shamir: alpha // Run sumcheck subprotocol. execute_relation_check_rounds(); - // Fiat-Shamir: rho - // Compute Fold polynomials and their commitments. 
- execute_univariatization_round(); - queue.process_queue(); - - // Fiat-Shamir: r - // Compute Fold evaluations - execute_pcs_evaluation_round(); - - // ECC op queue transcript aggregation - execute_op_queue_transcript_aggregation_round(); - - // Fiat-Shamir: nu - // Compute Shplonk batched quotient commitment Q - execute_shplonk_batched_quotient_round(); - queue.process_queue(); - - // Fiat-Shamir: z - // Compute partial evaluation Q_z - execute_shplonk_partial_evaluation_round(); - - // Fiat-Shamir: z - // Compute PCS opening proof (either KZG quotient commitment or IPA opening proof) - execute_final_pcs_round(); + // Fiat-Shamir: rho, y, x, z + // Execute Zeromorph multilinear PCS + execute_zeromorph_rounds(); return export_proof(); } template class UltraProver_; -template class UltraProver_; template class UltraProver_; } // namespace proof_system::honk diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/ultra_prover.hpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/ultra_prover.hpp index 43388fa380a0..4be07e693a87 100644 --- a/barretenberg/cpp/src/barretenberg/honk/proof_system/ultra_prover.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/ultra_prover.hpp @@ -1,11 +1,8 @@ #pragma once #include "barretenberg/honk/flavor/goblin_ultra.hpp" #include "barretenberg/honk/flavor/ultra.hpp" -#include "barretenberg/honk/flavor/ultra_grumpkin.hpp" #include "barretenberg/honk/instance/prover_instance.hpp" -#include "barretenberg/honk/pcs/gemini/gemini.hpp" -#include "barretenberg/honk/pcs/shplonk/shplonk.hpp" -#include "barretenberg/honk/proof_system/work_queue.hpp" +#include "barretenberg/honk/pcs/zeromorph/zeromorph.hpp" #include "barretenberg/honk/sumcheck/sumcheck_output.hpp" #include "barretenberg/honk/transcript/transcript.hpp" #include "barretenberg/plonk/proof_system/types/proof.hpp" @@ -16,7 +13,6 @@ namespace proof_system::honk { template class UltraProver_ { using FF = typename Flavor::FF; using Commitment = typename 
Flavor::Commitment; - using PCS = typename Flavor::PCS; using CommitmentKey = typename Flavor::CommitmentKey; using ProvingKey = typename Flavor::ProvingKey; using Polynomial = typename Flavor::Polynomial; @@ -24,7 +20,6 @@ template class UltraProver_ { using CommitmentLabels = typename Flavor::CommitmentLabels; using Curve = typename Flavor::Curve; using Instance = ProverInstance_; - using OpenPair = pcs::OpeningPair; public: explicit UltraProver_(std::shared_ptr); @@ -33,12 +28,7 @@ template class UltraProver_ { void execute_sorted_list_accumulator_round(); void execute_grand_product_computation_round(); void execute_relation_check_rounds(); - void execute_univariatization_round(); - void execute_pcs_evaluation_round(); - void execute_op_queue_transcript_aggregation_round(); - void execute_shplonk_batched_quotient_round(); - void execute_shplonk_partial_evaluation_round(); - void execute_final_pcs_round(); + void execute_zeromorph_rounds(); plonk::proof& export_proof(); plonk::proof& construct_proof(); @@ -50,32 +40,21 @@ template class UltraProver_ { CommitmentLabels commitment_labels; - // Container for d + 1 Fold polynomials produced by Gemini - std::vector gemini_polynomials; - - Polynomial batched_quotient_Q; // batched quotient poly computed by Shplonk - FF nu_challenge; // needed in both Shplonk rounds - Polynomial quotient_W; - work_queue queue; - std::shared_ptr instance; sumcheck::SumcheckOutput sumcheck_output; - pcs::gemini::ProverOutput univariate_openings; - pcs::shplonk::ProverOutput shplonk_output; - std::shared_ptr pcs_commitment_key; - using Gemini = pcs::gemini::GeminiProver_; - using Shplonk = pcs::shplonk::ShplonkProver_; + std::shared_ptr commitment_key; + + using ZeroMorph = pcs::zeromorph::ZeroMorphProver_; private: plonk::proof proof; }; extern template class UltraProver_; -extern template class UltraProver_; extern template class UltraProver_; using UltraProver = UltraProver_; diff --git 
a/barretenberg/cpp/src/barretenberg/honk/proof_system/ultra_verifier.cpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/ultra_verifier.cpp index 4d304709c0ba..b876b31947bb 100644 --- a/barretenberg/cpp/src/barretenberg/honk/proof_system/ultra_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/ultra_verifier.cpp @@ -1,7 +1,5 @@ #include "./ultra_verifier.hpp" -#include "barretenberg/honk/pcs/claim.hpp" -#include "barretenberg/honk/pcs/gemini/gemini.hpp" -#include "barretenberg/honk/pcs/shplonk/shplonk.hpp" +#include "barretenberg/honk/pcs/zeromorph/zeromorph.hpp" #include "barretenberg/honk/transcript/transcript.hpp" #include "barretenberg/honk/utils/power_polynomial.hpp" #include "barretenberg/numeric/bitop/get_msb.hpp" @@ -36,12 +34,9 @@ template UltraVerifier_& UltraVerifier_::opera template bool UltraVerifier_::verify_proof(const plonk::proof& proof) { using FF = typename Flavor::FF; - using GroupElement = typename Flavor::GroupElement; using Commitment = typename Flavor::Commitment; - using PCS = typename Flavor::PCS; using Curve = typename Flavor::Curve; - using Gemini = pcs::gemini::GeminiVerifier_; - using Shplonk = pcs::shplonk::ShplonkVerifier_; + using ZeroMorph = pcs::zeromorph::ZeroMorphVerifier_; using VerifierCommitments = typename Flavor::VerifierCommitments; using CommitmentLabels = typename Flavor::CommitmentLabels; @@ -114,7 +109,7 @@ template bool UltraVerifier_::verify_proof(const plonk // Execute Sumcheck Verifier auto sumcheck = SumcheckVerifier(circuit_size); - auto [multivariate_challenge, purported_evaluations, sumcheck_verified] = + auto [multivariate_challenge, claimed_evaluations, sumcheck_verified] = sumcheck.verify(relation_parameters, transcript); // If Sumcheck did not verify, return false @@ -122,108 +117,16 @@ template bool UltraVerifier_::verify_proof(const plonk return false; } - // Execute Gemini/Shplonk verification: + // Execute ZeroMorph rounds. 
See https://hackmd.io/dlf9xEwhTQyE3hiGbq4FsA?view for a complete description of the + // unrolled protocol. + auto pairing_points = ZeroMorph::verify(commitments, claimed_evaluations, multivariate_challenge, transcript); - // Construct inputs for Gemini verifier: - // - Multivariate opening point u = (u_0, ..., u_{d-1}) - // - batched unshifted and to-be-shifted polynomial commitments - auto batched_commitment_unshifted = GroupElement::zero(); - auto batched_commitment_to_be_shifted = GroupElement::zero(); - - // Compute powers of batching challenge rho - FF rho = transcript.get_challenge("rho"); - std::vector rhos = pcs::gemini::powers_of_rho(rho, Flavor::NUM_ALL_ENTITIES); - - // Compute batched multivariate evaluation - FF batched_evaluation = FF::zero(); - size_t evaluation_idx = 0; - for (auto& value : purported_evaluations.get_unshifted_then_shifted()) { - batched_evaluation += value * rhos[evaluation_idx]; - ++evaluation_idx; - } - - // Construct batched commitment for NON-shifted polynomials - size_t commitment_idx = 0; - for (auto& commitment : commitments.get_unshifted()) { - batched_commitment_unshifted += commitment * rhos[commitment_idx]; - ++commitment_idx; - } - - // Construct batched commitment for to-be-shifted polynomials - for (auto& commitment : commitments.get_to_be_shifted()) { - batched_commitment_to_be_shifted += commitment * rhos[commitment_idx]; - ++commitment_idx; - } - - // Produce a Gemini claim consisting of: - // - d+1 commitments [Fold_{r}^(0)], [Fold_{-r}^(0)], and [Fold^(l)], l = 1:d-1 - // - d+1 evaluations a_0_pos, and a_l, l = 0:d-1 - auto univariate_opening_claims = Gemini::reduce_verification(multivariate_challenge, - batched_evaluation, - batched_commitment_unshifted, - batched_commitment_to_be_shifted, - transcript); - - // Perform ECC op queue transcript aggregation protocol - if constexpr (IsGoblinFlavor) { - // Receive commitments [t_i^{shift}], [T_{i-1}], and [T_i] - std::array prev_agg_op_queue_commitments; - std::array 
shifted_op_wire_commitments; - std::array agg_op_queue_commitments; - for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - prev_agg_op_queue_commitments[idx] = - transcript.template receive_from_prover("PREV_AGG_OP_QUEUE_" + std::to_string(idx + 1)); - shifted_op_wire_commitments[idx] = - transcript.template receive_from_prover("SHIFTED_OP_WIRE_" + std::to_string(idx + 1)); - agg_op_queue_commitments[idx] = - transcript.template receive_from_prover("AGG_OP_QUEUE_" + std::to_string(idx + 1)); - } - - // Receive transcript poly evaluations - FF kappa = transcript.get_challenge("kappa"); - std::array prev_agg_op_queue_evals; - std::array shifted_op_wire_evals; - std::array agg_op_queue_evals; - for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - prev_agg_op_queue_evals[idx] = - transcript.template receive_from_prover("prev_agg_op_queue_eval_" + std::to_string(idx + 1)); - shifted_op_wire_evals[idx] = - transcript.template receive_from_prover("op_wire_eval_" + std::to_string(idx + 1)); - agg_op_queue_evals[idx] = - transcript.template receive_from_prover("agg_op_queue_eval_" + std::to_string(idx + 1)); - - // Check the identity T_i(\kappa) = T_{i-1}(\kappa) + t_i^{shift}(\kappa). 
If it fails, return false - if (agg_op_queue_evals[idx] != prev_agg_op_queue_evals[idx] + shifted_op_wire_evals[idx]) { - return false; - } - } - - // Add corresponding univariate opening claims {(\kappa, p(\kappa), [p(X)]} - for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - univariate_opening_claims.emplace_back(pcs::OpeningClaim{ { kappa, prev_agg_op_queue_evals[idx] }, - prev_agg_op_queue_commitments[idx] }); - } - for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - univariate_opening_claims.emplace_back( - pcs::OpeningClaim{ { kappa, shifted_op_wire_evals[idx] }, shifted_op_wire_commitments[idx] }); - } - for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - univariate_opening_claims.emplace_back( - pcs::OpeningClaim{ { kappa, agg_op_queue_evals[idx] }, agg_op_queue_commitments[idx] }); - } - } - - // Produce a Shplonk claim: commitment [Q] - [Q_z], evaluation zero (at random challenge z) - auto shplonk_claim = Shplonk::reduce_verification(pcs_verification_key, univariate_opening_claims, transcript); - - // Verify the Shplonk claim with KZG or IPA - auto verified = PCS::verify(pcs_verification_key, shplonk_claim, transcript); + auto verified = pcs_verification_key->pairing_check(pairing_points[0], pairing_points[1]); return sumcheck_verified.value() && verified; } template class UltraVerifier_; -template class UltraVerifier_; template class UltraVerifier_; } // namespace proof_system::honk diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/ultra_verifier.hpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/ultra_verifier.hpp index 339630386c36..34c524672d52 100644 --- a/barretenberg/cpp/src/barretenberg/honk/proof_system/ultra_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/ultra_verifier.hpp @@ -1,7 +1,6 @@ #pragma once #include "barretenberg/honk/flavor/goblin_ultra.hpp" #include "barretenberg/honk/flavor/ultra.hpp" -#include "barretenberg/honk/flavor/ultra_grumpkin.hpp" #include 
"barretenberg/honk/sumcheck/sumcheck.hpp" #include "barretenberg/plonk/proof_system/types/proof.hpp" @@ -28,7 +27,6 @@ template class UltraVerifier_ { }; extern template class UltraVerifier_; -extern template class UltraVerifier_; extern template class UltraVerifier_; using UltraVerifier = UltraVerifier_; diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/work_queue.hpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/work_queue.hpp deleted file mode 100644 index a95e40e45aa5..000000000000 --- a/barretenberg/cpp/src/barretenberg/honk/proof_system/work_queue.hpp +++ /dev/null @@ -1,137 +0,0 @@ -#pragma once - -#include "barretenberg/honk/transcript/transcript.hpp" -#include "barretenberg/srs/global_crs.hpp" -#include -#include - -namespace proof_system::honk { - -// Currently only one type of work queue operation but there will likely be others related to Sumcheck -enum WorkType { SCALAR_MULTIPLICATION }; - -template class work_queue { - - using CommitmentKey = pcs::CommitmentKey; - using FF = typename Curve::ScalarField; - using Commitment = typename Curve::AffineElement; - - struct work_item_info { - uint32_t num_scalar_multiplications; - }; - - struct work_item { - WorkType work_type = SCALAR_MULTIPLICATION; - std::span mul_scalars; - std::string label; - }; - - private: - proof_system::honk::ProverTranscript& transcript; - std::shared_ptr commitment_key; - std::vector work_item_queue; - - public: - explicit work_queue(auto commitment_key, proof_system::honk::ProverTranscript& prover_transcript) - : transcript(prover_transcript) - , commitment_key(commitment_key){}; - - work_queue(const work_queue& other) = default; - work_queue(work_queue&& other) noexcept = default; - ~work_queue() = default; - - [[nodiscard]] work_item_info get_queued_work_item_info() const - { - uint32_t scalar_mul_count = 0; - for (const auto& item : work_item_queue) { - if (item.work_type == WorkType::SCALAR_MULTIPLICATION) { - ++scalar_mul_count; - } - } - return 
work_item_info{ scalar_mul_count }; - }; - - [[nodiscard]] FF* get_scalar_multiplication_data(size_t work_item_number) const - { - size_t count = 0; - for (const auto& item : work_item_queue) { - if (item.work_type == WorkType::SCALAR_MULTIPLICATION) { - if (count == work_item_number) { - return const_cast(item.mul_scalars.data()); - } - ++count; - } - } - return nullptr; - }; - - [[nodiscard]] size_t get_scalar_multiplication_size(size_t work_item_number) const - { - size_t count = 0; - for (const auto& item : work_item_queue) { - if (item.work_type == WorkType::SCALAR_MULTIPLICATION) { - if (count == work_item_number) { - return item.mul_scalars.size(); - } - ++count; - } - } - return 0; - }; - - void put_scalar_multiplication_data(const Commitment& result, size_t work_item_number) - { - size_t count = 0; - for (const auto& item : work_item_queue) { - if (item.work_type == WorkType::SCALAR_MULTIPLICATION) { - if (count == work_item_number) { - transcript.send_to_verifier(item.label, result); - return; - } - ++count; - } - } - }; - - void flush_queue() { work_item_queue = std::vector(); }; - - void add_commitment(std::span polynomial, std::string label) - { - add_to_queue({ SCALAR_MULTIPLICATION, polynomial, label }); - } - - void process_queue() - { - for (const auto& item : work_item_queue) { - switch (item.work_type) { - - case WorkType::SCALAR_MULTIPLICATION: { - - // Run pippenger multi-scalar multiplication. 
- auto commitment = commitment_key->commit(item.mul_scalars); - - transcript.send_to_verifier(item.label, commitment); - - break; - } - default: { - } - } - } - work_item_queue = std::vector(); - }; - - [[nodiscard]] std::vector get_queue() const { return work_item_queue; }; - - private: - void add_to_queue(const work_item& item) - { - // Note: currently no difference between wasm and native but may be in the future -#if defined(__wasm__) - work_item_queue.push_back(item); -#else - work_item_queue.push_back(item); -#endif - }; -}; -} // namespace proof_system::honk \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/honk/sumcheck/relation_correctness.test.cpp b/barretenberg/cpp/src/barretenberg/honk/sumcheck/relation_correctness.test.cpp index 8e713ac7dfec..ed902506b03b 100644 --- a/barretenberg/cpp/src/barretenberg/honk/sumcheck/relation_correctness.test.cpp +++ b/barretenberg/cpp/src/barretenberg/honk/sumcheck/relation_correctness.test.cpp @@ -29,28 +29,28 @@ void ensure_non_zero(auto& polynomial) * @tparam relation_idx Index into a tuple of provided relations * @tparam Flavor */ -template void check_relation(auto relation, auto circuit_size, auto polynomials, auto params) +template void check_relation(auto circuit_size, auto polynomials, auto params) { - using ClaimedEvaluations = typename Flavor::ProverPolynomialsEvaluations; + using AllValues = typename Flavor::AllValues; for (size_t i = 0; i < circuit_size; i++) { // Extract an array containing all the polynomial evaluations at a given row i - ClaimedEvaluations evaluations_at_index_i; + AllValues evaluations_at_index_i; size_t poly_idx = 0; for (auto& poly : polynomials) { evaluations_at_index_i[poly_idx] = poly[i]; ++poly_idx; } - // Define the appropriate RelationValues type for this relation and initialize to zero - using RelationValues = typename decltype(relation)::RelationValues; - RelationValues result; + // Define the appropriate ArrayOfValuesOverSubrelations type for this 
relation and initialize to zero + using ArrayOfValuesOverSubrelations = typename Relation::ArrayOfValuesOverSubrelations; + ArrayOfValuesOverSubrelations result; for (auto& element : result) { element = 0; } // Evaluate each constraint in the relation and check that each is satisfied - relation.add_full_relation_value_contribution(result, evaluations_at_index_i, params); + Relation::accumulate(result, evaluations_at_index_i, params, 1); for (auto& element : result) { ASSERT_EQ(element, 0); } @@ -253,22 +253,17 @@ TEST_F(RelationCorrectnessTests, UltraRelationCorrectness) ensure_non_zero(proving_key->q_aux); // Construct the round for applying sumcheck relations and results for storing computed results - auto relations = std::tuple(proof_system::UltraArithmeticRelation(), - proof_system::UltraPermutationRelation(), - proof_system::LookupRelation(), - proof_system::GenPermSortRelation(), - proof_system::EllipticRelation(), - proof_system::AuxiliaryRelation()); + using Relations = typename Flavor::Relations; auto prover_polynomials = instance->prover_polynomials; auto params = instance->relation_parameters; // Check that each relation is satisfied across each row of the prover polynomials - check_relation(std::get<0>(relations), circuit_size, prover_polynomials, params); - check_relation(std::get<1>(relations), circuit_size, prover_polynomials, params); - check_relation(std::get<2>(relations), circuit_size, prover_polynomials, params); - check_relation(std::get<3>(relations), circuit_size, prover_polynomials, params); - check_relation(std::get<4>(relations), circuit_size, prover_polynomials, params); - check_relation(std::get<5>(relations), circuit_size, prover_polynomials, params); + check_relation>(circuit_size, prover_polynomials, params); + check_relation>(circuit_size, prover_polynomials, params); + check_relation>(circuit_size, prover_polynomials, params); + check_relation>(circuit_size, prover_polynomials, params); + check_relation>(circuit_size, 
prover_polynomials, params); + check_relation>(circuit_size, prover_polynomials, params); } TEST_F(RelationCorrectnessTests, GoblinUltraRelationCorrectness) @@ -311,25 +306,18 @@ TEST_F(RelationCorrectnessTests, GoblinUltraRelationCorrectness) ensure_non_zero(proving_key->q_aux); // Construct the round for applying sumcheck relations and results for storing computed results - auto relations = std::tuple(proof_system::UltraArithmeticRelation(), - proof_system::UltraPermutationRelation(), - proof_system::LookupRelation(), - proof_system::GenPermSortRelation(), - proof_system::EllipticRelation(), - proof_system::AuxiliaryRelation(), - proof_system::EccOpQueueRelation()); - + using Relations = typename Flavor::Relations; auto prover_polynomials = instance->prover_polynomials; auto params = instance->relation_parameters; // Check that each relation is satisfied across each row of the prover polynomials - check_relation(std::get<0>(relations), circuit_size, prover_polynomials, params); - check_relation(std::get<1>(relations), circuit_size, prover_polynomials, params); - check_relation(std::get<2>(relations), circuit_size, prover_polynomials, params); - check_relation(std::get<3>(relations), circuit_size, prover_polynomials, params); - check_relation(std::get<4>(relations), circuit_size, prover_polynomials, params); - check_relation(std::get<5>(relations), circuit_size, prover_polynomials, params); - check_relation(std::get<6>(relations), circuit_size, prover_polynomials, params); + check_relation>(circuit_size, prover_polynomials, params); + check_relation>(circuit_size, prover_polynomials, params); + check_relation>(circuit_size, prover_polynomials, params); + check_relation>(circuit_size, prover_polynomials, params); + check_relation>(circuit_size, prover_polynomials, params); + check_relation>(circuit_size, prover_polynomials, params); + check_relation>(circuit_size, prover_polynomials, params); } } // namespace test_honk_relations diff --git 
a/barretenberg/cpp/src/barretenberg/honk/sumcheck/relation_definitions_fwd.hpp b/barretenberg/cpp/src/barretenberg/honk/sumcheck/relation_definitions_fwd.hpp index 73fa459a4fa3..1f7d3f0d5202 100644 --- a/barretenberg/cpp/src/barretenberg/honk/sumcheck/relation_definitions_fwd.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/sumcheck/relation_definitions_fwd.hpp @@ -3,32 +3,31 @@ #include "barretenberg/proof_system/relations/relation_types.hpp" #define ExtendedEdge(Flavor) Flavor::ExtendedEdges -#define EvaluationEdge(Flavor) Flavor::ProverPolynomialsEvaluations +#define EvaluationEdge(Flavor) Flavor::AllValues #define EntityEdge(Flavor) Flavor::AllEntities -#define ADD_EDGE_CONTRIBUTION(...) _ADD_EDGE_CONTRIBUTION(__VA_ARGS__) -#define _ADD_EDGE_CONTRIBUTION(Preface, RelationBase, Flavor, AccumulatorType, EdgeType) \ +#define ACCUMULATE(...) _ACCUMULATE(__VA_ARGS__) +#define _ACCUMULATE(Preface, RelationBase, Flavor, AccumulatorType, EdgeType) \ Preface template void \ RelationBase::accumulate>::AccumulatorType, \ EdgeType(Flavor)>( \ - proof_system::Relation>::AccumulatorType::Accumulators&, \ + proof_system::Relation>::AccumulatorType&, \ EdgeType(Flavor) const&, \ RelationParameters const&, \ Flavor::FF const&); #define PERMUTATION_METHOD(...) _PERMUTATION_METHOD(__VA_ARGS__) #define _PERMUTATION_METHOD(Preface, MethodName, RelationBase, Flavor, AccumulatorType, EdgeType) \ - Preface template RelationBase::template Accumulator< \ - proof_system::Relation>::AccumulatorType> \ + Preface template typename proof_system::Relation>::AccumulatorType \ RelationBase::MethodName>::AccumulatorType, \ - EdgeType(Flavor)>( \ - EdgeType(Flavor) const&, RelationParameters const&, size_t const); + EdgeType(Flavor)>(EdgeType(Flavor) const&, \ + RelationParameters const&); #define SUMCHECK_RELATION_CLASS(...) 
_SUMCHECK_RELATION_CLASS(__VA_ARGS__) #define _SUMCHECK_RELATION_CLASS(Preface, RelationBase, Flavor) \ - ADD_EDGE_CONTRIBUTION(Preface, RelationBase, Flavor, UnivariateAccumulatorsAndViews, ExtendedEdge) \ - ADD_EDGE_CONTRIBUTION(Preface, RelationBase, Flavor, ValueAccumulatorsAndViews, EvaluationEdge) \ - ADD_EDGE_CONTRIBUTION(Preface, RelationBase, Flavor, ValueAccumulatorsAndViews, EntityEdge) + ACCUMULATE(Preface, RelationBase, Flavor, TupleOfUnivariatesOverSubrelations, ExtendedEdge) \ + ACCUMULATE(Preface, RelationBase, Flavor, ArrayOfValuesOverSubrelations, EvaluationEdge) \ + ACCUMULATE(Preface, RelationBase, Flavor, ArrayOfValuesOverSubrelations, EntityEdge) #define DECLARE_SUMCHECK_RELATION_CLASS(RelationBase, Flavor) SUMCHECK_RELATION_CLASS(extern, RelationBase, Flavor) #define DEFINE_SUMCHECK_RELATION_CLASS(RelationBase, Flavor) SUMCHECK_RELATION_CLASS(, RelationBase, Flavor) @@ -36,17 +35,15 @@ #define SUMCHECK_PERMUTATION_CLASS(...) _SUMCHECK_PERMUTATION_CLASS(__VA_ARGS__) #define _SUMCHECK_PERMUTATION_CLASS(Preface, RelationBase, Flavor) \ PERMUTATION_METHOD( \ - Preface, compute_permutation_numerator, RelationBase, Flavor, UnivariateAccumulatorsAndViews, ExtendedEdge) \ - PERMUTATION_METHOD( \ - Preface, compute_permutation_numerator, RelationBase, Flavor, ValueAccumulatorsAndViews, EvaluationEdge) \ - PERMUTATION_METHOD( \ - Preface, compute_permutation_numerator, RelationBase, Flavor, ValueAccumulatorsAndViews, EntityEdge) \ + Preface, compute_permutation_numerator, RelationBase, Flavor, UnivariateAccumulator0, ExtendedEdge) \ PERMUTATION_METHOD( \ - Preface, compute_permutation_denominator, RelationBase, Flavor, UnivariateAccumulatorsAndViews, ExtendedEdge) \ + Preface, compute_permutation_numerator, RelationBase, Flavor, ValueAccumulator0, EvaluationEdge) \ + PERMUTATION_METHOD(Preface, compute_permutation_numerator, RelationBase, Flavor, ValueAccumulator0, EntityEdge) \ PERMUTATION_METHOD( \ - Preface, compute_permutation_denominator, 
RelationBase, Flavor, ValueAccumulatorsAndViews, EvaluationEdge) \ + Preface, compute_permutation_denominator, RelationBase, Flavor, UnivariateAccumulator0, ExtendedEdge) \ PERMUTATION_METHOD( \ - Preface, compute_permutation_denominator, RelationBase, Flavor, ValueAccumulatorsAndViews, EntityEdge) + Preface, compute_permutation_denominator, RelationBase, Flavor, ValueAccumulator0, EvaluationEdge) \ + PERMUTATION_METHOD(Preface, compute_permutation_denominator, RelationBase, Flavor, ValueAccumulator0, EntityEdge) #define DECLARE_SUMCHECK_PERMUTATION_CLASS(RelationBase, Flavor) \ SUMCHECK_PERMUTATION_CLASS(extern, RelationBase, Flavor) diff --git a/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck.hpp b/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck.hpp index de28b0792792..c3dcf58abef4 100644 --- a/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck.hpp @@ -15,7 +15,7 @@ template class SumcheckProver { public: using FF = typename Flavor::FF; using PartiallyEvaluatedMultivariates = typename Flavor::PartiallyEvaluatedMultivariates; - using ClaimedEvaluations = typename Flavor::ProverPolynomialsEvaluations; + using ClaimedEvaluations = typename Flavor::AllValues; ProverTranscript& transcript; const size_t multivariate_n; @@ -108,7 +108,8 @@ template class SumcheckProver { // Final round: Extract multivariate evaluations from partially_evaluated_polynomials and add to transcript ClaimedEvaluations multivariate_evaluations; size_t evaluation_idx = 0; - for (auto& polynomial : partially_evaluated_polynomials) { // TODO(#391) zip + // TODO(https://github.com/AztecProtocol/barretenberg/issues/391) zip + for (auto& polynomial : partially_evaluated_polynomials) { multivariate_evaluations[evaluation_idx] = polynomial[0]; ++evaluation_idx; } @@ -151,7 +152,7 @@ template class SumcheckVerifier { public: using Utils = barretenberg::RelationUtils; using FF = typename Flavor::FF; - 
using ClaimedEvaluations = typename Flavor::ProverPolynomialsEvaluations; + using ClaimedEvaluations = typename Flavor::AllValues; static constexpr size_t MAX_RANDOM_RELATION_LENGTH = Flavor::MAX_RANDOM_RELATION_LENGTH; static constexpr size_t NUM_POLYNOMIALS = Flavor::NUM_ALL_ENTITIES; diff --git a/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck.test.cpp b/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck.test.cpp index f8ae29e46e33..963a271201fc 100644 --- a/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck.test.cpp +++ b/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck.test.cpp @@ -143,6 +143,16 @@ TEST_F(SumcheckTests, PolynomialNormalization) l_6 * full_polynomials[i][6] + l_7 * full_polynomials[i][7]; EXPECT_EQ(hand_computed_value, sumcheck.partially_evaluated_polynomials[i][0]); } + + // We can also check the correctness of the multilinear evaluations produced by Sumcheck by directly evaluating the + // full polynomials at challenge u via the evaluate_mle() function + std::vector u_challenge = { u_0, u_1, u_2 }; + for (size_t i = 0; i < NUM_POLYNOMIALS; i++) { + barretenberg::Polynomial poly(full_polynomials[i]); + auto v_expected = poly.evaluate_mle(u_challenge); + auto v_result = output.claimed_evaluations[i]; + EXPECT_EQ(v_expected, v_result); + } } TEST_F(SumcheckTests, Prover) diff --git a/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck_output.hpp b/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck_output.hpp index 38b4311a9465..d300d6d4d728 100644 --- a/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck_output.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck_output.hpp @@ -11,7 +11,7 @@ namespace proof_system::honk::sumcheck { */ template struct SumcheckOutput { using FF = typename Flavor::FF; - using ClaimedEvaluations = typename Flavor::ProverPolynomialsEvaluations; + using ClaimedEvaluations = typename Flavor::AllValues; // u = (u_0, ..., u_{d-1}) std::vector challenge; // 
Evaluations in `u` of the polynomials used in Sumcheck diff --git a/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck_round.hpp b/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck_round.hpp index 8a6722777ffe..1453c338f1aa 100644 --- a/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck_round.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/sumcheck/sumcheck_round.hpp @@ -6,6 +6,7 @@ #include "barretenberg/polynomials/pow.hpp" #include "barretenberg/proof_system/flavor/flavor.hpp" #include "barretenberg/proof_system/relations/relation_parameters.hpp" +#include "barretenberg/proof_system/relations/relation_types.hpp" #include "barretenberg/proof_system/relations/utils.hpp" namespace proof_system::honk::sumcheck { @@ -55,7 +56,7 @@ namespace proof_system::honk::sumcheck { template class SumcheckProverRound { using Relations = typename Flavor::Relations; - using RelationUnivariates = typename Flavor::RelationUnivariates; + using TupleOfTuplesOfUnivariates = typename Flavor::TupleOfTuplesOfUnivariates; public: using FF = typename Flavor::FF; @@ -64,12 +65,11 @@ template class SumcheckProverRound { size_t round_size; // a power of 2 - Relations relations; static constexpr size_t NUM_RELATIONS = Flavor::NUM_RELATIONS; static constexpr size_t MAX_RELATION_LENGTH = Flavor::MAX_RELATION_LENGTH; static constexpr size_t MAX_RANDOM_RELATION_LENGTH = Flavor::MAX_RANDOM_RELATION_LENGTH; - RelationUnivariates univariate_accumulators; + TupleOfTuplesOfUnivariates univariate_accumulators; // TODO(#224)(Cody): this should go away barretenberg::BarycentricData barycentric_2_to_max; @@ -111,7 +111,7 @@ template class SumcheckProverRound { */ void extend_edges(auto& extended_edges, auto& multivariates, size_t edge_idx) { - size_t univariate_idx = 0; // TODO(#391) zip + size_t univariate_idx = 0; // TODO(https://github.com/AztecProtocol/barretenberg/issues/391) zip for (auto& poly : multivariates) { auto edge = barretenberg::Univariate({ poly[edge_idx], 
poly[edge_idx + 1] }); extended_edges[univariate_idx] = barycentric_2_to_max.extend(edge); @@ -148,7 +148,7 @@ template class SumcheckProverRound { size_t iterations_per_thread = round_size / num_threads; // actual iterations per thread // Constuct univariate accumulator containers; one per thread - std::vector thread_univariate_accumulators(num_threads); + std::vector thread_univariate_accumulators(num_threads); for (auto& accum : thread_univariate_accumulators) { zero_univariates(accum); } @@ -205,12 +205,13 @@ template class SumcheckProverRound { * appropriate scaling factors, produces S_l. */ template - void accumulate_relation_univariates(RelationUnivariates& univariate_accumulators, + void accumulate_relation_univariates(TupleOfTuplesOfUnivariates& univariate_accumulators, const auto& extended_edges, const proof_system::RelationParameters& relation_parameters, const FF& scaling_factor) { - std::get(relations).add_edge_contribution( + using Relation = std::tuple_element_t; + Relation::accumulate( std::get(univariate_accumulators), extended_edges, relation_parameters, scaling_factor); // Repeat for the next relation. @@ -221,9 +222,8 @@ template class SumcheckProverRound { } public: - // TODO(luke): Potentially make RelationUnivarites (tuple of tuples of Univariates) a class and make these utility - // functions class methods. Alternatively, move all of these tuple utilities (and the ones living elsewhere) to - // their own module. + // TODO(luke): Potentially make TupleOfTuplesOfUnivariates a class and make these utility functions class methods. + // Alternatively, move all of these tuple utilities (and the ones living elsewhere) to their own module. 
/** * Utility methods for tuple of tuples of Univariates */ @@ -254,9 +254,7 @@ template class SumcheckProverRound { barretenberg::BarycentricData barycentric_utils; auto extended = barycentric_utils.extend(element); - const bool is_subrelation_linearly_independent = - Relation::template is_subrelation_linearly_independent(); - if (is_subrelation_linearly_independent) { + if constexpr (subrelation_is_linearly_independent()) { // if subrelation is linearly independent, multiply by random polynomial result += extended * extended_random_polynomial_edge; } else { @@ -369,11 +367,11 @@ template class SumcheckProverRound { template class SumcheckVerifierRound { using Utils = barretenberg::RelationUtils; using Relations = typename Flavor::Relations; - using RelationEvaluations = typename Flavor::RelationValues; + using TupleOfArraysOfValues = typename Flavor::TupleOfArraysOfValues; public: using FF = typename Flavor::FF; - using ClaimedEvaluations = typename Flavor::ProverPolynomialsEvaluations; + using ClaimedEvaluations = typename Flavor::AllValues; bool round_failed = false; @@ -382,8 +380,7 @@ template class SumcheckVerifierRound { FF target_total_sum = 0; - Relations relations; - RelationEvaluations relation_evaluations; + TupleOfArraysOfValues relation_evaluations; // Verifier constructor explicit SumcheckVerifierRound() { Utils::zero_elements(relation_evaluations); }; @@ -429,12 +426,10 @@ template class SumcheckVerifierRound { } /** - * @brief Calculate the contribution of each relation to the expected value of the full Honk relation. + * @brief General purpose method for applying a tuple of arrays (of FFs) * - * @details For each relation, use the purported values (supplied by the prover) of the multivariates to calculate - * a contribution to the purported value of the full Honk relation. These are stored in `evaluations`. Adding these - * together, with appropriate scaling factors, produces the expected value of the full Honk relation. 
This value is - * checked against the final value of the target total sum, defined as sigma_d. + * @tparam Operation Any operation valid on elements of the inner arrays (FFs) + * @param tuple Tuple of arrays (of FFs) */ // also copy paste in PG // so instead of having claimed evaluations of each relation in part you have the actual evaluations diff --git a/barretenberg/cpp/src/barretenberg/honk/transcript/transcript.test.cpp b/barretenberg/cpp/src/barretenberg/honk/transcript/transcript.test.cpp index b2e8127180be..833fb0e673e0 100644 --- a/barretenberg/cpp/src/barretenberg/honk/transcript/transcript.test.cpp +++ b/barretenberg/cpp/src/barretenberg/honk/transcript/transcript.test.cpp @@ -72,23 +72,16 @@ class UltraTranscriptTests : public ::testing::Test { manifest_expected.add_entry(round, "Sumcheck:evaluations", size_evals); manifest_expected.add_challenge(round, "rho"); - round++; - for (size_t i = 1; i < log_n; ++i) { - std::string idx = std::to_string(i); - manifest_expected.add_entry(round, "Gemini:FOLD_" + idx, size_G); - } - manifest_expected.add_challenge(round, "Gemini:r"); - round++; for (size_t i = 0; i < log_n; ++i) { std::string idx = std::to_string(i); - manifest_expected.add_entry(round, "Gemini:a_" + idx, size_FF); + manifest_expected.add_entry(round, "ZM:C_q_" + idx, size_G); } - manifest_expected.add_challenge(round, "Shplonk:nu"); + manifest_expected.add_challenge(round, "ZM:y"); round++; - manifest_expected.add_entry(round, "Shplonk:Q", size_G); - manifest_expected.add_challenge(round, "Shplonk:z"); + manifest_expected.add_entry(round, "ZM:C_q", size_G); + manifest_expected.add_challenge(round, "ZM:x", "ZM:z"); round++; // TODO(Mara): Make testing more flavor agnostic so we can test this with all flavors @@ -108,7 +101,7 @@ class UltraTranscriptTests : public ::testing::Test { round++; manifest_expected.add_entry(round, "IPA:a_0", size_FF); } else { - manifest_expected.add_entry(round, "KZG:W", size_G); + manifest_expected.add_entry(round, 
"ZM:PI", size_G); } manifest_expected.add_challenge(round); // no challenge @@ -207,40 +200,36 @@ TEST_F(UltraTranscriptTests, ChallengeGenerationTest) TEST_F(UltraTranscriptTests, FoldingManifestTest) { using Flavor = flavor::Ultra; - auto builder_one = proof_system::UltraCircuitBuilder(); - auto a = 2; - auto b = 3; - builder_one.add_variable(a); - builder_one.add_public_variable(a); - builder_one.add_public_variable(b); - - auto builder_two = proof_system::UltraCircuitBuilder(); - a = 3; - b = 4; - builder_two.add_variable(a); - builder_two.add_variable(b); - builder_two.add_public_variable(a); - builder_two.add_public_variable(b); - auto composer = UltraComposer(); - auto instance_one = composer.create_instance(builder_one); + + std::vector>> insts(2); + std::generate(insts.begin(), insts.end(), [&]() { + auto builder = proof_system::UltraCircuitBuilder(); + auto a = FF::random_element(); + auto b = FF::random_element(); + builder.add_variable(a); + builder.add_public_variable(a); + builder.add_public_variable(b); + return composer.create_instance(builder); + }); + info("here"); + // artificially make first instance relaxed - auto log_instance_size = static_cast(numeric::get_msb(instance_one->proving_key->circuit_size)); + auto log_instance_size = static_cast(numeric::get_msb(insts[0]->proving_key->circuit_size)); + info("here"); std::vector betas(log_instance_size); for (size_t idx = 0; idx < log_instance_size; idx++) { betas[idx] = FF::random_element(); } - instance_one->folding_params = { betas, FF(1) }; - auto instance_two = composer.create_instance(builder_two); + insts[0]->folding_parameters = { betas, FF(1) }; + info("here"); - std::vector>> insts; - insts.emplace_back(instance_one); - insts.emplace_back(instance_two); auto prover = composer.create_folding_prover(insts); auto verifier = composer.create_folding_verifier(insts); auto prover_res = prover.fold_instances(); verifier.fold_public_parameters(prover_res.folding_data); + info("here"); // Check 
consistency between the manifests generated by the prover and verifier auto prover_manifest = prover.transcript.get_manifest(); diff --git a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/compute_circuit_data.hpp b/barretenberg/cpp/src/barretenberg/join_split_example/proofs/compute_circuit_data.hpp index 40f39d24e4e9..cdf0dbeb1712 100644 --- a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/compute_circuit_data.hpp +++ b/barretenberg/cpp/src/barretenberg/join_split_example/proofs/compute_circuit_data.hpp @@ -56,9 +56,9 @@ circuit_data get_circuit_data(std::string const& name, circuit_data data; data.srs = srs; data.mock = mock; - Composer composer(srs); + Composer composer; Builder builder; - Composer mock_proof_composer(srs); + Composer mock_proof_composer; Builder mock_builder; BenchmarkInfoCollator benchmark_collator; diff --git a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/c_bind.cpp b/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/c_bind.cpp deleted file mode 100644 index fcddcbfdb942..000000000000 --- a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/c_bind.cpp +++ /dev/null @@ -1,98 +0,0 @@ -#include -#include - -#include "../mock/mock_circuit.hpp" -#include "barretenberg/common/container.hpp" -#include "barretenberg/common/mem.hpp" -#include "barretenberg/common/streams.hpp" -#include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" -#include "barretenberg/join_split_example/types.hpp" -#include "barretenberg/plonk/proof_system/proving_key/serialize.hpp" -#include "barretenberg/srs/global_crs.hpp" -#include "c_bind.h" -#include "compute_signing_data.hpp" -#include "join_split.hpp" - -using namespace barretenberg; -using namespace join_split_example::proofs::join_split; - -WASM_EXPORT void join_split__init_proving_key(bool mock) -{ - init_proving_key(barretenberg::srs::get_crs_factory(), mock); -} - -// WASM_EXPORT void 
join_split__init_proving_key_from_buffer(uint8_t const* pk_buf) -// { -// std::shared_ptr crs; -// plonk::proving_key_data pk_data; -// read(pk_buf, pk_data); -// init_proving_key(crs, std::move(pk_data)); -// } - -WASM_EXPORT void join_split__release_key() -{ - release_proving_key(); -} - -WASM_EXPORT uint32_t join_split__get_new_proving_key_data(uint8_t** output) -{ - // Computing the size of the serialized key is non trivial. We know it's ~331mb. - // Allocate a buffer large enough to hold it, and abort if we overflow. - // This is to keep memory usage down. - - auto proving_key = get_proving_key(); - auto buffer = to_buffer(*proving_key); - auto raw_buf = (uint8_t*)malloc(buffer.size()); - memcpy(raw_buf, (void*)buffer.data(), buffer.size()); - *output = raw_buf; - - return static_cast(buffer.size()); -} - -WASM_EXPORT void join_split__init_verification_key(void* /*unused*/, uint8_t const* /*unused*/) -{ - init_verification_key(barretenberg::srs::get_crs_factory()); -} - -// WASM_EXPORT void join_split__init_verification_key_from_buffer(uint8_t const* vk_buf, uint8_t const* g2x) -// { -// auto crs = std::make_shared(g2x); -// plonk::verification_key_data vk_data; -// read(vk_buf, vk_data); -// init_verification_key(crs, std::move(vk_data)); -// } - -WASM_EXPORT uint32_t join_split__get_new_verification_key_data(uint8_t** output) -{ - auto buffer = to_buffer(*get_verification_key()); - auto raw_buf = (uint8_t*)malloc(buffer.size()); - memcpy(raw_buf, (void*)buffer.data(), buffer.size()); - *output = raw_buf; - return static_cast(buffer.size()); -} - -WASM_EXPORT void join_split__compute_signing_data(uint8_t const* join_split_tx_buf, uint8_t* output) -{ - auto tx = from_buffer(join_split_tx_buf); - auto signing_data = compute_signing_data(tx); - barretenberg::fr::serialize_to_buffer(signing_data, output); -} - -WASM_EXPORT void* join_split__new_prover(uint8_t const* join_split_buf, bool mock) -{ - auto tx = from_buffer(join_split_buf); - auto prover = 
new_join_split_prover(tx, mock); - auto heapProver = new join_split_example::Prover(std::move(prover)); - return heapProver; -} - -WASM_EXPORT void join_split__delete_prover(void* prover) -{ - delete reinterpret_cast(prover); -} - -WASM_EXPORT bool join_split__verify_proof(uint8_t* proof, uint32_t length) -{ - plonk::proof pp = { std::vector(proof, proof + length) }; - return verify_proof(pp); -} diff --git a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/c_bind.h b/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/c_bind.h deleted file mode 100644 index cd7390b1dad1..000000000000 --- a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/c_bind.h +++ /dev/null @@ -1,3 +0,0 @@ -#include - -WASM_EXPORT uint32_t join_split__get_new_proving_key_data(uint8_t** output); diff --git a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split.cpp b/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split.cpp index 1da1caee45fe..c4c52a1be381 100644 --- a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split.cpp +++ b/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split.cpp @@ -14,8 +14,7 @@ using namespace proof_system::plonk::stdlib::merkle_tree; static std::shared_ptr proving_key; static std::shared_ptr verification_key; -void init_proving_key(std::shared_ptr> const& crs_factory, - bool mock) +void init_proving_key(bool mock) { if (proving_key) { return; @@ -27,12 +26,12 @@ void init_proving_key(std::shared_ptr> const& crs_factory) +void init_verification_key() { if (!proving_key) { std::abort(); } - // Patch the 'nothing' reference string fed to init_proving_key. 
- proving_key->reference_string = crs_factory->get_prover_crs(proving_key->circuit_size + 1); verification_key = - proof_system::plonk::compute_verification_key_common(proving_key, crs_factory->get_verifier_crs()); + proof_system::plonk::compute_verification_key_common(proving_key, srs::get_crs_factory()->get_verifier_crs()); } Prover new_join_split_prover(join_split_tx const& tx, bool mock) diff --git a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split.hpp b/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split.hpp index 1d4a1dd5fa7a..a436d99f884a 100644 --- a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split.hpp +++ b/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split.hpp @@ -7,12 +7,11 @@ namespace join_split_example { namespace proofs { namespace join_split { -void init_proving_key(std::shared_ptr> const& crs_factory, - bool mock); +void init_proving_key(bool mock); void release_proving_key(); -void init_verification_key(std::shared_ptr> const& crs_factory); +void init_verification_key(); Prover new_join_split_prover(join_split_tx const& tx, bool mock); diff --git a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split.test.cpp b/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split.test.cpp index 8b86c4d1641a..b018a9cd7b8d 100644 --- a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split.test.cpp +++ b/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split.test.cpp @@ -44,11 +44,10 @@ class join_split_tests : public ::testing::Test { static void SetUpTestCase() { barretenberg::srs::init_crs_factory("../srs_db/ignition"); - auto null_crs_factory = std::make_shared>(); - init_proving_key(null_crs_factory, false); + init_proving_key(false); auto crs_factory = std::make_unique>("../srs_db/ignition"); - 
init_verification_key(std::move(crs_factory)); + init_verification_key(); info("vk hash: ", get_verification_key()->sha256_hash()); } @@ -213,7 +212,7 @@ class join_split_tests : public ::testing::Test { uint32_t account_note_index = 0, bool account_required = false) { - // The tree, user and notes are initialised in SetUp(). + // The tree, user and notes are initialized in SetUp(). preload_value_notes(); preload_account_notes(); // indices: [ACCOUNT_INDEX, ACCOUNT_INDEX + 1] return create_join_split_tx(input_indices, diff --git a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split_js_parity.test.cpp b/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split_js_parity.test.cpp index f23bceef85f2..ba601d934474 100644 --- a/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split_js_parity.test.cpp +++ b/barretenberg/cpp/src/barretenberg/join_split_example/proofs/join_split/join_split_js_parity.test.cpp @@ -25,11 +25,9 @@ class join_split_js_parity_tests : public ::testing::Test { protected: static void SetUpTestCase() { - auto null_crs_factory = std::make_shared>(); - init_proving_key(null_crs_factory, false); - auto crs_factory = - std::make_unique>("../srs_db/ignition"); - init_verification_key(std::move(crs_factory)); + srs::init_crs_factory("../srs_db/ignition"); + init_proving_key(false); + init_verification_key(); info("vk hash: ", get_verification_key()->sha256_hash()); } diff --git a/barretenberg/cpp/src/barretenberg/plonk/composer/ultra_composer.cpp b/barretenberg/cpp/src/barretenberg/plonk/composer/ultra_composer.cpp index 7b0ded945c83..1026193bdf7e 100644 --- a/barretenberg/cpp/src/barretenberg/plonk/composer/ultra_composer.cpp +++ b/barretenberg/cpp/src/barretenberg/plonk/composer/ultra_composer.cpp @@ -375,10 +375,11 @@ std::shared_ptr UltraComposer::compute_proving_key(CircuitBuilder& const size_t minimum_circuit_size = tables_size + lookups_size; const size_t 
num_randomized_gates = NUM_RESERVED_GATES; + auto crs_factory = srs::get_crs_factory(); // Initialize circuit_proving_key // TODO(#392)(Kesha): replace composer types. circuit_proving_key = initialize_proving_key( - circuit_constructor, crs_factory_.get(), minimum_circuit_size, num_randomized_gates, CircuitType::ULTRA); + circuit_constructor, crs_factory.get(), minimum_circuit_size, num_randomized_gates, CircuitType::ULTRA); construct_selector_polynomials(circuit_constructor, circuit_proving_key.get()); @@ -491,10 +492,12 @@ std::shared_ptr UltraComposer::compute_verification_key return circuit_verification_key; } + auto crs_factory = srs::get_crs_factory(); + if (!circuit_proving_key) { compute_proving_key(circuit_constructor); } - circuit_verification_key = compute_verification_key_common(circuit_proving_key, crs_factory_->get_verifier_crs()); + circuit_verification_key = compute_verification_key_common(circuit_proving_key, crs_factory->get_verifier_crs()); circuit_verification_key->circuit_type = CircuitType::ULTRA; diff --git a/barretenberg/cpp/src/barretenberg/plonk/composer/ultra_composer.hpp b/barretenberg/cpp/src/barretenberg/plonk/composer/ultra_composer.hpp index fcea9028d830..0d365a6c0c4c 100644 --- a/barretenberg/cpp/src/barretenberg/plonk/composer/ultra_composer.hpp +++ b/barretenberg/cpp/src/barretenberg/plonk/composer/ultra_composer.hpp @@ -27,7 +27,6 @@ class UltraComposer { std::shared_ptr circuit_verification_key; // The crs_factory holds the path to the srs and exposes methods to extract the srs elements - std::shared_ptr> crs_factory_; bool computed_witness = false; @@ -37,11 +36,7 @@ class UltraComposer { // vanishing_polynomial cannot be trivially fetched here, I am directly setting this to 4 - 1 = 3. 
static constexpr size_t s_randomness = 3; - UltraComposer() { crs_factory_ = barretenberg::srs::get_crs_factory(); } - - explicit UltraComposer(std::shared_ptr> crs_factory) - : crs_factory_(std::move(crs_factory)) - {} + UltraComposer() = default; UltraComposer(std::shared_ptr p_key, std::shared_ptr v_key) : circuit_proving_key(std::move(p_key)) diff --git a/barretenberg/cpp/src/barretenberg/polynomials/polynomial.cpp b/barretenberg/cpp/src/barretenberg/polynomials/polynomial.cpp index c898d9d8a6f5..e3f94e9c8720 100644 --- a/barretenberg/cpp/src/barretenberg/polynomials/polynomial.cpp +++ b/barretenberg/cpp/src/barretenberg/polynomials/polynomial.cpp @@ -3,6 +3,7 @@ #include "barretenberg/common/slab_allocator.hpp" #include "barretenberg/common/thread.hpp" #include "barretenberg/common/thread_utils.hpp" +#include "barretenberg/numeric/bitop/pow.hpp" #include "polynomial_arithmetic.hpp" #include #include @@ -129,7 +130,7 @@ template Fr Polynomial::evaluate(const Fr& z) const /** * @brief sets a block of memory to all zeroes * Used to zero out unintialized memory to ensure that, when writing to the polynomial in future, - * memory requests made to the OS do not return virtual pages (performance optimisation). + * memory requests made to the OS do not return virtual pages (performance optimization). * Used, for example, when one polynomial is instantiated from another one with size_>= other.size_. 
* * @param opening_proof Opening proof computed by `batch_open` @@ -417,6 +418,49 @@ template Fr Polynomial::evaluate_mle(std::span evalu return result; } +template Polynomial Polynomial::partial_evaluate_mle(std::span evaluation_points) const +{ + // Get size of partial evaluation point u = (u_0,...,u_{m-1}) + const size_t m = evaluation_points.size(); + + // Assert that the size of the polynomial being evaluated is a power of 2 greater than (1 << m) + ASSERT(numeric::is_power_of_two(size_)); + ASSERT(size_ >= static_cast(1 << m)); + size_t n = numeric::get_msb(size_); + + // Partial evaluation is done in m rounds l = 0,...,m-1. At the end of round l, the polynomial has been partially + // evaluated at u_{m-l-1}, ..., u_{m-1} in variables X_{n-l-1}, ..., X_{n-1}. The size of this polynomial is n_l. + size_t n_l = 1 << (n - 1); + + // Temporary buffer of half the size of the polynomial + pointer tmp_ptr = allocate_aligned_memory(sizeof(Fr) * n_l); + auto tmp = tmp_ptr.get(); + + Fr* prev = coefficients_.get(); + + // Evaluate variable X_{n-1} at u_{m-1} + Fr u_l = evaluation_points[m - 1]; + for (size_t i = 0; i < n_l; ++i) { + tmp[i] = prev[i] + u_l * (prev[i + n_l] - prev[i]); + } + // Evaluate m-1 variables X_{n-l-1}, ..., X_{n-2} at m-1 remaining values u_0,...,u_{m-2}) + for (size_t l = 1; l < m; ++l) { + n_l = 1 << (n - l - 1); + u_l = evaluation_points[m - l - 1]; + for (size_t i = 0; i < n_l; ++i) { + tmp[i] = tmp[i] + u_l * (tmp[i + n_l] - tmp[i]); + } + } + + // Construct resulting polynomial g(X_0,…,X_{n-m-1})) = p(X_0,…,X_{n-m-1},u_0,...u_{m-1}) from buffer + auto result = Polynomial(n_l); + for (size_t idx = 0; idx < n_l; ++idx) { + result[idx] = tmp[idx]; + } + + return result; +} + template typename Polynomial::pointer Polynomial::allocate_aligned_memory(const size_t size) const { return std::static_pointer_cast(get_mem_slab(size)); diff --git a/barretenberg/cpp/src/barretenberg/polynomials/polynomial.hpp 
b/barretenberg/cpp/src/barretenberg/polynomials/polynomial.hpp index d22f47c5f738..b0ebaae33f62 100644 --- a/barretenberg/cpp/src/barretenberg/polynomials/polynomial.hpp +++ b/barretenberg/cpp/src/barretenberg/polynomials/polynomial.hpp @@ -202,6 +202,25 @@ template class Polynomial { */ Fr evaluate_mle(std::span evaluation_points, bool shift = false) const; + /** + * @brief Partially evaluates in the last k variables a polynomial interpreted as a multilinear extension. + * + * @details Partially evaluates p(X) = (a_0, ..., a_{2^n-1}) considered as multilinear extension p(X_0,…,X_{n-1}) = + * \sum_i a_i*L_i(X_0,…,X_{n-1}) at u = (u_0,…,u_{m-1}), m < n, in the last m variables X_n-m,…,X_{n-1}. The result + * is a multilinear polynomial in n-m variables g(X_0,…,X_{n-m-1})) = p(X_0,…,X_{n-m-1},u_0,...u_{m-1}). + * + * @note Intuitively, partially evaluating in one variable collapses the hypercube in one dimension, halving the + * number of coefficients needed to represent the result. To partially evaluate starting with the first variable (as + * is done in evaluate_mle), the vector of coefficents is halved by combining adjacent rows in a pairwise + * fashion (similar to what is done in Sumcheck via "edges"). To evaluate starting from the last variable, we + * instead bisect the whole vector and combine the two halves. I.e. rather than coefficents being combined with + * their immediate neighbor, they are combined with the coefficient that lives n/2 indices away. + * + * @param evaluation_points an MLE partial evaluation point u = (u_0,…,u_{m-1}) + * @return Polynomial g(X_0,…,X_{n-m-1})) = p(X_0,…,X_{n-m-1},u_0,...u_{m-1}) + */ + Polynomial partial_evaluate_mle(std::span evaluation_points) const; + /** * @brief Divides p(X) by (X-r₁)⋯(X−rₘ) in-place. 
* Assumes that p(rⱼ)=0 for all j diff --git a/barretenberg/cpp/src/barretenberg/polynomials/polynomial_arithmetic.test.cpp b/barretenberg/cpp/src/barretenberg/polynomials/polynomial_arithmetic.test.cpp index e462c1d2ca16..b0a57e135d8d 100644 --- a/barretenberg/cpp/src/barretenberg/polynomials/polynomial_arithmetic.test.cpp +++ b/barretenberg/cpp/src/barretenberg/polynomials/polynomial_arithmetic.test.cpp @@ -1095,6 +1095,43 @@ TYPED_TEST(PolynomialTests, evaluate_mle) test_case(2); } +/** + * @brief Test the function for partially evaluating MLE polynomials + * + */ +TYPED_TEST(PolynomialTests, partial_evaluate_mle) +{ + // Initialize a random polynomial + using FF = TypeParam; + size_t N = 32; + Polynomial poly(N); + for (auto& coeff : poly) { + coeff = FF::random_element(); + } + + // Define a random multivariate evaluation point u = (u_0, u_1, u_2, u_3, u_4) + auto u_0 = FF::random_element(); + auto u_1 = FF::random_element(); + auto u_2 = FF::random_element(); + auto u_3 = FF::random_element(); + auto u_4 = FF::random_element(); + std::vector u_challenge = { u_0, u_1, u_2, u_3, u_4 }; + + // Show that directly computing v = p(u_0,...,u_4) yields the same result as first computing the partial evaluation + // in the last 3 variables g(X_0,X_1) = p(X_0,X_1,u_2,u_3,u_4), then v = g(u_0,u_1) + + // Compute v = p(u_0,...,u_4) + auto v_expected = poly.evaluate_mle(u_challenge); + + // Compute g(X_0,X_1) = p(X_0,X_1,u_2,u_3,u_4), then v = g(u_0,u_1) + std::vector u_part_1 = { u_0, u_1 }; + std::vector u_part_2 = { u_2, u_3, u_4 }; + auto partial_evaluated_poly = poly.partial_evaluate_mle(u_part_2); + auto v_result = partial_evaluated_poly.evaluate_mle(u_part_1); + + EXPECT_EQ(v_result, v_expected); +} + TYPED_TEST(PolynomialTests, factor_roots) { using FF = TypeParam; diff --git a/barretenberg/cpp/src/barretenberg/polynomials/univariate.hpp b/barretenberg/cpp/src/barretenberg/polynomials/univariate.hpp index 42f6090f329e..afa5aa5c0b38 100644 --- 
a/barretenberg/cpp/src/barretenberg/polynomials/univariate.hpp +++ b/barretenberg/cpp/src/barretenberg/polynomials/univariate.hpp @@ -21,6 +21,7 @@ template class UnivariateView; template class Univariate { public: static constexpr size_t LENGTH = _length; + using View = UnivariateView; // TODO(https://github.com/AztecProtocol/barretenberg/issues/714) Try out std::valarray? std::array evaluations; diff --git a/barretenberg/cpp/src/barretenberg/proof_system/arithmetization/arithmetization.hpp b/barretenberg/cpp/src/barretenberg/proof_system/arithmetization/arithmetization.hpp index 671ab2e43044..59c573229f7a 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/arithmetization/arithmetization.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/arithmetization/arithmetization.hpp @@ -174,7 +174,7 @@ template class Ultra : public Arithmetization { +class GoblinTranslator : public Arithmetization { public: // Dirty hack using Selectors = bool; diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/eccvm/eccvm_builder_types.hpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/eccvm/eccvm_builder_types.hpp index 978d0bed55b7..3c9a39fd80a3 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/eccvm/eccvm_builder_types.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/eccvm/eccvm_builder_types.hpp @@ -21,6 +21,17 @@ template struct VMOperation { uint256_t z1 = 0; uint256_t z2 = 0; typename CycleGroup::subgroup_field mul_scalar_full = 0; + [[nodiscard]] uint32_t get_opcode_value() const + { + auto res = static_cast(add); + res += res; + res += static_cast(mul); + res += res; + res += static_cast(eq); + res += res; + res += static_cast(reset); + return res; + } }; template struct ScalarMul { uint32_t pc; diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/eccvm/eccvm_circuit_builder.hpp 
b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/eccvm/eccvm_circuit_builder.hpp index 119b84100403..8f71dd6244c0 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/eccvm/eccvm_circuit_builder.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/eccvm/eccvm_circuit_builder.hpp @@ -17,10 +17,13 @@ namespace proof_system { template class ECCVMCircuitBuilder { public: using CycleGroup = typename Flavor::CycleGroup; - using CycleScalar = typename CycleGroup::subgroup_field; using FF = typename Flavor::FF; + using Polynomial = typename Flavor::Polynomial; + + using CycleScalar = typename CycleGroup::subgroup_field; using Element = typename CycleGroup::element; using AffineElement = typename CycleGroup::affine_element; + static constexpr size_t NUM_SCALAR_BITS = proof_system_eccvm::NUM_SCALAR_BITS; static constexpr size_t WNAF_SLICE_BITS = proof_system_eccvm::WNAF_SLICE_BITS; static constexpr size_t NUM_WNAF_SLICES = proof_system_eccvm::NUM_WNAF_SLICES; @@ -36,8 +39,7 @@ template class ECCVMCircuitBuilder { using VMOperation = proof_system_eccvm::VMOperation; std::vector vm_operations; using ScalarMul = proof_system_eccvm::ScalarMul; - using RawPolynomials = typename Flavor::RawPolynomials; - using Polynomial = barretenberg::Polynomial; + using AllPolynomials = typename Flavor::AllPolynomials; ECCVMCircuitBuilder() = default; @@ -312,9 +314,9 @@ template class ECCVMCircuitBuilder { (reads come from msm_x/y1, msm_x/y2) * lookup_read_counts_1: stores number of times a point has been read from a Straus precomputation table (reads come from msm_x/y3, msm_x/y4) - * @return RawPolynomials + * @return AllPolynomials */ - RawPolynomials compute_full_polynomials() + AllPolynomials compute_polynomials() { const auto msms = get_msms(); const auto flattened_muls = get_flattened_scalar_muls(msms); @@ -336,14 +338,14 @@ template class ECCVMCircuitBuilder { const auto num_rows_log2 = static_cast(numeric::get_msb64(num_rows)); 
size_t num_rows_pow2 = 1UL << (num_rows_log2 + (1UL << num_rows_log2 == num_rows ? 0 : 1)); - RawPolynomials rows; + AllPolynomials polys; for (size_t j = 0; j < NUM_POLYNOMIALS; ++j) { - rows[j] = Polynomial(num_rows_pow2); + polys[j] = Polynomial(num_rows_pow2); } - rows.lagrange_first[0] = 1; - rows.lagrange_second[1] = 1; - rows.lagrange_last[rows.lagrange_last.size() - 1] = 1; + polys.lagrange_first[0] = 1; + polys.lagrange_second[1] = 1; + polys.lagrange_last[polys.lagrange_last.size() - 1] = 1; for (size_t i = 0; i < point_table_read_counts[0].size(); ++i) { // Explanation of off-by-one offset @@ -352,30 +354,30 @@ template class ECCVMCircuitBuilder { // `lookup_read_counts`. We do this mapping in `ecc_msm_relation`. We are off-by-one because we add an empty // row at the start of the WNAF columns that is not accounted for (index of lookup_read_counts maps to the // row in our WNAF columns that computes a slice for a given value of pc and round) - rows.lookup_read_counts_0[i + 1] = point_table_read_counts[0][i]; - rows.lookup_read_counts_1[i + 1] = point_table_read_counts[1][i]; + polys.lookup_read_counts_0[i + 1] = point_table_read_counts[0][i]; + polys.lookup_read_counts_1[i + 1] = point_table_read_counts[1][i]; } for (size_t i = 0; i < transcript_state.size(); ++i) { - rows.transcript_accumulator_empty[i] = transcript_state[i].accumulator_empty; - rows.transcript_add[i] = transcript_state[i].q_add; - rows.transcript_mul[i] = transcript_state[i].q_mul; - rows.transcript_eq[i] = transcript_state[i].q_eq; - rows.transcript_reset_accumulator[i] = transcript_state[i].q_reset_accumulator; - rows.transcript_msm_transition[i] = transcript_state[i].msm_transition; - rows.transcript_pc[i] = transcript_state[i].pc; - rows.transcript_msm_count[i] = transcript_state[i].msm_count; - rows.transcript_x[i] = transcript_state[i].base_x; - rows.transcript_y[i] = transcript_state[i].base_y; - rows.transcript_z1[i] = transcript_state[i].z1; - rows.transcript_z2[i] = 
transcript_state[i].z2; - rows.transcript_z1zero[i] = transcript_state[i].z1_zero; - rows.transcript_z2zero[i] = transcript_state[i].z2_zero; - rows.transcript_op[i] = transcript_state[i].opcode; - rows.transcript_accumulator_x[i] = transcript_state[i].accumulator_x; - rows.transcript_accumulator_y[i] = transcript_state[i].accumulator_y; - rows.transcript_msm_x[i] = transcript_state[i].msm_output_x; - rows.transcript_msm_y[i] = transcript_state[i].msm_output_y; - rows.transcript_collision_check[i] = transcript_state[i].collision_check; + polys.transcript_accumulator_empty[i] = transcript_state[i].accumulator_empty; + polys.transcript_add[i] = transcript_state[i].q_add; + polys.transcript_mul[i] = transcript_state[i].q_mul; + polys.transcript_eq[i] = transcript_state[i].q_eq; + polys.transcript_reset_accumulator[i] = transcript_state[i].q_reset_accumulator; + polys.transcript_msm_transition[i] = transcript_state[i].msm_transition; + polys.transcript_pc[i] = transcript_state[i].pc; + polys.transcript_msm_count[i] = transcript_state[i].msm_count; + polys.transcript_x[i] = transcript_state[i].base_x; + polys.transcript_y[i] = transcript_state[i].base_y; + polys.transcript_z1[i] = transcript_state[i].z1; + polys.transcript_z2[i] = transcript_state[i].z2; + polys.transcript_z1zero[i] = transcript_state[i].z1_zero; + polys.transcript_z2zero[i] = transcript_state[i].z2_zero; + polys.transcript_op[i] = transcript_state[i].opcode; + polys.transcript_accumulator_x[i] = transcript_state[i].accumulator_x; + polys.transcript_accumulator_y[i] = transcript_state[i].accumulator_y; + polys.transcript_msm_x[i] = transcript_state[i].msm_output_x; + polys.transcript_msm_y[i] = transcript_state[i].msm_output_y; + polys.transcript_collision_check[i] = transcript_state[i].collision_check; } // TODO(@zac-williamson) if final opcode resets accumulator, all subsequent "is_accumulator_empty" row values @@ -383,102 +385,101 @@ template class ECCVMCircuitBuilder { // all zero (issue #2217) if 
(transcript_state[transcript_state.size() - 1].accumulator_empty == 1) { for (size_t i = transcript_state.size(); i < num_rows_pow2; ++i) { - rows.transcript_accumulator_empty[i] = 1; + polys.transcript_accumulator_empty[i] = 1; } } for (size_t i = 0; i < precompute_table_state.size(); ++i) { // first row is always an empty row (to accomodate shifted polynomials which must have 0 as 1st // coefficient). All other rows in the precompute_table_state represent active wnaf gates (i.e. // precompute_select = 1) - rows.precompute_select[i] = (i != 0) ? 1 : 0; - rows.precompute_pc[i] = precompute_table_state[i].pc; - rows.precompute_point_transition[i] = static_cast(precompute_table_state[i].point_transition); - rows.precompute_round[i] = precompute_table_state[i].round; - rows.precompute_scalar_sum[i] = precompute_table_state[i].scalar_sum; - - rows.precompute_s1hi[i] = precompute_table_state[i].s1; - rows.precompute_s1lo[i] = precompute_table_state[i].s2; - rows.precompute_s2hi[i] = precompute_table_state[i].s3; - rows.precompute_s2lo[i] = precompute_table_state[i].s4; - rows.precompute_s3hi[i] = precompute_table_state[i].s5; - rows.precompute_s3lo[i] = precompute_table_state[i].s6; - rows.precompute_s4hi[i] = precompute_table_state[i].s7; - rows.precompute_s4lo[i] = precompute_table_state[i].s8; + polys.precompute_select[i] = (i != 0) ? 
1 : 0; + polys.precompute_pc[i] = precompute_table_state[i].pc; + polys.precompute_point_transition[i] = static_cast(precompute_table_state[i].point_transition); + polys.precompute_round[i] = precompute_table_state[i].round; + polys.precompute_scalar_sum[i] = precompute_table_state[i].scalar_sum; + + polys.precompute_s1hi[i] = precompute_table_state[i].s1; + polys.precompute_s1lo[i] = precompute_table_state[i].s2; + polys.precompute_s2hi[i] = precompute_table_state[i].s3; + polys.precompute_s2lo[i] = precompute_table_state[i].s4; + polys.precompute_s3hi[i] = precompute_table_state[i].s5; + polys.precompute_s3lo[i] = precompute_table_state[i].s6; + polys.precompute_s4hi[i] = precompute_table_state[i].s7; + polys.precompute_s4lo[i] = precompute_table_state[i].s8; // If skew is active (i.e. we need to subtract a base point from the msm result), // write `7` into rows.precompute_skew. `7`, in binary representation, equals `-1` when converted into WNAF // form - rows.precompute_skew[i] = precompute_table_state[i].skew ? 7 : 0; + polys.precompute_skew[i] = precompute_table_state[i].skew ? 
7 : 0; - rows.precompute_dx[i] = precompute_table_state[i].precompute_double.x; - rows.precompute_dy[i] = precompute_table_state[i].precompute_double.y; - rows.precompute_tx[i] = precompute_table_state[i].precompute_accumulator.x; - rows.precompute_ty[i] = precompute_table_state[i].precompute_accumulator.y; + polys.precompute_dx[i] = precompute_table_state[i].precompute_double.x; + polys.precompute_dy[i] = precompute_table_state[i].precompute_double.y; + polys.precompute_tx[i] = precompute_table_state[i].precompute_accumulator.x; + polys.precompute_ty[i] = precompute_table_state[i].precompute_accumulator.y; } for (size_t i = 0; i < msm_state.size(); ++i) { - rows.msm_transition[i] = static_cast(msm_state[i].msm_transition); - rows.msm_add[i] = static_cast(msm_state[i].q_add); - rows.msm_double[i] = static_cast(msm_state[i].q_double); - rows.msm_skew[i] = static_cast(msm_state[i].q_skew); - rows.msm_accumulator_x[i] = msm_state[i].accumulator_x; - rows.msm_accumulator_y[i] = msm_state[i].accumulator_y; - rows.msm_pc[i] = msm_state[i].pc; - rows.msm_size_of_msm[i] = msm_state[i].msm_size; - rows.msm_count[i] = msm_state[i].msm_count; - rows.msm_round[i] = msm_state[i].msm_round; - rows.msm_add1[i] = static_cast(msm_state[i].add_state[0].add); - rows.msm_add2[i] = static_cast(msm_state[i].add_state[1].add); - rows.msm_add3[i] = static_cast(msm_state[i].add_state[2].add); - rows.msm_add4[i] = static_cast(msm_state[i].add_state[3].add); - rows.msm_x1[i] = msm_state[i].add_state[0].point.x; - rows.msm_y1[i] = msm_state[i].add_state[0].point.y; - rows.msm_x2[i] = msm_state[i].add_state[1].point.x; - rows.msm_y2[i] = msm_state[i].add_state[1].point.y; - rows.msm_x3[i] = msm_state[i].add_state[2].point.x; - rows.msm_y3[i] = msm_state[i].add_state[2].point.y; - rows.msm_x4[i] = msm_state[i].add_state[3].point.x; - rows.msm_y4[i] = msm_state[i].add_state[3].point.y; - rows.msm_collision_x1[i] = msm_state[i].add_state[0].collision_inverse; - rows.msm_collision_x2[i] = 
msm_state[i].add_state[1].collision_inverse; - rows.msm_collision_x3[i] = msm_state[i].add_state[2].collision_inverse; - rows.msm_collision_x4[i] = msm_state[i].add_state[3].collision_inverse; - rows.msm_lambda1[i] = msm_state[i].add_state[0].lambda; - rows.msm_lambda2[i] = msm_state[i].add_state[1].lambda; - rows.msm_lambda3[i] = msm_state[i].add_state[2].lambda; - rows.msm_lambda4[i] = msm_state[i].add_state[3].lambda; - rows.msm_slice1[i] = msm_state[i].add_state[0].slice; - rows.msm_slice2[i] = msm_state[i].add_state[1].slice; - rows.msm_slice3[i] = msm_state[i].add_state[2].slice; - rows.msm_slice4[i] = msm_state[i].add_state[3].slice; + polys.msm_transition[i] = static_cast(msm_state[i].msm_transition); + polys.msm_add[i] = static_cast(msm_state[i].q_add); + polys.msm_double[i] = static_cast(msm_state[i].q_double); + polys.msm_skew[i] = static_cast(msm_state[i].q_skew); + polys.msm_accumulator_x[i] = msm_state[i].accumulator_x; + polys.msm_accumulator_y[i] = msm_state[i].accumulator_y; + polys.msm_pc[i] = msm_state[i].pc; + polys.msm_size_of_msm[i] = msm_state[i].msm_size; + polys.msm_count[i] = msm_state[i].msm_count; + polys.msm_round[i] = msm_state[i].msm_round; + polys.msm_add1[i] = static_cast(msm_state[i].add_state[0].add); + polys.msm_add2[i] = static_cast(msm_state[i].add_state[1].add); + polys.msm_add3[i] = static_cast(msm_state[i].add_state[2].add); + polys.msm_add4[i] = static_cast(msm_state[i].add_state[3].add); + polys.msm_x1[i] = msm_state[i].add_state[0].point.x; + polys.msm_y1[i] = msm_state[i].add_state[0].point.y; + polys.msm_x2[i] = msm_state[i].add_state[1].point.x; + polys.msm_y2[i] = msm_state[i].add_state[1].point.y; + polys.msm_x3[i] = msm_state[i].add_state[2].point.x; + polys.msm_y3[i] = msm_state[i].add_state[2].point.y; + polys.msm_x4[i] = msm_state[i].add_state[3].point.x; + polys.msm_y4[i] = msm_state[i].add_state[3].point.y; + polys.msm_collision_x1[i] = msm_state[i].add_state[0].collision_inverse; + polys.msm_collision_x2[i] = 
msm_state[i].add_state[1].collision_inverse; + polys.msm_collision_x3[i] = msm_state[i].add_state[2].collision_inverse; + polys.msm_collision_x4[i] = msm_state[i].add_state[3].collision_inverse; + polys.msm_lambda1[i] = msm_state[i].add_state[0].lambda; + polys.msm_lambda2[i] = msm_state[i].add_state[1].lambda; + polys.msm_lambda3[i] = msm_state[i].add_state[2].lambda; + polys.msm_lambda4[i] = msm_state[i].add_state[3].lambda; + polys.msm_slice1[i] = msm_state[i].add_state[0].slice; + polys.msm_slice2[i] = msm_state[i].add_state[1].slice; + polys.msm_slice3[i] = msm_state[i].add_state[2].slice; + polys.msm_slice4[i] = msm_state[i].add_state[3].slice; } - rows.transcript_mul_shift = typename Flavor::Polynomial(rows.transcript_mul.shifted()); - rows.transcript_msm_count_shift = typename Flavor::Polynomial(rows.transcript_msm_count.shifted()); - rows.transcript_accumulator_x_shift = typename Flavor::Polynomial(rows.transcript_accumulator_x.shifted()); - rows.transcript_accumulator_y_shift = typename Flavor::Polynomial(rows.transcript_accumulator_y.shifted()); - rows.precompute_scalar_sum_shift = typename Flavor::Polynomial(rows.precompute_scalar_sum.shifted()); - rows.precompute_s1hi_shift = typename Flavor::Polynomial(rows.precompute_s1hi.shifted()); - rows.precompute_dx_shift = typename Flavor::Polynomial(rows.precompute_dx.shifted()); - rows.precompute_dy_shift = typename Flavor::Polynomial(rows.precompute_dy.shifted()); - rows.precompute_tx_shift = typename Flavor::Polynomial(rows.precompute_tx.shifted()); - rows.precompute_ty_shift = typename Flavor::Polynomial(rows.precompute_ty.shifted()); - rows.msm_transition_shift = typename Flavor::Polynomial(rows.msm_transition.shifted()); - rows.msm_add_shift = typename Flavor::Polynomial(rows.msm_add.shifted()); - rows.msm_double_shift = typename Flavor::Polynomial(rows.msm_double.shifted()); - rows.msm_skew_shift = typename Flavor::Polynomial(rows.msm_skew.shifted()); - rows.msm_accumulator_x_shift = typename 
Flavor::Polynomial(rows.msm_accumulator_x.shifted()); - rows.msm_accumulator_y_shift = typename Flavor::Polynomial(rows.msm_accumulator_y.shifted()); - rows.msm_count_shift = typename Flavor::Polynomial(rows.msm_count.shifted()); - rows.msm_round_shift = typename Flavor::Polynomial(rows.msm_round.shifted()); - rows.msm_add1_shift = typename Flavor::Polynomial(rows.msm_add1.shifted()); - rows.msm_pc_shift = typename Flavor::Polynomial(rows.msm_pc.shifted()); - rows.precompute_pc_shift = typename Flavor::Polynomial(rows.precompute_pc.shifted()); - rows.transcript_pc_shift = typename Flavor::Polynomial(rows.transcript_pc.shifted()); - rows.precompute_round_shift = typename Flavor::Polynomial(rows.precompute_round.shifted()); - rows.transcript_accumulator_empty_shift = - typename Flavor::Polynomial(rows.transcript_accumulator_empty.shifted()); - rows.precompute_select_shift = typename Flavor::Polynomial(rows.precompute_select.shifted()); - return rows; + polys.transcript_mul_shift = Polynomial(polys.transcript_mul.shifted()); + polys.transcript_msm_count_shift = Polynomial(polys.transcript_msm_count.shifted()); + polys.transcript_accumulator_x_shift = Polynomial(polys.transcript_accumulator_x.shifted()); + polys.transcript_accumulator_y_shift = Polynomial(polys.transcript_accumulator_y.shifted()); + polys.precompute_scalar_sum_shift = Polynomial(polys.precompute_scalar_sum.shifted()); + polys.precompute_s1hi_shift = Polynomial(polys.precompute_s1hi.shifted()); + polys.precompute_dx_shift = Polynomial(polys.precompute_dx.shifted()); + polys.precompute_dy_shift = Polynomial(polys.precompute_dy.shifted()); + polys.precompute_tx_shift = Polynomial(polys.precompute_tx.shifted()); + polys.precompute_ty_shift = Polynomial(polys.precompute_ty.shifted()); + polys.msm_transition_shift = Polynomial(polys.msm_transition.shifted()); + polys.msm_add_shift = Polynomial(polys.msm_add.shifted()); + polys.msm_double_shift = Polynomial(polys.msm_double.shifted()); + polys.msm_skew_shift 
= Polynomial(polys.msm_skew.shifted()); + polys.msm_accumulator_x_shift = Polynomial(polys.msm_accumulator_x.shifted()); + polys.msm_accumulator_y_shift = Polynomial(polys.msm_accumulator_y.shifted()); + polys.msm_count_shift = Polynomial(polys.msm_count.shifted()); + polys.msm_round_shift = Polynomial(polys.msm_round.shifted()); + polys.msm_add1_shift = Polynomial(polys.msm_add1.shifted()); + polys.msm_pc_shift = Polynomial(polys.msm_pc.shifted()); + polys.precompute_pc_shift = Polynomial(polys.precompute_pc.shifted()); + polys.transcript_pc_shift = Polynomial(polys.transcript_pc.shifted()); + polys.precompute_round_shift = Polynomial(polys.precompute_round.shifted()); + polys.transcript_accumulator_empty_shift = Polynomial(polys.transcript_accumulator_empty.shifted()); + polys.precompute_select_shift = Polynomial(polys.precompute_select.shifted()); + return polys; } bool check_circuit() @@ -501,31 +502,26 @@ template class ECCVMCircuitBuilder { .eccvm_set_permutation_delta = eccvm_set_permutation_delta, }; - auto rows = compute_full_polynomials(); - const size_t num_rows = rows[0].size(); + auto polynomials = compute_polynomials(); + const size_t num_rows = polynomials[0].size(); proof_system::honk::lookup_library::compute_logderivative_inverse>( - rows, params, num_rows); + polynomials, params, num_rows); honk::permutation_library::compute_permutation_grand_product>( - num_rows, rows, params); + num_rows, polynomials, params); - rows.z_perm_shift = typename Flavor::Polynomial(rows.z_perm.shifted()); + polynomials.z_perm_shift = Polynomial(polynomials.z_perm.shifted()); const auto evaluate_relation = [&](const std::string& relation_name) { - auto relation = Relation(); - typename Relation::RelationValues result; + typename Relation::ArrayOfValuesOverSubrelations result; for (auto& r : result) { r = 0; } constexpr size_t NUM_SUBRELATIONS = result.size(); for (size_t i = 0; i < num_rows; ++i) { - typename Flavor::RowPolynomials row; - for (size_t j = 0; j < 
NUM_POLYNOMIALS; ++j) { - row[j] = rows[j][i]; - } - relation.add_full_relation_value_contribution(result, row, params, 1); + Relation::accumulate(result, polynomials.get_row(i), params, 1); bool x = true; for (size_t j = 0; j < NUM_SUBRELATIONS; ++j) { @@ -553,19 +549,13 @@ template class ECCVMCircuitBuilder { result = result && evaluate_relation.template operator()>("ECCVMSetRelation"); - auto lookup_relation = honk::sumcheck::ECCVMLookupRelation(); - typename honk::sumcheck::ECCVMLookupRelation::RelationValues lookup_result; + using LookupRelation = honk::sumcheck::ECCVMLookupRelation; + typename honk::sumcheck::ECCVMLookupRelation::ArrayOfValuesOverSubrelations lookup_result; for (auto& r : lookup_result) { r = 0; } for (size_t i = 0; i < num_rows; ++i) { - typename Flavor::RowPolynomials row; - for (size_t j = 0; j < NUM_POLYNOMIALS; ++j) { - row[j] = rows[j][i]; - } - { - lookup_relation.add_full_relation_value_contribution(lookup_result, row, params, 1); - } + LookupRelation::accumulate(lookup_result, polynomials.get_row(i), params, 1); } for (auto r : lookup_result) { if (r != 0) { diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_circuit_builder.cpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_circuit_builder.cpp index 445e10cfd495..9384ef1f86db 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_circuit_builder.cpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_circuit_builder.cpp @@ -9,42 +9,85 @@ */ #include "goblin_translator_circuit_builder.hpp" #include "barretenberg/ecc/curves/bn254/fr.hpp" +#include "barretenberg/numeric/uint256/uint256.hpp" +#include "barretenberg/plonk/proof_system/constants.hpp" +#include "barretenberg/proof_system/op_queue/ecc_op_queue.hpp" +#include namespace proof_system { +using ECCVMOperation = ECCOpQueue::ECCVMOperation; + +/** + * @brief Given the transcript 
values from the EccOpQueue, the values of the previous accumulator, batching challenge + * and input x, compute witness for one step of accumulation + * + * @tparam Fq + * @tparam Fr + * @param op_code Opcode value + * @param p_x_lo Low 136 bits of P.x + * @param p_x_hi High 118 bits of P.x + * @param p_y_lo Low 136 bits of P.y + * @param p_y_hi High 118 bits of P.y + * @param z1 z1 scalar + * @param z2 z2 scalar + * @param previous_accumulator The value of the previous accumulator (we assume standard decomposition into limbs) + * @param batching_challenge_v The value of the challenge for batching polynomial evaluations + * @param evaluation_input_x The value at which we evaluate the polynomials + * @return GoblinTranslatorCircuitBuilder::AccumulationInput + */ template -GoblinTranslatorCircuitBuilder::AccumulationInput generate_witness_values( - Fr op_code, Fr p_x_lo, Fr p_x_hi, Fr p_y_lo, Fr p_y_hi, Fr z_1, Fr z_2, Fq previous_accumulator, Fq v, Fq x) +GoblinTranslatorCircuitBuilder::AccumulationInput generate_witness_values(Fr op_code, + Fr p_x_lo, + Fr p_x_hi, + Fr p_y_lo, + Fr p_y_hi, + Fr z1, + Fr z2, + Fq previous_accumulator, + Fq batching_challenge_v, + Fq evaluation_input_x) { + // All parameters are well-described in the header, this is just for convenience constexpr size_t NUM_LIMB_BITS = GoblinTranslatorCircuitBuilder::NUM_LIMB_BITS; + constexpr size_t NUM_BINARY_LIMBS = GoblinTranslatorCircuitBuilder::NUM_BINARY_LIMBS; + constexpr size_t NUM_MICRO_LIMBS = GoblinTranslatorCircuitBuilder::NUM_MICRO_LIMBS; + constexpr size_t NUM_LAST_LIMB_BITS = GoblinTranslatorCircuitBuilder::NUM_LAST_LIMB_BITS; constexpr size_t MICRO_LIMB_BITS = GoblinTranslatorCircuitBuilder::MICRO_LIMB_BITS; + constexpr size_t TOP_STANDARD_MICROLIMB_BITS = NUM_LAST_LIMB_BITS % MICRO_LIMB_BITS; + constexpr size_t NUM_Z_BITS = GoblinTranslatorCircuitBuilder::NUM_Z_BITS; + constexpr size_t TOP_Z_MICROLIMB_BITS = (NUM_Z_BITS % NUM_LIMB_BITS) % MICRO_LIMB_BITS; + constexpr size_t 
TOP_QUOTIENT_MICROLIMB_BITS = + (GoblinTranslatorCircuitBuilder::NUM_QUOTIENT_BITS % NUM_LIMB_BITS) % MICRO_LIMB_BITS; constexpr auto shift_1 = GoblinTranslatorCircuitBuilder::SHIFT_1; - constexpr auto shift_2 = GoblinTranslatorCircuitBuilder::SHIFT_2; - // constexpr auto modulus_u512 = GoblinTranslatorCircuitBuilder::MODULUS_U512; constexpr auto neg_modulus_limbs = GoblinTranslatorCircuitBuilder::NEGATIVE_MODULUS_LIMBS; constexpr auto shift_2_inverse = GoblinTranslatorCircuitBuilder::SHIFT_2_INVERSE; /** - * @brief A small function to transform a native element Fq into its bigfield representation in Fr scalars + * @brief A small function to transform a native element Fq into its bigfield representation in Fr scalars + * + * @details We transform Fq into an integer and then split it into 68-bit limbs, then convert them to Fr. * */ - auto base_element_to_bigfield = [](Fq& original) { + auto base_element_to_limbs = [](Fq& original) { uint256_t original_uint = original; - return std::array({ Fr(original_uint.slice(0, NUM_LIMB_BITS)), - Fr(original_uint.slice(NUM_LIMB_BITS, 2 * NUM_LIMB_BITS)), - Fr(original_uint.slice(2 * NUM_LIMB_BITS, 3 * NUM_LIMB_BITS)), - Fr(original_uint.slice(3 * NUM_LIMB_BITS, 4 * NUM_LIMB_BITS)), - Fr(original_uint) }); + return std::array({ + Fr(original_uint.slice(0, NUM_LIMB_BITS)), + Fr(original_uint.slice(NUM_LIMB_BITS, 2 * NUM_LIMB_BITS)), + Fr(original_uint.slice(2 * NUM_LIMB_BITS, 3 * NUM_LIMB_BITS)), + Fr(original_uint.slice(3 * NUM_LIMB_BITS, 4 * NUM_LIMB_BITS)), + }); }; /** - * @brief A small function to transform a uint512_t element into its bigfield representation in Fr scalars + * @brief A small function to transform a uint512_t element into its 4 68-bit limbs in Fr scalars + * + * @details Split and integer stored in uint512_T into 4 68-bit chunks (we assume that it is lower than 2²⁷²), + * convert to Fr * */ - auto uint512_t_to_bigfield = [&shift_2](uint512_t& original) { - return std::make_tuple(Fr(original.slice(0, 
NUM_LIMB_BITS).lo), - Fr(original.slice(NUM_LIMB_BITS, 2 * NUM_LIMB_BITS).lo), - Fr(original.slice(2 * NUM_LIMB_BITS, 3 * NUM_LIMB_BITS).lo), - Fr(original.slice(3 * NUM_LIMB_BITS, 4 * NUM_LIMB_BITS).lo), - Fr(original.slice(0, NUM_LIMB_BITS * 2).lo) + - Fr(original.slice(NUM_LIMB_BITS * 2, NUM_LIMB_BITS * 4).lo) * shift_2); + auto uint512_t_to_limbs = [](uint512_t& original) { + return std::array{ Fr(original.slice(0, NUM_LIMB_BITS).lo), + Fr(original.slice(NUM_LIMB_BITS, 2 * NUM_LIMB_BITS).lo), + Fr(original.slice(2 * NUM_LIMB_BITS, 3 * NUM_LIMB_BITS).lo), + Fr(original.slice(3 * NUM_LIMB_BITS, 4 * NUM_LIMB_BITS).lo) }; }; /** @@ -52,10 +95,67 @@ GoblinTranslatorCircuitBuilder::AccumulationInput generate_witness_values( * */ auto split_wide_limb_into_2_limbs = [](Fr& wide_limb) { - return std::make_tuple(Fr(uint256_t(wide_limb).slice(0, NUM_LIMB_BITS)), - Fr(uint256_t(wide_limb).slice(NUM_LIMB_BITS, 2 * NUM_LIMB_BITS))); + return std::array{ + Fr(uint256_t(wide_limb).slice(0, NUM_LIMB_BITS)), + Fr(uint256_t(wide_limb).slice(NUM_LIMB_BITS, 2 * NUM_LIMB_BITS)) + }; }; + /** + * @brief A method to split a full 68-bit limb into 5 14-bit limb and 1 shifted limb for a more secure constraint + * + */ auto split_standard_limb_into_micro_limbs = [](Fr& limb) { + static_assert(MICRO_LIMB_BITS == 14); + return std::array{ + uint256_t(limb).slice(0, MICRO_LIMB_BITS), + uint256_t(limb).slice(MICRO_LIMB_BITS, 2 * MICRO_LIMB_BITS), + uint256_t(limb).slice(2 * MICRO_LIMB_BITS, 3 * MICRO_LIMB_BITS), + uint256_t(limb).slice(3 * MICRO_LIMB_BITS, 4 * MICRO_LIMB_BITS), + uint256_t(limb).slice(4 * MICRO_LIMB_BITS, 5 * MICRO_LIMB_BITS), + uint256_t(limb).slice(4 * MICRO_LIMB_BITS, 5 * MICRO_LIMB_BITS) + << (MICRO_LIMB_BITS - (NUM_LIMB_BITS % MICRO_LIMB_BITS)), + }; + }; + + /** + * @brief A method to split the top 50-bit limb into 4 14-bit limbs and 1 shifted limb for a more secure constraint + * (plus there is 1 extra space for other constraints) + * + */ + auto 
split_top_limb_into_micro_limbs = [](Fr& limb, size_t last_limb_bits) { + static_assert(MICRO_LIMB_BITS == 14); + return std::array{ uint256_t(limb).slice(0, MICRO_LIMB_BITS), + uint256_t(limb).slice(MICRO_LIMB_BITS, 2 * MICRO_LIMB_BITS), + uint256_t(limb).slice(2 * MICRO_LIMB_BITS, 3 * MICRO_LIMB_BITS), + uint256_t(limb).slice(3 * MICRO_LIMB_BITS, 4 * MICRO_LIMB_BITS), + uint256_t(limb).slice(3 * MICRO_LIMB_BITS, 4 * MICRO_LIMB_BITS) + << (MICRO_LIMB_BITS - (last_limb_bits % MICRO_LIMB_BITS)), + 0 }; + }; + + /** + * @brief A method for splitting the top 60-bit z limb into microlimbs (differs from the 68-bit limb by the shift in + * the last limb) + * + */ + auto split_top_z_limb_into_micro_limbs = [](Fr& limb, size_t last_limb_bits) { + static_assert(MICRO_LIMB_BITS == 14); + return std::array{ uint256_t(limb).slice(0, MICRO_LIMB_BITS), + uint256_t(limb).slice(MICRO_LIMB_BITS, 2 * MICRO_LIMB_BITS), + uint256_t(limb).slice(2 * MICRO_LIMB_BITS, 3 * MICRO_LIMB_BITS), + uint256_t(limb).slice(3 * MICRO_LIMB_BITS, 4 * MICRO_LIMB_BITS), + uint256_t(limb).slice(4 * MICRO_LIMB_BITS, 5 * MICRO_LIMB_BITS), + uint256_t(limb).slice(4 * MICRO_LIMB_BITS, 5 * MICRO_LIMB_BITS) + << (MICRO_LIMB_BITS - (last_limb_bits % MICRO_LIMB_BITS)) }; + }; + + /** + * @brief Split a 72-bit relation limb into 6 14-bit limbs (we can allow the slack here, since we only need to + * ensure non-overflow of the modulus) + * + */ + auto split_relation_limb_into_micro_limbs = [](Fr& limb) { + static_assert(MICRO_LIMB_BITS == 14); return std::array{ uint256_t(limb).slice(0, MICRO_LIMB_BITS), uint256_t(limb).slice(MICRO_LIMB_BITS, 2 * MICRO_LIMB_BITS), @@ -65,32 +165,32 @@ GoblinTranslatorCircuitBuilder::AccumulationInput generate_witness_values( uint256_t(limb).slice(5 * MICRO_LIMB_BITS, 6 * MICRO_LIMB_BITS), }; }; - // x and powers of v are given to use in challenge form, so the verifier has to deal with this :) + // x and powers of v are given to us in challenge form, so the verifier has to deal with 
this :) Fq v_squared; Fq v_cubed; Fq v_quarted; - v_squared = v * v; - v_cubed = v_squared * v; - v_quarted = v_cubed * v; + v_squared = batching_challenge_v * batching_challenge_v; + v_cubed = v_squared * batching_challenge_v; + v_quarted = v_cubed * batching_challenge_v; // Convert the accumulator, powers of v and x into "bigfield" form - auto previous_accumulator_witnesses = base_element_to_bigfield(previous_accumulator); - auto v_witnesses = base_element_to_bigfield(v); - auto v_squared_witnesses = base_element_to_bigfield(v_squared); - auto v_cubed_witnesses = base_element_to_bigfield(v_cubed); - auto v_quarted_witnesses = base_element_to_bigfield(v_quarted); - auto x_witnesses = base_element_to_bigfield(x); + auto previous_accumulator_limbs = base_element_to_limbs(previous_accumulator); + auto v_witnesses = base_element_to_limbs(batching_challenge_v); + auto v_squared_witnesses = base_element_to_limbs(v_squared); + auto v_cubed_witnesses = base_element_to_limbs(v_cubed); + auto v_quarted_witnesses = base_element_to_limbs(v_quarted); + auto x_witnesses = base_element_to_limbs(evaluation_input_x); // To calculate the quotient, we need to evaluate the expression in integers. 
So we need uint512_t versions of all // elements involved auto uint_previous_accumulator = uint512_t(previous_accumulator); - auto uint_x = uint512_t(x); + auto uint_x = uint512_t(evaluation_input_x); auto uint_op = uint512_t(op_code); auto uint_p_x = uint512_t(uint256_t(p_x_lo) + (uint256_t(p_x_hi) << (NUM_LIMB_BITS << 1))); auto uint_p_y = uint512_t(uint256_t(p_y_lo) + (uint256_t(p_y_hi) << (NUM_LIMB_BITS << 1))); - auto uint_z_1 = uint512_t(z_1); - auto uint_z_2 = uint512_t(z_2); - auto uint_v = uint512_t(v); + auto uint_z1 = uint512_t(z1); + auto uint_z2 = uint512_t(z2); + auto uint_v = uint512_t(batching_challenge_v); auto uint_v_squared = uint512_t(v_squared); auto uint_v_cubed = uint512_t(v_cubed); auto uint_v_quarted = uint512_t(v_quarted); @@ -99,140 +199,164 @@ GoblinTranslatorCircuitBuilder::AccumulationInput generate_witness_values( Fq base_op = Fq(uint256_t(op_code)); Fq base_p_x = Fq(uint256_t(p_x_lo) + (uint256_t(p_x_hi) << (NUM_LIMB_BITS << 1))); Fq base_p_y = Fq(uint256_t(p_y_lo) + (uint256_t(p_y_hi) << (NUM_LIMB_BITS << 1))); - Fq base_z_1 = Fq(uint256_t(z_1)); - Fq base_z_2 = Fq(uint256_t(z_2)); + Fq base_z_1 = Fq(uint256_t(z1)); + Fq base_z_2 = Fq(uint256_t(z2)); // Construct bigfield representations of P.x and P.y auto [p_x_0, p_x_1] = split_wide_limb_into_2_limbs(p_x_lo); auto [p_x_2, p_x_3] = split_wide_limb_into_2_limbs(p_x_hi); - Fr p_x_prime = p_x_lo + p_x_hi * Fr(shift_2); - std::array p_x_witnesses = { p_x_0, p_x_1, p_x_2, p_x_3, p_x_prime }; + std::array p_x_limbs = { p_x_0, p_x_1, p_x_2, p_x_3 }; auto [p_y_0, p_y_1] = split_wide_limb_into_2_limbs(p_y_lo); auto [p_y_2, p_y_3] = split_wide_limb_into_2_limbs(p_y_hi); - Fr p_y_prime = p_y_lo + p_y_hi * Fr(shift_2); - std::array p_y_witnesses = { p_y_0, p_y_1, p_y_2, p_y_3, p_y_prime }; + std::array p_y_limbs = { p_y_0, p_y_1, p_y_2, p_y_3 }; // Construct bigfield representations of z1 and z2 only using 2 limbs each - // z_1 and z_2 are low enough to act as their own prime limbs - auto 
[z_1_lo, z_1_hi] = split_wide_limb_into_2_limbs(z_1); - auto [z_2_lo, z_2_hi] = split_wide_limb_into_2_limbs(z_2); + auto z_1_limbs = split_wide_limb_into_2_limbs(z1); + auto z_2_limbs = split_wide_limb_into_2_limbs(z2); // The formula is `accumulator = accumulator⋅x + (op + v⋅p.x + v²⋅p.y + v³⋅z₁ + v⁴z₂)`. We need to compute the // remainder (new accumulator value) - Fq remainder = previous_accumulator * x + base_z_2 * v_quarted + base_z_1 * v_cubed + base_p_y * v_squared + - base_p_x * v + base_op; - uint512_t quotient_by_modulus = uint_previous_accumulator * uint_x + uint_z_2 * uint_v_quarted + - uint_z_1 * uint_v_cubed + uint_p_y * uint_v_squared + uint_p_x * uint_v + uint_op - + Fq remainder = previous_accumulator * evaluation_input_x + base_z_2 * v_quarted + base_z_1 * v_cubed + + base_p_y * v_squared + base_p_x * batching_challenge_v + base_op; + + // We also need to compute the quotient + uint512_t quotient_by_modulus = uint_previous_accumulator * uint_x + uint_z2 * uint_v_quarted + + uint_z1 * uint_v_cubed + uint_p_y * uint_v_squared + uint_p_x * uint_v + uint_op - uint512_t(remainder); uint512_t quotient = quotient_by_modulus / uint512_t(Fq::modulus); - // constexpr uint512_t MAX_CONSTRAINED_SIZE = uint512_t(1) << 254; - // constexpr uint512_t MAX_Z_SIZE = uint512_t(1) << (NUM_LIMB_BITS * 2); - // numeric::uint1024_t max_quotient = - // (uint1024_t(MAX_CONSTRAINED_SIZE) * MAX_CONSTRAINED_SIZE * 3 + MAX_Z_SIZE * MAX_CONSTRAINED_SIZE * 2 + 4) / - // modulus_u512; - // info("Max quotient: ", max_quotient); - // info("Max quotient range constraint: ", max_quotient.get_msb() + 1); - - auto [remainder_0, remainder_1, remainder_2, remainder_3, remainder_prime] = base_element_to_bigfield(remainder); - std::array remainder_witnesses = { remainder_0, remainder_1, remainder_2, remainder_3, remainder_prime }; - auto [quotient_0, quotient_1, quotient_2, quotient_3, quotient_prime] = uint512_t_to_bigfield(quotient); - std::array quotient_witnesses = { quotient_0, 
quotient_1, quotient_2, quotient_3, quotient_prime }; + + ASSERT(quotient_by_modulus == (quotient * uint512_t(Fq::modulus))); + + // Compute quotient and remainder bigfield representation + auto remainder_limbs = base_element_to_limbs(remainder); + std::array quotient_limbs = uint512_t_to_limbs(quotient); // We will divide by shift_2 instantly in the relation itself, but first we need to compute the low part (0*0) and - // the high part (0*1, 1*0) multiplied by a signle limb shift - Fr low_wide_relation_limb_part_1 = - previous_accumulator_witnesses[0] * x_witnesses[0] + op_code + v_witnesses[0] * p_x_witnesses[0] + - v_squared_witnesses[0] * p_y_witnesses[0] + v_cubed_witnesses[0] * z_1_lo + v_quarted_witnesses[0] * z_2_lo + - quotient_witnesses[0] * neg_modulus_limbs[0] - remainder_witnesses[0]; // This covers the lowest limb - // info("LW1:", low_wide_relation_limb_part_1); + // the high part (0*1, 1*0) multiplied by a single limb shift + Fr low_wide_relation_limb_part_1 = previous_accumulator_limbs[0] * x_witnesses[0] + op_code + + v_witnesses[0] * p_x_limbs[0] + v_squared_witnesses[0] * p_y_limbs[0] + + v_cubed_witnesses[0] * z_1_limbs[0] + v_quarted_witnesses[0] * z_2_limbs[0] + + quotient_limbs[0] * neg_modulus_limbs[0] - + remainder_limbs[0]; // This covers the lowest limb + Fr low_wide_relation_limb = low_wide_relation_limb_part_1 + - (previous_accumulator_witnesses[1] * x_witnesses[0] + previous_accumulator_witnesses[0] * x_witnesses[1] + - v_witnesses[1] * p_x_witnesses[0] + p_x_witnesses[1] * v_witnesses[0] + - v_squared_witnesses[1] * p_y_witnesses[0] + v_squared_witnesses[0] * p_y_witnesses[1] + - v_cubed_witnesses[1] * z_1_lo + z_1_hi * v_cubed_witnesses[0] + v_quarted_witnesses[1] * z_2_lo + - v_quarted_witnesses[0] * z_2_hi + quotient_witnesses[0] * neg_modulus_limbs[1] + - quotient_witnesses[1] * neg_modulus_limbs[0] - remainder_witnesses[1]) * - shift_1; // And this covers the limb shifted by 68 - // for (auto& limb : quotient_witnesses) { - // 
info("Q: ", limb); - // } - // Treating accumulator as 254-bit constrained value - // constexpr auto max_limb_size = (uint512_t(1) << NUM_LIMB_BITS) - 1; - // constexpr auto shift_1_u512 = uint512_t(shift_1); - // constexpr auto op_max_size = uint512_t(4); - // constexpr uint512_t low_wide_limb_maximum_value = - // op_max_size + (max_limb_size * max_limb_size) * ((shift_1_u512 * 12) + 6); - // constexpr uint512_t low_wide_limb_maximum_value_constraint = - // (low_wide_limb_maximum_value >> (2 * NUM_LIMB_BITS)).lo + - // uint256_t(uint64_t((low_wide_limb_maximum_value % uint512_t(1) << (2 * NUM_LIMB_BITS)) != 0)); - // constexpr auto low_wide_limb_range_consraint_size = low_wide_limb_maximum_value_constraint.get_msb() + 1; - // info("Low limb range constraint: ", low_wide_limb_range_consraint_size); - // Low bits have to be zero + (previous_accumulator_limbs[1] * x_witnesses[0] + previous_accumulator_limbs[0] * x_witnesses[1] + + v_witnesses[1] * p_x_limbs[0] + p_x_limbs[1] * v_witnesses[0] + v_squared_witnesses[1] * p_y_limbs[0] + + v_squared_witnesses[0] * p_y_limbs[1] + v_cubed_witnesses[1] * z_1_limbs[0] + + z_1_limbs[1] * v_cubed_witnesses[0] + v_quarted_witnesses[1] * z_2_limbs[0] + + v_quarted_witnesses[0] * z_2_limbs[1] + quotient_limbs[0] * neg_modulus_limbs[1] + + quotient_limbs[1] * neg_modulus_limbs[0] - remainder_limbs[1]) * + shift_1; + + // Low bits have to be zero ASSERT(uint256_t(low_wide_relation_limb).slice(0, 2 * NUM_LIMB_BITS) == 0); Fr low_wide_relation_limb_divided = low_wide_relation_limb * shift_2_inverse; - // We need to range constrain the low_wide_relation_limb_divided - // constexpr size_t NUM_LAST_BN254_LIMB_BITS = modulus_u512.get_msb() + 1 - NUM_LIMB_BITS * 3; - - // constexpr auto max_high_limb_size = (uint512_t(1) << NUM_LAST_BN254_LIMB_BITS) - 1; - // constexpr uint512_t high_wide_limb_maximum_value = - // low_wide_limb_maximum_value_constraint + (max_limb_size * max_limb_size) * 16 + - // (max_limb_size * max_limb_size * 10 + 
max_limb_size * max_high_limb_size * 10) * shift_1_u512; - // constexpr uint512_t high_wide_limb_maximum_value_constraint = - // (high_wide_limb_maximum_value >> (2 * NUM_LIMB_BITS)).lo + - // uint256_t(uint64_t((high_wide_limb_maximum_value % uint512_t(1) << (2 * NUM_LIMB_BITS)) != 0)); - // constexpr auto high_wide_limb_range_constraint_size = high_wide_limb_maximum_value_constraint.get_msb() + 1; - // info(high_wide_limb_range_constraint_size); - // 4 high combinations = 8 ml*ml + 8 ml*last_ml. 2 low combinations = 2*ml*ml + 2*ml*last_ml + + // The high relation limb is the accumulation of the low limb divided by 2¹³⁶ and the combination of limbs with + // indices (0*2,1*1,2*0) with limbs with indices (0*3,1*2,2*1,3*0) multiplied by 2⁶⁸ + Fr high_wide_relation_limb = - low_wide_relation_limb_divided + previous_accumulator_witnesses[2] * x_witnesses[0] + - previous_accumulator_witnesses[1] * x_witnesses[1] + previous_accumulator_witnesses[0] * x_witnesses[2] + - v_witnesses[2] * p_x_witnesses[0] + v_witnesses[1] * p_x_witnesses[1] + v_witnesses[0] * p_x_witnesses[2] + - v_squared_witnesses[2] * p_y_witnesses[0] + v_squared_witnesses[1] * p_y_witnesses[1] + - v_squared_witnesses[0] * p_y_witnesses[2] + v_cubed_witnesses[2] * z_1_lo + v_cubed_witnesses[1] * z_1_hi + - v_quarted_witnesses[2] * z_2_lo + v_quarted_witnesses[1] * z_2_hi + - quotient_witnesses[2] * neg_modulus_limbs[0] + quotient_witnesses[1] * neg_modulus_limbs[1] + - quotient_witnesses[0] * neg_modulus_limbs[2] - remainder_witnesses[2] + - (previous_accumulator_witnesses[3] * x_witnesses[0] + previous_accumulator_witnesses[2] * x_witnesses[1] + - previous_accumulator_witnesses[1] * x_witnesses[2] + previous_accumulator_witnesses[0] * x_witnesses[3] + - v_witnesses[3] * p_x_witnesses[0] + v_witnesses[2] * p_x_witnesses[1] + v_witnesses[1] * p_x_witnesses[2] + - v_witnesses[0] * p_x_witnesses[3] + v_squared_witnesses[3] * p_y_witnesses[0] + - v_squared_witnesses[2] * p_y_witnesses[1] + 
v_squared_witnesses[1] * p_y_witnesses[2] + - v_squared_witnesses[0] * p_y_witnesses[3] + v_cubed_witnesses[3] * z_1_lo + v_cubed_witnesses[2] * z_1_hi + - v_quarted_witnesses[3] * z_2_lo + v_quarted_witnesses[2] * z_2_hi + - quotient_witnesses[3] * neg_modulus_limbs[0] + quotient_witnesses[2] * neg_modulus_limbs[1] + - quotient_witnesses[1] * neg_modulus_limbs[2] + quotient_witnesses[0] * neg_modulus_limbs[3] - - remainder_witnesses[3]) * + low_wide_relation_limb_divided + previous_accumulator_limbs[2] * x_witnesses[0] + + previous_accumulator_limbs[1] * x_witnesses[1] + previous_accumulator_limbs[0] * x_witnesses[2] + + v_witnesses[2] * p_x_limbs[0] + v_witnesses[1] * p_x_limbs[1] + v_witnesses[0] * p_x_limbs[2] + + v_squared_witnesses[2] * p_y_limbs[0] + v_squared_witnesses[1] * p_y_limbs[1] + + v_squared_witnesses[0] * p_y_limbs[2] + v_cubed_witnesses[2] * z_1_limbs[0] + + v_cubed_witnesses[1] * z_1_limbs[1] + v_quarted_witnesses[2] * z_2_limbs[0] + + v_quarted_witnesses[1] * z_2_limbs[1] + quotient_limbs[2] * neg_modulus_limbs[0] + + quotient_limbs[1] * neg_modulus_limbs[1] + quotient_limbs[0] * neg_modulus_limbs[2] - remainder_limbs[2] + + (previous_accumulator_limbs[3] * x_witnesses[0] + previous_accumulator_limbs[2] * x_witnesses[1] + + previous_accumulator_limbs[1] * x_witnesses[2] + previous_accumulator_limbs[0] * x_witnesses[3] + + v_witnesses[3] * p_x_limbs[0] + v_witnesses[2] * p_x_limbs[1] + v_witnesses[1] * p_x_limbs[2] + + v_witnesses[0] * p_x_limbs[3] + v_squared_witnesses[3] * p_y_limbs[0] + v_squared_witnesses[2] * p_y_limbs[1] + + v_squared_witnesses[1] * p_y_limbs[2] + v_squared_witnesses[0] * p_y_limbs[3] + + v_cubed_witnesses[3] * z_1_limbs[0] + v_cubed_witnesses[2] * z_1_limbs[1] + + v_quarted_witnesses[3] * z_2_limbs[0] + v_quarted_witnesses[2] * z_2_limbs[1] + + quotient_limbs[3] * neg_modulus_limbs[0] + quotient_limbs[2] * neg_modulus_limbs[1] + + quotient_limbs[1] * neg_modulus_limbs[2] + quotient_limbs[0] * neg_modulus_limbs[3] - 
remainder_limbs[3]) * shift_1; - // info("Value: ", high_wide_relation_limb); - // info("Value: ", high_wide_relation_limb * shift_2_inverse); + + // Check that the results lower 136 bits are zero ASSERT(uint256_t(high_wide_relation_limb).slice(0, 2 * NUM_LIMB_BITS) == 0); + // Get divided version + auto high_wide_relation_limb_divided = high_wide_relation_limb * shift_2_inverse; + + const auto last_limb_index = GoblinTranslatorCircuitBuilder::NUM_BINARY_LIMBS - 1; + + const auto NUM_Z_LIMBS = GoblinTranslatorCircuitBuilder::NUM_Z_LIMBS; + std::array, NUM_BINARY_LIMBS> P_x_microlimbs; + std::array, NUM_BINARY_LIMBS> P_y_microlimbs; + std::array, NUM_Z_LIMBS> z_1_microlimbs; + std::array, NUM_Z_LIMBS> z_2_microlimbs; + std::array, NUM_BINARY_LIMBS> current_accumulator_microlimbs; + std::array, NUM_BINARY_LIMBS> quotient_microlimbs; + // Split P_x into microlimbs for range constraining + for (size_t i = 0; i < last_limb_index; i++) { + P_x_microlimbs[i] = split_standard_limb_into_micro_limbs(p_x_limbs[i]); + } + P_x_microlimbs[last_limb_index] = + split_top_limb_into_micro_limbs(p_x_limbs[last_limb_index], TOP_STANDARD_MICROLIMB_BITS); + + // Split P_y into microlimbs for range constraining + for (size_t i = 0; i < last_limb_index; i++) { + P_y_microlimbs[i] = split_standard_limb_into_micro_limbs(p_y_limbs[i]); + } + P_y_microlimbs[last_limb_index] = + split_top_limb_into_micro_limbs(p_y_limbs[last_limb_index], TOP_STANDARD_MICROLIMB_BITS); + + // Split z scalars into microlimbs for range constraining + for (size_t i = 0; i < NUM_Z_LIMBS - 1; i++) { + z_1_microlimbs[i] = split_standard_limb_into_micro_limbs(z_1_limbs[i]); + z_2_microlimbs[i] = split_standard_limb_into_micro_limbs(z_2_limbs[i]); + } + z_1_microlimbs[GoblinTranslatorCircuitBuilder::NUM_Z_LIMBS - 1] = split_top_z_limb_into_micro_limbs( + z_1_limbs[GoblinTranslatorCircuitBuilder::NUM_Z_LIMBS - 1], TOP_Z_MICROLIMB_BITS); + z_2_microlimbs[GoblinTranslatorCircuitBuilder::NUM_Z_LIMBS - 1] = 
split_top_z_limb_into_micro_limbs( + z_2_limbs[GoblinTranslatorCircuitBuilder::NUM_Z_LIMBS - 1], TOP_Z_MICROLIMB_BITS); + + // Split current accumulator into microlimbs for range constraining + for (size_t i = 0; i < last_limb_index; i++) { + current_accumulator_microlimbs[i] = split_standard_limb_into_micro_limbs(remainder_limbs[i]); + } + current_accumulator_microlimbs[last_limb_index] = + split_top_limb_into_micro_limbs(remainder_limbs[last_limb_index], TOP_STANDARD_MICROLIMB_BITS); + + // Split quotient into microlimbs for range constraining + for (size_t i = 0; i < last_limb_index; i++) { + quotient_microlimbs[i] = split_standard_limb_into_micro_limbs(quotient_limbs[i]); + } + quotient_microlimbs[last_limb_index] = + split_top_limb_into_micro_limbs(quotient_limbs[last_limb_index], TOP_QUOTIENT_MICROLIMB_BITS); + + // Start filling the witness container GoblinTranslatorCircuitBuilder::AccumulationInput input{ .op_code = op_code, .P_x_lo = p_x_lo, .P_x_hi = p_x_hi, - .P_x_limbs = p_x_witnesses, - .P_x_microlimbs = {}, + .P_x_limbs = p_x_limbs, + .P_x_microlimbs = P_x_microlimbs, .P_y_lo = p_y_lo, .P_y_hi = p_y_hi, - .P_y_limbs = p_y_witnesses, - .P_y_microlimbs = {}, - .z_1 = z_1, - .z_1_limbs = { z_1_lo, z_1_hi }, - .z_1_microlimbs = {}, - .z_2 = z_2, - .z_2_limbs = { z_2_lo, z_2_hi }, - .z_2_microlimbs = {}, - .previous_accumulator = previous_accumulator_witnesses, - .current_accumulator = remainder_witnesses, - .current_accumulator_microlimbs = {}, - .quotient_binary_limbs = quotient_witnesses, - .quotient_microlimbs = {}, - .relation_wide_limbs = { low_wide_relation_limb_divided, high_wide_relation_limb * shift_2_inverse }, + .P_y_limbs = p_y_limbs, + .P_y_microlimbs = P_y_microlimbs, + .z_1 = z1, + .z_1_limbs = z_1_limbs, + .z_1_microlimbs = z_1_microlimbs, + .z_2 = z2, + .z_2_limbs = z_2_limbs, + .z_2_microlimbs = z_2_microlimbs, + .previous_accumulator = previous_accumulator_limbs, + .current_accumulator = remainder_limbs, + 
.current_accumulator_microlimbs = current_accumulator_microlimbs, + .quotient_binary_limbs = quotient_limbs, + .quotient_microlimbs = quotient_microlimbs, + .relation_wide_limbs = { low_wide_relation_limb_divided, high_wide_relation_limb_divided }, + .relation_wide_microlimbs = { split_relation_limb_into_micro_limbs(low_wide_relation_limb_divided), + split_relation_limb_into_micro_limbs(high_wide_relation_limb_divided) }, .x_limbs = x_witnesses, .v_limbs = v_witnesses, .v_squared_limbs = v_squared_witnesses, @@ -240,36 +364,715 @@ GoblinTranslatorCircuitBuilder::AccumulationInput generate_witness_values( .v_quarted_limbs = v_quarted_witnesses, }; - for (size_t i = 0; i < GoblinTranslatorCircuitBuilder::NUM_BINARY_LIMBS; i++) { - input.P_x_microlimbs[i] = split_standard_limb_into_micro_limbs(input.P_x_limbs[i]); - } - for (size_t i = 0; i < GoblinTranslatorCircuitBuilder::NUM_BINARY_LIMBS; i++) { - input.P_y_microlimbs[i] = split_standard_limb_into_micro_limbs(input.P_y_limbs[i]); - } - for (size_t i = 0; i < GoblinTranslatorCircuitBuilder::NUM_Z_LIMBS; i++) { - input.z_1_microlimbs[i] = split_standard_limb_into_micro_limbs(input.z_1_limbs[i]); - input.z_2_microlimbs[i] = split_standard_limb_into_micro_limbs(input.z_2_limbs[i]); + return input; +} +/** + * @brief Create a single accumulation gate + * + * @param acc_step + */ +void GoblinTranslatorCircuitBuilder::create_accumulation_gate(const AccumulationInput acc_step) +{ + // The first wires OpQueue/Transcript wires + // Opcode should be {0,1,2,3,4,8} + ASSERT(acc_step.op_code == 0 || acc_step.op_code == 1 || acc_step.op_code == 2 || acc_step.op_code == 3 || + acc_step.op_code == 4 || acc_step.op_code == 8); + + auto& op_wire = std::get(wires); + op_wire.push_back(add_variable(acc_step.op_code)); + // Every second op value in the transcript (indices 3, 5, etc) are not defined so let's just put zero there + op_wire.push_back(zero_idx); + + /** + * @brief Insert two values into the same wire sequentially + * + */ + 
auto insert_pair_into_wire = [this](WireIds wire_index, Fr first, Fr second) { + auto& current_wire = wires[wire_index]; + current_wire.push_back(add_variable(first)); + current_wire.push_back(add_variable(second)); + }; + + // Check and insert P_x_lo and P_y_hi into wire 1 + ASSERT(uint256_t(acc_step.P_x_lo) <= MAX_LOW_WIDE_LIMB_SIZE); + ASSERT(uint256_t(acc_step.P_y_hi) <= MAX_HIGH_WIDE_LIMB_SIZE); + insert_pair_into_wire(WireIds::X_LOW_Y_HI, acc_step.P_x_lo, acc_step.P_y_hi); + + // Check and insert P_x_hi and z_1 into wire 2 + ASSERT(uint256_t(acc_step.P_x_hi) <= MAX_HIGH_WIDE_LIMB_SIZE); + ASSERT(uint256_t(acc_step.z_1) <= MAX_LOW_WIDE_LIMB_SIZE); + insert_pair_into_wire(WireIds::X_HIGH_Z_1, acc_step.P_x_hi, acc_step.z_1); + + // Check and insert P_y_lo and z_2 into wire 3 + ASSERT(uint256_t(acc_step.P_y_lo) <= MAX_LOW_WIDE_LIMB_SIZE); + ASSERT(uint256_t(acc_step.z_2) <= MAX_LOW_WIDE_LIMB_SIZE); + insert_pair_into_wire(WireIds::Y_LOW_Z_2, acc_step.P_y_lo, acc_step.z_2); + + // Check decomposition of values from the Queue into limbs used in bigfield evaluations + ASSERT(acc_step.P_x_lo == (acc_step.P_x_limbs[0] + acc_step.P_x_limbs[1] * SHIFT_1)); + ASSERT(acc_step.P_x_hi == (acc_step.P_x_limbs[2] + acc_step.P_x_limbs[3] * SHIFT_1)); + ASSERT(acc_step.P_y_lo == (acc_step.P_y_limbs[0] + acc_step.P_y_limbs[1] * SHIFT_1)); + ASSERT(acc_step.P_y_hi == (acc_step.P_y_limbs[2] + acc_step.P_y_limbs[3] * SHIFT_1)); + ASSERT(acc_step.z_1 == (acc_step.z_1_limbs[0] + acc_step.z_1_limbs[1] * SHIFT_1)); + ASSERT(acc_step.z_2 == (acc_step.z_2_limbs[0] + acc_step.z_2_limbs[1] * SHIFT_1)); + + /** + * @brief Check correctness of limbs values + * + */ + auto check_binary_limbs_maximum_values = [](const std::array& limbs, + const uint256_t& MAX_LAST_LIMB = + (uint256_t(1) << NUM_LAST_LIMB_BITS)) { + for (size_t i = 0; i < total_limbs - 1; i++) { + ASSERT(uint256_t(limbs[i]) < SHIFT_1); + } + ASSERT(uint256_t(limbs[total_limbs - 1]) < MAX_LAST_LIMB); + }; + /** + * @brief Check 
correctness of values for range constraint limbs + * + */ + auto check_micro_limbs_maximum_values = + []( + const std::array, binary_limb_count>& limbs) { + for (size_t i = 0; i < binary_limb_count; i++) { + for (size_t j = 0; j < micro_limb_count; j++) { + ASSERT(uint256_t(limbs[i][j]) < MICRO_SHIFT); + } + } + }; + + const auto MAX_Z_LAST_LIMB = uint256_t(1) << (NUM_Z_BITS - NUM_LIMB_BITS); + const auto MAX_QUOTIENT_LAST_LIMB = uint256_t(1) << (NUM_LAST_QUOTIENT_LIMB_BITS); + // Check limb values are in 68-bit range + check_binary_limbs_maximum_values(acc_step.P_x_limbs); + check_binary_limbs_maximum_values(acc_step.P_y_limbs); + check_binary_limbs_maximum_values(acc_step.z_1_limbs, /*MAX_LAST_LIMB=*/MAX_Z_LAST_LIMB); + check_binary_limbs_maximum_values(acc_step.z_2_limbs, /*MAX_LAST_LIMB=*/MAX_Z_LAST_LIMB); + check_binary_limbs_maximum_values(acc_step.previous_accumulator); + check_binary_limbs_maximum_values(acc_step.current_accumulator); + check_binary_limbs_maximum_values(acc_step.quotient_binary_limbs, /*MAX_LAST_LIMB=*/MAX_QUOTIENT_LAST_LIMB); + + // Insert limbs used in bigfield evaluations + insert_pair_into_wire(P_X_LOW_LIMBS, acc_step.P_x_limbs[0], acc_step.P_x_limbs[1]); + insert_pair_into_wire(P_X_HIGH_LIMBS, acc_step.P_x_limbs[2], acc_step.P_x_limbs[3]); + insert_pair_into_wire(P_Y_LOW_LIMBS, acc_step.P_y_limbs[0], acc_step.P_y_limbs[1]); + insert_pair_into_wire(P_Y_HIGH_LIMBS, acc_step.P_y_limbs[2], acc_step.P_y_limbs[3]); + insert_pair_into_wire(Z_LOW_LIMBS, acc_step.z_1_limbs[0], acc_step.z_2_limbs[0]); + insert_pair_into_wire(Z_HIGH_LIMBS, acc_step.z_1_limbs[1], acc_step.z_2_limbs[1]); + insert_pair_into_wire( + QUOTIENT_LOW_BINARY_LIMBS, acc_step.quotient_binary_limbs[0], acc_step.quotient_binary_limbs[1]); + insert_pair_into_wire( + QUOTIENT_HIGH_BINARY_LIMBS, acc_step.quotient_binary_limbs[2], acc_step.quotient_binary_limbs[3]); + insert_pair_into_wire(RELATION_WIDE_LIMBS, acc_step.relation_wide_limbs[0], acc_step.relation_wide_limbs[1]); + + 
// Check limbs used in range constraints are in range + check_micro_limbs_maximum_values(acc_step.P_x_microlimbs); + check_micro_limbs_maximum_values(acc_step.P_y_microlimbs); + check_micro_limbs_maximum_values(acc_step.z_1_microlimbs); + check_micro_limbs_maximum_values(acc_step.z_2_microlimbs); + check_micro_limbs_maximum_values(acc_step.current_accumulator_microlimbs); + + // Check that relation limbs are in range + ASSERT(uint256_t(acc_step.relation_wide_limbs[0]) < MAX_RELATION_WIDE_LIMB_SIZE); + ASSERT(uint256_t(acc_step.relation_wide_limbs[1]) < MAX_RELATION_WIDE_LIMB_SIZE); + + /** + * @brief Put several values in sequential wires + * + */ + auto lay_limbs_in_row = + [this](std::array input, WireIds starting_wire, size_t number_of_elements) { + ASSERT(number_of_elements <= array_size); + for (size_t i = 0; i < number_of_elements; i++) { + wires[starting_wire + i].push_back(add_variable(input[i])); + } + }; + + // We are using some leftover crevices for relation_wide_microlimbs + auto low_relation_microlimbs = acc_step.relation_wide_microlimbs[0]; + auto high_relation_microlimbs = acc_step.relation_wide_microlimbs[1]; + + // We have 4 wires specifically for the relation microlimbs + insert_pair_into_wire( + RELATION_WIDE_LIMBS_RANGE_CONSTRAINT_0, low_relation_microlimbs[0], high_relation_microlimbs[0]); + insert_pair_into_wire( + RELATION_WIDE_LIMBS_RANGE_CONSTRAINT_1, low_relation_microlimbs[1], high_relation_microlimbs[1]); + insert_pair_into_wire( + RELATION_WIDE_LIMBS_RANGE_CONSTRAINT_2, low_relation_microlimbs[2], high_relation_microlimbs[2]); + insert_pair_into_wire( + RELATION_WIDE_LIMBS_RANGE_CONSTRAINT_3, low_relation_microlimbs[3], high_relation_microlimbs[3]); + + // Next ones go into top P_x and P_y, current accumulator and quotient unused microlimbs + + // Insert the second highest low relation microlimb into the space left in P_x range constraints highest wire + auto top_p_x_microlimbs = acc_step.P_x_microlimbs[NUM_BINARY_LIMBS - 1]; + 
top_p_x_microlimbs[NUM_MICRO_LIMBS - 1] = low_relation_microlimbs[NUM_MICRO_LIMBS - 2]; + + // Insert the second highest high relation microlimb into the space left in P_y range constraints highest wire + auto top_p_y_microlimbs = acc_step.P_y_microlimbs[NUM_BINARY_LIMBS - 1]; + top_p_y_microlimbs[NUM_MICRO_LIMBS - 1] = high_relation_microlimbs[NUM_MICRO_LIMBS - 2]; + + // The highest low relation microlimb goes into the crevice left in current accumulator microlimbs + auto top_current_accumulator_microlimbs = acc_step.current_accumulator_microlimbs[NUM_BINARY_LIMBS - 1]; + top_current_accumulator_microlimbs[NUM_MICRO_LIMBS - 1] = low_relation_microlimbs[NUM_MICRO_LIMBS - 1]; + + // The highest high relation microlimb goes into the quotient crevice + auto top_quotient_microlimbs = acc_step.quotient_microlimbs[NUM_BINARY_LIMBS - 1]; + top_quotient_microlimbs[NUM_MICRO_LIMBS - 1] = high_relation_microlimbs[NUM_MICRO_LIMBS - 1]; + + // Now put all microlimbs into appropriate wires + lay_limbs_in_row(acc_step.P_x_microlimbs[0], P_X_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); + lay_limbs_in_row(acc_step.P_x_microlimbs[1], P_X_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); + lay_limbs_in_row(acc_step.P_x_microlimbs[2], P_X_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); + lay_limbs_in_row(top_p_x_microlimbs, P_X_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); + lay_limbs_in_row(acc_step.P_y_microlimbs[0], P_Y_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); + lay_limbs_in_row(acc_step.P_y_microlimbs[1], P_Y_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); + lay_limbs_in_row(acc_step.P_y_microlimbs[2], P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); + lay_limbs_in_row(top_p_y_microlimbs, P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); + lay_limbs_in_row(acc_step.z_1_microlimbs[0], Z_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); + lay_limbs_in_row(acc_step.z_2_microlimbs[0], Z_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); + 
lay_limbs_in_row(acc_step.z_1_microlimbs[1], Z_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); + lay_limbs_in_row(acc_step.z_2_microlimbs[1], Z_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); + lay_limbs_in_row(acc_step.current_accumulator, ACCUMULATORS_BINARY_LIMBS_0, NUM_BINARY_LIMBS); + lay_limbs_in_row(acc_step.previous_accumulator, ACCUMULATORS_BINARY_LIMBS_0, NUM_BINARY_LIMBS); + lay_limbs_in_row( + acc_step.current_accumulator_microlimbs[0], ACCUMULATOR_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); + lay_limbs_in_row( + acc_step.current_accumulator_microlimbs[1], ACCUMULATOR_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); + lay_limbs_in_row( + acc_step.current_accumulator_microlimbs[2], ACCUMULATOR_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); + lay_limbs_in_row(top_current_accumulator_microlimbs, ACCUMULATOR_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); + lay_limbs_in_row(acc_step.quotient_microlimbs[0], QUOTIENT_LOW_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS); + lay_limbs_in_row(acc_step.quotient_microlimbs[1], QUOTIENT_LOW_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS); + lay_limbs_in_row(acc_step.quotient_microlimbs[2], QUOTIENT_HIGH_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS); + lay_limbs_in_row(top_quotient_microlimbs, QUOTIENT_HIGH_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS); + + num_gates += 2; + + // Check that all the wires are filled equally + barretenberg::constexpr_for<0, TOTAL_COUNT, 1>([&]() { ASSERT(std::get(wires).size() == num_gates); }); +} + +/** + * @brief Given an ECCVM operation, previous accumulator and necessary challenges, compute witnesses for one + * accumulation + * + * @tparam Fq + * @return GoblinTranslatorCircuitBuilder::AccumulationInput + */ +template +GoblinTranslatorCircuitBuilder::AccumulationInput compute_witness_values_for_one_ecc_op(const ECCVMOperation& ecc_op, + Fq previous_accumulator, + Fq batching_challenge_v, + Fq evaluation_input_x) +{ + using Fr = barretenberg::fr; + + // Get the Opcode value + Fr 
op(ecc_op.get_opcode_value()); + Fr p_x_lo(0); + Fr p_x_hi(0); + Fr p_y_lo(0); + Fr p_y_hi(0); + + // Split P.x and P.y into their representations in bn254 transcript + p_x_lo = Fr(uint256_t(ecc_op.base_point.x).slice(0, 2 * GoblinTranslatorCircuitBuilder::NUM_LIMB_BITS)); + p_x_hi = Fr(uint256_t(ecc_op.base_point.x) + .slice(2 * GoblinTranslatorCircuitBuilder::NUM_LIMB_BITS, + 4 * GoblinTranslatorCircuitBuilder::NUM_LIMB_BITS)); + p_y_lo = Fr(uint256_t(ecc_op.base_point.y).slice(0, 2 * GoblinTranslatorCircuitBuilder::NUM_LIMB_BITS)); + p_y_hi = Fr(uint256_t(ecc_op.base_point.y) + .slice(2 * GoblinTranslatorCircuitBuilder::NUM_LIMB_BITS, + 4 * GoblinTranslatorCircuitBuilder::NUM_LIMB_BITS)); + + // Generate the full witness values + return generate_witness_values(op, + p_x_lo, + p_x_hi, + p_y_lo, + p_y_hi, + Fr(ecc_op.z1), + Fr(ecc_op.z2), + previous_accumulator, + batching_challenge_v, + evaluation_input_x); +} +void GoblinTranslatorCircuitBuilder::feed_ecc_op_queue_into_circuit(ECCOpQueue& ecc_op_queue) +{ + using Fq = barretenberg::fq; + std::vector accumulator_trace; + Fq current_accumulator(0); + if (ecc_op_queue.raw_ops.empty()) { + return; } - for (size_t i = 0; i < GoblinTranslatorCircuitBuilder::NUM_BINARY_LIMBS; i++) { - input.current_accumulator_microlimbs[i] = split_standard_limb_into_micro_limbs(input.current_accumulator[i]); - // info("Stored: ", single_accumulation_step.current_accumulator_microlimbs[i][5], " at ", i); + // Rename for ease of use + auto x = evaluation_input_x; + auto v = batching_challenge_v; + + // We need to precompute the accumulators at each step, because in the actual circuit we compute the values starting + // from the later indices. 
We need to know the previous accumulator to create the gate + for (size_t i = 0; i < ecc_op_queue.raw_ops.size(); i++) { + auto& ecc_op = ecc_op_queue.raw_ops[ecc_op_queue.raw_ops.size() - 1 - i]; + current_accumulator *= x; + current_accumulator += + (Fq(ecc_op.get_opcode_value()) + + v * (ecc_op.base_point.x + v * (ecc_op.base_point.y + v * (ecc_op.z1 + v * ecc_op.z2)))); + accumulator_trace.push_back(current_accumulator); } - for (size_t i = 0; i < GoblinTranslatorCircuitBuilder::NUM_BINARY_LIMBS; i++) { - input.quotient_microlimbs[i] = split_standard_limb_into_micro_limbs(input.quotient_binary_limbs[i]); - // info("Stored: ", single_accumulation_step.current_accumulator_microlimbs[i][5], " at ", i); + + // We don't care about the last value since we'll recompute it during witness generation anyway + accumulator_trace.pop_back(); + + for (auto& raw_op : ecc_op_queue.raw_ops) { + Fq previous_accumulator = 0; + // Pop the last value from accumulator trace and use it as previous accumulator + if (!accumulator_trace.empty()) { + previous_accumulator = accumulator_trace.back(); + accumulator_trace.pop_back(); + } + // Compute witness values + auto one_accumulation_step = compute_witness_values_for_one_ecc_op(raw_op, previous_accumulator, v, x); + + // And put them into the wires + create_accumulation_gate(one_accumulation_step); } - return input; } -template GoblinTranslatorCircuitBuilder::AccumulationInput generate_witness_values( - barretenberg::fr op_code, - barretenberg::fr p_x_lo, - barretenberg::fr p_x_hi, - barretenberg::fr p_y_lo, - barretenberg::fr p_y_hi, - barretenberg::fr z_1, - barretenberg::fr z_2, - barretenberg::fq previous_accumulator, - barretenberg::fq v, - barretenberg::fq x); +bool GoblinTranslatorCircuitBuilder::check_circuit() +{ + // Compute the limbs of evaluation_input_x and powers of batching_challenge_v (these go into the relation) + RelationInputs relation_inputs = compute_relation_inputs_limbs(batching_challenge_v, evaluation_input_x); + 
// Get the main wires (we will operate with range constraint wires mainly through indices, since this is easier) + auto& op_wire = std::get(wires); + auto& x_lo_y_hi_wire = std::get(wires); + auto& x_hi_z_1_wire = std::get(wires); + auto& y_lo_z_2_wire = std::get(wires); + auto& p_x_0_p_x_1_wire = std::get(wires); + auto& p_x_2_p_x_3_wire = std::get(wires); + auto& p_y_0_p_y_1_wire = std::get(wires); + auto& p_y_2_p_y_3_wire = std::get(wires); + auto& z_lo_wire = std::get(wires); + auto& z_hi_wire = std::get(wires); + auto& accumulators_binary_limbs_0_wire = std::get(wires); + auto& accumulators_binary_limbs_1_wire = std::get(wires); + auto& accumulators_binary_limbs_2_wire = std::get(wires); + auto& accumulators_binary_limbs_3_wire = std::get(wires); + auto& quotient_low_binary_limbs = std::get(wires); + auto& quotient_high_binary_limbs = std::get(wires); + auto& relation_wide_limbs_wire = std::get(wires); + auto reconstructed_evaluation_input_x = Fr(uint256_t(evaluation_input_x)); + auto reconstructed_batching_evaluation_v = Fr(uint256_t(batching_challenge_v)); + auto reconstructed_batching_evaluation_v2 = Fr(uint256_t(batching_challenge_v.pow(2))); + auto reconstructed_batching_evaluation_v3 = Fr(uint256_t(batching_challenge_v.pow(3))); + auto reconstructed_batching_evaluation_v4 = Fr(uint256_t(batching_challenge_v.pow(4))); + /** + * @brief Get elements at the same index from several sequential wires and put them into a vector + * + */ + auto get_sequential_micro_chunks = [this](size_t gate_index, WireIds starting_wire_index, size_t chunk_count) { + std::vector chunks; + for (size_t i = starting_wire_index; i < starting_wire_index + chunk_count; i++) { + chunks.push_back(get_variable(wires[i][gate_index])); + } + return chunks; + }; + + /** + * @brief Reconstruct the value of one regular limb used in relation computation from micro chunks used to + * create range constraints + * + * @details We might ant to skip several items at the end, since those will be 
shifted or used + * for another decomposition + * + */ + auto accumulate_limb_from_micro_chunks = [](const std::vector& chunks, const int skipped_at_end = 1) { + Fr mini_accumulator(0); + auto end = chunks.end(); + std::advance(end, -skipped_at_end); + for (auto it = end; it != chunks.begin();) { + --it; + mini_accumulator = mini_accumulator * MICRO_SHIFT + *it; + } + return mini_accumulator; + }; + /** + * @brief Go through each gate + * + */ + for (size_t i = 1; i < num_gates - 1; i++) { + bool gate_is_odd = i & 1; + // The main relation is computed between odd and the next even indices. For example, 1 and 2 + if (gate_is_odd) { + // Get the values of P.x + Fr op_code = get_variable(op_wire[i]); + Fr p_x_lo = get_variable(x_lo_y_hi_wire[i]); + Fr p_x_hi = get_variable(x_hi_z_1_wire[i]); + Fr p_x_0 = get_variable(p_x_0_p_x_1_wire[i]); + Fr p_x_1 = get_variable(p_x_0_p_x_1_wire[i + 1]); + Fr p_x_2 = get_variable(p_x_2_p_x_3_wire[i]); + Fr p_x_3 = get_variable(p_x_2_p_x_3_wire[i + 1]); + const std::vector p_x_binary_limbs = { p_x_0, p_x_1, p_x_2, p_x_3 }; + + // P.y + Fr p_y_lo = get_variable(y_lo_z_2_wire[i]); + Fr p_y_hi = get_variable(x_lo_y_hi_wire[i + 1]); + Fr p_y_0 = get_variable(p_y_0_p_y_1_wire[i]); + Fr p_y_1 = get_variable(p_y_0_p_y_1_wire[i + 1]); + Fr p_y_2 = get_variable(p_y_2_p_y_3_wire[i]); + Fr p_y_3 = get_variable(p_y_2_p_y_3_wire[i + 1]); + const std::vector p_y_binary_limbs = { p_y_0, p_y_1, p_y_2, p_y_3 }; + // z1, z2 + Fr z_1 = get_variable(x_hi_z_1_wire[i + 1]); + Fr z_2 = get_variable(y_lo_z_2_wire[i + 1]); + + Fr z_1_lo = get_variable(z_lo_wire[i]); + Fr z_2_lo = get_variable(z_lo_wire[i + 1]); + Fr z_1_hi = get_variable(z_hi_wire[i]); + Fr z_2_hi = get_variable(z_hi_wire[i + 1]); + + const std::vector z_1_binary_limbs = { z_1_lo, z_1_hi }; + const std::vector z_2_binary_limbs = { z_2_lo, z_2_hi }; + // Relation limbs + Fr low_wide_relation_limb = get_variable(relation_wide_limbs_wire[i]); + Fr high_wide_relation_limb = 
get_variable(relation_wide_limbs_wire[i + 1]); + + // Current accumulator (updated value) + const std::vector current_accumulator_binary_limbs = { + get_variable(accumulators_binary_limbs_0_wire[i]), + get_variable(accumulators_binary_limbs_1_wire[i]), + get_variable(accumulators_binary_limbs_2_wire[i]), + get_variable(accumulators_binary_limbs_3_wire[i]), + }; + + // Previous accumulator + const std::vector previous_accumulator_binary_limbs = { + get_variable(accumulators_binary_limbs_0_wire[i + 1]), + get_variable(accumulators_binary_limbs_1_wire[i + 1]), + get_variable(accumulators_binary_limbs_2_wire[i + 1]), + get_variable(accumulators_binary_limbs_3_wire[i + 1]), + }; + + // Quotient + const std::vector quotient_binary_limbs = { + get_variable(quotient_low_binary_limbs[i]), + get_variable(quotient_low_binary_limbs[i + 1]), + get_variable(quotient_high_binary_limbs[i]), + get_variable(quotient_high_binary_limbs[i + 1]), + }; + + // Get micro chunks for checking decomposition and range + auto p_x_micro_chunks = { + get_sequential_micro_chunks(i, P_X_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, P_X_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i, P_X_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, P_X_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS) + }; + auto p_y_micro_chunks = { + get_sequential_micro_chunks(i, P_Y_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, P_Y_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i, P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS) + }; + auto z_1_micro_chunks = { + get_sequential_micro_chunks(i, Z_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i, Z_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + }; + + auto z_2_micro_chunks = { 
+ + get_sequential_micro_chunks(i + 1, Z_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, Z_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS) + }; + + auto current_accumulator_micro_chunks = { + get_sequential_micro_chunks(i, ACCUMULATOR_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, ACCUMULATOR_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i, ACCUMULATOR_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, ACCUMULATOR_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), + }; + auto quotient_micro_chunks = { + get_sequential_micro_chunks(i, QUOTIENT_LOW_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, QUOTIENT_LOW_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i, QUOTIENT_HIGH_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS), + get_sequential_micro_chunks(i + 1, QUOTIENT_HIGH_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS), + }; + + // Lambda for checking the correctness of decomposition of values in the Queue into limbs for checking + // the relation + auto check_wide_limb_into_binary_limb_relation = [](const std::vector& wide_limbs, + const std::vector& binary_limbs) { + ASSERT(wide_limbs.size() * 2 == binary_limbs.size()); + for (size_t i = 0; i < wide_limbs.size(); i++) { + if ((binary_limbs[i * 2] + Fr(SHIFT_1) * binary_limbs[i * 2 + 1]) != wide_limbs[i]) { + return false; + } + } + return true; + }; + // Check that everything has been decomposed correctly + // P.xₗₒ = P.xₗₒ_0 + SHIFT_1 * P.xₗₒ_1 + // P.xₕᵢ = P.xₕᵢ_0 + SHIFT_1 * P.xₕᵢ_1 + // z_1 = z_1ₗₒ + SHIFT_1 * z_1ₕᵢ + // z_2 = z_2ₗₒ + SHIFT_2 * z_1ₕᵢ + if (!(check_wide_limb_into_binary_limb_relation({ p_x_lo, p_x_hi }, p_x_binary_limbs) && + check_wide_limb_into_binary_limb_relation({ p_y_lo, p_y_hi }, p_y_binary_limbs) && + check_wide_limb_into_binary_limb_relation({ z_1 }, z_1_binary_limbs) && + 
check_wide_limb_into_binary_limb_relation({ z_2 }, z_2_binary_limbs))) { + return false; + } + + enum LimbSeriesType { STANDARD_COORDINATE, Z_SCALAR, QUOTIENT }; + + // Check that limbs have been decomposed into microlimbs correctly + // value = ∑ (2ˡ)ⁱ⋅ chunkᵢ, where 2ˡ is the shift + auto check_micro_limb_decomposition_correctness = [&accumulate_limb_from_micro_chunks]( + const std::vector& binary_limbs, + const std::vector>& micro_limbs, + const LimbSeriesType limb_series_type) { + // Shifts for decompositions + constexpr auto SHIFT_12_TO_14 = Fr(4); + constexpr auto SHIFT_10_TO_14 = Fr(16); + constexpr auto SHIFT_8_TO_14 = Fr(64); + constexpr auto SHIFT_4_TO_14 = Fr(1024); + + ASSERT(binary_limbs.size() == micro_limbs.size()); + // First check that all the microlimbs are properly range constrained + for (auto& micro_limb_series : micro_limbs) { + for (auto& micro_limb : micro_limb_series) { + if (uint256_t(micro_limb) > MAX_MICRO_LIMB_SIZE) { + return false; + } + } + } + // For low limbs the last microlimb is used with the shift, so we skip it when reconstructing + // the limb + const size_t SKIPPED_FOR_LOW_LIMBS = 1; + for (size_t i = 0; i < binary_limbs.size() - 1; i++) { + if (binary_limbs[i] != accumulate_limb_from_micro_chunks(micro_limbs[i], SKIPPED_FOR_LOW_LIMBS)) { + return false; + } + // Check last additional constraint (68->70) + if (micro_limbs[i][NUM_MICRO_LIMBS - 1] != (SHIFT_12_TO_14 * micro_limbs[i][NUM_MICRO_LIMBS - 2])) { + return false; + } + } + + const size_t SKIPPED_FOR_STANDARD = 2; + const size_t SKIPPED_FOR_Z_SCALARS = 1; + const size_t SKIPPED_FOR_QUOTIENT = 2; + switch (limb_series_type) { + case STANDARD_COORDINATE: + // For standard Fq value the highest limb is 50 bits, so we skip the top 2 microlimbs + if (binary_limbs[binary_limbs.size() - 1] != + accumulate_limb_from_micro_chunks(micro_limbs[binary_limbs.size() - 1], SKIPPED_FOR_STANDARD)) { + return false; + } + // Check last additional constraint (50->56) + if 
(micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_STANDARD] != + (SHIFT_8_TO_14 * + micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_STANDARD - 1])) { + + return false; + } + break; + // For z top limbs we need as many microlimbs as for the low limbs + case Z_SCALAR: + if (binary_limbs[binary_limbs.size() - 1] != + accumulate_limb_from_micro_chunks(micro_limbs[binary_limbs.size() - 1], + SKIPPED_FOR_Z_SCALARS)) { + return false; + } + // Check last additional constraint (60->70) + if (micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_Z_SCALARS] != + (SHIFT_4_TO_14 * + micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_Z_SCALARS - 1])) { + return false; + } + break; + // Quotient also doesn't need the top 2 + case QUOTIENT: + if (binary_limbs[binary_limbs.size() - 1] != + accumulate_limb_from_micro_chunks(micro_limbs[binary_limbs.size() - 1], SKIPPED_FOR_QUOTIENT)) { + return false; + } + // Check last additional constraint (52->56) + if (micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_QUOTIENT] != + (SHIFT_10_TO_14 * + micro_limbs[binary_limbs.size() - 1][NUM_MICRO_LIMBS - SKIPPED_FOR_QUOTIENT - 1])) { + return false; + } + break; + default: + abort(); + } + + return true; + }; + // Check all micro limb decompositions + if (!check_micro_limb_decomposition_correctness(p_x_binary_limbs, p_x_micro_chunks, STANDARD_COORDINATE)) { + return false; + } + if (!check_micro_limb_decomposition_correctness(p_y_binary_limbs, p_y_micro_chunks, STANDARD_COORDINATE)) { + return false; + } + if (!check_micro_limb_decomposition_correctness(z_1_binary_limbs, z_1_micro_chunks, Z_SCALAR)) { + return false; + } + if (!check_micro_limb_decomposition_correctness(z_2_binary_limbs, z_2_micro_chunks, Z_SCALAR)) { + return false; + } + if (!check_micro_limb_decomposition_correctness( + current_accumulator_binary_limbs, current_accumulator_micro_chunks, STANDARD_COORDINATE)) { + return false; + } + if 
(!check_micro_limb_decomposition_correctness(quotient_binary_limbs, quotient_micro_chunks, QUOTIENT)) { + return false; + } + + // The logic we are trying to enforce is: + // current_accumulator = previous_accumulator ⋅ x + op_code + P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ + // mod Fq To ensure this we transform the relation into the form: previous_accumulator ⋅ x + op + P.x ⋅ + // v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ - quotient ⋅ p - current_accumulator = 0 However, we don't have + // integers. Despite that, we can approximate integers for a certain range, if we know that there will + // not be any overflows. For now we set the range to 2²⁷² ⋅ r. We can evaluate the logic modulo 2²⁷² + // with range constraints and r is native. + // + // previous_accumulator ⋅ x + op + P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ - quotient ⋅ p - + // current_accumulator = 0 => + // 1. previous_accumulator ⋅ x + op + P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ + quotient ⋅ (-p mod + // 2²⁷²) - current_accumulator = 0 mod 2²⁷² + // 2. previous_accumulator ⋅ x + op + P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ - quotient ⋅ p - + // current_accumulator = 0 mod r + // + // The second relation is straightforward and easy to check. The first, not so much. We have to evaluate + // certain bit chunks of the equation and ensure that they are zero. For example, for the lowest limb it + // would be (inclusive ranges): + // + // previous_accumulator[0:67] ⋅ x[0:67] + op + P.x[0:67] ⋅ v[0:67] + P.y[0:67] ⋅ v²[0:67] + z_1[0:67] ⋅ + // v³[0:67] + z_2[0:67] ⋅ v⁴[0:67] + quotient[0:67] ⋅ (-p mod 2²⁷²)[0:67] - current_accumulator[0:67] = + // intermediate_value; (we don't take parts of op, because it's supposed to be between 0 and 3) + // + // We could check that this intermediate_value is equal to 0 mod 2⁶⁸ by dividing it by 2⁶⁸ and + // constraining it. 
For efficiency, we actually compute wider evaluations for 136 bits, which require us + // to also obtain and shift products of [68:135] by [0:67] and [0:67] by [68:135] bits. + // The result of division goes into the next evaluation (the same as a carry flag would) + // So the lowest wide limb is : (∑everything[0:67]⋅everything[0:67] + + // 2⁶⁸⋅(∑everything[0:67]⋅everything[68:135]))/ 2¹³⁶ + // + // The high is: + // (low_limb + ∑everything[0:67]⋅everything[136:203] + ∑everything[68:135]⋅everything[68:135] + + // 2⁶⁸(∑everything[0:67]⋅everything[204:271] + ∑everything[68:135]⋅everything[136:203])) / 2¹³⁶ + // + // We also limit computation on limbs of op, z_1 and z_2, since we know that op has only the lowest limb + // and z_1 and z_2 have only the two lowest limbs + Fr low_wide_limb_relation_check = + + (previous_accumulator_binary_limbs[0] * relation_inputs.x_limbs[0] + op_code + + relation_inputs.v_limbs[0] * p_x_0 + relation_inputs.v_squared_limbs[0] * p_y_0 + + relation_inputs.v_cubed_limbs[0] * z_1_lo + relation_inputs.v_quarted_limbs[0] * z_2_lo + + quotient_binary_limbs[0] * NEGATIVE_MODULUS_LIMBS[0] - current_accumulator_binary_limbs[0]) + + (previous_accumulator_binary_limbs[1] * relation_inputs.x_limbs[0] + + relation_inputs.v_limbs[1] * p_x_0 + relation_inputs.v_squared_limbs[1] * p_y_0 + + relation_inputs.v_cubed_limbs[1] * z_1_lo + relation_inputs.v_quarted_limbs[1] * z_2_lo + + quotient_binary_limbs[1] * NEGATIVE_MODULUS_LIMBS[0] + + previous_accumulator_binary_limbs[0] * relation_inputs.x_limbs[1] + + relation_inputs.v_limbs[0] * p_x_1 + relation_inputs.v_squared_limbs[0] * p_y_1 + + relation_inputs.v_cubed_limbs[0] * z_1_hi + relation_inputs.v_quarted_limbs[0] * z_2_hi + + quotient_binary_limbs[0] * NEGATIVE_MODULUS_LIMBS[1] - current_accumulator_binary_limbs[1]) * + Fr(SHIFT_1); + if (low_wide_limb_relation_check != (low_wide_relation_limb * SHIFT_2)) { + return false; + } + Fr high_wide_relation_limb_check = + low_wide_relation_limb + 
previous_accumulator_binary_limbs[2] * relation_inputs.x_limbs[0] + + previous_accumulator_binary_limbs[1] * relation_inputs.x_limbs[1] + + previous_accumulator_binary_limbs[0] * relation_inputs.x_limbs[2] + relation_inputs.v_limbs[2] * p_x_0 + + relation_inputs.v_limbs[1] * p_x_1 + relation_inputs.v_limbs[0] * p_x_2 + + relation_inputs.v_squared_limbs[2] * p_y_0 + relation_inputs.v_squared_limbs[1] * p_y_1 + + relation_inputs.v_squared_limbs[0] * p_y_2 + relation_inputs.v_cubed_limbs[2] * z_1_lo + + relation_inputs.v_cubed_limbs[1] * z_1_hi + relation_inputs.v_quarted_limbs[2] * z_2_lo + + relation_inputs.v_quarted_limbs[1] * z_2_hi + quotient_binary_limbs[2] * NEGATIVE_MODULUS_LIMBS[0] + + quotient_binary_limbs[1] * NEGATIVE_MODULUS_LIMBS[1] + + quotient_binary_limbs[0] * NEGATIVE_MODULUS_LIMBS[2] - current_accumulator_binary_limbs[2] + + (previous_accumulator_binary_limbs[3] * relation_inputs.x_limbs[0] + + previous_accumulator_binary_limbs[2] * relation_inputs.x_limbs[1] + + previous_accumulator_binary_limbs[1] * relation_inputs.x_limbs[2] + + previous_accumulator_binary_limbs[0] * relation_inputs.x_limbs[3] + + relation_inputs.v_limbs[3] * p_x_0 + relation_inputs.v_limbs[2] * p_x_1 + + relation_inputs.v_limbs[1] * p_x_2 + relation_inputs.v_limbs[0] * p_x_3 + + relation_inputs.v_squared_limbs[3] * p_y_0 + relation_inputs.v_squared_limbs[2] * p_y_1 + + relation_inputs.v_squared_limbs[1] * p_y_2 + relation_inputs.v_squared_limbs[0] * p_y_3 + + relation_inputs.v_cubed_limbs[3] * z_1_lo + relation_inputs.v_cubed_limbs[2] * z_1_hi + + relation_inputs.v_quarted_limbs[3] * z_2_lo + relation_inputs.v_quarted_limbs[2] * z_2_hi + + quotient_binary_limbs[3] * NEGATIVE_MODULUS_LIMBS[0] + + quotient_binary_limbs[2] * NEGATIVE_MODULUS_LIMBS[1] + + quotient_binary_limbs[1] * NEGATIVE_MODULUS_LIMBS[2] + + quotient_binary_limbs[0] * NEGATIVE_MODULUS_LIMBS[3] - current_accumulator_binary_limbs[3]) * + SHIFT_1; + if (high_wide_relation_limb_check != (high_wide_relation_limb * 
SHIFT_2)) { + return false; + } + // Apart from checking the correctness of the evaluation modulo 2²⁷² we also need to ensure that the + // logic works in our scalar field. For this we reconstruct the scalar field values from individual + // limbs + auto reconstructed_p_x = (p_x_0 + p_x_1 * SHIFT_1 + p_x_2 * SHIFT_2 + p_x_3 * SHIFT_3); + auto reconstructed_p_y = (p_y_0 + p_y_1 * SHIFT_1 + p_y_2 * SHIFT_2 + p_y_3 * SHIFT_3); + auto reconstructed_current_accumulator = + (current_accumulator_binary_limbs[0] + current_accumulator_binary_limbs[1] * SHIFT_1 + + current_accumulator_binary_limbs[2] * SHIFT_2 + current_accumulator_binary_limbs[3] * SHIFT_3); + auto reconstructed_previous_accumulator = + (previous_accumulator_binary_limbs[0] + previous_accumulator_binary_limbs[1] * SHIFT_1 + + previous_accumulator_binary_limbs[2] * SHIFT_2 + previous_accumulator_binary_limbs[3] * SHIFT_3); + + auto reconstructed_z1 = (z_1_lo + z_1_hi * SHIFT_1); + auto reconstructed_z2 = (z_2_lo + z_2_hi * SHIFT_1); + auto reconstructed_quotient = (quotient_binary_limbs[0] + quotient_binary_limbs[1] * SHIFT_1 + + quotient_binary_limbs[2] * SHIFT_2 + quotient_binary_limbs[3] * SHIFT_3); + + // Check the relation + if (!(reconstructed_previous_accumulator * reconstructed_evaluation_input_x + op_code + + reconstructed_p_x * reconstructed_batching_evaluation_v + + reconstructed_p_y * reconstructed_batching_evaluation_v2 + + reconstructed_z1 * reconstructed_batching_evaluation_v3 + + reconstructed_z2 * reconstructed_batching_evaluation_v4 + + reconstructed_quotient * NEGATIVE_MODULUS_LIMBS[4] - reconstructed_current_accumulator) + .is_zero()) { + return false; + }; + + } else { + // Check the accumulator is copied correctly + const std::vector current_accumulator_binary_limbs_copy = { + get_variable(accumulators_binary_limbs_0_wire[i]), + get_variable(accumulators_binary_limbs_1_wire[i]), + get_variable(accumulators_binary_limbs_2_wire[i]), + get_variable(accumulators_binary_limbs_3_wire[i]), + 
}; + const std::vector current_accumulator_binary_limbs = { + get_variable(accumulators_binary_limbs_0_wire[i + 1]), + get_variable(accumulators_binary_limbs_1_wire[i + 1]), + get_variable(accumulators_binary_limbs_2_wire[i + 1]), + get_variable(accumulators_binary_limbs_3_wire[i + 1]), + }; + + for (size_t j = 0; j < current_accumulator_binary_limbs.size(); j++) { + if (current_accumulator_binary_limbs_copy[j] != current_accumulator_binary_limbs[j]) { + return false; + } + } + } + } + return true; +}; +template GoblinTranslatorCircuitBuilder::AccumulationInput generate_witness_values(barretenberg::fr, + barretenberg::fr, + barretenberg::fr, + barretenberg::fr, + barretenberg::fr, + barretenberg::fr, + barretenberg::fr, + barretenberg::fq, + barretenberg::fq, + barretenberg::fq); } // namespace proof_system \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_circuit_builder.hpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_circuit_builder.hpp index 052a986238a1..04c13069c915 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_circuit_builder.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_circuit_builder.hpp @@ -8,13 +8,72 @@ * @copyright Copyright (c) 2023 * */ +#include "barretenberg/common/constexpr_utils.hpp" #include "barretenberg/ecc/curves/bn254/fq.hpp" +#include "barretenberg/numeric/uint256/uint256.hpp" #include "barretenberg/proof_system/arithmetization/arithmetization.hpp" +#include "barretenberg/proof_system/op_queue/ecc_op_queue.hpp" #include "circuit_builder_base.hpp" #include #include +#include +#include +#include namespace proof_system { -class GoblinTranslatorCircuitBuilder : CircuitBuilderBase { +/** + * @brief GoblinTranslatorCircuitBuilder creates a circuit that evaluates the correctness of the evaluation of + * EccOpQueue in Fq while operating in the Fr 
scalar field (r is the modulus of Fr and p is the modulus of Fp) + * + * @details Goblin Translator Circuit Builder builds a circuit the purpose of which is to calculate the batched + * evaluation of 5 polynomials in non-native field represented through coefficients in 4 native polynomials (op, + * x_lo_y_hi, x_hi_z_1, y_lo_z_2): + * + * OP | X_LO | X_HI | Y_LO + * 0 | Y_HI | Z1 | Z2 + * + * OP is supposed to be { 0, 1, 2, 3, 4, 8 }. X_LO and Y_LO need to be < 2¹³⁶, X_HI and Y_LO < 2¹¹⁸, Z1 and Z2 < 2¹²⁸. + * X_* and Y_* are supposed to be the decompositions of bn254 base fields elements P.x and P.y and are split into two + * chunks each because the scalar field we are operating on can't fit them + * + * Goblin Translator calculates the result of evaluation of a polynomial op + P.x⋅v +P.y⋅v² + z1 ⋅ v³ + z2⋅v⁴ at the + * given challenge x (evaluation_input_x). For this it uses logic similar to the stdlib bigfield class. We operate in Fr + * while trying to calculate values in Fq. To show that a⋅b=c mod p, we: + * 1) Compute a⋅b in integers + * 2) Compute quotient=a⋅b/p + * 3) Show that a⋅b - quotient⋅p - c = 0 mod 2²⁷² + * 4) Show that a⋅b - quotient⋅p - c = 0 mod r (scalar field modulus) + * This ensures that the logic is sound modulo 2²⁷²⋅r, which means it's correct in integers, if all the values are + * sufficiently constrained (there is no way to undeflow or overflow) + * + * Concretely, Goblin Translator computes one accumulation every two gates: + * previous_accumulator⋅x + op + P.x⋅v +P.y⋅v² + z1⋅v³ + z2⋅v⁴ = current_accumulator mod p. Because of the nature of + * polynomial commitment, previous_accumulator is located at higher index than the current_accumulator. Values of x + * (evaluation_input_x) and v (batching_challenge_v) are precomputed and considered inputs to the relations. 
+ * + * P.x and P.y are deconstructed into 4 limbs (3 68-bit and 1 50-bit) for non-native arithmetic + * z1 and z2 are deconstructed into 2 limbs each (68 and 60 bits) + * op is small and doesn't have to be deconstructed + * + * To show the accumulation is correct we also need to provide the quotient and accumulators as witnesses. Accumulator + * is split the same way as P.x and P.y, but quotient is 256 bits,so the top limb is 52 bits. + * + * Ensuring that the relation mod 2²⁷² is correct is done through splitting this check into two checks modulo 2¹³⁶. + * First, we check that a proper combination of the values in the lower limbs gives the correct result modulo 2¹³⁶ (by + * dividing the result by 2¹³⁶ and range constraining it). Then we use the overlow and higher limbs to prove the same + * modulo 2¹³⁶ again and as a result we get correctness modulo 2²⁷². + * + * One big issue are range constraints. In Goblin Translator we check ranges by decomposing LIMBS into special other + * range constrained MICROLIMBS (have "_CONSTRAINT_" in the name of their wires). These wires always have the range of + * 14 bits, so when we need to constrain something further we use two wires at once and scale the values (for example, + * 68 bits are decomposed into 5 14-bit limbs + 1 shifted limb, which is equal to the highest microlimb multiplied by + * 4). The shifted wires usually have "_TAIL" in the name, but that is not a strict rule. To save space and because of + * the proving system requirements we put some of the decomposed values from relation limbs (limbs which compute the + * result of computation modulo 2²⁷² divided by shifts) into constraint wires named after P.x, P.y, accumulator and + * quotient. This is due to the fact that the highest limb in these four is less than 56 bits, which frees up an extra + * microlimb. 
+ * + */ +class GoblinTranslatorCircuitBuilder : public CircuitBuilderBase { // We don't need templating for Goblin using Fr = barretenberg::fr; using Fq = barretenberg::fq; @@ -36,9 +95,9 @@ class GoblinTranslatorCircuitBuilder : CircuitBuilderBase NEGATIVE_MODULUS_LIMBS = { Fr(NEGATIVE_PRIME_MODULUS.slice(0, NUM_LIMB_BITS).lo), Fr(NEGATIVE_PRIME_MODULUS.slice(NUM_LIMB_BITS, NUM_LIMB_BITS * 2).lo), @@ -143,27 +267,6 @@ class GoblinTranslatorCircuitBuilder : CircuitBuilderBase P_x_limbs; + std::array P_x_limbs; std::array, NUM_BINARY_LIMBS> P_x_microlimbs; Fr P_y_lo; Fr P_y_hi; - std::array P_y_limbs; + std::array P_y_limbs; std::array, NUM_BINARY_LIMBS> P_y_microlimbs; Fr z_1; @@ -191,49 +294,109 @@ class GoblinTranslatorCircuitBuilder : CircuitBuilderBase z_2_limbs; std::array, NUM_Z_LIMBS> z_2_microlimbs; - std::array previous_accumulator; - std::array current_accumulator; + std::array previous_accumulator; + std::array current_accumulator; std::array, NUM_BINARY_LIMBS> current_accumulator_microlimbs; - std::array quotient_binary_limbs; + std::array quotient_binary_limbs; std::array, NUM_BINARY_LIMBS> quotient_microlimbs; - std::array relation_wide_limbs; + std::array relation_wide_limbs; + std::array, 2> relation_wide_microlimbs; // Additional - std::array x_limbs; - std::array v_limbs; - std::array v_squared_limbs = { 0 }; - std::array v_cubed_limbs = { 0 }; - std::array v_quarted_limbs = { 0 }; + std::array x_limbs; + std::array v_limbs; + std::array v_squared_limbs = { 0 }; + std::array v_cubed_limbs = { 0 }; + std::array v_quarted_limbs = { 0 }; }; struct RelationInputs { - std::array x_limbs; - std::array v_limbs; - std::array v_squared_limbs = { 0 }; - std::array v_cubed_limbs = { 0 }; - std::array v_quarted_limbs = { 0 }; + std::array x_limbs; + std::array v_limbs; + std::array v_squared_limbs = { 0 }; + std::array v_cubed_limbs = { 0 }; + std::array v_quarted_limbs = { 0 }; }; + static constexpr std::string_view NAME_STRING = 
"GoblinTranslatorArithmetization"; + + // The challenge that is used for batching together evaluations of several polynomials + Fq batching_challenge_v; + + // The input we evaluate polynomials on + Fq evaluation_input_x; /** - * @brief Create bigfield representations of x and powers of v + * @brief Construct a new Goblin Translator Circuit Builder object + * + * @details Goblin Translator Circuit builder has to be initializaed with evaluation input and batching challenge + * (they are used to compute witness and to store the value for the prover) * - * @param x The point at which the polynomials are being evaluated - * @param v The batching challenge + * @param batching_challenge_v_ + * @param evaluation_input_x_ + */ + GoblinTranslatorCircuitBuilder(Fq batching_challenge_v_, Fq evaluation_input_x_) + : CircuitBuilderBase({}, DEFAULT_TRANSLATOR_VM_LENGTH) + , batching_challenge_v(batching_challenge_v_) + , evaluation_input_x(evaluation_input_x_) + { + add_variable(FF::zero()); + for (auto& wire : wires) { + wire.emplace_back(0); + } + num_gates++; + }; + + /** + * @brief Construct a new Goblin Translator Circuit Builder object and feed op_queue inside + * + * @details Goblin Translator Circuit builder has to be initialized with evaluation input and batching challenge + * (they are used to compute witness and to store the value for the prover) + * + * @param batching_challenge_v_ + * @param evaluation_input_x_ + * @param op_queue + */ + GoblinTranslatorCircuitBuilder(Fq batching_challenge_v_, Fq evaluation_input_x_, ECCOpQueue op_queue) + : GoblinTranslatorCircuitBuilder(batching_challenge_v_, evaluation_input_x_) + { + feed_ecc_op_queue_into_circuit(op_queue); + } + + GoblinTranslatorCircuitBuilder(const GoblinTranslatorCircuitBuilder& other) = delete; + GoblinTranslatorCircuitBuilder(GoblinTranslatorCircuitBuilder&& other) noexcept + : CircuitBuilderBase(std::move(other)){}; + GoblinTranslatorCircuitBuilder& operator=(const GoblinTranslatorCircuitBuilder& other) 
= delete; + GoblinTranslatorCircuitBuilder& operator=(GoblinTranslatorCircuitBuilder&& other) noexcept + { + CircuitBuilderBase::operator=(std::move(other)); + return *this; + }; + ~GoblinTranslatorCircuitBuilder() override = default; + + /** + * @brief Create limb representations of x and powers of v that are needed to compute the witness or check + * circuit correctness + * + * @param evaluation_input_x The point at which the polynomials are being evaluated + * @param batching_challenge_v The batching challenge * @return RelationInputs */ - static RelationInputs compute_relation_inputs_limbs(Fq x, Fq v) + static RelationInputs compute_relation_inputs_limbs(Fq batching_challenge_v, Fq evaluation_input_x) { /** * @brief A small function to transform a native element Fq into its bigfield representation in Fr scalars * */ - auto base_element_to_bigfield = [](Fq& original) { + auto base_element_to_limbs = [](Fq& original) { uint256_t original_uint = original; - return std::array({ Fr(original_uint.slice(0, NUM_LIMB_BITS)), - Fr(original_uint.slice(NUM_LIMB_BITS, 2 * NUM_LIMB_BITS)), - Fr(original_uint.slice(2 * NUM_LIMB_BITS, 3 * NUM_LIMB_BITS)), - Fr(original_uint.slice(3 * NUM_LIMB_BITS, 4 * NUM_LIMB_BITS)), - Fr(original_uint) }); + return std::array({ + Fr(original_uint.slice(0, NUM_LIMB_BITS)), + Fr(original_uint.slice(NUM_LIMB_BITS, 2 * NUM_LIMB_BITS)), + Fr(original_uint.slice(2 * NUM_LIMB_BITS, 3 * NUM_LIMB_BITS)), + Fr(original_uint.slice(3 * NUM_LIMB_BITS, 4 * NUM_LIMB_BITS)), + }); }; + Fq& v = batching_challenge_v; + Fq& x = evaluation_input_x; Fq v_squared; Fq v_cubed; Fq v_quarted; @@ -241,479 +404,75 @@ class GoblinTranslatorCircuitBuilder : CircuitBuilderBase(wires); - op_wire.push_back(add_variable(acc_step.op_code)); - op_wire.push_back(zero_idx); - - /** - * @brief Insert two values into the same wire sequentially - * - */ - auto insert_pair_into_wire = [this](WireIds wire_index, Fr first, Fr second) { - auto& current_wire = wires[wire_index]; - 
current_wire.push_back(add_variable(first)); - current_wire.push_back(add_variable(second)); - }; - - // Check and insert P_x_lo and P_y_hi into wire 1 - ASSERT(uint256_t(acc_step.P_x_lo) <= MAX_LOW_WIDE_LIMB_SIZE); - ASSERT(uint256_t(acc_step.P_y_hi) <= MAX_HIGH_WIDE_LIMB_SIZE); - insert_pair_into_wire(WireIds::X_LO_Y_HI, acc_step.P_x_lo, acc_step.P_y_hi); - - // Check and insert P_x_hi and z_1 into wire 2 - ASSERT(uint256_t(acc_step.P_x_hi) <= MAX_HIGH_WIDE_LIMB_SIZE); - ASSERT(uint256_t(acc_step.z_1) <= MAX_LOW_WIDE_LIMB_SIZE); - insert_pair_into_wire(WireIds::X_HI_Z_1, acc_step.P_x_hi, acc_step.z_1); - - // Check and insert P_y_lo and z_2 into wire 3 - ASSERT(uint256_t(acc_step.P_y_lo) <= MAX_LOW_WIDE_LIMB_SIZE); - ASSERT(uint256_t(acc_step.z_2) <= MAX_LOW_WIDE_LIMB_SIZE); - insert_pair_into_wire(WireIds::Y_LO_Z_2, acc_step.P_y_lo, acc_step.z_2); - - // Check decomposition of values from the Queue into limbs used in bigfield evaluations - ASSERT(acc_step.P_x_lo == (acc_step.P_x_limbs[0] + acc_step.P_x_limbs[1] * SHIFT_1)); - ASSERT(acc_step.P_x_hi == (acc_step.P_x_limbs[2] + acc_step.P_x_limbs[3] * SHIFT_1)); - ASSERT(acc_step.P_y_lo == (acc_step.P_y_limbs[0] + acc_step.P_y_limbs[1] * SHIFT_1)); - ASSERT(acc_step.P_y_hi == (acc_step.P_y_limbs[2] + acc_step.P_y_limbs[3] * SHIFT_1)); - ASSERT(acc_step.z_1 == (acc_step.z_1_limbs[0] + acc_step.z_1_limbs[1] * SHIFT_1)); - ASSERT(acc_step.z_2 == (acc_step.z_2_limbs[0] + acc_step.z_2_limbs[1] * SHIFT_1)); + void create_accumulation_gate(AccumulationInput acc_step); - /** - * @brief Check correctness of limbs values - * - */ - auto check_binary_limbs_maximum_values = [](const std::array& limbs, - bool relaxed_last_limb = false) { - if constexpr (total_limbs == (NUM_BINARY_LIMBS + 1)) { - for (size_t i = 0; i < NUM_BINARY_LIMBS - 1; i++) { - ASSERT(uint256_t(limbs[i]) < SHIFT_1); - } - if (!relaxed_last_limb) { - ASSERT(uint256_t(limbs[NUM_BINARY_LIMBS - 1]) < (uint256_t(1) << NUM_LAST_LIMB_BITS)); - } else { - - 
ASSERT(uint256_t(limbs[NUM_BINARY_LIMBS - 1]) < (SHIFT_1)); - } - } else { - for (size_t i = 0; i < total_limbs; i++) { - ASSERT(uint256_t(limbs[i]) < SHIFT_1); - } - } - }; - /** - * @brief Check correctness of values for range constraint limbs - * - */ - auto check_micro_limbs_maximum_values = - []( - const std::array, binary_limb_count>& limbs) { - for (size_t i = 0; i < binary_limb_count; i++) { - for (size_t j = 0; j < micro_limb_count; j++) { - ASSERT(uint256_t(limbs[i][j]) < MICRO_SHIFT); - } - } - }; - - // Check limb values are in range - check_binary_limbs_maximum_values(acc_step.P_x_limbs); - check_binary_limbs_maximum_values(acc_step.P_y_limbs); - check_binary_limbs_maximum_values(acc_step.z_1_limbs); - check_binary_limbs_maximum_values(acc_step.z_2_limbs); - check_binary_limbs_maximum_values(acc_step.previous_accumulator); - check_binary_limbs_maximum_values(acc_step.current_accumulator); - check_binary_limbs_maximum_values(acc_step.quotient_binary_limbs, /*relaxed_last_limb=*/true); - - // Insert limbs used in bigfield evaluations - insert_pair_into_wire(P_X_LOW_LIMBS, acc_step.P_x_limbs[0], acc_step.P_x_limbs[1]); - insert_pair_into_wire(P_X_HIGH_LIMBS, acc_step.P_x_limbs[2], acc_step.P_x_limbs[3]); - insert_pair_into_wire(P_Y_LOW_LIMBS, acc_step.P_y_limbs[0], acc_step.P_y_limbs[1]); - insert_pair_into_wire(P_Y_HIGH_LIMBS, acc_step.P_y_limbs[2], acc_step.P_y_limbs[3]); - insert_pair_into_wire(Z_LO_LIMBS, acc_step.z_1_limbs[0], acc_step.z_2_limbs[0]); - insert_pair_into_wire(Z_HI_LIMBS, acc_step.z_1_limbs[1], acc_step.z_2_limbs[1]); - insert_pair_into_wire( - QUOTIENT_LO_BINARY_LIMBS, acc_step.quotient_binary_limbs[0], acc_step.quotient_binary_limbs[1]); - insert_pair_into_wire( - QUOTIENT_HI_BINARY_LIMBS, acc_step.quotient_binary_limbs[2], acc_step.quotient_binary_limbs[3]); - insert_pair_into_wire(RELATION_WIDE_LIMBS, acc_step.relation_wide_limbs[0], acc_step.relation_wide_limbs[1]); - - // Check limbs used in range constraints are in range - 
check_micro_limbs_maximum_values(acc_step.P_x_microlimbs); - check_micro_limbs_maximum_values(acc_step.P_y_microlimbs); - check_micro_limbs_maximum_values(acc_step.z_1_microlimbs); - check_micro_limbs_maximum_values(acc_step.z_2_microlimbs); - check_micro_limbs_maximum_values(acc_step.current_accumulator_microlimbs); - - // Check that relation limbs are in range - ASSERT(uint256_t(acc_step.relation_wide_limbs[0]).get_msb() < WIDE_RELATION_LIMB_BITS); - ASSERT(uint256_t(acc_step.relation_wide_limbs[1]).get_msb() < WIDE_RELATION_LIMB_BITS); - - /** - * @brief Put several values in sequential wires - * - */ - auto lay_limbs_in_row = [this](std::array input, - WireIds starting_wire, - size_t number_of_elements) { - ASSERT(number_of_elements <= array_size); - for (size_t i = 0; i < number_of_elements; i++) { - wires[starting_wire + i].push_back(add_variable(input[i])); - } - }; - lay_limbs_in_row(acc_step.P_x_microlimbs[0], P_X_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); - lay_limbs_in_row(acc_step.P_x_microlimbs[1], P_X_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); - lay_limbs_in_row(acc_step.P_x_microlimbs[2], P_X_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); - lay_limbs_in_row(acc_step.P_x_microlimbs[3], P_X_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); - lay_limbs_in_row(acc_step.P_y_microlimbs[0], P_Y_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); - lay_limbs_in_row(acc_step.P_y_microlimbs[1], P_Y_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); - lay_limbs_in_row(acc_step.P_y_microlimbs[2], P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); - lay_limbs_in_row(acc_step.P_y_microlimbs[3], P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); - lay_limbs_in_row(acc_step.z_1_microlimbs[0], Z_LO_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); - lay_limbs_in_row(acc_step.z_2_microlimbs[0], Z_LO_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); - lay_limbs_in_row(acc_step.z_1_microlimbs[1], Z_HI_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); - 
lay_limbs_in_row(acc_step.z_2_microlimbs[1], Z_HI_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); - lay_limbs_in_row(acc_step.current_accumulator, ACCUMULATORS_BINARY_LIMBS_0, NUM_BINARY_LIMBS); - lay_limbs_in_row(acc_step.previous_accumulator, ACCUMULATORS_BINARY_LIMBS_0, NUM_BINARY_LIMBS); - lay_limbs_in_row( - acc_step.current_accumulator_microlimbs[0], ACCUMULATOR_LO_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); - lay_limbs_in_row( - acc_step.current_accumulator_microlimbs[1], ACCUMULATOR_LO_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); - lay_limbs_in_row( - acc_step.current_accumulator_microlimbs[2], ACCUMULATOR_HI_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); - lay_limbs_in_row( - acc_step.current_accumulator_microlimbs[3], ACCUMULATOR_HI_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS); - lay_limbs_in_row(acc_step.quotient_microlimbs[0], QUOTIENT_LO_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS); - lay_limbs_in_row(acc_step.quotient_microlimbs[1], QUOTIENT_LO_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS); - lay_limbs_in_row(acc_step.quotient_microlimbs[2], QUOTIENT_HI_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS); - lay_limbs_in_row(acc_step.quotient_microlimbs[3], QUOTIENT_HI_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS); - - num_gates += 2; + /** + * @brief Get the result of accumulation + * + * @return barretenberg::fq + */ + barretenberg::fq get_computation_result() + { + const size_t RESULT_ROW = 1; + ASSERT(num_gates > RESULT_ROW); + return (uint256_t(get_variable(wires[WireIds::ACCUMULATORS_BINARY_LIMBS_0][RESULT_ROW])) + + uint256_t(get_variable(wires[WireIds::ACCUMULATORS_BINARY_LIMBS_1][RESULT_ROW])) * SHIFT_1 + + uint256_t(get_variable(wires[WireIds::ACCUMULATORS_BINARY_LIMBS_2][RESULT_ROW])) * SHIFT_2 + + uint256_t(get_variable(wires[WireIds::ACCUMULATORS_BINARY_LIMBS_3][RESULT_ROW])) * SHIFT_3); } + /** + * @brief Generate all the gates required to prove the correctness of batched evalution of polynomials representing + * commitments to ECCOpQueue + * + * @param ecc_op_queue 
The queue + */ + void feed_ecc_op_queue_into_circuit(ECCOpQueue& ecc_op_queue); /** * @brief Check the witness satisifies the circuit * - * @details Does one gate for now + * @details Goes through each gate and checks the correctness of accumulation * - * @param x - * @param v * @return true * @return false */ - bool check_circuit(Fq x, Fq v) - { - // Compute the limbs of x and powers of v (these go into the relation) - RelationInputs relation_inputs = compute_relation_inputs_limbs(x, v); - - // Get the wires - auto& op_wire = std::get(wires); - auto& x_lo_y_hi_wire = std::get(wires); - auto& x_hi_z_1_wire = std::get(wires); - auto& y_lo_z_2_wire = std::get(wires); - auto& p_x_0_p_x_1_wire = std::get(wires); - auto& p_x_2_p_x_3_wire = std::get(wires); - auto& p_y_0_p_y_1_wire = std::get(wires); - auto& p_y_2_p_y_3_wire = std::get(wires); - auto& z_lo_wire = std::get(wires); - auto& z_hi_wire = std::get(wires); - auto& accumulators_binary_limbs_0_wire = std::get(wires); - auto& accumulators_binary_limbs_1_wire = std::get(wires); - auto& accumulators_binary_limbs_2_wire = std::get(wires); - auto& accumulators_binary_limbs_3_wire = std::get(wires); - auto& quotient_low_binary_limbs = std::get(wires); - auto& quotient_high_binary_limbs = std::get(wires); - auto& relation_wide_limbs_wire = std::get(wires); - - /** - * @brief Get elements at the same index from several sequential wires and put them into a vector - * - */ - auto get_sequential_micro_chunks = [this](size_t gate_index, WireIds starting_wire_index, size_t chunk_count) { - std::vector chunks; - for (size_t i = starting_wire_index; i < starting_wire_index + chunk_count; i++) { - chunks.push_back(get_variable(wires[i][gate_index])); - } - return chunks; - }; - - /** - * @brief Reconstruct the value of one regular limb used in relation computation from micro chunks used to - * create range constraints - * - */ - auto accumulate_limb_from_micro_chunks = [](const std::vector& chunks) { - Fr mini_accumulator(0); - 
for (auto it = chunks.end(); it != chunks.begin();) { - --it; - mini_accumulator = mini_accumulator * MICRO_SHIFT + *it; - } - return mini_accumulator; - }; - /** - * @brief Enumerate through the gates - * - */ - for (size_t i = 0; i < num_gates; i++) { - // The main relation is computed between odd and the next even indices. For example, 1 and 2 - if (i & 1) { - // Get the values - Fr op_code = get_variable(op_wire[i]); - Fr p_x_lo = get_variable(x_lo_y_hi_wire[i]); - Fr p_x_hi = get_variable(x_hi_z_1_wire[i]); - Fr p_x_0 = get_variable(p_x_0_p_x_1_wire[i]); - Fr p_x_1 = get_variable(p_x_0_p_x_1_wire[i + 1]); - Fr p_x_2 = get_variable(p_x_2_p_x_3_wire[i]); - Fr p_x_3 = get_variable(p_x_2_p_x_3_wire[i + 1]); - const std::vector p_x_binary_limbs = { p_x_0, p_x_1, p_x_2, p_x_3 }; - Fr p_y_lo = get_variable(y_lo_z_2_wire[i]); - Fr p_y_hi = get_variable(x_lo_y_hi_wire[i + 1]); - Fr p_y_0 = get_variable(p_y_0_p_y_1_wire[i]); - Fr p_y_1 = get_variable(p_y_0_p_y_1_wire[i + 1]); - Fr p_y_2 = get_variable(p_y_2_p_y_3_wire[i]); - Fr p_y_3 = get_variable(p_y_2_p_y_3_wire[i + 1]); - const std::vector p_y_binary_limbs = { p_y_0, p_y_1, p_y_2, p_y_3 }; - Fr z_1 = get_variable(x_hi_z_1_wire[i + 1]); - Fr z_2 = get_variable(y_lo_z_2_wire[i + 1]); - Fr z_1_lo = get_variable(z_lo_wire[i]); - Fr z_2_lo = get_variable(z_lo_wire[i + 1]); - Fr z_1_hi = get_variable(z_hi_wire[i]); - Fr z_2_hi = get_variable(z_hi_wire[i + 1]); - Fr low_wide_relation_limb = get_variable(relation_wide_limbs_wire[i]); - Fr high_wide_relation_limb = get_variable(relation_wide_limbs_wire[i + 1]); - const std::vector z_1_binary_limbs = { z_1_lo, z_1_hi }; - const std::vector z_2_binary_limbs = { z_2_lo, z_2_hi }; - const std::vector current_accumulator_binary_limbs = { - get_variable(accumulators_binary_limbs_0_wire[i]), - get_variable(accumulators_binary_limbs_1_wire[i]), - get_variable(accumulators_binary_limbs_2_wire[i]), - get_variable(accumulators_binary_limbs_3_wire[i]), - }; - const std::vector 
previous_accumulator_binary_limbs = { - get_variable(accumulators_binary_limbs_0_wire[i + 1]), - get_variable(accumulators_binary_limbs_1_wire[i + 1]), - get_variable(accumulators_binary_limbs_2_wire[i + 1]), - get_variable(accumulators_binary_limbs_3_wire[i + 1]), - }; - const std::vector quotient_binary_limbs = { - get_variable(quotient_low_binary_limbs[i]), - get_variable(quotient_low_binary_limbs[i + 1]), - get_variable(quotient_high_binary_limbs[i]), - get_variable(quotient_high_binary_limbs[i + 1]), - }; - - // These need to be range constrained, but that logic is not present yet - auto p_x_micro_chunks = { - get_sequential_micro_chunks(i, P_X_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, P_X_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i, P_X_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, P_X_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS) - }; - auto p_y_micro_chunks = { - get_sequential_micro_chunks(i, P_Y_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, P_Y_LOW_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i, P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS) - }; - auto z_1_micro_chunks = { - get_sequential_micro_chunks(i, Z_LO_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - - get_sequential_micro_chunks(i, Z_HI_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - }; - - auto z_2_micro_chunks = { - - get_sequential_micro_chunks(i + 1, Z_LO_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, Z_HI_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS) - }; - - auto current_accumulator_micro_chunks = { - get_sequential_micro_chunks(i, ACCUMULATOR_LO_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, ACCUMULATOR_LO_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - 
get_sequential_micro_chunks(i, ACCUMULATOR_HI_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, ACCUMULATOR_HI_LIMBS_RANGE_CONSTRAINT_0, NUM_MICRO_LIMBS), - }; - auto quotient_micro_chunks = { - get_sequential_micro_chunks(i, QUOTIENT_LO_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, QUOTIENT_LO_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i, QUOTIENT_HI_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS), - get_sequential_micro_chunks(i + 1, QUOTIENT_HI_LIMBS_RANGE_CONSTRAIN_0, NUM_MICRO_LIMBS), - }; - - // Lambda for checking the correctness of decomposition of values in the Queue into limbs for checking - // the relation - auto check_wide_limb_into_binary_limb_relation = [](const std::vector& wide_limbs, - const std::vector& binary_limbs) { - ASSERT(wide_limbs.size() * 2 == binary_limbs.size()); - for (size_t i = 0; i < wide_limbs.size(); i++) { - if ((binary_limbs[i * 2] + Fr(SHIFT_1) * binary_limbs[i * 2 + 1]) != wide_limbs[i]) { - return false; - } - } - return true; - }; - // Check that everything has been decomposed correctly - // P.xₗₒ = P.xₗₒ_0 + SHIFT_1 * P.xₗₒ_1 - // P.xₕᵢ = P.xₕᵢ_0 + SHIFT_1 * P.xₕᵢ_1 - // z_1 = z_1ₗₒ + SHIFT_1 * z_1ₕᵢ - // z_2 = z_2ₗₒ + SHIFT_2 * z_1ₕᵢ - if (!(check_wide_limb_into_binary_limb_relation({ p_x_lo, p_x_hi }, p_x_binary_limbs) && - check_wide_limb_into_binary_limb_relation({ p_y_lo, p_y_hi }, p_y_binary_limbs) && - check_wide_limb_into_binary_limb_relation({ z_1 }, z_1_binary_limbs) && - check_wide_limb_into_binary_limb_relation({ z_2 }, z_2_binary_limbs))) { - return false; - } - - // Check that limbs have been decomposed into microlimbs correctly - // value = ∑ (2ˡ)ⁱ⋅ chunkᵢ, where 2ˡ is the shift - auto check_micro_limb_decomposition_correctness = - [&accumulate_limb_from_micro_chunks](const std::vector& binary_limbs, - const std::vector>& micro_limbs) { - ASSERT(binary_limbs.size() == micro_limbs.size()); - for (size_t i = 0; i < 
binary_limbs.size(); i++) { - if (binary_limbs[i] != accumulate_limb_from_micro_chunks(micro_limbs[i])) { - return false; - } - } - return true; - }; - // Check all micro limb decompositions - if (!check_micro_limb_decomposition_correctness(p_x_binary_limbs, p_x_micro_chunks)) { - return false; - } - if (!check_micro_limb_decomposition_correctness(p_y_binary_limbs, p_y_micro_chunks)) { - return false; - } - if (!check_micro_limb_decomposition_correctness(z_1_binary_limbs, z_1_micro_chunks)) { - return false; - } - if (!check_micro_limb_decomposition_correctness(z_2_binary_limbs, z_2_micro_chunks)) { - return false; - } - if (!check_micro_limb_decomposition_correctness(current_accumulator_binary_limbs, - current_accumulator_micro_chunks)) { - return false; - } - if (!check_micro_limb_decomposition_correctness(quotient_binary_limbs, quotient_micro_chunks)) { - return false; - } - - // The logic we are trying to enforce is: - // current_accumulator = previous_accumulator ⋅ x + op_code + P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ - // mod Fq To ensure this we transform the relation into the form: previous_accumulator ⋅ x + op + P.x ⋅ - // v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ - quotient ⋅ p - current_accumulator = 0 However, we don't have - // integers. Despite that, we can approximate integers for a certain range, if we know that there will - // not be any overflows. For now we set the range to 2²⁷² ⋅ r. We can evaluate the logic modulo 2²⁷² - // with range constraints and r is native. - // - // previous_accumulator ⋅ x + op + P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ - quotient ⋅ p - - // current_accumulator = 0 => - // 1. previous_accumulator ⋅ x + op + P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ + quotient ⋅ (-p mod - // 2²⁷²) - current_accumulator = 0 mod 2²⁷² - // 2. previous_accumulator ⋅ x + op + P.x ⋅ v + P.y ⋅ v² + z_1 ⋅ v³ + z_2 ⋅ v⁴ - quotient ⋅ p - - // current_accumulator = 0 mod r - // - // The second relation is straightforward and easy to check. 
The first, not so much. We have to evaluate - // certain bit chunks of the equation and ensure that they are zero. For example, for the lowest limb it - // would be (inclusive ranges): - // - // previous_accumulator[0:67] ⋅ x[0:67] + op + P.x[0:67] ⋅ v[0:67] + P.y[0:67] ⋅ v²[0:67] + z_1[0:67] ⋅ - // v³[0:67] + z_2[0:67] ⋅ v⁴[0:67] + quotient[0:67] ⋅ (-p mod 2²⁷²)[0:67] - current_accumulator[0:67] = - // intermediate_value; (we don't take parts of op, because it's supposed to be between 0 and 3) - // - // We could check that this intermediate_value is equal to 0 mod 2⁶⁸ by dividing it by 2⁶⁸ and - // constraining it. For efficiency, we actually compute wider evaluations for 136 bits, which require us - // to also obtain and shift products of [68:135] by [0:67] and [0:67] by [68:135] bits. - // The result of division goes into the next evaluation (the same as a carry flag would) - // So the lowest wide limb is : (∑everything[0:67]⋅everything[0:67] + - // 2⁶⁸⋅(∑everything[0:67]⋅everything[68:135]))/ 2¹³⁶ - // - // The high is: - // (low_limb + ∑everything[0:67]⋅everything[136:203] + ∑everything[68:135]⋅everything[68:135] + - // 2⁶⁸(∑everything[0:67]⋅everything[204:271] + ∑everything[68:135]⋅everything[136:203])) / 2¹³⁶ - // - // We also limit computation on limbs of op, z_1 and z_2, since we know that op has only the lowest limb - // and z_1 and z_2 have only the two lowest limbs - Fr low_wide_limb_relation_check = - - (previous_accumulator_binary_limbs[0] * relation_inputs.x_limbs[0] + op_code + - relation_inputs.v_limbs[0] * p_x_0 + relation_inputs.v_squared_limbs[0] * p_y_0 + - relation_inputs.v_cubed_limbs[0] * z_1_lo + relation_inputs.v_quarted_limbs[0] * z_2_lo + - quotient_binary_limbs[0] * NEGATIVE_MODULUS_LIMBS[0] - current_accumulator_binary_limbs[0]) + - (previous_accumulator_binary_limbs[1] * relation_inputs.x_limbs[0] + - relation_inputs.v_limbs[1] * p_x_0 + relation_inputs.v_squared_limbs[1] * p_y_0 + - relation_inputs.v_cubed_limbs[1] * z_1_lo + 
relation_inputs.v_quarted_limbs[1] * z_2_lo + - quotient_binary_limbs[1] * NEGATIVE_MODULUS_LIMBS[0] + - previous_accumulator_binary_limbs[0] * relation_inputs.x_limbs[1] + - relation_inputs.v_limbs[0] * p_x_1 + relation_inputs.v_squared_limbs[0] * p_y_1 + - relation_inputs.v_cubed_limbs[0] * z_1_hi + relation_inputs.v_quarted_limbs[0] * z_2_hi + - quotient_binary_limbs[0] * NEGATIVE_MODULUS_LIMBS[1] - current_accumulator_binary_limbs[1]) * - Fr(SHIFT_1); - if (low_wide_limb_relation_check != (low_wide_relation_limb * SHIFT_2)) { - return false; - } - Fr high_wide_relation_limb_check = - low_wide_relation_limb + previous_accumulator_binary_limbs[2] * relation_inputs.x_limbs[0] + - previous_accumulator_binary_limbs[1] * relation_inputs.x_limbs[1] + - previous_accumulator_binary_limbs[0] * relation_inputs.x_limbs[2] + - relation_inputs.v_limbs[2] * p_x_0 + relation_inputs.v_limbs[1] * p_x_1 + - relation_inputs.v_limbs[0] * p_x_2 + relation_inputs.v_squared_limbs[2] * p_y_0 + - relation_inputs.v_squared_limbs[1] * p_y_1 + relation_inputs.v_squared_limbs[0] * p_y_2 + - relation_inputs.v_cubed_limbs[2] * z_1_lo + relation_inputs.v_cubed_limbs[1] * z_1_hi + - relation_inputs.v_quarted_limbs[2] * z_2_lo + relation_inputs.v_quarted_limbs[1] * z_2_hi + - quotient_binary_limbs[2] * NEGATIVE_MODULUS_LIMBS[0] + - quotient_binary_limbs[1] * NEGATIVE_MODULUS_LIMBS[1] + - quotient_binary_limbs[0] * NEGATIVE_MODULUS_LIMBS[2] - current_accumulator_binary_limbs[2] + - (previous_accumulator_binary_limbs[3] * relation_inputs.x_limbs[0] + - previous_accumulator_binary_limbs[2] * relation_inputs.x_limbs[1] + - previous_accumulator_binary_limbs[1] * relation_inputs.x_limbs[2] + - previous_accumulator_binary_limbs[0] * relation_inputs.x_limbs[3] + - relation_inputs.v_limbs[3] * p_x_0 + relation_inputs.v_limbs[2] * p_x_1 + - relation_inputs.v_limbs[1] * p_x_2 + relation_inputs.v_limbs[0] * p_x_3 + - relation_inputs.v_squared_limbs[3] * p_y_0 + relation_inputs.v_squared_limbs[2] * p_y_1 + - 
relation_inputs.v_squared_limbs[1] * p_y_2 + relation_inputs.v_squared_limbs[0] * p_y_3 + - relation_inputs.v_cubed_limbs[3] * z_1_lo + relation_inputs.v_cubed_limbs[2] * z_1_hi + - relation_inputs.v_quarted_limbs[3] * z_2_lo + relation_inputs.v_quarted_limbs[2] * z_2_hi + - quotient_binary_limbs[3] * NEGATIVE_MODULUS_LIMBS[0] + - quotient_binary_limbs[2] * NEGATIVE_MODULUS_LIMBS[1] + - quotient_binary_limbs[1] * NEGATIVE_MODULUS_LIMBS[2] + - quotient_binary_limbs[0] * NEGATIVE_MODULUS_LIMBS[3] - current_accumulator_binary_limbs[3]) * - SHIFT_1; - if (high_wide_relation_limb_check != (high_wide_relation_limb * SHIFT_2)) { - return false; - } - } - } - return true; - } + bool check_circuit(); }; template -GoblinTranslatorCircuitBuilder::AccumulationInput generate_witness_values( - Fr op_code, Fr p_x_lo, Fr p_x_hi, Fr p_y_lo, Fr p_y_hi, Fr z_1, Fr z_2, Fq previous_accumulator, Fq v, Fq x); -extern template GoblinTranslatorCircuitBuilder::AccumulationInput generate_witness_values( - barretenberg::fr op_code, - barretenberg::fr p_x_lo, - barretenberg::fr p_x_hi, - barretenberg::fr p_y_lo, - barretenberg::fr p_y_hi, - barretenberg::fr z_1, - barretenberg::fr z_2, - barretenberg::fq previous_accumulator, - barretenberg::fq v, - barretenberg::fq x); +GoblinTranslatorCircuitBuilder::AccumulationInput generate_witness_values(Fr op_code, + Fr p_x_lo, + Fr p_x_hi, + Fr p_y_lo, + Fr p_y_hi, + Fr z1, + Fr z2, + Fq previous_accumulator, + Fq batching_challenge_v, + Fq evaluation_input_x); +extern template GoblinTranslatorCircuitBuilder::AccumulationInput generate_witness_values(barretenberg::fr, + barretenberg::fr, + barretenberg::fr, + barretenberg::fr, + barretenberg::fr, + barretenberg::fr, + barretenberg::fr, + barretenberg::fq, + barretenberg::fq, + barretenberg::fq); } // namespace proof_system \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_circuit_builder.test.cpp 
b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_circuit_builder.test.cpp index ca4ac215e785..6c3b10b33c46 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_circuit_builder.test.cpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_circuit_builder.test.cpp @@ -1,5 +1,6 @@ #include "goblin_translator_circuit_builder.hpp" #include "barretenberg/ecc/curves/bn254/bn254.hpp" +#include "barretenberg/proof_system/op_queue/ecc_op_queue.hpp" #include #include #include @@ -10,173 +11,122 @@ auto& engine = numeric::random::get_debug_engine(); } namespace proof_system { -TEST(translator_circuit_builder, scoping_out_the_circuit) +/** + * @brief Check that a single accumulation gate is created correctly + * + */ +TEST(GoblinTranslatorCircuitBuilder, CircuitBuilderBaseCase) { - // Questions: - // 1. Do we need 68-bit limbs at all? using Fr = ::curve::BN254::ScalarField; using Fq = ::curve::BN254::BaseField; - constexpr size_t NUM_LIMB_BITS = 68; - - constexpr std::array neg_modulus_limbs = GoblinTranslatorCircuitBuilder::NEGATIVE_MODULUS_LIMBS; - // x is the value (challenge) at which we are evaluating the polynomials - // y is the end result of the whole combination (I don't know why we use y for domain and x for evalutation in - // the pepe paper) v is the polynomial batching challenge - - // 2 rows: - // OP | P.xₗₒ | P.xₕᵢ | P.yₗₒ - // - | P.yₕᵢ | z₁ | z₂ - - // Rows written vertically: - // 0 | - | OP | - // 1 | P.yₕᵢ | P.xₗₒ | - // 2 | z₁ | P.xₕᵢ | - // 3 | z₂ | P.yₗₒ | - // 4 | p_x_1 | p_x_0 | 68-bit limbs - // 5 | p_x_1_0 | p_x_0_0 | 12 bit limbs - // 6 | p_x_1_1 | p_x_0_1 | 12 bit limbs - // 7 | p_x_1_2 | p_x_0_2 | 12 bit limbs - // 8 | p_x_1_3 | p_x_0_3 | 12 bit limbs - // 9 | p_x_1_4 | p_x_0_4 | 12 bit limbs - // 10 | p_x_1_5 | p_x_0_5 | 8 bit limns - // 11 | p_x_3 | p_x_2 | 68-bit limbs - // 12 | p_x_3_0 | p_x_2_0 | 12 bit limbs - // 13 | p_x_3_1 | p_x_2_1 
| 12 bit limbs - // 14 | p_x_3_2 | p_x_2_2 | 12 bit limbs - // 15 | p_x_3_3 | p_x_2_3 | 12 bit limbs - // 16 | p_x_3_4 | p_x_2_4 | p_x_3_4 is 2 bits and enforced with a relation. p_x_2_4 is 12 bits - // 17 | - | p_x_2_5 | 8 bit limb - // 18 | p_y_1 | p_y_0 | 68-bit limbs - // 19 | p_y_1_0 | p_y_0_0 | 12 bit limbs - // 20 | p_y_1_1 | p_y_0_1 | 12 bit limbs - // 21 | p_y_1_2 | p_y_0_2 | 12 bit limbs - // 22 | p_y_1_3 | p_y_0_3 | 12 bit limbs - // 23 | p_y_1_4 | p_y_0_4 | 12 bit limbs - // 24 | p_y_1_5 | p_y_0_5 | 8 bit limns - // 25 | p_y_3 | p_y_2 | 68-bit limbs - // 26 | p_y_3_0 | p_y_2_0 | 12 bit limbs - // 27 | p_y_3_1 | p_y_2_1 | 12 bit limbs - // 28 | p_y_3_2 | p_y_2_2 | 12 bit limbs - // 29 | p_y_3_3 | p_y_2_3 | 12 bit limbs - // 30 | p_y_3_4 | p_y_2_4 | p_y_3_4 is 2 bits and enforced with a relation. p_y_2_4 is 12 bits - // 31 | - | p_y_2_5 | 8 bit limb - // 32 | z_1_hi | z_1_lo | 68 bit limbs - // 33 | z_1_hi_0| z_1_lo_0| 12 bit limbs - // 34 | z_1_hi_1| z_1_lo_1| 12 bit limbs - // 35 | z_1_hi_2| z_1_lo_2| 12 bit limbs - // 36 | z_1_hi_3| z_1_lo_3| 12 bit limbs - // 37 | z_1_hi_4| z_1_lo_4| 12 bit limbs - // 38 | z_1_hi_5| z_1_lo_5| 8 bit limbs - // 39 | z_2_hi | z_2_lo | 68 bit limbs - // 40 | z_2_hi_0| z_2_lo_0| 12 bit limbs - // 41 | z_2_hi_1| z_2_lo_1| 12 bit limbs - // 42 | z_2_hi_2| z_2_lo_2| 12 bit limbs - // 43 | z_2_hi_3| z_2_lo_3| 12 bit limbs - // 44 | z_2_hi_4| z_2_lo_4| 12 bit limbs - // 45 | z_2_hi_5| z_2_lo_5| 8 bit limbs - // 46 | Aₚᵣₑᵥ_₀ | A₀ | 68 - // 47 | Aₚᵣₑᵥ_₁ | A₁ | 68 - // 48 | Aₚᵣₑᵥ_₂ | A₂ | 68 - // 49 | Aₚᵣₑᵥ_₃ | A₃ | 68 - // 50 | A_1_0 | A_0_0 | 12 - // 51 | A_1_1 | A_0_1 | 12 - // 52 | A_1_2 | A_0_2 | 12 - // 53 | A_1_3 | A_0_3 | 12 - // 54 | A_1_4 | A_0_4 | 12 - // 55 | A_1_5 | A_0_5 | 8 - // 56 | A_3_0 | A_2_0 | 12 - // 57 | A_3_1 | A_2_1 | 12 - // 58 | A_3_2 | A_2_2 | 12 - // 59 | A_3_3 | A_2_3 | 12 - // 60 | A_3_4 | A_2_4 | 2/12 - // 61 | - | A_2_5 | 12 - // 62 | Q_1 | Q_0 | 68 - // 63 | Q_1_0 | Q_0_0 | 12 - // 64 | Q_1_1 | 
Q_0_1 | 12 - // 65 | Q_1_2 | Q_0_2 | 12 - // 66 | Q_1_3 | Q_0_3 | 12 - // 67 | Q_1_4 | Q_0_4 | 12 - // 68 | Q_1_5 | Q_0_5 | 8 - // 69 | Q_3 | Q_2 | 68 - // 70 | Q_3_0 | Q_2_0 | 12 - // 71 | Q_3_1 | Q_2_1 | 12 - // 72 | Q_3_2 | Q_2_2 | 12 - // 73 | Q_3_3 | Q_2_3 | 12 - // 74 | Q_3_4 | Q_2_4 | 4 - // 75 | - | Q_2_5 | 8 - Fr op; - Fr p_x_lo; - Fr p_x_hi; - Fr p_y_lo; - Fr p_y_hi; - Fr z_1; - Fr z_2; - op = Fr::random_element(); - auto get_random_wide_limb = []() { return Fr(engine.get_random_uint256() >> (256 - NUM_LIMB_BITS * 2)); }; - auto get_random_shortened_wide_limb = []() { return uint256_t(Fq::random_element()) >> (NUM_LIMB_BITS * 2); }; - p_x_lo = get_random_wide_limb(); - p_x_hi = get_random_shortened_wide_limb(); - p_y_lo = get_random_wide_limb(); - p_y_hi = get_random_shortened_wide_limb(); - z_1 = get_random_wide_limb(); - z_2 = get_random_wide_limb(); - - Fq accumulator; - accumulator = Fq::random_element(); - Fq v = Fq::random_element(); - Fq x = Fq::random_element(); - // p_y_lo = get_random_wide_limb(); - // Creating a bigfield representation from (binary_limb_0, binary_limb_1, binary_limb_2, binary_limb_3, prime_limb) - - // Range constrain all the individual limbs - - // Low bits have to be zero - // And we'll need to range constrain it - // 68 can be treated as 12/12/12/12/12/8 - // 68 can be treated as 12/12/12/12/12/8 - GoblinTranslatorCircuitBuilder::AccumulationInput witnesses = - generate_witness_values(op, p_x_lo, p_x_hi, p_y_lo, p_y_hi, z_1, z_2, accumulator, v, x); - // Prime relation - Fr prime_relation = witnesses.previous_accumulator[4] * witnesses.x_limbs[4] + witnesses.op_code + - witnesses.v_limbs[4] * witnesses.P_x_limbs[4] + - witnesses.v_squared_limbs[4] * witnesses.P_y_limbs[4] + witnesses.v_cubed_limbs[4] * z_1 + - witnesses.v_quarted_limbs[4] * z_2 + witnesses.quotient_binary_limbs[4] * neg_modulus_limbs[4] - - witnesses.current_accumulator[4]; - EXPECT_EQ(prime_relation, 0); -} - -TEST(translator_circuit_builder, 
circuit_builder_base_case) -{ - // Questions: - // 1. Do we need 68-bit limbs at all? - using Fr = ::curve::BN254::ScalarField; - using Fq = ::curve::BN254::BaseField; - // using Fq = ::curve::BN254::BaseField; - constexpr size_t NUM_LIMB_BITS = GoblinTranslatorCircuitBuilder::NUM_LIMB_BITS; + constexpr size_t NUM_Z_BITS = GoblinTranslatorCircuitBuilder::NUM_Z_BITS; + // Generate random EccOpQueue transcript values Fr op; - op = Fr(engine.get_random_uint8() & 3); - auto get_random_wide_limb = []() { return Fr(engine.get_random_uint256().slice(0, 2 * NUM_LIMB_BITS)); }; - // auto get_random_shortened_wide_limb = []() { return uint256_t(Fq::random_element()) >> (NUM_LIMB_BITS * 2); }; + switch (engine.get_random_uint8() % 6) { + case 0: + op = 0; + break; + case 1: + op = 1; + break; + case 2: + op = 2; + break; + case 3: + op = 3; + break; + case 4: + op = 4; + break; + case 5: + op = 8; + break; + } + auto get_random_z_scalar = []() { return Fr(engine.get_random_uint256().slice(0, NUM_Z_BITS)); }; + Fq p_x = Fq::random_element(); Fr p_x_lo = uint256_t(p_x).slice(0, 2 * NUM_LIMB_BITS); Fr p_x_hi = uint256_t(p_x).slice(2 * NUM_LIMB_BITS, 4 * NUM_LIMB_BITS); Fq p_y = Fq::random_element(); Fr p_y_lo = uint256_t(p_y).slice(0, 2 * NUM_LIMB_BITS); Fr p_y_hi = uint256_t(p_y).slice(2 * NUM_LIMB_BITS, 4 * NUM_LIMB_BITS); - Fr z_1 = get_random_wide_limb(); - Fr z_2 = get_random_wide_limb(); + Fr z_1 = get_random_z_scalar(); + Fr z_2 = get_random_z_scalar(); Fq v = Fq::random_element(); Fq x = Fq::random_element(); Fq previous_accumulator = Fq::random_element(); + + // Generate the witness for a single step GoblinTranslatorCircuitBuilder::AccumulationInput single_accumulation_step = generate_witness_values(op, p_x_lo, p_x_hi, p_y_lo, p_y_hi, z_1, z_2, previous_accumulator, v, x); - auto circuit_builder = GoblinTranslatorCircuitBuilder(); + // Create a circuit builder + auto circuit_builder = GoblinTranslatorCircuitBuilder(v, x); + // Submit one accumulation step in the builder 
circuit_builder.create_accumulation_gate(single_accumulation_step); - EXPECT_TRUE(circuit_builder.check_circuit(x, v)); + // Check if the circuit fails + EXPECT_TRUE(circuit_builder.check_circuit()); +} + +/** + * @brief Check that the circuit can handle several accumulations + * + */ +TEST(GoblinTranslatorCircuitBuilder, SeveralOperationCorrectness) +{ + using point = barretenberg::g1::affine_element; + using scalar = barretenberg::fr; + using Fq = barretenberg::fq; + + auto P1 = point::random_element(); + auto P2 = point::random_element(); + auto z = scalar::random_element(); + + // Add the same operations to the ECC op queue; the native computation is performed under the hood. + ECCOpQueue op_queue; + op_queue.add_accumulate(P1); + op_queue.mul_accumulate(P2, z); + Fq op_accumulator = 0; + Fq p_x_accumulator = 0; + Fq p_y_accumulator = 0; + Fq z_1_accumulator = 0; + Fq z_2_accumulator = 0; + Fq batching_challenge = fq::random_element(); + + op_queue.eq(); + op_queue.empty_row(); + + // Sample the evaluation input x + Fq x = Fq::random_element(); + // Get an inverse + Fq x_inv = x.invert(); + // Compute the batched evaluation of polynomials (multiplying by inverse to go from lower to higher) + for (auto& ecc_op : op_queue.raw_ops) { + op_accumulator = op_accumulator * x_inv + ecc_op.get_opcode_value(); + p_x_accumulator = p_x_accumulator * x_inv + ecc_op.base_point.x; + p_y_accumulator = p_y_accumulator * x_inv + ecc_op.base_point.y; + z_1_accumulator = z_1_accumulator * x_inv + ecc_op.z1; + z_2_accumulator = z_2_accumulator * x_inv + ecc_op.z2; + } + Fq x_pow = x.pow(op_queue.raw_ops.size() - 1); + + // Multiply by an appropriate power of x to get rid of the inverses + Fq result = ((((z_2_accumulator * batching_challenge + z_1_accumulator) * batching_challenge + p_y_accumulator) * + batching_challenge + + p_x_accumulator) * + batching_challenge + + op_accumulator) * + x_pow; + + // Create circuit builder and feed the queue inside + auto circuit_builder = 
GoblinTranslatorCircuitBuilder(batching_challenge, x, op_queue); + // Check that the circuit passes + EXPECT_TRUE(circuit_builder.check_circuit()); + // Check the computation result is in line with what we've computed + EXPECT_EQ(result, circuit_builder.get_computation_result()); } } // namespace proof_system \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_mini.fuzzer.cpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_mini.fuzzer.cpp index 7aeec7e4f011..c5366ab47a36 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_mini.fuzzer.cpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_translator_mini.fuzzer.cpp @@ -35,9 +35,9 @@ extern "C" int LLVMFuzzerTestOneInput(const unsigned char* data, size_t size) proof_system::GoblinTranslatorCircuitBuilder::AccumulationInput single_accumulation_step = proof_system::generate_witness_values(op, p_x_lo, p_x_hi, p_y_lo, p_y_hi, z_1, z_2, previous_accumulator, v, x); - auto circuit_builder = proof_system::GoblinTranslatorCircuitBuilder(); + auto circuit_builder = proof_system::GoblinTranslatorCircuitBuilder(v, x); circuit_builder.create_accumulation_gate(single_accumulation_step); - if (!circuit_builder.check_circuit(x, v)) { + if (!circuit_builder.check_circuit()) { return 1; } return 0; diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_ultra_circuit_builder.cpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_ultra_circuit_builder.cpp index 58da2674ec54..da24f8238871 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_ultra_circuit_builder.cpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/goblin_ultra_circuit_builder.cpp @@ -10,9 +10,6 @@ namespace proof_system { template void GoblinUltraCircuitBuilder_::finalize_circuit() { 
UltraCircuitBuilder_::finalize_circuit(); - - // Set internally the current and previous size of the aggregate op queue transcript - op_queue->set_size_data(); } /** diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp index 76fdd6e87dd9..9149df90b85b 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp @@ -3334,7 +3334,6 @@ template bool UltraCircuitBuilder_::check_circuit() FF w_3_shifted_value; FF w_4_shifted_value; if (i < (this->num_gates - 1)) { - w_1_shifted_value = this->get_variable(w_l[i + 1]); w_2_shifted_value = this->get_variable(w_r[i + 1]); w_3_shifted_value = this->get_variable(w_o[i + 1]); diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.hpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.hpp index f97b281b6ccb..9c3f25ee6090 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.hpp @@ -1040,8 +1040,5 @@ template class UltraCircuitBuilder_ : public CircuitBuilderBase; -// TODO: template plookup to be able to be able to have UltraCircuitBuilder on Grumpkin -// extern template class UltraCircuitBuilder_; using UltraCircuitBuilder = UltraCircuitBuilder_; -// using UltraGrumpkinCircuitBuilder = UltraCircuitBuilder_; } // namespace proof_system diff --git a/barretenberg/cpp/src/barretenberg/proof_system/composer/composer_lib.hpp b/barretenberg/cpp/src/barretenberg/proof_system/composer/composer_lib.hpp index df2c83ce6f4a..8f7db1a3327d 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/composer/composer_lib.hpp +++ 
b/barretenberg/cpp/src/barretenberg/proof_system/composer/composer_lib.hpp @@ -38,7 +38,7 @@ void construct_selector_polynomials(const typename Flavor::CircuitBuilder& circu } // TODO(#398): Loose coupling here! Would rather build up pk from arithmetization - size_t selector_idx = 0; // TODO(#391) zip + size_t selector_idx = 0; // TODO(https://github.com/AztecProtocol/barretenberg/issues/391) zip for (auto& selector_values : circuit_constructor.selectors) { ASSERT(proving_key->circuit_size >= selector_values.size()); diff --git a/barretenberg/cpp/src/barretenberg/proof_system/composer/permutation_lib.hpp b/barretenberg/cpp/src/barretenberg/proof_system/composer/permutation_lib.hpp index 73750323b9a2..82ae802c1e47 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/composer/permutation_lib.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/composer/permutation_lib.hpp @@ -183,7 +183,8 @@ PermutationMapping compute_permutation_mapping( PermutationMapping mapping; // Initialize the table of permutations so that every element points to itself - for (size_t i = 0; i < Flavor::NUM_WIRES; ++i) { // TODO(#391) zip and split + // TODO(https://github.com/AztecProtocol/barretenberg/issues/391) zip + for (size_t i = 0; i < Flavor::NUM_WIRES; ++i) { mapping.sigmas[i].reserve(proving_key->circuit_size); if constexpr (generalized) { mapping.ids[i].reserve(proving_key->circuit_size); diff --git a/barretenberg/cpp/src/barretenberg/proof_system/flavor/flavor.hpp b/barretenberg/cpp/src/barretenberg/proof_system/flavor/flavor.hpp index d981da35ca82..463c1c3c01af 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/flavor/flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/flavor/flavor.hpp @@ -227,7 +227,7 @@ template static constexpr auto if constexpr (Index >= std::tuple_size::value) { return std::tuple<>{}; // Return empty when reach end of the tuple } else { - using UnivariateTuple = typename 
std::tuple_element_t::RelationUnivariates; + using UnivariateTuple = typename std::tuple_element_t::TupleOfUnivariatesOverSubrelations; return std::tuple_cat(std::tuple{}, create_relation_univariates_container()); } @@ -243,8 +243,8 @@ template static constexpr auto if constexpr (Index >= std::tuple_size::value) { return std::tuple<>{}; // Return empty when reach end of the tuple } else { - using ValuesArray = typename std::tuple_element_t::RelationValues; - return std::tuple_cat(std::tuple{}, create_relation_values_container()); + using Values = typename std::tuple_element_t::ArrayOfValuesOverSubrelations; + return std::tuple_cat(std::tuple{}, create_relation_values_container()); } } @@ -253,7 +253,6 @@ template static constexpr auto // Forward declare honk flavors namespace proof_system::honk::flavor { class Ultra; -class UltraGrumpkin; class ECCVM; class ECCVMGrumpkin; class GoblinUltra; @@ -281,10 +280,10 @@ template concept IsPlonkFlavor = IsAnyOf; template -concept IsHonkFlavor = IsAnyOf; +concept IsHonkFlavor = IsAnyOf; template -concept IsUltraFlavor = IsAnyOf; +concept IsUltraFlavor = IsAnyOf; template concept IsGoblinFlavor = IsAnyOf, honk::flavor::GoblinUltraRecursive_>; -template concept IsGrumpkinFlavor = IsAnyOf; +template concept IsGrumpkinFlavor = IsAnyOf; -template concept UltraFlavor = IsAnyOf; +template concept UltraFlavor = IsAnyOf; template concept ECCVMFlavor = IsAnyOf; diff --git a/barretenberg/cpp/src/barretenberg/proof_system/op_queue/ecc_op_queue.hpp b/barretenberg/cpp/src/barretenberg/proof_system/op_queue/ecc_op_queue.hpp index 2bd01127b5eb..88169a75366e 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/op_queue/ecc_op_queue.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/op_queue/ecc_op_queue.hpp @@ -20,13 +20,13 @@ class ECCOpQueue { using Point = Curve::AffineElement; using Fr = Curve::ScalarField; using Fq = Curve::BaseField; // Grumpkin's scalar field - using ECCVMOperation = proof_system_eccvm::VMOperation; Point 
point_at_infinity = Curve::Group::affine_point_at_infinity; // The operations written to the queue are also performed natively; the result is stored in accumulator Point accumulator = point_at_infinity; public: + using ECCVMOperation = proof_system_eccvm::VMOperation; std::vector raw_ops; std::array, 4> ultra_ops; // ops encoded in the width-4 Ultra format diff --git a/barretenberg/cpp/src/barretenberg/proof_system/relations/auxiliary_relation.hpp b/barretenberg/cpp/src/barretenberg/proof_system/relations/auxiliary_relation.hpp index 05078bf638fd..a9b66cfa450c 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/relations/auxiliary_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/relations/auxiliary_relation.hpp @@ -9,17 +9,14 @@ template class AuxiliaryRelationImpl { public: using FF = FF_; - // 1 + polynomial degree of this relation - static constexpr size_t RELATION_LENGTH = 6; - - static constexpr size_t LEN_1 = 6; // auxiliary sub-relation - static constexpr size_t LEN_2 = 6; // ROM consistency sub-relation 1 - static constexpr size_t LEN_3 = 6; // ROM consistency sub-relation 2 - static constexpr size_t LEN_4 = 6; // RAM consistency sub-relation 1 - static constexpr size_t LEN_5 = 6; // RAM consistency sub-relation 2 - static constexpr size_t LEN_6 = 6; // RAM consistency sub-relation 3 - template