diff --git a/Cargo.toml b/Cargo.toml index ff9aa9c3..4b3d9cb2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,30 +6,39 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -ark-std = { version = "0.3.0", features = ["std"] } -ark-bls12-377 = { version = "0.3.0", features = ["r1cs", "curve"] } -ark-mnt4-753 = "0.3.0" -ark-ff = "0.3.0" +ark-ec = { path = "arkworks/algebra/ec", version = "0.3.0" } +ark-ff = { path = "arkworks/algebra/ff", version = "0.3.0" } +ark-poly = { path = "arkworks/algebra/poly", version = "0.3.0" } +ark-serialize = { path = "arkworks/algebra/serialize", version = "0.3.0" } + +ark-crypto-primitives = { path = "arkworks/crypto-primitives", version = "0.3.0", features = ["r1cs"] } + +ark-bls12-377 = { path = "arkworks/curves/bls12_377", version = "0.3.0", features = ["r1cs", "curve"] } +ark-ed-on-bls12-377 = { path = "arkworks/curves/ed_on_bls12_377", version = "0.3.0", features = ["r1cs"] } +ark-mnt4-753 = { path = "arkworks/curves/mnt4_753", version = "0.3.0" } + +ark-groth16 = { path = "arkworks/groth16", version = "0.3.0" } + +ark-marlin = { path = "arkworks/marlin", version = "0.3.0" } + +ark-poly-commit = { path = "arkworks/poly-commit", version = "0.3.0" } + +ark-r1cs-std = { path = "arkworks/r1cs-std", version = "0.3.0" } + +ark-relations = { path = "arkworks/snark/relations", version = "0.3.0" } +ark-snark = { path = "arkworks/snark/snark", version = "0.3.0" } + +ark-std = { path = "arkworks/std", version = "0.3.0", features = ["std"] } + rand = "0.8.5" rand_distr = "0.4.3" num-bigint = { version = "0.4.3", features = ["rand"] } num-integer = "0.1" -ark-groth16 = "0.3.0" -ark-snark = "0.3.0" -ark-ec = "0.3.0" -ark-relations = "0.3.0" -ark-r1cs-std = "0.3.0" -ark-crypto-primitives = { version = "0.3.0", features = ["r1cs"] } -ark-ed-on-bls12-377 = { version = "0.3.0", features = ["r1cs"] } num-traits = "0.2.14" -ark-poly = "0.3.0" structopt = "0.3" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" -ark-serialize = "0.3.0" hex = "0.4.3" -ark-marlin = "0.3.0" -ark-poly-commit = "0.3.0" blake2 = "0.9" derivative = { version = "2.0", features = ["use_core"]} -zeroize = { version = "1", default-features = false, features = ["zeroize_derive"] } \ No newline at end of file +zeroize = { version = "1", default-features = false, features = ["zeroize_derive"] } diff --git a/arkworks/algebra/.github/.markdownlint.yml b/arkworks/algebra/.github/.markdownlint.yml new file mode 100644 index 00000000..936fc62e --- /dev/null +++ b/arkworks/algebra/.github/.markdownlint.yml @@ -0,0 +1,14 @@ +# See https://github.com/DavidAnson/markdownlint#rules--aliases for list of markdown lint codes +default: true +# MD01 lint blocks having header's incrementing by more than # at a time. +MD001: false +MD007: { indent: 4 } +# MD013 blocks long lines +MD013: false +MD024: { siblings_only: true } +MD025: false +# MD033 lint blocks HTML in MD +MD033: false +# MD036 no-emphasis-as-heading +MD036: false +MD041: false diff --git a/arkworks/algebra/.github/PULL_REQUEST_TEMPLATE.md b/arkworks/algebra/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..37f2f6c1 --- /dev/null +++ b/arkworks/algebra/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,26 @@ + + +## Description + + + +closes: #XXXX + +--- + +Before we can merge this PR, please make sure that all the following items have been +checked off. 
If any of the checklist items are not applicable, please leave them but +write a little note why. + +- [ ] Targeted PR against correct branch (master) +- [ ] Linked to Github issue with discussion and accepted design OR have an explanation in the PR that describes this work. +- [ ] Wrote unit tests +- [ ] Updated relevant documentation in the code +- [ ] Added a relevant changelog entry to the `Pending` section in `CHANGELOG.md` +- [ ] Re-reviewed `Files changed` in the Github PR explorer diff --git a/arkworks/algebra/.github/dependabot.yml b/arkworks/algebra/.github/dependabot.yml new file mode 100644 index 00000000..4e480b5a --- /dev/null +++ b/arkworks/algebra/.github/dependabot.yml @@ -0,0 +1,17 @@ +version: 2 +updates: +- package-ecosystem: cargo + directory: "/" + schedule: + interval: daily + open-pull-requests-limit: 10 + ignore: + - dependency-name: hashbrown + versions: + - 0.11.0 + - dependency-name: rand + versions: + - 0.8.0 + - dependency-name: rand_xorshift + versions: + - 0.3.0 diff --git a/arkworks/algebra/.github/workflows/ci.yml b/arkworks/algebra/.github/workflows/ci.yml new file mode 100644 index 00000000..0c499576 --- /dev/null +++ b/arkworks/algebra/.github/workflows/ci.yml @@ -0,0 +1,210 @@ +name: CI +on: + pull_request: + push: + branches: + - master +env: + RUST_BACKTRACE: 1 + +jobs: + style: + name: Check Style + runs-on: ubuntu-latest + steps: + + - name: Checkout + uses: actions/checkout@v1 + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + components: rustfmt + + - name: cargo fmt --check + uses: actions-rs/cargo@v1 + with: + command: fmt + args: --all -- --check + + docs: + name: Check Documentation + runs-on: ubuntu-latest + steps: + + - name: Checkout + uses: actions/checkout@v1 + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + components: rustfmt + + - name: cargo doc --all --no-deps --document-private-items --all-features + uses: actions-rs/cargo@v1 + with: + command: doc + args: --all --no-deps --document-private-items --all-features + + test: + name: Test + runs-on: ubuntu-latest + env: + RUSTFLAGS: -Dwarnings + strategy: + matrix: + rust: + - stable + - nightly + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Install Rust (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: ${{ matrix.rust }} + override: true + + - uses: actions/cache@v2 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: Check examples + uses: actions-rs/cargo@v1 + with: + command: check + args: --examples --all + + - name: Check examples with all features on stable + uses: actions-rs/cargo@v1 + with: + command: check + args: --examples --all-features --all + if: matrix.rust == 'stable' + + - name: Check benchmarks on nightly + uses: actions-rs/cargo@v1 + with: + command: check + args: --all-features --examples --workspace --benches + if: matrix.rust == 'nightly' + + - name: Test + uses: actions-rs/cargo@v1 + with: + command: test + args: "--workspace \ + --all-features \ + --exclude ark-poly-benches \ + --exclude ark-algebra-test-templates" + + - name: Test assembly on nightly + env: + RUSTFLAGS: -C target-cpu=native + uses: actions-rs/cargo@v1 + with: + command: test + args: "--workspace \ + --package ark-test-curves \ + --all-features" + if: matrix.rust == 'nightly' + + check_no_std: + name: Check 
no_std + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Install Rust (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + target: thumbv6m-none-eabi + override: true + + - name: Install Rust ARM64 (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + target: aarch64-unknown-none + override: true + + - uses: actions/cache@v2 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: check + uses: actions-rs/cargo@v1 + with: + command: check + args: --examples --workspace --exclude ark-algebra-test-templates --exclude ark-poly-benches --target thumbv6m-none-eabi + + - name: build + uses: actions-rs/cargo@v1 + with: + command: build + args: --workspace --exclude ark-algebra-test-templates --exclude ark-poly-benches --target thumbv6m-none-eabi + + test_against_curves: + name: Test against curves + runs-on: ubuntu-latest + env: + RUSTFLAGS: -Dwarnings + strategy: + matrix: + curve: + - bls12_377 + - bls12_381 + - bn254 + - pallas + - bw6_761 + - mnt4_298 + - mnt6_298 + - ed_on_bls12_377 + steps: + - name: Checkout curves + uses: actions/checkout@v2 + with: + repository: arkworks-rs/curves + + - name: Checkout algebra + uses: actions/checkout@v2 + with: + repository: arkworks-rs/algebra + path: algebra + + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + + - name: Patch cargo.toml + run: | + echo >> Cargo.toml + echo "[patch.crates-io]" >> Cargo.toml + echo "ark-ff = { path = 'algebra/ff' }" >> Cargo.toml + echo "ark-serialize = { path = 'algebra/serialize' }" >> Cargo.toml + echo "ark-ff-macros = { path = 'algebra/ff-macros' }" >> Cargo.toml + echo "ark-ff-asm = { path = 'algebra/ff-asm' }" >> Cargo.toml + echo "ark-ec = { path = 'algebra/ec' }" >> Cargo.toml + echo "ark-algebra-test-templates = { path = 'algebra/test-templates' }" >> Cargo.toml + + - name: Test on ${{ matrix.curve }} + run: "cd ${{ matrix.curve }} && cargo test --all-features" diff --git a/arkworks/algebra/.github/workflows/linkify_changelog.yml b/arkworks/algebra/.github/workflows/linkify_changelog.yml new file mode 100644 index 00000000..8f3086e0 --- /dev/null +++ b/arkworks/algebra/.github/workflows/linkify_changelog.yml @@ -0,0 +1,20 @@ +name: Linkify Changelog + +on: + workflow_dispatch + +jobs: + linkify: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Add links + run: python3 scripts/linkify_changelog.py CHANGELOG.md + - name: Commit + run: | + git config user.name github-actions + git config user.email github-actions@github.com + git add . 
+          git commit -m "Linkify Changelog"
+          git push
diff --git a/arkworks/algebra/.github/workflows/mdlinter.yml b/arkworks/algebra/.github/workflows/mdlinter.yml
new file mode 100644
index 00000000..223621f3
--- /dev/null
+++ b/arkworks/algebra/.github/workflows/mdlinter.yml
@@ -0,0 +1,35 @@
+
+name: Lint
+on:
+  push:
+    branches:
+      - master
+    paths:
+      - "**.md"
+  pull_request:
+    paths:
+      - "**.md"
+
+jobs:
+  build:
+    name: Markdown linter
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Lint Code Base
+        uses: docker://github/super-linter:latest
+        env:
+          LINTER_RULES_PATH: .github
+          VALIDATE_ALL_CODEBASE: true
+          DEFAULT_BRANCH: master
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          VALIDATE_MD: true
+          MARKDOWN_CONFIG_FILE: .markdownlint.yml
+          VALIDATE_PROTOBUF: false
+          VALIDATE_JSCPD: false
+          # use Python Pylint as the only linter to avoid conflicts
+          VALIDATE_PYTHON_BLACK: false
+          VALIDATE_PYTHON_FLAKE8: false
+          VALIDATE_PYTHON_ISORT: false
+          VALIDATE_PYTHON_MYPY: false
diff --git a/arkworks/algebra/.gitignore b/arkworks/algebra/.gitignore
new file mode 100644
index 00000000..9b5e101e
--- /dev/null
+++ b/arkworks/algebra/.gitignore
@@ -0,0 +1,11 @@
+target
+Cargo.lock
+.DS_Store
+.idea
+*.iml
+*.ipynb_checkpoints
+*.pyc
+*.sage.py
+params
+*.swp
+*.swo
diff --git a/arkworks/algebra/.hooks/pre-commit b/arkworks/algebra/.hooks/pre-commit
new file mode 100755
index 00000000..23a86cd8
--- /dev/null
+++ b/arkworks/algebra/.hooks/pre-commit
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+
+# Bail out if rustfmt is not installed.
+if ! rustfmt --version &>/dev/null; then
+    printf "[pre_commit] \033[0;31merror\033[0m: \"rustfmt\" not available. \n"
+    printf "[pre_commit] \033[0;31merror\033[0m: rustfmt can be installed via - \n"
+    printf "[pre_commit] $ rustup component add rustfmt \n"
+    exit 1
+fi
+
+problem_files=()
+
+# collect ill-formatted files (rustfmt --check exits nonzero for them)
+for file in $(git diff --name-only --cached); do
+    if [ "${file: -3}" == ".rs" ]; then
+        if ! rustfmt +stable --check "$file" &>/dev/null; then
+            problem_files+=("$file")
+        fi
+    fi
+done
+
+if [ ${#problem_files[@]} == 0 ]; then
+    # done
+    printf "[pre_commit] rustfmt \033[0;32mok\033[0m \n"
+else
+    # reformat the files that need it and re-stage them.
+    printf "[pre_commit] the following files were rustfmt'd before commit: \n"
+    for file in "${problem_files[@]}"; do
+        rustfmt +stable "$file"
+        git add "$file"
+        printf "\033[0;32m    %s\033[0m \n" "$file"
+    done
+fi
+
+exit 0
diff --git a/arkworks/algebra/CHANGELOG.md b/arkworks/algebra/CHANGELOG.md
new file mode 100644
index 00000000..5dc0fc7a
--- /dev/null
+++ b/arkworks/algebra/CHANGELOG.md
@@ -0,0 +1,149 @@
+# CHANGELOG
+
+## Pending
+
+### Breaking changes
+
+### Features
+
+### Improvements
+
+### Bug fixes
+
+## v0.3.0
+
+### Breaking changes
+
+- [\#285](https://github.com/arkworks-rs/algebra/pull/285) (ark-ec) Remove `ATE_LOOP_COUNT_IS_NEGATIVE` from BN curve parameter trait.
+- [\#292](https://github.com/arkworks-rs/algebra/pull/292) (ark-ec) Remove `CycleEngine`.
+- [\#293](https://github.com/arkworks-rs/algebra/pull/293) (ark-ff) Remove `ark_ff::test_rng`.
+
+### Features
+
+- [\#230](https://github.com/arkworks-rs/algebra/pull/230) (ark-ec) Add `wnaf_mul` implementation for `ProjectiveCurve`.
+- [\#245](https://github.com/arkworks-rs/algebra/pull/245) (ark-poly) Speed up the sequential and parallel radix-2 FFT and IFFT significantly by making the way they access roots more cache-friendly.
+- [\#258](https://github.com/arkworks-rs/algebra/pull/258) (ark-poly) Add `Mul` implementation for `DensePolynomial`.
+- [\#259](https://github.com/arkworks-rs/algebra/pull/259) (ark-poly) Add `Mul` implementation for `SparsePolynomial` and `Add<SparsePolynomial<F>>/Sub<SparsePolynomial<F>>` for `DensePolynomial`.
+- [\#261](https://github.com/arkworks-rs/algebra/pull/261) (ark-ff) Add support for 448-bit integers and fields.
+- [\#263](https://github.com/arkworks-rs/algebra/pull/263) (ark-ff) Add `From` implementations to fields.
+- [\#265](https://github.com/arkworks-rs/algebra/pull/265) (ark-serialize) Add hashing as an extension trait of `CanonicalSerialize`.
+- [\#280](https://github.com/arkworks-rs/algebra/pull/280) (ark-ff) Add `Into<BigUint>` and `From<BigUint>` implementations to `BigInteger` and `PrimeField`.
+- [\#289](https://github.com/arkworks-rs/algebra/pull/289) (ark-ec) Add `Sum` implementation for all `AffineCurve`.
+
+### Improvements
+
+- [\#279](https://github.com/arkworks-rs/algebra/pull/279) (ark-ec) Parallelize Miller loop operations for BLS12.
+
+### Bug fixes
+
+- [\#252](https://github.com/arkworks-rs/algebra/pull/252) (ark-ff) Fix prime field sampling when `REPR_SHIFT_BITS` is 64.
+- [\#284](https://github.com/arkworks-rs/algebra/pull/284) (ark-poly-benches) Fix the panic in the `subgroup_fft_in_place` benchmark for MNT6-753's Fr.
+
+## v0.2.0
+
+The main features of this release are:
+
+- Adding the ability to define fields with integer parameters
+- Multi-variate polynomial support
+- Multilinear polynomial support
+- Many speedups to operations involving polynomials
+- Some speedups to `sqrt`
+- Small speedups to MSMs
+- Big speedups to radix-2 FFTs
+- A fix in the assembly arithmetic backend
+- New traits for basic curve cycles and pairing-based curve cycles
+
+### Breaking changes
+
+- [\#20](https://github.com/arkworks-rs/algebra/pull/20) (ark-poly) Move the univariate `DensePolynomial` and `SparsePolynomial` into a univariate sub-crate. Make this change by: find w/ regex `ark_poly::(Dense|Sparse)Polynomial`, and replace with `ark_poly::univariate::$1Polynomial`.
+- [\#36](https://github.com/arkworks-rs/algebra/pull/36) (ark-ec) In Short-Weierstrass curves, include an infinity bit in `ToConstraintField`.
+- [\#37](https://github.com/arkworks-rs/algebra/pull/37) (ark-poly) In the `Polynomial` trait, add `Hash` trait bound to `Point`.
+- [\#38](https://github.com/arkworks-rs/algebra/pull/38) (ark-poly) Add `Add` and `Neg` trait bounds to `Polynomial`.
+- [\#51](https://github.com/arkworks-rs/algebra/pull/51) (ark-ff) Removed `unitary_inverse` from `QuadExtField`. Make this change by replacing `x.unitary_inverse()` with `let mut tmp = x.clone(); tmp.conjugate()`.
+- [\#53](https://github.com/arkworks-rs/algebra/pull/53) (ark-poly) Add `Zero` trait bound to `Polynomial`.
+- [\#96](https://github.com/arkworks-rs/algebra/pull/96) (ark-ff) Make the `field_new` macro accept values in integer form, without requiring decomposition into limbs, and without requiring encoding in Montgomery form.
+- [\#106](https://github.com/arkworks-rs/algebra/pull/106) (ark-ff, ark-ec) Add `Zeroize` trait bound to the `Field, ProjectiveGroup, AffineGroup` traits.
+- [\#108](https://github.com/arkworks-rs/algebra/pull/108) (ark-ff) Add `extension_degree()` method to `Field`.
+- [\#110](https://github.com/arkworks-rs/algebra/pull/110) (ark-ec) Change the trait bound on the scalar for `mul`, from (essentially) `Into<BigInt>` to `AsRef<[u64]>`.
+- [\#117](https://github.com/arkworks-rs/algebra/pull/117) (ark-poly) Make the univariate `SparsePolynomial` implement `Polynomial`. Make this change by replacing `sparse_poly.evaluate(pt)` with `sparse_poly.evaluate(&pt)`.
+- [\#129](https://github.com/arkworks-rs/algebra/pull/129) (ark-ff) Move `ark_ff::{UniformRand, test_rng}` to `ark_std::{UniformRand, test_rng}`. Importing these from `ark-ff` is still possible, but is deprecated and will be removed in the following release.
+- [\#144](https://github.com/arkworks-rs/algebra/pull/144) (ark-poly) Add `CanonicalSerialize` and `CanonicalDeserialize` trait bounds for `Polynomial`.
+- [\#160](https://github.com/arkworks-rs/algebra/pull/160) (ark-serialize, ark-ff, ark-ec)
+  - Remove `ConstantSerializedSize`; users should use `serialized_size*` (see next).
+  - Add `serialized_size_with_flags` method to `CanonicalSerializeWithFlags`.
+  - Change `from_random_bytes_with_flags` to output `ark_serialize::Flags`.
+  - Change signatures of `Flags::from_u8*` to output `Option<Self>`.
+  - Change `Flags::from_u8*` to be more strict about the inputs they accept:
+    if the top bits of the `u8` value do *not* correspond to one of the possible outputs of `Flags::u8_bitmask`, then these methods output `None`, whereas before they output a default value.
+  Downstream users other than `ark-curves` should not see breakage unless they rely on these methods/traits explicitly.
+- [\#165](https://github.com/arkworks-rs/algebra/pull/165) (ark-ff) Add `from_base_field_elements` as a method to the `Field` trait.
+- [\#166](https://github.com/arkworks-rs/algebra/pull/166) (ark-ff) Change `BigInt::{from_bytes, to_bits}` to `from_bytes_le, from_bytes_be, to_bits_le, to_bits_be`.
+
+### Features
+
+- [\#20](https://github.com/arkworks-rs/algebra/pull/20) (ark-poly) Add structs/traits for multivariate polynomials.
+- [\#96](https://github.com/arkworks-rs/algebra/pull/96) (ark-ff) Make the `field_new` macro accept values in integer form, without requiring decomposition into limbs, and without requiring encoding in Montgomery form.
+- [\#106](https://github.com/arkworks-rs/algebra/pull/106) (ark-ff, ark-ec) Add `Zeroize` trait bound to the `Field, ProjectiveGroup, AffineGroup` traits.
+- [\#117](https://github.com/arkworks-rs/algebra/pull/117) (ark-poly) Add operations to `SparsePolynomial`, so it implements `Polynomial`.
+- [\#140](https://github.com/arkworks-rs/algebra/pull/140) (ark-poly) Add support for multilinear extensions in dense and sparse evaluation form.
+- [\#164](https://github.com/arkworks-rs/algebra/pull/164) (ark-ff) Add methods `from_{be, le}_bytes_mod_order` to the `PrimeField` trait.
+- [\#197](https://github.com/arkworks-rs/algebra/pull/197) (ark-test-curves) Add a BN384 curve with low two-arity for mixed-radix testing.
+
+### Improvements
+
+- [\#22](https://github.com/arkworks-rs/algebra/pull/22) (ark-ec) Speed up fixed-base MSMs.
+- [\#28](https://github.com/arkworks-rs/algebra/pull/28) (ark-poly) Add `domain()` method on the `evaluations` struct.
+- [\#31](https://github.com/arkworks-rs/algebra/pull/31) (ark-ec) Speed up point doubling on Twisted Edwards curves.
+- [\#35](https://github.com/arkworks-rs/algebra/pull/35) (ark-ff) Implement `ToConstraintField` for `bool`.
+- [\#48](https://github.com/arkworks-rs/algebra/pull/48) (ark-ff) Speed up `sqrt` on `QuadExtField`.
+- [\#94](https://github.com/arkworks-rs/algebra/pull/94) (ark-ff) Implement `ToBytes` and `FromBytes` for `u128`.
+- [\#99](https://github.com/arkworks-rs/algebra/pull/99) (ark-poly) Speed up `evaluate_all_lagrange_coefficients`.
+- [\#100](https://github.com/arkworks-rs/algebra/pull/100) (ark-ff) Implement `batch_inverse_and_mul`.
+- [\#101](https://github.com/arkworks-rs/algebra/pull/101) (ark-ff) Add `element(i: usize)` on the `Domain` trait.
+- [\#107](https://github.com/arkworks-rs/algebra/pull/107) (ark-serialize) Add an impl of `CanonicalSerialize/Deserialize` for `BTreeSet`.
+- [\#114](https://github.com/arkworks-rs/algebra/pull/114) (ark-poly) Significantly speed up and reduce memory usage of `DensePolynomial.evaluate`.
+- [\#114](https://github.com/arkworks-rs/algebra/pull/114), #119 (ark-poly) Add infrastructure for benchmarking `DensePolynomial` operations.
+- [\#115](https://github.com/arkworks-rs/algebra/pull/115) (ark-poly) Add parallel implementations of operations on `Evaluations`.
+- [\#115](https://github.com/arkworks-rs/algebra/pull/115) (ark-ff) Add parallel implementation of `batch_inversion`.
+- [\#122](https://github.com/arkworks-rs/algebra/pull/122) (ark-poly) Add infrastructure for benchmarking FFTs.
+- [\#125](https://github.com/arkworks-rs/algebra/pull/125) (ark-poly) Parallelize the application of coset shifts within `coset_fft`.
+- [\#126](https://github.com/arkworks-rs/algebra/pull/126) (ark-ec) Use `ark_ff::batch_inversion` for point normalization.
+- [\#131](https://github.com/arkworks-rs/algebra/pull/131), #137 (ark-ff) Speed up `sqrt` on fields when a square root exists. (And slow it down when one doesn't exist.)
+- [\#141](https://github.com/arkworks-rs/algebra/pull/141) (ark-ff) Add `Fp64`.
+- [\#144](https://github.com/arkworks-rs/algebra/pull/144) (ark-poly) Add serialization for polynomials and evaluations.
+- [\#149](https://github.com/arkworks-rs/algebra/pull/149) (ark-serialize) Add an impl of `CanonicalSerialize/Deserialize` for `String`.
+- [\#153](https://github.com/arkworks-rs/algebra/pull/153) (ark-serialize) Add an impl of `CanonicalSerialize/Deserialize` for `Rc`.
+- [\#157](https://github.com/arkworks-rs/algebra/pull/157) (ark-ec) Speed up `variable_base_msm` by not relying on unnecessary normalization.
+- [\#158](https://github.com/arkworks-rs/algebra/pull/158) (ark-serialize) Add an impl of `CanonicalSerialize/Deserialize` for `()`.
+- [\#166](https://github.com/arkworks-rs/algebra/pull/166) (ark-ff) Add `to_bytes_be()` and `to_bytes_le()` methods to `BigInt`.
+- [\#169](https://github.com/arkworks-rs/algebra/pull/169) (ark-poly) Improve radix-2 FFTs by moving to a faster algorithm by Riad S. Wahby.
+- [\#171](https://github.com/arkworks-rs/algebra/pull/171), #173, #176 (ark-poly) Apply significant further speedups to the new radix-2 FFT.
+- [\#188](https://github.com/arkworks-rs/algebra/pull/188) (ark-ec) Make Short Weierstrass random sampling result in an element with unknown discrete log.
+- [\#190](https://github.com/arkworks-rs/algebra/pull/190) (ark-ec) Add a curve cycle trait and an extended pairing cycle trait for all types of EC cycles.
+- [\#201](https://github.com/arkworks-rs/algebra/pull/201) (ark-ec, ark-ff, ark-test-curves, ark-test-templates) Remove the dependency on `rand_xorshift`.
+- [\#205](https://github.com/arkworks-rs/algebra/pull/205) (ark-ec, ark-ff) Unroll loops and conditionally use intrinsics in `biginteger` arithmetic, and reduce copies in `ff` and `ec` arithmetic.
+- [\#207](https://github.com/arkworks-rs/algebra/pull/207) (ark-ff) Improve performance of extension fields when the non-residue is negative. (Improves Fq2, Fq12, and G2 speed on BLS12 and BN curves.)
+- [\#211](https://github.com/arkworks-rs/algebra/pull/211) (ark-ec) Improve performance of BLS12 final exponentiation.
+- [\#214](https://github.com/arkworks-rs/algebra/pull/214) (ark-poly) Utilise a more efficient way of evaluating a polynomial at a single point.
+- [\#242](https://github.com/arkworks-rs/algebra/pull/242), [\#244](https://github.com/arkworks-rs/algebra/pull/244) (ark-poly) Speed up the sequential radix-2 FFT significantly by making the way it accesses roots more cache-friendly.
+
+### Bug fixes
+
+- [\#36](https://github.com/arkworks-rs/algebra/pull/36) (ark-ec) In Short-Weierstrass curves, include an infinity bit in `ToConstraintField`.
+- [\#107](https://github.com/arkworks-rs/algebra/pull/107) (ark-serialize) Fix handling of `(de)serialize_uncompressed/unchecked` in various impls of `CanonicalSerialize/Deserialize`.
+- [\#112](https://github.com/arkworks-rs/algebra/pull/112) (ark-serialize) Make `bool`'s checked serialization methods non-malleable.
+- [\#119](https://github.com/arkworks-rs/algebra/pull/119) (ark-poly) Fix bugs in degree calculation when adding/subtracting same-degree polynomials whose leading coefficients cancel.
+- [\#160](https://github.com/arkworks-rs/algebra/pull/160) (ark-serialize, ark-ff, ark-ec) Support serializing when `MODULUS_BITS + FLAG_BITS` is greater than the multiple of 8 just greater than `MODULUS_BITS`, which is the case for the Pasta curves (fixes #47).
+- [\#165](https://github.com/arkworks-rs/algebra/pull/165) (ark-ff) Enforce in the type system that an extension field's `BaseField` extends from the correct `BasePrimeField`.
+- [\#184](https://github.com/arkworks-rs/algebra/pull/184) Compile with `panic='abort'` in release mode, for safety of the library across FFI boundaries.
+- [\#192](https://github.com/arkworks-rs/algebra/pull/192) Fix a bug in the assembly backend for finite field arithmetic.
+- [\#217](https://github.com/arkworks-rs/algebra/pull/217) (ark-ec) Fix the definition of `PairingFriendlyCycle` introduced in #190.
+ +## v0.1.0 (Initial release of arkworks/algebra) diff --git a/arkworks/algebra/Cargo.toml b/arkworks/algebra/Cargo.toml new file mode 100644 index 00000000..b90f4199 --- /dev/null +++ b/arkworks/algebra/Cargo.toml @@ -0,0 +1,42 @@ +[workspace] + +members = [ + "serialize", + "serialize-derive", + + "ff-macros", + "ff-asm", + "ff", + + "ec", + + "poly", + "poly-benches", + "test-curves", + "test-templates", +] + +[profile.release] +opt-level = 3 +lto = "thin" +incremental = true +panic = 'abort' + +[profile.bench] +opt-level = 3 +debug = false +rpath = false +lto = "thin" +incremental = true +debug-assertions = false + +[profile.dev] +opt-level = 0 +panic = 'abort' + +[profile.test] +opt-level = 3 +lto = "thin" +incremental = true +debug-assertions = true +debug = true diff --git a/arkworks/algebra/LICENSE-APACHE b/arkworks/algebra/LICENSE-APACHE new file mode 100644 index 00000000..16fe87b0 --- /dev/null +++ b/arkworks/algebra/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/arkworks/algebra/LICENSE-MIT b/arkworks/algebra/LICENSE-MIT new file mode 100644 index 00000000..72dc60d8 --- /dev/null +++ b/arkworks/algebra/LICENSE-MIT @@ -0,0 +1,19 @@ +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/arkworks/algebra/README.md b/arkworks/algebra/README.md new file mode 100644 index 00000000..7fefcafe --- /dev/null +++ b/arkworks/algebra/README.md @@ -0,0 +1,94 @@ +
+<h1 align="center">arkworks::algebra</h1>
+
+The arkworks ecosystem consists of Rust libraries for designing and working with __zero-knowledge succinct non-interactive arguments (zkSNARKs)__. This repository contains efficient implementations of some of the key algebraic components underlying zkSNARKs: finite fields, elliptic curves, and polynomials.
+
+This library is released under the MIT License and the Apache v2 License (see [License](#license)).
+
+**WARNING:** This is an academic proof-of-concept prototype, and in particular has not received careful code review. This implementation is NOT ready for production use.
+
+## Directory structure
+
+This repository contains several Rust crates:
+
+* [`ark-ff`](ff): Provides generic implementations of various finite fields.
+* [`ark-ec`](ec): Provides generic implementations for different kinds of elliptic curves, along with pairings over these.
+* [`ark-poly`](poly): Implements univariate, multivariate, and multilinear polynomials, and FFTs over finite fields.
+* [`ark-serialize`](serialize): Provides efficient serialization and point compression for finite fields and elliptic curves.
+
+In addition, the [`curves`](https://github.com/arkworks-rs/curves) repository contains implementations of popular elliptic curves; see [here](https://github.com/arkworks-rs/curves/README.md) for details. A quick usage example follows.
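+## Quick example
+
+The following is a minimal sketch of finite-field arithmetic with these crates. It uses the in-workspace `ark-test-curves` crate purely to get a concrete field (assuming its BLS12-381 scalar-field feature is enabled); any other `PrimeField` works the same way.
+
+```rust
+use ark_ff::{Field, One};
+use ark_std::UniformRand;
+// Assumption: `ark-test-curves` with its BLS12-381 scalar-field feature enabled.
+use ark_test_curves::bls12_381::Fr;
+
+fn main() {
+    let mut rng = ark_std::test_rng();
+    let a = Fr::rand(&mut rng);
+    let b = Fr::rand(&mut rng);
+
+    // Field elements support the usual arithmetic operators.
+    let c = (a + b) * a;
+
+    // Every nonzero field element has a multiplicative inverse.
+    let c_inv = c.inverse().expect("nonzero with overwhelming probability");
+    assert!((c * c_inv).is_one());
+}
+```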
+## Build guide
+
+The library compiles on the `stable` toolchain of the Rust compiler (v1.51+). To install the latest version of Rust, first install `rustup` by following the instructions [here](https://rustup.rs/), or via your platform's package manager. Once `rustup` is installed, install the Rust toolchain by invoking:
+
+```bash
+rustup install stable
+```
+
+After that, use `cargo`, the standard Rust build tool, to build the libraries:
+
+```bash
+git clone https://github.com/arkworks-rs/algebra.git
+cd algebra
+cargo build --release
+```
+
+## Tests
+
+This library comes with comprehensive unit and integration tests for each of the provided crates. Run the tests with:
+
+```bash
+cargo test --all
+```
+
+## Benchmarks
+
+To run the benchmarks, install the nightly Rust toolchain, via `rustup install nightly`, and then run the following command:
+
+```bash
+cargo +nightly bench
+```
+
+## Assembly backend for field arithmetic
+
+The `ark-ff` crate contains (off-by-default) optimized assembly implementations of field arithmetic that rely on the `adcxq`, `adoxq` and `mulxq` instructions. These are available on most `x86_64` platforms (Broadwell onwards for Intel and Ryzen onwards for AMD). Using this backend can lead to a 30-70% speedup in finite field and elliptic curve arithmetic. To build with this backend enabled, run the following command (substituting `test`, `build`, or `bench` as appropriate):
+
+```bash
+RUSTFLAGS="-C target-feature=+bmi2,+adx" cargo +nightly test --features asm
+```
+
+To enable this in the `Cargo.toml` of your own projects, enable the `asm` feature flag:
+
+```toml
+ark-ff = { version = "0.1", features = [ "asm" ] }
+```
+
+Note that because inline assembly support in Rust is currently unstable, using this backend requires the Nightly compiler at the moment.
+
+## License
+
+The crates in this repo are licensed under either of the following licenses, at your discretion.
+
+* Apache License Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or [apache.org license link](http://www.apache.org/licenses/LICENSE-2.0))
+* MIT license ([LICENSE-MIT](LICENSE-MIT) or [opensource.org license link](http://opensource.org/licenses/MIT))
+
+Unless you explicitly state otherwise, any contribution submitted for inclusion in this library by you shall be dual licensed as above (as defined in the Apache v2 License), without any additional terms or conditions.
+
+[zexe]: https://ia.cr/2018/962
+
+## Acknowledgements
+
+This work was supported by:
+a Google Faculty Award;
+the National Science Foundation;
+the UC Berkeley Center for Long-Term Cybersecurity;
+and donations from the Ethereum Foundation, the Interchain Foundation, and Qtum.
+
+An earlier version of this library was developed as part of the paper *"[ZEXE: Enabling Decentralized Private Computation][zexe]"*.
diff --git a/arkworks/algebra/ec/Cargo.toml b/arkworks/algebra/ec/Cargo.toml
new file mode 100644
index 00000000..aeeba307
--- /dev/null
+++ b/arkworks/algebra/ec/Cargo.toml
@@ -0,0 +1,27 @@
+[package]
+name = "ark-ec"
+version = "0.3.0"
+authors = [ "arkworks contributors" ]
+description = "A library for elliptic curves and pairings"
+homepage = "https://arkworks.rs"
+repository = "https://github.com/arkworks-rs/algebra"
+documentation = "https://docs.rs/ark-ec/"
+keywords = ["cryptography", "elliptic-curves", "pairing"]
+categories = ["cryptography"]
+include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"]
+license = "MIT/Apache-2.0"
+edition = "2018"
+
+[dependencies]
+ark-std = { path = "../../std", version = "^0.3.0", default-features = false }
+ark-serialize = { version = "^0.3.0", path = "../serialize", default-features = false }
+ark-ff = { version = "^0.3.0", path = "../ff", default-features = false }
+derivative = { version = "2", features = ["use_core"] }
+num-traits = { version = "0.2", default-features = false }
+rayon = { version = "1", optional = true }
+zeroize = { version = "1", default-features = false, features = ["zeroize_derive"] }
+
+[features]
+default = []
+std = [ "ark-std/std", "ark-ff/std", "ark-serialize/std" ]
+parallel = [ "std", "rayon", "ark-std/parallel" ]
diff --git a/arkworks/algebra/ec/LICENSE-APACHE b/arkworks/algebra/ec/LICENSE-APACHE
new file mode 120000
index 00000000..965b606f
--- /dev/null
+++ b/arkworks/algebra/ec/LICENSE-APACHE
@@ -0,0 +1 @@
+../LICENSE-APACHE
\ No newline at end of file
diff --git a/arkworks/algebra/ec/LICENSE-MIT b/arkworks/algebra/ec/LICENSE-MIT
new file mode 120000
index 00000000..76219eb7
--- /dev/null
+++ b/arkworks/algebra/ec/LICENSE-MIT
@@ -0,0 +1 @@
+../LICENSE-MIT
\ No newline at end of file
diff --git a/arkworks/algebra/ec/README.md b/arkworks/algebra/ec/README.md
new file mode 100644
index 00000000..7f729db7
--- /dev/null
+++ b/arkworks/algebra/ec/README.md
@@ -0,0 +1,23 @@
+<h1 align="center">ark-ec</h1>
+
+This crate defines Elliptic Curve traits, curve models that follow these traits, and multi-scalar multiplications.
+Implementations of particular curves using these curve models can be found in [`arkworks-rs/curves`](https://github.com/arkworks-rs/curves/README.md).
+
+The available elliptic curve traits are:
+
+* [`AffineCurve`](https://github.com/arkworks-rs/algebra/blob/master/ec/src/lib.rs#L223) - Interface for elliptic curve points in the 'canonical form' for serialization.
+* [`ProjectiveCurve`](https://github.com/arkworks-rs/algebra/blob/master/ec/src/lib.rs#L118) - Interface for elliptic curve points in a representation that is more efficient for most computation.
+* [`PairingEngine`](https://github.com/arkworks-rs/algebra/blob/master/ec/src/lib.rs#L41) - Pairing-friendly elliptic curves (contains the pairing function, and acts as a wrapper type over G1, G2, GT, and the relevant fields).
+* [`CurveCycle`](https://github.com/arkworks-rs/algebra/blob/master/ec/src/lib.rs#L319) - Trait representing a cycle of elliptic curves.
+* [`PairingFriendlyCycle`](https://github.com/arkworks-rs/algebra/blob/master/ec/src/lib.rs#L331) - Trait representing a cycle of pairing-friendly elliptic curves.
+
+The elliptic curve models implemented are:
+
+* [*Short Weierstrass*](https://github.com/arkworks-rs/algebra/blob/master/ec/src/models/short_weierstrass_jacobian.rs) curves. The `AffineCurve` here is the typical Short Weierstrass point representation, and the `ProjectiveCurve` uses points in [Jacobian Coordinates](https://en.wikibooks.org/wiki/Cryptography/Prime_Curve/Jacobian_Coordinates).
+* [*Twisted Edwards*](https://github.com/arkworks-rs/algebra/blob/master/ec/src/models/twisted_edwards_extended.rs) curves. The `AffineCurve` here is the standard Twisted Edwards curve representation, whereas the `ProjectiveCurve` uses points in [Extended Twisted Edwards Coordinates](https://eprint.iacr.org/2008/522.pdf).
+
+A short sketch of working with the affine and projective representations follows.
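+A minimal sketch of the two representations, assuming the `ark-bls12-377` crate from `arkworks-rs/curves` for a concrete curve (it is not part of this crate):
+
+```rust
+use ark_bls12_377::G1Projective;
+use ark_ec::ProjectiveCurve;
+use ark_std::UniformRand;
+
+fn main() {
+    let mut rng = ark_std::test_rng();
+    let p = G1Projective::rand(&mut rng);
+    let q = G1Projective::rand(&mut rng);
+
+    // Projective (here: Jacobian) coordinates avoid a field inversion on
+    // every group operation.
+    let sum = p + q;
+
+    // Mixed addition takes the second operand in affine form, which is
+    // cheaper than a full projective-projective addition.
+    let sum_mixed = p.add_mixed(&q.into_affine());
+    assert_eq!(sum, sum_mixed);
+}
+```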
diff --git a/arkworks/algebra/ec/src/group.rs b/arkworks/algebra/ec/src/group.rs
new file mode 100644
index 00000000..a8cf7b7a
--- /dev/null
+++ b/arkworks/algebra/ec/src/group.rs
@@ -0,0 +1,57 @@
+use core::{
+    fmt::{Debug, Display},
+    hash::Hash,
+    ops::{Add, AddAssign, MulAssign, Neg, Sub, SubAssign},
+};
+use num_traits::Zero;
+
+use ark_ff::{
+    bytes::{FromBytes, ToBytes},
+    fields::PrimeField,
+    UniformRand,
+};
+
+pub trait Group:
+    ToBytes
+    + 'static
+    + FromBytes
+    + Copy
+    + Clone
+    + Debug
+    + Display
+    + Default
+    + Send
+    + Sync
+    + Eq
+    + Hash
+    + Neg<Output = Self>
+    + UniformRand
+    + Zero
+    + Add<Self, Output = Self>
+    + Sub<Self, Output = Self>
+    + AddAssign<Self>
+    + SubAssign<Self>
+    + MulAssign<<Self as Group>::ScalarField>
+    + for<'a> Add<&'a Self, Output = Self>
+    + for<'a> Sub<&'a Self, Output = Self>
+    + for<'a> AddAssign<&'a Self>
+    + for<'a> SubAssign<&'a Self>
+    + core::iter::Sum<Self>
+    + for<'a> core::iter::Sum<&'a Self>
+{
+    type ScalarField: PrimeField;
+
+    /// Returns `self + self`.
+    #[must_use]
+    fn double(&self) -> Self;
+
+    /// Sets `self := self + self`.
+    fn double_in_place(&mut self) -> &mut Self;
+
+    /// Returns `self * other`.
+    #[must_use]
+    fn mul<'a>(&self, other: &'a Self::ScalarField) -> Self {
+        let mut copy = *self;
+        copy *= *other;
+        copy
+    }
+}
diff --git a/arkworks/algebra/ec/src/lib.rs b/arkworks/algebra/ec/src/lib.rs
new file mode 100644
index 00000000..759257db
--- /dev/null
+++ b/arkworks/algebra/ec/src/lib.rs
@@ -0,0 +1,353 @@
+#![cfg_attr(not(feature = "std"), no_std)]
+#![warn(unused, future_incompatible, nonstandard_style, rust_2018_idioms)]
+#![forbid(unsafe_code)]
+#![allow(
+    clippy::op_ref,
+    clippy::suspicious_op_assign_impl,
+    clippy::many_single_char_names
+)]
+
+#[macro_use]
+extern crate derivative;
+
+#[macro_use]
+extern crate ark_std;
+
+use crate::group::Group;
+use ark_ff::{
+    bytes::{FromBytes, ToBytes},
+    fields::{Field, PrimeField, SquareRootField},
+    UniformRand,
+};
+use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
+use ark_std::{
+    fmt::{Debug, Display},
+    hash::Hash,
+    ops::{Add, AddAssign, MulAssign, Neg, Sub, SubAssign},
+    vec::Vec,
+};
+use num_traits::Zero;
+use zeroize::Zeroize;
+
+pub mod models;
+pub use self::models::*;
+
+pub mod group;
+
+pub mod msm;
+
+pub mod wnaf;
+
+pub trait PairingEngine: Sized + 'static + Copy + Debug + Sync + Send + Eq + PartialEq {
+    /// This is the scalar field of the G1/G2 groups.
+    type Fr: PrimeField + SquareRootField;
+
+    /// The projective representation of an element in G1.
+    type G1Projective: ProjectiveCurve<BaseField = Self::Fq, ScalarField = Self::Fr, Affine = Self::G1Affine>
+        + From<Self::G1Affine>
+        + Into<Self::G1Affine>
+        + MulAssign<Self::Fr>; // needed due to https://github.com/rust-lang/rust/issues/69640
+
+    /// The affine representation of an element in G1.
+    type G1Affine: AffineCurve<BaseField = Self::Fq, ScalarField = Self::Fr, Projective = Self::G1Projective>
+        + From<Self::G1Projective>
+        + Into<Self::G1Projective>
+        + Into<Self::G1Prepared>;
+
+    /// A G1 element that has been preprocessed for use in a pairing.
+    type G1Prepared: ToBytes + Default + Clone + Send + Sync + Debug + From<Self::G1Affine>;
+
+    /// The projective representation of an element in G2.
+    type G2Projective: ProjectiveCurve<BaseField = Self::Fqe, ScalarField = Self::Fr, Affine = Self::G2Affine>
+        + From<Self::G2Affine>
+        + Into<Self::G2Affine>
+        + MulAssign<Self::Fr>; // needed due to https://github.com/rust-lang/rust/issues/69640
+
+    /// The affine representation of an element in G2.
+    type G2Affine: AffineCurve<BaseField = Self::Fqe, ScalarField = Self::Fr, Projective = Self::G2Projective>
+        + From<Self::G2Projective>
+        + Into<Self::G2Projective>
+        + Into<Self::G2Prepared>;
+
+    /// A G2 element that has been preprocessed for use in a pairing.
+    type G2Prepared: ToBytes + Default + Clone + Send + Sync + Debug + From<Self::G2Affine>;
+
+    /// The base field that hosts G1.
+    type Fq: PrimeField + SquareRootField;
+
+    /// The extension field that hosts G2.
+    type Fqe: SquareRootField;
+
+    /// The extension field that hosts the target group of the pairing.
+    type Fqk: Field;
+
+    /// Compute the product of Miller loops for some number of (G1, G2) pairs.
+    #[must_use]
+    fn miller_loop<'a, I>(i: I) -> Self::Fqk
+    where
+        I: IntoIterator<Item = &'a (Self::G1Prepared, Self::G2Prepared)>;
+
+    /// Perform final exponentiation of the result of a Miller loop.
+    #[must_use]
+    fn final_exponentiation(_: &Self::Fqk) -> Option<Self::Fqk>;
+
+    /// Computes a product of pairings.
+    #[must_use]
+    fn product_of_pairings<'a, I>(i: I) -> Self::Fqk
+    where
+        I: IntoIterator<Item = &'a (Self::G1Prepared, Self::G2Prepared)>,
+    {
+        Self::final_exponentiation(&Self::miller_loop(i)).unwrap()
+    }
+
+    /// Computes a single pairing.
+    #[must_use]
+    fn pairing<G1, G2>(p: G1, q: G2) -> Self::Fqk
+    where
+        G1: Into<Self::G1Affine>,
+        G2: Into<Self::G2Affine>,
+    {
+        let g1_prep = Self::G1Prepared::from(p.into());
+        let g2_prep = Self::G2Prepared::from(q.into());
+        Self::product_of_pairings(core::iter::once(&(g1_prep, g2_prep)))
+    }
+}
+
+/// Projective representation of an elliptic curve point guaranteed to be
+/// in the correct prime order subgroup.
+pub trait ProjectiveCurve:
+    Eq
+    + 'static
+    + Sized
+    + ToBytes
+    + FromBytes
+    + CanonicalSerialize
+    + CanonicalDeserialize
+    + Copy
+    + Clone
+    + Default
+    + Send
+    + Sync
+    + Hash
+    + Debug
+    + Display
+    + UniformRand
+    + Zeroize
+    + Zero
+    + Neg<Output = Self>
+    + Add<Self, Output = Self>
+    + Sub<Self, Output = Self>
+    + AddAssign<Self>
+    + SubAssign<Self>
+    + MulAssign<<Self as ProjectiveCurve>::ScalarField>
+    + for<'a> Add<&'a Self, Output = Self>
+    + for<'a> Sub<&'a Self, Output = Self>
+    + for<'a> AddAssign<&'a Self>
+    + for<'a> SubAssign<&'a Self>
+    + core::iter::Sum<Self>
+    + for<'a> core::iter::Sum<&'a Self>
+    + From<<Self as ProjectiveCurve>::Affine>
+{
+    const COFACTOR: &'static [u64];
+    type ScalarField: PrimeField + SquareRootField;
+    type BaseField: Field;
+    type Affine: AffineCurve<Projective = Self, ScalarField = Self::ScalarField, BaseField = Self::BaseField>
+        + From<Self>
+        + Into<Self>;
+
+    /// Returns a fixed generator of unknown exponent.
+    #[must_use]
+    fn prime_subgroup_generator() -> Self;
+
+    /// Normalizes a slice of projective elements so that
+    /// conversion to affine is cheap.
+    fn batch_normalization(v: &mut [Self]);
+
+    /// Normalizes a slice of projective elements and outputs a vector
+    /// containing the affine equivalents.
+    fn batch_normalization_into_affine(v: &[Self]) -> Vec<Self::Affine> {
+        let mut v = v.to_vec();
+        Self::batch_normalization(&mut v);
+        v.into_iter().map(|v| v.into()).collect()
+    }
+
+    /// Checks if the point is already "normalized" so that
+    /// cheap affine conversion is possible.
+    #[must_use]
+    fn is_normalized(&self) -> bool;
+
+    /// Doubles this element.
+    #[must_use]
+    fn double(&self) -> Self {
+        let mut copy = *self;
+        copy.double_in_place();
+        copy
+    }
+
+    /// Doubles this element in place.
+    fn double_in_place(&mut self) -> &mut Self;
+
+    /// Converts self into the affine representation.
+    fn into_affine(&self) -> Self::Affine {
+        (*self).into()
+    }
+
+    /// Sets `self` to be `self + other`, where `other: Self::Affine`.
+    /// This is usually faster than adding `other` in projective form.
+    fn add_mixed(mut self, other: &Self::Affine) -> Self {
+        self.add_assign_mixed(other);
+        self
+    }
+
+    /// Sets `self` to be `self + other`, where `other: Self::Affine`.
+    /// This is usually faster than adding `other` in projective form.
+    fn add_assign_mixed(&mut self, other: &Self::Affine);
+
+    /// Performs scalar multiplication of this element,
+    /// via double-and-add over the big-endian bits of the scalar.
+    fn mul<S: AsRef<[u64]>>(mut self, other: S) -> Self {
+        let mut res = Self::zero();
+        for b in ark_ff::BitIteratorBE::without_leading_zeros(other) {
+            res.double_in_place();
+            if b {
+                res += self;
+            }
+        }
+
+        self = res;
+        self
+    }
+
+    /// Performs scalar multiplication of this element by an element of
+    /// `Self::ScalarField`.
+    fn scalar_mul(&self, other: &Self::ScalarField) -> Self {
+        self.mul(other.into_repr())
+    }
+}
+
+/// Affine representation of an elliptic curve point guaranteed to be
+/// in the correct prime order subgroup.
+pub trait AffineCurve:
+    Eq
+    + 'static
+    + Sized
+    + ToBytes
+    + FromBytes
+    + CanonicalSerialize
+    + CanonicalDeserialize
+    + Copy
+    + Clone
+    + Default
+    + Send
+    + Sync
+    + Hash
+    + Debug
+    + Display
+    + Zero
+    + Neg<Output = Self>
+    + Zeroize
+    + core::iter::Sum<Self>
+    + for<'a> core::iter::Sum<&'a Self>
+    + From<<Self as AffineCurve>::Projective>
+{
+    const COFACTOR: &'static [u64];
+    type ScalarField: PrimeField + SquareRootField + Into<<Self::ScalarField as PrimeField>::BigInt>;
+    type BaseField: Field;
+    type Projective: ProjectiveCurve<Affine = Self, ScalarField = Self::ScalarField, BaseField = Self::BaseField>
+        + From<Self>
+        + Into<Self>
+        + MulAssign<Self::ScalarField>; // needed due to https://github.com/rust-lang/rust/issues/69640
+
+    /// Returns a fixed generator of unknown exponent.
+    #[must_use]
+    fn prime_subgroup_generator() -> Self;
+
+    /// Converts self into the projective representation.
+    fn into_projective(&self) -> Self::Projective {
+        (*self).into()
+    }
+
+    /// Returns a group element if the set of bytes forms a valid group element,
+    /// otherwise returns None. This function is primarily intended for sampling
+    /// random group elements from a hash-function or RNG output.
+    fn from_random_bytes(bytes: &[u8]) -> Option<Self>;
+
+    /// Performs scalar multiplication of this element with mixed addition.
+    #[must_use]
+    fn mul<S: Into<<Self::ScalarField as PrimeField>::BigInt>>(&self, other: S)
+        -> Self::Projective;
+
+    /// Multiply this element by the cofactor and output the
+    /// resulting projective element.
+    #[must_use]
+    fn mul_by_cofactor_to_projective(&self) -> Self::Projective;
+
+    /// Multiply this element by the cofactor.
+    #[must_use]
+    fn mul_by_cofactor(&self) -> Self {
+        self.mul_by_cofactor_to_projective().into()
+    }
+
+    /// Multiply this element by the inverse of the cofactor in
+    /// `Self::ScalarField`.
+    #[must_use]
+    fn mul_by_cofactor_inv(&self) -> Self;
+
+    /// Performs scalar multiplication of this element by an element of
+    /// `Self::ScalarField`.
+    fn scalar_mul<S: Into<Self::ScalarField>>(&self, other: S) -> Self::Projective {
+        self.mul(other.into().into_repr())
+    }
+}
+
+impl<C: ProjectiveCurve> Group for C {
+    type ScalarField = C::ScalarField;
+
+    #[inline]
+    #[must_use]
+    fn double(&self) -> Self {
+        let mut tmp = *self;
+        tmp += self;
+        tmp
+    }
+
+    #[inline]
+    fn double_in_place(&mut self) -> &mut Self {
+        <C as ProjectiveCurve>::double_in_place(self)
+    }
+}
+
+/// Preprocess a G1 element for use in a pairing.
+pub fn prepare_g1<E: PairingEngine>(g: impl Into<E::G1Affine>) -> E::G1Prepared {
+    let g: E::G1Affine = g.into();
+    E::G1Prepared::from(g)
+}
+
+/// Preprocess a G2 element for use in a pairing.
+pub fn prepare_g2<E: PairingEngine>(g: impl Into<E::G2Affine>) -> E::G2Prepared {
+    let g: E::G2Affine = g.into();
+    E::G2Prepared::from(g)
+}
+
+/// A cycle of curves: the base field of `E1` is the scalar field of `E2`,
+/// and vice versa.
+pub trait CurveCycle
+where
+    <Self::E1 as AffineCurve>::Projective: MulAssign<<Self::E2 as AffineCurve>::BaseField>,
+    <Self::E2 as AffineCurve>::Projective: MulAssign<<Self::E1 as AffineCurve>::BaseField>,
+{
+    type E1: AffineCurve<
+        BaseField = <Self::E2 as AffineCurve>::ScalarField,
+        ScalarField = <Self::E2 as AffineCurve>::BaseField,
+    >;
+    type E2: AffineCurve;
+}
+
+pub trait PairingFriendlyCycle: CurveCycle {
+    type Engine1: PairingEngine<
+        G1Affine = Self::E1,
+        G1Projective = <Self::E1 as AffineCurve>::Projective,
+        Fq = <Self::E1 as AffineCurve>::BaseField,
+        Fr = <Self::E1 as AffineCurve>::ScalarField,
+    >;
+
+    type Engine2: PairingEngine<
+        G1Affine = Self::E2,
+        G1Projective = <Self::E2 as AffineCurve>::Projective,
+        Fq = <Self::E2 as AffineCurve>::BaseField,
+        Fr = <Self::E2 as AffineCurve>::ScalarField,
+    >;
+}
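To make the `PairingEngine` contract above concrete, here is a minimal bilinearity sanity check. This is an illustrative sketch, not part of the patch: it assumes the `ark-bls12-377` crate (a dependency in this patch's top-level `Cargo.toml`) as the concrete engine.

```rust
use ark_bls12_377::{Bls12_377, Fr, G1Projective, G2Projective};
use ark_ec::{PairingEngine, ProjectiveCurve};
use ark_ff::PrimeField;
use ark_std::UniformRand;

fn main() {
    let mut rng = ark_std::test_rng();
    let s = Fr::rand(&mut rng);
    let g = G1Projective::prime_subgroup_generator();
    let h = G2Projective::prime_subgroup_generator();

    // Bilinearity: e(s·g, h) == e(g, s·h) (both equal e(g, h)^s).
    let lhs = Bls12_377::pairing(g.mul(s.into_repr()), h);
    let rhs = Bls12_377::pairing(g, h.mul(s.into_repr()));
    assert_eq!(lhs, rhs);
}
```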

= GroupAffine<

::G1Parameters>; +pub type G1Projective

= GroupProjective<

::G1Parameters>; + +#[derive(Derivative)] +#[derivative( + Clone(bound = "P: Bls12Parameters"), + Debug(bound = "P: Bls12Parameters"), + PartialEq(bound = "P: Bls12Parameters"), + Eq(bound = "P: Bls12Parameters") +)] +pub struct G1Prepared(pub G1Affine

); + +impl From> for G1Prepared

{ + fn from(other: G1Affine

) -> Self { + G1Prepared(other) + } +} + +impl G1Prepared

{ + pub fn is_zero(&self) -> bool { + self.0.is_zero() + } +} + +impl Default for G1Prepared

{ + fn default() -> Self { + G1Prepared(G1Affine::

::prime_subgroup_generator()) + } +} + +impl ToBytes for G1Prepared

{ + fn write(&self, writer: W) -> IoResult<()> { + self.0.write(writer) + } +} diff --git a/arkworks/algebra/ec/src/models/bls12/g2.rs b/arkworks/algebra/ec/src/models/bls12/g2.rs new file mode 100644 index 00000000..c3676f19 --- /dev/null +++ b/arkworks/algebra/ec/src/models/bls12/g2.rs @@ -0,0 +1,157 @@ +use ark_std::{ + io::{Result as IoResult, Write}, + vec::Vec, +}; + +use ark_ff::{ + bytes::ToBytes, + fields::{BitIteratorBE, Field, Fp2}, +}; + +use num_traits::{One, Zero}; + +use crate::{ + bls12::{Bls12Parameters, TwistType}, + models::SWModelParameters, + short_weierstrass_jacobian::{GroupAffine, GroupProjective}, + AffineCurve, +}; + +pub type G2Affine
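As a quick orientation aid (not part of this patch): a minimal sketch of how downstream code typically drives the `ProjectiveCurve`/`AffineCurve` traits above, assuming the vendored `ark-bls12-377` and `ark-std` crates from this workspace's `Cargo.toml`:

```rust
use ark_bls12_377::{Fr, G1Affine, G1Projective};
use ark_ec::{AffineCurve, ProjectiveCurve};
use ark_ff::{PrimeField, UniformRand};

fn main() {
    let mut rng = ark_std::test_rng();
    let g = G1Projective::rand(&mut rng);
    let s = Fr::rand(&mut rng);

    // Scalar multiplication via the default double-and-add `mul` above.
    let sg = g.mul(s.into_repr());

    // Mixed addition: adding an affine point to a projective one is
    // cheaper than a full projective addition.
    let g_affine = g.into_affine();
    let two_g = g.add_mixed(&g_affine);
    assert_eq!(two_g, g.double());

    // Batch normalization amortizes a single field inversion over many points.
    let points = vec![g, sg, two_g];
    let affine: Vec<G1Affine> = G1Projective::batch_normalization_into_affine(&points);
    assert_eq!(affine[0], g_affine);
}
```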

diff --git a/arkworks/algebra/ec/src/models/bls12/g2.rs b/arkworks/algebra/ec/src/models/bls12/g2.rs
new file mode 100644
index 00000000..c3676f19
--- /dev/null
+++ b/arkworks/algebra/ec/src/models/bls12/g2.rs
@@ -0,0 +1,157 @@
+use ark_std::{
+    io::{Result as IoResult, Write},
+    vec::Vec,
+};
+
+use ark_ff::{
+    bytes::ToBytes,
+    fields::{BitIteratorBE, Field, Fp2},
+};
+
+use num_traits::{One, Zero};
+
+use crate::{
+    bls12::{Bls12Parameters, TwistType},
+    models::SWModelParameters,
+    short_weierstrass_jacobian::{GroupAffine, GroupProjective},
+    AffineCurve,
+};
+
+pub type G2Affine<P> = GroupAffine<<P as Bls12Parameters>::G2Parameters>;
+pub type G2Projective<P> = GroupProjective<<P as Bls12Parameters>::G2Parameters>;
+
+#[derive(Derivative)]
+#[derivative(
+    Clone(bound = "P: Bls12Parameters"),
+    Debug(bound = "P: Bls12Parameters"),
+    PartialEq(bound = "P: Bls12Parameters"),
+    Eq(bound = "P: Bls12Parameters")
+)]
+pub struct G2Prepared<P: Bls12Parameters> {
+    // Stores the coefficients of the line evaluations as calculated in
+    // https://eprint.iacr.org/2013/722.pdf
+    pub ell_coeffs: Vec<EllCoeff<Fp2<P::Fp2Params>>>,
+    pub infinity: bool,
+}
+
+pub(crate) type EllCoeff<F> = (F, F, F);
+
+#[derive(Derivative)]
+#[derivative(
+    Clone(bound = "P: Bls12Parameters"),
+    Copy(bound = "P: Bls12Parameters"),
+    Debug(bound = "P: Bls12Parameters")
+)]
+struct G2HomProjective<P: Bls12Parameters> {
+    x: Fp2<P::Fp2Params>,
+    y: Fp2<P::Fp2Params>,
+    z: Fp2<P::Fp2Params>,
+}
+
+impl<P: Bls12Parameters> Default for G2Prepared<P> {
+    fn default() -> Self {
+        Self::from(G2Affine::<P>::prime_subgroup_generator())
+    }
+}
+
+impl<P: Bls12Parameters> ToBytes for G2Prepared<P> {
+    fn write<W: Write>(&self, mut writer: W) -> IoResult<()> {
+        for coeff in &self.ell_coeffs {
+            coeff.0.write(&mut writer)?;
+            coeff.1.write(&mut writer)?;
+            coeff.2.write(&mut writer)?;
+        }
+        self.infinity.write(writer)
+    }
+}
+
+impl<P: Bls12Parameters> From<G2Affine<P>> for G2Prepared<P> {
+    fn from(q: G2Affine<P>) -> Self {
+        let two_inv = P::Fp::one().double().inverse().unwrap();
+        if q.is_zero() {
+            return Self {
+                ell_coeffs: vec![],
+                infinity: true,
+            };
+        }
+
+        let mut ell_coeffs = vec![];
+        let mut r = G2HomProjective {
+            x: q.x,
+            y: q.y,
+            z: Fp2::one(),
+        };
+
+        for i in BitIteratorBE::new(P::X).skip(1) {
+            ell_coeffs.push(doubling_step::<P>(&mut r, &two_inv));
+
+            if i {
+                ell_coeffs.push(addition_step::<P>(&mut r, &q));
+            }
+        }
+
+        Self {
+            ell_coeffs,
+            infinity: false,
+        }
+    }
+}
+impl<P: Bls12Parameters> G2Prepared<P> {
+    pub fn is_zero(&self) -> bool {
+        self.infinity
+    }
+}
+
+fn doubling_step<B: Bls12Parameters>(
+    r: &mut G2HomProjective<B>,
+    two_inv: &B::Fp,
+) -> EllCoeff<Fp2<B::Fp2Params>> {
+    // Formula for line function when working with
+    // homogeneous projective coordinates.
+
+    let mut a = r.x * &r.y;
+    a.mul_assign_by_fp(two_inv);
+    let b = r.y.square();
+    let c = r.z.square();
+    let e = B::G2Parameters::COEFF_B * &(c.double() + &c);
+    let f = e.double() + &e;
+    let mut g = b + &f;
+    g.mul_assign_by_fp(two_inv);
+    let h = (r.y + &r.z).square() - &(b + &c);
+    let i = e - &b;
+    let j = r.x.square();
+    let e_square = e.square();
+
+    r.x = a * &(b - &f);
+    r.y = g.square() - &(e_square.double() + &e_square);
+    r.z = b * &h;
+    match B::TWIST_TYPE {
+        TwistType::M => (i, j.double() + &j, -h),
+        TwistType::D => (-h, j.double() + &j, i),
+    }
+}
+
+fn addition_step<B: Bls12Parameters>(
+    r: &mut G2HomProjective<B>,
+    q: &G2Affine<B>,
+) -> EllCoeff<Fp2<B::Fp2Params>> {
+    // Formula for line function when working with
+    // homogeneous projective coordinates.
+    let theta = r.y - &(q.y * &r.z);
+    let lambda = r.x - &(q.x * &r.z);
+    let c = theta.square();
+    let d = lambda.square();
+    let e = lambda * &d;
+    let f = r.z * &c;
+    let g = r.x * &d;
+    let h = e + &f - &g.double();
+    r.x = lambda * &h;
+    r.y = theta * &(g - &h) - &(e * &r.y);
+    r.z *= &e;
+    let j = theta * &q.x - &(lambda * &q.y);
+
+    match B::TWIST_TYPE {
+        TwistType::M => (j, -theta, lambda),
+        TwistType::D => (lambda, -theta, j),
+    }
+}

diff --git a/arkworks/algebra/ec/src/models/bls12/mod.rs b/arkworks/algebra/ec/src/models/bls12/mod.rs
new file mode 100644
index 00000000..709d7442
--- /dev/null
+++ b/arkworks/algebra/ec/src/models/bls12/mod.rs
@@ -0,0 +1,256 @@
+use crate::{
+    models::{ModelParameters, SWModelParameters},
+    PairingEngine,
+};
+use ark_ff::fields::{
+    fp12_2over3over2::{Fp12, Fp12Parameters},
+    fp2::Fp2Parameters,
+    fp6_3over2::Fp6Parameters,
+    BitIteratorBE, Field, Fp2, PrimeField, SquareRootField,
+};
+use core::marker::PhantomData;
+use num_traits::{One, Zero};
+
+#[cfg(feature = "parallel")]
+use ark_ff::{Fp12ParamsWrapper, Fp2ParamsWrapper, QuadExtField};
+#[cfg(feature = "parallel")]
+use ark_std::cfg_iter;
+#[cfg(feature = "parallel")]
+use core::slice::Iter;
+#[cfg(feature = "parallel")]
+use rayon::iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator};
+
+/// A particular BLS12 group can have G2 being either a multiplicative or a
+/// divisive twist.
+pub enum TwistType {
+    M,
+    D,
+}
+
+pub trait Bls12Parameters: 'static {
+    /// Parameterizes the BLS12 family.
+    const X: &'static [u64];
+    /// Is `Self::X` negative?
+    const X_IS_NEGATIVE: bool;
+    /// What kind of twist is this?
+    const TWIST_TYPE: TwistType;
+
+    type Fp: PrimeField + SquareRootField + Into<<Self::Fp as PrimeField>::BigInt>;
+    type Fp2Params: Fp2Parameters<Fp = Self::Fp>;
+    type Fp6Params: Fp6Parameters<Fp2Params = Self::Fp2Params>;
+    type Fp12Params: Fp12Parameters<Fp6Params = Self::Fp6Params>;
+    type G1Parameters: SWModelParameters<BaseField = Self::Fp>;
+    type G2Parameters: SWModelParameters<
+        BaseField = Fp2<Self::Fp2Params>,
+        ScalarField = <Self::G1Parameters as ModelParameters>::ScalarField,
+    >;
+}
+
+pub mod g1;
+pub mod g2;
+
+pub use self::{
+    g1::{G1Affine, G1Prepared, G1Projective},
+    g2::{G2Affine, G2Prepared, G2Projective},
+};
+
+#[derive(Derivative)]
+#[derivative(Copy, Clone, PartialEq, Eq, Debug, Hash)]
+pub struct Bls12<P: Bls12Parameters>(PhantomData<fn() -> P>);
+
+impl<P: Bls12Parameters> Bls12<P> {
+    // Evaluate the line function at point p.
+    fn ell(f: &mut Fp12<P::Fp12Params>, coeffs: &g2::EllCoeff<Fp2<P::Fp2Params>>, p: &G1Affine<P>) {
+        let mut c0 = coeffs.0;
+        let mut c1 = coeffs.1;
+        let mut c2 = coeffs.2;
+
+        match P::TWIST_TYPE {
+            TwistType::M => {
+                c2.mul_assign_by_fp(&p.y);
+                c1.mul_assign_by_fp(&p.x);
+                f.mul_by_014(&c0, &c1, &c2);
+            }
+            TwistType::D => {
+                c0.mul_assign_by_fp(&p.y);
+                c1.mul_assign_by_fp(&p.x);
+                f.mul_by_034(&c0, &c1, &c2);
+            }
+        }
+    }
+
+    // Exponentiates `f` by `Self::X`, and stores the result in `result`.
+    fn exp_by_x(f: &Fp12<P::Fp12Params>, result: &mut Fp12<P::Fp12Params>) {
+        *result = f.cyclotomic_exp(P::X);
+        if P::X_IS_NEGATIVE {
+            result.conjugate();
+        }
+    }
+}
+
+impl<P: Bls12Parameters> PairingEngine for Bls12<P> {
+    type Fr = <P::G1Parameters as ModelParameters>::ScalarField;
+    type G1Projective = G1Projective<P>;
+    type G1Affine = G1Affine<P>;
+    type G1Prepared = G1Prepared<P>;
+    type G2Projective = G2Projective<P>;
+    type G2Affine = G2Affine<P>;
+    type G2Prepared = G2Prepared<P>;
+    type Fq = P::Fp;
+    type Fqe = Fp2<P::Fp2Params>;
+    type Fqk = Fp12<P::Fp12Params>;
+
+    #[cfg(not(feature = "parallel"))]
+    fn miller_loop<'a, I>(i: I) -> Self::Fqk
+    where
+        I: IntoIterator<Item = &'a (Self::G1Prepared, Self::G2Prepared)>,
+    {
+        let mut pairs = vec![];
+        for (p, q) in i {
+            if !p.is_zero() && !q.is_zero() {
+                pairs.push((p, q.ell_coeffs.iter()));
+            }
+        }
+        let mut f = Self::Fqk::one();
+        for i in BitIteratorBE::new(P::X).skip(1) {
+            f.square_in_place();
+            for (p, ref mut coeffs) in &mut pairs {
+                Self::ell(&mut f, coeffs.next().unwrap(), &p.0);
+            }
+            if i {
+                for &mut (p, ref mut coeffs) in &mut pairs {
+                    Self::ell(&mut f, coeffs.next().unwrap(), &p.0);
+                }
+            }
+        }
+        if P::X_IS_NEGATIVE {
+            f.conjugate();
+        }
+        f
+    }
+
+    #[cfg(feature = "parallel")]
+    fn miller_loop<'a, I>(i: I) -> Self::Fqk
+    where
+        I: IntoIterator<Item = &'a (Self::G1Prepared, Self::G2Prepared)>,
+    {
+        let mut pairs = vec![];
+        for (p, q) in i {
+            if !p.is_zero() && !q.is_zero() {
+                pairs.push((p, q.ell_coeffs.iter()));
+            }
+        }
+
+        let mut f_vec = vec![];
+        for _ in 0..pairs.len() {
+            f_vec.push(Self::Fqk::one());
+        }
+
+        let a = |p: &&G1Prepared<P>,
+                 coeffs: &Iter<
+            '_,
+            (
+                QuadExtField<Fp2ParamsWrapper<<P as Bls12Parameters>::Fp2Params>>,
+                QuadExtField<Fp2ParamsWrapper<<P as Bls12Parameters>::Fp2Params>>,
+                QuadExtField<Fp2ParamsWrapper<<P as Bls12Parameters>::Fp2Params>>,
+            ),
+        >,
+                 mut f: QuadExtField<Fp12ParamsWrapper<<P as Bls12Parameters>::Fp12Params>>|
+         -> QuadExtField<Fp12ParamsWrapper<<P as Bls12Parameters>::Fp12Params>> {
+            let coeffs = coeffs.as_slice();
+            let mut j = 0;
+            for i in BitIteratorBE::new(P::X).skip(1) {
+                f.square_in_place();
+                Self::ell(&mut f, &coeffs[j], &p.0);
+                j += 1;
+                if i {
+                    Self::ell(&mut f, &coeffs[j], &p.0);
+                    j += 1;
+                }
+            }
+            f
+        };
+
+        let mut products = vec![];
+        cfg_iter!(pairs)
+            .zip(f_vec)
+            .map(|(p, f)| a(&p.0, &p.1, f))
+            .collect_into_vec(&mut products);
+
+        let mut f = Self::Fqk::one();
+        for ff in products {
+            f *= ff;
+        }
+        if P::X_IS_NEGATIVE {
+            f.conjugate();
+        }
+        f
+    }
+
+    fn final_exponentiation(f: &Self::Fqk) -> Option<Self::Fqk> {
+        // Computing the final exponentiation following
+        // https://eprint.iacr.org/2020/875
+        // Adapted from the implementation in https://github.com/ConsenSys/gurvy/pull/29
+
+        // f1 = r.conjugate() = f^(p^6)
+        let mut f1 = *f;
+        f1.conjugate();
+
+        f.inverse().map(|mut f2| {
+            // f2 = f^(-1);
+            // r = f^(p^6 - 1)
+            let mut r = f1 * &f2;
+
+            // f2 = f^(p^6 - 1)
+            f2 = r;
+            // r = f^((p^6 - 1)(p^2))
+            r.frobenius_map(2);
+
+            // r = f^((p^6 - 1)(p^2) + (p^6 - 1))
+            // r = f^((p^6 - 1)(p^2 + 1))
+            r *= &f2;
+
+            // Hard part of the final exponentiation:
+            // t[0].CyclotomicSquare(&result)
+            let mut y0 = r.cyclotomic_square();
+            // t[1].Expt(&result)
+            let mut y1 = Fp12::zero();
+            Self::exp_by_x(&r, &mut y1);
+            // t[2].InverseUnitary(&result)
+            let mut y2 = r;
+            y2.conjugate();
+            // t[1].Mul(&t[1], &t[2])
+            y1 *= &y2;
+            // t[2].Expt(&t[1])
+            Self::exp_by_x(&y1, &mut y2);
+            // t[1].InverseUnitary(&t[1])
+            y1.conjugate();
+            // t[1].Mul(&t[1], &t[2])
+            y1 *= &y2;
+            // t[2].Expt(&t[1])
+            Self::exp_by_x(&y1, &mut y2);
+            // t[1].Frobenius(&t[1])
+            y1.frobenius_map(1);
+            // t[1].Mul(&t[1], &t[2])
+            y1 *= &y2;
+            // result.Mul(&result, &t[0])
+            r *= &y0;
+            // t[0].Expt(&t[1])
+            Self::exp_by_x(&y1, &mut y0);
+            // t[2].Expt(&t[0])
+            Self::exp_by_x(&y0, &mut y2);
+            // t[0].FrobeniusSquare(&t[1])
+            y0 = y1;
+            y0.frobenius_map(2);
+            // t[1].InverseUnitary(&t[1])
+            y1.conjugate();
+            // t[1].Mul(&t[1], &t[2])
+            y1 *= &y2;
+            // t[1].Mul(&t[1], &t[0])
+            y1 *= &y0;
+            // result.Mul(&result, &t[1])
+            r *= &y1;
+            r
+        })
+    }
+}
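To see the Miller loop / final exponentiation split above in action, here is a minimal sketch (not part of this diff), assuming the vendored `ark-bls12-377` crate; `PairingEngine::pairing` performs exactly this two-step computation internally:

```rust
use ark_bls12_377::{Bls12_377, Fr, G1Projective, G2Projective};
use ark_ec::{prepare_g1, prepare_g2, AffineCurve, PairingEngine, ProjectiveCurve};
use ark_ff::{Field, PrimeField, UniformRand};

fn main() {
    let mut rng = ark_std::test_rng();
    let p = G1Projective::rand(&mut rng).into_affine();
    let q = G2Projective::rand(&mut rng).into_affine();

    // One-shot pairing.
    let e_pq = Bls12_377::pairing(p, q);

    // The same value via the explicit Miller loop / final exponentiation split.
    let p_prep = prepare_g1::<Bls12_377>(p);
    let q_prep = prepare_g2::<Bls12_377>(q);
    let f = Bls12_377::miller_loop(core::iter::once(&(p_prep, q_prep)));
    assert_eq!(Bls12_377::final_exponentiation(&f).unwrap(), e_pq);

    // Bilinearity: e(aP, Q) = e(P, Q)^a.
    let a = Fr::rand(&mut rng);
    let ap = p.mul(a).into_affine();
    assert_eq!(Bls12_377::pairing(ap, q), e_pq.pow(a.into_repr()));
}
```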

diff --git a/arkworks/algebra/ec/src/models/bn/g1.rs b/arkworks/algebra/ec/src/models/bn/g1.rs
new file mode 100644
index 00000000..f973326a
--- /dev/null
+++ b/arkworks/algebra/ec/src/models/bn/g1.rs
@@ -0,0 +1,44 @@
+use crate::{
+    bn::BnParameters,
+    short_weierstrass_jacobian::{GroupAffine, GroupProjective},
+    AffineCurve,
+};
+use ark_ff::bytes::ToBytes;
+use ark_std::io::{Result as IoResult, Write};
+use num_traits::Zero;
+
+pub type G1Affine<P> = GroupAffine<<P as BnParameters>::G1Parameters>;
+pub type G1Projective<P> = GroupProjective<<P as BnParameters>::G1Parameters>;
+
+#[derive(Derivative)]
+#[derivative(
+    Clone(bound = "P: BnParameters"),
+    Debug(bound = "P: BnParameters"),
+    PartialEq(bound = "P: BnParameters"),
+    Eq(bound = "P: BnParameters")
+)]
+pub struct G1Prepared<P: BnParameters>(pub G1Affine<P>);
+
+impl<P: BnParameters> From<G1Affine<P>> for G1Prepared<P> {
+    fn from(other: G1Affine<P>) -> Self {
+        G1Prepared(other)
+    }
+}
+
+impl<P: BnParameters> G1Prepared<P> {
+    pub fn is_zero(&self) -> bool {
+        self.0.is_zero()
+    }
+}
+
+impl<P: BnParameters> Default for G1Prepared<P> {
+    fn default() -> Self {
+        G1Prepared(G1Affine::<P>::prime_subgroup_generator())
+    }
+}
+
+impl<P: BnParameters> ToBytes for G1Prepared<P> {
+    fn write<W: Write>(&self, writer: W) -> IoResult<()> {
+        self.0.write(writer)
+    }
+}

diff --git a/arkworks/algebra/ec/src/models/bn/g2.rs b/arkworks/algebra/ec/src/models/bn/g2.rs
new file mode 100644
index 00000000..f8e8c91d
--- /dev/null
+++ b/arkworks/algebra/ec/src/models/bn/g2.rs
@@ -0,0 +1,191 @@
+use ark_std::{
+    io::{Result as IoResult, Write},
+    vec::Vec,
+};
+
+use ark_ff::{
+    bytes::ToBytes,
+    fields::{Field, Fp2},
+};
+
+use num_traits::{One, Zero};
+
+use crate::{
+    bn::{BnParameters, TwistType},
+    models::SWModelParameters,
+    short_weierstrass_jacobian::{GroupAffine, GroupProjective},
+    AffineCurve,
+};
+
+pub type G2Affine<P> = GroupAffine<<P as BnParameters>::G2Parameters>;
+pub type G2Projective<P> = GroupProjective<<P as BnParameters>::G2Parameters>;
+
+#[derive(Derivative)]
+#[derivative(
+    Clone(bound = "P: BnParameters"),
+    Debug(bound = "P: BnParameters"),
+    PartialEq(bound = "P: BnParameters"),
+    Eq(bound = "P: BnParameters")
+)]
+pub struct G2Prepared<P: BnParameters> {
+    // Stores the coefficients of the line evaluations as calculated in
+    // https://eprint.iacr.org/2013/722.pdf
+    pub ell_coeffs: Vec<EllCoeff<Fp2<P::Fp2Params>>>,
+    pub infinity: bool,
+}
+
+pub(crate) type EllCoeff<F> = (F, F, F);
+
+#[derive(Derivative)]
+#[derivative(
+    Clone(bound = "P: BnParameters"),
+    Copy(bound = "P: BnParameters"),
+    Debug(bound = "P: BnParameters")
+)]
+struct G2HomProjective<P: BnParameters> {
+    x: Fp2<P::Fp2Params>,
+    y: Fp2<P::Fp2Params>,
+    z: Fp2<P::Fp2Params>,
+}
+
+impl<P: BnParameters> Default for G2Prepared<P> {
+    fn default() -> Self {
+        Self::from(G2Affine::<P>::prime_subgroup_generator())
+    }
+}
+
+impl<P: BnParameters> ToBytes for G2Prepared<P> {
+    fn write<W: Write>(&self, mut writer: W) -> IoResult<()> {
+        for coeff in &self.ell_coeffs {
+            coeff.0.write(&mut writer)?;
+            coeff.1.write(&mut writer)?;
+            coeff.2.write(&mut writer)?;
+        }
+        self.infinity.write(writer)
+    }
+}
+
+impl<P: BnParameters> From<G2Affine<P>> for G2Prepared<P> {
+    fn from(q: G2Affine<P>) -> Self {
+        let two_inv = P::Fp::one().double().inverse().unwrap();
+        if q.is_zero() {
+            return Self {
+                ell_coeffs: vec![],
+                infinity: true,
+            };
+        }
+
+        let mut ell_coeffs = vec![];
+        let mut r = G2HomProjective {
+            x: q.x,
+            y: q.y,
+            z: Fp2::one(),
+        };
+
+        let negq = -q;
+
+        for i in (1..P::ATE_LOOP_COUNT.len()).rev() {
+            ell_coeffs.push(doubling_step::<P>(&mut r, &two_inv));
+
+            let bit = P::ATE_LOOP_COUNT[i - 1];
+
+            match bit {
+                1 => {
+                    ell_coeffs.push(addition_step::<P>(&mut r, &q));
+                }
+                -1 => {
+                    ell_coeffs.push(addition_step::<P>(&mut r, &negq));
+                }
+                _ => continue,
+            }
+        }
+
+        let q1 = mul_by_char::<P>(q);
+        let mut q2 = mul_by_char::<P>(q1);
+
+        if P::X_IS_NEGATIVE {
+            r.y = -r.y;
+        }
+
+        q2.y = -q2.y;
+
+        ell_coeffs.push(addition_step::<P>(&mut r, &q1));
+        ell_coeffs.push(addition_step::<P>(&mut r, &q2));
+
+        Self {
+            ell_coeffs,
+            infinity: false,
+        }
+    }
+}
+impl<P: BnParameters> G2Prepared<P> {
+    pub fn is_zero(&self) -> bool {
+        self.infinity
+    }
+}
+
+fn mul_by_char<P: BnParameters>(r: G2Affine<P>) -> G2Affine<P> {
+    // multiply by field characteristic
+
+    let mut s = r;
+    s.x.frobenius_map(1);
+    s.x *= &P::TWIST_MUL_BY_Q_X;
+    s.y.frobenius_map(1);
+    s.y *= &P::TWIST_MUL_BY_Q_Y;
+
+    s
+}
+
+fn doubling_step<B: BnParameters>(
+    r: &mut G2HomProjective<B>,
+    two_inv: &B::Fp,
+) -> EllCoeff<Fp2<B::Fp2Params>> {
+    // Formula for line function when working with
+    // homogeneous projective coordinates.
+
+    let mut a = r.x * &r.y;
+    a.mul_assign_by_fp(two_inv);
+    let b = r.y.square();
+    let c = r.z.square();
+    let e = B::G2Parameters::COEFF_B * &(c.double() + &c);
+    let f = e.double() + &e;
+    let mut g = b + &f;
+    g.mul_assign_by_fp(two_inv);
+    let h = (r.y + &r.z).square() - &(b + &c);
+    let i = e - &b;
+    let j = r.x.square();
+    let e_square = e.square();
+
+    r.x = a * &(b - &f);
+    r.y = g.square() - &(e_square.double() + &e_square);
+    r.z = b * &h;
+    match B::TWIST_TYPE {
+        TwistType::M => (i, j.double() + &j, -h),
+        TwistType::D => (-h, j.double() + &j, i),
+    }
+}
+
+fn addition_step<B: BnParameters>(
+    r: &mut G2HomProjective<B>,
+    q: &G2Affine<B>,
+) -> EllCoeff<Fp2<B::Fp2Params>> {
+    // Formula for line function when working with
+    // homogeneous projective coordinates.
+    let theta = r.y - &(q.y * &r.z);
+    let lambda = r.x - &(q.x * &r.z);
+    let c = theta.square();
+    let d = lambda.square();
+    let e = lambda * &d;
+    let f = r.z * &c;
+    let g = r.x * &d;
+    let h = e + &f - &g.double();
+    r.x = lambda * &h;
+    r.y = theta * &(g - &h) - &(e * &r.y);
+    r.z *= &e;
+    let j = theta * &q.x - &(lambda * &q.y);
+
+    match B::TWIST_TYPE {
+        TwistType::M => (j, -theta, lambda),
+        TwistType::D => (lambda, -theta, j),
+    }
+}

diff --git a/arkworks/algebra/ec/src/models/bn/mod.rs b/arkworks/algebra/ec/src/models/bn/mod.rs
new file mode 100644
index 00000000..c502a955
--- /dev/null
+++ b/arkworks/algebra/ec/src/models/bn/mod.rs
@@ -0,0 +1,212 @@
+use crate::{
+    models::{ModelParameters, SWModelParameters},
+    PairingEngine,
+};
+use ark_ff::fields::{
+    fp12_2over3over2::{Fp12, Fp12Parameters},
+    fp2::Fp2Parameters,
+    fp6_3over2::Fp6Parameters,
+    Field, Fp2, PrimeField, SquareRootField,
+};
+use num_traits::One;
+
+use core::marker::PhantomData;
+
+pub enum TwistType {
+    M,
+    D,
+}
+
+pub trait BnParameters: 'static {
+    // The absolute value of the BN curve parameter `X`
+    // (as in `q = 36 X^4 + 36 X^3 + 24 X^2 + 6 X + 1`).
+    const X: &'static [u64];
+    // Whether or not `X` is negative.
+    const X_IS_NEGATIVE: bool;
+
+    // The absolute value of `6X + 2`.
+    const ATE_LOOP_COUNT: &'static [i8];
+
+    const TWIST_TYPE: TwistType;
+    const TWIST_MUL_BY_Q_X: Fp2<Self::Fp2Params>;
+    const TWIST_MUL_BY_Q_Y: Fp2<Self::Fp2Params>;
+    type Fp: PrimeField + SquareRootField + Into<<Self::Fp as PrimeField>::BigInt>;
+    type Fp2Params: Fp2Parameters<Fp = Self::Fp>;
+    type Fp6Params: Fp6Parameters<Fp2Params = Self::Fp2Params>;
+    type Fp12Params: Fp12Parameters<Fp6Params = Self::Fp6Params>;
+    type G1Parameters: SWModelParameters<BaseField = Self::Fp>;
+    type G2Parameters: SWModelParameters<
+        BaseField = Fp2<Self::Fp2Params>,
+        ScalarField = <Self::G1Parameters as ModelParameters>::ScalarField,
+    >;
+}
+
+pub mod g1;
+pub mod g2;
+
+pub use self::{
+    g1::{G1Affine, G1Prepared, G1Projective},
+    g2::{G2Affine, G2Prepared, G2Projective},
+};
+
+#[derive(Derivative)]
+#[derivative(Copy, Clone, PartialEq, Eq, Debug, Hash)]
+pub struct Bn<P: BnParameters>(PhantomData<fn() -> P>);
+
+impl<P: BnParameters> Bn<P> {
+    // Evaluate the line function at point p.
+    fn ell(f: &mut Fp12<P::Fp12Params>, coeffs: &g2::EllCoeff<Fp2<P::Fp2Params>>, p: &G1Affine<P>) {
+        let mut c0 = coeffs.0;
+        let mut c1 = coeffs.1;
+        let mut c2 = coeffs.2;
+
+        match P::TWIST_TYPE {
+            TwistType::M => {
+                c2.mul_assign_by_fp(&p.y);
+                c1.mul_assign_by_fp(&p.x);
+                f.mul_by_014(&c0, &c1, &c2);
+            }
+            TwistType::D => {
+                c0.mul_assign_by_fp(&p.y);
+                c1.mul_assign_by_fp(&p.x);
+                f.mul_by_034(&c0, &c1, &c2);
+            }
+        }
+    }
+
+    fn exp_by_neg_x(mut f: Fp12<P::Fp12Params>) -> Fp12<P::Fp12Params> {
+        f = f.cyclotomic_exp(&P::X);
+        if !P::X_IS_NEGATIVE {
+            f.conjugate();
+        }
+        f
+    }
+}
+
+impl<P: BnParameters> PairingEngine for Bn<P> {
+    type Fr = <P::G1Parameters as ModelParameters>::ScalarField;
+    type G1Projective = G1Projective<P>;
+    type G1Affine = G1Affine<P>;
+    type G1Prepared = G1Prepared<P>;
+    type G2Projective = G2Projective<P>;
+    type G2Affine = G2Affine<P>;
+    type G2Prepared = G2Prepared<P>;
+    type Fq = P::Fp;
+    type Fqe = Fp2<P::Fp2Params>;
+    type Fqk = Fp12<P::Fp12Params>;
+
+    fn miller_loop<'a, I>(i: I) -> Self::Fqk
+    where
+        I: IntoIterator<Item = &'a (Self::G1Prepared, Self::G2Prepared)>,
+    {
+        let mut pairs = vec![];
+        for (p, q) in i {
+            if !p.is_zero() && !q.is_zero() {
+                pairs.push((p, q.ell_coeffs.iter()));
+            }
+        }
+
+        let mut f = Self::Fqk::one();
+
+        for i in (1..P::ATE_LOOP_COUNT.len()).rev() {
+            if i != P::ATE_LOOP_COUNT.len() - 1 {
+                f.square_in_place();
+            }
+
+            for (p, ref mut coeffs) in &mut pairs {
+                Self::ell(&mut f, coeffs.next().unwrap(), &p.0);
+            }
+
+            let bit = P::ATE_LOOP_COUNT[i - 1];
+            match bit {
+                1 => {
+                    for &mut (p, ref mut coeffs) in &mut pairs {
+                        Self::ell(&mut f, coeffs.next().unwrap(), &p.0);
+                    }
+                }
+                -1 => {
+                    for &mut (p, ref mut coeffs) in &mut pairs {
+                        Self::ell(&mut f, coeffs.next().unwrap(), &p.0);
+                    }
+                }
+                _ => continue,
+            }
+        }
+
+        if P::X_IS_NEGATIVE {
+            f.conjugate();
+        }
+
+        for &mut (p, ref mut coeffs) in &mut pairs {
+            Self::ell(&mut f, coeffs.next().unwrap(), &p.0);
+        }
+
+        for &mut (p, ref mut coeffs) in &mut pairs {
+            Self::ell(&mut f, coeffs.next().unwrap(), &p.0);
+        }
+
+        f
+    }
+
+    #[allow(clippy::let_and_return)]
+    fn final_exponentiation(f: &Self::Fqk) -> Option<Self::Fqk> {
+        // Easy part: result = elt^((q^6-1)*(q^2+1)).
+        // Follows, e.g., Beuchat et al page 9, by computing result as follows:
+        //   elt^((q^6-1)*(q^2+1)) = (conj(elt) * elt^(-1))^(q^2+1)
+
+        // f1 = r.conjugate() = f^(p^6)
+        let mut f1 = *f;
+        f1.conjugate();
+
+        f.inverse().map(|mut f2| {
+            // f2 = f^(-1);
+            // r = f^(p^6 - 1)
+            let mut r = f1 * &f2;
+
+            // f2 = f^(p^6 - 1)
+            f2 = r;
+            // r = f^((p^6 - 1)(p^2))
+            r.frobenius_map(2);
+
+            // r = f^((p^6 - 1)(p^2) + (p^6 - 1))
+            // r = f^((p^6 - 1)(p^2 + 1))
+            r *= &f2;
+
+            // Hard part follows Laura Fuentes-Castaneda et al. "Faster hashing to G2"
+            // by computing:
+            //
+            // result = elt^(q^3 * (12*z^3 + 6z^2 + 4z - 1) +
+            //               q^2 * (12*z^3 + 6z^2 + 6z) +
+            //               q   * (12*z^3 + 6z^2 + 4z) +
+            //               1   * (12*z^3 + 12z^2 + 6z + 1))
+            // which equals
+            //
+            // result = elt^( 2z * ( 6z^2 + 3z + 1 ) * (q^4 - q^2 + 1)/r ).
+
+            let y0 = Self::exp_by_neg_x(r);
+            let y1 = y0.cyclotomic_square();
+            let y2 = y1.cyclotomic_square();
+            let mut y3 = y2 * &y1;
+            let y4 = Self::exp_by_neg_x(y3);
+            let y5 = y4.cyclotomic_square();
+            let mut y6 = Self::exp_by_neg_x(y5);
+            y3.conjugate();
+            y6.conjugate();
+            let y7 = y6 * &y4;
+            let mut y8 = y7 * &y3;
+            let y9 = y8 * &y1;
+            let y10 = y8 * &y4;
+            let y11 = y10 * &r;
+            let mut y12 = y9;
+            y12.frobenius_map(1);
+            let y13 = y12 * &y11;
+            y8.frobenius_map(2);
+            let y14 = y8 * &y13;
+            r.conjugate();
+            let mut y15 = r * &y9;
+            y15.frobenius_map(3);
+            let y16 = y15 * &y14;
+
+            y16
+        })
+    }
+}
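The signed `ATE_LOOP_COUNT` convention used above is easy to misread, so here is a toy integer analogue (illustrative only, not library code): digits are little-endian over {-1, 0, 1}; the most significant digit is a 1 that is consumed by initializing the accumulator, exactly as the loop above starts from `r = q`; and a -1 digit subtracts the base, just as `addition_step` is fed `-q`:

```rust
// Evaluate a signed-digit representation the way the Miller loop walks
// `ATE_LOOP_COUNT`: one doubling per iteration, then an optional
// add/subtract of the base on a +1 / -1 digit. (The `f`-squaring skip on
// the first iteration in the real loop is just the `f == 1` optimization.)
fn eval_signed_digits(digits: &[i8], base: i64) -> i64 {
    let mut acc = base; // the leading 1 digit at digits[len - 1]
    for i in (1..digits.len()).rev() {
        acc *= 2; // mirrors the per-iteration doubling_step
        match digits[i - 1] {
            1 => acc += base,  // addition_step with q
            -1 => acc -= base, // addition_step with -q
            _ => {}
        }
    }
    acc
}

fn main() {
    // 2^4 - 2^2 + 1 = 13, written little-endian as [1, 0, -1, 0, 1].
    let digits = [1, 0, -1, 0, 1];
    assert_eq!(eval_signed_digits(&digits, 1), 13);
    assert_eq!(eval_signed_digits(&digits, 7), 7 * 13);
}
```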

diff --git a/arkworks/algebra/ec/src/models/bw6/g1.rs b/arkworks/algebra/ec/src/models/bw6/g1.rs
new file mode 100644
index 00000000..b86adccb
--- /dev/null
+++ b/arkworks/algebra/ec/src/models/bw6/g1.rs
@@ -0,0 +1,44 @@
+use crate::{
+    bw6::BW6Parameters,
+    short_weierstrass_jacobian::{GroupAffine, GroupProjective},
+    AffineCurve,
+};
+use ark_ff::bytes::ToBytes;
+use ark_std::io::{Result as IoResult, Write};
+use num_traits::Zero;
+
+pub type G1Affine<P> = GroupAffine<<P as BW6Parameters>::G1Parameters>;
+pub type G1Projective<P> = GroupProjective<<P as BW6Parameters>::G1Parameters>;
+
+#[derive(Derivative)]
+#[derivative(
+    Clone(bound = "P: BW6Parameters"),
+    Debug(bound = "P: BW6Parameters"),
+    PartialEq(bound = "P: BW6Parameters"),
+    Eq(bound = "P: BW6Parameters")
+)]
+pub struct G1Prepared<P: BW6Parameters>(pub G1Affine<P>);
+
+impl<P: BW6Parameters> From<G1Affine<P>> for G1Prepared<P> {
+    fn from(other: G1Affine<P>) -> Self {
+        G1Prepared(other)
+    }
+}
+
+impl<P: BW6Parameters> G1Prepared<P> {
+    pub fn is_zero(&self) -> bool {
+        self.0.is_zero()
+    }
+}
+
+impl<P: BW6Parameters> Default for G1Prepared<P> {
+    fn default() -> Self {
+        G1Prepared(G1Affine::<P>::prime_subgroup_generator())
+    }
+}
+
+impl<P: BW6Parameters> ToBytes for G1Prepared<P> {
+    fn write<W: Write>(&self, writer: W) -> IoResult<()> {
+        self.0.write(writer)
+    }
+}

diff --git a/arkworks/algebra/ec/src/models/bw6/g2.rs b/arkworks/algebra/ec/src/models/bw6/g2.rs
new file mode 100644
index 00000000..68fbacfb
--- /dev/null
+++ b/arkworks/algebra/ec/src/models/bw6/g2.rs
@@ -0,0 +1,185 @@
+use ark_std::{
+    io::{Result as IoResult, Write},
+    vec::Vec,
+};
+
+use ark_ff::{
+    bytes::ToBytes,
+    fields::{BitIteratorBE, Field},
+};
+
+use num_traits::{One, Zero};
+
+use crate::{
+    bw6::{BW6Parameters, TwistType},
+    models::SWModelParameters,
+    short_weierstrass_jacobian::{GroupAffine, GroupProjective},
+    AffineCurve,
+};
+
+pub type G2Affine<P> = GroupAffine<<P as BW6Parameters>::G2Parameters>;
+pub type G2Projective<P> = GroupProjective<<P as BW6Parameters>::G2Parameters>;
+
+#[derive(Derivative)]
+#[derivative(
+    Clone(bound = "P: BW6Parameters"),
+    Debug(bound = "P: BW6Parameters"),
+    PartialEq(bound = "P: BW6Parameters"),
+    Eq(bound = "P: BW6Parameters")
+)]
+pub struct G2Prepared<P: BW6Parameters> {
+    // Stores the coefficients of the line evaluations as calculated in
+    // https://eprint.iacr.org/2013/722.pdf
+    pub ell_coeffs_1: Vec<(P::Fp, P::Fp, P::Fp)>,
+    pub ell_coeffs_2: Vec<(P::Fp, P::Fp, P::Fp)>,
+    pub infinity: bool,
+}
+
+#[derive(Derivative)]
+#[derivative(
+    Clone(bound = "P: BW6Parameters"),
+    Copy(bound = "P: BW6Parameters"),
+    Debug(bound = "P: BW6Parameters")
+)]
+struct G2HomProjective<P: BW6Parameters> {
+    x: P::Fp,
+    y: P::Fp,
+    z: P::Fp,
+}
+
+impl<P: BW6Parameters> Default for G2Prepared<P> {
+    fn default() -> Self {
+        Self::from(G2Affine::<P>::prime_subgroup_generator())
+    }
+}
+
+impl<P: BW6Parameters> ToBytes for G2Prepared<P> {
+    fn write<W: Write>(&self, mut writer: W) -> IoResult<()> {
+        for coeff_1 in &self.ell_coeffs_1 {
+            coeff_1.0.write(&mut writer)?;
+            coeff_1.1.write(&mut writer)?;
+            coeff_1.2.write(&mut writer)?;
+        }
+        for coeff_2 in &self.ell_coeffs_2 {
+            coeff_2.0.write(&mut writer)?;
+            coeff_2.1.write(&mut writer)?;
+            coeff_2.2.write(&mut writer)?;
+        }
+        self.infinity.write(writer)
+    }
+}
+
+impl<P: BW6Parameters> From<G2Affine<P>> for G2Prepared<P> {
+    fn from(q: G2Affine<P>) -> Self {
+        if q.is_zero() {
+            return Self {
+                ell_coeffs_1: vec![],
+                ell_coeffs_2: vec![],
+                infinity: true,
+            };
+        }
+
+        // f_{u+1,Q}(P)
+        let mut ell_coeffs_1 = vec![];
+        let mut r = G2HomProjective {
+            x: q.x,
+            y: q.y,
+            z: P::Fp::one(),
+        };
+
+        for i in BitIteratorBE::new(P::ATE_LOOP_COUNT_1).skip(1) {
+            ell_coeffs_1.push(doubling_step::<P>(&mut r));
+
+            if i {
+                ell_coeffs_1.push(addition_step::<P>(&mut r, &q));
+            }
+        }
+
+        // f_{u^3-u^2-u,Q}(P)
+        let mut ell_coeffs_2 = vec![];
+        let mut r = G2HomProjective {
+            x: q.x,
+            y: q.y,
+            z: P::Fp::one(),
+        };
+
+        let negq = -q;
+
+        for i in (1..P::ATE_LOOP_COUNT_2.len()).rev() {
+            ell_coeffs_2.push(doubling_step::<P>(&mut r));
+
+            let bit = P::ATE_LOOP_COUNT_2[i - 1];
+            match bit {
+                1 => {
+                    ell_coeffs_2.push(addition_step::<P>(&mut r, &q));
+                }
+                -1 => {
+                    ell_coeffs_2.push(addition_step::<P>(&mut r, &negq));
+                }
+                _ => continue,
+            }
+        }
+
+        Self {
+            ell_coeffs_1,
+            ell_coeffs_2,
+            infinity: false,
+        }
+    }
+}
+
+impl<P: BW6Parameters> G2Prepared<P> {
+    pub fn is_zero(&self) -> bool {
+        self.infinity
+    }
+}
+
+fn doubling_step<B: BW6Parameters>(r: &mut G2HomProjective<B>) -> (B::Fp, B::Fp, B::Fp) {
+    // Formula for line function when working with
+    // homogeneous projective coordinates, as described in https://eprint.iacr.org/2013/722.pdf.
+
+    let a = r.x * &r.y;
+    let b = r.y.square();
+    let b4 = b.double().double();
+    let c = r.z.square();
+    let e = B::G2Parameters::COEFF_B * &(c.double() + &c);
+    let f = e.double() + &e;
+    let g = b + &f;
+    let h = (r.y + &r.z).square() - &(b + &c);
+    let i = e - &b;
+    let j = r.x.square();
+    let e2_square = e.double().square();
+
+    r.x = a.double() * &(b - &f);
+    r.y = g.square() - &(e2_square.double() + &e2_square);
+    r.z = b4 * &h;
+    match B::TWIST_TYPE {
+        TwistType::M => (i, j.double() + &j, -h),
+        TwistType::D => (-h, j.double() + &j, i),
+    }
+}
+
+fn addition_step<B: BW6Parameters>(
+    r: &mut G2HomProjective<B>,
+    q: &G2Affine<B>,
+) -> (B::Fp, B::Fp, B::Fp) {
+    // Formula for line function when working with
+    // homogeneous projective coordinates, as described in https://eprint.iacr.org/2013/722.pdf.
+    let theta = r.y - &(q.y * &r.z);
+    let lambda = r.x - &(q.x * &r.z);
+    let c = theta.square();
+    let d = lambda.square();
+    let e = lambda * &d;
+    let f = r.z * &c;
+    let g = r.x * &d;
+    let h = e + &f - &g.double();
+    r.x = lambda * &h;
+    r.y = theta * &(g - &h) - &(e * &r.y);
+    r.z *= &e;
+    let j = theta * &q.x - &(lambda * &q.y);
+
+    match B::TWIST_TYPE {
+        TwistType::M => (j, -theta, lambda),
+        TwistType::D => (lambda, -theta, j),
+    }
+}

diff --git a/arkworks/algebra/ec/src/models/bw6/mod.rs b/arkworks/algebra/ec/src/models/bw6/mod.rs
new file mode 100644
index 00000000..8bb67eb1
--- /dev/null
+++ b/arkworks/algebra/ec/src/models/bw6/mod.rs
@@ -0,0 +1,297 @@
+use crate::{
+    models::{ModelParameters, SWModelParameters},
+    PairingEngine,
+};
+use ark_ff::fields::{
+    fp3::Fp3Parameters,
+    fp6_2over3::{Fp6, Fp6Parameters},
+    BitIteratorBE, Field, PrimeField, SquareRootField,
+};
+use num_traits::One;
+
+use core::marker::PhantomData;
+
+pub enum TwistType {
+    M,
+    D,
+}
+
+pub trait BW6Parameters: 'static + Eq + PartialEq {
+    const X: <Self::Fp as PrimeField>::BigInt;
+    const X_IS_NEGATIVE: bool;
+    const ATE_LOOP_COUNT_1: &'static [u64];
+    const ATE_LOOP_COUNT_1_IS_NEGATIVE: bool;
+    const ATE_LOOP_COUNT_2: &'static [i8];
+    const ATE_LOOP_COUNT_2_IS_NEGATIVE: bool;
+    const TWIST_TYPE: TwistType;
+    type Fp: PrimeField + SquareRootField + Into<<Self::Fp as PrimeField>::BigInt>;
+    type Fp3Params: Fp3Parameters<Fp = Self::Fp>;
+    type Fp6Params: Fp6Parameters<Fp3Params = Self::Fp3Params>;
+    type G1Parameters: SWModelParameters<BaseField = Self::Fp>;
+    type G2Parameters: SWModelParameters<
+        BaseField = Self::Fp,
+        ScalarField = <Self::G1Parameters as ModelParameters>::ScalarField,
+    >;
+}
+
+pub mod g1;
+pub mod g2;
+
+pub use self::{
+    g1::{G1Affine, G1Prepared, G1Projective},
+    g2::{G2Affine, G2Prepared, G2Projective},
+};
+
+#[derive(Derivative)]
+#[derivative(Copy, Clone, PartialEq, Eq, Debug, Hash)]
+pub struct BW6<P: BW6Parameters>(PhantomData<fn() -> P>);
+
+impl<P: BW6Parameters> BW6<P> {
+    // Evaluate the line function at point p.
+    fn ell(f: &mut Fp6<P::Fp6Params>, coeffs: &(P::Fp, P::Fp, P::Fp), p: &G1Affine<P>) {
+        let mut c0 = coeffs.0;
+        let mut c1 = coeffs.1;
+        let mut c2 = coeffs.2;
+
+        match P::TWIST_TYPE {
+            TwistType::M => {
+                c2 *= &p.y;
+                c1 *= &p.x;
+                f.mul_by_014(&c0, &c1, &c2);
+            }
+            TwistType::D => {
+                c0 *= &p.y;
+                c1 *= &p.x;
+                f.mul_by_034(&c0, &c1, &c2);
+            }
+        }
+    }
+
+    fn exp_by_x(mut f: Fp6<P::Fp6Params>) -> Fp6<P::Fp6Params> {
+        f = f.cyclotomic_exp(&P::X);
+        if P::X_IS_NEGATIVE {
+            f.conjugate();
+        }
+        f
+    }
+
+    pub fn final_exponentiation(value: &Fp6<P::Fp6Params>) -> Fp6<P::Fp6Params> {
+        let value_inv = value.inverse().unwrap();
+        let value_to_first_chunk = Self::final_exponentiation_first_chunk(value, &value_inv);
+        Self::final_exponentiation_last_chunk(&value_to_first_chunk)
+    }
+
+    fn final_exponentiation_first_chunk(
+        elt: &Fp6<P::Fp6Params>,
+        elt_inv: &Fp6<P::Fp6Params>,
+    ) -> Fp6<P::Fp6Params> {
+        // (q^3-1)*(q+1)
+
+        // elt_q3 = elt^(q^3)
+        let mut elt_q3 = *elt;
+        elt_q3.conjugate();
+        // elt_q3_over_elt = elt^(q^3-1)
+        let elt_q3_over_elt = elt_q3 * elt_inv;
+        // alpha = elt^((q^3-1) * q)
+        let mut alpha = elt_q3_over_elt;
+        alpha.frobenius_map(1);
+        // beta = elt^((q^3-1)*(q+1))
+        alpha * &elt_q3_over_elt
+    }
+
+    #[allow(clippy::let_and_return)]
+    fn final_exponentiation_last_chunk(f: &Fp6<P::Fp6Params>) -> Fp6<P::Fp6Params> {
+        // hard_part
+        // From https://eprint.iacr.org/2020/351.pdf, Alg.6
+        // R0(x) := (-103*x^7 + 70*x^6 + 269*x^5 - 197*x^4 - 314*x^3 - 73*x^2 - 263*x - 220)
+        // R1(x) := (103*x^9 - 276*x^8 + 77*x^7 + 492*x^6 - 445*x^5 - 65*x^4 + 452*x^3 - 181*x^2 + 34*x + 229)
+        // f ^ R0(u) * (f ^ q) ^ R1(u) in a 2-NAF multi-exp fashion.
+
+        // steps 1,2,3
+        let f0 = *f;
+        let mut f0p = f0;
+        f0p.frobenius_map(1);
+        let f1 = Self::exp_by_x(f0);
+        let mut f1p = f1;
+        f1p.frobenius_map(1);
+        let f2 = Self::exp_by_x(f1);
+        let mut f2p = f2;
+        f2p.frobenius_map(1);
+        let f3 = Self::exp_by_x(f2);
+        let mut f3p = f3;
+        f3p.frobenius_map(1);
+        let f4 = Self::exp_by_x(f3);
+        let mut f4p = f4;
+        f4p.frobenius_map(1);
+        let f5 = Self::exp_by_x(f4);
+        let mut f5p = f5;
+        f5p.frobenius_map(1);
+        let f6 = Self::exp_by_x(f5);
+        let mut f6p = f6;
+        f6p.frobenius_map(1);
+        let f7 = Self::exp_by_x(f6);
+        let mut f7p = f7;
+        f7p.frobenius_map(1);
+
+        // step 4
+        let f8p = Self::exp_by_x(f7p);
+        let f9p = Self::exp_by_x(f8p);
+
+        // step 5
+        let mut f5p_p3 = f5p;
+        f5p_p3.conjugate();
+        let result1 = f3p * &f6p * &f5p_p3;
+
+        // step 6
+        let result2 = result1.square();
+        let f4_2p = f4 * &f2p;
+        let mut tmp1_p3 = f0 * &f1 * &f3 * &f4_2p * &f8p;
+        tmp1_p3.conjugate();
+        let result3 = result2 * &f5 * &f0p * &tmp1_p3;
+
+        // step 7
+        let result4 = result3.square();
+        let mut f7_p3 = f7;
+        f7_p3.conjugate();
+        let result5 = result4 * &f9p * &f7_p3;
+
+        // step 8
+        let result6 = result5.square();
+        let f2_4p = f2 * &f4p;
+        let f4_2p_5p = f4_2p * &f5p;
+        let mut tmp2_p3 = f2_4p * &f3 * &f3p;
+        tmp2_p3.conjugate();
+        let result7 = result6 * &f4_2p_5p * &f6 * &f7p * &tmp2_p3;
+
+        // step 9
+        let result8 = result7.square();
+        let mut tmp3_p3 = f0p * &f9p;
+        tmp3_p3.conjugate();
+        let result9 = result8 * &f0 * &f7 * &f1p * &tmp3_p3;
+
+        // step 10
+        let result10 = result9.square();
+        let f6p_8p = f6p * &f8p;
+        let f5_7p = f5 * &f7p;
+        let mut tmp4_p3 = f6p_8p;
+        tmp4_p3.conjugate();
+        let result11 = result10 * &f5_7p * &f2p * &tmp4_p3;
+
+        // step 11
+        let result12 = result11.square();
+        let f3_6 = f3 * &f6;
+        let f1_7 = f1 * &f7;
+        let mut tmp5_p3 = f1_7 * &f2;
+        tmp5_p3.conjugate();
+        let result13 = result12 * &f3_6 * &f9p * &tmp5_p3;
+
+        // step 12
+        let result14 = result13.square();
+        let mut tmp6_p3 = f4_2p * &f5_7p * &f6p_8p;
+        tmp6_p3.conjugate();
+        let result15 = result14 * &f0 * &f0p * &f3p * &f5p * &tmp6_p3;
+
+        // step 13
+        let result16 = result15.square();
+        let mut tmp7_p3 = f3_6;
+        tmp7_p3.conjugate();
+        let result17 = result16 * &f1p * &tmp7_p3;
+
+        // step 14
+        let result18 = result17.square();
+        let mut tmp8_p3 = f2_4p * &f4_2p_5p * &f9p;
+        tmp8_p3.conjugate();
+        let result19 = result18 * &f1_7 * &f5_7p * &f0p * &tmp8_p3;
+
+        result19
+    }
+}
+
+impl<P: BW6Parameters> PairingEngine for BW6<P> {
+    type Fr = <P::G1Parameters as ModelParameters>::ScalarField;
+    type G1Projective = G1Projective<P>;
+    type G1Affine = G1Affine<P>;
+    type G1Prepared = G1Prepared<P>;
+    type G2Projective = G2Projective<P>;
+    type G2Affine = G2Affine<P>;
+    type G2Prepared = G2Prepared<P>;
+    type Fq = P::Fp;
+    type Fqe = P::Fp;
+    type Fqk = Fp6<P::Fp6Params>;
+
+    fn miller_loop<'a, I>(i: I) -> Self::Fqk
+    where
+        I: IntoIterator<Item = &'a (Self::G1Prepared, Self::G2Prepared)>,
+    {
+        // Alg.5 in https://eprint.iacr.org/2020/351.pdf
+
+        let mut pairs_1 = vec![];
+        let mut pairs_2 = vec![];
+        for (p, q) in i {
+            if !p.is_zero() && !q.is_zero() {
+                pairs_1.push((p, q.ell_coeffs_1.iter()));
+                pairs_2.push((p, q.ell_coeffs_2.iter()));
+            }
+        }
+
+        // f_{u+1,Q}(P)
+        let mut f_1 = Self::Fqk::one();
+
+        for i in BitIteratorBE::new(P::ATE_LOOP_COUNT_1).skip(1) {
+            f_1.square_in_place();
+
+            for (p, ref mut coeffs) in &mut pairs_1 {
+                Self::ell(&mut f_1, coeffs.next().unwrap(), &p.0);
+            }
+            if i {
+                for &mut (p, ref mut coeffs) in &mut pairs_1 {
+                    Self::ell(&mut f_1, coeffs.next().unwrap(), &p.0);
+                }
+            }
+        }
+
+        if P::ATE_LOOP_COUNT_1_IS_NEGATIVE {
+            f_1.conjugate();
+        }
+
+        // f_{u^3-u^2-u,Q}(P)
+        let mut f_2 = Self::Fqk::one();
+
+        for i in (1..P::ATE_LOOP_COUNT_2.len()).rev() {
+            if i != P::ATE_LOOP_COUNT_2.len() - 1 {
+                f_2.square_in_place();
+            }
+
+            for (p, ref mut coeffs) in &mut pairs_2 {
+                Self::ell(&mut f_2, coeffs.next().unwrap(), &p.0);
+            }
+
+            let bit = P::ATE_LOOP_COUNT_2[i - 1];
+            match bit {
+                1 => {
+                    for &mut (p, ref mut coeffs) in &mut pairs_2 {
+                        Self::ell(&mut f_2, coeffs.next().unwrap(), &p.0);
+                    }
+                }
+                -1 => {
+                    for &mut (p, ref mut coeffs) in &mut pairs_2 {
+                        Self::ell(&mut f_2, coeffs.next().unwrap(), &p.0);
+                    }
+                }
+                _ => continue,
+            }
+        }
+
+        if P::ATE_LOOP_COUNT_2_IS_NEGATIVE {
+            f_2.conjugate();
+        }
+
+        f_2.frobenius_map(1);
+
+        f_1 * &f_2
+    }
+
+    fn final_exponentiation(f: &Self::Fqk) -> Option<Self::Fqk> {
+        Some(Self::final_exponentiation(f))
+    }
+}

diff --git a/arkworks/algebra/ec/src/models/mnt4/g1.rs b/arkworks/algebra/ec/src/models/mnt4/g1.rs
new file mode 100644
index 00000000..cade542b
--- /dev/null
+++ b/arkworks/algebra/ec/src/models/mnt4/g1.rs
@@ -0,0 +1,57 @@
+use crate::{
+    mnt4::MNT4Parameters,
+    short_weierstrass_jacobian::{GroupAffine, GroupProjective},
+    AffineCurve,
+};
+use ark_ff::{bytes::ToBytes, Fp2};
+use ark_std::io::{Result as IoResult, Write};
+
+pub type G1Affine<P> = GroupAffine<<P as MNT4Parameters>::G1Parameters>;
+pub type G1Projective<P> = GroupProjective<<P as MNT4Parameters>::G1Parameters>;
+
+#[derive(Derivative)]
+#[derivative(
+    Copy(bound = "P: MNT4Parameters"),
+    Clone(bound = "P: MNT4Parameters"),
+    Debug(bound = "P: MNT4Parameters"),
+    PartialEq(bound = "P: MNT4Parameters"),
+    Eq(bound = "P: MNT4Parameters")
+)]
+pub struct G1Prepared<P: MNT4Parameters> {
+    pub x: P::Fp,
+    pub y: P::Fp,
+    pub x_twist: Fp2<P::Fp2Params>,
+    pub y_twist: Fp2<P::Fp2Params>,
+}
+
+impl<P: MNT4Parameters> From<G1Affine<P>> for G1Prepared<P> {
+    fn from(g1: G1Affine<P>) -> Self {
+        let mut x_twist = P::TWIST;
+        x_twist.mul_assign_by_fp(&g1.x);
+
+        let mut y_twist = P::TWIST;
+        y_twist.mul_assign_by_fp(&g1.y);
+
+        Self {
+            x: g1.x,
+            y: g1.y,
+            x_twist,
+            y_twist,
+        }
+    }
+}
+
+impl<P: MNT4Parameters> Default for G1Prepared<P> {
+    fn default() -> Self {
+        Self::from(G1Affine::<P>::prime_subgroup_generator())
+    }
+}
+
+impl<P: MNT4Parameters> ToBytes for G1Prepared<P> {
+    fn write<W: Write>(&self, mut writer: W) -> IoResult<()> {
+        self.x.write(&mut writer)?;
+        self.y.write(&mut writer)?;
+        self.x_twist.write(&mut writer)?;
+        self.y_twist.write(&mut writer)
+    }
+}

diff --git a/arkworks/algebra/ec/src/models/mnt4/g2.rs b/arkworks/algebra/ec/src/models/mnt4/g2.rs
new file mode 100644
index 00000000..8b0e0586
--- /dev/null
+++ b/arkworks/algebra/ec/src/models/mnt4/g2.rs
@@ -0,0 +1,147 @@
+use crate::{
+    mnt4::MNT4Parameters,
+    models::mnt4::MNT4,
+    short_weierstrass_jacobian::{GroupAffine, GroupProjective},
+    AffineCurve,
+};
+use ark_ff::{
+    bytes::ToBytes,
+    fields::{Field, Fp2},
+};
+use ark_std::{
+    io::{Result as IoResult, Write},
+    vec::Vec,
+};
+use num_traits::One;
+
+pub type G2Affine<P> = GroupAffine<<P as MNT4Parameters>::G2Parameters>;
+pub type G2Projective<P> = GroupProjective<<P as MNT4Parameters>::G2Parameters>;
+
+#[derive(Derivative)]
+#[derivative(
+    Clone(bound = "P: MNT4Parameters"),
+    Debug(bound = "P: MNT4Parameters"),
+    PartialEq(bound = "P: MNT4Parameters"),
+    Eq(bound = "P: MNT4Parameters")
+)]
+pub struct G2Prepared<P: MNT4Parameters> {
+    pub x: Fp2<P::Fp2Params>,
+    pub y: Fp2<P::Fp2Params>,
+    pub x_over_twist: Fp2<P::Fp2Params>,
+    pub y_over_twist: Fp2<P::Fp2Params>,
+    pub double_coefficients: Vec<AteDoubleCoefficients<P>>,
+    pub addition_coefficients: Vec<AteAdditionCoefficients<P>>,
+}
+
+impl<P: MNT4Parameters> Default for G2Prepared<P> {
+    fn default() -> Self {
+        Self::from(G2Affine::<P>::prime_subgroup_generator())
+    }
+}
+
+impl<P: MNT4Parameters> ToBytes for G2Prepared<P> {
+    fn write<W: Write>(&self, _writer: W) -> IoResult<()> {
+        unimplemented!()
+    }
+}
+
+impl<P: MNT4Parameters> From<G2Affine<P>> for G2Prepared<P> {
+    fn from(g2: G2Affine<P>) -> Self {
+        let twist_inv = P::TWIST.inverse().unwrap();
+
+        let mut g2p = G2Prepared {
+            x: g2.x,
+            y: g2.y,
+            x_over_twist: g2.x * &twist_inv,
+            y_over_twist: g2.y * &twist_inv,
+            double_coefficients: vec![],
+            addition_coefficients: vec![],
+        };
+
+        let mut r = G2ProjectiveExtended {
+            x: g2.x,
+            y: g2.y,
+            z: <Fp2<P::Fp2Params>>::one(),
+            t: <Fp2<P::Fp2Params>>::one(),
+        };
+
+        for (idx, value) in P::ATE_LOOP_COUNT.iter().rev().enumerate() {
+            let mut tmp = *value;
+            let skip_extraneous_bits = 64 - value.leading_zeros();
+            let mut v = Vec::with_capacity(16);
+            for i in 0..64 {
+                if idx == 0 && (i == 0 || i >= skip_extraneous_bits) {
+                    continue;
+                }
+                v.push(tmp & 1 == 1);
+                tmp >>= 1;
+            }
+
+            for bit in v.iter().rev() {
+                let (r2, coeff) = MNT4::<P>::doubling_step_for_flipped_miller_loop(&r);
+                g2p.double_coefficients.push(coeff);
+                r = r2;
+
+                if *bit {
+                    let (r2, coeff) =
+                        MNT4::<P>::mixed_addition_step_for_flipped_miller_loop(&g2.x, &g2.y, &r);
+                    g2p.addition_coefficients.push(coeff);
+                    r = r2;
+                }
+
+                tmp >>= 1;
+            }
+        }
+
+        if P::ATE_IS_LOOP_COUNT_NEG {
+            let rz_inv = r.z.inverse().unwrap();
+            let rz2_inv = rz_inv.square();
+            let rz3_inv = rz_inv * &rz2_inv;
+
+            let minus_r_affine_x = r.x * &rz2_inv;
+            let minus_r_affine_y = -r.y * &rz3_inv;
+
+            let add_result = MNT4::<P>::mixed_addition_step_for_flipped_miller_loop(
+                &minus_r_affine_x,
+                &minus_r_affine_y,
+                &r,
+            );
+            g2p.addition_coefficients.push(add_result.1);
+        }
+
+        g2p
+    }
+}
+
+pub(super) struct G2ProjectiveExtended<P: MNT4Parameters> {
+    pub(crate) x: Fp2<P::Fp2Params>,
+    pub(crate) y: Fp2<P::Fp2Params>,
+    pub(crate) z: Fp2<P::Fp2Params>,
+    pub(crate) t: Fp2<P::Fp2Params>,
+}
+
+#[derive(Derivative)]
+#[derivative(
+    Clone(bound = "P: MNT4Parameters"),
+    Debug(bound = "P: MNT4Parameters"),
+    PartialEq(bound = "P: MNT4Parameters"),
+    Eq(bound = "P: MNT4Parameters")
+)]
+pub struct AteDoubleCoefficients<P: MNT4Parameters> {
+    pub c_h: Fp2<P::Fp2Params>,
+    pub c_4c: Fp2<P::Fp2Params>,
+    pub c_j: Fp2<P::Fp2Params>,
+    pub c_l: Fp2<P::Fp2Params>,
+}
+
+#[derive(Derivative)]
+#[derivative(
+    Clone(bound = "P: MNT4Parameters"),
+    Debug(bound = "P: MNT4Parameters"),
+    PartialEq(bound = "P: MNT4Parameters"),
+    Eq(bound = "P: MNT4Parameters")
+)]
+pub struct AteAdditionCoefficients<P: MNT4Parameters> {
+    pub c_l1: Fp2<P::Fp2Params>,
+    pub c_rz: Fp2<P::Fp2Params>,
+}
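One way to convince yourself the precomputation above matches the loop that later consumes it: the number of stored coefficients is fully determined by the bit pattern of `ATE_LOOP_COUNT`. A quick property-check sketch (illustrative only, assuming the vendored `ark-mnt4-753` crate; `double_coefficients` and `addition_coefficients` are the public fields defined above):

```rust
use ark_ec::mnt4::{G2Prepared, MNT4Parameters};
use ark_ec::AffineCurve;
use ark_ff::BitIteratorBE;
use ark_mnt4_753::{G2Affine, Parameters};

fn main() {
    let q = G2Affine::prime_subgroup_generator();
    let q_prep = G2Prepared::<Parameters>::from(q);

    // One doubling coefficient per processed bit of ATE_LOOP_COUNT (the
    // leading one is consumed implicitly), one addition coefficient per
    // remaining set bit, plus one extra addition step when the loop
    // count is negative.
    let bits: Vec<bool> =
        BitIteratorBE::without_leading_zeros(<Parameters as MNT4Parameters>::ATE_LOOP_COUNT)
            .skip(1)
            .collect();
    assert_eq!(q_prep.double_coefficients.len(), bits.len());

    let mut adds = bits.iter().filter(|b| **b).count();
    if <Parameters as MNT4Parameters>::ATE_IS_LOOP_COUNT_NEG {
        adds += 1;
    }
    assert_eq!(q_prep.addition_coefficients.len(), adds);
}
```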

diff --git a/arkworks/algebra/ec/src/models/mnt4/mod.rs b/arkworks/algebra/ec/src/models/mnt4/mod.rs
new file mode 100644
index 00000000..05eafa7e
--- /dev/null
+++ b/arkworks/algebra/ec/src/models/mnt4/mod.rs
@@ -0,0 +1,219 @@
+use {
+    crate::{
+        models::{ModelParameters, SWModelParameters},
+        PairingEngine,
+    },
+    ark_ff::{
+        fp2::{Fp2, Fp2Parameters},
+        fp4::{Fp4, Fp4Parameters},
+        BitIteratorBE, Field, PrimeField, SquareRootField,
+    },
+    num_traits::{One, Zero},
+};
+
+use core::marker::PhantomData;
+
+pub mod g1;
+pub mod g2;
+
+use self::g2::{AteAdditionCoefficients, AteDoubleCoefficients, G2ProjectiveExtended};
+pub use self::{
+    g1::{G1Affine, G1Prepared, G1Projective},
+    g2::{G2Affine, G2Prepared, G2Projective},
+};
+
+pub type GT<P> = Fp4<P>;
+
+pub trait MNT4Parameters: 'static {
+    const TWIST: Fp2<Self::Fp2Params>;
+    const TWIST_COEFF_A: Fp2<Self::Fp2Params>;
+    const ATE_LOOP_COUNT: &'static [u64];
+    const ATE_IS_LOOP_COUNT_NEG: bool;
+    const FINAL_EXPONENT_LAST_CHUNK_1: <Self::Fp as PrimeField>::BigInt;
+    const FINAL_EXPONENT_LAST_CHUNK_W0_IS_NEG: bool;
+    const FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0: <Self::Fp as PrimeField>::BigInt;
+    type Fp: PrimeField + SquareRootField + Into<<Self::Fp as PrimeField>::BigInt>;
+    type Fr: PrimeField + SquareRootField + Into<<Self::Fr as PrimeField>::BigInt>;
+    type Fp2Params: Fp2Parameters<Fp = Self::Fp>;
+    type Fp4Params: Fp4Parameters<Fp2Params = Self::Fp2Params>;
+    type G1Parameters: SWModelParameters<BaseField = Self::Fp>;
+    type G2Parameters: SWModelParameters<
+        BaseField = Fp2<Self::Fp2Params>,
+        ScalarField = <Self::G1Parameters as ModelParameters>::ScalarField,
+    >;
+}
+
+#[derive(Derivative)]
+#[derivative(Copy, Clone, PartialEq, Eq, Debug, Hash)]
+pub struct MNT4<P: MNT4Parameters>(PhantomData<fn() -> P>);
+
+impl<P: MNT4Parameters> MNT4<P> {
+    fn doubling_step_for_flipped_miller_loop(
+        r: &G2ProjectiveExtended<P>,
+    ) -> (G2ProjectiveExtended<P>, AteDoubleCoefficients<P>) {
+        let a = r.t.square();
+        let b = r.x.square();
+        let c = r.y.square();
+        let d = c.square();
+        let e = (r.x + &c).square() - &b - &d;
+        let f = (b + &b + &b) + &(P::TWIST_COEFF_A * &a);
+        let g = f.square();
+
+        let d_eight = d.double().double().double();
+
+        let x = -(e + &e + &e + &e) + &g;
+        let y = -d_eight + &(f * &(e + &e - &x));
+        let z = (r.y + &r.z).square() - &c - &r.z.square();
+        let t = z.square();
+
+        let r2 = G2ProjectiveExtended { x, y, z, t };
+        let coeff = AteDoubleCoefficients {
+            c_h: (r2.z + &r.t).square() - &r2.t - &a,
+            c_4c: c + &c + &c + &c,
+            c_j: (f + &r.t).square() - &g - &a,
+            c_l: (f + &r.x).square() - &g - &b,
+        };
+
+        (r2, coeff)
+    }
+
+    fn mixed_addition_step_for_flipped_miller_loop(
+        x: &Fp2<P::Fp2Params>,
+        y: &Fp2<P::Fp2Params>,
+        r: &G2ProjectiveExtended<P>,
+    ) -> (G2ProjectiveExtended<P>, AteAdditionCoefficients<P>) {
+        let a = y.square();
+        let b = r.t * x;
+        let d = ((r.z + y).square() - &a - &r.t) * &r.t;
+        let h = b - &r.x;
+        let i = h.square();
+        let e = i + &i + &i + &i;
+        let j = h * &e;
+        let v = r.x * &e;
+        let l1 = d - &(r.y + &r.y);
+
+        let x = l1.square() - &j - &(v + &v);
+        let y = l1 * &(v - &x) - &(j * &(r.y + &r.y));
+        let z = (r.z + &h).square() - &r.t - &i;
+        let t = z.square();
+
+        let r2 = G2ProjectiveExtended { x, y, z, t };
+        let coeff = AteAdditionCoefficients { c_l1: l1, c_rz: z };
+
+        (r2, coeff)
+    }
+
+    pub fn ate_miller_loop(p: &G1Prepared<P>, q: &G2Prepared<P>) -> Fp4<P::Fp4Params> {
+        let l1_coeff = Fp2::new(p.x, P::Fp::zero()) - &q.x_over_twist;
+
+        let mut f = <Fp4<P::Fp4Params>>::one();
+
+        let mut add_idx: usize = 0;
+
+        // code below gets executed for all bits (EXCEPT the MSB itself) of
+        // mnt4_param_p (skipping leading zeros) in MSB to LSB order
+
+        for (bit, dc) in BitIteratorBE::without_leading_zeros(P::ATE_LOOP_COUNT)
+            .skip(1)
+            .zip(&q.double_coefficients)
+        {
+            let g_rr_at_p = Fp4::new(
+                -dc.c_4c - &(dc.c_j * &p.x_twist) + &dc.c_l,
+                dc.c_h * &p.y_twist,
+            );
+
+            f = f.square() * &g_rr_at_p;
+
+            if bit {
+                let ac = &q.addition_coefficients[add_idx];
+                add_idx += 1;
+
+                let g_rq_at_p = Fp4::new(
+                    ac.c_rz * &p.y_twist,
+                    -(q.y_over_twist * &ac.c_rz + &(l1_coeff * &ac.c_l1)),
+                );
+                f *= &g_rq_at_p;
+            }
+        }
+
+        if P::ATE_IS_LOOP_COUNT_NEG {
+            let ac = &q.addition_coefficients[add_idx];
+
+            let g_rnegr_at_p = Fp4::new(
+                ac.c_rz * &p.y_twist,
+                -(q.y_over_twist * &ac.c_rz + &(l1_coeff * &ac.c_l1)),
+            );
+            f = (f * &g_rnegr_at_p).inverse().unwrap();
+        }
+
+        f
+    }
+
+    pub fn final_exponentiation(value: &Fp4<P::Fp4Params>) -> GT<P::Fp4Params> {
+        let value_inv = value.inverse().unwrap();
+        let value_to_first_chunk = Self::final_exponentiation_first_chunk(value, &value_inv);
+        let value_inv_to_first_chunk = Self::final_exponentiation_first_chunk(&value_inv, value);
+        Self::final_exponentiation_last_chunk(&value_to_first_chunk, &value_inv_to_first_chunk)
+    }
+
+    fn final_exponentiation_first_chunk(
+        elt: &Fp4<P::Fp4Params>,
+        elt_inv: &Fp4<P::Fp4Params>,
+    ) -> Fp4<P::Fp4Params> {
+        // (q^2-1)
+
+        // elt_q2 = elt^(q^2)
+        let mut elt_q2 = *elt;
+        elt_q2.conjugate();
+        // elt_q2_over_elt = elt^(q^2-1)
+        elt_q2 * elt_inv
+    }
+
+    fn final_exponentiation_last_chunk(
+        elt: &Fp4<P::Fp4Params>,
+        elt_inv: &Fp4<P::Fp4Params>,
+    ) -> Fp4<P::Fp4Params> {
+        let elt_clone = *elt;
+        let elt_inv_clone = *elt_inv;
+
+        let mut elt_q = *elt;
+        elt_q.frobenius_map(1);
+
+        let w1_part = elt_q.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_1);
+        let w0_part = if P::FINAL_EXPONENT_LAST_CHUNK_W0_IS_NEG {
+            elt_inv_clone.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0)
+        } else {
+            elt_clone.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0)
+        };
+
+        w1_part * &w0_part
+    }
+}
+
+impl<P: MNT4Parameters> PairingEngine for MNT4<P> {
+    type Fr = <P::G1Parameters as ModelParameters>::ScalarField;
+    type G1Projective = G1Projective<P>;
+    type G1Affine = G1Affine<P>;
+    type G1Prepared = G1Prepared<P>;
+    type G2Projective = G2Projective<P>;
+    type G2Affine = G2Affine<P>;
+    type G2Prepared = G2Prepared<P>;
+    type Fq = P::Fp;
+    type Fqe = Fp2<P::Fp2Params>;
+    type Fqk = Fp4<P::Fp4Params>;
+
+    fn miller_loop<'a, I>(i: I) -> Self::Fqk
+    where
+        I: IntoIterator<Item = &'a (Self::G1Prepared, Self::G2Prepared)>,
+    {
+        let mut result = Self::Fqk::one();
+        for (p, q) in i {
+            result *= &Self::ate_miller_loop(p, q);
+        }
+        result
+    }
+
+    fn final_exponentiation(r: &Self::Fqk) -> Option<Self::Fqk> {
+        Some(Self::final_exponentiation(r))
+    }
+}
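Because all the per-point work lives in `G2Prepared`, preparing a G2 point once and reusing it across many Miller loops is the intended usage pattern. A minimal sketch (not part of this diff), assuming the vendored `ark-mnt4-753` crate:

```rust
use ark_ec::{PairingEngine, ProjectiveCurve};
use ark_ff::UniformRand;
use ark_mnt4_753::{G1Projective, G2Projective, MNT4_753};

fn main() {
    let mut rng = ark_std::test_rng();
    // Prepare Q once; its precomputed line coefficients are reused for
    // every Miller loop below.
    let q = G2Projective::rand(&mut rng).into_affine();
    let q_prep: <MNT4_753 as PairingEngine>::G2Prepared = q.into();

    for _ in 0..3 {
        let p = G1Projective::rand(&mut rng).into_affine();
        let p_prep: <MNT4_753 as PairingEngine>::G1Prepared = p.into();
        let f = MNT4_753::miller_loop(core::iter::once(&(p_prep, q_prep.clone())));
        assert_eq!(
            MNT4_753::final_exponentiation(&f).unwrap(),
            MNT4_753::pairing(p, q)
        );
    }
}
```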

diff --git a/arkworks/algebra/ec/src/models/mnt6/g1.rs b/arkworks/algebra/ec/src/models/mnt6/g1.rs
new file mode 100644
index 00000000..60d6ca43
--- /dev/null
+++ b/arkworks/algebra/ec/src/models/mnt6/g1.rs
@@ -0,0 +1,57 @@
+use crate::{
+    mnt6::MNT6Parameters,
+    short_weierstrass_jacobian::{GroupAffine, GroupProjective},
+    AffineCurve,
+};
+use ark_ff::{bytes::ToBytes, Fp3};
+use ark_std::io::{Result as IoResult, Write};
+
+pub type G1Affine<P> = GroupAffine<<P as MNT6Parameters>::G1Parameters>;
+pub type G1Projective<P> = GroupProjective<<P as MNT6Parameters>::G1Parameters>;
+
+#[derive(Derivative)]
+#[derivative(
+    Copy(bound = "P: MNT6Parameters"),
+    Clone(bound = "P: MNT6Parameters"),
+    Debug(bound = "P: MNT6Parameters"),
+    PartialEq(bound = "P: MNT6Parameters"),
+    Eq(bound = "P: MNT6Parameters")
+)]
+pub struct G1Prepared<P: MNT6Parameters> {
+    pub x: P::Fp,
+    pub y: P::Fp,
+    pub x_twist: Fp3<P::Fp3Params>,
+    pub y_twist: Fp3<P::Fp3Params>,
+}
+
+impl<P: MNT6Parameters> From<G1Affine<P>> for G1Prepared<P> {
+    fn from(g1: G1Affine<P>) -> Self {
+        let mut x_twist = P::TWIST;
+        x_twist.mul_assign_by_fp(&g1.x);
+
+        let mut y_twist = P::TWIST;
+        y_twist.mul_assign_by_fp(&g1.y);
+
+        Self {
+            x: g1.x,
+            y: g1.y,
+            x_twist,
+            y_twist,
+        }
+    }
+}
+
+impl<P: MNT6Parameters> Default for G1Prepared<P> {
+    fn default() -> Self {
+        Self::from(G1Affine::<P>::prime_subgroup_generator())
+    }
+}
+
+impl<P: MNT6Parameters> ToBytes for G1Prepared<P> {
+    fn write<W: Write>(&self, mut writer: W) -> IoResult<()> {
+        self.x.write(&mut writer)?;
+        self.y.write(&mut writer)?;
+        self.x_twist.write(&mut writer)?;
+        self.y_twist.write(&mut writer)
+    }
+}

diff --git a/arkworks/algebra/ec/src/models/mnt6/g2.rs b/arkworks/algebra/ec/src/models/mnt6/g2.rs
new file mode 100644
index 00000000..26aaac54
--- /dev/null
+++ b/arkworks/algebra/ec/src/models/mnt6/g2.rs
@@ -0,0 +1,147 @@
+use crate::{
+    mnt6::MNT6Parameters,
+    models::mnt6::MNT6,
+    short_weierstrass_jacobian::{GroupAffine, GroupProjective},
+    AffineCurve,
+};
+use ark_ff::{
+    bytes::ToBytes,
+    fields::{Field, Fp3},
+};
+use ark_std::{
+    io::{Result as IoResult, Write},
+    vec::Vec,
+};
+use num_traits::One;
+
+pub type G2Affine<P> = GroupAffine<<P as MNT6Parameters>::G2Parameters>;
+pub type G2Projective<P> = GroupProjective<<P as MNT6Parameters>::G2Parameters>;
+
+#[derive(Derivative)]
+#[derivative(
+    Clone(bound = "P: MNT6Parameters"),
+    Debug(bound = "P: MNT6Parameters"),
+    PartialEq(bound = "P: MNT6Parameters"),
+    Eq(bound = "P: MNT6Parameters")
+)]
+pub struct G2Prepared<P: MNT6Parameters> {
+    pub x: Fp3<P::Fp3Params>,
+    pub y: Fp3<P::Fp3Params>,
+    pub x_over_twist: Fp3<P::Fp3Params>,
+    pub y_over_twist: Fp3<P::Fp3Params>,
+    pub double_coefficients: Vec<AteDoubleCoefficients<P>>,
+    pub addition_coefficients: Vec<AteAdditionCoefficients<P>>,
+}
+
+impl<P: MNT6Parameters> Default for G2Prepared<P> {
+    fn default() -> Self {
+        Self::from(G2Affine::<P>::prime_subgroup_generator())
+    }
+}
+
+impl<P: MNT6Parameters> ToBytes for G2Prepared<P> {
+    fn write<W: Write>(&self, _writer: W) -> IoResult<()> {
+        unimplemented!()
+    }
+}
+
+impl<P: MNT6Parameters> From<G2Affine<P>> for G2Prepared<P> {
+    fn from(g2: G2Affine<P>) -> Self {
+        let twist_inv = P::TWIST.inverse().unwrap();
+
+        let mut g2p = G2Prepared {
+            x: g2.x,
+            y: g2.y,
+            x_over_twist: g2.x * &twist_inv,
+            y_over_twist: g2.y * &twist_inv,
+            double_coefficients: vec![],
+            addition_coefficients: vec![],
+        };
+
+        let mut r = G2ProjectiveExtended {
+            x: g2.x,
+            y: g2.y,
+            z: <Fp3<P::Fp3Params>>::one(),
+            t: <Fp3<P::Fp3Params>>::one(),
+        };
+
+        for (idx, value) in P::ATE_LOOP_COUNT.iter().rev().enumerate() {
+            let mut tmp = *value;
+            let skip_extraneous_bits = 64 - value.leading_zeros();
+            let mut v = Vec::with_capacity(16);
+            for i in 0..64 {
+                if idx == 0 && (i == 0 || i >= skip_extraneous_bits) {
+                    continue;
+                }
+                v.push(tmp & 1 == 1);
+                tmp >>= 1;
+            }
+
+            for bit in v.iter().rev() {
+                let (r2, coeff) = MNT6::<P>::doubling_step_for_flipped_miller_loop(&r);
+                g2p.double_coefficients.push(coeff);
+                r = r2;
+
+                if *bit {
+                    let (r2, coeff) =
+                        MNT6::<P>::mixed_addition_step_for_flipped_miller_loop(&g2.x, &g2.y, &r);
+                    g2p.addition_coefficients.push(coeff);
+                    r = r2;
+                }
+
+                tmp >>= 1;
+            }
+        }
+
+        if P::ATE_IS_LOOP_COUNT_NEG {
+            let rz_inv = r.z.inverse().unwrap();
+            let rz2_inv = rz_inv.square();
+            let rz3_inv = rz_inv * &rz2_inv;
+
+            let minus_r_affine_x = r.x * &rz2_inv;
+            let minus_r_affine_y = -r.y * &rz3_inv;
+
+            let add_result = MNT6::<P>::mixed_addition_step_for_flipped_miller_loop(
+                &minus_r_affine_x,
+                &minus_r_affine_y,
+                &r,
+            );
+            g2p.addition_coefficients.push(add_result.1);
+        }
+
+        g2p
+    }
+}
+
+pub(super) struct G2ProjectiveExtended<P: MNT6Parameters> {
+    pub(crate) x: Fp3<P::Fp3Params>,
+    pub(crate) y: Fp3<P::Fp3Params>,
+    pub(crate) z: Fp3<P::Fp3Params>,
+    pub(crate) t: Fp3<P::Fp3Params>,
+}
+
+#[derive(Derivative)]
+#[derivative(
+    Clone(bound = "P: MNT6Parameters"),
+    Debug(bound = "P: MNT6Parameters"),
+    PartialEq(bound = "P: MNT6Parameters"),
+    Eq(bound = "P: MNT6Parameters")
+)]
+pub struct AteDoubleCoefficients<P: MNT6Parameters> {
+    pub c_h: Fp3<P::Fp3Params>,
+    pub c_4c: Fp3<P::Fp3Params>,
+    pub c_j: Fp3<P::Fp3Params>,
+    pub c_l: Fp3<P::Fp3Params>,
+}
+
+#[derive(Derivative)]
+#[derivative(
+    Clone(bound = "P: MNT6Parameters"),
+    Debug(bound = "P: MNT6Parameters"),
+    PartialEq(bound = "P: MNT6Parameters"),
+    Eq(bound = "P: MNT6Parameters")
+)]
+pub struct AteAdditionCoefficients<P: MNT6Parameters> {
+    pub c_l1: Fp3<P::Fp3Params>,
+    pub c_rz: Fp3<P::Fp3Params>,
+}

diff --git a/arkworks/algebra/ec/src/models/mnt6/mod.rs b/arkworks/algebra/ec/src/models/mnt6/mod.rs
new file mode 100644
index 00000000..89984dd1
--- /dev/null
+++ b/arkworks/algebra/ec/src/models/mnt6/mod.rs
@@ -0,0 +1,225 @@
+use {
+    crate::{
+        models::{ModelParameters, SWModelParameters},
+        PairingEngine,
+    },
+    ark_ff::{
+        fp3::{Fp3, Fp3Parameters},
+        fp6_2over3::{Fp6, Fp6Parameters},
+        BitIteratorBE, Field, PrimeField, SquareRootField,
+    },
+    num_traits::{One, Zero},
+};
+
+use core::marker::PhantomData;
+
+pub mod g1;
+pub mod g2;
+
+use self::g2::{AteAdditionCoefficients, AteDoubleCoefficients, G2ProjectiveExtended};
+pub use self::{
+    g1::{G1Affine, G1Prepared, G1Projective},
+    g2::{G2Affine, G2Prepared, G2Projective},
+};
+
+pub type GT<P> = Fp6<P>;
+
+pub trait MNT6Parameters: 'static {
+    const TWIST: Fp3<Self::Fp3Params>;
+    const TWIST_COEFF_A: Fp3<Self::Fp3Params>;
+    const ATE_LOOP_COUNT: &'static [u64];
+    const ATE_IS_LOOP_COUNT_NEG: bool;
+    const FINAL_EXPONENT_LAST_CHUNK_1: <Self::Fp as PrimeField>::BigInt;
+    const FINAL_EXPONENT_LAST_CHUNK_W0_IS_NEG: bool;
+    const FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0: <Self::Fp as PrimeField>::BigInt;
+    type Fp: PrimeField + SquareRootField + Into<<Self::Fp as PrimeField>::BigInt>;
+    type Fr: PrimeField + SquareRootField + Into<<Self::Fr as PrimeField>::BigInt>;
+    type Fp3Params: Fp3Parameters<Fp = Self::Fp>;
+    type Fp6Params: Fp6Parameters<Fp3Params = Self::Fp3Params>;
+    type G1Parameters: SWModelParameters<BaseField = Self::Fp>;
+    type G2Parameters: SWModelParameters<
+        BaseField = Fp3<Self::Fp3Params>,
+        ScalarField = <Self::G1Parameters as ModelParameters>::ScalarField,
+    >;
+}
+
+#[derive(Derivative)]
+#[derivative(Copy, Clone, PartialEq, Eq, Debug, Hash)]
+pub struct MNT6<P: MNT6Parameters>(PhantomData<fn() -> P>);
+
+impl<P: MNT6Parameters> MNT6<P> {
+    fn doubling_step_for_flipped_miller_loop(
+        r: &G2ProjectiveExtended<P>,
+    ) -> (G2ProjectiveExtended<P>, AteDoubleCoefficients<P>) {
+        let a = r.t.square();
+        let b = r.x.square();
+        let c = r.y.square();
+        let d = c.square();
+        let e = (r.x + &c).square() - &b - &d;
+        let f = (b + &b + &b) + &(P::TWIST_COEFF_A * &a);
+        let g = f.square();
+
+        let d_eight = d.double().double().double();
+
+        let e2 = e.double();
+        let x = g - &e2.double();
+        let y = -d_eight + &(f * &(e2 - &x));
+        let z = (r.y + &r.z).square() - &c - &r.z.square();
+        let t = z.square();
+
+        let r2 = G2ProjectiveExtended { x, y, z, t };
+        let coeff = AteDoubleCoefficients {
+            c_h: (r2.z + &r.t).square() - &r2.t - &a,
+            c_4c: c + &c + &c + &c,
+            c_j: (f + &r.t).square() - &g - &a,
+            c_l: (f + &r.x).square() - &g - &b,
+        };
+
+        (r2, coeff)
+    }
+
+    fn mixed_addition_step_for_flipped_miller_loop(
+        x: &Fp3<P::Fp3Params>,
+        y: &Fp3<P::Fp3Params>,
+        r: &G2ProjectiveExtended<P>,
+    ) -> (G2ProjectiveExtended<P>, AteAdditionCoefficients<P>) {
+        let a = y.square();
+        let b = r.t * x;
+        let d = ((r.z + y).square() - &a - &r.t) * &r.t;
+        let h = b - &r.x;
+        let i = h.square();
+        let e = i + &i + &i + &i;
+        let j = h * &e;
+        let v = r.x * &e;
+        let ry2 = r.y.double();
+        let l1 = d - &ry2;
+
+        let x = l1.square() - &j - &(v + &v);
+        let y = l1 * &(v - &x) - &(j * &ry2);
+        let z = (r.z + &h).square() - &r.t - &i;
+        let t = z.square();
+
+        let r2 = G2ProjectiveExtended { x, y, z, t };
+        let coeff = AteAdditionCoefficients { c_l1: l1, c_rz: z };
+
+        (r2, coeff)
+    }
+
+    pub fn ate_miller_loop(p: &G1Prepared<P>, q: &G2Prepared<P>) -> Fp6<P::Fp6Params> {
+        let l1_coeff = Fp3::new(p.x, P::Fp::zero(), P::Fp::zero()) - &q.x_over_twist;
+
+        let mut f = <Fp6<P::Fp6Params>>::one();
+
+        let mut add_idx: usize = 0;
+
+        // code below gets executed for all bits (EXCEPT the MSB itself) of
+        // mnt6_param_p (skipping leading zeros) in MSB to LSB order
+        for (bit, dc) in BitIteratorBE::without_leading_zeros(P::ATE_LOOP_COUNT)
+            .skip(1)
+            .zip(&q.double_coefficients)
+        {
+            let g_rr_at_p = Fp6::new(
+                dc.c_l - &dc.c_4c - &(dc.c_j * &p.x_twist),
+                dc.c_h * &p.y_twist,
+            );
+
+            f = f.square() * &g_rr_at_p;
+
+            if bit {
+                let ac = &q.addition_coefficients[add_idx];
+                add_idx += 1;
+
+                let g_rq_at_p = Fp6::new(
+                    ac.c_rz * &p.y_twist,
+                    -(q.y_over_twist * &ac.c_rz + &(l1_coeff * &ac.c_l1)),
+                );
+                f *= &g_rq_at_p;
+            }
+        }
+
+        if P::ATE_IS_LOOP_COUNT_NEG {
+            let ac = &q.addition_coefficients[add_idx];
+
+            let g_rnegr_at_p = Fp6::new(
+                ac.c_rz * &p.y_twist,
+                -(q.y_over_twist * &ac.c_rz + &(l1_coeff * &ac.c_l1)),
+            );
+            f = (f * &g_rnegr_at_p).inverse().unwrap();
+        }
+
+        f
+    }
+
+    pub fn final_exponentiation(value: &Fp6<P::Fp6Params>) -> GT<P::Fp6Params> {
+        let value_inv = value.inverse().unwrap();
+        let value_to_first_chunk = Self::final_exponentiation_first_chunk(value, &value_inv);
+        let value_inv_to_first_chunk = Self::final_exponentiation_first_chunk(&value_inv, value);
+        Self::final_exponentiation_last_chunk(&value_to_first_chunk, &value_inv_to_first_chunk)
+    }
+
+    fn final_exponentiation_first_chunk(
+        elt: &Fp6<P::Fp6Params>,
+        elt_inv: &Fp6<P::Fp6Params>,
+    ) -> Fp6<P::Fp6Params> {
+        // (q^3-1)*(q+1)
+
+        // elt_q3 = elt^(q^3)
+        let mut elt_q3 = *elt;
+        elt_q3.conjugate();
+        // elt_q3_over_elt = elt^(q^3-1)
+        let elt_q3_over_elt = elt_q3 * elt_inv;
+        // alpha = elt^((q^3-1) * q)
+        let mut alpha = elt_q3_over_elt;
+        alpha.frobenius_map(1);
+        // beta = elt^((q^3-1)*(q+1))
+        alpha * &elt_q3_over_elt
+    }
+
+    fn final_exponentiation_last_chunk(
+        elt: &Fp6<P::Fp6Params>,
+        elt_inv: &Fp6<P::Fp6Params>,
+    ) -> Fp6<P::Fp6Params> {
+        let elt_clone = *elt;
+        let elt_inv_clone = *elt_inv;
+
+        let mut elt_q = *elt;
+        elt_q.frobenius_map(1);
+
+        let w1_part = elt_q.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_1);
+        let w0_part = if P::FINAL_EXPONENT_LAST_CHUNK_W0_IS_NEG {
+            elt_inv_clone.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0)
+        } else {
+            elt_clone.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0)
+        };
+
+        w1_part * &w0_part
+    }
+}
+
+impl<P: MNT6Parameters> PairingEngine for MNT6<P> {
+    type Fr = <P::G1Parameters as ModelParameters>::ScalarField;
+    type G1Projective = G1Projective<P>;
+    type G1Affine = G1Affine<P>;
+    type G1Prepared = G1Prepared<P>;
+    type G2Projective = G2Projective<P>;
+    type G2Affine = G2Affine<P>;
+    type G2Prepared = G2Prepared<P>;
+    type Fq = P::Fp;
+    type Fqe = Fp3<P::Fp3Params>;
+    type Fqk = Fp6<P::Fp6Params>;
+
+    fn miller_loop<'a, I>(i: I) -> Self::Fqk
+    where
+        I: IntoIterator<Item = &'a (Self::G1Prepared, Self::G2Prepared)>,
+    {
+        let mut result = Self::Fqk::one();
+        for (p, q) in i {
+            result *= &Self::ate_miller_loop(p, q);
+        }
+        result
+    }
+
+    fn final_exponentiation(r: &Self::Fqk) -> Option<Self::Fqk> {
+        Some(Self::final_exponentiation(r))
+    }
+}
diff --git a/arkworks/algebra/ec/src/models/mod.rs b/arkworks/algebra/ec/src/models/mod.rs
new file mode 100644
index 00000000..198bb994
--- /dev/null
+++ b/arkworks/algebra/ec/src/models/mod.rs
@@ -0,0 +1,63 @@
+use ark_ff::{Field, PrimeField, SquareRootField, Zero};
+
+pub mod bls12;
+pub mod bn;
+pub mod bw6;
+pub mod mnt4;
+pub mod mnt6;
+pub mod short_weierstrass_jacobian;
+pub mod twisted_edwards_extended;
+
+pub trait ModelParameters: Send + Sync + 'static {
+    type BaseField: Field + SquareRootField;
+    type ScalarField: PrimeField + SquareRootField + Into<<Self::ScalarField as PrimeField>::BigInt>;
+}
+
+pub trait SWModelParameters: ModelParameters {
+    const COEFF_A: Self::BaseField;
+    const COEFF_B: Self::BaseField;
+    const COFACTOR: &'static [u64];
+    const COFACTOR_INV: Self::ScalarField;
+    const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField);
+
+    #[inline(always)]
+    fn mul_by_a(elem: &Self::BaseField) -> Self::BaseField {
+        let mut copy = *elem;
+        copy *= &Self::COEFF_A;
+        copy
+    }
+
+    #[inline(always)]
+    fn add_b(elem: &Self::BaseField) -> Self::BaseField {
+        if !Self::COEFF_B.is_zero() {
+            let mut copy = *elem;
+            copy += &Self::COEFF_B;
+            return copy;
+        }
+        *elem
+    }
+}
+
+pub trait TEModelParameters: ModelParameters {
+    const COEFF_A: Self::BaseField;
+    const COEFF_D: Self::BaseField;
+    const COFACTOR: &'static [u64];
+    const COFACTOR_INV: Self::ScalarField;
+    const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField);
+
+    type MontgomeryModelParameters: MontgomeryModelParameters<BaseField = Self::BaseField>;
+
+    #[inline(always)]
+    fn mul_by_a(elem: &Self::BaseField) -> Self::BaseField {
+        let mut copy = *elem;
+        copy *= &Self::COEFF_A;
+        copy
+    }
+}
+
+pub trait MontgomeryModelParameters: ModelParameters {
+    const COEFF_A: Self::BaseField;
+    const COEFF_B: Self::BaseField;
+
+    type TEModelParameters: TEModelParameters<BaseField = Self::BaseField>;
+}
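The `mul_by_a`/`add_b` helpers above are the exact building blocks the curve-equation checks below use. A small sanity-check sketch (not part of this diff, assuming the vendored `ark-bls12-377`), verifying the published generator against them:

```rust
use ark_bls12_377::g1::Parameters as G1Params;
use ark_ec::models::SWModelParameters;
use ark_ff::Field;

fn main() {
    // The fixed generator must satisfy y^2 = x^3 + a*x + b; evaluate the
    // right-hand side through the trait's own mul_by_a / add_b helpers.
    // (For BLS12-377, COEFF_A is zero, so mul_by_a contributes nothing.)
    let (x, y) = G1Params::AFFINE_GENERATOR_COEFFS;
    let rhs = G1Params::add_b(&((x.square() * &x) + &G1Params::mul_by_a(&x)));
    assert_eq!(y.square(), rhs);
}
```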

; + type Fq = P::Fp; + type Fqe = Fp3; + type Fqk = Fp6; + + fn miller_loop<'a, I>(i: I) -> Self::Fqk + where + I: IntoIterator, + { + let mut result = Self::Fqk::one(); + for (p, q) in i { + result *= &Self::ate_miller_loop(p, q); + } + result + } + + fn final_exponentiation(r: &Self::Fqk) -> Option { + Some(Self::final_exponentiation(r)) + } +} diff --git a/arkworks/algebra/ec/src/models/mod.rs b/arkworks/algebra/ec/src/models/mod.rs new file mode 100644 index 00000000..198bb994 --- /dev/null +++ b/arkworks/algebra/ec/src/models/mod.rs @@ -0,0 +1,63 @@ +use ark_ff::{Field, PrimeField, SquareRootField, Zero}; + +pub mod bls12; +pub mod bn; +pub mod bw6; +pub mod mnt4; +pub mod mnt6; +pub mod short_weierstrass_jacobian; +pub mod twisted_edwards_extended; + +pub trait ModelParameters: Send + Sync + 'static { + type BaseField: Field + SquareRootField; + type ScalarField: PrimeField + SquareRootField + Into<::BigInt>; +} + +pub trait SWModelParameters: ModelParameters { + const COEFF_A: Self::BaseField; + const COEFF_B: Self::BaseField; + const COFACTOR: &'static [u64]; + const COFACTOR_INV: Self::ScalarField; + const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField); + + #[inline(always)] + fn mul_by_a(elem: &Self::BaseField) -> Self::BaseField { + let mut copy = *elem; + copy *= &Self::COEFF_A; + copy + } + + #[inline(always)] + fn add_b(elem: &Self::BaseField) -> Self::BaseField { + if !Self::COEFF_B.is_zero() { + let mut copy = *elem; + copy += &Self::COEFF_B; + return copy; + } + *elem + } +} + +pub trait TEModelParameters: ModelParameters { + const COEFF_A: Self::BaseField; + const COEFF_D: Self::BaseField; + const COFACTOR: &'static [u64]; + const COFACTOR_INV: Self::ScalarField; + const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField); + + type MontgomeryModelParameters: MontgomeryModelParameters; + + #[inline(always)] + fn mul_by_a(elem: &Self::BaseField) -> Self::BaseField { + let mut copy = *elem; + copy *= &Self::COEFF_A; + copy + } +} + +pub trait MontgomeryModelParameters: ModelParameters { + const COEFF_A: Self::BaseField; + const COEFF_B: Self::BaseField; + + type TEModelParameters: TEModelParameters; +} diff --git a/arkworks/algebra/ec/src/models/short_weierstrass_jacobian.rs b/arkworks/algebra/ec/src/models/short_weierstrass_jacobian.rs new file mode 100644 index 00000000..ee4046df --- /dev/null +++ b/arkworks/algebra/ec/src/models/short_weierstrass_jacobian.rs @@ -0,0 +1,908 @@ +use ark_serialize::{ + CanonicalDeserialize, CanonicalDeserializeWithFlags, CanonicalSerialize, + CanonicalSerializeWithFlags, SWFlags, SerializationError, +}; +use ark_std::{ + fmt::{Display, Formatter, Result as FmtResult}, + io::{Read, Result as IoResult, Write}, + marker::PhantomData, + ops::{Add, AddAssign, MulAssign, Neg, Sub, SubAssign}, + vec::Vec, +}; + +use ark_ff::{ + bytes::{FromBytes, ToBytes}, + fields::{BitIteratorBE, Field, PrimeField, SquareRootField}, + ToConstraintField, UniformRand, +}; + +use crate::{models::SWModelParameters as Parameters, AffineCurve, ProjectiveCurve}; + +use num_traits::{One, Zero}; +use zeroize::Zeroize; + +use ark_std::rand::{ + distributions::{Distribution, Standard}, + Rng, +}; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +/// Affine coordinates for a point on an elliptic curve in short Weierstrass form, +/// over the base field `P::BaseField`. 
+#[derive(Derivative)]
+#[derivative(
+    Copy(bound = "P: Parameters"),
+    Clone(bound = "P: Parameters"),
+    PartialEq(bound = "P: Parameters"),
+    Eq(bound = "P: Parameters"),
+    Debug(bound = "P: Parameters"),
+    Hash(bound = "P: Parameters")
+)]
+#[must_use]
+pub struct GroupAffine<P: Parameters> {
+    pub x: P::BaseField,
+    pub y: P::BaseField,
+    pub infinity: bool,
+    #[derivative(Debug = "ignore")]
+    _params: PhantomData<P>,
+}
+
+impl<P: Parameters> PartialEq<GroupProjective<P>> for GroupAffine<P>
{ + fn eq(&self, other: &GroupProjective
<P>) -> bool {
+        self.into_projective() == *other
+    }
+}
+
+impl<P: Parameters> PartialEq<GroupAffine<P>> for GroupProjective<P>
{ + fn eq(&self, other: &GroupAffine
<P>) -> bool {
+        *self == other.into_projective()
+    }
+}
+
+impl<P: Parameters> Display for GroupAffine
<P> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
+        if self.infinity {
+            write!(f, "GroupAffine(Infinity)")
+        } else {
+            write!(f, "GroupAffine(x={}, y={})", self.x, self.y)
+        }
+    }
+}
+
+impl<P: Parameters> GroupAffine<P>
{ + pub fn new(x: P::BaseField, y: P::BaseField, infinity: bool) -> Self { + Self { + x, + y, + infinity, + _params: PhantomData, + } + } + + /// Multiply `self` by the cofactor of the curve, `P::COFACTOR`. + pub fn scale_by_cofactor(&self) -> GroupProjective
<P> {
+        let cofactor = BitIteratorBE::new(P::COFACTOR);
+        self.mul_bits(cofactor)
+    }
+
+    /// Multiplies `self` by the scalar represented by `bits`. `bits` must be a
+    /// big-endian bit-wise decomposition of the scalar.
+    pub(crate) fn mul_bits(&self, bits: impl Iterator<Item = bool>) -> GroupProjective
<P> {
+        let mut res = GroupProjective::zero();
+        // Skip leading zeros.
+        for i in bits.skip_while(|b| !b) {
+            res.double_in_place();
+            if i {
+                res.add_assign_mixed(&self)
+            }
+        }
+        res
+    }
+
+    /// Attempts to construct an affine point given an x-coordinate. The
+    /// point is not guaranteed to be in the prime order subgroup.
+    ///
+    /// If and only if `greatest` is set will the lexicographically
+    /// largest y-coordinate be selected.
+    #[allow(dead_code)]
+    pub fn get_point_from_x(x: P::BaseField, greatest: bool) -> Option<Self> {
+        // Compute x^3 + ax + b
+        // Rust does not optimise away addition with zero
+        let x3b = if P::COEFF_A.is_zero() {
+            P::add_b(&(x.square() * &x))
+        } else {
+            P::add_b(&((x.square() * &x) + &P::mul_by_a(&x)))
+        };
+
+        x3b.sqrt().map(|y| {
+            let negy = -y;
+            let y = if (y < negy) ^ greatest { y } else { negy };
+            Self::new(x, y, false)
+        })
+    }
+
+    /// Checks if `self` is a valid point on the curve.
+    pub fn is_on_curve(&self) -> bool {
+        if self.is_zero() {
+            true
+        } else {
+            // Check that the point is on the curve
+            let y2 = self.y.square();
+            // Rust does not optimise away addition with zero
+            let x3b = if P::COEFF_A.is_zero() {
+                P::add_b(&(self.x.square() * &self.x))
+            } else {
+                P::add_b(&((self.x.square() * &self.x) + &P::mul_by_a(&self.x)))
+            };
+            y2 == x3b
+        }
+    }
+
+    /// Checks if `self` is in the subgroup whose order equals that of
+    /// `P::ScalarField`.
+    pub fn is_in_correct_subgroup_assuming_on_curve(&self) -> bool {
+        self.mul_bits(BitIteratorBE::new(P::ScalarField::characteristic()))
+            .is_zero()
+    }
+}
+
+impl<P: Parameters> Zeroize for GroupAffine
<P> {
+    // The phantom data does not contain element-specific data
+    // and thus does not need to be zeroized.
+    fn zeroize(&mut self) {
+        self.x.zeroize();
+        self.y.zeroize();
+        self.infinity.zeroize();
+    }
+}
+
+impl<P: Parameters> Zero for GroupAffine
<P> {
+    /// Returns the point at infinity. Note that in affine coordinates,
+    /// the point at infinity does not lie on the curve, and this is indicated
+    /// by setting the `infinity` flag to true.
+    #[inline]
+    fn zero() -> Self {
+        Self::new(P::BaseField::zero(), P::BaseField::one(), true)
+    }
+
+    /// Checks if `self` is the point at infinity.
+    #[inline]
+    fn is_zero(&self) -> bool {
+        self.infinity
+    }
+}
+
+impl<P: Parameters> Add<Self> for GroupAffine<P>
{ + type Output = Self; + fn add(self, other: Self) -> Self { + let mut copy = self; + copy += &other; + copy + } +} + +impl<'a, P: Parameters> AddAssign<&'a Self> for GroupAffine
<P> {
+    fn add_assign(&mut self, other: &'a Self) {
+        let mut s_proj = GroupProjective::from(*self);
+        s_proj.add_assign_mixed(other);
+        *self = s_proj.into();
+    }
+}
+
+impl<P: Parameters> AffineCurve for GroupAffine<P>
{ + const COFACTOR: &'static [u64] = P::COFACTOR; + type BaseField = P::BaseField; + type ScalarField = P::ScalarField; + type Projective = GroupProjective
<P>
; + + #[inline] + fn prime_subgroup_generator() -> Self { + Self::new( + P::AFFINE_GENERATOR_COEFFS.0, + P::AFFINE_GENERATOR_COEFFS.1, + false, + ) + } + + fn from_random_bytes(bytes: &[u8]) -> Option { + P::BaseField::from_random_bytes_with_flags::(bytes).and_then(|(x, flags)| { + // if x is valid and is zero and only the infinity flag is set, then parse this + // point as infinity. For all other choices, get the original point. + if x.is_zero() && flags.is_infinity() { + Some(Self::zero()) + } else if let Some(y_is_positive) = flags.is_positive() { + Self::get_point_from_x(x, y_is_positive) // Unwrap is safe because it's not zero. + } else { + None + } + }) + } + + #[inline] + fn mul::BigInt>>(&self, by: S) -> GroupProjective
<P> {
+        let bits = BitIteratorBE::new(by.into());
+        self.mul_bits(bits)
+    }
+
+    #[inline]
+    fn mul_by_cofactor_to_projective(&self) -> Self::Projective {
+        self.scale_by_cofactor()
+    }
+
+    fn mul_by_cofactor_inv(&self) -> Self {
+        self.mul(P::COFACTOR_INV).into()
+    }
+}
+
+impl<P: Parameters> Neg for GroupAffine
<P> {
+    type Output = Self;
+
+    /// If `self.is_zero()`, returns `self` (`== Self::zero()`).
+    /// Else, returns `(x, -y)`, where `self = (x, y)`.
+    #[inline]
+    fn neg(self) -> Self {
+        if !self.is_zero() {
+            Self::new(self.x, -self.y, false)
+        } else {
+            self
+        }
+    }
+}
+
+impl<P: Parameters> ToBytes for GroupAffine
<P> {
+    #[inline]
+    fn write<W: Write>(&self, mut writer: W) -> IoResult<()> {
+        self.x.write(&mut writer)?;
+        self.y.write(&mut writer)?;
+        self.infinity.write(&mut writer)
+    }
+}
+
+impl<P: Parameters> FromBytes for GroupAffine
<P> {
+    #[inline]
+    fn read<R: Read>(mut reader: R) -> IoResult<Self> {
+        let x = P::BaseField::read(&mut reader)?;
+        let y = P::BaseField::read(&mut reader)?;
+        let infinity = bool::read(reader)?;
+        Ok(Self::new(x, y, infinity))
+    }
+}
+
+impl<P: Parameters> Default for GroupAffine
<P> {
+    #[inline]
+    fn default() -> Self {
+        Self::zero()
+    }
+}
+
+impl<P: Parameters> core::iter::Sum<Self> for GroupAffine
<P> {
+    fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
+        iter.fold(GroupProjective::<P>
::zero(), |sum, x| sum.add_mixed(&x)) + .into() + } +} + +impl<'a, P: Parameters> core::iter::Sum<&'a Self> for GroupAffine
<P> {
+    fn sum<I: Iterator<Item = &'a Self>>(iter: I) -> Self {
+        iter.fold(GroupProjective::
<P>::zero(), |sum, x| sum.add_mixed(x))
+            .into()
+    }
+}
+
+/// Jacobian coordinates for a point on an elliptic curve in short Weierstrass
+/// form, over the base field `P::BaseField`. This struct implements arithmetic
+/// via the Jacobian formulae.
+#[derive(Derivative)]
+#[derivative(
+    Copy(bound = "P: Parameters"),
+    Clone(bound = "P: Parameters"),
+    Debug(bound = "P: Parameters"),
+    Hash(bound = "P: Parameters")
+)]
+#[must_use]
+pub struct GroupProjective<P: Parameters> {
+    pub x: P::BaseField,
+    pub y: P::BaseField,
+    pub z: P::BaseField,
+    #[derivative(Debug = "ignore")]
+    _params: PhantomData
<P>,
+}
+
+impl<P: Parameters> Display for GroupProjective
<P> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
+        write!(f, "{}", GroupAffine::from(*self))
+    }
+}
+
+impl<P: Parameters> Eq for GroupProjective
<P> {}
+
+impl<P: Parameters> PartialEq for GroupProjective<P>
{ + fn eq(&self, other: &Self) -> bool { + if self.is_zero() { + return other.is_zero(); + } + + if other.is_zero() { + return false; + } + + // The points (X, Y, Z) and (X', Y', Z') + // are equal when (X * Z^2) = (X' * Z'^2) + // and (Y * Z^3) = (Y' * Z'^3). + let z1z1 = self.z.square(); + let z2z2 = other.z.square(); + + if self.x * &z2z2 != other.x * &z1z1 { + false + } else { + self.y * &(z2z2 * &other.z) == other.y * &(z1z1 * &self.z) + } + } +} + +impl Distribution> for Standard { + #[inline] + fn sample(&self, rng: &mut R) -> GroupProjective
<P> {
+        loop {
+            let x = P::BaseField::rand(rng);
+            let greatest = rng.gen();
+
+            if let Some(p) = GroupAffine::get_point_from_x(x, greatest) {
+                return p.scale_by_cofactor().into();
+            }
+        }
+    }
+}
+
+impl<P: Parameters> ToBytes for GroupProjective
<P> {
+    #[inline]
+    fn write<W: Write>(&self, mut writer: W) -> IoResult<()> {
+        self.x.write(&mut writer)?;
+        self.y.write(&mut writer)?;
+        self.z.write(writer)
+    }
+}
+
+impl<P: Parameters> FromBytes for GroupProjective
<P> {
+    #[inline]
+    fn read<R: Read>(mut reader: R) -> IoResult<Self> {
+        let x = P::BaseField::read(&mut reader)?;
+        let y = P::BaseField::read(&mut reader)?;
+        let z = P::BaseField::read(reader)?;
+        Ok(Self::new(x, y, z))
+    }
+}
+
+impl<P: Parameters> Default for GroupProjective
<P> {
+    #[inline]
+    fn default() -> Self {
+        Self::zero()
+    }
+}
+
+impl<P: Parameters> GroupProjective
<P> {
+    pub fn new(x: P::BaseField, y: P::BaseField, z: P::BaseField) -> Self {
+        Self {
+            x,
+            y,
+            z,
+            _params: PhantomData,
+        }
+    }
+}
+
+impl<P: Parameters> Zeroize for GroupProjective
<P> {
+    fn zeroize(&mut self) {
+        // `PhantomData` does not contain any data and thus does not need to be zeroized.
+        self.x.zeroize();
+        self.y.zeroize();
+        self.z.zeroize();
+    }
+}
+
+impl<P: Parameters> Zero for GroupProjective
<P> {
+    /// Returns the point at infinity, which always has Z = 0.
+    #[inline]
+    fn zero() -> Self {
+        Self::new(
+            P::BaseField::one(),
+            P::BaseField::one(),
+            P::BaseField::zero(),
+        )
+    }
+
+    /// Checks whether `self.z.is_zero()`.
+    #[inline]
+    fn is_zero(&self) -> bool {
+        self.z.is_zero()
+    }
+}
+
+impl<P: Parameters> ProjectiveCurve for GroupProjective<P>
{ + const COFACTOR: &'static [u64] = P::COFACTOR; + type BaseField = P::BaseField; + type ScalarField = P::ScalarField; + type Affine = GroupAffine
<P>
; + + #[inline] + fn prime_subgroup_generator() -> Self { + GroupAffine::prime_subgroup_generator().into() + } + + #[inline] + fn is_normalized(&self) -> bool { + self.is_zero() || self.z.is_one() + } + + /// Normalizes a slice of projective elements so that + /// conversion to affine is cheap. + /// + /// In more detail, this method converts a curve point in Jacobian coordinates + /// (x, y, z) into an equivalent representation (x/z^2, y/z^3, 1). + /// + /// For `N = v.len()`, this costs 1 inversion + 6N field multiplications + N field squarings. + /// + /// (Where batch inversion comprises 3N field multiplications + 1 inversion of these operations) + #[inline] + fn batch_normalization(v: &mut [Self]) { + let mut z_s = v.iter().map(|g| g.z).collect::>(); + ark_ff::batch_inversion(&mut z_s); + + // Perform affine transformations + ark_std::cfg_iter_mut!(v) + .zip(z_s) + .filter(|(g, _)| !g.is_normalized()) + .for_each(|(g, z)| { + let z2 = z.square(); // 1/z + g.x *= &z2; // x/z^2 + g.y *= &(z2 * &z); // y/z^3 + g.z = P::BaseField::one(); // z = 1 + }); + } + + /// Sets `self = 2 * self`. Note that Jacobian formulae are incomplete, and + /// so doubling cannot be computed as `self + self`. Instead, this implementation + /// uses the following specialized doubling formulae: + /// * [`P::A` is zero](http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l) + /// * [`P::A` is not zero](https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian.html#doubling-dbl-2007-bl) + fn double_in_place(&mut self) -> &mut Self { + if self.is_zero() { + return self; + } + + if P::COEFF_A.is_zero() { + // A = X1^2 + let mut a = self.x.square(); + + // B = Y1^2 + let b = self.y.square(); + + // C = B^2 + let mut c = b.square(); + + // D = 2*((X1+B)2-A-C) + let d = ((self.x + &b).square() - &a - &c).double(); + + // E = 3*A + let e = a + &*a.double_in_place(); + + // F = E^2 + let f = e.square(); + + // Z3 = 2*Y1*Z1 + self.z *= &self.y; + self.z.double_in_place(); + + // X3 = F-2*D + self.x = f - &d - &d; + + // Y3 = E*(D-X3)-8*C + self.y = (d - &self.x) * &e - &*c.double_in_place().double_in_place().double_in_place(); + self + } else { + // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l + // XX = X1^2 + let xx = self.x.square(); + + // YY = Y1^2 + let yy = self.y.square(); + + // YYYY = YY^2 + let mut yyyy = yy.square(); + + // ZZ = Z1^2 + let zz = self.z.square(); + + // S = 2*((X1+YY)^2-XX-YYYY) + let s = ((self.x + &yy).square() - &xx - &yyyy).double(); + + // M = 3*XX+a*ZZ^2 + let m = xx + &xx + &xx + &P::mul_by_a(&zz.square()); + + // T = M^2-2*S + let t = m.square() - &s.double(); + + // X3 = T + self.x = t; + // Y3 = M*(S-T)-8*YYYY + let old_y = self.y; + self.y = m * &(s - &t) - &*yyyy.double_in_place().double_in_place().double_in_place(); + // Z3 = (Y1+Z1)^2-YY-ZZ + self.z = (old_y + &self.z).square() - &yy - &zz; + self + } + } + + /// When `other.is_normalized()` (i.e., `other.z == 1`), we can use a more efficient + /// [formula](http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl) + /// to compute `self + other`. + fn add_assign_mixed(&mut self, other: &GroupAffine
<P>
) { + if other.is_zero() { + return; + } + + if self.is_zero() { + self.x = other.x; + self.y = other.y; + self.z = P::BaseField::one(); + return; + } + + // Z1Z1 = Z1^2 + let z1z1 = self.z.square(); + + // U2 = X2*Z1Z1 + let u2 = other.x * &z1z1; + + // S2 = Y2*Z1*Z1Z1 + let s2 = (other.y * &self.z) * &z1z1; + + if self.x == u2 && self.y == s2 { + // The two points are equal, so we double. + self.double_in_place(); + } else { + // If we're adding -a and a together, self.z becomes zero as H becomes zero. + + // H = U2-X1 + let h = u2 - &self.x; + + // HH = H^2 + let hh = h.square(); + + // I = 4*HH + let mut i = hh; + i.double_in_place().double_in_place(); + + // J = H*I + let mut j = h * &i; + + // r = 2*(S2-Y1) + let r = (s2 - &self.y).double(); + + // V = X1*I + let v = self.x * &i; + + // X3 = r^2 - J - 2*V + self.x = r.square(); + self.x -= &j; + self.x -= &v; + self.x -= &v; + + // Y3 = r*(V-X3)-2*Y1*J + j *= &self.y; // J = 2*Y1*J + j.double_in_place(); + self.y = v - &self.x; + self.y *= &r; + self.y -= &j; + + // Z3 = (Z1+H)^2-Z1Z1-HH + self.z += &h; + self.z.square_in_place(); + self.z -= &z1z1; + self.z -= &hh; + } + } +} + +impl Neg for GroupProjective
<P>
{ + type Output = Self; + + #[inline] + fn neg(self) -> Self { + if !self.is_zero() { + Self::new(self.x, -self.y, self.z) + } else { + self + } + } +} + +ark_ff::impl_additive_ops_from_ref!(GroupProjective, Parameters); + +impl<'a, P: Parameters> Add<&'a Self> for GroupProjective
<P>
{ + type Output = Self; + + #[inline] + fn add(mut self, other: &'a Self) -> Self { + self += other; + self + } +} + +impl<'a, P: Parameters> AddAssign<&'a Self> for GroupProjective
<P>
{ + fn add_assign(&mut self, other: &'a Self) { + if self.is_zero() { + *self = *other; + return; + } + + if other.is_zero() { + return; + } + + // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl + // Works for all curves. + + // Z1Z1 = Z1^2 + let z1z1 = self.z.square(); + + // Z2Z2 = Z2^2 + let z2z2 = other.z.square(); + + // U1 = X1*Z2Z2 + let u1 = self.x * &z2z2; + + // U2 = X2*Z1Z1 + let u2 = other.x * &z1z1; + + // S1 = Y1*Z2*Z2Z2 + let s1 = self.y * &other.z * &z2z2; + + // S2 = Y2*Z1*Z1Z1 + let s2 = other.y * &self.z * &z1z1; + + if u1 == u2 && s1 == s2 { + // The two points are equal, so we double. + self.double_in_place(); + } else { + // If we're adding -a and a together, self.z becomes zero as H becomes zero. + + // H = U2-U1 + let h = u2 - &u1; + + // I = (2*H)^2 + let i = (h.double()).square(); + + // J = H*I + let j = h * &i; + + // r = 2*(S2-S1) + let r = (s2 - &s1).double(); + + // V = U1*I + let v = u1 * &i; + + // X3 = r^2 - J - 2*V + self.x = r.square() - &j - &(v.double()); + + // Y3 = r*(V - X3) - 2*S1*J + self.y = r * &(v - &self.x) - &*(s1 * &j).double_in_place(); + + // Z3 = ((Z1+Z2)^2 - Z1Z1 - Z2Z2)*H + self.z = ((self.z + &other.z).square() - &z1z1 - &z2z2) * &h; + } + } +} + +impl<'a, P: Parameters> Sub<&'a Self> for GroupProjective
<P>
{ + type Output = Self; + + #[inline] + fn sub(mut self, other: &'a Self) -> Self { + self -= other; + self + } +} + +impl<'a, P: Parameters> SubAssign<&'a Self> for GroupProjective
<P> {
+    fn sub_assign(&mut self, other: &'a Self) {
+        *self += &(-(*other));
+    }
+}
+
+impl<P: Parameters> MulAssign<P::ScalarField> for GroupProjective
<P> {
+    fn mul_assign(&mut self, other: P::ScalarField) {
+        *self = self.mul(other.into_repr())
+    }
+}
+
+// The affine point X, Y is represented in the Jacobian
+// coordinates with Z = 1.
+impl<P: Parameters> From<GroupAffine<P>> for GroupProjective<P>
{ + #[inline] + fn from(p: GroupAffine
<P>
) -> GroupProjective
<P> {
+        if p.is_zero() {
+            Self::zero()
+        } else {
+            Self::new(p.x, p.y, P::BaseField::one())
+        }
+    }
+}
+
+// The projective point X, Y, Z is represented in the affine
+// coordinates as X/Z^2, Y/Z^3.
+impl<P: Parameters> From<GroupProjective<P>> for GroupAffine<P>
{ + #[inline] + fn from(p: GroupProjective
<P>
) -> GroupAffine
<P> {
+        if p.is_zero() {
+            GroupAffine::zero()
+        } else if p.z.is_one() {
+            // If Z is one, the point is already normalized.
+            GroupAffine::new(p.x, p.y, false)
+        } else {
+            // Z is nonzero, so it must have an inverse in a field.
+            let zinv = p.z.inverse().unwrap();
+            let zinv_squared = zinv.square();
+
+            // X/Z^2
+            let x = p.x * &zinv_squared;
+
+            // Y/Z^3
+            let y = p.y * &(zinv_squared * &zinv);
+
+            GroupAffine::new(x, y, false)
+        }
+    }
+}
+
+impl<P: Parameters> CanonicalSerialize for GroupAffine<P>
{ + #[allow(unused_qualifications)] + #[inline] + fn serialize(&self, writer: W) -> Result<(), SerializationError> { + if self.is_zero() { + let flags = SWFlags::infinity(); + // Serialize 0. + P::BaseField::zero().serialize_with_flags(writer, flags) + } else { + let flags = SWFlags::from_y_sign(self.y > -self.y); + self.x.serialize_with_flags(writer, flags) + } + } + + #[inline] + fn serialized_size(&self) -> usize { + P::BaseField::zero().serialized_size_with_flags::() + } + + #[allow(unused_qualifications)] + #[inline] + fn serialize_uncompressed(&self, mut writer: W) -> Result<(), SerializationError> { + let flags = if self.is_zero() { + SWFlags::infinity() + } else { + SWFlags::default() + }; + self.x.serialize(&mut writer)?; + self.y.serialize_with_flags(&mut writer, flags)?; + Ok(()) + } + + #[inline] + fn uncompressed_size(&self) -> usize { + self.x.serialized_size() + self.y.serialized_size_with_flags::() + } +} + +impl CanonicalSerialize for GroupProjective
<P> {
+    #[allow(unused_qualifications)]
+    #[inline]
+    fn serialize<W: Write>(&self, writer: W) -> Result<(), SerializationError> {
+        let aff = GroupAffine::<P>
::from(self.clone()); + aff.serialize(writer) + } + + #[inline] + fn serialized_size(&self) -> usize { + let aff = GroupAffine::
<P>::from(self.clone());
+        aff.serialized_size()
+    }
+
+    #[allow(unused_qualifications)]
+    #[inline]
+    fn serialize_uncompressed<W: Write>(&self, writer: W) -> Result<(), SerializationError> {
+        let aff = GroupAffine::<P>
::from(self.clone()); + aff.serialize_uncompressed(writer) + } + + #[inline] + fn uncompressed_size(&self) -> usize { + let aff = GroupAffine::
<P>::from(self.clone());
+        aff.uncompressed_size()
+    }
+}
+
+impl<P: Parameters> CanonicalDeserialize for GroupAffine
<P> {
+    #[allow(unused_qualifications)]
+    fn deserialize<R: Read>(reader: R) -> Result<Self, SerializationError> {
+        let (x, flags): (P::BaseField, SWFlags) =
+            CanonicalDeserializeWithFlags::deserialize_with_flags(reader)?;
+        if flags.is_infinity() {
+            Ok(Self::zero())
+        } else {
+            let p = GroupAffine::<P>
::get_point_from_x(x, flags.is_positive().unwrap()) + .ok_or(SerializationError::InvalidData)?; + if !p.is_in_correct_subgroup_assuming_on_curve() { + return Err(SerializationError::InvalidData); + } + Ok(p) + } + } + + #[allow(unused_qualifications)] + fn deserialize_uncompressed( + reader: R, + ) -> Result { + let p = Self::deserialize_unchecked(reader)?; + + if !p.is_in_correct_subgroup_assuming_on_curve() { + return Err(SerializationError::InvalidData); + } + Ok(p) + } + + #[allow(unused_qualifications)] + fn deserialize_unchecked(mut reader: R) -> Result { + let x: P::BaseField = CanonicalDeserialize::deserialize(&mut reader)?; + let (y, flags): (P::BaseField, SWFlags) = + CanonicalDeserializeWithFlags::deserialize_with_flags(&mut reader)?; + let p = GroupAffine::
<P>::new(x, y, flags.is_infinity());
+        Ok(p)
+    }
+}
+
+impl<P: Parameters> CanonicalDeserialize for GroupProjective
<P> {
+    #[allow(unused_qualifications)]
+    fn deserialize<R: Read>(reader: R) -> Result<Self, SerializationError> {
+        let aff = GroupAffine::
<P>::deserialize(reader)?;
+        Ok(aff.into())
+    }
+
+    #[allow(unused_qualifications)]
+    fn deserialize_uncompressed<R: Read>(reader: R) -> Result<Self, SerializationError> {
+        let aff = GroupAffine::
<P>::deserialize_uncompressed(reader)?;
+        Ok(aff.into())
+    }
+
+    #[allow(unused_qualifications)]
+    fn deserialize_unchecked<R: Read>(reader: R) -> Result<Self, SerializationError> {
+        let aff = GroupAffine::<P>
::deserialize_unchecked(reader)?; + Ok(aff.into()) + } +} + +impl ToConstraintField for GroupAffine +where + M::BaseField: ToConstraintField, +{ + #[inline] + fn to_field_elements(&self) -> Option> { + let mut x_fe = self.x.to_field_elements()?; + let y_fe = self.y.to_field_elements()?; + let infinity_fe = self.infinity.to_field_elements()?; + x_fe.extend_from_slice(&y_fe); + x_fe.extend_from_slice(&infinity_fe); + Some(x_fe) + } +} + +impl ToConstraintField for GroupProjective +where + M::BaseField: ToConstraintField, +{ + #[inline] + fn to_field_elements(&self) -> Option> { + GroupAffine::from(*self).to_field_elements() + } +} diff --git a/arkworks/algebra/ec/src/models/twisted_edwards_extended.rs b/arkworks/algebra/ec/src/models/twisted_edwards_extended.rs new file mode 100644 index 00000000..d4a5524e --- /dev/null +++ b/arkworks/algebra/ec/src/models/twisted_edwards_extended.rs @@ -0,0 +1,852 @@ +use crate::{ + models::{MontgomeryModelParameters as MontgomeryParameters, TEModelParameters as Parameters}, + AffineCurve, ProjectiveCurve, +}; +use ark_serialize::{ + CanonicalDeserialize, CanonicalDeserializeWithFlags, CanonicalSerialize, + CanonicalSerializeWithFlags, EdwardsFlags, SerializationError, +}; +use ark_std::rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +use ark_std::{ + fmt::{Display, Formatter, Result as FmtResult}, + io::{Read, Result as IoResult, Write}, + marker::PhantomData, + ops::{Add, AddAssign, MulAssign, Neg, Sub, SubAssign}, + vec::Vec, +}; +use num_traits::{One, Zero}; +use zeroize::Zeroize; + +use ark_ff::{ + bytes::{FromBytes, ToBytes}, + fields::{BitIteratorBE, Field, PrimeField, SquareRootField}, + ToConstraintField, UniformRand, +}; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +#[derive(Derivative)] +#[derivative( + Copy(bound = "P: Parameters"), + Clone(bound = "P: Parameters"), + PartialEq(bound = "P: Parameters"), + Eq(bound = "P: Parameters"), + Debug(bound = "P: Parameters"), + Hash(bound = "P: Parameters") +)] +#[must_use] +pub struct GroupAffine { + pub x: P::BaseField, + pub y: P::BaseField, + #[derivative(Debug = "ignore")] + _params: PhantomData
<P>,
+}
+
+impl<P: Parameters> Display for GroupAffine
<P> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
+        write!(f, "GroupAffine(x={}, y={})", self.x, self.y)
+    }
+}
+
+impl<P: Parameters> GroupAffine
<P> {
+    pub fn new(x: P::BaseField, y: P::BaseField) -> Self {
+        Self {
+            x,
+            y,
+            _params: PhantomData,
+        }
+    }
+
+    #[must_use]
+    pub fn scale_by_cofactor(&self) -> <Self as AffineCurve>::Projective {
+        self.mul_bits(BitIteratorBE::new(P::COFACTOR))
+    }
+
+    /// Multiplies `self` by the scalar represented by `bits`. `bits` must be a
+    /// big-endian bit-wise decomposition of the scalar.
+    pub(crate) fn mul_bits(&self, bits: impl Iterator<Item = bool>) -> GroupProjective<P>
{ + let mut res = GroupProjective::zero(); + for i in bits.skip_while(|b| !b) { + res.double_in_place(); + if i { + res.add_assign_mixed(&self) + } + } + res + } + + /// Attempts to construct an affine point given an x-coordinate. The + /// point is not guaranteed to be in the prime order subgroup. + /// + /// If and only if `greatest` is set will the lexicographically + /// largest y-coordinate be selected. + #[allow(dead_code)] + pub fn get_point_from_x(x: P::BaseField, greatest: bool) -> Option { + let x2 = x.square(); + let one = P::BaseField::one(); + let numerator = P::mul_by_a(&x2) - &one; + let denominator = P::COEFF_D * &x2 - &one; + let y2 = denominator.inverse().map(|denom| denom * &numerator); + y2.and_then(|y2| y2.sqrt()).map(|y| { + let negy = -y; + let y = if (y < negy) ^ greatest { y } else { negy }; + Self::new(x, y) + }) + } + + /// Checks that the current point is on the elliptic curve. + pub fn is_on_curve(&self) -> bool { + let x2 = self.x.square(); + let y2 = self.y.square(); + + let lhs = y2 + &P::mul_by_a(&x2); + let rhs = P::BaseField::one() + &(P::COEFF_D * &(x2 * &y2)); + + lhs == rhs + } + + /// Checks that the current point is in the prime order subgroup given + /// the point on the curve. + pub fn is_in_correct_subgroup_assuming_on_curve(&self) -> bool { + self.mul_bits(BitIteratorBE::new(P::ScalarField::characteristic())) + .is_zero() + } +} + +impl Zero for GroupAffine
<P> {
+    fn zero() -> Self {
+        Self::new(P::BaseField::zero(), P::BaseField::one())
+    }
+
+    fn is_zero(&self) -> bool {
+        self.x.is_zero() & self.y.is_one()
+    }
+}
+
+impl<P: Parameters> AffineCurve for GroupAffine<P>
{ + const COFACTOR: &'static [u64] = P::COFACTOR; + type BaseField = P::BaseField; + type ScalarField = P::ScalarField; + type Projective = GroupProjective
<P>;
+
+    fn prime_subgroup_generator() -> Self {
+        Self::new(P::AFFINE_GENERATOR_COEFFS.0, P::AFFINE_GENERATOR_COEFFS.1)
+    }
+
+    fn mul<S: Into<<Self::ScalarField as PrimeField>::BigInt>>(&self, by: S) -> GroupProjective
<P> {
+        self.mul_bits(BitIteratorBE::new(by.into()))
+    }
+
+    fn from_random_bytes(bytes: &[u8]) -> Option<Self> {
+        P::BaseField::from_random_bytes_with_flags::<EdwardsFlags>(bytes).and_then(|(x, flags)| {
+            // if x is valid and is zero, then parse this
+            // point as infinity.
+            if x.is_zero() {
+                Some(Self::zero())
+            } else {
+                Self::get_point_from_x(x, flags.is_positive())
+            }
+        })
+    }
+
+    #[inline]
+    fn mul_by_cofactor_to_projective(&self) -> Self::Projective {
+        self.scale_by_cofactor()
+    }
+
+    fn mul_by_cofactor_inv(&self) -> Self {
+        self.mul(P::COFACTOR_INV).into()
+    }
+}
+
+impl<P: Parameters> Zeroize for GroupAffine
<P> {
+    // The phantom data does not contain element-specific data
+    // and thus does not need to be zeroized.
+    fn zeroize(&mut self) {
+        self.x.zeroize();
+        self.y.zeroize();
+    }
+}
+
+impl<P: Parameters> Neg for GroupAffine<P>
{ + type Output = Self; + + fn neg(self) -> Self { + Self::new(-self.x, self.y) + } +} + +ark_ff::impl_additive_ops_from_ref!(GroupAffine, Parameters); + +impl<'a, P: Parameters> Add<&'a Self> for GroupAffine
<P>
{ + type Output = Self; + fn add(self, other: &'a Self) -> Self { + let mut copy = self; + copy += other; + copy + } +} + +impl<'a, P: Parameters> AddAssign<&'a Self> for GroupAffine
<P>
{ + fn add_assign(&mut self, other: &'a Self) { + let y1y2 = self.y * &other.y; + let x1x2 = self.x * &other.x; + let dx1x2y1y2 = P::COEFF_D * &y1y2 * &x1x2; + + let d1 = P::BaseField::one() + &dx1x2y1y2; + let d2 = P::BaseField::one() - &dx1x2y1y2; + + let x1y2 = self.x * &other.y; + let y1x2 = self.y * &other.x; + + self.x = (x1y2 + &y1x2) / &d1; + self.y = (y1y2 - &P::mul_by_a(&x1x2)) / &d2; + } +} + +impl<'a, P: Parameters> Sub<&'a Self> for GroupAffine
<P>
{ + type Output = Self; + fn sub(self, other: &'a Self) -> Self { + let mut copy = self; + copy -= other; + copy + } +} + +impl<'a, P: Parameters> SubAssign<&'a Self> for GroupAffine
<P> {
+    fn sub_assign(&mut self, other: &'a Self) {
+        *self += &(-(*other));
+    }
+}
+
+impl<P: Parameters> MulAssign<P::ScalarField> for GroupAffine
<P> {
+    fn mul_assign(&mut self, other: P::ScalarField) {
+        *self = self.mul(other.into_repr()).into()
+    }
+}
+
+impl<P: Parameters> ToBytes for GroupAffine
<P> {
+    #[inline]
+    fn write<W: Write>(&self, mut writer: W) -> IoResult<()> {
+        self.x.write(&mut writer)?;
+        self.y.write(&mut writer)
+    }
+}
+
+impl<P: Parameters> FromBytes for GroupAffine
<P> {
+    #[inline]
+    fn read<R: Read>(mut reader: R) -> IoResult<Self> {
+        let x = P::BaseField::read(&mut reader)?;
+        let y = P::BaseField::read(&mut reader)?;
+        Ok(Self::new(x, y))
+    }
+}
+
+impl<P: Parameters> Default for GroupAffine
<P> {
+    #[inline]
+    fn default() -> Self {
+        Self::zero()
+    }
+}
+
+impl<P: Parameters> Distribution<GroupAffine<P>> for Standard {
+    #[inline]
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> GroupAffine
<P> {
+        loop {
+            let x = P::BaseField::rand(rng);
+            let greatest = rng.gen();
+
+            if let Some(p) = GroupAffine::get_point_from_x(x, greatest) {
+                return p.scale_by_cofactor().into();
+            }
+        }
+    }
+}
+
+mod group_impl {
+    use super::*;
+    use crate::group::Group;
+
+    impl<P: Parameters> Group for GroupAffine<P>
{ + type ScalarField = P::ScalarField; + + #[inline] + fn double(&self) -> Self { + let mut tmp = *self; + tmp += self; + tmp + } + + #[inline] + fn double_in_place(&mut self) -> &mut Self { + let mut tmp = *self; + tmp += &*self; + *self = tmp; + self + } + } +} + +////////////////////////////////////////////////////////////////////////////// + +/// `GroupProjective` implements Extended Twisted Edwards Coordinates +/// as described in [\[HKCD08\]](https://eprint.iacr.org/2008/522.pdf). +/// +/// This implementation uses the unified addition formulae from that paper (see Section 3.1). +#[derive(Derivative)] +#[derivative( + Copy(bound = "P: Parameters"), + Clone(bound = "P: Parameters"), + Eq(bound = "P: Parameters"), + Debug(bound = "P: Parameters"), + Hash(bound = "P: Parameters") +)] +#[must_use] +pub struct GroupProjective { + pub x: P::BaseField, + pub y: P::BaseField, + pub t: P::BaseField, + pub z: P::BaseField, + #[derivative(Debug = "ignore")] + _params: PhantomData
<P>,
+}
+
+impl<P: Parameters> PartialEq<GroupProjective<P>> for GroupAffine<P>
{ + fn eq(&self, other: &GroupProjective
<P>) -> bool {
+        self.into_projective() == *other
+    }
+}
+
+impl<P: Parameters> PartialEq<GroupAffine<P>> for GroupProjective<P>
{ + fn eq(&self, other: &GroupAffine
<P>) -> bool {
+        *self == other.into_projective()
+    }
+}
+
+impl<P: Parameters> Display for GroupProjective
<P> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
+        write!(f, "{}", GroupAffine::from(*self))
+    }
+}
+
+impl<P: Parameters> PartialEq for GroupProjective
<P> {
+    fn eq(&self, other: &Self) -> bool {
+        if self.is_zero() {
+            return other.is_zero();
+        }
+
+        if other.is_zero() {
+            return false;
+        }
+
+        // x1/z1 == x2/z2 <==> x1 * z2 == x2 * z1
+        (self.x * &other.z) == (other.x * &self.z)
+            && (self.y * &other.z) == (other.y * &self.z)
+    }
+}
+
+impl<P: Parameters> Distribution<GroupProjective<P>> for Standard {
+    #[inline]
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> GroupProjective
<P> {
+        loop {
+            let x = P::BaseField::rand(rng);
+            let greatest = rng.gen();
+
+            if let Some(p) = GroupAffine::get_point_from_x(x, greatest) {
+                return p.scale_by_cofactor();
+            }
+        }
+    }
+}
+
+impl<P: Parameters> ToBytes for GroupProjective
<P> {
+    #[inline]
+    fn write<W: Write>(&self, mut writer: W) -> IoResult<()> {
+        self.x.write(&mut writer)?;
+        self.y.write(&mut writer)?;
+        self.t.write(&mut writer)?;
+        self.z.write(writer)
+    }
+}
+
+impl<P: Parameters> FromBytes for GroupProjective
<P> {
+    #[inline]
+    fn read<R: Read>(mut reader: R) -> IoResult<Self> {
+        let x = P::BaseField::read(&mut reader)?;
+        let y = P::BaseField::read(&mut reader)?;
+        let t = P::BaseField::read(&mut reader)?;
+        let z = P::BaseField::read(reader)?;
+        Ok(Self::new(x, y, t, z))
+    }
+}
+
+impl<P: Parameters> Default for GroupProjective
<P> {
+    #[inline]
+    fn default() -> Self {
+        Self::zero()
+    }
+}
+
+impl<P: Parameters> GroupProjective
<P> {
+    pub fn new(x: P::BaseField, y: P::BaseField, t: P::BaseField, z: P::BaseField) -> Self {
+        Self {
+            x,
+            y,
+            t,
+            z,
+            _params: PhantomData,
+        }
+    }
+}
+
+impl<P: Parameters> Zeroize for GroupProjective
<P> {
+    // The phantom data does not contain element-specific data
+    // and thus does not need to be zeroized.
+    fn zeroize(&mut self) {
+        self.x.zeroize();
+        self.y.zeroize();
+        self.t.zeroize();
+        self.z.zeroize();
+    }
+}
+
+impl<P: Parameters> Zero for GroupProjective
<P> {
+    fn zero() -> Self {
+        Self::new(
+            P::BaseField::zero(),
+            P::BaseField::one(),
+            P::BaseField::zero(),
+            P::BaseField::one(),
+        )
+    }
+
+    fn is_zero(&self) -> bool {
+        self.x.is_zero() && self.y == self.z && !self.y.is_zero() && self.t.is_zero()
+    }
+}
+
+impl<P: Parameters> ProjectiveCurve for GroupProjective<P>
{ + const COFACTOR: &'static [u64] = P::COFACTOR; + type BaseField = P::BaseField; + type ScalarField = P::ScalarField; + type Affine = GroupAffine
<P>
; + + fn prime_subgroup_generator() -> Self { + GroupAffine::prime_subgroup_generator().into() + } + + fn is_normalized(&self) -> bool { + self.z.is_one() + } + + fn batch_normalization(v: &mut [Self]) { + // A projective curve element (x, y, t, z) is normalized + // to its affine representation, by the conversion + // (x, y, t, z) -> (x/z, y/z, t/z, 1) + // Batch normalizing N twisted edwards curve elements costs: + // 1 inversion + 6N field multiplications + // (batch inversion requires 3N multiplications + 1 inversion) + let mut z_s = v.iter().map(|g| g.z).collect::>(); + ark_ff::batch_inversion(&mut z_s); + + // Perform affine transformations + ark_std::cfg_iter_mut!(v) + .zip(z_s) + .filter(|(g, _)| !g.is_normalized()) + .for_each(|(g, z)| { + g.x *= &z; // x/z + g.y *= &z; + g.t *= &z; + g.z = P::BaseField::one(); // z = 1 + }); + } + + fn double_in_place(&mut self) -> &mut Self { + // See "Twisted Edwards Curves Revisited" + // Huseyin Hisil, Kenneth Koon-Ho Wong, Gary Carter, and Ed Dawson + // 3.3 Doubling in E^e + // Source: https://www.hyperelliptic.org/EFD/g1p/data/twisted/extended/doubling/dbl-2008-hwcd + + // A = X1^2 + let a = self.x.square(); + // B = Y1^2 + let b = self.y.square(); + // C = 2 * Z1^2 + let c = self.z.square().double(); + // D = a * A + let d = P::mul_by_a(&a); + // E = (X1 + Y1)^2 - A - B + let e = (self.x + &self.y).square() - &a - &b; + // G = D + B + let g = d + &b; + // F = G - C + let f = g - &c; + // H = D - B + let h = d - &b; + // X3 = E * F + self.x = e * &f; + // Y3 = G * H + self.y = g * &h; + // T3 = E * H + self.t = e * &h; + // Z3 = F * G + self.z = f * &g; + + self + } + + fn add_assign_mixed(&mut self, other: &GroupAffine
<P>
) { + // See "Twisted Edwards Curves Revisited" + // Huseyin Hisil, Kenneth Koon-Ho Wong, Gary Carter, and Ed Dawson + // 3.1 Unified Addition in E^e + // Source: https://www.hyperelliptic.org/EFD/g1p/data/twisted/extended/addition/madd-2008-hwcd + + // A = X1*X2 + let a = self.x * &other.x; + // B = Y1*Y2 + let b = self.y * &other.y; + // C = T1*d*T2 + let c = P::COEFF_D * &self.t * &other.x * &other.y; + + // D = Z1 + let d = self.z; + // E = (X1+Y1)*(X2+Y2)-A-B + let e = (self.x + &self.y) * &(other.x + &other.y) - &a - &b; + // F = D-C + let f = d - &c; + // G = D+C + let g = d + &c; + // H = B-a*A + let h = b - &P::mul_by_a(&a); + // X3 = E*F + self.x = e * &f; + // Y3 = G*H + self.y = g * &h; + // T3 = E*H + self.t = e * &h; + // Z3 = F*G + self.z = f * &g; + } +} + +impl Neg for GroupProjective
<P>
{ + type Output = Self; + fn neg(mut self) -> Self { + self.x = -self.x; + self.t = -self.t; + self + } +} + +ark_ff::impl_additive_ops_from_ref!(GroupProjective, Parameters); + +impl<'a, P: Parameters> Add<&'a Self> for GroupProjective
<P>
{ + type Output = Self; + fn add(mut self, other: &'a Self) -> Self { + self += other; + self + } +} + +impl<'a, P: Parameters> AddAssign<&'a Self> for GroupProjective
<P>
{ + fn add_assign(&mut self, other: &'a Self) { + // See "Twisted Edwards Curves Revisited" (https://eprint.iacr.org/2008/522.pdf) + // by Huseyin Hisil, Kenneth Koon-Ho Wong, Gary Carter, and Ed Dawson + // 3.1 Unified Addition in E^e + + // A = x1 * x2 + let a = self.x * &other.x; + + // B = y1 * y2 + let b = self.y * &other.y; + + // C = d * t1 * t2 + let c = P::COEFF_D * &self.t * &other.t; + + // D = z1 * z2 + let d = self.z * &other.z; + + // H = B - aA + let h = b - &P::mul_by_a(&a); + + // E = (x1 + y1) * (x2 + y2) - A - B + let e = (self.x + &self.y) * &(other.x + &other.y) - &a - &b; + + // F = D - C + let f = d - &c; + + // G = D + C + let g = d + &c; + + // x3 = E * F + self.x = e * &f; + + // y3 = G * H + self.y = g * &h; + + // t3 = E * H + self.t = e * &h; + + // z3 = F * G + self.z = f * &g; + } +} + +impl<'a, P: Parameters> Sub<&'a Self> for GroupProjective
<P>
{ + type Output = Self; + fn sub(mut self, other: &'a Self) -> Self { + self -= other; + self + } +} + +impl<'a, P: Parameters> SubAssign<&'a Self> for GroupProjective
<P> {
+    fn sub_assign(&mut self, other: &'a Self) {
+        *self += &(-(*other));
+    }
+}
+
+impl<P: Parameters> MulAssign<P::ScalarField> for GroupProjective
<P> {
+    fn mul_assign(&mut self, other: P::ScalarField) {
+        *self = self.mul(other.into_repr())
+    }
+}
+
+// The affine point (X, Y) is represented in the Extended Projective coordinates
+// with Z = 1.
+impl<P: Parameters> From<GroupAffine<P>> for GroupProjective<P>
{ + fn from(p: GroupAffine
<P>
) -> GroupProjective
<P> {
+        Self::new(p.x, p.y, p.x * &p.y, P::BaseField::one())
+    }
+}
+
+// The projective point X, Y, T, Z is represented in the affine
+// coordinates as X/Z, Y/Z.
+impl<P: Parameters> From<GroupProjective<P>> for GroupAffine<P>
{ + fn from(p: GroupProjective
<P>
) -> GroupAffine
<P> {
+        if p.is_zero() {
+            GroupAffine::zero()
+        } else if p.z.is_one() {
+            // If Z is one, the point is already normalized.
+            GroupAffine::new(p.x, p.y)
+        } else {
+            // Z is nonzero, so it must have an inverse in a field.
+            let z_inv = p.z.inverse().unwrap();
+            let x = p.x * &z_inv;
+            let y = p.y * &z_inv;
+            GroupAffine::new(x, y)
+        }
+    }
+}
+
+impl<P: Parameters> core::str::FromStr for GroupAffine<P>
+where + P::BaseField: core::str::FromStr, +{ + type Err = (); + + fn from_str(mut s: &str) -> Result { + s = s.trim(); + if s.is_empty() { + return Err(()); + } + if s.len() < 3 { + return Err(()); + } + if !(s.starts_with('(') && s.ends_with(')')) { + return Err(()); + } + let mut point = Vec::new(); + for substr in s.split(|c| c == '(' || c == ')' || c == ',' || c == ' ') { + if !substr.is_empty() { + point.push(P::BaseField::from_str(substr)?); + } + } + if point.len() != 2 { + return Err(()); + } + let point = Self::new(point[0], point[1]); + + if !point.is_on_curve() { + Err(()) + } else { + Ok(point) + } + } +} + +#[derive(Derivative)] +#[derivative( + Copy(bound = "P: MontgomeryParameters"), + Clone(bound = "P: MontgomeryParameters"), + PartialEq(bound = "P: MontgomeryParameters"), + Eq(bound = "P: MontgomeryParameters"), + Debug(bound = "P: MontgomeryParameters"), + Hash(bound = "P: MontgomeryParameters") +)] +pub struct MontgomeryGroupAffine { + pub x: P::BaseField, + pub y: P::BaseField, + #[derivative(Debug = "ignore")] + _params: PhantomData
<P>,
+}
+
+impl<P: MontgomeryParameters> Display for MontgomeryGroupAffine
<P> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
+        write!(f, "MontgomeryGroupAffine(x={}, y={})", self.x, self.y)
+    }
+}
+
+impl<P: MontgomeryParameters> MontgomeryGroupAffine
<P> {
+    pub fn new(x: P::BaseField, y: P::BaseField) -> Self {
+        Self {
+            x,
+            y,
+            _params: PhantomData,
+        }
+    }
+}
+
+impl<P: Parameters> CanonicalSerialize for GroupAffine<P>
{ + #[allow(unused_qualifications)] + #[inline] + fn serialize(&self, writer: W) -> Result<(), SerializationError> { + if self.is_zero() { + let flags = EdwardsFlags::default(); + // Serialize 0. + P::BaseField::zero().serialize_with_flags(writer, flags) + } else { + let flags = EdwardsFlags::from_y_sign(self.y > -self.y); + self.x.serialize_with_flags(writer, flags) + } + } + + #[inline] + fn serialized_size(&self) -> usize { + P::BaseField::zero().serialized_size_with_flags::() + } + + #[allow(unused_qualifications)] + #[inline] + fn serialize_uncompressed(&self, mut writer: W) -> Result<(), SerializationError> { + self.x.serialize_uncompressed(&mut writer)?; + self.y.serialize_uncompressed(&mut writer)?; + Ok(()) + } + + #[inline] + fn uncompressed_size(&self) -> usize { + // x + y + self.x.serialized_size() + self.y.serialized_size() + } +} + +impl CanonicalSerialize for GroupProjective
<P> {
+    #[allow(unused_qualifications)]
+    #[inline]
+    fn serialize<W: Write>(&self, writer: W) -> Result<(), SerializationError> {
+        let aff = GroupAffine::<P>
::from(self.clone()); + aff.serialize(writer) + } + + #[inline] + fn serialized_size(&self) -> usize { + let aff = GroupAffine::
<P>::from(self.clone());
+        aff.serialized_size()
+    }
+
+    #[allow(unused_qualifications)]
+    #[inline]
+    fn serialize_uncompressed<W: Write>(&self, writer: W) -> Result<(), SerializationError> {
+        let aff = GroupAffine::<P>
::from(self.clone()); + aff.serialize_uncompressed(writer) + } + + #[inline] + fn uncompressed_size(&self) -> usize { + let aff = GroupAffine::
<P>::from(self.clone());
+        aff.uncompressed_size()
+    }
+}
+
+impl<P: Parameters> CanonicalDeserialize for GroupAffine
<P> {
+    #[allow(unused_qualifications)]
+    fn deserialize<R: Read>(mut reader: R) -> Result<Self, SerializationError> {
+        let (x, flags): (P::BaseField, EdwardsFlags) =
+            CanonicalDeserializeWithFlags::deserialize_with_flags(&mut reader)?;
+        if x == P::BaseField::zero() {
+            Ok(Self::zero())
+        } else {
+            let p = GroupAffine::<P>
::get_point_from_x(x, flags.is_positive()) + .ok_or(SerializationError::InvalidData)?; + if !p.is_in_correct_subgroup_assuming_on_curve() { + return Err(SerializationError::InvalidData); + } + Ok(p) + } + } + + #[allow(unused_qualifications)] + fn deserialize_uncompressed(reader: R) -> Result { + let p = Self::deserialize_unchecked(reader)?; + + if !p.is_in_correct_subgroup_assuming_on_curve() { + return Err(SerializationError::InvalidData); + } + Ok(p) + } + + #[allow(unused_qualifications)] + fn deserialize_unchecked(mut reader: R) -> Result { + let x: P::BaseField = CanonicalDeserialize::deserialize(&mut reader)?; + let y: P::BaseField = CanonicalDeserialize::deserialize(&mut reader)?; + + let p = GroupAffine::
<P>::new(x, y);
+        Ok(p)
+    }
+}
+
+impl<P: Parameters> CanonicalDeserialize for GroupProjective
<P> {
+    #[allow(unused_qualifications)]
+    fn deserialize<R: Read>(reader: R) -> Result<Self, SerializationError> {
+        let aff = GroupAffine::
<P>::deserialize(reader)?;
+        Ok(aff.into())
+    }
+
+    #[allow(unused_qualifications)]
+    fn deserialize_uncompressed<R: Read>(reader: R) -> Result<Self, SerializationError> {
+        let aff = GroupAffine::
<P>::deserialize_uncompressed(reader)?;
+        Ok(aff.into())
+    }
+
+    #[allow(unused_qualifications)]
+    fn deserialize_unchecked<R: Read>(reader: R) -> Result<Self, SerializationError> {
+        let aff = GroupAffine::<P>
::deserialize_unchecked(reader)?; + Ok(aff.into()) + } +} + +impl ToConstraintField for GroupAffine +where + M::BaseField: ToConstraintField, +{ + #[inline] + fn to_field_elements(&self) -> Option> { + let mut x_fe = self.x.to_field_elements()?; + let y_fe = self.y.to_field_elements()?; + x_fe.extend_from_slice(&y_fe); + Some(x_fe) + } +} + +impl ToConstraintField for GroupProjective +where + M::BaseField: ToConstraintField, +{ + #[inline] + fn to_field_elements(&self) -> Option> { + GroupAffine::from(*self).to_field_elements() + } +} diff --git a/arkworks/algebra/ec/src/msm/fixed_base.rs b/arkworks/algebra/ec/src/msm/fixed_base.rs new file mode 100644 index 00000000..ef5d0f4f --- /dev/null +++ b/arkworks/algebra/ec/src/msm/fixed_base.rs @@ -0,0 +1,96 @@ +use crate::{AffineCurve, ProjectiveCurve}; +use ark_ff::{BigInteger, FpParameters, PrimeField}; +use ark_std::vec::Vec; +use ark_std::{cfg_iter, cfg_iter_mut}; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +pub struct FixedBaseMSM; + +impl FixedBaseMSM { + pub fn get_mul_window_size(num_scalars: usize) -> usize { + if num_scalars < 32 { + 3 + } else { + super::ln_without_floats(num_scalars) + } + } + + pub fn get_window_table( + scalar_size: usize, + window: usize, + g: T, + ) -> Vec> { + let in_window = 1 << window; + let outerc = (scalar_size + window - 1) / window; + let last_in_window = 1 << (scalar_size - (outerc - 1) * window); + + let mut multiples_of_g = vec![vec![T::zero(); in_window]; outerc]; + + let mut g_outer = g; + let mut g_outers = Vec::with_capacity(outerc); + for _ in 0..outerc { + g_outers.push(g_outer); + for _ in 0..window { + g_outer.double_in_place(); + } + } + cfg_iter_mut!(multiples_of_g) + .enumerate() + .take(outerc) + .zip(g_outers) + .for_each(|((outer, multiples_of_g), g_outer)| { + let cur_in_window = if outer == outerc - 1 { + last_in_window + } else { + in_window + }; + + let mut g_inner = T::zero(); + for inner in multiples_of_g.iter_mut().take(cur_in_window) { + *inner = g_inner; + g_inner += &g_outer; + } + }); + cfg_iter!(multiples_of_g) + .map(|s| T::batch_normalization_into_affine(&s)) + .collect() + } + + pub fn windowed_mul( + outerc: usize, + window: usize, + multiples_of_g: &[Vec], + scalar: &T::ScalarField, + ) -> T { + let modulus_size = ::Params::MODULUS_BITS as usize; + let scalar_val = scalar.into_repr().to_bits_le(); + + let mut res = multiples_of_g[0][0].into_projective(); + for outer in 0..outerc { + let mut inner = 0usize; + for i in 0..window { + if outer * window + i < modulus_size && scalar_val[outer * window + i] { + inner |= 1 << i; + } + } + res.add_assign_mixed(&multiples_of_g[outer][inner]); + } + res + } + + pub fn multi_scalar_mul( + scalar_size: usize, + window: usize, + table: &[Vec], + v: &[T::ScalarField], + ) -> Vec { + let outerc = (scalar_size + window - 1) / window; + assert!(outerc <= table.len()); + + cfg_iter!(v) + .map(|e| Self::windowed_mul::(outerc, window, table, e)) + .collect::>() + } +} diff --git a/arkworks/algebra/ec/src/msm/mod.rs b/arkworks/algebra/ec/src/msm/mod.rs new file mode 100644 index 00000000..941dcbb9 --- /dev/null +++ b/arkworks/algebra/ec/src/msm/mod.rs @@ -0,0 +1,13 @@ +mod fixed_base; +mod variable_base; +pub use fixed_base::*; +pub use variable_base::*; + +/// The result of this function is only approximately `ln(a)` +/// [`Explanation of usage`] +/// +/// [`Explanation of usage`]: https://github.com/scipr-lab/zexe/issues/79#issue-556220473 +fn ln_without_floats(a: usize) -> usize { + // log2(a) * ln(2) + 
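+    // ln(2) is approximately 0.693, approximated here by the integer ratio 69/100.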
(ark_std::log2(a) * 69 / 100) as usize +} diff --git a/arkworks/algebra/ec/src/msm/variable_base.rs b/arkworks/algebra/ec/src/msm/variable_base.rs new file mode 100644 index 00000000..57f4fa4d --- /dev/null +++ b/arkworks/algebra/ec/src/msm/variable_base.rs @@ -0,0 +1,107 @@ +use ark_ff::prelude::*; +use ark_std::vec::Vec; + +use crate::{AffineCurve, ProjectiveCurve}; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +pub struct VariableBaseMSM; + +impl VariableBaseMSM { + pub fn multi_scalar_mul( + bases: &[G], + scalars: &[::BigInt], + ) -> G::Projective { + let size = ark_std::cmp::min(bases.len(), scalars.len()); + let scalars = &scalars[..size]; + let bases = &bases[..size]; + let scalars_and_bases_iter = scalars.iter().zip(bases).filter(|(s, _)| !s.is_zero()); + + let c = if size < 32 { + 3 + } else { + super::ln_without_floats(size) + 2 + }; + + let num_bits = ::Params::MODULUS_BITS as usize; + let fr_one = G::ScalarField::one().into_repr(); + + let zero = G::Projective::zero(); + let window_starts: Vec<_> = (0..num_bits).step_by(c).collect(); + + // Each window is of size `c`. + // We divide up the bits 0..num_bits into windows of size `c`, and + // in parallel process each such window. + let window_sums: Vec<_> = ark_std::cfg_into_iter!(window_starts) + .map(|w_start| { + let mut res = zero; + // We don't need the "zero" bucket, so we only have 2^c - 1 buckets. + let mut buckets = vec![zero; (1 << c) - 1]; + // This clone is cheap, because the iterator contains just a + // pointer and an index into the original vectors. + scalars_and_bases_iter.clone().for_each(|(&scalar, base)| { + if scalar == fr_one { + // We only process unit scalars once in the first window. + if w_start == 0 { + res.add_assign_mixed(base); + } + } else { + let mut scalar = scalar; + + // We right-shift by w_start, thus getting rid of the + // lower bits. + scalar.divn(w_start as u32); + + // We mod the remaining bits by 2^{window size}, thus taking `c` bits. + let scalar = scalar.as_ref()[0] % (1 << c); + + // If the scalar is non-zero, we update the corresponding + // bucket. + // (Recall that `buckets` doesn't have a zero bucket.) + if scalar != 0 { + buckets[(scalar - 1) as usize].add_assign_mixed(base); + } + } + }); + + // Compute sum_{i in 0..num_buckets} (sum_{j in i..num_buckets} bucket[j]) + // This is computed below for b buckets, using 2b curve additions. + // + // We could first normalize `buckets` and then use mixed-addition + // here, but that's slower for the kinds of groups we care about + // (Short Weierstrass curves and Twisted Edwards curves). + // In the case of Short Weierstrass curves, + // mixed addition saves ~4 field multiplications per addition. + // However normalization (with the inversion batched) takes ~6 + // field multiplications per element, + // hence batch normalization is a slowdown. + + // `running_sum` = sum_{j in i..num_buckets} bucket[j], + // where we iterate backward from i = num_buckets to 0. + let mut running_sum = G::Projective::zero(); + buckets.into_iter().rev().for_each(|b| { + running_sum += &b; + res += &running_sum; + }); + res + }) + .collect(); + + // We store the sum for the lowest window. + let lowest = *window_sums.first().unwrap(); + + // We're traversing windows from high to low. + lowest + + &window_sums[1..] 
+ .iter() + .rev() + .fold(zero, |mut total, sum_i| { + total += sum_i; + for _ in 0..c { + total.double_in_place(); + } + total + }) + } +} diff --git a/arkworks/algebra/ec/src/wnaf.rs b/arkworks/algebra/ec/src/wnaf.rs new file mode 100644 index 00000000..078e71f4 --- /dev/null +++ b/arkworks/algebra/ec/src/wnaf.rs @@ -0,0 +1,77 @@ +use crate::ProjectiveCurve; +use ark_ff::{BigInteger, PrimeField}; +use ark_std::vec::Vec; + +/// A helper type that contains all the context required for computing +/// a window NAF multiplication of a group element by a scalar. +pub struct WnafContext { + pub window_size: usize, +} + +impl WnafContext { + /// Construct a new context for a window of size `window_size`. + pub fn new(window_size: usize) -> Self { + assert!(window_size >= 2); + assert!(window_size < 64); + Self { window_size } + } + + pub fn table(&self, mut base: G) -> Vec { + let mut table = Vec::with_capacity(1 << (self.window_size - 1)); + let dbl = base.double(); + + for _ in 0..(1 << (self.window_size - 1)) { + table.push(base); + base += &dbl; + } + table + } + + /// Computes scalar multiplication of a group element `g` by `scalar`. + /// + /// This method uses the wNAF algorithm to perform the scalar multiplication; + /// first, it uses `Self::table` to calculate an appropriate table of multiples of `g`, + /// and then uses the wNAF algorithm to compute the scalar multiple. + pub fn mul(&self, g: G, scalar: &G::ScalarField) -> G { + let table = self.table(g); + self.mul_with_table(&table, scalar).unwrap() + } + + /// Computes scalar multiplication of a group element by `scalar`. + /// `base_table` holds precomputed multiples of the group element; it can be generated using `Self::table`. + /// `scalar` is an element of `G::ScalarField`. + /// + /// Returns `None` if the table is too small. 
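+    ///
+    /// A minimal usage sketch (assuming some `G: ProjectiveCurve` element `g`
+    /// and scalar `s` are in scope; the names are illustrative only):
+    ///
+    /// ```ignore
+    /// let ctx = WnafContext::new(4);
+    /// let table = ctx.table(g);
+    /// // Equivalent to ctx.mul(g, &s), but the table can be reused
+    /// // across many multiplications with the same base.
+    /// let result = ctx.mul_with_table(&table, &s).unwrap();
+    /// ```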
+ pub fn mul_with_table( + &self, + base_table: &[G], + scalar: &G::ScalarField, + ) -> Option { + if 1 << (self.window_size - 1) > base_table.len() { + return None; + } + let scalar_wnaf = scalar.into_repr().find_wnaf(self.window_size).unwrap(); + + let mut result = G::zero(); + + let mut found_non_zero = false; + + for n in scalar_wnaf.iter().rev() { + if found_non_zero { + result.double_in_place(); + } + + if *n != 0 { + found_non_zero = true; + + if *n > 0 { + result += &base_table[(n / 2) as usize]; + } else { + result -= &base_table[((-n) / 2) as usize]; + } + } + } + + Some(result) + } +} diff --git a/arkworks/algebra/ff-asm/Cargo.toml b/arkworks/algebra/ff-asm/Cargo.toml new file mode 100644 index 00000000..0d61841f --- /dev/null +++ b/arkworks/algebra/ff-asm/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "ark-ff-asm" +version = "0.3.0" +authors = [ "arkworks contributors" ] +description = "A library for generating x86-64 assembly for finite field multiplication" +homepage = "https://arkworks.rs" +repository = "https://github.com/arkworks-rs/algebra" +documentation = "https://docs.rs/ark-ff-asm/" +keywords = ["cryptography", "finite-fields", "assembly" ] +categories = ["cryptography"] +include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +license = "MIT/Apache-2.0" +edition = "2018" + +[dependencies] +quote = "1.0.0" +syn = { version = "1.0.0", features = ["full", "parsing", "extra-traits"]} + +[lib] +proc-macro = true diff --git a/arkworks/algebra/ff-asm/LICENSE-APACHE b/arkworks/algebra/ff-asm/LICENSE-APACHE new file mode 120000 index 00000000..965b606f --- /dev/null +++ b/arkworks/algebra/ff-asm/LICENSE-APACHE @@ -0,0 +1 @@ +../LICENSE-APACHE \ No newline at end of file diff --git a/arkworks/algebra/ff-asm/LICENSE-MIT b/arkworks/algebra/ff-asm/LICENSE-MIT new file mode 120000 index 00000000..76219eb7 --- /dev/null +++ b/arkworks/algebra/ff-asm/LICENSE-MIT @@ -0,0 +1 @@ +../LICENSE-MIT \ No newline at end of file diff --git a/arkworks/algebra/ff-asm/src/context.rs b/arkworks/algebra/ff-asm/src/context.rs new file mode 100644 index 00000000..f8bffcf9 --- /dev/null +++ b/arkworks/algebra/ff-asm/src/context.rs @@ -0,0 +1,121 @@ +use std::collections::HashMap; + +pub const REG_CLOBBER: [&str; 8] = ["r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"]; + +#[derive(Clone)] +pub struct Context { + ctx_string: String, + declarations: HashMap, + declaration_vec: Vec, + clobbers: Vec, +} + +#[derive(Clone)] +struct Declare { + ty: String, + var: String, + pos: usize, + token: String, +} + +impl Context { + pub fn new() -> Self { + Context { + ctx_string: String::new(), + declarations: HashMap::new(), + declaration_vec: Vec::new(), + clobbers: Vec::new(), + } + } + + fn append(&mut self, other: &str) { + self.ctx_string += other; + } + + pub fn get_string(&self) -> String { + self.ctx_string.clone() + } + + pub fn get(self, id: &str) -> String { + self.declarations.get(id).unwrap().token.clone() + } + + pub fn try_get(self, id: &str, fallback_id: &str) -> String { + self.declarations + .get(id) + .map(|dec| dec.token.clone()) + .unwrap_or_else(|| self.get(fallback_id)) + } + + pub fn add_declaration(&mut self, id: &str, ty: &str, var: &str) { + self.declarations.insert( + id.to_string(), + Declare { + ty: ty.to_string(), + var: var.to_string(), + pos: self.declarations.len(), + token: format!("${}", self.declarations.len()), + }, + ); + self.declaration_vec.push(Declare { + ty: ty.to_string(), + var: var.to_string(), + pos: self.declaration_vec.len(), + 
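+            // Mirror the entry in the ordered vec so the positional tokens
+            // ($0, $1, ...) emitted during codegen stay stable.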
token: format!("${}", self.declaration_vec.len()), + }); + } + + pub fn add_buffer(&mut self, extra_reg: usize) { + self.append(&format!( + " + let mut spill_buffer = core::mem::MaybeUninit::<[u64; {}]>::uninit();", + extra_reg + )); + } + + pub fn add_llvm_asm(&mut self, ctx_string: String) { + self.append(&format!( + " + unsafe {{ + llvm_asm!({} + : + :", + ctx_string + )); + } + + pub fn add_clobber_from_vec(&mut self, clobbers: Vec<&str>) { + for clobber in clobbers { + self.clobbers.push(format!(" \"{}\"", clobber)); + } + } + + pub fn add_clobber(&mut self, clobber: &str) { + self.clobbers.push(format!(" \"{}\"", clobber)); + } + + pub fn build(&mut self) { + for i in 0..self.declarations.len() { + let dec = &self.declaration_vec[i]; + let last = i == self.declarations.len() - 1; + let dec = &format!( + " + \"{}\"({}){} // {}", + dec.ty, + dec.var, + if last { "" } else { "," }, + dec.pos + ); + self.append(dec); + } + let clobbers = self.clobbers.join(","); + self.append(&format!( + " + : {} + ); + }} + ", + clobbers + )); + } +} diff --git a/arkworks/algebra/ff-asm/src/lib.rs b/arkworks/algebra/ff-asm/src/lib.rs new file mode 100644 index 00000000..70442ea8 --- /dev/null +++ b/arkworks/algebra/ff-asm/src/lib.rs @@ -0,0 +1,321 @@ +#![warn(unused, future_incompatible, nonstandard_style, rust_2018_idioms)] +#![forbid(unsafe_code)] +#![recursion_limit = "128"] + +use proc_macro::TokenStream; +use syn::{ + parse::{Parse, ParseStream}, + Expr, Item, ItemFn, +}; + +#[macro_use] +mod utils; +use utils::*; + +mod context; +use context::*; + +mod unroll; + +use std::cell::RefCell; + +const MAX_REGS: usize = 6; + +/// Attribute used to unroll for loops found inside a function block. +#[proc_macro_attribute] +pub fn unroll_for_loops(_meta: TokenStream, input: TokenStream) -> TokenStream { + let item: Item = syn::parse(input).expect("Failed to parse input."); + + if let Item::Fn(item_fn) = item { + let new_block = { + let &ItemFn { + block: ref box_block, + .. + } = &item_fn; + unroll::unroll_in_block(&**box_block) + }; + let new_item = Item::Fn(ItemFn { + block: Box::new(new_block), + ..item_fn + }); + quote::quote! ( #new_item ).into() + } else { + quote::quote! ( #item ).into() + } +} + +struct AsmMulInput { + num_limbs: Box, + a: Expr, + b: Expr, +} + +impl Parse for AsmMulInput { + fn parse(input: ParseStream<'_>) -> syn::Result { + let input = input + .parse_terminated::<_, syn::Token![,]>(Expr::parse)? + .into_iter() + .collect::>(); + let num_limbs = input[0].clone(); + let a = input[1].clone(); + let b = input[2].clone(); + + let num_limbs = if let Expr::Group(syn::ExprGroup { expr, .. }) = num_limbs { + expr + } else { + Box::new(num_limbs) + }; + let output = Self { num_limbs, a, b }; + Ok(output) + } +} + +#[proc_macro] +pub fn x86_64_asm_mul(input: TokenStream) -> TokenStream { + let AsmMulInput { num_limbs, a, b } = syn::parse_macro_input!(input); + let num_limbs = if let Expr::Lit(syn::ExprLit { + lit: syn::Lit::Int(ref lit_int), + .. + }) = &*num_limbs + { + lit_int.base10_parse::().unwrap() + } else { + panic!("The number of limbs must be a literal"); + }; + if num_limbs <= 6 && num_limbs <= 3 * MAX_REGS { + let impl_block = generate_impl(num_limbs, true); + + let inner_ts: Expr = syn::parse_str(&impl_block).unwrap(); + let ts = quote::quote! 
{ + let a = &mut #a; + let b = &#b; + #inner_ts + }; + ts.into() + } else { + TokenStream::new() + } +} + +struct AsmSquareInput { + num_limbs: Box, + a: Expr, +} + +impl Parse for AsmSquareInput { + fn parse(input: ParseStream<'_>) -> syn::Result { + let input = input + .parse_terminated::<_, syn::Token![,]>(Expr::parse)? + .into_iter() + .collect::>(); + let num_limbs = input[0].clone(); + let a = input[1].clone(); + + let num_limbs = if let Expr::Group(syn::ExprGroup { expr, .. }) = num_limbs { + expr + } else { + Box::new(num_limbs) + }; + let output = Self { num_limbs, a }; + Ok(output) + } +} + +#[proc_macro] +pub fn x86_64_asm_square(input: TokenStream) -> TokenStream { + let AsmSquareInput { num_limbs, a } = syn::parse_macro_input!(input); + let num_limbs = if let Expr::Lit(syn::ExprLit { + lit: syn::Lit::Int(ref lit_int), + .. + }) = &*num_limbs + { + lit_int.base10_parse::().unwrap() + } else { + panic!("The number of limbs must be a literal"); + }; + if num_limbs <= 6 && num_limbs <= 3 * MAX_REGS { + let impl_block = generate_impl(num_limbs, false); + + let inner_ts: Expr = syn::parse_str(&impl_block).unwrap(); + let ts = quote::quote! { + let a = &mut #a; + #inner_ts + }; + ts.into() + } else { + TokenStream::new() + } +} + +fn generate_llvm_asm_mul_string( + a: &str, + b: &str, + modulus: &str, + zero: &str, + mod_prime: &str, + limbs: usize, +) -> String { + let llvm_asm_string = RefCell::new(String::new()); + + let begin = || llvm_asm_string.borrow_mut().push_str("\""); + + let end = || { + llvm_asm_string.borrow_mut().push_str( + " + \"", + ) + }; + + let _comment = |comment: &str| { + llvm_asm_string + .borrow_mut() + .push_str(&format!(" // {}", comment)); + }; + + let mulxq = |a: &str, b: &str, c: &str| { + llvm_asm_string.borrow_mut().push_str(&format!( + " + mulxq {}, {}, {}", + a, b, c + )); + }; + + let adcxq = |a: &str, b: &str| { + llvm_asm_string.borrow_mut().push_str(&format!( + " + adcxq {}, {}", + a, b + )); + }; + + let adoxq = |a: &str, b: &str| { + llvm_asm_string.borrow_mut().push_str(&format!( + " + adoxq {}, {}", + a, b + )); + }; + + let movq = |a: &str, b: &str| { + llvm_asm_string.borrow_mut().push_str(&format!( + " + movq {}, {}", + a, b + )); + }; + + let xorq = |a: &str, b: &str| { + llvm_asm_string.borrow_mut().push_str(&format!( + " + xorq {}, {}", + a, b + )); + }; + + macro_rules! mul_1 { + ($a:expr, $b:ident, $zero:ident, $limbs:expr) => { + movq($a, RDX); + mulxq($b[0], R[0], R[1]); + for j in 1..$limbs - 1 { + mulxq($b[j], RAX, R[((j + 1) % $limbs)]); + adcxq(RAX, R[j]); + } + mulxq($b[$limbs - 1], RAX, RCX); + movq($zero, RBX); + adcxq(RAX, R[$limbs - 1]); + adcxq(RBX, RCX); + }; + } + + macro_rules! mul_add_1 { + ($a:ident, $b:ident, $zero:ident, $i:ident, $limbs:expr) => { + movq($a[$i], RDX); + for j in 0..$limbs - 1 { + mulxq($b[j], RAX, RBX); + adcxq(RAX, R[(j + $i) % $limbs]); + adoxq(RBX, R[(j + $i + 1) % $limbs]); + } + mulxq($b[$limbs - 1], RAX, RCX); + movq($zero, RBX); + adcxq(RAX, R[($i + $limbs - 1) % $limbs]); + adoxq(RBX, RCX); + adcxq(RBX, RCX); + }; + } + + macro_rules! 
mul_add_shift_1 { + ($a:ident, $mod_prime:ident, $zero:ident, $i:ident, $limbs:expr) => { + movq($mod_prime, RDX); + mulxq(R[$i], RDX, RAX); + mulxq($a[0], RAX, RBX); + adcxq(R[$i % $limbs], RAX); + adoxq(RBX, R[($i + 1) % $limbs]); + for j in 1..$limbs - 1 { + mulxq($a[j], RAX, RBX); + adcxq(RAX, R[(j + $i) % $limbs]); + adoxq(RBX, R[(j + $i + 1) % $limbs]); + } + mulxq($a[$limbs - 1], RAX, R[$i % $limbs]); + movq($zero, RBX); + adcxq(RAX, R[($i + $limbs - 1) % $limbs]); + adoxq(RCX, R[$i % $limbs]); + adcxq(RBX, R[$i % $limbs]); + }; + } + begin(); + { + reg!(a0, a1, a, limbs); + reg!(b0, b1, b, limbs); + reg!(m, m1, modulus, limbs); + + xorq(RCX, RCX); + for i in 0..limbs { + if i == 0 { + mul_1!(a1[0], b1, zero, limbs); + } else { + mul_add_1!(a1, b1, zero, i, limbs); + } + mul_add_shift_1!(m1, mod_prime, zero, i, limbs); + } + + for i in 0..limbs { + movq(R[i], a1[i]); + } + } + end(); + llvm_asm_string.into_inner() +} + +fn generate_impl(num_limbs: usize, is_mul: bool) -> String { + let mut ctx = Context::new(); + ctx.add_declaration("a", "r", "a"); + if is_mul { + ctx.add_declaration("b", "r", "b"); + } + ctx.add_declaration("modulus", "r", "&P::MODULUS.0"); + ctx.add_declaration("0", "i", "0u64"); + ctx.add_declaration("mod_prime", "i", "P::INV"); + + if num_limbs > MAX_REGS { + ctx.add_buffer(2 * num_limbs); + ctx.add_declaration("buf", "r", "&mut spill_buffer"); + } + + let llvm_asm_string = generate_llvm_asm_mul_string( + &ctx.clone().get("a"), + &ctx.clone().try_get("b", "a"), + &ctx.clone().get("modulus"), + &ctx.clone().get("0"), + &ctx.clone().get("mod_prime"), + num_limbs, + ); + + ctx.add_llvm_asm(llvm_asm_string); + ctx.add_clobber_from_vec(vec!["rcx", "rbx", "rdx", "rax"]); + for clobber in REG_CLOBBER.iter().take(std::cmp::min(num_limbs, 8)) { + ctx.add_clobber(clobber); + } + ctx.add_clobber_from_vec(vec!["cc", "memory"]); + ctx.build(); + format!("{{ {} }}", ctx.get_string()) +} diff --git a/arkworks/algebra/ff-asm/src/unroll.rs b/arkworks/algebra/ff-asm/src/unroll.rs new file mode 100644 index 00000000..0c4235cc --- /dev/null +++ b/arkworks/algebra/ff-asm/src/unroll.rs @@ -0,0 +1,197 @@ +//! An attribute-like procedural macro for unrolling for loops with integer literal bounds. +//! +//! This crate provides the [`unroll_for_loops`](../attr.unroll_for_loops.html) attribute-like macro that can be applied to +//! functions containing for-loops with integer bounds. This macro looks for loops to unroll and +//! unrolls them at compile time. +//! +//! +//! ## Usage +//! +//! Just add `#[unroll_for_loops]` above the function whose for loops you would like to unroll. +//! Currently all for loops with integer literal bounds will be unrolled, although this macro +//! currently can't see inside complex code (e.g. for loops within closures). +//! +//! +//! ## Example +//! +//! The following function computes a matrix-vector product and returns the result as an array. +//! Both of the inner for-loops are unrolled when `#[unroll_for_loops]` is applied. +//! +//! ```rust +//! use ark_ff_asm::unroll_for_loops; +//! +//! #[unroll_for_loops] +//! fn mtx_vec_mul(mtx: &[[f64; 5]; 5], vec: &[f64; 5]) -> [f64; 5] { +//! let mut out = [0.0; 5]; +//! for col in 0..5 { +//! for row in 0..5 { +//! out[row] += mtx[col][row] * vec[col]; +//! } +//! } +//! out +//! } +//! ``` +//! +//! This code was adapted from the [`unroll`](https://crates.io/crates/unroll) crate. 
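+//!
+//! Schematically (a sketch, not the exact generated tokens), each unrolled
+//! iteration of the inner loop above becomes a block that redefines the loop
+//! index as a constant:
+//!
+//! ```text
+//! { #[allow(non_upper_case_globals)] const row: usize = 0; out[row] += mtx[col][row] * vec[col]; }
+//! { #[allow(non_upper_case_globals)] const row: usize = 1; out[row] += mtx[col][row] * vec[col]; }
+//! // ...and so on through row = 4
+//! ```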
+ +use syn::token::Brace; +use syn::{ + parse_quote, Block, Expr, ExprBlock, ExprForLoop, ExprIf, ExprLet, ExprLit, ExprRange, Lit, + Pat, PatIdent, RangeLimits, Stmt, +}; + +/// Routine to unroll for loops within a block +pub(crate) fn unroll_in_block(block: &Block) -> Block { + let &Block { + ref brace_token, + ref stmts, + } = block; + let mut new_stmts = Vec::new(); + for stmt in stmts.iter() { + if let Stmt::Expr(expr) = stmt { + new_stmts.push(Stmt::Expr(unroll(expr))); + } else if let Stmt::Semi(expr, semi) = stmt { + new_stmts.push(Stmt::Semi(unroll(expr), *semi)); + } else { + new_stmts.push((*stmt).clone()); + } + } + Block { + brace_token: *brace_token, + stmts: new_stmts, + } +} + +/// Routine to unroll a for loop statement, or return the statement unchanged if it's not a for +/// loop. +fn unroll(expr: &Expr) -> Expr { + // impose a scope that we can break out of so we can return stmt without copying it. + if let Expr::ForLoop(for_loop) = expr { + let ExprForLoop { + ref attrs, + ref label, + ref pat, + expr: ref range_expr, + ref body, + .. + } = *for_loop; + + let new_body = unroll_in_block(&*body); + + let forloop_with_body = |body| { + Expr::ForLoop(ExprForLoop { + body, + ..(*for_loop).clone() + }) + }; + + if let Pat::Ident(PatIdent { + ref by_ref, + ref mutability, + ref ident, + ref subpat, + .. + }) = *pat + { + // Don't know how to deal with these so skip and return the original. + if !by_ref.is_none() || !mutability.is_none() || !subpat.is_none() { + return forloop_with_body(new_body); + } + let idx = ident; // got the index variable name + + if let Expr::Range(ExprRange { + from: ref mb_box_from, + ref limits, + to: ref mb_box_to, + .. + }) = **range_expr + { + // Parse mb_box_from + let begin = if let Some(ref box_from) = *mb_box_from { + if let Expr::Lit(ExprLit { + lit: Lit::Int(ref lit_int), + .. + }) = **box_from + { + lit_int.base10_parse::().unwrap() + } else { + return forloop_with_body(new_body); + } + } else { + 0 + }; + + // Parse mb_box_to + let end = if let Some(ref box_to) = *mb_box_to { + if let Expr::Lit(ExprLit { + lit: Lit::Int(ref lit_int), + .. + }) = **box_to + { + lit_int.base10_parse::().unwrap() + } else { + return forloop_with_body(new_body); + } + } else { + // we need to know where the limit is to know how much to unroll by. + return forloop_with_body(new_body); + } + if let RangeLimits::Closed(_) = limits { + 1 + } else { + 0 + }; + + let mut stmts = Vec::new(); + for i in begin..end { + let declare_i: Stmt = parse_quote! { + #[allow(non_upper_case_globals)] + const #idx: usize = #i; + }; + let mut augmented_body = new_body.clone(); + augmented_body.stmts.insert(0, declare_i); + stmts.push(parse_quote! { #augmented_body }); + } + let block = Block { + brace_token: Brace::default(), + stmts, + }; + Expr::Block(ExprBlock { + attrs: attrs.clone(), + label: label.clone(), + block, + }) + } else { + forloop_with_body(new_body) + } + } else { + forloop_with_body(new_body) + } + } else if let Expr::If(if_expr) = expr { + let ExprIf { + ref cond, + ref then_branch, + ref else_branch, + .. + } = *if_expr; + Expr::If(ExprIf { + cond: Box::new(unroll(&**cond)), + then_branch: unroll_in_block(&*then_branch), + else_branch: else_branch.as_ref().map(|x| (x.0, Box::new(unroll(&*x.1)))), + ..(*if_expr).clone() + }) + } else if let Expr::Let(let_expr) = expr { + let ExprLet { ref expr, .. 
} = *let_expr; + Expr::Let(ExprLet { + expr: Box::new(unroll(&**expr)), + ..(*let_expr).clone() + }) + } else if let Expr::Block(expr_block) = expr { + let ExprBlock { ref block, .. } = *expr_block; + Expr::Block(ExprBlock { + block: unroll_in_block(&*block), + ..(*expr_block).clone() + }) + } else { + (*expr).clone() + } +} diff --git a/arkworks/algebra/ff-asm/src/utils.rs b/arkworks/algebra/ff-asm/src/utils.rs new file mode 100644 index 00000000..b1b0f9f4 --- /dev/null +++ b/arkworks/algebra/ff-asm/src/utils.rs @@ -0,0 +1,20 @@ +pub const RAX: &str = "%rax"; +pub const RBX: &str = "%rbx"; +pub const RCX: &str = "%rcx"; +pub const RDX: &str = "%rdx"; +// pub const RDI: &'static str = "%rdi"; +// pub const RSI: &'static str = "%rsi"; +pub const R: [&str; 8] = ["%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15"]; + +macro_rules! reg { + ($a_0:ident, $a_1:ident, $a:ident, $range:expr) => { + let mut $a_0 = Vec::new(); + let mut $a_1 = Vec::new(); + for i in 0..$range { + $a_0.push(format!("{}({})", i * 8, $a)); + } + for i in 0..$range { + $a_1.push(&*$a_0[i]); + } + }; +} diff --git a/arkworks/algebra/ff-macros/Cargo.toml b/arkworks/algebra/ff-macros/Cargo.toml new file mode 100644 index 00000000..e199abd1 --- /dev/null +++ b/arkworks/algebra/ff-macros/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "ark-ff-macros" +version = "0.3.0" +authors = [ "arkworks contributors" ] +description = "A library for generating x86-64 assembly for finite field multiplication" +homepage = "https://arkworks.rs" +repository = "https://github.com/arkworks-rs/algebra" +documentation = "https://docs.rs/ark-ff-asm/" +keywords = ["cryptography", "finite-fields", "assembly" ] +categories = ["cryptography"] +include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +license = "MIT/Apache-2.0" +edition = "2018" + +[dependencies] +quote = "1.0.0" +syn = { version = "1.0.0", features = ["full", "parsing", "extra-traits"]} +num-bigint = { version = "0.4", default-features = false } +num-traits = { version = "0.2", default-features = false } + +[lib] +proc-macro = true diff --git a/arkworks/algebra/ff-macros/LICENSE-APACHE b/arkworks/algebra/ff-macros/LICENSE-APACHE new file mode 120000 index 00000000..965b606f --- /dev/null +++ b/arkworks/algebra/ff-macros/LICENSE-APACHE @@ -0,0 +1 @@ +../LICENSE-APACHE \ No newline at end of file diff --git a/arkworks/algebra/ff-macros/LICENSE-MIT b/arkworks/algebra/ff-macros/LICENSE-MIT new file mode 120000 index 00000000..76219eb7 --- /dev/null +++ b/arkworks/algebra/ff-macros/LICENSE-MIT @@ -0,0 +1 @@ +../LICENSE-MIT \ No newline at end of file diff --git a/arkworks/algebra/ff-macros/src/lib.rs b/arkworks/algebra/ff-macros/src/lib.rs new file mode 100644 index 00000000..07348d67 --- /dev/null +++ b/arkworks/algebra/ff-macros/src/lib.rs @@ -0,0 +1,81 @@ +#![warn(unused, future_incompatible, nonstandard_style, rust_2018_idioms)] +#![forbid(unsafe_code)] + +use num_bigint::{BigInt, Sign}; +use proc_macro::TokenStream; +use std::str::FromStr; +use syn::{Expr, Lit}; + +fn parse_string(input: TokenStream) -> Option { + let input: Expr = syn::parse(input).unwrap(); + let input = if let Expr::Group(syn::ExprGroup { expr, .. 
}) = input { + expr + } else { + panic!("could not parse"); + }; + match *input { + Expr::Lit(expr_lit) => match expr_lit.lit { + Lit::Str(s) => Some(s.value()), + _ => None, + }, + _ => None, + } +} + +fn str_to_limbs(num: &str) -> (bool, Vec) { + let (sign, digits) = BigInt::from_str(num) + .expect("could not parse to bigint") + .to_radix_le(16); + let limbs = digits + .chunks(16) + .map(|chunk| { + let mut this = 0u64; + for (i, hexit) in chunk.iter().enumerate() { + this += (*hexit as u64) << (4 * i); + } + format!("{}u64", this) + }) + .collect::>(); + + let sign_is_positive = sign != Sign::Minus; + (sign_is_positive, limbs) +} + +#[proc_macro] +pub fn to_sign_and_limbs(input: TokenStream) -> TokenStream { + let num = parse_string(input).expect("expected decimal string"); + let (is_positive, limbs) = str_to_limbs(&num); + + let limbs: String = limbs.join(", "); + let limbs_and_sign = format!("({}", is_positive) + ", [" + &limbs + "])"; + let tuple: Expr = syn::parse_str(&limbs_and_sign).unwrap(); + quote::quote!(#tuple).into() +} + +#[test] +fn test_str_to_limbs() { + let (is_positive, limbs) = str_to_limbs("-5"); + assert!(!is_positive); + assert_eq!(&limbs, &["5u64".to_string()]); + + let (is_positive, limbs) = str_to_limbs("100"); + assert!(is_positive); + assert_eq!(&limbs, &["100u64".to_string()]); + + let large_num = -((1i128 << 64) + 101234001234i128); + let (is_positive, limbs) = str_to_limbs(&large_num.to_string()); + assert!(!is_positive); + assert_eq!(&limbs, &["101234001234u64".to_string(), "1u64".to_string()]); + + let num = "80949648264912719408558363140637477264845294720710499478137287262712535938301461879813459410946"; + let (is_positive, limbs) = str_to_limbs(&num.to_string()); + assert!(is_positive); + let expected_limbs = [ + format!("{}u64", 0x8508c00000000002u64), + format!("{}u64", 0x452217cc90000000u64), + format!("{}u64", 0xc5ed1347970dec00u64), + format!("{}u64", 0x619aaf7d34594aabu64), + format!("{}u64", 0x9b3af05dd14f6ecu64), + ]; + assert_eq!(&limbs, &expected_limbs); +} diff --git a/arkworks/algebra/ff/Cargo.toml b/arkworks/algebra/ff/Cargo.toml new file mode 100644 index 00000000..12f7c251 --- /dev/null +++ b/arkworks/algebra/ff/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "ark-ff" +version = "0.3.0" +authors = [ "arkworks contributors" ] +description = "A library for finite fields" +homepage = "https://arkworks.rs" +repository = "https://github.com/arkworks-rs/algebra" +documentation = "https://docs.rs/ark-ff/" +keywords = ["cryptography", "finite-fields" ] +categories = ["cryptography"] +include = ["Cargo.toml", "build.rs", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +license = "MIT/Apache-2.0" +edition = "2018" +build = "build.rs" + +[dependencies] +ark-ff-asm = { version = "^0.3.0", path = "../ff-asm" } +ark-ff-macros = { version = "^0.3.0", path = "../ff-macros" } +ark-std = { path = "../../std", version = "^0.3.0", default-features = false } +ark-serialize = { version = "^0.3.0", path = "../serialize", default-features = false } +derivative = { version = "2", features = ["use_core"] } +num-traits = { version = "0.2", default-features = false } +paste = "1.0" +rayon = { version = "1", optional = true } +zeroize = { version = "1", default-features = false, features = ["zeroize_derive"] } +num-bigint = { version = "0.4.0", default-features = false } + +[build-dependencies] +rustc_version = "0.3" + +[features] +default = [] +std = [ "ark-std/std", "ark-serialize/std" ] +parallel = [ "std", "rayon", "ark-std/parallel" ] +asm = [] diff 
--git a/arkworks/algebra/ff/LICENSE-APACHE b/arkworks/algebra/ff/LICENSE-APACHE new file mode 120000 index 00000000..965b606f --- /dev/null +++ b/arkworks/algebra/ff/LICENSE-APACHE @@ -0,0 +1 @@ +../LICENSE-APACHE \ No newline at end of file diff --git a/arkworks/algebra/ff/LICENSE-MIT b/arkworks/algebra/ff/LICENSE-MIT new file mode 120000 index 00000000..76219eb7 --- /dev/null +++ b/arkworks/algebra/ff/LICENSE-MIT @@ -0,0 +1 @@ +../LICENSE-MIT \ No newline at end of file diff --git a/arkworks/algebra/ff/build.rs b/arkworks/algebra/ff/build.rs new file mode 100644 index 00000000..bc3a3772 --- /dev/null +++ b/arkworks/algebra/ff/build.rs @@ -0,0 +1,24 @@ +extern crate rustc_version; +use rustc_version::{version, version_meta, Channel, Version}; + +fn main() { + println!("cargo:rerun-if-changed=build.rs"); + + let is_nightly = version_meta().expect("nightly check failed").channel == Channel::Nightly; + + let should_use_asm = cfg!(all( + feature = "asm", + target_feature = "bmi2", + target_feature = "adx", + target_arch = "x86_64" + )) && is_nightly; + if should_use_asm { + println!("cargo:rustc-cfg=use_asm"); + } + + // TODO: remove this once RFC 2495 ships + if version().expect("Installed rustc version unparseable!") < Version::parse("1.51.0").unwrap() + { + panic!("This code base uses const generics and requires a Rust compiler version greater or equal to 1.51.0"); + } +} diff --git a/arkworks/algebra/ff/src/biginteger/arithmetic.rs b/arkworks/algebra/ff/src/biginteger/arithmetic.rs new file mode 100644 index 00000000..4d64e4ff --- /dev/null +++ b/arkworks/algebra/ff/src/biginteger/arithmetic.rs @@ -0,0 +1,106 @@ +use ark_std::vec::Vec; + +/// Calculate a + b + carry, returning the sum and modifying the +/// carry value. +macro_rules! adc { + ($a:expr, $b:expr, &mut $carry:expr$(,)?) => {{ + let tmp = ($a as u128) + ($b as u128) + ($carry as u128); + + $carry = (tmp >> 64) as u64; + + tmp as u64 + }}; +} + +/// Calculate a + (b * c) + carry, returning the least significant digit +/// and setting carry to the most significant digit. +macro_rules! mac_with_carry { + ($a:expr, $b:expr, $c:expr, &mut $carry:expr$(,)?) => {{ + let tmp = ($a as u128) + ($b as u128 * $c as u128) + ($carry as u128); + + $carry = (tmp >> 64) as u64; + + tmp as u64 + }}; +} + +/// Calculate a - b - borrow, returning the result and modifying +/// the borrow value. +macro_rules! sbb { + ($a:expr, $b:expr, &mut $borrow:expr$(,)?) 
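+    // a - b - borrow is computed as 2^64 + a - b - borrow; the 65th bit of that
+    // sum is clear exactly when a real borrow occurred, making the new borrow 1.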
=> {{ + let tmp = (1u128 << 64) + ($a as u128) - ($b as u128) - ($borrow as u128); + + $borrow = if tmp >> 64 == 0 { 1 } else { 0 }; + + tmp as u64 + }}; +} + +#[inline(always)] +pub(crate) fn mac(a: u64, b: u64, c: u64, carry: &mut u64) -> u64 { + let tmp = (u128::from(a)) + u128::from(b) * u128::from(c); + + *carry = (tmp >> 64) as u64; + + tmp as u64 +} + +#[inline(always)] +pub(crate) fn mac_discard(a: u64, b: u64, c: u64, carry: &mut u64) { + let tmp = (u128::from(a)) + u128::from(b) * u128::from(c); + + *carry = (tmp >> 64) as u64; +} + +pub fn find_wnaf(num: &[u64]) -> Vec { + let is_zero = |num: &[u64]| num.iter().all(|x| *x == 0u64); + let is_odd = |num: &[u64]| num[0] & 1 == 1; + let sub_noborrow = |num: &mut [u64], z: u64| { + let mut other = vec![0u64; num.len()]; + other[0] = z; + let mut borrow = 0; + + for (a, b) in num.iter_mut().zip(other) { + *a = sbb!(*a, b, &mut borrow); + } + }; + let add_nocarry = |num: &mut [u64], z: u64| { + let mut other = vec![0u64; num.len()]; + other[0] = z; + let mut carry = 0; + + for (a, b) in num.iter_mut().zip(other) { + *a = adc!(*a, b, &mut carry); + } + }; + let div2 = |num: &mut [u64]| { + let mut t = 0; + for i in num.iter_mut().rev() { + let t2 = *i << 63; + *i >>= 1; + *i |= t; + t = t2; + } + }; + + let mut num = num.to_vec(); + let mut res = vec![]; + + while !is_zero(&num) { + let z: i64; + if is_odd(&num) { + z = 2 - (num[0] % 4) as i64; + if z >= 0 { + sub_noborrow(&mut num, z as u64) + } else { + add_nocarry(&mut num, (-z) as u64) + } + } else { + z = 0; + } + res.push(z); + div2(&mut num); + } + + res +} diff --git a/arkworks/algebra/ff/src/biginteger/macros.rs b/arkworks/algebra/ff/src/biginteger/macros.rs new file mode 100644 index 00000000..23e936e9 --- /dev/null +++ b/arkworks/algebra/ff/src/biginteger/macros.rs @@ -0,0 +1,394 @@ +macro_rules! 
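+// Generates a fixed-width big-integer type wrapping [u64; $num_limbs]
+// (little-endian limb order), plus its arithmetic, bit, byte, ordering,
+// and (de)serialization impls.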
bigint_impl { + ($name:ident, $num_limbs:expr) => { + #[derive(Copy, Clone, PartialEq, Eq, Debug, Default, Hash, Zeroize)] + pub struct $name(pub [u64; $num_limbs]); + + impl $name { + pub const fn new(value: [u64; $num_limbs]) -> Self { + $name(value) + } + } + + impl BigInteger for $name { + const NUM_LIMBS: usize = $num_limbs; + + #[inline] + #[ark_ff_asm::unroll_for_loops] + fn add_nocarry(&mut self, other: &Self) -> bool { + let mut carry = 0; + + for i in 0..$num_limbs { + #[cfg(all(target_arch = "x86_64", feature = "asm"))] + #[allow(unsafe_code)] + unsafe { + use core::arch::x86_64::_addcarry_u64; + carry = _addcarry_u64(carry, self.0[i], other.0[i], &mut self.0[i]) + }; + + #[cfg(not(all(target_arch = "x86_64", feature = "asm")))] + { + self.0[i] = adc!(self.0[i], other.0[i], &mut carry); + } + } + + carry != 0 + } + + #[inline] + #[ark_ff_asm::unroll_for_loops] + fn sub_noborrow(&mut self, other: &Self) -> bool { + let mut borrow = 0; + + for i in 0..$num_limbs { + #[cfg(all(target_arch = "x86_64", feature = "asm"))] + #[allow(unsafe_code)] + unsafe { + use core::arch::x86_64::_subborrow_u64; + borrow = _subborrow_u64(borrow, self.0[i], other.0[i], &mut self.0[i]) + }; + + #[cfg(not(all(target_arch = "x86_64", feature = "asm")))] + { + self.0[i] = sbb!(self.0[i], other.0[i], &mut borrow); + } + } + + borrow != 0 + } + + #[inline] + #[ark_ff_asm::unroll_for_loops] + #[allow(unused)] + fn mul2(&mut self) { + #[cfg(all(target_arch = "x86_64", feature = "asm"))] + #[allow(unsafe_code)] + { + let mut carry = 0; + + for i in 0..$num_limbs { + unsafe { + use core::arch::x86_64::_addcarry_u64; + carry = _addcarry_u64(carry, self.0[i], self.0[i], &mut self.0[i]) + }; + } + } + + #[cfg(not(all(target_arch = "x86_64", feature = "asm")))] + { + let mut last = 0; + for i in 0..$num_limbs { + let a = &mut self.0[i]; + let tmp = *a >> 63; + *a <<= 1; + *a |= last; + last = tmp; + } + } + } + + #[inline] + #[ark_ff_asm::unroll_for_loops] + fn muln(&mut self, mut n: u32) { + if n >= 64 * $num_limbs { + *self = Self::from(0); + return; + } + + while n >= 64 { + let mut t = 0; + for i in 0..$num_limbs { + core::mem::swap(&mut t, &mut self.0[i]); + } + n -= 64; + } + + if n > 0 { + let mut t = 0; + #[allow(unused)] + for i in 0..$num_limbs { + let a = &mut self.0[i]; + let t2 = *a >> (64 - n); + *a <<= n; + *a |= t; + t = t2; + } + } + } + + #[inline] + #[ark_ff_asm::unroll_for_loops] + #[allow(unused)] + fn div2(&mut self) { + let mut t = 0; + for i in 0..$num_limbs { + let a = &mut self.0[$num_limbs - i - 1]; + let t2 = *a << 63; + *a >>= 1; + *a |= t; + t = t2; + } + } + + #[inline] + #[ark_ff_asm::unroll_for_loops] + fn divn(&mut self, mut n: u32) { + if n >= 64 * $num_limbs { + *self = Self::from(0); + return; + } + + while n >= 64 { + let mut t = 0; + for i in 0..$num_limbs { + core::mem::swap(&mut t, &mut self.0[$num_limbs - i - 1]); + } + n -= 64; + } + + if n > 0 { + let mut t = 0; + #[allow(unused)] + for i in 0..$num_limbs { + let a = &mut self.0[$num_limbs - i - 1]; + let t2 = *a << (64 - n); + *a >>= n; + *a |= t; + t = t2; + } + } + } + + #[inline] + fn is_odd(&self) -> bool { + self.0[0] & 1 == 1 + } + + #[inline] + fn is_even(&self) -> bool { + !self.is_odd() + } + + #[inline] + fn is_zero(&self) -> bool { + for i in 0..$num_limbs { + if self.0[i] != 0 { + return false; + } + } + true + } + + #[inline] + fn num_bits(&self) -> u32 { + let mut ret = $num_limbs * 64; + for i in self.0.iter().rev() { + let leading = i.leading_zeros(); + ret -= leading; + if leading != 64 { + break; + } + 
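+                    // Once a non-zero limb is seen (scanning from the most
+                    // significant end) the loop exits above: that limb's leading
+                    // zeros have been subtracted, and every lower limb still
+                    // counts as a full 64 bits.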
} + + ret + } + + #[inline] + fn get_bit(&self, i: usize) -> bool { + if i >= 64 * $num_limbs { + false + } else { + let limb = i / 64; + let bit = i - (64 * limb); + (self.0[limb] & (1 << bit)) != 0 + } + } + + #[inline] + fn from_bits_be(bits: &[bool]) -> Self { + let mut res = Self::default(); + let mut acc: u64 = 0; + + let mut bits = bits.to_vec(); + bits.reverse(); + for (i, bits64) in bits.chunks(64).enumerate() { + for bit in bits64.iter().rev() { + acc <<= 1; + acc += *bit as u64; + } + res.0[i] = acc; + acc = 0; + } + res + } + + fn from_bits_le(bits: &[bool]) -> Self { + let mut res = Self::default(); + let mut acc: u64 = 0; + + let bits = bits.to_vec(); + for (i, bits64) in bits.chunks(64).enumerate() { + for bit in bits64.iter().rev() { + acc <<= 1; + acc += *bit as u64; + } + res.0[i] = acc; + acc = 0; + } + res + } + + #[inline] + fn to_bytes_be(&self) -> Vec { + let mut le_bytes = self.to_bytes_le(); + le_bytes.reverse(); + le_bytes + } + + #[inline] + fn to_bytes_le(&self) -> Vec { + let array_map = self.0.iter().map(|limb| limb.to_le_bytes()); + let mut res = Vec::::with_capacity($num_limbs * 8); + for limb in array_map { + res.extend_from_slice(&limb); + } + res + } + } + + impl CanonicalSerialize for $name { + #[inline] + fn serialize(&self, writer: W) -> Result<(), SerializationError> { + self.write(writer)?; + Ok(()) + } + + #[inline] + fn serialized_size(&self) -> usize { + Self::NUM_LIMBS * 8 + } + } + + impl CanonicalDeserialize for $name { + #[inline] + fn deserialize(reader: R) -> Result { + let value = Self::read(reader)?; + Ok(value) + } + } + + impl ToBytes for $name { + #[inline] + fn write(&self, writer: W) -> IoResult<()> { + self.0.write(writer) + } + } + + impl FromBytes for $name { + #[inline] + fn read(reader: R) -> IoResult { + <[u64; $num_limbs]>::read(reader).map(Self::new) + } + } + + impl Display for $name { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + for i in self.0.iter().rev() { + write!(f, "{:016X}", *i)?; + } + Ok(()) + } + } + + impl Ord for $name { + #[inline] + #[ark_ff_asm::unroll_for_loops] + fn cmp(&self, other: &Self) -> ::core::cmp::Ordering { + use core::cmp::Ordering; + for i in 0..$num_limbs { + let a = &self.0[$num_limbs - i - 1]; + let b = &other.0[$num_limbs - i - 1]; + if a < b { + return Ordering::Less; + } else if a > b { + return Ordering::Greater; + } + } + Ordering::Equal + } + } + + impl PartialOrd for $name { + #[inline] + fn partial_cmp(&self, other: &Self) -> Option<::core::cmp::Ordering> { + Some(self.cmp(other)) + } + } + + impl Distribution<$name> for Standard { + fn sample(&self, rng: &mut R) -> $name { + $name(rng.gen()) + } + } + + impl AsMut<[u64]> for $name { + #[inline] + fn as_mut(&mut self) -> &mut [u64] { + &mut self.0 + } + } + + impl AsRef<[u64]> for $name { + #[inline] + fn as_ref(&self) -> &[u64] { + &self.0 + } + } + + impl From for $name { + #[inline] + fn from(val: u64) -> $name { + let mut repr = Self::default(); + repr.0[0] = val; + repr + } + } + + impl TryFrom for $name { + type Error = ark_std::string::String; + + #[inline] + fn try_from(val: num_bigint::BigUint) -> Result<$name, Self::Error> { + let bytes = val.to_bytes_le(); + + if bytes.len() > $num_limbs * 8 { + Err(format!( + "A BigUint of {} bytes cannot fit into a {}.", + bytes.len(), + ark_std::stringify!($name) + )) + } else { + let mut limbs = [0u64; $num_limbs]; + + bytes + .chunks(8) + .into_iter() + .enumerate() + .for_each(|(i, chunk)| { + let mut chunk_padded = [0u8; 8]; + 
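+                            // The final chunk may hold fewer than 8 bytes, so copy
+                            // it into a zeroed buffer to zero-extend the limb (e.g.
+                            // trailing bytes [0x01, 0x02] become the limb 0x0201).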
chunk_padded[..chunk.len()].copy_from_slice(chunk); + limbs[i] = u64::from_le_bytes(chunk_padded) + }); + + Ok(Self(limbs)) + } + } + } + + impl Into for $name { + #[inline] + fn into(self) -> num_bigint::BigUint { + BigUint::from_bytes_le(&self.to_bytes_le()) + } + } + }; +} diff --git a/arkworks/algebra/ff/src/biginteger/mod.rs b/arkworks/algebra/ff/src/biginteger/mod.rs new file mode 100644 index 00000000..ca1a28a2 --- /dev/null +++ b/arkworks/algebra/ff/src/biginteger/mod.rs @@ -0,0 +1,181 @@ +use crate::{ + bytes::{FromBytes, ToBytes}, + fields::{BitIteratorBE, BitIteratorLE}, + UniformRand, +}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError}; +use ark_std::rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +use ark_std::{ + convert::TryFrom, + fmt::{Debug, Display}, + io::{Read, Result as IoResult, Write}, + vec::Vec, +}; +use num_bigint::BigUint; +use zeroize::Zeroize; + +#[macro_use] +pub mod arithmetic; +#[macro_use] +mod macros; + +pub fn signed_mod_reduction(n: u64, modulus: u64) -> i64 { + let t = (n % modulus) as i64; + if t as u64 >= (modulus / 2) { + t - (modulus as i64) + } else { + t + } +} + +bigint_impl!(BigInteger64, 1); +bigint_impl!(BigInteger128, 2); +bigint_impl!(BigInteger256, 4); +bigint_impl!(BigInteger320, 5); +bigint_impl!(BigInteger384, 6); +bigint_impl!(BigInteger448, 7); +bigint_impl!(BigInteger768, 12); +bigint_impl!(BigInteger832, 13); + +#[cfg(test)] +mod tests; + +/// This defines a `BigInteger`, a smart wrapper around a +/// sequence of `u64` limbs, least-significant limb first. +pub trait BigInteger: + ToBytes + + FromBytes + + CanonicalSerialize + + CanonicalDeserialize + + Copy + + Clone + + Debug + + Default + + Display + + Eq + + Ord + + Send + + Sized + + Sync + + 'static + + UniformRand + + Zeroize + + AsMut<[u64]> + + AsRef<[u64]> + + From + + TryFrom + + Into +{ + /// Number of limbs. + const NUM_LIMBS: usize; + + /// Add another representation to this one, returning the carry bit. + fn add_nocarry(&mut self, other: &Self) -> bool; + + /// Subtract another representation from this one, returning the borrow bit. + fn sub_noborrow(&mut self, other: &Self) -> bool; + + /// Performs a leftwise bitshift of this number, effectively multiplying + /// it by 2. Overflow is ignored. + fn mul2(&mut self); + + /// Performs a leftwise bitshift of this number by some amount. + fn muln(&mut self, amt: u32); + + /// Performs a rightwise bitshift of this number, effectively dividing + /// it by 2. + fn div2(&mut self); + + /// Performs a rightwise bitshift of this number by some amount. + fn divn(&mut self, amt: u32); + + /// Returns true iff this number is odd. + fn is_odd(&self) -> bool; + + /// Returns true iff this number is even. + fn is_even(&self) -> bool; + + /// Returns true iff this number is zero. + fn is_zero(&self) -> bool; + + /// Compute the number of bits needed to encode this number. Always a + /// multiple of 64. + fn num_bits(&self) -> u32; + + /// Compute the `i`-th bit of `self`. + fn get_bit(&self, i: usize) -> bool; + + /// Returns the big integer representation of a given big endian boolean + /// array. + fn from_bits_be(bits: &[bool]) -> Self; + + /// Returns the big integer representation of a given little endian boolean + /// array. + fn from_bits_le(bits: &[bool]) -> Self; + + /// Returns the bit representation in a big endian boolean array, + /// with leading zeroes. 
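+    /// For example (sketch), a one-limb integer holding 5 yields 64 bits:
+    /// 61 leading `false` values followed by `true, false, true`.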
+ fn to_bits_be(&self) -> Vec { + BitIteratorBE::new(self).collect::>() + } + + /// Returns the bit representation in a little endian boolean array, + /// with trailing zeroes. + fn to_bits_le(&self) -> Vec { + BitIteratorLE::new(self).collect::>() + } + + /// Returns the byte representation in a big endian byte array, + /// with leading zeros. + fn to_bytes_be(&self) -> Vec; + + /// Returns the byte representation in a little endian byte array, + /// with trailing zeros. + fn to_bytes_le(&self) -> Vec; + + /// Returns the windowed non-adjacent form of `self`, for a window of size `w`. + fn find_wnaf(&self, w: usize) -> Option> { + // w > 2 due to definition of wNAF, and w < 64 to make sure that `i64` + // can fit each signed digit + if w >= 2 && w < 64 { + let mut res = vec![]; + let mut e = *self; + + while !e.is_zero() { + let z: i64; + if e.is_odd() { + z = signed_mod_reduction(e.as_ref()[0], 1 << w); + if z >= 0 { + e.sub_noborrow(&Self::from(z as u64)); + } else { + e.add_nocarry(&Self::from((-z) as u64)); + } + } else { + z = 0; + } + res.push(z); + e.div2(); + } + + Some(res) + } else { + None + } + } + + /// Writes this `BigInteger` as a big endian integer. Always writes + /// `(num_bits` / 8) bytes. + fn write_le(&self, writer: &mut W) -> IoResult<()> { + self.write(writer) + } + + /// Reads a big endian integer occupying (`num_bits` / 8) bytes into this + /// representation. + fn read_le(&mut self, reader: &mut R) -> IoResult<()> { + *self = Self::read(reader)?; + Ok(()) + } +} diff --git a/arkworks/algebra/ff/src/biginteger/tests.rs b/arkworks/algebra/ff/src/biginteger/tests.rs new file mode 100644 index 00000000..2c929d94 --- /dev/null +++ b/arkworks/algebra/ff/src/biginteger/tests.rs @@ -0,0 +1,120 @@ +use crate::{biginteger::BigInteger, UniformRand}; +use num_bigint::BigUint; + +fn biginteger_arithmetic_test(a: B, b: B, zero: B) { + // zero == zero + assert_eq!(zero, zero); + + // zero.is_zero() == true + assert_eq!(zero.is_zero(), true); + + // a == a + assert_eq!(a, a); + + // a + 0 = a + let mut a0_add = a.clone(); + a0_add.add_nocarry(&zero); + assert_eq!(a0_add, a); + + // a - 0 = a + let mut a0_sub = a.clone(); + a0_sub.sub_noborrow(&zero); + assert_eq!(a0_sub, a); + + // a - a = 0 + let mut aa_sub = a.clone(); + aa_sub.sub_noborrow(&a); + assert_eq!(aa_sub, zero); + + // a + b = b + a + let mut ab_add = a.clone(); + ab_add.add_nocarry(&b); + let mut ba_add = b.clone(); + ba_add.add_nocarry(&a); + assert_eq!(ab_add, ba_add); +} + +fn biginteger_bits_test() { + let mut one = B::from(1u64); + assert!(one.get_bit(0)); + assert!(!one.get_bit(1)); + one.muln(5); + let thirty_two = one; + assert!(!thirty_two.get_bit(0)); + assert!(!thirty_two.get_bit(1)); + assert!(!thirty_two.get_bit(2)); + assert!(!thirty_two.get_bit(3)); + assert!(!thirty_two.get_bit(4)); + assert!(thirty_two.get_bit(5), "{:?}", thirty_two); +} + +fn biginteger_bytes_test() { + let mut bytes = [0u8; 256]; + let mut rng = ark_std::test_rng(); + let x: B = UniformRand::rand(&mut rng); + x.write(bytes.as_mut()).unwrap(); + let y = B::read(bytes.as_ref()).unwrap(); + assert_eq!(x, y); +} + +fn biginteger_conversion_test() { + let mut rng = ark_std::test_rng(); + + let x: B = UniformRand::rand(&mut rng); + let x_bigint: BigUint = x.clone().into(); + let x_recovered = B::try_from(x_bigint).ok().unwrap(); + + assert_eq!(x, x_recovered); +} + +fn test_biginteger(zero: B) { + let mut rng = ark_std::test_rng(); + let a: B = UniformRand::rand(&mut rng); + let b: B = UniformRand::rand(&mut rng); + 
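+    // Run the shared arithmetic, byte, bit, and conversion checks at this
+    // limb width.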
biginteger_arithmetic_test(a, b, zero); + biginteger_bytes_test::(); + biginteger_bits_test::(); + biginteger_conversion_test::(); +} + +#[test] +fn test_biginteger64() { + use crate::biginteger::BigInteger64 as B; + test_biginteger(B::new([0u64; 1])); +} + +#[test] +fn test_biginteger128() { + use crate::biginteger::BigInteger128 as B; + test_biginteger(B::new([0u64; 2])); +} + +#[test] +fn test_biginteger256() { + use crate::biginteger::BigInteger256 as B; + test_biginteger(B::new([0u64; 4])); +} + +#[test] +fn test_biginteger384() { + use crate::biginteger::BigInteger384 as B; + test_biginteger(B::new([0u64; 6])); +} + +#[test] +fn test_biginteger448() { + use crate::biginteger::BigInteger448 as B; + test_biginteger(B::new([0u64; 7])); +} + +#[test] +fn test_biginteger768() { + use crate::biginteger::BigInteger768 as B; + test_biginteger(B::new([0u64; 12])); +} + +#[test] +fn test_biginteger832() { + use crate::biginteger::BigInteger832 as B; + test_biginteger(B::new([0u64; 13])); +} diff --git a/arkworks/algebra/ff/src/bytes.rs b/arkworks/algebra/ff/src/bytes.rs new file mode 100644 index 00000000..af2e8ef6 --- /dev/null +++ b/arkworks/algebra/ff/src/bytes.rs @@ -0,0 +1,338 @@ +use crate::error; +use ark_std::{ + io::{Read, Result as IoResult, Write}, + vec::Vec, +}; + +pub trait ToBytes { + /// Serializes `self` into `writer`. + fn write(&self, writer: W) -> IoResult<()>; +} + +pub trait FromBytes: Sized { + /// Reads `Self` from `reader`. + fn read(reader: R) -> IoResult; +} + +impl ToBytes for [u8; N] { + #[inline] + fn write(&self, mut writer: W) -> IoResult<()> { + writer.write_all(self) + } +} + +impl FromBytes for [u8; N] { + #[inline] + fn read(mut reader: R) -> IoResult { + let mut arr = [0u8; N]; + reader.read_exact(&mut arr)?; + Ok(arr) + } +} + +impl ToBytes for [u16; N] { + #[inline] + fn write(&self, mut writer: W) -> IoResult<()> { + for num in self { + writer.write_all(&num.to_le_bytes())?; + } + Ok(()) + } +} + +impl FromBytes for [u16; N] { + #[inline] + fn read(mut reader: R) -> IoResult { + let mut res = [0u16; N]; + for num in res.iter_mut() { + let mut bytes = [0u8; 2]; + reader.read_exact(&mut bytes)?; + *num = u16::from_le_bytes(bytes); + } + Ok(res) + } +} + +impl ToBytes for [u32; N] { + #[inline] + fn write(&self, mut writer: W) -> IoResult<()> { + for num in self { + writer.write_all(&num.to_le_bytes())?; + } + Ok(()) + } +} + +impl FromBytes for [u32; N] { + #[inline] + fn read(mut reader: R) -> IoResult { + let mut res = [0u32; N]; + for num in res.iter_mut() { + let mut bytes = [0u8; 4]; + reader.read_exact(&mut bytes)?; + *num = u32::from_le_bytes(bytes); + } + Ok(res) + } +} + +impl ToBytes for [u64; N] { + #[inline] + fn write(&self, mut writer: W) -> IoResult<()> { + for num in self { + writer.write_all(&num.to_le_bytes())?; + } + Ok(()) + } +} + +impl FromBytes for [u64; N] { + #[inline] + fn read(mut reader: R) -> IoResult { + let mut res = [0u64; N]; + for num in res.iter_mut() { + let mut bytes = [0u8; 8]; + reader.read_exact(&mut bytes)?; + *num = u64::from_le_bytes(bytes); + } + Ok(res) + } +} + +impl ToBytes for [u128; N] { + #[inline] + fn write(&self, mut writer: W) -> IoResult<()> { + for num in self { + writer.write_all(&num.to_le_bytes())?; + } + Ok(()) + } +} + +impl FromBytes for [u128; N] { + #[inline] + fn read(mut reader: R) -> IoResult { + let mut res = [0u128; N]; + for num in res.iter_mut() { + let mut bytes = [0u8; 16]; + reader.read_exact(&mut bytes)?; + *num = u128::from_le_bytes(bytes); + } + Ok(res) + } +} + +/// Takes as 
input a sequence of structs, and converts them to a series of +/// bytes. All traits that implement `Bytes` can be automatically converted to +/// bytes in this manner. +#[macro_export] +macro_rules! to_bytes { + ($($x:expr),*) => ({ + let mut buf = $crate::vec![]; + {$crate::push_to_vec!(buf, $($x),*)}.map(|_| buf) + }); +} + +#[macro_export] +macro_rules! push_to_vec { + ($buf:expr, $y:expr, $($x:expr),*) => ({ + { + $crate::ToBytes::write(&$y, &mut $buf) + }.and({$crate::push_to_vec!($buf, $($x),*)}) + }); + + ($buf:expr, $x:expr) => ({ + $crate::ToBytes::write(&$x, &mut $buf) + }) +} + +impl ToBytes for u8 { + #[inline] + fn write(&self, mut writer: W) -> IoResult<()> { + writer.write_all(&[*self]) + } +} + +impl FromBytes for u8 { + #[inline] + fn read(mut reader: R) -> IoResult { + let mut byte = [0u8]; + reader.read_exact(&mut byte)?; + Ok(byte[0]) + } +} + +impl ToBytes for u16 { + #[inline] + fn write(&self, mut writer: W) -> IoResult<()> { + writer.write_all(&self.to_le_bytes()) + } +} + +impl FromBytes for u16 { + #[inline] + fn read(mut reader: R) -> IoResult { + let mut bytes = [0u8; 2]; + reader.read_exact(&mut bytes)?; + Ok(u16::from_le_bytes(bytes)) + } +} + +impl ToBytes for u32 { + #[inline] + fn write(&self, mut writer: W) -> IoResult<()> { + writer.write_all(&self.to_le_bytes()) + } +} + +impl FromBytes for u32 { + #[inline] + fn read(mut reader: R) -> IoResult { + let mut bytes = [0u8; 4]; + reader.read_exact(&mut bytes)?; + Ok(u32::from_le_bytes(bytes)) + } +} + +impl ToBytes for u64 { + #[inline] + fn write(&self, mut writer: W) -> IoResult<()> { + writer.write_all(&self.to_le_bytes()) + } +} + +impl FromBytes for u64 { + #[inline] + fn read(mut reader: R) -> IoResult { + let mut bytes = [0u8; 8]; + reader.read_exact(&mut bytes)?; + Ok(u64::from_le_bytes(bytes)) + } +} + +impl ToBytes for u128 { + #[inline] + fn write(&self, mut writer: W) -> IoResult<()> { + writer.write_all(&self.to_le_bytes()) + } +} + +impl FromBytes for u128 { + #[inline] + fn read(mut reader: R) -> IoResult { + let mut bytes = [0u8; 16]; + reader.read_exact(&mut bytes)?; + Ok(u128::from_le_bytes(bytes)) + } +} + +impl ToBytes for () { + #[inline] + fn write(&self, _writer: W) -> IoResult<()> { + Ok(()) + } +} + +impl FromBytes for () { + #[inline] + fn read(_bytes: R) -> IoResult { + Ok(()) + } +} + +impl ToBytes for bool { + #[inline] + fn write(&self, writer: W) -> IoResult<()> { + u8::write(&(*self as u8), writer) + } +} + +impl FromBytes for bool { + #[inline] + fn read(reader: R) -> IoResult { + match u8::read(reader) { + Ok(0) => Ok(false), + Ok(1) => Ok(true), + Ok(_) => Err(error("FromBytes::read failed")), + Err(err) => Err(err), + } + } +} + +impl ToBytes for Vec { + #[inline] + fn write(&self, mut writer: W) -> IoResult<()> { + for item in self { + item.write(&mut writer)?; + } + Ok(()) + } +} + +impl<'a, T: 'a + ToBytes> ToBytes for &'a [T] { + #[inline] + fn write(&self, mut writer: W) -> IoResult<()> { + for item in *self { + item.write(&mut writer)?; + } + Ok(()) + } +} + +impl<'a, T: 'a + ToBytes> ToBytes for &'a T { + #[inline] + fn write(&self, mut writer: W) -> IoResult<()> { + (*self).write(&mut writer) + } +} + +impl ToBytes for Option { + #[inline] + fn write(&self, mut writer: W) -> IoResult<()> { + if let Some(val) = self { + true.write(&mut writer)?; + val.write(&mut writer) + } else { + false.write(&mut writer) + } + } +} + +impl FromBytes for Option { + #[inline] + fn read(mut reader: R) -> IoResult { + let is_some = bool::read(&mut reader)?; + if is_some { + 
T::read(&mut reader).map(Some) + } else { + Ok(None) + } + } +} + +#[cfg(test)] +mod test { + use ark_std::vec::Vec; + #[test] + fn test_macro_empty() { + let array: Vec = vec![]; + let bytes: Vec = to_bytes![array].unwrap(); + assert_eq!(&bytes, &[]); + assert_eq!(bytes.len(), 0); + } + + #[test] + fn test_macro() { + let array1 = [1u8; 32]; + let array2 = [2u8; 16]; + let array3 = [3u8; 8]; + let bytes = to_bytes![array1, array2, array3].unwrap(); + assert_eq!(bytes.len(), 56); + + let mut actual_bytes = Vec::new(); + actual_bytes.extend_from_slice(&array1); + actual_bytes.extend_from_slice(&array2); + actual_bytes.extend_from_slice(&array3); + assert_eq!(bytes, actual_bytes); + } +} diff --git a/arkworks/algebra/ff/src/fields/arithmetic.rs b/arkworks/algebra/ff/src/fields/arithmetic.rs new file mode 100644 index 00000000..e324c940 --- /dev/null +++ b/arkworks/algebra/ff/src/fields/arithmetic.rs @@ -0,0 +1,548 @@ +/// This modular multiplication algorithm uses Montgomery +/// reduction for efficient implementation. It also additionally +/// uses the "no-carry optimization" outlined +/// [here](https://hackmd.io/@zkteam/modular_multiplication) if +/// `P::MODULUS` has (a) a non-zero MSB, and (b) at least one +/// zero bit in the rest of the modulus. +macro_rules! impl_field_mul_assign { + ($limbs:expr) => { + #[inline] + #[ark_ff_asm::unroll_for_loops] + fn mul_assign(&mut self, other: &Self) { + // Checking the modulus at compile time + let first_bit_set = P::MODULUS.0[$limbs - 1] >> 63 != 0; + // $limbs can be 1, hence we can run into a case with an unused mut. + #[allow(unused_mut)] + let mut all_bits_set = P::MODULUS.0[$limbs - 1] == !0 - (1 << 63); + for i in 1..$limbs { + all_bits_set &= P::MODULUS.0[$limbs - i - 1] == !0u64; + } + let _no_carry: bool = !(first_bit_set || all_bits_set); + + // No-carry optimisation applied to CIOS + if _no_carry { + #[cfg(use_asm)] + #[allow(unsafe_code, unused_mut)] + { + // Tentatively avoid using assembly for `$limbs == 1`. + if $limbs <= 6 && $limbs > 1 { + ark_ff_asm::x86_64_asm_mul!($limbs, (self.0).0, (other.0).0); + self.reduce(); + return; + } + } + let mut r = [0u64; $limbs]; + let mut carry1 = 0u64; + let mut carry2 = 0u64; + + for i in 0..$limbs { + r[0] = fa::mac(r[0], (self.0).0[0], (other.0).0[i], &mut carry1); + let k = r[0].wrapping_mul(P::INV); + fa::mac_discard(r[0], k, P::MODULUS.0[0], &mut carry2); + for j in 1..$limbs { + r[j] = mac_with_carry!(r[j], (self.0).0[j], (other.0).0[i], &mut carry1); + r[j - 1] = mac_with_carry!(r[j], k, P::MODULUS.0[j], &mut carry2); + } + r[$limbs - 1] = carry1 + carry2; + } + (self.0).0 = r; + self.reduce(); + // Alternative implementation + } else { + *self = self.mul_without_reduce(other, P::MODULUS, P::INV); + self.reduce(); + } + } + }; +} + +macro_rules! impl_field_into_repr { + ($limbs:expr, $BigIntegerType:ty) => { + #[inline] + #[ark_ff_asm::unroll_for_loops] + #[allow(clippy::modulo_one)] + fn into_repr(&self) -> $BigIntegerType { + let mut tmp = self.0; + let mut r = tmp.0; + // Montgomery Reduction + for i in 0..$limbs { + let k = r[i].wrapping_mul(P::INV); + let mut carry = 0; + + mac_with_carry!(r[i], k, P::MODULUS.0[0], &mut carry); + for j in 1..$limbs { + r[(j + i) % $limbs] = + mac_with_carry!(r[(j + i) % $limbs], k, P::MODULUS.0[j], &mut carry); + } + r[i % $limbs] = carry; + } + tmp.0 = r; + tmp + } + }; +} + +macro_rules! 
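+// Schoolbook squaring: each cross product (self.0)[i] * (self.0)[j] with i < j
+// is computed once, the partial result is doubled via a one-bit left shift, the
+// diagonal terms are added, and the double-width result is Montgomery-reduced.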
impl_field_square_in_place { + ($limbs: expr) => { + #[inline] + #[ark_ff_asm::unroll_for_loops] + #[allow(unused_braces, clippy::absurd_extreme_comparisons)] + fn square_in_place(&mut self) -> &mut Self { + if $limbs == 1 { + // We default to multiplying with `self` using the `Mul` impl + // for the 1 limb case + *self = *self * *self; + return self; + } + #[cfg(use_asm)] + #[allow(unsafe_code, unused_mut)] + { + // Checking the modulus at compile time + let first_bit_set = P::MODULUS.0[$limbs - 1] >> 63 != 0; + let mut all_bits_set = P::MODULUS.0[$limbs - 1] == !0 - (1 << 63); + for i in 1..$limbs { + all_bits_set &= P::MODULUS.0[$limbs - i - 1] == core::u64::MAX; + } + let _no_carry: bool = !(first_bit_set || all_bits_set); + + if $limbs <= 6 && _no_carry { + ark_ff_asm::x86_64_asm_square!($limbs, (self.0).0); + self.reduce(); + return self; + } + } + let mut r = [0u64; $limbs * 2]; + + let mut carry = 0; + for i in 0..$limbs { + if i < $limbs - 1 { + for j in 0..$limbs { + if j > i { + r[i + j] = + mac_with_carry!(r[i + j], (self.0).0[i], (self.0).0[j], &mut carry); + } + } + r[$limbs + i] = carry; + carry = 0; + } + } + r[$limbs * 2 - 1] = r[$limbs * 2 - 2] >> 63; + for i in 0..$limbs { + // This computes `r[2 * ($limbs - 1) - (i + 1)]`, but additionally + // handles the case where the index underflows. + // Note that we should never hit this case because it only occurs + // when `$limbs == 1`, but we handle that separately above. + let subtractor = (2 * ($limbs - 1usize)) + .checked_sub(i + 1) + .map(|index| r[index]) + .unwrap_or(0); + r[2 * ($limbs - 1) - i] = (r[2 * ($limbs - 1) - i] << 1) | (subtractor >> 63); + } + for i in 3..$limbs { + r[$limbs + 1 - i] = (r[$limbs + 1 - i] << 1) | (r[$limbs - i] >> 63); + } + r[1] <<= 1; + + for i in 0..$limbs { + r[2 * i] = mac_with_carry!(r[2 * i], (self.0).0[i], (self.0).0[i], &mut carry); + // need unused assignment because the last iteration of the loop produces an + // assignment to `carry` that is unused. + #[allow(unused_assignments)] + { + r[2 * i + 1] = adc!(r[2 * i + 1], 0, &mut carry); + } + } + // Montgomery reduction + let mut _carry2 = 0; + for i in 0..$limbs { + let k = r[i].wrapping_mul(P::INV); + let mut carry = 0; + mac_with_carry!(r[i], k, P::MODULUS.0[0], &mut carry); + for j in 1..$limbs { + r[j + i] = mac_with_carry!(r[j + i], k, P::MODULUS.0[j], &mut carry); + } + r[$limbs + i] = adc!(r[$limbs + i], _carry2, &mut carry); + _carry2 = carry; + } + (self.0).0.copy_from_slice(&r[$limbs..]); + self.reduce(); + self + } + }; +} + +macro_rules! impl_field_bigint_conv { + ($field: ident, $bigint: ident, $params: ident) => { + impl Into<$bigint> for $field
<P>
{ + fn into(self) -> $bigint { + self.into_repr() + } + } + + impl From<$bigint> for $field
<P>
{ + /// Converts `Self::BigInteger` into `Self` + /// + /// # Panics + /// This method panics if `int` is larger than `P::MODULUS`. + fn from(int: $bigint) -> Self { + Self::from_repr(int).unwrap() + } + } + }; +} + +macro_rules! impl_prime_field_standard_sample { + ($field: ident, $params: ident) => { + impl ark_std::rand::distributions::Distribution<$field
<P>
> + for ark_std::rand::distributions::Standard + { + #[inline] + fn sample(&self, rng: &mut R) -> $field
<P>
{ + loop { + let mut tmp = $field( + rng.sample(ark_std::rand::distributions::Standard), + PhantomData, + ); + + // Mask away the unused bits at the beginning. + assert!(P::REPR_SHAVE_BITS <= 64); + let mask = if P::REPR_SHAVE_BITS == 64 { + 0 + } else { + core::u64::MAX >> P::REPR_SHAVE_BITS + }; + tmp.0.as_mut().last_mut().map(|val| *val &= mask); + + if tmp.is_valid() { + return tmp; + } + } + } + } + }; +} + +macro_rules! impl_prime_field_from_int { + ($field: ident, 128, $params: ident, $limbs:expr) => { + impl From for $field
<P>
{ + fn from(other: u128) -> Self { + let mut default_int = P::BigInt::default(); + if $limbs == 1 { + default_int.0[0] = (other % u128::from(P::MODULUS.0[0])) as u64; + } else { + let upper = (other >> 64) as u64; + let lower = ((other << 64) >> 64) as u64; + // This is equivalent to the following, but satisfying the compiler: + // default_int.0[0] = lower; + // default_int.0[1] = upper; + let limbs = [lower, upper]; + for (cur, other) in default_int.0.iter_mut().zip(&limbs) { + *cur = *other; + } + } + Self::from_repr(default_int).unwrap() + } + } + + impl From for $field
<P>
{ + fn from(other: i128) -> Self { + let abs = Self::from(other.unsigned_abs()); + if other.is_positive() { + abs + } else { + -abs + } + } + } + }; + ($field: ident, bool, $params: ident, $limbs:expr) => { + impl From for $field
<P>
{ + fn from(other: bool) -> Self { + if $limbs == 1 { + Self::from_repr(P::BigInt::from(u64::from(other) % P::MODULUS.0[0])).unwrap() + } else { + Self::from_repr(P::BigInt::from(u64::from(other))).unwrap() + } + } + } + }; + ($field: ident, $int: expr, $params: ident, $limbs:expr) => { + paste::paste!{ + impl From<[]> for $field
<P>
{ + fn from(other: []) -> Self { + if $limbs == 1 { + Self::from_repr(P::BigInt::from(u64::from(other) % P::MODULUS.0[0])).unwrap() + } else { + Self::from_repr(P::BigInt::from(u64::from(other))).unwrap() + } + } + } + + impl From<[]> for $field
<P>
{ + fn from(other: []) -> Self { + let abs = Self::from(other.unsigned_abs()); + if other.is_positive() { + abs + } else { + -abs + } + } + } + } + }; +} + +macro_rules! sqrt_impl { + ($Self:ident, $P:tt, $self:expr) => {{ + // https://eprint.iacr.org/2012/685.pdf (page 12, algorithm 5) + // Actually this is just normal Tonelli-Shanks; since `P::Generator` + // is a quadratic non-residue, `P::ROOT_OF_UNITY = P::GENERATOR ^ t` + // is also a quadratic non-residue (since `t` is odd). + if $self.is_zero() { + return Some($Self::zero()); + } + // Try computing the square root (x at the end of the algorithm) + // Check at the end of the algorithm if x was a square root + // Begin Tonelli-Shanks + let mut z = $Self::qnr_to_t(); + let mut w = $self.pow($P::T_MINUS_ONE_DIV_TWO); + let mut x = w * $self; + let mut b = x * &w; + + let mut v = $P::TWO_ADICITY as usize; + + while !b.is_one() { + let mut k = 0usize; + + let mut b2k = b; + while !b2k.is_one() { + // invariant: b2k = b^(2^k) after entering this loop + b2k.square_in_place(); + k += 1; + } + + if k == ($P::TWO_ADICITY as usize) { + // We are in the case where self^(T * 2^k) = x^(P::MODULUS - 1) = 1, + // which means that no square root exists. + return None; + } + let j = v - k; + w = z; + for _ in 1..j { + w.square_in_place(); + } + + z = w.square(); + b *= &z; + x *= &w; + v = k; + } + // Is x the square root? If so, return it. + if (x.square() == *$self) { + return Some(x); + } else { + // Consistency check that if no square root is found, + // it is because none exists. + #[cfg(debug_assertions)] + { + use crate::fields::LegendreSymbol::*; + if ($self.legendre() != QuadraticNonResidue) { + panic!("Input has a square root per its legendre symbol, but it was not found") + } + } + None + } + }}; +} + +// Implements AddAssign on Self by deferring to an implementation on &Self +#[macro_export] +macro_rules! impl_additive_ops_from_ref { + ($type: ident, $params: ident) => { + #[allow(unused_qualifications)] + impl core::ops::Add for $type
<P>
{ + type Output = Self; + + #[inline] + fn add(self, other: Self) -> Self { + let mut result = self; + result.add_assign(&other); + result + } + } + + #[allow(unused_qualifications)] + impl<'a, P: $params> core::ops::Add<&'a mut Self> for $type
<P>
{ + type Output = Self; + + #[inline] + fn add(self, other: &'a mut Self) -> Self { + let mut result = self; + result.add_assign(&*other); + result + } + } + + #[allow(unused_qualifications)] + impl core::ops::Sub for $type
<P>
{ + type Output = Self; + + #[inline] + fn sub(self, other: Self) -> Self { + let mut result = self; + result.sub_assign(&other); + result + } + } + + #[allow(unused_qualifications)] + impl<'a, P: $params> core::ops::Sub<&'a mut Self> for $type
<P>
{ + type Output = Self; + + #[inline] + fn sub(self, other: &'a mut Self) -> Self { + let mut result = self; + result.sub_assign(&*other); + result + } + } + + #[allow(unused_qualifications)] + impl core::iter::Sum for $type
<P>
{ + fn sum>(iter: I) -> Self { + iter.fold(Self::zero(), core::ops::Add::add) + } + } + + #[allow(unused_qualifications)] + impl<'a, P: $params> core::iter::Sum<&'a Self> for $type
<P>
{ + fn sum>(iter: I) -> Self { + iter.fold(Self::zero(), core::ops::Add::add) + } + } + + #[allow(unused_qualifications)] + impl core::ops::AddAssign for $type
<P>
{ + fn add_assign(&mut self, other: Self) { + self.add_assign(&other) + } + } + + #[allow(unused_qualifications)] + impl core::ops::SubAssign for $type
<P>
{ + fn sub_assign(&mut self, other: Self) { + self.sub_assign(&other) + } + } + + #[allow(unused_qualifications)] + impl<'a, P: $params> core::ops::AddAssign<&'a mut Self> for $type
<P>
{ + fn add_assign(&mut self, other: &'a mut Self) { + self.add_assign(&*other) + } + } + + #[allow(unused_qualifications)] + impl<'a, P: $params> core::ops::SubAssign<&'a mut Self> for $type
<P>
{ + fn sub_assign(&mut self, other: &'a mut Self) { + self.sub_assign(&*other) + } + } + }; +} + +// Implements AddAssign on Self by deferring to an implementation on &Self +#[macro_export] +macro_rules! impl_multiplicative_ops_from_ref { + ($type: ident, $params: ident) => { + #[allow(unused_qualifications)] + impl core::ops::Mul for $type
<P>
{ + type Output = Self; + + #[inline] + fn mul(self, other: Self) -> Self { + let mut result = self; + result.mul_assign(&other); + result + } + } + + #[allow(unused_qualifications)] + impl core::ops::Div for $type
<P>
{ + type Output = Self; + + #[inline] + fn div(self, other: Self) -> Self { + let mut result = self; + result.div_assign(&other); + result + } + } + + #[allow(unused_qualifications)] + impl<'a, P: $params> core::ops::Mul<&'a mut Self> for $type
<P>
{ + type Output = Self; + + #[inline] + fn mul(self, other: &'a mut Self) -> Self { + let mut result = self; + result.mul_assign(&*other); + result + } + } + + #[allow(unused_qualifications)] + impl<'a, P: $params> core::ops::Div<&'a mut Self> for $type
<P>
{ + type Output = Self; + + #[inline] + fn div(self, other: &'a mut Self) -> Self { + let mut result = self; + result.div_assign(&*other); + result + } + } + + #[allow(unused_qualifications)] + impl core::iter::Product for $type
<P>
{ + fn product>(iter: I) -> Self { + iter.fold(Self::one(), core::ops::Mul::mul) + } + } + + #[allow(unused_qualifications)] + impl<'a, P: $params> core::iter::Product<&'a Self> for $type
<P>
{ + fn product>(iter: I) -> Self { + iter.fold(Self::one(), Mul::mul) + } + } + + #[allow(unused_qualifications)] + impl core::ops::MulAssign for $type
<P>
{ + fn mul_assign(&mut self, other: Self) { + self.mul_assign(&other) + } + } + + #[allow(unused_qualifications)] + impl<'a, P: $params> core::ops::DivAssign<&'a mut Self> for $type
<P>
{ + fn div_assign(&mut self, other: &'a mut Self) { + self.div_assign(&*other) + } + } + + #[allow(unused_qualifications)] + impl<'a, P: $params> core::ops::MulAssign<&'a mut Self> for $type
<P>
{ + fn mul_assign(&mut self, other: &'a mut Self) { + self.mul_assign(&*other) + } + } + + #[allow(unused_qualifications)] + impl core::ops::DivAssign for $type
<P>
{ + fn div_assign(&mut self, other: Self) { + self.div_assign(&other) + } + } + }; +} diff --git a/arkworks/algebra/ff/src/fields/macros.rs b/arkworks/algebra/ff/src/fields/macros.rs new file mode 100644 index 00000000..98dbbf42 --- /dev/null +++ b/arkworks/algebra/ff/src/fields/macros.rs @@ -0,0 +1,735 @@ +macro_rules! impl_prime_field_serializer { + ($field: ident, $params: ident, $byte_size: expr) => { + impl CanonicalSerializeWithFlags for $field
<P>
{ + fn serialize_with_flags( + &self, + mut writer: W, + flags: F, + ) -> Result<(), SerializationError> { + // All reasonable `Flags` should be less than 8 bits in size + // (256 values are enough for anyone!) + if F::BIT_SIZE > 8 { + return Err(SerializationError::NotEnoughSpace); + } + + // Calculate the number of bytes required to represent a field element + // serialized with `flags`. If `F::BIT_SIZE < 8`, + // this is at most `$byte_size + 1` + let output_byte_size = buffer_byte_size(P::MODULUS_BITS as usize + F::BIT_SIZE); + + // Write out `self` to a temporary buffer. + // The size of the buffer is $byte_size + 1 because `F::BIT_SIZE` + // is at most 8 bits. + let mut bytes = [0u8; $byte_size + 1]; + self.write(&mut bytes[..$byte_size])?; + + // Mask out the bits of the last byte that correspond to the flag. + bytes[output_byte_size - 1] |= flags.u8_bitmask(); + + writer.write_all(&bytes[..output_byte_size])?; + Ok(()) + } + + // Let `m = 8 * n` for some `n` be the smallest multiple of 8 greater + // than `P::MODULUS_BITS`. + // If `(m - P::MODULUS_BITS) >= F::BIT_SIZE` , then this method returns `n`; + // otherwise, it returns `n + 1`. + fn serialized_size_with_flags(&self) -> usize { + buffer_byte_size(P::MODULUS_BITS as usize + F::BIT_SIZE) + } + } + + impl CanonicalSerialize for $field
<P>
{ + #[inline] + fn serialize( + &self, + writer: W, + ) -> Result<(), SerializationError> { + self.serialize_with_flags(writer, EmptyFlags) + } + + #[inline] + fn serialized_size(&self) -> usize { + self.serialized_size_with_flags::() + } + } + + impl CanonicalDeserializeWithFlags for $field
<P>
{ + fn deserialize_with_flags( + mut reader: R, + ) -> Result<(Self, F), SerializationError> { + // All reasonable `Flags` should be less than 8 bits in size + // (256 values are enough for anyone!) + if F::BIT_SIZE > 8 { + return Err(SerializationError::NotEnoughSpace); + } + // Calculate the number of bytes required to represent a field element + // serialized with `flags`. If `F::BIT_SIZE < 8`, + // this is at most `$byte_size + 1` + let output_byte_size = buffer_byte_size(P::MODULUS_BITS as usize + F::BIT_SIZE); + + let mut masked_bytes = [0; $byte_size + 1]; + reader.read_exact(&mut masked_bytes[..output_byte_size])?; + + let flags = F::from_u8_remove_flags(&mut masked_bytes[output_byte_size - 1]) + .ok_or(SerializationError::UnexpectedFlags)?; + + Ok((Self::read(&masked_bytes[..])?, flags)) + } + } + + impl CanonicalDeserialize for $field
<P>
{ + fn deserialize(reader: R) -> Result { + Self::deserialize_with_flags::(reader).map(|(r, _)| r) + } + } + }; +} + +macro_rules! impl_Fp { + ($Fp:ident, $FpParameters:ident, $BigInteger:ident, $BigIntegerType:ty, $limbs:expr, $field_size:expr) => { + pub trait $FpParameters: FpParameters {} + + /// Represents an element of the prime field F_p, where `p == P::MODULUS`. + /// This type can represent elements in any field of size at most + #[doc = $field_size] + /// bits. + #[derive(Derivative)] + #[derivative( + Default(bound = ""), + Hash(bound = ""), + Clone(bound = ""), + Copy(bound = ""), + Debug(bound = ""), + PartialEq(bound = ""), + Eq(bound = "") + )] + pub struct $Fp
<P>
( + pub $BigIntegerType, + #[derivative(Debug = "ignore")] + #[doc(hidden)] + pub PhantomData
<P>
, + ); + + impl<P> $Fp<P>
{ + #[inline] + pub const fn new(element: $BigIntegerType) -> Self { + Self(element, PhantomData) + } + + #[ark_ff_asm::unroll_for_loops] + const fn const_is_zero(&self) -> bool { + let mut is_zero = true; + for i in 0..$limbs { + is_zero &= (self.0).0[i] == 0; + } + is_zero + } + + const fn const_neg(self, modulus: $BigIntegerType) -> Self { + if !self.const_is_zero() { + Self::new(Self::sub_noborrow(&modulus, &self.0)) + } else { + self + } + } + + /// Interpret a string of decimal numbers as a prime field element. + /// Does not accept unnecessary leading zeroes or a blank string. + /// For *internal* use only; please use the `field_new` macro instead + /// of this method + #[doc(hidden)] + pub const fn const_from_str(limbs: &[u64], is_positive: bool, r2: $BigIntegerType, modulus: $BigIntegerType, inv: u64) -> Self { + let mut repr = $BigInteger([0; $limbs]); + let mut i = 0; + while i < limbs.len() { + repr.0[i] = limbs[i]; + i += 1; + } + let res = Self::const_from_repr(repr, r2, modulus, inv); + if is_positive { + res + } else { + res.const_neg(modulus) + } + } + + #[inline] + pub(crate) const fn const_from_repr(repr: $BigIntegerType, r2: $BigIntegerType, modulus: $BigIntegerType, inv: u64) -> Self { + let mut r = Self::new(repr); + if r.const_is_zero() { + r + } else { + r = r.const_mul(&$Fp(r2, PhantomData), modulus, inv); + r + } + } + + #[ark_ff_asm::unroll_for_loops] + const fn mul_without_reduce(mut self, other: &Self, modulus: $BigIntegerType, inv: u64) -> Self { + let mut r = [0u64; $limbs * 2]; + + for i in 0..$limbs { + let mut carry = 0; + for j in 0..$limbs { + r[j + i] = mac_with_carry!(r[j + i], (self.0).0[i], (other.0).0[j], &mut carry); + } + r[$limbs + i] = carry; + } + // Montgomery reduction + let mut _carry2 = 0; + for i in 0..$limbs { + let k = r[i].wrapping_mul(inv); + let mut carry = 0; + mac_with_carry!(r[i], k, modulus.0[0], &mut carry); + for j in 1..$limbs { + r[j + i] = mac_with_carry!(r[j + i], k, modulus.0[j], &mut carry); + } + r[$limbs + i] = adc!(r[$limbs + i], _carry2, &mut carry); + _carry2 = carry; + } + + for i in 0..$limbs { + (self.0).0[i] = r[$limbs + i]; + } + self + } + + #[ark_ff_asm::unroll_for_loops] + const fn const_mul(mut self, other: &Self, modulus: $BigIntegerType, inv: u64) -> Self { + self = self.mul_without_reduce(other, modulus, inv); + self.const_reduce(modulus) + } + + + #[ark_ff_asm::unroll_for_loops] + const fn const_is_valid(&self, modulus: $BigIntegerType) -> bool { + for i in 0..$limbs { + if (self.0).0[($limbs - i - 1)] < modulus.0[($limbs - i - 1)] { + return true + } else if (self.0).0[($limbs - i - 1)] > modulus.0[($limbs - i - 1)] { + return false + } + } + false + } + + #[inline] + const fn const_reduce(mut self, modulus: $BigIntegerType) -> Self { + if !self.const_is_valid(modulus) { + self.0 = Self::sub_noborrow(&self.0, &modulus); + } + self + } + + #[ark_ff_asm::unroll_for_loops] + // need unused assignment because the last iteration of the loop produces an assignment + // to `borrow` that is unused. + #[allow(unused_assignments)] + const fn sub_noborrow(a: &$BigIntegerType, b: &$BigIntegerType) -> $BigInteger { + let mut a = *a; + let mut borrow = 0; + for i in 0..$limbs { + a.0[i] = sbb!(a.0[i], b.0[i], &mut borrow); + } + a + } + } + + impl $Fp
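// Toy instance of the Montgomery arithmetic above (illustrative only): with a
// one-word modulus q = 23 and word base 2^8, R = 2^8 mod 23 = 3, and an element
// a is stored as a*R mod q. mul_without_reduce computes a*b*R^{-1} mod q one
// word at a time via k = r[i] * inv (where inv = -q^{-1} mod 2^8), and
// const_reduce then subtracts q at most once, the result already being below 2q.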
<P>
{ + #[inline(always)] + pub(crate) fn is_valid(&self) -> bool { + self.0 < P::MODULUS + } + + #[inline] + fn reduce(&mut self) { + if !self.is_valid() { + self.0.sub_noborrow(&P::MODULUS); + } + } + } + + impl Zero for $Fp
<P>
{ + #[inline] + fn zero() -> Self { + $Fp::
<P>
($BigInteger::from(0), PhantomData) + } + + #[inline] + fn is_zero(&self) -> bool { + self.0.is_zero() + } + } + + impl One for $Fp
<P>
{ + #[inline] + fn one() -> Self { + $Fp::
<P>
(P::R, PhantomData) + } + + #[inline] + fn is_one(&self) -> bool { + self.0 == P::R + } + } + + impl Field for $Fp
<P>
{ + type BasePrimeField = Self; + + fn extension_degree() -> u64 { + 1 + } + + fn from_base_prime_field_elems(elems: &[Self::BasePrimeField]) -> Option { + if elems.len() != (Self::extension_degree() as usize) { + return None; + } + Some(elems[0]) + } + + #[inline] + fn double(&self) -> Self { + let mut temp = *self; + temp.double_in_place(); + temp + } + + #[inline] + fn double_in_place(&mut self) -> &mut Self { + // This cannot exceed the backing capacity. + self.0.mul2(); + // However, it may need to be reduced. + self.reduce(); + self + } + + #[inline] + fn characteristic() -> &'static [u64] { + P::MODULUS.as_ref() + } + + #[inline] + fn from_random_bytes_with_flags(bytes: &[u8]) -> Option<(Self, F)> { + if F::BIT_SIZE > 8 { + return None + } else { + let mut result_bytes = [0u8; $limbs * 8 + 1]; + // Copy the input into a temporary buffer. + result_bytes.iter_mut().zip(bytes).for_each(|(result, input)| { + *result = *input; + }); + // This mask retains everything in the last limb + // that is below `P::MODULUS_BITS`. + let last_limb_mask = (u64::MAX >> P::REPR_SHAVE_BITS).to_le_bytes(); + let mut last_bytes_mask = [0u8; 9]; + last_bytes_mask[..8].copy_from_slice(&last_limb_mask); + + + // Length of the buffer containing the field element and the flag. + let output_byte_size = buffer_byte_size(P::MODULUS_BITS as usize + F::BIT_SIZE); + // Location of the flag is the last byte of the serialized + // form of the field element. + let flag_location = output_byte_size - 1; + + // At which byte is the flag located in the last limb? + let flag_location_in_last_limb = flag_location - (8 * ($limbs - 1)); + + // Take all but the last 9 bytes. + let last_bytes = &mut result_bytes[8 * ($limbs - 1)..]; + + // The mask only has the last `F::BIT_SIZE` bits set + let flags_mask = u8::MAX.checked_shl(8 - (F::BIT_SIZE as u32)).unwrap_or(0); + + // Mask away the remaining bytes, and try to reconstruct the + // flag + let mut flags: u8 = 0; + for (i, (b, m)) in last_bytes.iter_mut().zip(&last_bytes_mask).enumerate() { + if i == flag_location_in_last_limb { + flags = *b & flags_mask + } + *b &= m; + } + Self::deserialize(&result_bytes[..($limbs * 8)]) + .ok() + .and_then(|f| F::from_u8(flags).map(|flag| (f, flag))) + } + } + + #[inline] + fn square(&self) -> Self { + let mut temp = self.clone(); + temp.square_in_place(); + temp + } + + impl_field_square_in_place!($limbs); + + #[inline] + fn inverse(&self) -> Option { + if self.is_zero() { + None + } else { + // Guajardo Kumar Paar Pelzl + // Efficient Software-Implementation of Finite Fields with Applications to + // Cryptography + // Algorithm 16 (BEA for Inversion in Fp) + + let one = $BigInteger::from(1); + + let mut u = self.0; + let mut v = P::MODULUS; + let mut b = $Fp::
<P>
(P::R2, PhantomData); // Avoids unnecessary reduction step. + let mut c = Self::zero(); + + while u != one && v != one { + while u.is_even() { + u.div2(); + + if b.0.is_even() { + b.0.div2(); + } else { + b.0.add_nocarry(&P::MODULUS); + b.0.div2(); + } + } + + while v.is_even() { + v.div2(); + + if c.0.is_even() { + c.0.div2(); + } else { + c.0.add_nocarry(&P::MODULUS); + c.0.div2(); + } + } + + if v < u { + u.sub_noborrow(&v); + b.sub_assign(&c); + } else { + v.sub_noborrow(&u); + c.sub_assign(&b); + } + } + + if u == one { + Some(b) + } else { + Some(c) + } + } + } + + fn inverse_in_place(&mut self) -> Option<&mut Self> { + if let Some(inverse) = self.inverse() { + *self = inverse; + Some(self) + } else { + None + } + } + + /// The Frobenius map has no effect in a prime field. + #[inline] + fn frobenius_map(&mut self, _: usize) {} + } + + impl PrimeField for $Fp
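// Loop invariant of the binary Euclidean inversion above (sketch): writing
// self = a*R (Montgomery form), the loop maintains a*b = u*R (mod q) and
// a*c = v*R (mod q); both hold initially since u = a*R, b = R^2, v = q, c = 0.
// When u reaches 1 this gives a*b = R, i.e. b is a^{-1} in Montgomery form,
// which is why b starts at R^2 rather than R.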
<P>
{ + type Params = P; + type BigInt = $BigIntegerType; + + #[inline] + fn from_repr(r: $BigIntegerType) -> Option { + let mut r = $Fp(r, PhantomData); + if r.is_zero() { + Some(r) + } else if r.is_valid() { + r *= &$Fp(P::R2, PhantomData); + Some(r) + } else { + None + } + } + + impl_field_into_repr!($limbs, $BigIntegerType); + } + + impl FftField for $Fp
<P>
{ + type FftParams = P; + + #[inline] + fn two_adic_root_of_unity() -> Self { + $Fp::
<P>
(P::TWO_ADIC_ROOT_OF_UNITY, PhantomData) + } + + #[inline] + fn large_subgroup_root_of_unity() -> Option { + Some($Fp::
<P>
(P::LARGE_SUBGROUP_ROOT_OF_UNITY?, PhantomData)) + } + + #[inline] + fn multiplicative_generator() -> Self { + $Fp::
<P>
(P::GENERATOR, PhantomData) + } + } + + impl SquareRootField for $Fp
<P>
{ + #[inline] + fn legendre(&self) -> LegendreSymbol { + use crate::fields::LegendreSymbol::*; + + // s = self^((MODULUS - 1) // 2) + let s = self.pow(P::MODULUS_MINUS_ONE_DIV_TWO); + if s.is_zero() { + Zero + } else if s.is_one() { + QuadraticResidue + } else { + QuadraticNonResidue + } + } + + #[inline] + fn sqrt(&self) -> Option { + sqrt_impl!(Self, P, self) + } + + fn sqrt_in_place(&mut self) -> Option<&mut Self> { + (*self).sqrt().map(|sqrt| { + *self = sqrt; + self + }) + } + } + + /// Note that this implementation of `Ord` compares field elements viewing + /// them as integers in the range 0, 1, ..., P::MODULUS - 1. However, other + /// implementations of `PrimeField` might choose a different ordering, and + /// as such, users should use this `Ord` for applications where + /// any ordering suffices (like in a BTreeMap), and not in applications + /// where a particular ordering is required. + impl Ord for $Fp
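// Euler's criterion in miniature (illustrative): in F_7, 2^((7-1)/2) = 8 = 1,
// so 2 is a quadratic residue (3^2 = 9 = 2 mod 7), while 3^((7-1)/2) = 27 = -1
// mod 7 marks 3 as a non-residue. sqrt_impl! is assumed to implement
// Tonelli-Shanks using the two-adicity constants from FpParameters.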
<P>
{ + #[inline(always)] + fn cmp(&self, other: &Self) -> Ordering { + self.into_repr().cmp(&other.into_repr()) + } + } + + /// Note that this implementation of `PartialOrd` compares field elements viewing + /// them as integers in the range 0, 1, ..., `P::MODULUS` - 1. However, other + /// implementations of `PrimeField` might choose a different ordering, and + /// as such, users should use this `PartialOrd` for applications where + /// any ordering suffices (like in a BTreeMap), and not in applications + /// where a particular ordering is required. + impl PartialOrd for $Fp
<P>
{ + #[inline(always)] + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } + } + + impl_prime_field_from_int!($Fp, 128, $FpParameters, $limbs); + impl_prime_field_from_int!($Fp, 64, $FpParameters, $limbs); + impl_prime_field_from_int!($Fp, 32, $FpParameters, $limbs); + impl_prime_field_from_int!($Fp, 16, $FpParameters, $limbs); + impl_prime_field_from_int!($Fp, 8, $FpParameters, $limbs); + impl_prime_field_from_int!($Fp, bool, $FpParameters, $limbs); + + impl_prime_field_standard_sample!($Fp, $FpParameters); + + impl_prime_field_serializer!($Fp, $FpParameters, $limbs * 8); + + impl ToBytes for $Fp
<P>
{ + #[inline] + fn write(&self, writer: W) -> IoResult<()> { + self.into_repr().write(writer) + } + } + + impl FromBytes for $Fp
<P>
{ + #[inline] + fn read(reader: R) -> IoResult { + $BigInteger::read(reader).and_then(|b| + match $Fp::from_repr(b) { + Some(f) => Ok(f), + None => Err(crate::error("FromBytes::read failed")), + }) + } + } + + impl FromStr for $Fp
<P>
{ + type Err = (); + + /// Interpret a string of numbers as a (congruent) prime field element. + /// Does not accept unnecessary leading zeroes or a blank string. + fn from_str(s: &str) -> Result { + if s.is_empty() { + return Err(()); + } + + if s == "0" { + return Ok(Self::zero()); + } + + let mut res = Self::zero(); + + let ten = Self::from(::BigInt::from(10)); + + let mut first_digit = true; + + for c in s.chars() { + match c.to_digit(10) { + Some(c) => { + if first_digit { + if c == 0 { + return Err(()); + } + + first_digit = false; + } + + res.mul_assign(&ten); + let digit = Self::from(u64::from(c)); + res.add_assign(&digit); + }, + None => { + return Err(()); + }, + } + } + if !res.is_valid() { + Err(()) + } else { + Ok(res) + } + } + } + + /// Outputs a string containing the value of `self`, chunked up into + /// 64-bit limbs. + impl Display for $Fp
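// Parsing sketch for the FromStr impl above: "123" is folded by Horner's rule
// in base 10, ((1*10) + 2)*10 + 3, entirely in field arithmetic, so a decimal
// string is reduced modulo p as it is read; a leading zero (other than the
// string "0" itself) is rejected up front.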
<P>
{ + #[inline] + fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { + write!(f, stringify!($Fp"({})"), self.into_repr()) + } + } + + impl Neg for $Fp
<P>
{ + type Output = Self; + #[inline] + #[must_use] + fn neg(self) -> Self { + if !self.is_zero() { + let mut tmp = P::MODULUS; + tmp.sub_noborrow(&self.0); + $Fp::
<P>
(tmp, PhantomData) + } else { + self + } + } + } + + impl<'a, P: $FpParameters> Add<&'a $Fp
<P>
> for $Fp
<P>
{ + type Output = Self; + + #[inline] + fn add(mut self, other: &Self) -> Self { + self.add_assign(other); + self + } + } + + impl<'a, P: $FpParameters> Sub<&'a $Fp
<P>
> for $Fp
<P>
{ + type Output = Self; + + #[inline] + fn sub(mut self, other: &Self) -> Self { + self.sub_assign(other); + self + } + } + + impl<'a, P: $FpParameters> Mul<&'a $Fp
<P>
> for $Fp
<P>
{ + type Output = Self; + + #[inline] + fn mul(mut self, other: &Self) -> Self { + self.mul_assign(other); + self + } + } + + impl<'a, P: $FpParameters> Div<&'a $Fp
<P>
> for $Fp
<P>
{ + type Output = Self; + + /// Returns `self * other.inverse()` if `other.inverse()` is `Some`, and + /// panics otherwise. + #[inline] + fn div(mut self, other: &Self) -> Self { + self.mul_assign(&other.inverse().unwrap()); + self + } + } + + impl_additive_ops_from_ref!($Fp, $FpParameters); + impl_multiplicative_ops_from_ref!($Fp, $FpParameters); + + impl<'a, P: $FpParameters> AddAssign<&'a Self> for $Fp
<P>
{ + #[inline] + fn add_assign(&mut self, other: &Self) { + // This cannot exceed the backing capacity. + self.0.add_nocarry(&other.0); + // However, it may need to be reduced + self.reduce(); + } + } + + impl<'a, P: $FpParameters> SubAssign<&'a Self> for $Fp
<P>
{ + #[inline] + fn sub_assign(&mut self, other: &Self) { + // If `other` is larger than `self`, add the modulus to self first. + if other.0 > self.0 { + self.0.add_nocarry(&P::MODULUS); + } + self.0.sub_noborrow(&other.0); + } + } + + impl<'a, P: $FpParameters> MulAssign<&'a Self> for $Fp
<P>
{ + impl_field_mul_assign!($limbs); + } + + /// Computes `self *= other.inverse()` if `other.inverse()` is `Some`, and + /// panics otherwise. + impl<'a, P: $FpParameters> DivAssign<&'a Self> for $Fp
<P>
{ + #[inline] + fn div_assign(&mut self, other: &Self) { + self.mul_assign(&other.inverse().unwrap()); + } + } + + impl zeroize::Zeroize for $Fp
<P>
{ + // The phantom data does not contain element-specific data + // and thus does not need to be zeroized. + fn zeroize(&mut self) { + self.0.zeroize(); + } + } + + impl From for $Fp
<P>
{ + #[inline] + fn from(val: num_bigint::BigUint) -> $Fp
<P>
{ + $Fp::
<P>
::from_le_bytes_mod_order(&val.to_bytes_le()) + } + } + + impl Into for $Fp
<P>
{ + #[inline] + fn into(self) -> num_bigint::BigUint { + self.into_repr().into() + } + } + } +} diff --git a/arkworks/algebra/ff/src/fields/mod.rs b/arkworks/algebra/ff/src/fields/mod.rs new file mode 100644 index 00000000..7beb5d7c --- /dev/null +++ b/arkworks/algebra/ff/src/fields/mod.rs @@ -0,0 +1,798 @@ +use crate::{ + biginteger::BigInteger, + bytes::{FromBytes, ToBytes}, + fields::utils::k_adicity, + UniformRand, +}; +use ark_serialize::{ + CanonicalDeserialize, CanonicalDeserializeWithFlags, CanonicalSerialize, + CanonicalSerializeWithFlags, EmptyFlags, Flags, +}; +use ark_std::{ + cmp::min, + fmt::{Debug, Display}, + hash::Hash, + ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign}, + str::FromStr, + vec::Vec, +}; + +pub use ark_ff_macros; +use num_traits::{One, Zero}; +use zeroize::Zeroize; + +#[macro_use] +pub mod macros; +pub mod utils; + +#[macro_use] +pub mod arithmetic; + +pub mod models; +pub use self::models::*; + +#[cfg(feature = "parallel")] +use ark_std::cmp::max; +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +#[macro_export] +macro_rules! field_new { + ($name:ident, $c0:expr) => {{ + use $crate::FpParameters; + type Params = <$name as $crate::PrimeField>::Params; + let (is_positive, limbs) = $crate::ark_ff_macros::to_sign_and_limbs!($c0); + $name::const_from_str( + &limbs, + is_positive, + Params::R2, + Params::MODULUS, + Params::INV, + ) + }}; + ($name:ident, $c0:expr, $c1:expr $(,)?) => { + $name { + c0: $c0, + c1: $c1, + _parameters: core::marker::PhantomData, + } + }; + ($name:ident, $c0:expr, $c1:expr, $c2:expr $(,)?) => { + $name { + c0: $c0, + c1: $c1, + c2: $c2, + _parameters: core::marker::PhantomData, + } + }; +} + +/// The interface for a generic field. +pub trait Field: + ToBytes + + 'static + + FromBytes + + Copy + + Clone + + Debug + + Display + + Default + + Send + + Sync + + Eq + + Zero + + One + + Ord + + Neg + + UniformRand + + Zeroize + + Sized + + Hash + + CanonicalSerialize + + CanonicalSerializeWithFlags + + CanonicalDeserialize + + CanonicalDeserializeWithFlags + + Add + + Sub + + Mul + + Div + + AddAssign + + SubAssign + + MulAssign + + DivAssign + + for<'a> Add<&'a Self, Output = Self> + + for<'a> Sub<&'a Self, Output = Self> + + for<'a> Mul<&'a Self, Output = Self> + + for<'a> Div<&'a Self, Output = Self> + + for<'a> AddAssign<&'a Self> + + for<'a> SubAssign<&'a Self> + + for<'a> MulAssign<&'a Self> + + for<'a> DivAssign<&'a Self> + + core::iter::Sum + + for<'a> core::iter::Sum<&'a Self> + + core::iter::Product + + for<'a> core::iter::Product<&'a Self> + + From + + From + + From + + From + + From + + From +{ + type BasePrimeField: PrimeField; + + /// Returns the characteristic of the field, + /// in little-endian representation. + fn characteristic() -> &'static [u64] { + Self::BasePrimeField::characteristic() + } + + /// Returns the extension degree of this field with respect + /// to `Self::BasePrimeField`. + fn extension_degree() -> u64; + + /// Convert a slice of base prime field elements into a field element. + /// If the slice length != Self::extension_degree(), must return None. + fn from_base_prime_field_elems(elems: &[Self::BasePrimeField]) -> Option; + + /// Returns `self + self`. + #[must_use] + fn double(&self) -> Self; + + /// Doubles `self` in place. + fn double_in_place(&mut self) -> &mut Self; + + /// Returns a field element if the set of bytes forms a valid field element, + /// otherwise returns None. 
This function is primarily intended for sampling + /// random field elements from a hash-function or RNG output. + fn from_random_bytes(bytes: &[u8]) -> Option { + Self::from_random_bytes_with_flags::(bytes).map(|f| f.0) + } + + /// Returns a field element with an extra sign bit used for group parsing if + /// the set of bytes forms a valid field element, otherwise returns + /// None. This function is primarily intended for sampling + /// random field elements from a hash-function or RNG output. + fn from_random_bytes_with_flags(bytes: &[u8]) -> Option<(Self, F)>; + + /// Returns `self * self`. + #[must_use] + fn square(&self) -> Self; + + /// Squares `self` in place. + fn square_in_place(&mut self) -> &mut Self; + + /// Computes the multiplicative inverse of `self` if `self` is nonzero. + #[must_use] + fn inverse(&self) -> Option; + + // If `self.inverse().is_none()`, this just returns `None`. Otherwise, it sets + // `self` to `self.inverse().unwrap()`. + fn inverse_in_place(&mut self) -> Option<&mut Self>; + + /// Exponentiates this element by a power of the base prime modulus via + /// the Frobenius automorphism. + fn frobenius_map(&mut self, power: usize); + + /// Exponentiates this element by a number represented with `u64` limbs, + /// least significant limb first. + #[must_use] + fn pow>(&self, exp: S) -> Self { + let mut res = Self::one(); + + for i in BitIteratorBE::without_leading_zeros(exp) { + res.square_in_place(); + + if i { + res *= self; + } + } + res + } + + /// Exponentiates a field element `f` by a number represented with `u64` limbs, + /// using a precomputed table containing as many powers of 2 of `f` + /// as the 1 + the floor of log2 of the exponent `exp`, starting from the 1st power. + /// That is, `powers_of_2` should equal `&[p, p^2, p^4, ..., p^(2^n)]` + /// when `exp` has at most `n` bits. + /// + /// This returns `None` when a power is missing from the table. + #[inline] + fn pow_with_table>(powers_of_2: &[Self], exp: S) -> Option { + let mut res = Self::one(); + for (pow, bit) in BitIteratorLE::without_trailing_zeros(exp).enumerate() { + if bit { + res *= powers_of_2.get(pow)?; + } + } + Some(res) + } +} + +/// A trait that defines parameters for a field that can be used for FFTs. +pub trait FftParameters: 'static + Send + Sync + Sized { + type BigInt: BigInteger; + + /// Let `N` be the size of the multiplicative group defined by the field. + /// Then `TWO_ADICITY` is the two-adicity of `N`, i.e. the integer `s` + /// such that `N = 2^s * t` for some odd integer `t`. + const TWO_ADICITY: u32; + + /// 2^s root of unity computed by GENERATOR^t + const TWO_ADIC_ROOT_OF_UNITY: Self::BigInt; + + /// An integer `b` such that there exists a multiplicative subgroup + /// of size `b^k` for some integer `k`. + const SMALL_SUBGROUP_BASE: Option = None; + + /// The integer `k` such that there exists a multiplicative subgroup + /// of size `Self::SMALL_SUBGROUP_BASE^k`. + const SMALL_SUBGROUP_BASE_ADICITY: Option = None; + + /// GENERATOR^((MODULUS-1) / (2^s * + /// SMALL_SUBGROUP_BASE^SMALL_SUBGROUP_BASE_ADICITY)) Used for mixed-radix FFT. + const LARGE_SUBGROUP_ROOT_OF_UNITY: Option = None; +} + +/// A trait that defines parameters for a prime field. +pub trait FpParameters: FftParameters { + /// The modulus of the field. + const MODULUS: Self::BigInt; + + /// The number of bits needed to represent the `Self::MODULUS`. + const MODULUS_BITS: u32; + + /// The number of bits that must be shaved from the beginning of + /// the representation when randomly sampling. 
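// Square-and-multiply, concretely (illustrative): for exp = 13 = 0b1101 the pow
// loop above scans bits most-significant first, squaring each step and
// multiplying by self on set bits: 1 -> self -> self^3 -> self^6 -> self^13.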
+ const REPR_SHAVE_BITS: u32; + + /// Let `M` be the power of 2^64 nearest to `Self::MODULUS_BITS`. Then + /// `R = M % Self::MODULUS`. + const R: Self::BigInt; + + /// R2 = R^2 % Self::MODULUS + const R2: Self::BigInt; + + /// INV = -MODULUS^{-1} mod 2^64 + const INV: u64; + + /// A multiplicative generator of the field. + /// `Self::GENERATOR` is an element having multiplicative order + /// `Self::MODULUS - 1`. + const GENERATOR: Self::BigInt; + + /// The number of bits that can be reliably stored. + /// (Should equal `SELF::MODULUS_BITS - 1`) + const CAPACITY: u32; + + /// t for 2^s * t = MODULUS - 1, and t coprime to 2. + const T: Self::BigInt; + + /// (t - 1) / 2 + const T_MINUS_ONE_DIV_TWO: Self::BigInt; + + /// (Self::MODULUS - 1) / 2 + const MODULUS_MINUS_ONE_DIV_TWO: Self::BigInt; +} + +/// The interface for fields that are able to be used in FFTs. +pub trait FftField: Field { + type FftParams: FftParameters; + + /// Returns the 2^s root of unity. + fn two_adic_root_of_unity() -> Self; + + /// Returns the 2^s * small_subgroup_base^small_subgroup_base_adicity root of unity + /// if a small subgroup is defined. + fn large_subgroup_root_of_unity() -> Option; + + /// Returns the multiplicative generator of `char()` - 1 order. + fn multiplicative_generator() -> Self; + + /// Returns the root of unity of order n, if one exists. + /// If no small multiplicative subgroup is defined, this is the 2-adic root of unity of order n + /// (for n a power of 2). + /// If a small multiplicative subgroup is defined, this is the root of unity of order n for + /// the larger subgroup generated by `FftParams::LARGE_SUBGROUP_ROOT_OF_UNITY` + /// (for n = 2^i * FftParams::SMALL_SUBGROUP_BASE^j for some i, j). + fn get_root_of_unity(n: usize) -> Option { + let mut omega: Self; + if let Some(large_subgroup_root_of_unity) = Self::large_subgroup_root_of_unity() { + let q = Self::FftParams::SMALL_SUBGROUP_BASE.expect( + "LARGE_SUBGROUP_ROOT_OF_UNITY should only be set in conjunction with SMALL_SUBGROUP_BASE", + ) as usize; + let small_subgroup_base_adicity = Self::FftParams::SMALL_SUBGROUP_BASE_ADICITY.expect( + "LARGE_SUBGROUP_ROOT_OF_UNITY should only be set in conjunction with SMALL_SUBGROUP_BASE_ADICITY", + ); + + let q_adicity = k_adicity(q, n); + let q_part = q.pow(q_adicity); + + let two_adicity = k_adicity(2, n); + let two_part = 1 << two_adicity; + + if n != two_part * q_part + || (two_adicity > Self::FftParams::TWO_ADICITY) + || (q_adicity > small_subgroup_base_adicity) + { + return None; + } + + omega = large_subgroup_root_of_unity; + for _ in q_adicity..small_subgroup_base_adicity { + omega = omega.pow(&[q as u64]); + } + + for _ in two_adicity..Self::FftParams::TWO_ADICITY { + omega.square_in_place(); + } + } else { + use core::convert::TryFrom; + // Compute the next power of 2. + let size = n.next_power_of_two() as u64; + let log_size_of_group = ark_std::log2(usize::try_from(size).expect("too large")); + + if n != size as usize || log_size_of_group > Self::FftParams::TWO_ADICITY { + return None; + } + + // Compute the generator for the multiplicative subgroup. + // It should be 2^(log_size_of_group) root of unity. + omega = Self::two_adic_root_of_unity(); + for _ in log_size_of_group..Self::FftParams::TWO_ADICITY { + omega.square_in_place(); + } + } + Some(omega) + } +} + +/// The interface for a prime field. 
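// Toy values for the constants above (for intuition only; real parameters are
// generated per curve): with q = 23 in one 64-bit limb, MODULUS_BITS = 5,
// CAPACITY = 4, R = 2^64 mod 23, R2 = R^2 mod 23, INV = -23^{-1} mod 2^64,
// and 23 - 1 = 2 * 11 gives TWO_ADICITY = 1 and T = 11.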
+pub trait PrimeField: + Field + + FftField::Params> + + FromStr + + From + + Into +{ + type Params: FpParameters; + type BigInt: BigInteger; + + /// Returns a prime field element from its underlying representation. + fn from_repr(repr: Self::BigInt) -> Option; + + /// Returns the underlying representation of the prime field element. + fn into_repr(&self) -> Self::BigInt; + + /// Reads bytes in big-endian, and converts them to a field element. + /// If the bytes are larger than the modulus, it will reduce them. + fn from_be_bytes_mod_order(bytes: &[u8]) -> Self { + let num_modulus_bytes = ((Self::Params::MODULUS_BITS + 7) / 8) as usize; + let num_bytes_to_directly_convert = min(num_modulus_bytes - 1, bytes.len()); + // Copy the leading big-endian bytes directly into a field element. + // The number of bytes directly converted must be less than the + // number of bytes needed to represent the modulus, as we must begin + // modular reduction once the data is of the same number of bytes as the modulus. + let mut bytes_to_directly_convert = Vec::new(); + bytes_to_directly_convert.extend(bytes[..num_bytes_to_directly_convert].iter().rev()); + // Guaranteed to not be None, as the input is less than the modulus size. + let mut res = Self::from_random_bytes(&bytes_to_directly_convert).unwrap(); + + // Update the result, byte by byte. + // We go through existing field arithmetic, which handles the reduction. + // TODO: If we need higher speeds, parse more bytes at once, or implement + // modular multiplication by a u64 + let window_size = Self::from(256u64); + for byte in bytes[num_bytes_to_directly_convert..].iter() { + res *= window_size; + res += Self::from(*byte); + } + res + } + + /// Reads bytes in little-endian, and converts them to a field element. + /// If the bytes are larger than the modulus, it will reduce them. + fn from_le_bytes_mod_order(bytes: &[u8]) -> Self { + let mut bytes_copy = bytes.to_vec(); + bytes_copy.reverse(); + Self::from_be_bytes_mod_order(&bytes_copy) + } + + /// Return the QNR^t, for t defined by + /// `2^s * t = MODULUS - 1`, and t coprime to 2. + fn qnr_to_t() -> Self { + Self::two_adic_root_of_unity() + } + + /// Returns the field size in bits. + fn size_in_bits() -> usize { + Self::Params::MODULUS_BITS as usize + } + + /// Returns the trace. + fn trace() -> Self::BigInt { + Self::Params::T + } + + /// Returns the trace minus one divided by two. + fn trace_minus_one_div_two() -> Self::BigInt { + Self::Params::T_MINUS_ONE_DIV_TWO + } + + /// Returns the modulus minus one divided by two. + fn modulus_minus_one_div_two() -> Self::BigInt { + Self::Params::MODULUS_MINUS_ONE_DIV_TWO + } +} + +/// The interface for a field that supports an efficient square-root operation. +pub trait SquareRootField: Field { + /// Returns a `LegendreSymbol`, which indicates whether this field element is + /// 1 : a quadratic residue + /// 0 : equal to 0 + /// -1 : a quadratic non-residue + fn legendre(&self) -> LegendreSymbol; + + /// Returns the square root of self, if it exists. + #[must_use] + fn sqrt(&self) -> Option; + + /// Sets `self` to be the square root of `self`, if it exists. 
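// Note on from_be_bytes_mod_order above: after the direct prefix conversion,
// the remaining bytes are folded in by Horner's rule in base 256
// (res = res * 256 + b), with every step reduced by ordinary field arithmetic,
// so inputs longer than the modulus are still reduced correctly.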
+ fn sqrt_in_place(&mut self) -> Option<&mut Self>; +} + +#[derive(Debug, PartialEq)] +pub enum LegendreSymbol { + Zero = 0, + QuadraticResidue = 1, + QuadraticNonResidue = -1, +} + +impl LegendreSymbol { + pub fn is_zero(&self) -> bool { + *self == LegendreSymbol::Zero + } + + pub fn is_qnr(&self) -> bool { + *self == LegendreSymbol::QuadraticNonResidue + } + + pub fn is_qr(&self) -> bool { + *self == LegendreSymbol::QuadraticResidue + } +} + +/// Iterates over a slice of `u64` in *big-endian* order. +#[derive(Debug)] +pub struct BitIteratorBE> { + s: Slice, + n: usize, +} + +impl> BitIteratorBE { + pub fn new(s: Slice) -> Self { + let n = s.as_ref().len() * 64; + BitIteratorBE { s, n } + } + + /// Construct an iterator that automatically skips any leading zeros. + /// That is, it skips all zeros before the most-significant one. + pub fn without_leading_zeros(s: Slice) -> impl Iterator { + Self::new(s).skip_while(|b| !b) + } +} + +impl> Iterator for BitIteratorBE { + type Item = bool; + + fn next(&mut self) -> Option { + if self.n == 0 { + None + } else { + self.n -= 1; + let part = self.n / 64; + let bit = self.n - (64 * part); + + Some(self.s.as_ref()[part] & (1 << bit) > 0) + } + } +} + +/// Iterates over a slice of `u64` in *little-endian* order. +#[derive(Debug)] +pub struct BitIteratorLE> { + s: Slice, + n: usize, + max_len: usize, +} + +impl> BitIteratorLE { + pub fn new(s: Slice) -> Self { + let n = 0; + let max_len = s.as_ref().len() * 64; + BitIteratorLE { s, n, max_len } + } + + /// Construct an iterator that automatically skips any trailing zeros. + /// That is, it skips all zeros after the most-significant one. + pub fn without_trailing_zeros(s: Slice) -> impl Iterator { + let mut first_trailing_zero = 0; + for (i, limb) in s.as_ref().iter().enumerate().rev() { + first_trailing_zero = i * 64 + (64 - limb.leading_zeros()) as usize; + if *limb != 0 { + break; + } + } + let mut iter = Self::new(s); + iter.max_len = first_trailing_zero; + iter + } +} + +impl> Iterator for BitIteratorLE { + type Item = bool; + + fn next(&mut self) -> Option { + if self.n == self.max_len { + None + } else { + let part = self.n / 64; + let bit = self.n - (64 * part); + self.n += 1; + + Some(self.s.as_ref()[part] & (1 << bit) > 0) + } + } +} + +use crate::biginteger::{ + BigInteger256, BigInteger320, BigInteger384, BigInteger448, BigInteger64, BigInteger768, + BigInteger832, +}; +use num_bigint::BigUint; + +impl_field_bigint_conv!(Fp64, BigInteger64, Fp64Parameters); +impl_field_bigint_conv!(Fp256, BigInteger256, Fp256Parameters); +impl_field_bigint_conv!(Fp320, BigInteger320, Fp320Parameters); +impl_field_bigint_conv!(Fp384, BigInteger384, Fp384Parameters); +impl_field_bigint_conv!(Fp448, BigInteger448, Fp448Parameters); +impl_field_bigint_conv!(Fp768, BigInteger768, Fp768Parameters); +impl_field_bigint_conv!(Fp832, BigInteger832, Fp832Parameters); + +// Given a vector of field elements {v_i}, compute the vector {v_i^(-1)} +pub fn batch_inversion(v: &mut [F]) { + batch_inversion_and_mul(v, &F::one()); +} + +#[cfg(not(feature = "parallel"))] +// Given a vector of field elements {v_i}, compute the vector {coeff * v_i^(-1)} +pub fn batch_inversion_and_mul(v: &mut [F], coeff: &F) { + serial_batch_inversion_and_mul(v, coeff); +} + +#[cfg(feature = "parallel")] +// Given a vector of field elements {v_i}, compute the vector {coeff * v_i^(-1)} +pub fn batch_inversion_and_mul(v: &mut [F], coeff: &F) { + // Divide the vector v evenly between all available cores + let min_elements_per_thread = 1; + let 
num_cpus_available = rayon::current_num_threads(); + let num_elems = v.len(); + let num_elem_per_thread = max(num_elems / num_cpus_available, min_elements_per_thread); + + // Batch invert in parallel, without copying the vector + v.par_chunks_mut(num_elem_per_thread).for_each(|mut chunk| { + serial_batch_inversion_and_mul(&mut chunk, coeff); + }); +} + +/// Given a vector of field elements {v_i}, compute the vector {coeff * v_i^(-1)} +/// This method is explicitly single core. +fn serial_batch_inversion_and_mul(v: &mut [F], coeff: &F) { + // Montgomery’s Trick and Fast Implementation of Masked AES + // Genelle, Prouff and Quisquater + // Section 3.2 + // but with an optimization to multiply every element in the returned vector by coeff + + // First pass: compute [a, ab, abc, ...] + let mut prod = Vec::with_capacity(v.len()); + let mut tmp = F::one(); + for f in v.iter().filter(|f| !f.is_zero()) { + tmp.mul_assign(f); + prod.push(tmp); + } + + // Invert `tmp`. + tmp = tmp.inverse().unwrap(); // Guaranteed to be nonzero. + + // Multiply product by coeff, so all inverses will be scaled by coeff + tmp *= coeff; + + // Second pass: iterate backwards to compute inverses + for (f, s) in v.iter_mut() + // Backwards + .rev() + // Ignore normalized elements + .filter(|f| !f.is_zero()) + // Backwards, skip last element, fill in one for last term. + .zip(prod.into_iter().rev().skip(1).chain(Some(F::one()))) + { + // tmp := tmp * f; f := tmp * s = 1/f + let new_tmp = tmp * *f; + *f = tmp * &s; + tmp = new_tmp; + } +} + +#[cfg(all(test, feature = "std"))] +mod std_tests { + use super::BitIteratorLE; + + #[test] + fn bit_iterator_le() { + let bits = BitIteratorLE::new(&[0, 1 << 10]).collect::>(); + dbg!(&bits); + assert!(bits[74]); + for (i, bit) in bits.into_iter().enumerate() { + if i != 74 { + assert!(!bit) + } else { + assert!(bit) + } + } + } +} + +#[cfg(test)] +mod no_std_tests { + use super::*; + use crate::test_field::{Fr, FrParameters}; + use ark_std::test_rng; + + #[test] + fn test_batch_inversion() { + let mut random_coeffs = Vec::::new(); + let vec_size = 1000; + + for _ in 0..=vec_size { + random_coeffs.push(Fr::rand(&mut test_rng())); + } + + let mut random_coeffs_inv = random_coeffs.clone(); + batch_inversion::(&mut random_coeffs_inv); + for i in 0..=vec_size { + assert_eq!(random_coeffs_inv[i] * random_coeffs[i], Fr::one()); + } + let rand_multiplier = Fr::rand(&mut test_rng()); + let mut random_coeffs_inv_shifted = random_coeffs.clone(); + batch_inversion_and_mul(&mut random_coeffs_inv_shifted, &rand_multiplier); + for i in 0..=vec_size { + assert_eq!( + random_coeffs_inv_shifted[i] * random_coeffs[i], + rand_multiplier + ); + } + } + + #[test] + fn test_from_into_biguint() { + let mut rng = ark_std::test_rng(); + + let modulus_bits = FrParameters::MODULUS_BITS; + let modulus: num_bigint::BigUint = FrParameters::MODULUS.into(); + + let mut rand_bytes = Vec::new(); + for _ in 0..(2 * modulus_bits / 8) { + rand_bytes.push(u8::rand(&mut rng)); + } + + let rand = BigUint::from_bytes_le(&rand_bytes); + + let a: BigUint = Fr::from(rand.clone()).into(); + let b = rand % modulus; + + assert_eq!(a, b); + } + + #[test] + fn test_from_be_bytes_mod_order() { + // Each test vector is a byte array, + // and its tested by parsing it with from_bytes_mod_order, and the num-bigint library. + // The bytes are currently generated from scripts/test_vectors.py. 
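// Montgomery's trick, concretely (sketch): to invert [a, b, c] the code above
// builds prefix products [a, ab, abc], performs the single inversion
// t = (abc)^{-1}, then walks backwards: c^{-1} = t*(ab), t = t*c;
// b^{-1} = t*a, t = t*b; a^{-1} = t. One inversion plus about 3(n-1)
// multiplications replaces n inversions, and zero entries are simply skipped.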
+ // TODO: Eventually generate all the test vector bytes via computation with the modulus + use ark_std::rand::Rng; + use ark_std::string::ToString; + use num_bigint::BigUint; + + let ref_modulus = + BigUint::from_bytes_be(&::Params::MODULUS.to_bytes_be()); + + let mut test_vectors = vec![ + // 0 + vec![0u8], + // 1 + vec![1u8], + // 255 + vec![255u8], + // 256 + vec![1u8, 0u8], + // 65791 + vec![1u8, 0u8, 255u8], + // 204827637402836681560342736360101429053478720705186085244545541796635082752 + vec![ + 115u8, 237u8, 167u8, 83u8, 41u8, 157u8, 125u8, 72u8, 51u8, 57u8, 216u8, 8u8, 9u8, + 161u8, 216u8, 5u8, 83u8, 189u8, 164u8, 2u8, 255u8, 254u8, 91u8, 254u8, 255u8, + 255u8, 255u8, 255u8, 0u8, 0u8, 0u8, + ], + // 204827637402836681560342736360101429053478720705186085244545541796635082753 + vec![ + 115u8, 237u8, 167u8, 83u8, 41u8, 157u8, 125u8, 72u8, 51u8, 57u8, 216u8, 8u8, 9u8, + 161u8, 216u8, 5u8, 83u8, 189u8, 164u8, 2u8, 255u8, 254u8, 91u8, 254u8, 255u8, + 255u8, 255u8, 255u8, 0u8, 0u8, 1u8, + ], + // 52435875175126190479447740508185965837690552500527637822603658699938581184512 + vec![ + 115u8, 237u8, 167u8, 83u8, 41u8, 157u8, 125u8, 72u8, 51u8, 57u8, 216u8, 8u8, 9u8, + 161u8, 216u8, 5u8, 83u8, 189u8, 164u8, 2u8, 255u8, 254u8, 91u8, 254u8, 255u8, + 255u8, 255u8, 255u8, 0u8, 0u8, 0u8, 0u8, + ], + // 52435875175126190479447740508185965837690552500527637822603658699938581184513 + vec![ + 115u8, 237u8, 167u8, 83u8, 41u8, 157u8, 125u8, 72u8, 51u8, 57u8, 216u8, 8u8, 9u8, + 161u8, 216u8, 5u8, 83u8, 189u8, 164u8, 2u8, 255u8, 254u8, 91u8, 254u8, 255u8, + 255u8, 255u8, 255u8, 0u8, 0u8, 0u8, 1u8, + ], + // 52435875175126190479447740508185965837690552500527637822603658699938581184514 + vec![ + 115u8, 237u8, 167u8, 83u8, 41u8, 157u8, 125u8, 72u8, 51u8, 57u8, 216u8, 8u8, 9u8, + 161u8, 216u8, 5u8, 83u8, 189u8, 164u8, 2u8, 255u8, 254u8, 91u8, 254u8, 255u8, + 255u8, 255u8, 255u8, 0u8, 0u8, 0u8, 2u8, + ], + // 104871750350252380958895481016371931675381105001055275645207317399877162369026 + vec![ + 231u8, 219u8, 78u8, 166u8, 83u8, 58u8, 250u8, 144u8, 102u8, 115u8, 176u8, 16u8, + 19u8, 67u8, 176u8, 10u8, 167u8, 123u8, 72u8, 5u8, 255u8, 252u8, 183u8, 253u8, + 255u8, 255u8, 255u8, 254u8, 0u8, 0u8, 0u8, 2u8, + ], + // 13423584044832304762738621570095607254448781440135075282586536627184276783235328 + vec![ + 115u8, 237u8, 167u8, 83u8, 41u8, 157u8, 125u8, 72u8, 51u8, 57u8, 216u8, 8u8, 9u8, + 161u8, 216u8, 5u8, 83u8, 189u8, 164u8, 2u8, 255u8, 254u8, 91u8, 254u8, 255u8, + 255u8, 255u8, 255u8, 0u8, 0u8, 0u8, 1u8, 0u8, + ], + // 115792089237316195423570985008687907853269984665640564039457584007913129639953 + vec![ + 1u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, + 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, + 17u8, + ], + // 168227964412442385903018725516873873690960537166168201862061242707851710824468 + vec![ + 1u8, 115u8, 237u8, 167u8, 83u8, 41u8, 157u8, 125u8, 72u8, 51u8, 57u8, 216u8, 8u8, + 9u8, 161u8, 216u8, 5u8, 83u8, 189u8, 164u8, 2u8, 255u8, 254u8, 91u8, 254u8, 255u8, + 255u8, 255u8, 255u8, 0u8, 0u8, 0u8, 20u8, + ], + // 29695210719928072218913619902732290376274806626904512031923745164725699769008210 + vec![ + 1u8, 0u8, 115u8, 237u8, 167u8, 83u8, 41u8, 157u8, 125u8, 72u8, 51u8, 57u8, 216u8, + 8u8, 9u8, 161u8, 216u8, 5u8, 83u8, 189u8, 164u8, 2u8, 255u8, 254u8, 91u8, 254u8, + 255u8, 255u8, 255u8, 255u8, 0u8, 0u8, 0u8, 82u8, + ], + ]; + // Add random bytestrings to the test vector list + for i in 1..512 { + let mut rng = test_rng(); + let data: 
Vec = (0..i).map(|_| rng.gen()).collect(); + test_vectors.push(data); + } + for i in test_vectors { + let mut expected_biguint = BigUint::from_bytes_be(&i); + // Reduce expected_biguint using modpow API + expected_biguint = + expected_biguint.modpow(&BigUint::from_bytes_be(&[1u8]), &ref_modulus); + let expected_string = expected_biguint.to_string(); + let expected = Fr::from_str(&expected_string).unwrap(); + let actual = Fr::from_be_bytes_mod_order(&i); + assert_eq!(expected, actual, "failed on test {:?}", i); + } + } +} diff --git a/arkworks/algebra/ff/src/fields/models/cubic_extension.rs b/arkworks/algebra/ff/src/fields/models/cubic_extension.rs new file mode 100644 index 00000000..88db731b --- /dev/null +++ b/arkworks/algebra/ff/src/fields/models/cubic_extension.rs @@ -0,0 +1,669 @@ +use ark_serialize::{ + CanonicalDeserialize, CanonicalDeserializeWithFlags, CanonicalSerialize, + CanonicalSerializeWithFlags, EmptyFlags, Flags, SerializationError, +}; +use ark_std::{ + cmp::{Ord, Ordering, PartialOrd}, + fmt, + io::{Read, Result as IoResult, Write}, + marker::PhantomData, + ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign}, + vec::Vec, +}; + +use num_traits::{One, Zero}; +use zeroize::Zeroize; + +use ark_std::rand::{ + distributions::{Distribution, Standard}, + Rng, +}; + +use crate::{ + bytes::{FromBytes, ToBytes}, + fields::{Field, PrimeField}, + ToConstraintField, UniformRand, +}; + +pub trait CubicExtParameters: 'static + Send + Sync { + /// The prime field that this cubic extension is eventually an extension of. + type BasePrimeField: PrimeField; + /// The base field that this field is a cubic extension of. + type BaseField: Field; + /// The type of the coefficients for an efficient implemntation of the + /// Frobenius endomorphism. + type FrobCoeff: Field; + + /// The degree of the extension over the base prime field. + const DEGREE_OVER_BASE_PRIME_FIELD: usize; + + /// The cubic non-residue used to construct the extension. + const NONRESIDUE: Self::BaseField; + + /// Coefficients for the Frobenius automorphism. + const FROBENIUS_COEFF_C1: &'static [Self::FrobCoeff]; + const FROBENIUS_COEFF_C2: &'static [Self::FrobCoeff]; + + /// A specializable method for multiplying an element of the base field by + /// the quadratic non-residue. This is used in multiplication and squaring. + #[inline(always)] + fn mul_base_field_by_nonresidue(fe: &Self::BaseField) -> Self::BaseField { + Self::NONRESIDUE * fe + } + + /// A specializable method for multiplying an element of the base field by + /// the appropriate Frobenius coefficient. + fn mul_base_field_by_frob_coeff( + c1: &mut Self::BaseField, + c2: &mut Self::BaseField, + power: usize, + ); +} + +#[derive(Derivative)] +#[derivative( + Default(bound = "P: CubicExtParameters"), + Hash(bound = "P: CubicExtParameters"), + Clone(bound = "P: CubicExtParameters"), + Copy(bound = "P: CubicExtParameters"), + Debug(bound = "P: CubicExtParameters"), + PartialEq(bound = "P: CubicExtParameters"), + Eq(bound = "P: CubicExtParameters") +)] +pub struct CubicExtField { + pub c0: P::BaseField, + pub c1: P::BaseField, + pub c2: P::BaseField, + #[derivative(Debug = "ignore")] + #[doc(hidden)] + pub _parameters: PhantomData
<P>
, +} + +impl CubicExtField
<P>
{ + pub fn new(c0: P::BaseField, c1: P::BaseField, c2: P::BaseField) -> Self { + CubicExtField { + c0, + c1, + c2, + _parameters: PhantomData, + } + } + + pub fn mul_assign_by_base_field(&mut self, value: &P::BaseField) { + self.c0.mul_assign(value); + self.c1.mul_assign(value); + self.c2.mul_assign(value); + } + + /// Calculate the norm of an element with respect to the base field `P::BaseField`. + pub fn norm(&self) -> P::BaseField { + let mut self_to_p = *self; + self_to_p.frobenius_map(1); + let mut self_to_p2 = *self; + self_to_p2.frobenius_map(2); + self_to_p *= &(self_to_p2 * self); + assert!(self_to_p.c1.is_zero() && self_to_p.c2.is_zero()); + self_to_p.c0 + } +} + +impl Zero for CubicExtField
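// The norm above is the product of the Galois conjugates: over a prime base
// field of size p it computes N(x) = x * x^p * x^(p^2), which lies in the base
// field (hence the assertion that c1 and c2 vanish); for a base-field element a
// this degenerates to a^3.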
<P>
{ + fn zero() -> Self { + CubicExtField { + c0: P::BaseField::zero(), + c1: P::BaseField::zero(), + c2: P::BaseField::zero(), + _parameters: PhantomData, + } + } + + fn is_zero(&self) -> bool { + self.c0.is_zero() && self.c1.is_zero() && self.c2.is_zero() + } +} + +impl One for CubicExtField
<P>
{ + fn one() -> Self { + CubicExtField { + c0: P::BaseField::one(), + c1: P::BaseField::zero(), + c2: P::BaseField::zero(), + _parameters: PhantomData, + } + } + + fn is_one(&self) -> bool { + self.c0.is_one() && self.c1.is_zero() && self.c2.is_zero() + } +} + +impl Field for CubicExtField
<P>
{ + type BasePrimeField = P::BasePrimeField; + + fn extension_degree() -> u64 { + 3 * P::BaseField::extension_degree() + } + + fn from_base_prime_field_elems(elems: &[Self::BasePrimeField]) -> Option { + if elems.len() != (Self::extension_degree() as usize) { + return None; + } + let base_ext_deg = P::BaseField::extension_degree() as usize; + Some(Self::new( + P::BaseField::from_base_prime_field_elems(&elems[0..base_ext_deg]).unwrap(), + P::BaseField::from_base_prime_field_elems(&elems[base_ext_deg..2 * base_ext_deg]) + .unwrap(), + P::BaseField::from_base_prime_field_elems(&elems[2 * base_ext_deg..]).unwrap(), + )) + } + + fn double(&self) -> Self { + let mut result = *self; + result.double_in_place(); + result + } + + fn double_in_place(&mut self) -> &mut Self { + self.c0.double_in_place(); + self.c1.double_in_place(); + self.c2.double_in_place(); + self + } + + #[inline] + fn from_random_bytes_with_flags(bytes: &[u8]) -> Option<(Self, F)> { + let split_at = bytes.len() / 3; + if let Some(c0) = P::BaseField::from_random_bytes(&bytes[..split_at]) { + if let Some(c1) = P::BaseField::from_random_bytes(&bytes[split_at..2 * split_at]) { + if let Some((c2, flags)) = + P::BaseField::from_random_bytes_with_flags(&bytes[2 * split_at..]) + { + return Some((CubicExtField::new(c0, c1, c2), flags)); + } + } + } + None + } + + #[inline] + fn from_random_bytes(bytes: &[u8]) -> Option { + Self::from_random_bytes_with_flags::(bytes).map(|f| f.0) + } + + fn square(&self) -> Self { + let mut result = *self; + result.square_in_place(); + result + } + + fn square_in_place(&mut self) -> &mut Self { + // Devegili OhEig Scott Dahab --- Multiplication and Squaring on + // AbstractPairing-Friendly + // Fields.pdf; Section 4 (CH-SQR2) + let a = self.c0; + let b = self.c1; + let c = self.c2; + + let s0 = a.square(); + let ab = a * &b; + let s1 = ab.double(); + let s2 = (a - &b + &c).square(); + let bc = b * &c; + let s3 = bc.double(); + let s4 = c.square(); + + self.c0 = s0 + &P::mul_base_field_by_nonresidue(&s3); + self.c1 = s1 + &P::mul_base_field_by_nonresidue(&s4); + self.c2 = s1 + &s2 + &s3 - &s0 - &s4; + self + } + + fn inverse(&self) -> Option { + if self.is_zero() { + None + } else { + // From "High-Speed Software Implementation of the Optimal Ate AbstractPairing + // over + // Barreto-Naehrig Curves"; Algorithm 17 + let t0 = self.c0.square(); + let t1 = self.c1.square(); + let t2 = self.c2.square(); + let t3 = self.c0 * &self.c1; + let t4 = self.c0 * &self.c2; + let t5 = self.c1 * &self.c2; + let n5 = P::mul_base_field_by_nonresidue(&t5); + + let s0 = t0 - &n5; + let s1 = P::mul_base_field_by_nonresidue(&t2) - &t3; + let s2 = t1 - &t4; // typo in paper referenced above. should be "-" as per Scott, but is "*" + + let a1 = self.c2 * &s1; + let a2 = self.c1 * &s2; + let mut a3 = a1 + &a2; + a3 = P::mul_base_field_by_nonresidue(&a3); + let t6 = (self.c0 * &s0 + &a3).inverse().unwrap(); + + let c0 = t6 * &s0; + let c1 = t6 * &s1; + let c2 = t6 * &s2; + + Some(Self::new(c0, c1, c2)) + } + } + + fn inverse_in_place(&mut self) -> Option<&mut Self> { + if let Some(inverse) = self.inverse() { + *self = inverse; + Some(self) + } else { + None + } + } + + fn frobenius_map(&mut self, power: usize) { + self.c0.frobenius_map(power); + self.c1.frobenius_map(power); + self.c2.frobenius_map(power); + + P::mul_base_field_by_frob_coeff(&mut self.c1, &mut self.c2, power); + } +} + +/// `CubicExtField` elements are ordered lexicographically. +impl Ord for CubicExtField
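// Cost sketch for the CH-SQR2 squaring above: five base-field products
// (a^2, a*b, (a - b + c)^2, b*c, c^2) replace the six of schoolbook squaring,
// and the two mul_base_field_by_nonresidue calls fold the X^3 and X^4 terms
// back below degree 3.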
<P>
{ + #[inline(always)] + fn cmp(&self, other: &Self) -> Ordering { + let c2_cmp = self.c2.cmp(&other.c2); + let c1_cmp = self.c1.cmp(&other.c1); + let c0_cmp = self.c0.cmp(&other.c0); + if c2_cmp == Ordering::Equal { + if c1_cmp == Ordering::Equal { + c0_cmp + } else { + c1_cmp + } + } else { + c2_cmp + } + } +} + +impl PartialOrd for CubicExtField
<P>
{ + #[inline(always)] + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Zeroize for CubicExtField
<P>
{ + // The phantom data does not contain element-specific data + // and thus does not need to be zeroized. + fn zeroize(&mut self) { + self.c0.zeroize(); + self.c1.zeroize(); + self.c2.zeroize(); + } +} + +impl From for CubicExtField
<P>
{ + fn from(other: u128) -> Self { + let fe: P::BaseField = other.into(); + Self::new(fe, P::BaseField::zero(), P::BaseField::zero()) + } +} + +impl From for CubicExtField
<P>
{ + #[inline] + fn from(val: i128) -> Self { + let abs = Self::from(val.unsigned_abs()); + if val.is_positive() { + abs + } else { + -abs + } + } +} + +impl From for CubicExtField
<P>
{ + fn from(other: u64) -> Self { + let fe: P::BaseField = other.into(); + Self::new(fe, P::BaseField::zero(), P::BaseField::zero()) + } +} + +impl From for CubicExtField
<P>
{ + #[inline] + fn from(val: i64) -> Self { + let abs = Self::from(val.unsigned_abs()); + if val.is_positive() { + abs + } else { + -abs + } + } +} + +impl From for CubicExtField
<P>
{ + fn from(other: u32) -> Self { + let fe: P::BaseField = other.into(); + Self::new(fe, P::BaseField::zero(), P::BaseField::zero()) + } +} + +impl From for CubicExtField
<P>
{ + #[inline] + fn from(val: i32) -> Self { + let abs = Self::from(val.unsigned_abs()); + if val.is_positive() { + abs + } else { + -abs + } + } +} + +impl From for CubicExtField
<P>
{ + fn from(other: u16) -> Self { + let fe: P::BaseField = other.into(); + Self::new(fe, P::BaseField::zero(), P::BaseField::zero()) + } +} + +impl From for CubicExtField
<P>
{ + #[inline] + fn from(val: i16) -> Self { + let abs = Self::from(val.unsigned_abs()); + if val.is_positive() { + abs + } else { + -abs + } + } +} + +impl From for CubicExtField
<P>
{ + fn from(other: u8) -> Self { + let fe: P::BaseField = other.into(); + Self::new(fe, P::BaseField::zero(), P::BaseField::zero()) + } +} + +impl From for CubicExtField
<P>
{ + #[inline] + fn from(val: i8) -> Self { + let abs = Self::from(val.unsigned_abs()); + if val.is_positive() { + abs + } else { + -abs + } + } +} + +impl From for CubicExtField
<P>
{ + fn from(other: bool) -> Self { + Self::new( + u8::from(other).into(), + P::BaseField::zero(), + P::BaseField::zero(), + ) + } +} + +impl ToBytes for CubicExtField
<P>
{ + #[inline] + fn write(&self, mut writer: W) -> IoResult<()> { + self.c0.write(&mut writer)?; + self.c1.write(&mut writer)?; + self.c2.write(writer) + } +} + +impl FromBytes for CubicExtField
<P>
{ + #[inline] + fn read(mut reader: R) -> IoResult { + let c0 = P::BaseField::read(&mut reader)?; + let c1 = P::BaseField::read(&mut reader)?; + let c2 = P::BaseField::read(reader)?; + Ok(CubicExtField::new(c0, c1, c2)) + } +} + +impl Neg for CubicExtField
<P>
{ + type Output = Self; + #[inline] + fn neg(mut self) -> Self { + self.c0 = -self.c0; + self.c1 = -self.c1; + self.c2 = -self.c2; + self + } +} + +impl Distribution> for Standard { + #[inline] + fn sample(&self, rng: &mut R) -> CubicExtField
<P>
{ + CubicExtField::new( + UniformRand::rand(rng), + UniformRand::rand(rng), + UniformRand::rand(rng), + ) + } +} + +impl<'a, P: CubicExtParameters> Add<&'a CubicExtField
<P>
> for CubicExtField
<P>
{ + type Output = Self; + + #[inline] + fn add(mut self, other: &Self) -> Self { + self.add_assign(other); + self + } +} + +impl<'a, P: CubicExtParameters> Sub<&'a CubicExtField
<P>
> for CubicExtField
<P>
{ + type Output = Self; + + #[inline] + fn sub(mut self, other: &Self) -> Self { + self.sub_assign(other); + self + } +} + +impl<'a, P: CubicExtParameters> Mul<&'a CubicExtField
<P>
> for CubicExtField
<P>
{ + type Output = Self; + + #[inline] + fn mul(mut self, other: &Self) -> Self { + self.mul_assign(other); + self + } +} + +impl<'a, P: CubicExtParameters> Div<&'a CubicExtField
<P>
> for CubicExtField
<P>
{ + type Output = Self; + + #[inline] + fn div(mut self, other: &Self) -> Self { + self.mul_assign(&other.inverse().unwrap()); + self + } +} + +impl_additive_ops_from_ref!(CubicExtField, CubicExtParameters); +impl_multiplicative_ops_from_ref!(CubicExtField, CubicExtParameters); +impl<'a, P: CubicExtParameters> AddAssign<&'a Self> for CubicExtField
<P>
{ + #[inline] + fn add_assign(&mut self, other: &Self) { + self.c0.add_assign(&other.c0); + self.c1.add_assign(&other.c1); + self.c2.add_assign(&other.c2); + } +} + +impl<'a, P: CubicExtParameters> SubAssign<&'a Self> for CubicExtField
<P>
{ + #[inline] + fn sub_assign(&mut self, other: &Self) { + self.c0.sub_assign(&other.c0); + self.c1.sub_assign(&other.c1); + self.c2.sub_assign(&other.c2); + } +} + +impl<'a, P: CubicExtParameters> MulAssign<&'a Self> for CubicExtField
<P>
{ + #[inline] + #[allow(clippy::many_single_char_names)] + fn mul_assign(&mut self, other: &Self) { + // Devegili OhEig Scott Dahab --- Multiplication and Squaring on + // AbstractPairing-Friendly + // Fields.pdf; Section 4 (Karatsuba) + + let a = other.c0; + let b = other.c1; + let c = other.c2; + + let d = self.c0; + let e = self.c1; + let f = self.c2; + + let ad = d * &a; + let be = e * &b; + let cf = f * &c; + + let x = (e + &f) * &(b + &c) - &be - &cf; + let y = (d + &e) * &(a + &b) - &ad - &be; + let z = (d + &f) * &(a + &c) - &ad + &be - &cf; + + self.c0 = ad + &P::mul_base_field_by_nonresidue(&x); + self.c1 = y + &P::mul_base_field_by_nonresidue(&cf); + self.c2 = z; + } +} + +impl<'a, P: CubicExtParameters> DivAssign<&'a Self> for CubicExtField
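// Karatsuba-style interpolation above: x = (e + f)(b + c) - be - cf recovers
// ec + fb with one product instead of two, and similarly for y and z, so six
// base-field multiplications (ad, be, cf, x, y, z) replace the nine of
// schoolbook multiplication, at the price of extra additions.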
<P>
{ + #[inline] + fn div_assign(&mut self, other: &Self) { + self.mul_assign(&other.inverse().unwrap()); + } +} + +impl fmt::Display for CubicExtField
<P>
{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "CubicExtField({}, {}, {})", self.c0, self.c1, self.c2) + } +} + +impl CanonicalSerializeWithFlags for CubicExtField
<P>
{ + #[inline] + fn serialize_with_flags( + &self, + mut writer: W, + flags: F, + ) -> Result<(), SerializationError> { + self.c0.serialize(&mut writer)?; + self.c1.serialize(&mut writer)?; + self.c2.serialize_with_flags(&mut writer, flags)?; + Ok(()) + } + + #[inline] + fn serialized_size_with_flags(&self) -> usize { + self.c0.serialized_size() + + self.c1.serialized_size() + + self.c2.serialized_size_with_flags::() + } +} + +impl CanonicalSerialize for CubicExtField
<P>
{ + #[inline] + fn serialize(&self, writer: W) -> Result<(), SerializationError> { + self.serialize_with_flags(writer, EmptyFlags) + } + + #[inline] + fn serialized_size(&self) -> usize { + self.serialized_size_with_flags::() + } +} + +impl CanonicalDeserializeWithFlags for CubicExtField
<P>
{ + #[inline] + fn deserialize_with_flags( + mut reader: R, + ) -> Result<(Self, F), SerializationError> { + let c0 = CanonicalDeserialize::deserialize(&mut reader)?; + let c1 = CanonicalDeserialize::deserialize(&mut reader)?; + let (c2, flags) = CanonicalDeserializeWithFlags::deserialize_with_flags(&mut reader)?; + Ok((CubicExtField::new(c0, c1, c2), flags)) + } +} + +impl CanonicalDeserialize for CubicExtField
<P>
{ + #[inline] + fn deserialize(mut reader: R) -> Result { + let c0: P::BaseField = CanonicalDeserialize::deserialize(&mut reader)?; + let c1: P::BaseField = CanonicalDeserialize::deserialize(&mut reader)?; + let c2: P::BaseField = CanonicalDeserialize::deserialize(&mut reader)?; + Ok(CubicExtField::new(c0, c1, c2)) + } +} + +impl ToConstraintField for CubicExtField
<P>
+where + P::BaseField: ToConstraintField, +{ + fn to_field_elements(&self) -> Option> { + let mut res = Vec::new(); + let mut c0_elems = self.c0.to_field_elements()?; + let mut c1_elems = self.c1.to_field_elements()?; + let mut c2_elems = self.c2.to_field_elements()?; + + res.append(&mut c0_elems); + res.append(&mut c1_elems); + res.append(&mut c2_elems); + + Some(res) + } +} + +#[cfg(test)] +mod cube_ext_tests { + use super::*; + use crate::test_field::{Fq, Fq2, Fq6}; + use ark_std::test_rng; + + #[test] + fn test_from_base_prime_field_elements() { + let ext_degree = Fq6::extension_degree() as usize; + // Test on slice lengths that aren't equal to the extension degree + let max_num_elems_to_test = 10; + for d in 0..max_num_elems_to_test { + if d == ext_degree { + continue; + } + let mut random_coeffs = Vec::::new(); + for _ in 0..d { + random_coeffs.push(Fq::rand(&mut test_rng())); + } + let res = Fq6::from_base_prime_field_elems(&random_coeffs); + assert_eq!(res, None); + } + // Test on slice lengths that are equal to the extension degree + // We test consistency against Fq2::new + let number_of_tests = 10; + for _ in 0..number_of_tests { + let mut random_coeffs = Vec::::new(); + for _ in 0..ext_degree { + random_coeffs.push(Fq::rand(&mut test_rng())); + } + let actual = Fq6::from_base_prime_field_elems(&random_coeffs).unwrap(); + + let expected_0 = Fq2::new(random_coeffs[0], random_coeffs[1]); + let expected_1 = Fq2::new(random_coeffs[2], random_coeffs[3]); + let expected_2 = Fq2::new(random_coeffs[3], random_coeffs[4]); + let expected = Fq6::new(expected_0, expected_1, expected_2); + assert_eq!(actual, expected); + } + } +} diff --git a/arkworks/algebra/ff/src/fields/models/fp12_2over3over2.rs b/arkworks/algebra/ff/src/fields/models/fp12_2over3over2.rs new file mode 100644 index 00000000..e7115fca --- /dev/null +++ b/arkworks/algebra/ff/src/fields/models/fp12_2over3over2.rs @@ -0,0 +1,261 @@ +use super::quadratic_extension::*; +use crate::{ + fields::{fp6_3over2::*, Field, Fp2, Fp2Parameters}, + One, +}; +use core::marker::PhantomData; +use core::ops::{AddAssign, SubAssign}; + +type Fp2Params
<P>
= <
<P as Fp12Parameters>
::Fp6Params as Fp6Parameters>::Fp2Params; + +pub trait Fp12Parameters: 'static + Send + Sync + Copy { + type Fp6Params: Fp6Parameters; + + /// This *must* equal (0, 1, 0); + /// see [[DESD06, Section 6.1]](https://eprint.iacr.org/2006/471.pdf). + const NONRESIDUE: Fp6; + + /// Coefficients for the Frobenius automorphism. + const FROBENIUS_COEFF_FP12_C1: &'static [Fp2>]; + + /// Multiply by quadratic nonresidue v. + #[inline(always)] + fn mul_fp6_by_nonresidue(fe: &Fp6) -> Fp6 { + // see [[DESD06, Section 6.1]](https://eprint.iacr.org/2006/471.pdf). + let new_c0 = Self::Fp6Params::mul_fp2_by_nonresidue(&fe.c2); + let new_c1 = fe.c0; + let new_c2 = fe.c1; + Fp6::new(new_c0, new_c1, new_c2) + } +} + +pub struct Fp12ParamsWrapper(PhantomData

<P>);

impl<P: Fp12Parameters> QuadExtParameters for Fp12ParamsWrapper<P>
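// Multiplication by the Fp12 non-residue v = (0, 1, 0) acts on an Fp6 element
// c0 + c1*v + c2*v^2 (coefficients in Fp2) as a cyclic coefficient shift plus
// one cheap scaling, since v^3 = beta (the Fp6 non-residue):
//     v * (c0 + c1*v + c2*v^2) = beta*c2 + c0*v + c1*v^2.
// This is what `mul_fp6_by_nonresidue` above computes; e.g. the coefficient
// triple (1, 2, 3) maps to (beta*3, 1, 2).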
{ + type BasePrimeField = as Fp2Parameters>::Fp; + type BaseField = Fp6; + type FrobCoeff = Fp2>; + + const DEGREE_OVER_BASE_PRIME_FIELD: usize = 12; + + const NONRESIDUE: Self::BaseField = P::NONRESIDUE; + + const FROBENIUS_COEFF_C1: &'static [Self::FrobCoeff] = P::FROBENIUS_COEFF_FP12_C1; + + #[inline(always)] + fn mul_base_field_by_nonresidue(fe: &Self::BaseField) -> Self::BaseField { + P::mul_fp6_by_nonresidue(fe) + } + + fn mul_base_field_by_frob_coeff(fe: &mut Self::BaseField, power: usize) { + fe.mul_assign_by_fp2(Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD]); + } + + fn cyclotomic_exp(fe: &Fp12

<P>, exponent: impl AsRef<[u64]>) -> Fp12<P>
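// In the cyclotomic subgroup a unitary element's inverse is its conjugate
// (an element f of norm 1 satisfies f^(p^6 + 1) = 1 here, so its conjugate
// f^(p^6) equals f^(-1)). The signed-digit NAF loop below therefore serves
// negative digits with a single conjugation instead of a field inversion,
// and uses the dedicated `cyclotomic_square_in_place` for every doubling.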
{ + let mut res = QuadExtField::one(); + let mut fe_inverse = *fe; + fe_inverse.conjugate(); + + let mut found_nonzero = false; + let naf = crate::biginteger::arithmetic::find_wnaf(exponent.as_ref()); + + for &value in naf.iter().rev() { + if found_nonzero { + res.cyclotomic_square_in_place(); + } + + if value != 0 { + found_nonzero = true; + + if value > 0 { + res *= fe; + } else { + res *= &fe_inverse; + } + } + } + res + } +} + +pub type Fp12

<P> = QuadExtField<Fp12ParamsWrapper<P>>;

impl<P: Fp12Parameters> Fp12<P>
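// The `mul_by_034` / `mul_by_014` methods below are sparse multiplications:
// in pairing Miller loops the line-function values have only three non-zero
// Fp2 coefficients, so multiplying the Fp12 accumulator by them is much
// cheaper than a general Fp12 multiplication. The digits name the non-zero
// positions in the basis (c0.c0, c0.c1, c0.c2, c1.c0, c1.c1, c1.c2). A
// hedged usage sketch, with `f` an Fp12 accumulator and (c0, c3, c4) a
// sparse line value:
//
//     f.mul_by_034(&c0, &c3, &c4);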
{ + pub fn mul_by_fp( + &mut self, + element: &<::Fp2Params as Fp2Parameters>::Fp, + ) { + self.c0.mul_by_fp(&element); + self.c1.mul_by_fp(&element); + } + + pub fn mul_by_034( + &mut self, + c0: &Fp2>, + c3: &Fp2>, + c4: &Fp2>, + ) { + let a0 = self.c0.c0 * c0; + let a1 = self.c0.c1 * c0; + let a2 = self.c0.c2 * c0; + let a = Fp6::new(a0, a1, a2); + let mut b = self.c1; + b.mul_by_01(&c3, &c4); + + let c0 = *c0 + c3; + let c1 = c4; + let mut e = self.c0 + &self.c1; + e.mul_by_01(&c0, &c1); + self.c1 = e - &(a + &b); + self.c0 = a + &P::mul_fp6_by_nonresidue(&b); + } + + pub fn mul_by_014( + &mut self, + c0: &Fp2>, + c1: &Fp2>, + c4: &Fp2>, + ) { + let mut aa = self.c0; + aa.mul_by_01(c0, c1); + let mut bb = self.c1; + bb.mul_by_1(c4); + let mut o = *c1; + o.add_assign(c4); + self.c1.add_assign(&self.c0); + self.c1.mul_by_01(c0, &o); + self.c1.sub_assign(&aa); + self.c1.sub_assign(&bb); + self.c0 = bb; + self.c0 = P::mul_fp6_by_nonresidue(&self.c0); + self.c0.add_assign(&aa); + } + + pub fn cyclotomic_square_in_place(&mut self) { + // Faster Squaring in the Cyclotomic Subgroup of Sixth Degree Extensions + // - Robert Granger and Michael Scott + // + if characteristic_square_mod_6_is_one(Self::characteristic()) { + let fp2_nr = ::mul_fp2_by_nonresidue; + + let r0 = &self.c0.c0; + let r4 = &self.c0.c1; + let r3 = &self.c0.c2; + let r2 = &self.c1.c0; + let r1 = &self.c1.c1; + let r5 = &self.c1.c2; + + // t0 + t1*y = (z0 + z1*y)^2 = a^2 + let mut tmp = *r0 * r1; + let t0 = (*r0 + r1) * &(fp2_nr(&r1) + r0) - &tmp - &fp2_nr(&tmp); + let t1 = tmp.double(); + + // t2 + t3*y = (z2 + z3*y)^2 = b^2 + tmp = *r2 * r3; + let t2 = (*r2 + r3) * &(fp2_nr(&r3) + r2) - &tmp - &fp2_nr(&tmp); + let t3 = tmp.double(); + + // t4 + t5*y = (z4 + z5*y)^2 = c^2 + tmp = *r4 * r5; + let t4 = (*r4 + r5) * &(fp2_nr(&r5) + r4) - &tmp - &fp2_nr(&tmp); + let t5 = tmp.double(); + + let z0 = &mut self.c0.c0; + let z4 = &mut self.c0.c1; + let z3 = &mut self.c0.c2; + let z2 = &mut self.c1.c0; + let z1 = &mut self.c1.c1; + let z5 = &mut self.c1.c2; + + // for A + + // z0 = 3 * t0 - 2 * z0 + *z0 = t0 - &*z0; + z0.double_in_place(); + *z0 += &t0; + + // z1 = 3 * t1 + 2 * z1 + *z1 = t1 + &*z1; + z1.double_in_place(); + *z1 += &t1; + + // for B + + // z2 = 3 * (xi * t5) + 2 * z2 + tmp = fp2_nr(&t5); + *z2 += tmp; + z2.double_in_place(); + *z2 += &tmp; + + // z3 = 3 * t4 - 2 * z3 + *z3 = t4 - &*z3; + z3.double_in_place(); + *z3 += &t4; + + // for C + + // z4 = 3 * t2 - 2 * z4 + *z4 = t2 - &*z4; + z4.double_in_place(); + *z4 += &t2; + + // z5 = 3 * t3 + 2 * z5 + *z5 += t3; + z5.double_in_place(); + *z5 += &t3; + } else { + self.square_in_place(); + } + } + + pub fn cyclotomic_square(&self) -> Self { + let mut result = *self; + result.cyclotomic_square_in_place(); + result + } +} + +// TODO: make `const fn` in 1.46. +pub fn characteristic_square_mod_6_is_one(characteristic: &[u64]) -> bool { + // characteristic mod 6 = (a_0 + 2**64 * a_1 + ...) mod 6 + // = a_0 mod 6 + (2**64 * a_1 mod 6) + (...) mod 6 + // = a_0 mod 6 + (4 * a_1 mod 6) + (4 * ...) 
mod 6 + let mut char_mod_6 = 0u64; + for (i, limb) in characteristic.iter().enumerate() { + char_mod_6 += if i == 0 { + limb % 6 + } else { + (4 * (limb % 6)) % 6 + }; + } + (char_mod_6 * char_mod_6) % 6 == 1 +} + +#[cfg(test)] +mod test { + #[test] + fn test_characteristic_square_mod_6_is_one() { + use super::*; + assert!(!characteristic_square_mod_6_is_one(&[36])); + assert!(characteristic_square_mod_6_is_one(&[37])); + assert!(!characteristic_square_mod_6_is_one(&[38])); + assert!(!characteristic_square_mod_6_is_one(&[39])); + assert!(!characteristic_square_mod_6_is_one(&[40])); + assert!(characteristic_square_mod_6_is_one(&[41])); + + assert!(!characteristic_square_mod_6_is_one(&[36, 36])); + assert!(!characteristic_square_mod_6_is_one(&[36, 37])); + assert!(!characteristic_square_mod_6_is_one(&[36, 38])); + assert!(!characteristic_square_mod_6_is_one(&[36, 39])); + assert!(!characteristic_square_mod_6_is_one(&[36, 40])); + assert!(!characteristic_square_mod_6_is_one(&[36, 41])); + + assert!(!characteristic_square_mod_6_is_one(&[36, 41])); + assert!(!characteristic_square_mod_6_is_one(&[37, 41])); + assert!(!characteristic_square_mod_6_is_one(&[38, 41])); + assert!(characteristic_square_mod_6_is_one(&[39, 41])); + assert!(!characteristic_square_mod_6_is_one(&[40, 41])); + assert!(characteristic_square_mod_6_is_one(&[41, 41])); + assert!(characteristic_square_mod_6_is_one(&[1, u64::MAX])); + } +} diff --git a/arkworks/algebra/ff/src/fields/models/fp2.rs b/arkworks/algebra/ff/src/fields/models/fp2.rs new file mode 100644 index 00000000..e820c5c8 --- /dev/null +++ b/arkworks/algebra/ff/src/fields/models/fp2.rs @@ -0,0 +1,101 @@ +use super::quadratic_extension::*; +use crate::fields::PrimeField; +use core::marker::PhantomData; + +pub trait Fp2Parameters: 'static + Send + Sync { + type Fp: PrimeField; + + const NONRESIDUE: Self::Fp; + + const QUADRATIC_NONRESIDUE: (Self::Fp, Self::Fp); + + /// Coefficients for the Frobenius automorphism. + const FROBENIUS_COEFF_FP2_C1: &'static [Self::Fp]; + + /// Return `fe * Self::NONRESIDUE`. + #[inline(always)] + fn mul_fp_by_nonresidue(fe: &Self::Fp) -> Self::Fp { + Self::NONRESIDUE * fe + } + + /// A specializable method for computing `x + mul_base_field_by_nonresidue(y)` + /// This allows for optimizations when the non-residue is + /// canonically negative in the field. + #[inline(always)] + fn add_and_mul_fp_by_nonresidue(x: &Self::Fp, y: &Self::Fp) -> Self::Fp { + *x + Self::mul_fp_by_nonresidue(y) + } + + /// A specializable method for computing `x + y + mul_base_field_by_nonresidue(y)` + /// This allows for optimizations when the non-residue is not `-1`. + #[inline(always)] + fn add_and_mul_fp_by_nonresidue_plus_one(x: &Self::Fp, y: &Self::Fp) -> Self::Fp { + let mut tmp = *x; + tmp += y; + Self::add_and_mul_fp_by_nonresidue(&tmp, &y) + } + + /// A specializable method for computing `x - mul_base_field_by_nonresidue(y)` + /// This allows for optimizations when the non-residue is + /// canonically negative in the field. + #[inline(always)] + fn sub_and_mul_fp_by_nonresidue(x: &Self::Fp, y: &Self::Fp) -> Self::Fp { + *x - Self::mul_fp_by_nonresidue(y) + } +} + +pub struct Fp2ParamsWrapper(PhantomData

<P>);

impl<P: Fp2Parameters> QuadExtParameters for Fp2ParamsWrapper<P>
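// The `*_and_mul_fp_by_nonresidue` hooks forwarded below let curve
// implementations specialize the arithmetic when the non-residue has a
// convenient shape. For example, with NONRESIDUE = -1:
//     add_and_mul_fp_by_nonresidue(x, y) = x + (-1)*y = x - y,
// turning a full field multiplication into a single subtraction.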
{ + type BasePrimeField = P::Fp; + type BaseField = P::Fp; + type FrobCoeff = P::Fp; + + const DEGREE_OVER_BASE_PRIME_FIELD: usize = 2; + + const NONRESIDUE: Self::BaseField = P::NONRESIDUE; + + const FROBENIUS_COEFF_C1: &'static [Self::FrobCoeff] = P::FROBENIUS_COEFF_FP2_C1; + + #[inline(always)] + fn mul_base_field_by_nonresidue(fe: &Self::BaseField) -> Self::BaseField { + P::mul_fp_by_nonresidue(fe) + } + + #[inline(always)] + fn add_and_mul_base_field_by_nonresidue( + x: &Self::BaseField, + y: &Self::BaseField, + ) -> Self::BaseField { + P::add_and_mul_fp_by_nonresidue(x, y) + } + + #[inline(always)] + fn add_and_mul_base_field_by_nonresidue_plus_one( + x: &Self::BaseField, + y: &Self::BaseField, + ) -> Self::BaseField { + P::add_and_mul_fp_by_nonresidue_plus_one(x, y) + } + + #[inline(always)] + fn sub_and_mul_base_field_by_nonresidue( + x: &Self::BaseField, + y: &Self::BaseField, + ) -> Self::BaseField { + P::sub_and_mul_fp_by_nonresidue(x, y) + } + + fn mul_base_field_by_frob_coeff(fe: &mut Self::BaseField, power: usize) { + *fe *= &Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD]; + } +} + +pub type Fp2

<P> = QuadExtField<Fp2ParamsWrapper<P>>;

impl<P: Fp2Parameters> Fp2<P>
{ + pub fn mul_assign_by_fp(&mut self, other: &P::Fp) { + self.c0 *= other; + self.c1 *= other; + } +} diff --git a/arkworks/algebra/ff/src/fields/models/fp3.rs b/arkworks/algebra/ff/src/fields/models/fp3.rs new file mode 100644 index 00000000..809bfa11 --- /dev/null +++ b/arkworks/algebra/ff/src/fields/models/fp3.rs @@ -0,0 +1,92 @@ +use super::cubic_extension::*; +use crate::fields::*; +use core::marker::PhantomData; + +pub trait Fp3Parameters: 'static + Send + Sync { + type Fp: PrimeField + SquareRootField; + + const NONRESIDUE: Self::Fp; + + const FROBENIUS_COEFF_FP3_C1: &'static [Self::Fp]; + const FROBENIUS_COEFF_FP3_C2: &'static [Self::Fp]; + + /// p^3 - 1 = 2^s * t, where t is odd. + const TWO_ADICITY: u32; + const T_MINUS_ONE_DIV_TWO: &'static [u64]; + /// t-th power of a quadratic nonresidue in Fp3. + const QUADRATIC_NONRESIDUE_TO_T: (Self::Fp, Self::Fp, Self::Fp); + + #[inline(always)] + fn mul_fp_by_nonresidue(fe: &Self::Fp) -> Self::Fp { + Self::NONRESIDUE * fe + } +} + +pub struct Fp3ParamsWrapper(PhantomData

<P>);

impl<P: Fp3Parameters> CubicExtParameters for Fp3ParamsWrapper<P>
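// The Frobenius map phi^i acts on a = c0 + c1*v + c2*v^2 coefficient-wise and
// then twists c1 and c2:
//     phi^i(a) = c0^(p^i) + C1[i]*c1^(p^i)*v + C2[i]*c2^(p^i)*v^2,
// with C1[i] = NONRESIDUE^((p^i - 1)/3) and C2[i] = C1[i]^2, which is why the
// cubic model carries the two tables FROBENIUS_COEFF_C1 and FROBENIUS_COEFF_C2
// applied in `mul_base_field_by_frob_coeff` below.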
{ + type BasePrimeField = P::Fp; + type BaseField = P::Fp; + type FrobCoeff = P::Fp; + + const DEGREE_OVER_BASE_PRIME_FIELD: usize = 3; + + const NONRESIDUE: Self::BaseField = P::NONRESIDUE; + + const FROBENIUS_COEFF_C1: &'static [Self::FrobCoeff] = P::FROBENIUS_COEFF_FP3_C1; + const FROBENIUS_COEFF_C2: &'static [Self::FrobCoeff] = P::FROBENIUS_COEFF_FP3_C2; + + #[inline(always)] + fn mul_base_field_by_nonresidue(fe: &Self::BaseField) -> Self::BaseField { + P::mul_fp_by_nonresidue(fe) + } + + fn mul_base_field_by_frob_coeff( + c1: &mut Self::BaseField, + c2: &mut Self::BaseField, + power: usize, + ) { + *c1 *= &Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD]; + *c2 *= &Self::FROBENIUS_COEFF_C2[power % Self::DEGREE_OVER_BASE_PRIME_FIELD]; + } +} + +pub type Fp3

<P> = CubicExtField<Fp3ParamsWrapper<P>>;

impl<P: Fp3Parameters> Fp3<P>
{
    pub fn mul_assign_by_fp(&mut self, value: &P::Fp) {
        self.c0.mul_assign(value);
        self.c1.mul_assign(value);
        self.c2.mul_assign(value);
    }

    /// Returns the value of QNR^T.
    #[inline]
    pub fn qnr_to_t() -> Self {
        Self::new(
            P::QUADRATIC_NONRESIDUE_TO_T.0,
            P::QUADRATIC_NONRESIDUE_TO_T.1,
            P::QUADRATIC_NONRESIDUE_TO_T.2,
        )
    }
}

impl<P: Fp3Parameters> SquareRootField for Fp3<P>
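// For q = p^3 one has (q - 1)/2 = ((p - 1)/2) * (p^2 + p + 1), and
// Norm(a) = a^(1 + p + p^2) lies in Fp, so
//     a^((q - 1)/2) = Norm(a)^((p - 1)/2).
// The Legendre symbol over Fp3 thus reduces to one norm computation plus a
// base-field Legendre symbol, which is exactly what `legendre` below does.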
{ + /// Returns the Legendre symbol. + fn legendre(&self) -> LegendreSymbol { + self.norm().legendre() + } + + /// Returns the square root of self, if it exists. + fn sqrt(&self) -> Option { + sqrt_impl!(Self, P, self) + } + + /// Sets `self` to be the square root of `self`, if it exists. + fn sqrt_in_place(&mut self) -> Option<&mut Self> { + (*self).sqrt().map(|sqrt| { + *self = sqrt; + self + }) + } +} diff --git a/arkworks/algebra/ff/src/fields/models/fp4.rs b/arkworks/algebra/ff/src/fields/models/fp4.rs new file mode 100644 index 00000000..29039102 --- /dev/null +++ b/arkworks/algebra/ff/src/fields/models/fp4.rs @@ -0,0 +1,62 @@ +use super::quadratic_extension::*; +use core::marker::PhantomData; + +use crate::fields::{Fp2, Fp2Parameters}; + +pub trait Fp4Parameters: 'static + Send + Sync { + type Fp2Params: Fp2Parameters; + + /// This *must* equal (0, 1); + /// see [[DESD06, Section 5.1]](https://eprint.iacr.org/2006/471.pdf). + const NONRESIDUE: Fp2; + + /// Coefficients for the Frobenius automorphism. + /// non_residue^((modulus^i-1)/4) for i=0,1,2,3 + const FROBENIUS_COEFF_FP4_C1: &'static [::Fp]; + + #[inline(always)] + fn mul_fp2_by_nonresidue(fe: &Fp2) -> Fp2 { + // see [[DESD06, Section 5.1]](https://eprint.iacr.org/2006/471.pdf). + Fp2::new( + ::NONRESIDUE * &fe.c1, + fe.c0, + ) + } +} + +pub struct Fp4ParamsWrapper(PhantomData

<P>);

impl<P: Fp4Parameters> QuadExtParameters for Fp4ParamsWrapper<P>
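// With Fp2 = Fp[u]/(u^2 - beta) and NONRESIDUE = (0, 1) = u, multiplying
// c0 + c1*u by u swaps the coefficients and scales the new constant term:
//     u * (c0 + c1*u) = beta*c1 + c0*u,
// matching `mul_fp2_by_nonresidue` above. For instance, with beta = -5:
//     u * (3 + 2u) = -10 + 3u.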
{ + type BasePrimeField = ::Fp; + type BaseField = Fp2; + type FrobCoeff = Self::BasePrimeField; + + const DEGREE_OVER_BASE_PRIME_FIELD: usize = 4; + + const NONRESIDUE: Self::BaseField = P::NONRESIDUE; + + const FROBENIUS_COEFF_C1: &'static [Self::FrobCoeff] = P::FROBENIUS_COEFF_FP4_C1; + + #[inline(always)] + fn mul_base_field_by_nonresidue(fe: &Self::BaseField) -> Self::BaseField { + P::mul_fp2_by_nonresidue(fe) + } + + fn mul_base_field_by_frob_coeff(fe: &mut Self::BaseField, power: usize) { + fe.mul_assign_by_fp(&Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD]); + } +} + +pub type Fp4

<P> = QuadExtField<Fp4ParamsWrapper<P>>;

impl<P: Fp4Parameters> Fp4<P>
{ + pub fn mul_by_fp(&mut self, element: &::Fp) { + self.c0.mul_assign_by_fp(element); + self.c1.mul_assign_by_fp(element); + } + + pub fn mul_by_fp2(&mut self, element: &Fp2) { + self.c0 *= element; + self.c1 *= element; + } +} diff --git a/arkworks/algebra/ff/src/fields/models/fp6_2over3.rs b/arkworks/algebra/ff/src/fields/models/fp6_2over3.rs new file mode 100644 index 00000000..82e3fe6a --- /dev/null +++ b/arkworks/algebra/ff/src/fields/models/fp6_2over3.rs @@ -0,0 +1,111 @@ +use super::quadratic_extension::*; +use core::marker::PhantomData; +use core::ops::MulAssign; + +use crate::fields::{Fp3, Fp3Parameters}; + +pub trait Fp6Parameters: 'static + Send + Sync { + type Fp3Params: Fp3Parameters; + + const NONRESIDUE: Fp3; + + /// Coefficients for the Frobenius automorphism. + const FROBENIUS_COEFF_FP6_C1: &'static [::Fp]; + + #[inline(always)] + fn mul_fp3_by_nonresidue(fe: &Fp3) -> Fp3 { + let mut res = *fe; + res.c0 = fe.c2; + res.c1 = fe.c0; + res.c2 = fe.c1; + res.c0 = ::mul_fp_by_nonresidue(&res.c0); + res + } +} + +pub struct Fp6ParamsWrapper(PhantomData

<P>);

impl<P: Fp6Parameters> QuadExtParameters for Fp6ParamsWrapper<P>
{ + type BasePrimeField = ::Fp; + type BaseField = Fp3; + type FrobCoeff = Self::BasePrimeField; + + const DEGREE_OVER_BASE_PRIME_FIELD: usize = 6; + + const NONRESIDUE: Self::BaseField = P::NONRESIDUE; + + const FROBENIUS_COEFF_C1: &'static [Self::FrobCoeff] = P::FROBENIUS_COEFF_FP6_C1; + + #[inline(always)] + fn mul_base_field_by_nonresidue(fe: &Self::BaseField) -> Self::BaseField { + P::mul_fp3_by_nonresidue(fe) + } + + fn mul_base_field_by_frob_coeff(fe: &mut Self::BaseField, power: usize) { + fe.mul_assign_by_fp(&Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD]); + } +} + +pub type Fp6

<P> = QuadExtField<Fp6ParamsWrapper<P>>;

impl<P: Fp6Parameters> Fp6<P>
{ + pub fn mul_by_034( + &mut self, + c0: &::Fp, + c3: &::Fp, + c4: &::Fp, + ) { + let z0 = self.c0.c0; + let z1 = self.c0.c1; + let z2 = self.c0.c2; + let z3 = self.c1.c0; + let z4 = self.c1.c1; + let z5 = self.c1.c2; + + let x0 = *c0; + let x3 = *c3; + let x4 = *c4; + + let mut tmp1 = x3; + tmp1.mul_assign(&::NONRESIDUE); + let mut tmp2 = x4; + tmp2.mul_assign(&::NONRESIDUE); + + self.c0.c0 = x0 * &z0 + &(tmp1 * &z5) + &(tmp2 * &z4); + self.c0.c1 = x0 * &z1 + &(x3 * &z3) + &(tmp2 * &z5); + self.c0.c2 = x0 * &z2 + &(x3 * &z4) + &(x4 * &z3); + self.c1.c0 = x0 * &z3 + &(x3 * &z0) + &(tmp2 * &z2); + self.c1.c1 = x0 * &z4 + &(x3 * &z1) + &(x4 * &z0); + self.c1.c2 = x0 * &z5 + &(x3 * &z2) + &(x4 * &z1); + } + + pub fn mul_by_014( + &mut self, + c0: &::Fp, + c1: &::Fp, + c4: &::Fp, + ) { + let z0 = self.c0.c0; + let z1 = self.c0.c1; + let z2 = self.c0.c2; + let z3 = self.c1.c0; + let z4 = self.c1.c1; + let z5 = self.c1.c2; + + let x0 = *c0; + let x1 = *c1; + let x4 = *c4; + + let mut tmp1 = x1; + tmp1.mul_assign(&::NONRESIDUE); + let mut tmp2 = x4; + tmp2.mul_assign(&::NONRESIDUE); + + self.c0.c0 = x0 * &z0 + &(tmp1 * &z2) + &(tmp2 * &z4); + self.c0.c1 = x0 * &z1 + &(x1 * &z0) + &(tmp2 * &z5); + self.c0.c2 = x0 * &z2 + &(x1 * &z1) + &(x4 * &z3); + self.c1.c0 = x0 * &z3 + &(tmp1 * &z5) + &(tmp2 * &z2); + self.c1.c1 = x0 * &z4 + &(x1 * &z3) + &(x4 * &z0); + self.c1.c2 = x0 * &z5 + &(x1 * &z4) + &(x4 * &z1); + } +} diff --git a/arkworks/algebra/ff/src/fields/models/fp6_3over2.rs b/arkworks/algebra/ff/src/fields/models/fp6_3over2.rs new file mode 100644 index 00000000..1fe91c7e --- /dev/null +++ b/arkworks/algebra/ff/src/fields/models/fp6_3over2.rs @@ -0,0 +1,140 @@ +use super::cubic_extension::*; +use crate::fields::*; +use core::marker::PhantomData; + +pub trait Fp6Parameters: 'static + Send + Sync + Copy { + type Fp2Params: Fp2Parameters; + + const NONRESIDUE: Fp2; + + /// Coefficients for the Frobenius automorphism. + const FROBENIUS_COEFF_FP6_C1: &'static [Fp2]; + const FROBENIUS_COEFF_FP6_C2: &'static [Fp2]; + + #[inline(always)] + fn mul_fp2_by_nonresidue(fe: &Fp2) -> Fp2 { + Self::NONRESIDUE * fe + } +} + +pub struct Fp6ParamsWrapper(PhantomData

<P>);

impl<P: Fp6Parameters> CubicExtParameters for Fp6ParamsWrapper<P>
{ + type BasePrimeField = ::Fp; + type BaseField = Fp2; + type FrobCoeff = Fp2; + + const DEGREE_OVER_BASE_PRIME_FIELD: usize = 6; + + const NONRESIDUE: Self::BaseField = P::NONRESIDUE; + + const FROBENIUS_COEFF_C1: &'static [Self::FrobCoeff] = P::FROBENIUS_COEFF_FP6_C1; + const FROBENIUS_COEFF_C2: &'static [Self::FrobCoeff] = P::FROBENIUS_COEFF_FP6_C2; + + #[inline(always)] + fn mul_base_field_by_nonresidue(fe: &Self::BaseField) -> Self::BaseField { + P::mul_fp2_by_nonresidue(fe) + } + + fn mul_base_field_by_frob_coeff( + c1: &mut Self::BaseField, + c2: &mut Self::BaseField, + power: usize, + ) { + *c1 *= &Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD]; + *c2 *= &Self::FROBENIUS_COEFF_C2[power % Self::DEGREE_OVER_BASE_PRIME_FIELD]; + } +} + +pub type Fp6

<P> = CubicExtField<Fp6ParamsWrapper<P>>;

impl<P: Fp6Parameters> Fp6<P>
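// The `mul_by_1` / `mul_by_01` helpers in this impl multiply by an operand
// whose c2 (and, for `mul_by_1`, also c0) coefficient is zero. They follow
// the Karatsuba-style schedule of the full cubic multiplication but drop all
// products involving the missing coefficients, saving several Fp2
// multiplications relative to a general Fp6 multiplication.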
{ + pub fn mul_assign_by_fp2(&mut self, other: Fp2) { + self.c0 *= &other; + self.c1 *= &other; + self.c2 *= &other; + } + + pub fn mul_by_fp(&mut self, element: &::Fp) { + self.c0.mul_assign_by_fp(&element); + self.c1.mul_assign_by_fp(&element); + self.c2.mul_assign_by_fp(&element); + } + + pub fn mul_by_fp2(&mut self, element: &Fp2) { + self.c0.mul_assign(element); + self.c1.mul_assign(element); + self.c2.mul_assign(element); + } + + pub fn mul_by_1(&mut self, c1: &Fp2) { + let mut b_b = self.c1; + b_b.mul_assign(c1); + + let mut t1 = *c1; + { + let mut tmp = self.c1; + tmp.add_assign(&self.c2); + + t1.mul_assign(&tmp); + t1.sub_assign(&b_b); + t1 = P::mul_fp2_by_nonresidue(&t1); + } + + let mut t2 = *c1; + { + let mut tmp = self.c0; + tmp.add_assign(&self.c1); + + t2.mul_assign(&tmp); + t2.sub_assign(&b_b); + } + + self.c0 = t1; + self.c1 = t2; + self.c2 = b_b; + } + + pub fn mul_by_01(&mut self, c0: &Fp2, c1: &Fp2) { + let mut a_a = self.c0; + let mut b_b = self.c1; + a_a.mul_assign(c0); + b_b.mul_assign(c1); + + let mut t1 = *c1; + { + let mut tmp = self.c1; + tmp.add_assign(&self.c2); + + t1.mul_assign(&tmp); + t1.sub_assign(&b_b); + t1 = P::mul_fp2_by_nonresidue(&t1); + t1.add_assign(&a_a); + } + + let mut t3 = *c0; + { + let mut tmp = self.c0; + tmp.add_assign(&self.c2); + + t3.mul_assign(&tmp); + t3.sub_assign(&a_a); + t3.add_assign(&b_b); + } + + let mut t2 = *c0; + t2.add_assign(c1); + { + let mut tmp = self.c0; + tmp.add_assign(&self.c1); + + t2.mul_assign(&tmp); + t2.sub_assign(&a_a); + t2.sub_assign(&b_b); + } + + self.c0 = t1; + self.c1 = t2; + self.c2 = t3; + } +} diff --git a/arkworks/algebra/ff/src/fields/models/mod.rs b/arkworks/algebra/ff/src/fields/models/mod.rs new file mode 100644 index 00000000..e6b48ea9 --- /dev/null +++ b/arkworks/algebra/ff/src/fields/models/mod.rs @@ -0,0 +1,92 @@ +use ark_std::{ + cmp::{Ord, Ordering, PartialOrd}, + fmt::{Display, Formatter, Result as FmtResult}, + io::{Read, Result as IoResult, Write}, + marker::PhantomData, + ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign}, + str::FromStr, +}; +use num_traits::{One, Zero}; + +use crate::{ + biginteger::{ + arithmetic as fa, BigInteger as _BigInteger, BigInteger256, BigInteger320, BigInteger384, + BigInteger448, BigInteger64, BigInteger768, BigInteger832, + }, + bytes::{FromBytes, ToBytes}, + fields::{FftField, Field, FpParameters, LegendreSymbol, PrimeField, SquareRootField}, +}; +use ark_serialize::*; + +impl_Fp!(Fp64, Fp64Parameters, BigInteger64, BigInteger64, 1, "64"); +impl_Fp!( + Fp256, + Fp256Parameters, + BigInteger256, + BigInteger256, + 4, + "256" +); +impl_Fp!( + Fp320, + Fp320Parameters, + BigInteger320, + BigInteger320, + 5, + "320" +); +impl_Fp!( + Fp384, + Fp384Parameters, + BigInteger384, + BigInteger384, + 6, + "384" +); +impl_Fp!( + Fp448, + Fp448Parameters, + BigInteger448, + BigInteger448, + 7, + "448" +); +impl_Fp!( + Fp768, + Fp768Parameters, + BigInteger768, + BigInteger768, + 12, + "768" +); +impl_Fp!( + Fp832, + Fp832Parameters, + BigInteger832, + BigInteger832, + 13, + "832" +); + +pub mod fp2; +pub use self::fp2::*; + +pub mod fp3; +pub use self::fp3::*; + +pub mod fp4; +pub use self::fp4::*; + +pub mod fp6_2over3; + +pub mod fp6_3over2; +pub use self::fp6_3over2::*; + +pub mod fp12_2over3over2; +pub use self::fp12_2over3over2::*; + +pub mod quadratic_extension; +pub use quadratic_extension::*; + +pub mod cubic_extension; +pub use cubic_extension::*; diff --git a/arkworks/algebra/ff/src/fields/models/quadratic_extension.rs 
b/arkworks/algebra/ff/src/fields/models/quadratic_extension.rs
new file mode 100644
index 00000000..a7e36dac
--- /dev/null
+++ b/arkworks/algebra/ff/src/fields/models/quadratic_extension.rs
@@ -0,0 +1,753 @@
use ark_serialize::{
    CanonicalDeserialize, CanonicalDeserializeWithFlags, CanonicalSerialize,
    CanonicalSerializeWithFlags, EmptyFlags, Flags, SerializationError,
};
use ark_std::{
    cmp::{Ord, Ordering, PartialOrd},
    fmt,
    io::{Read, Result as IoResult, Write},
    marker::PhantomData,
    ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign},
    vec::Vec,
};

use num_traits::{One, Zero};
use zeroize::Zeroize;

use ark_std::rand::{
    distributions::{Distribution, Standard},
    Rng,
};

use crate::{
    bytes::{FromBytes, ToBytes},
    fields::{Field, LegendreSymbol, PrimeField, SquareRootField},
    ToConstraintField, UniformRand,
};

/// Defines a quadratic extension field from a quadratic non-residue.
pub trait QuadExtParameters: 'static + Send + Sync + Sized {
    /// The prime field that this quadratic extension is eventually an extension of.
    type BasePrimeField: PrimeField;
    /// The base field that this field is a quadratic extension of.
    type BaseField: Field<BasePrimeField = Self::BasePrimeField>;
    /// The type of the coefficients for an efficient implementation of the
    /// Frobenius endomorphism.
    type FrobCoeff: Field;

    /// The degree of the extension over the base prime field.
    const DEGREE_OVER_BASE_PRIME_FIELD: usize;

    /// The quadratic non-residue used to construct the extension.
    const NONRESIDUE: Self::BaseField;

    /// Coefficients for the Frobenius automorphism.
    const FROBENIUS_COEFF_C1: &'static [Self::FrobCoeff];

    /// A specializable method for multiplying an element of the base field by
    /// the quadratic non-residue. This is used in Karatsuba multiplication
    /// and in complex squaring.
    #[inline(always)]
    fn mul_base_field_by_nonresidue(fe: &Self::BaseField) -> Self::BaseField {
        Self::NONRESIDUE * fe
    }

    /// A specializable method for computing x + mul_base_field_by_nonresidue(y).
    /// This allows for optimizations when the non-residue is
    /// canonically negative in the field.
    #[inline(always)]
    fn add_and_mul_base_field_by_nonresidue(
        x: &Self::BaseField,
        y: &Self::BaseField,
    ) -> Self::BaseField {
        *x + Self::mul_base_field_by_nonresidue(y)
    }

    /// A specializable method for computing x + mul_base_field_by_nonresidue(y) + y.
    /// This allows for optimizations when the non-residue is not -1.
    #[inline(always)]
    fn add_and_mul_base_field_by_nonresidue_plus_one(
        x: &Self::BaseField,
        y: &Self::BaseField,
    ) -> Self::BaseField {
        let mut tmp = *x;
        tmp += y;
        Self::add_and_mul_base_field_by_nonresidue(&tmp, &y)
    }

    /// A specializable method for computing x - mul_base_field_by_nonresidue(y).
    /// This allows for optimizations when the non-residue is
    /// canonically negative in the field.
    #[inline(always)]
    fn sub_and_mul_base_field_by_nonresidue(
        x: &Self::BaseField,
        y: &Self::BaseField,
    ) -> Self::BaseField {
        *x - Self::mul_base_field_by_nonresidue(y)
    }

    /// A specializable method for multiplying an element of the base field by
    /// the appropriate Frobenius coefficient.
    fn mul_base_field_by_frob_coeff(fe: &mut Self::BaseField, power: usize);

    /// A specializable method for exponentiating that is to be used
    /// *only* when `fe` is known to be in the cyclotomic subgroup.
    fn cyclotomic_exp(fe: &QuadExtField<Self>, exponent: impl AsRef<[u64]>) -> QuadExtField<Self> {
        let mut res = QuadExtField::one();
        let mut self_inverse = fe.clone();
        self_inverse.conjugate();

        let mut found_nonzero = false;
        let naf = crate::biginteger::arithmetic::find_wnaf(exponent.as_ref());

        for &value in naf.iter().rev() {
            if found_nonzero {
                res.square_in_place();
            }

            if value != 0 {
                found_nonzero = true;

                if value > 0 {
                    res *= fe;
                } else {
                    res *= &self_inverse;
                }
            }
        }
        res
    }
}

/// An element of a quadratic extension field F_p\[X\]/(X^2 - P::NONRESIDUE) is
/// represented as c0 + c1 * X, for c0, c1 in `P::BaseField`.
#[derive(Derivative)]
#[derivative(
    Default(bound = "P: QuadExtParameters"),
    Hash(bound = "P: QuadExtParameters"),
    Clone(bound = "P: QuadExtParameters"),
    Copy(bound = "P: QuadExtParameters"),
    Debug(bound = "P: QuadExtParameters"),
    PartialEq(bound = "P: QuadExtParameters"),
    Eq(bound = "P: QuadExtParameters")
)]
pub struct QuadExtField<P: QuadExtParameters> {
    pub c0: P::BaseField,
    pub c1: P::BaseField,
    #[derivative(Debug = "ignore")]
    #[doc(hidden)]
    pub _parameters: PhantomData<P>,
}

impl<P: QuadExtParameters> QuadExtField<P>
{ + pub fn new(c0: P::BaseField, c1: P::BaseField) -> Self { + QuadExtField { + c0, + c1, + _parameters: PhantomData, + } + } + + /// This is only to be used when the element is *known* to be in the cyclotomic subgroup. + pub fn conjugate(&mut self) { + self.c1 = -self.c1; + } + + /// This is only to be used when the element is *known* to be in the cyclotomic subgroup. + pub fn cyclotomic_exp(&self, exponent: impl AsRef<[u64]>) -> Self { + P::cyclotomic_exp(self, exponent) + } + + /// Norm of QuadExtField over `P::BaseField`:`Norm(a) = a * a.conjugate()`. + /// This simplifies to: `Norm(a) = a.x^2 - P::NON_RESIDUE * a.y^2`. + /// This is alternatively expressed as `Norm(a) = a^(1 + p)`. + pub fn norm(&self) -> P::BaseField { + let t0 = self.c0.square(); + // t1 = t0 - P::NON_RESIDUE * c1^2 + let mut t1 = self.c1.square(); + t1 = P::sub_and_mul_base_field_by_nonresidue(&t0, &t1); + t1 + } + + pub fn mul_assign_by_basefield(&mut self, element: &P::BaseField) { + self.c0.mul_assign(element); + self.c1.mul_assign(element); + } +} + +impl Zero for QuadExtField

{ + fn zero() -> Self { + QuadExtField::new(P::BaseField::zero(), P::BaseField::zero()) + } + + fn is_zero(&self) -> bool { + self.c0.is_zero() && self.c1.is_zero() + } +} + +impl One for QuadExtField

{ + fn one() -> Self { + QuadExtField::new(P::BaseField::one(), P::BaseField::zero()) + } + + fn is_one(&self) -> bool { + self.c0.is_one() && self.c1.is_zero() + } +} + +impl Field for QuadExtField

{ + type BasePrimeField = P::BasePrimeField; + + fn extension_degree() -> u64 { + 2 * P::BaseField::extension_degree() + } + + fn from_base_prime_field_elems(elems: &[Self::BasePrimeField]) -> Option { + if elems.len() != (Self::extension_degree() as usize) { + return None; + } + let base_ext_deg = P::BaseField::extension_degree() as usize; + Some(Self::new( + P::BaseField::from_base_prime_field_elems(&elems[0..base_ext_deg]).unwrap(), + P::BaseField::from_base_prime_field_elems(&elems[base_ext_deg..]).unwrap(), + )) + } + + fn double(&self) -> Self { + let mut result = *self; + result.double_in_place(); + result + } + + fn double_in_place(&mut self) -> &mut Self { + self.c0.double_in_place(); + self.c1.double_in_place(); + self + } + + fn square(&self) -> Self { + let mut result = *self; + result.square_in_place(); + result + } + + #[inline] + fn from_random_bytes_with_flags(bytes: &[u8]) -> Option<(Self, F)> { + let split_at = bytes.len() / 2; + if let Some(c0) = P::BaseField::from_random_bytes(&bytes[..split_at]) { + if let Some((c1, flags)) = + P::BaseField::from_random_bytes_with_flags(&bytes[split_at..]) + { + return Some((QuadExtField::new(c0, c1), flags)); + } + } + None + } + + #[inline] + fn from_random_bytes(bytes: &[u8]) -> Option { + Self::from_random_bytes_with_flags::(bytes).map(|f| f.0) + } + + fn square_in_place(&mut self) -> &mut Self { + // (c0, c1)^2 = (c0 + x*c1)^2 + // = c0^2 + 2 c0 c1 x + c1^2 x^2 + // = c0^2 + beta * c1^2 + 2 c0 * c1 * x + // = (c0^2 + beta * c1^2, 2 c0 * c1) + // Where beta is P::NONRESIDUE. + // When beta = -1, we can re-use intermediate additions to improve performance. + if P::NONRESIDUE == -P::BaseField::one() { + // When the non-residue is -1, we save 2 intermediate additions, + // and use one fewer intermediate variable + + let c0_copy = self.c0; + // v0 = c0 - c1 + let v0 = self.c0 - &self.c1; + // result.c1 = 2 c1 + self.c1.double_in_place(); + // result.c0 = (c0 - c1) + 2c1 = c0 + c1 + self.c0 = v0 + &self.c1; + // result.c0 *= (c0 - c1) + // result.c0 = (c0 - c1) * (c0 + c1) = c0^2 - c1^2 + self.c0 *= &v0; + // result.c1 *= c0 + // result.c1 = (2 * c1) * c0 + self.c1 *= &c0_copy; + + self + } else { + // v0 = c0 - c1 + let mut v0 = self.c0 - &self.c1; + // v3 = c0 - beta * c1 + let v3 = P::sub_and_mul_base_field_by_nonresidue(&self.c0, &self.c1); + // v2 = c0 * c1 + let v2 = self.c0 * &self.c1; + + // v0 = (v0 * v3) + // v0 = (c0 - c1) * (c0 - beta*c1) + // v0 = c0^2 - beta * c0 * c1 - c0 * c1 + beta * c1^2 + v0 *= &v3; + + // result.c1 = 2 * c0 * c1 + self.c1 = v2.double(); + // result.c0 = (v0) + ((beta + 1) * v2) + // result.c0 = (c0^2 - beta * c0 * c1 - c0 * c1 + beta * c1^2) + ((beta + 1) c0 * c1) + // result.c0 = (c0^2 - beta * c0 * c1 + beta * c1^2) + (beta * c0 * c1) + // result.c0 = c0^2 + beta * c1^2 + self.c0 = P::add_and_mul_base_field_by_nonresidue_plus_one(&v0, &v2); + + self + } + } + + fn inverse(&self) -> Option { + if self.is_zero() { + None + } else { + // Guide to Pairing-based Cryptography, Algorithm 5.19. 
+ // v1 = c1.square() + let v1 = self.c1.square(); + // v0 = c0.square() - beta * v1 + let v0 = P::sub_and_mul_base_field_by_nonresidue(&self.c0.square(), &v1); + + v0.inverse().map(|v1| { + let c0 = self.c0 * &v1; + let c1 = -(self.c1 * &v1); + Self::new(c0, c1) + }) + } + } + + fn inverse_in_place(&mut self) -> Option<&mut Self> { + if let Some(inverse) = self.inverse() { + *self = inverse; + Some(self) + } else { + None + } + } + + fn frobenius_map(&mut self, power: usize) { + self.c0.frobenius_map(power); + self.c1.frobenius_map(power); + P::mul_base_field_by_frob_coeff(&mut self.c1, power); + } +} + +impl<'a, P: QuadExtParameters> SquareRootField for QuadExtField

+where + P::BaseField: SquareRootField, +{ + fn legendre(&self) -> LegendreSymbol { + // The LegendreSymbol in a field of order q for an element x can be + // computed as x^((q-1)/2). + // Since we are in a quadratic extension of a field F_p, + // we have that q = p^2. + // Notice then that (q-1)/2 = ((p-1)/2) * (1 + p). + // This implies that we can compute the symbol as (x^(1+p))^((p-1)/2). + // Recall that computing x^(1 + p) is equivalent to taking the norm of x, + // and it will output an element in the base field F_p. + // Then exponentiating by (p-1)/2 in the base field is equivalent to computing + // the legendre symbol in the base field. + self.norm().legendre() + } + + fn sqrt(&self) -> Option { + // Square root based on the complex method. See + // https://eprint.iacr.org/2012/685.pdf (page 15, algorithm 8) + if self.c1.is_zero() { + return self.c0.sqrt().map(|c0| Self::new(c0, P::BaseField::zero())); + } + // Try computing the square root + // Check at the end of the algorithm if it was a square root + let alpha = self.norm(); + // TODO: Precompute this + let two_inv = P::BaseField::one() + .double() + .inverse() + .expect("Two should always have an inverse"); + alpha.sqrt().and_then(|alpha| { + let mut delta = (alpha + &self.c0) * &two_inv; + if delta.legendre().is_qnr() { + delta -= α + } + let c0 = delta.sqrt().expect("Delta must have a square root"); + let c0_inv = c0.inverse().expect("c0 must have an inverse"); + let sqrt_cand = Self::new(c0, self.c1 * &two_inv * &c0_inv); + // Check if sqrt_cand is actually the square root + // if not, there exists no square root. + if sqrt_cand.square() == *self { + Some(sqrt_cand) + } else { + #[cfg(debug_assertions)] + { + use crate::fields::LegendreSymbol::*; + if self.legendre() != QuadraticNonResidue { + panic!( + "Input has a square root per its legendre symbol, but it was not found" + ) + } + } + None + } + }) + } + + fn sqrt_in_place(&mut self) -> Option<&mut Self> { + (*self).sqrt().map(|sqrt| { + *self = sqrt; + self + }) + } +} + +/// `QuadExtField` elements are ordered lexicographically. +impl Ord for QuadExtField

{ + #[inline(always)] + fn cmp(&self, other: &Self) -> Ordering { + match self.c1.cmp(&other.c1) { + Ordering::Greater => Ordering::Greater, + Ordering::Less => Ordering::Less, + Ordering::Equal => self.c0.cmp(&other.c0), + } + } +} + +impl PartialOrd for QuadExtField

{ + #[inline(always)] + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Zeroize for QuadExtField

{
    // The phantom data does not contain element-specific data
    // and thus does not need to be zeroized.
    fn zeroize(&mut self) {
        self.c0.zeroize();
        self.c1.zeroize();
    }
}

impl<P: QuadExtParameters> From<u128> for QuadExtField<P>
{ + fn from(other: u128) -> Self { + Self::new(other.into(), P::BaseField::zero()) + } +} + +impl From for QuadExtField

{ + #[inline] + fn from(val: i128) -> Self { + let abs = Self::from(val.unsigned_abs()); + if val.is_positive() { + abs + } else { + -abs + } + } +} + +impl From for QuadExtField

{ + fn from(other: u64) -> Self { + Self::new(other.into(), P::BaseField::zero()) + } +} + +impl From for QuadExtField

{ + #[inline] + fn from(val: i64) -> Self { + let abs = Self::from(val.unsigned_abs()); + if val.is_positive() { + abs + } else { + -abs + } + } +} + +impl From for QuadExtField

{ + fn from(other: u32) -> Self { + Self::new(other.into(), P::BaseField::zero()) + } +} + +impl From for QuadExtField

{ + #[inline] + fn from(val: i32) -> Self { + let abs = Self::from(val.unsigned_abs()); + if val.is_positive() { + abs + } else { + -abs + } + } +} + +impl From for QuadExtField

{ + fn from(other: u16) -> Self { + Self::new(other.into(), P::BaseField::zero()) + } +} + +impl From for QuadExtField

{ + #[inline] + fn from(val: i16) -> Self { + let abs = Self::from(val.unsigned_abs()); + if val.is_positive() { + abs + } else { + -abs + } + } +} + +impl From for QuadExtField

{ + fn from(other: u8) -> Self { + Self::new(other.into(), P::BaseField::zero()) + } +} + +impl From for QuadExtField

{ + #[inline] + fn from(val: i8) -> Self { + let abs = Self::from(val.unsigned_abs()); + if val.is_positive() { + abs + } else { + -abs + } + } +} + +impl From for QuadExtField

{ + fn from(other: bool) -> Self { + Self::new(u8::from(other).into(), P::BaseField::zero()) + } +} + +impl ToBytes for QuadExtField

{ + #[inline] + fn write(&self, mut writer: W) -> IoResult<()> { + self.c0.write(&mut writer)?; + self.c1.write(writer) + } +} + +impl FromBytes for QuadExtField

{ + #[inline] + fn read(mut reader: R) -> IoResult { + let c0 = P::BaseField::read(&mut reader)?; + let c1 = P::BaseField::read(reader)?; + Ok(QuadExtField::new(c0, c1)) + } +} + +impl Neg for QuadExtField

{ + type Output = Self; + #[inline] + #[must_use] + fn neg(mut self) -> Self { + self.c0 = -self.c0; + self.c1 = -self.c1; + self + } +} + +impl Distribution> for Standard { + #[inline] + fn sample(&self, rng: &mut R) -> QuadExtField

{ + QuadExtField::new(UniformRand::rand(rng), UniformRand::rand(rng)) + } +} + +impl<'a, P: QuadExtParameters> Add<&'a QuadExtField

<P>> for QuadExtField<P>
{ + type Output = Self; + + #[inline] + fn add(mut self, other: &Self) -> Self { + self.add_assign(other); + self + } +} + +impl<'a, P: QuadExtParameters> Sub<&'a QuadExtField

<P>> for QuadExtField<P>
{ + type Output = Self; + + #[inline] + fn sub(mut self, other: &Self) -> Self { + self.sub_assign(other); + self + } +} + +impl<'a, P: QuadExtParameters> Mul<&'a QuadExtField

<P>> for QuadExtField<P>
{ + type Output = Self; + + #[inline] + fn mul(mut self, other: &Self) -> Self { + self.mul_assign(other); + self + } +} + +impl<'a, P: QuadExtParameters> Div<&'a QuadExtField

<P>> for QuadExtField<P>
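// Division is implemented as multiplication by `other.inverse().unwrap()`,
// so dividing by zero panics rather than returning an error; callers that
// might divide by zero should test `is_zero()` (or call `inverse()`
// themselves) first.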
{ + type Output = Self; + + #[inline] + fn div(mut self, other: &Self) -> Self { + self.mul_assign(&other.inverse().unwrap()); + self + } +} + +impl<'a, P: QuadExtParameters> AddAssign<&'a Self> for QuadExtField

{
    #[inline]
    fn add_assign(&mut self, other: &Self) {
        self.c0 += &other.c0;
        self.c1 += &other.c1;
    }
}

impl<'a, P: QuadExtParameters> SubAssign<&'a Self> for QuadExtField<P>
{ + #[inline] + fn sub_assign(&mut self, other: &Self) { + self.c0 -= &other.c0; + self.c1 -= &other.c1; + } +} + +impl_additive_ops_from_ref!(QuadExtField, QuadExtParameters); +impl_multiplicative_ops_from_ref!(QuadExtField, QuadExtParameters); + +impl<'a, P: QuadExtParameters> MulAssign<&'a Self> for QuadExtField

<P> {
    #[inline]
    fn mul_assign(&mut self, other: &Self) {
        // Karatsuba multiplication;
        // Guide to Pairing-based Cryptography, Algorithm 5.16.
        let v0 = self.c0 * &other.c0;
        let v1 = self.c1 * &other.c1;

        self.c1 += &self.c0;
        self.c1 *= &(other.c0 + &other.c1);
        self.c1 -= &v0;
        self.c1 -= &v1;
        self.c0 = P::add_and_mul_base_field_by_nonresidue(&v0, &v1);
    }
}

impl<'a, P: QuadExtParameters> DivAssign<&'a Self> for QuadExtField<P>
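// The `mul_assign` above uses Karatsuba: schoolbook multiplication of
// (c0 + c1*X)(d0 + d1*X) costs four base-field multiplications, while
// computing v0 = c0*d0 and v1 = c1*d1 and recovering the cross term as
// (c0 + c1)*(d0 + d1) - v0 - v1 needs only three. Reducing via
// X^2 = NONRESIDUE then gives
//     c0' = v0 + NONRESIDUE*v1,    c1' = c0*d1 + c1*d0.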
{ + #[inline] + fn div_assign(&mut self, other: &Self) { + self.mul_assign(&other.inverse().unwrap()); + } +} + +impl fmt::Display for QuadExtField

<P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "QuadExtField({} + {} * u)", self.c0, self.c1)
    }
}

impl<P: QuadExtParameters> CanonicalSerializeWithFlags for QuadExtField<P>
{
    #[inline]
    fn serialize_with_flags<W: Write, F: Flags>(
        &self,
        mut writer: W,
        flags: F,
    ) -> Result<(), SerializationError> {
        self.c0.serialize(&mut writer)?;
        self.c1.serialize_with_flags(&mut writer, flags)?;
        Ok(())
    }

    #[inline]
    fn serialized_size_with_flags<F: Flags>(&self) -> usize {
        self.c0.serialized_size() + self.c1.serialized_size_with_flags::<F>()
    }
}

impl<P: QuadExtParameters> CanonicalSerialize for QuadExtField<P>
{
    #[inline]
    fn serialize<W: Write>(&self, writer: W) -> Result<(), SerializationError> {
        self.serialize_with_flags(writer, EmptyFlags)
    }

    #[inline]
    fn serialized_size(&self) -> usize {
        self.serialized_size_with_flags::<EmptyFlags>()
    }
}

impl<P: QuadExtParameters> CanonicalDeserializeWithFlags for QuadExtField<P>
{
    #[inline]
    fn deserialize_with_flags<R: Read, F: Flags>(
        mut reader: R,
    ) -> Result<(Self, F), SerializationError> {
        let c0: P::BaseField = CanonicalDeserialize::deserialize(&mut reader)?;
        let (c1, flags): (P::BaseField, _) =
            CanonicalDeserializeWithFlags::deserialize_with_flags(&mut reader)?;
        Ok((QuadExtField::new(c0, c1), flags))
    }
}

impl<P: QuadExtParameters> CanonicalDeserialize for QuadExtField<P>
{
    #[inline]
    fn deserialize<R: Read>(mut reader: R) -> Result<Self, SerializationError> {
        let c0: P::BaseField = CanonicalDeserialize::deserialize(&mut reader)?;
        let c1: P::BaseField = CanonicalDeserialize::deserialize(&mut reader)?;
        Ok(QuadExtField::new(c0, c1))
    }
}

impl<P: QuadExtParameters> ToConstraintField<P::BasePrimeField> for QuadExtField<P>
+where + P::BaseField: ToConstraintField, +{ + fn to_field_elements(&self) -> Option> { + let mut res = Vec::new(); + let mut c0_elems = self.c0.to_field_elements()?; + let mut c1_elems = self.c1.to_field_elements()?; + + res.append(&mut c0_elems); + res.append(&mut c1_elems); + + Some(res) + } +} + +#[cfg(test)] +mod quad_ext_tests { + use super::*; + use crate::test_field::{Fq, Fq2}; + use ark_std::test_rng; + + #[test] + fn test_from_base_prime_field_elements() { + let ext_degree = Fq2::extension_degree() as usize; + // Test on slice lengths that aren't equal to the extension degree + let max_num_elems_to_test = 4; + for d in 0..max_num_elems_to_test { + if d == ext_degree { + continue; + } + let mut random_coeffs = Vec::::new(); + for _ in 0..d { + random_coeffs.push(Fq::rand(&mut test_rng())); + } + let res = Fq2::from_base_prime_field_elems(&random_coeffs); + assert_eq!(res, None); + } + // Test on slice lengths that are equal to the extension degree + // We test consistency against Fq2::new + let number_of_tests = 10; + for _ in 0..number_of_tests { + let mut random_coeffs = Vec::::new(); + for _ in 0..ext_degree { + random_coeffs.push(Fq::rand(&mut test_rng())); + } + let actual = Fq2::from_base_prime_field_elems(&random_coeffs).unwrap(); + let expected = Fq2::new(random_coeffs[0], random_coeffs[1]); + assert_eq!(actual, expected); + } + } +} diff --git a/arkworks/algebra/ff/src/fields/utils.rs b/arkworks/algebra/ff/src/fields/utils.rs new file mode 100644 index 00000000..340eb482 --- /dev/null +++ b/arkworks/algebra/ff/src/fields/utils.rs @@ -0,0 +1,14 @@ +/// Calculates the k-adicity of n, i.e., the number of trailing 0s in a base-k +/// representation. +pub fn k_adicity(k: usize, mut n: usize) -> u32 { + let mut r = 0; + while n > 1 { + if n % k == 0 { + r += 1; + n /= k; + } else { + return r; + } + } + r +} diff --git a/arkworks/algebra/ff/src/lib.rs b/arkworks/algebra/ff/src/lib.rs new file mode 100644 index 00000000..b5c2ac0d --- /dev/null +++ b/arkworks/algebra/ff/src/lib.rs @@ -0,0 +1,51 @@ +#![cfg_attr(not(feature = "std"), no_std)] +#![warn(unused, future_incompatible, nonstandard_style, rust_2018_idioms)] +#![allow(clippy::op_ref, clippy::suspicious_op_assign_impl)] +#![cfg_attr(not(feature = "asm"), forbid(unsafe_code))] +#![cfg_attr(use_asm, feature(llvm_asm))] +#![cfg_attr(feature = "asm", deny(unsafe_code))] + +#[macro_use] +extern crate ark_std; + +#[macro_use] +extern crate derivative; + +#[cfg_attr(test, macro_use)] +pub mod bytes; +pub use self::bytes::*; + +#[macro_use] +pub mod biginteger; +pub use self::biginteger::*; + +#[macro_use] +pub mod fields; +pub use self::fields::*; + +// This is only used for testing. 
+#[cfg(test)] +mod test_field; + +pub use ark_std::UniformRand; + +mod to_field_vec; +pub use to_field_vec::ToConstraintField; + +pub use num_traits::{One, Zero}; + +pub use ark_std::vec; + +pub mod prelude { + pub use crate::biginteger::BigInteger; + + pub use crate::fields::{Field, FpParameters, PrimeField, SquareRootField}; + + pub use ark_std::UniformRand; + + pub use num_traits::{One, Zero}; +} + +fn error(msg: &'static str) -> ark_std::io::Error { + ark_std::io::Error::new(ark_std::io::ErrorKind::Other, msg) +} diff --git a/arkworks/algebra/ff/src/test_field/mod.rs b/arkworks/algebra/ff/src/test_field/mod.rs new file mode 100644 index 00000000..861ce190 --- /dev/null +++ b/arkworks/algebra/ff/src/test_field/mod.rs @@ -0,0 +1,385 @@ +#[allow(unused)] +pub(crate) use fq::*; +#[allow(unused)] +pub(crate) use fq2::*; +#[allow(unused)] +pub(crate) use fq6::*; +pub(crate) use fr::*; + +pub(crate) mod fr { + /// Copy of BLS12-381's Fr + use crate::{ + biginteger::BigInteger256 as BigInteger, + fields::{FftParameters, Fp256, Fp256Parameters, FpParameters}, + }; + + #[allow(unused)] + pub type Fr = Fp256; + + pub struct FrParameters; + + impl Fp256Parameters for FrParameters {} + impl FftParameters for FrParameters { + type BigInt = BigInteger; + + const TWO_ADICITY: u32 = 32; + + #[rustfmt::skip] + const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([ + 0xb9b58d8c5f0e466a, + 0x5b1b4c801819d7ec, + 0xaf53ae352a31e64, + 0x5bf3adda19e9b27b, + ]); + } + impl FpParameters for FrParameters { + /// MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513 + #[rustfmt::skip] + const MODULUS: BigInteger = BigInteger([ + 0xffffffff00000001, + 0x53bda402fffe5bfe, + 0x3339d80809a1d805, + 0x73eda753299d7d48, + ]); + + const MODULUS_BITS: u32 = 255; + + const CAPACITY: u32 = Self::MODULUS_BITS - 1; + + const REPR_SHAVE_BITS: u32 = 1; + + /// R = 10920338887063814464675503992315976177888879664585288394250266608035967270910 + #[rustfmt::skip] + const R: BigInteger = BigInteger([ + 0x1fffffffe, + 0x5884b7fa00034802, + 0x998c4fefecbc4ff5, + 0x1824b159acc5056f, + ]); + + #[rustfmt::skip] + const R2: BigInteger = BigInteger([ + 0xc999e990f3f29c6d, + 0x2b6cedcb87925c23, + 0x5d314967254398f, + 0x748d9d99f59ff11, + ]); + + const INV: u64 = 0xfffffffeffffffff; + + /// GENERATOR = 7 + /// Encoded in Montgomery form, so the value here is + /// 7 * R % q = 24006497034320510773280787438025867407531605151569380937148207556313189711857 + #[rustfmt::skip] + const GENERATOR: BigInteger = BigInteger([ + 0xefffffff1, + 0x17e363d300189c0f, + 0xff9c57876f8457b0, + 0x351332208fc5a8c4, + ]); + + #[rustfmt::skip] + const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0x7fffffff80000000, + 0xa9ded2017fff2dff, + 0x199cec0404d0ec02, + 0x39f6d3a994cebea4, + ]); + + // T and T_MINUS_ONE_DIV_TWO, where MODULUS - 1 = 2^S * T + // For T coprime to 2 + + // T = (MODULUS - 1) / 2^S = + // 12208678567578594777604504606729831043093128246378069236549469339647 + #[rustfmt::skip] + const T: BigInteger = BigInteger([ + 0xfffe5bfeffffffff, + 0x9a1d80553bda402, + 0x299d7d483339d808, + 0x73eda753, + ]); + + // (T - 1) / 2 = + // 6104339283789297388802252303364915521546564123189034618274734669823 + #[rustfmt::skip] + const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0x7fff2dff7fffffff, + 0x4d0ec02a9ded201, + 0x94cebea4199cec04, + 0x39f6d3a9, + ]); + } +} + +pub(crate) mod fq { + /// Copy of BLS12-377's Fq + use crate::{ + biginteger::BigInteger384 as BigInteger, + fields::{FftParameters, Fp384, 
Fp384Parameters, FpParameters}, + }; + + pub type Fq = Fp384; + + pub struct FqParameters; + + impl Fp384Parameters for FqParameters {} + impl FftParameters for FqParameters { + type BigInt = BigInteger; + + const TWO_ADICITY: u32 = 46u32; + + #[rustfmt::skip] + const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([ + 2022196864061697551u64, + 17419102863309525423u64, + 8564289679875062096u64, + 17152078065055548215u64, + 17966377291017729567u64, + 68610905582439508u64, + ]); + } + impl FpParameters for FqParameters { + /// MODULUS = 258664426012969094010652733694893533536393512754914660539884262666720468348340822774968888139573360124440321458177 + #[rustfmt::skip] + const MODULUS: BigInteger = BigInteger([ + 0x8508c00000000001, + 0x170b5d4430000000, + 0x1ef3622fba094800, + 0x1a22d9f300f5138f, + 0xc63b05c06ca1493b, + 0x1ae3a4617c510ea, + ]); + + const MODULUS_BITS: u32 = 377; + + const CAPACITY: u32 = Self::MODULUS_BITS - 1; + + const REPR_SHAVE_BITS: u32 = 7; + + /// R = 85013442423176922659824578519796707547925331718418265885885478904210582549405549618995257669764901891699128663912 + #[rustfmt::skip] + const R: BigInteger = BigInteger([ + 202099033278250856u64, + 5854854902718660529u64, + 11492539364873682930u64, + 8885205928937022213u64, + 5545221690922665192u64, + 39800542322357402u64, + ]); + + #[rustfmt::skip] + const R2: BigInteger = BigInteger([ + 0xb786686c9400cd22, + 0x329fcaab00431b1, + 0x22a5f11162d6b46d, + 0xbfdf7d03827dc3ac, + 0x837e92f041790bf9, + 0x6dfccb1e914b88, + ]); + + const INV: u64 = 9586122913090633727u64; + + /// GENERATOR = -5 + /// Encoded in Montgomery form, so the value here is + /// (-5 * R) % q = 92261639910053574722182574790803529333160366917737991650341130812388023949653897454961487930322210790384999596794 + #[rustfmt::skip] + const GENERATOR: BigInteger = BigInteger([ + 0xfc0b8000000002fa, + 0x97d39cf6e000018b, + 0x2072420fbfa05044, + 0xcbbcbd50d97c3802, + 0xbaf1ec35813f9eb, + 0x9974a2c0945ad2, + ]); + + #[rustfmt::skip] + const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0x4284600000000000, + 0xb85aea218000000, + 0x8f79b117dd04a400, + 0x8d116cf9807a89c7, + 0x631d82e03650a49d, + 0xd71d230be28875, + ]); + + // T and T_MINUS_ONE_DIV_TWO, where MODULUS - 1 = 2^S * T + // For T coprime to 2 + + // T = (MODULUS - 1) // 2^S = + // 3675842578061421676390135839012792950148785745837396071634149488243117337281387659330802195819009059 + #[rustfmt::skip] + const T: BigInteger = BigInteger([ + 0x7510c00000021423, + 0x88bee82520005c2d, + 0x67cc03d44e3c7bcd, + 0x1701b28524ec688b, + 0xe9185f1443ab18ec, + 0x6b8, + ]); + + // (T - 1) // 2 = + // 1837921289030710838195067919506396475074392872918698035817074744121558668640693829665401097909504529 + #[rustfmt::skip] + const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0xba88600000010a11, + 0xc45f741290002e16, + 0xb3e601ea271e3de6, + 0xb80d94292763445, + 0x748c2f8a21d58c76, + 0x35c, + ]); + } + + #[allow(dead_code)] + pub const FQ_ONE: Fq = Fq::new(FqParameters::R); + #[allow(dead_code)] + pub const FQ_ZERO: Fq = Fq::new(BigInteger([0, 0, 0, 0, 0, 0])); + + #[test] + fn test_const_from_repr() { + use crate::fields::PrimeField; + let int = BigInteger([ + 9586122913090633730, + 4981570305181876224, + 14262076793150106624, + 7033126720376490667, + 699094806891394796, + 0, + ]); + let r2 = FqParameters::R2; + let modulus = FqParameters::MODULUS; + let inv = FqParameters::INV; + + assert_eq!( + Fq::from_repr(int).unwrap(), + Fq::const_from_repr(int, r2, modulus, inv) + ); + } +} + +pub(crate) mod fq2 { + // Copy of 
BLS12-377's Fq2 + use super::fq::*; + use crate::{field_new, fields::*}; + + pub type Fq2 = Fp2; + + pub struct Fq2Parameters; + + impl Fp2Parameters for Fq2Parameters { + type Fp = Fq; + + /// NONRESIDUE = -5 + #[rustfmt::skip] + const NONRESIDUE: Fq = field_new!(Fq, "-5"); + + /// QUADRATIC_NONRESIDUE = U + #[rustfmt::skip] + const QUADRATIC_NONRESIDUE: (Fq, Fq) = (FQ_ZERO, FQ_ONE); + + /// Coefficients for the Frobenius automorphism. + #[rustfmt::skip] + const FROBENIUS_COEFF_FP2_C1: &'static [Fq] = &[ + // NONRESIDUE**(((q^0) - 1) / 2) + FQ_ONE, + // NONRESIDUE**(((q^1) - 1) / 2) + field_new!(Fq, "-1"), + ]; + + #[inline(always)] + fn mul_fp_by_nonresidue(fe: &Self::Fp) -> Self::Fp { + let original = fe; + let mut fe = -fe.double(); + fe.double_in_place(); + fe - original + } + } + + #[allow(dead_code)] + pub const FQ2_ZERO: Fq2 = field_new!(Fq2, FQ_ZERO, FQ_ZERO); + #[allow(dead_code)] + pub const FQ2_ONE: Fq2 = field_new!(Fq2, FQ_ONE, FQ_ZERO); +} + +pub(crate) mod fq6 { + // Copy of BLS12-377's Fq6 + use super::{fq::*, fq2::*}; + use crate::{field_new, fields::*}; + + #[allow(dead_code)] + pub type Fq6 = Fp6; + + #[derive(Clone, Copy)] + pub struct Fq6Parameters; + + impl Fp6Parameters for Fq6Parameters { + type Fp2Params = Fq2Parameters; + + /// NONRESIDUE = U + #[rustfmt::skip] + const NONRESIDUE: Fq2 = field_new!(Fq2, FQ_ZERO, FQ_ONE); + + #[rustfmt::skip] + const FROBENIUS_COEFF_FP6_C1: &'static [Fq2] = &[ + // Fp2::NONRESIDUE^(((q^0) - 1) / 3) + field_new!(Fq2, FQ_ONE, FQ_ZERO), + // Fp2::NONRESIDUE^(((q^1) - 1) / 3) + field_new!(Fq2, + field_new!(Fq, "80949648264912719408558363140637477264845294720710499478137287262712535938301461879813459410946"), + FQ_ZERO, + ), + // Fp2::NONRESIDUE^(((q^2) - 1) / 3) + field_new!(Fq2, + field_new!(Fq, "80949648264912719408558363140637477264845294720710499478137287262712535938301461879813459410945"), + FQ_ZERO, + ), + // Fp2::NONRESIDUE^(((q^3) - 1) / 3) + field_new!(Fq2, field_new!(Fq, "-1"), FQ_ZERO), + // Fp2::NONRESIDUE^(((q^4) - 1) / 3) + field_new!(Fq2, + field_new!(Fq, "258664426012969093929703085429980814127835149614277183275038967946009968870203535512256352201271898244626862047231"), + FQ_ZERO, + ), + // Fp2::NONRESIDUE^(((q^5) - 1) / 3) + field_new!(Fq2, + field_new!(Fq, "258664426012969093929703085429980814127835149614277183275038967946009968870203535512256352201271898244626862047232"), + FQ_ZERO, + ), + ]; + #[rustfmt::skip] + const FROBENIUS_COEFF_FP6_C2: &'static [Fq2] = &[ + // Fp2::NONRESIDUE^((2*(q^0) - 2) / 3) + field_new!(Fq2, FQ_ONE, FQ_ZERO), + // Fp2::NONRESIDUE^((2*(q^1) - 2) / 3) + field_new!(Fq2, + field_new!(Fq, "80949648264912719408558363140637477264845294720710499478137287262712535938301461879813459410945"), + FQ_ZERO + ), + // Fp2::NONRESIDUE^((2*(q^2) - 2) / 3) + field_new!(Fq2, + field_new!(Fq, "258664426012969093929703085429980814127835149614277183275038967946009968870203535512256352201271898244626862047231"), + FQ_ZERO, + ), + // Fp2::NONRESIDUE^((2*(q^3) - 2) / 3) + field_new!(Fq2, FQ_ONE, FQ_ZERO), + // Fp2::NONRESIDUE^((2*(q^4) - 2) / 3) + field_new!(Fq2, + field_new!(Fq, "80949648264912719408558363140637477264845294720710499478137287262712535938301461879813459410945"), + FQ_ZERO, + ), + // Fp2::NONRESIDUE^((2*(q^5) - 2) / 3) + field_new!(Fq2, + field_new!(Fq, "258664426012969093929703085429980814127835149614277183275038967946009968870203535512256352201271898244626862047231"), + FQ_ZERO, + ), + ]; + + #[inline(always)] + fn mul_fp2_by_nonresidue(fe: &Fq2) -> Fq2 { + // Karatsuba multiplication with constant 
other = u. + let c0 = Fq2Parameters::mul_fp_by_nonresidue(&fe.c1); + let c1 = fe.c0; + field_new!(Fq2, c0, c1) + } + } +} diff --git a/arkworks/algebra/ff/src/to_field_vec.rs b/arkworks/algebra/ff/src/to_field_vec.rs new file mode 100644 index 00000000..e321d22f --- /dev/null +++ b/arkworks/algebra/ff/src/to_field_vec.rs @@ -0,0 +1,72 @@ +use crate::{biginteger::BigInteger, Field, FpParameters, PrimeField}; +use ark_std::vec::Vec; + +/// Types that can be converted to a vector of `F` elements. Useful for +/// specifying how public inputs to a constraint system should be represented +/// inside that constraint system. +pub trait ToConstraintField { + fn to_field_elements(&self) -> Option>; +} + +impl ToConstraintField for bool { + fn to_field_elements(&self) -> Option> { + if *self { + Some(vec![F::one()]) + } else { + Some(vec![F::zero()]) + } + } +} + +impl ToConstraintField for F { + fn to_field_elements(&self) -> Option> { + Some(vec![*self]) + } +} + +// Impl for base field +impl ToConstraintField for [F] { + #[inline] + fn to_field_elements(&self) -> Option> { + Some(self.to_vec()) + } +} + +impl ToConstraintField for () { + #[inline] + fn to_field_elements(&self) -> Option> { + Some(Vec::new()) + } +} + +impl ToConstraintField for [u8] { + #[inline] + fn to_field_elements(&self) -> Option> { + use core::convert::TryFrom; + let max_size = usize::try_from(::Params::CAPACITY / 8).unwrap(); + let bigint_size = ::BigInt::NUM_LIMBS * 8; + let fes = self + .chunks(max_size) + .map(|chunk| { + let mut bigint = vec![0u8; bigint_size]; + bigint.iter_mut().zip(chunk).for_each(|(a, b)| *a = *b); + ConstraintF::read(bigint.as_slice()).ok() + }) + .collect::>>()?; + Some(fes) + } +} + +impl ToConstraintField for [u8; 32] { + #[inline] + fn to_field_elements(&self) -> Option> { + self.as_ref().to_field_elements() + } +} + +impl ToConstraintField for Vec { + #[inline] + fn to_field_elements(&self) -> Option> { + self.as_slice().to_field_elements() + } +} diff --git a/arkworks/algebra/poly-benches/Cargo.toml b/arkworks/algebra/poly-benches/Cargo.toml new file mode 100644 index 00000000..7883f0f3 --- /dev/null +++ b/arkworks/algebra/poly-benches/Cargo.toml @@ -0,0 +1,44 @@ +[package] +name = "ark-poly-benches" +version = "0.3.0" +authors = [ "arkworks contributors" ] +description = "A library for benchmarking finite-field FFTs" +include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +license = "MIT/Apache-2.0" +edition = "2018" +publish = false + +[dependencies] +ark-ff = { version = "^0.3.0", path = "../ff" } +ark-poly = { version = "^0.3.0", path = "../poly" } +ark-std = { version = "^0.3.0", default-features = false } +ark-test-curves = { version = "^0.3.0", path = "../test-curves", default-features = false, features = [ "bls12_381_scalar_field", "mnt4_753_curve" ] } +criterion = "0.3.1" +rayon = { version = "1", optional = true } + +[features] +default = [] +parallel = ["ark-ff/parallel", "rayon", "ark-poly/parallel", "ark-std/parallel" ] + +[[bench]] +name = "fft" +path = "benches/fft.rs" +harness = false + +[[bench]] +name = "dense_uv_polynomial" +path = "benches/dense_uv_polynomial.rs" +harness = false + +[[bench]] +name = "dense_multilinear" +path = "benches/dense_multilinear.rs" +harness = false + +[[bench]] +name = "sparse_multilinear" +path = "benches/sparse_multilinear.rs" +harness = false + +[lib] +bench = false diff --git a/arkworks/algebra/poly-benches/LICENSE-APACHE b/arkworks/algebra/poly-benches/LICENSE-APACHE new file mode 120000 index 00000000..965b606f --- 
/dev/null +++ b/arkworks/algebra/poly-benches/LICENSE-APACHE @@ -0,0 +1 @@ +../LICENSE-APACHE \ No newline at end of file diff --git a/arkworks/algebra/poly-benches/LICENSE-MIT b/arkworks/algebra/poly-benches/LICENSE-MIT new file mode 120000 index 00000000..76219eb7 --- /dev/null +++ b/arkworks/algebra/poly-benches/LICENSE-MIT @@ -0,0 +1 @@ +../LICENSE-MIT \ No newline at end of file diff --git a/arkworks/algebra/poly-benches/benches/dense_multilinear.rs b/arkworks/algebra/poly-benches/benches/dense_multilinear.rs new file mode 100644 index 00000000..5bb87334 --- /dev/null +++ b/arkworks/algebra/poly-benches/benches/dense_multilinear.rs @@ -0,0 +1,56 @@ +#[macro_use] +extern crate criterion; + +use ark_ff::Field; +use ark_poly::{DenseMultilinearExtension, MultilinearExtension}; +use ark_std::ops::Range; +use ark_std::test_rng; +use ark_test_curves::bls12_381; +use criterion::{black_box, BenchmarkId, Criterion}; + +const NUM_VARIABLES_RANGE: Range = 10..21; + +fn arithmetic_op_bench(c: &mut Criterion) { + let mut rng = test_rng(); + + let mut group = c.benchmark_group("Add"); + for nv in NUM_VARIABLES_RANGE { + group.bench_with_input(BenchmarkId::new("Add", nv), &nv, |b, &nv| { + let poly1 = DenseMultilinearExtension::::rand(nv, &mut rng); + let poly2 = DenseMultilinearExtension::::rand(nv, &mut rng); + b.iter(|| black_box(&poly1 + &poly2)) + }); + } + group.finish(); + + let mut group = c.benchmark_group("Sub"); + for nv in NUM_VARIABLES_RANGE { + group.bench_with_input(BenchmarkId::new("Sub", nv), &nv, |b, &nv| { + let poly1 = DenseMultilinearExtension::::rand(nv, &mut rng); + let poly2 = DenseMultilinearExtension::::rand(nv, &mut rng); + b.iter(|| black_box(&poly1 - &poly2)) + }); + } + group.finish(); +} + +fn evaluation_op_bench(c: &mut Criterion) { + let mut rng = test_rng(); + let mut group = c.benchmark_group("Evaluate"); + for nv in NUM_VARIABLES_RANGE { + group.bench_with_input(BenchmarkId::new("evaluate", nv), &nv, |b, &nv| { + let poly = DenseMultilinearExtension::::rand(nv, &mut rng); + let point: Vec<_> = (0..nv).map(|_| F::rand(&mut rng)).collect(); + b.iter(|| black_box(poly.evaluate(&point).unwrap())) + }); + } + group.finish(); +} + +fn bench_bls_381(c: &mut Criterion) { + arithmetic_op_bench::(c); + evaluation_op_bench::(c); +} + +criterion_group!(benches, bench_bls_381); +criterion_main!(benches); diff --git a/arkworks/algebra/poly-benches/benches/dense_uv_polynomial.rs b/arkworks/algebra/poly-benches/benches/dense_uv_polynomial.rs new file mode 100644 index 00000000..6d123ceb --- /dev/null +++ b/arkworks/algebra/poly-benches/benches/dense_uv_polynomial.rs @@ -0,0 +1,116 @@ +extern crate criterion; + +use ark_ff::Field; +use ark_poly::{ + polynomial::univariate::DensePolynomial, polynomial::univariate::SparsePolynomial, Polynomial, + UVPolynomial, +}; +use ark_poly_benches::size_range; +use ark_std::rand::Rng; +use ark_test_curves::bls12_381::Fr as bls12_381_fr; +use criterion::BenchmarkId; +use criterion::{criterion_group, criterion_main, Bencher, Criterion}; + +const BENCHMARK_MIN_DEGREE: usize = 1 << 15; +const BENCHMARK_MAX_DEGREE: usize = 1 << 17; +const BENCHMARK_LOG_INTERVAL_DEGREE: usize = 1; + +const ENABLE_ADD_BENCH: bool = true; +const ENABLE_ADD_ASSIGN_BENCH: bool = true; +const ENABLE_EVALUATE_BENCH: bool = true; +const ENABLE_SPARSE_EVALUATE_BENCH: bool = true; + +// returns vec![2^{min}, 2^{min + interval}, ..., 2^{max}], where: +// interval = BENCHMARK_LOG_INTERVAL_DEGREE +// min = ceil(log_2(BENCHMARK_MIN_DEGREE)) +// max = 
ceil(log_2(BENCHMARK_MAX_DEGREE)) +fn default_size_range() -> Vec { + size_range( + BENCHMARK_LOG_INTERVAL_DEGREE, + BENCHMARK_MIN_DEGREE, + BENCHMARK_MAX_DEGREE, + ) +} + +fn setup_bench(c: &mut Criterion, name: &str, bench_fn: fn(&mut Bencher, &usize)) { + let mut group = c.benchmark_group(name); + for degree in default_size_range().iter() { + group.bench_with_input(BenchmarkId::from_parameter(degree), degree, bench_fn); + } + group.finish(); +} + +fn bench_sparse_poly_evaluate(b: &mut Bencher, non_zero_entries: &usize) { + const MAX_DEGREE: usize = 1 << 15; + // Per benchmark setup + let mut rng = &mut ark_std::test_rng(); + let mut inner: Vec<(usize, F)> = Vec::with_capacity(*non_zero_entries); + (0..*non_zero_entries) + .for_each(|_| inner.push((rng.gen_range(0..MAX_DEGREE), F::rand(&mut rng)))); + let poly = SparsePolynomial::::from_coefficients_vec(inner); + b.iter(|| { + // Per benchmark iteration + let pt = F::rand(&mut rng); + poly.evaluate(&pt); + }); +} + +fn bench_poly_evaluate(b: &mut Bencher, degree: &usize) { + // Per benchmark setup + let mut rng = &mut ark_std::test_rng(); + let poly = DensePolynomial::::rand(*degree, &mut rng); + b.iter(|| { + // Per benchmark iteration + let pt = F::rand(&mut rng); + poly.evaluate(&pt); + }); +} + +fn bench_poly_add(b: &mut Bencher, degree: &usize) { + // Per benchmark setup + let mut rng = &mut ark_std::test_rng(); + let poly_one = DensePolynomial::::rand(*degree, &mut rng); + let poly_two = DensePolynomial::::rand(*degree, &mut rng); + b.iter(|| { + // Per benchmark iteration + let _poly_three = &poly_one + &poly_two; + }); +} + +fn bench_poly_add_assign(b: &mut Bencher, degree: &usize) { + // Per benchmark setup + let mut rng = &mut ark_std::test_rng(); + let mut poly_one = DensePolynomial::::rand(*degree, &mut rng); + let poly_two = DensePolynomial::::rand(*degree, &mut rng); + b.iter(|| { + // Per benchmark iteration + poly_one += &poly_two; + }); +} + +fn poly_benches(c: &mut Criterion, name: &'static str) { + if ENABLE_ADD_BENCH { + let cur_name = format!("{:?} - add_polynomial", name.clone()); + setup_bench::(c, &cur_name, bench_poly_add::); + } + if ENABLE_ADD_ASSIGN_BENCH { + let cur_name = format!("{:?} - add_assign_polynomial", name.clone()); + setup_bench::(c, &cur_name, bench_poly_add_assign::); + } + if ENABLE_EVALUATE_BENCH { + let cur_name = format!("{:?} - evaluate_polynomial", name.clone()); + setup_bench::(c, &cur_name, bench_poly_evaluate::); + } + if ENABLE_SPARSE_EVALUATE_BENCH { + let cur_name = format!("{:?} - evaluate_sparse_polynomial", name.clone()); + setup_bench::(c, &cur_name, bench_sparse_poly_evaluate::); + } +} + +fn bench_bls12_381(c: &mut Criterion) { + let name = "bls12_381"; + poly_benches::(c, name); +} + +criterion_group!(benches, bench_bls12_381); +criterion_main!(benches); diff --git a/arkworks/algebra/poly-benches/benches/fft.rs b/arkworks/algebra/poly-benches/benches/fft.rs new file mode 100644 index 00000000..61fe9a40 --- /dev/null +++ b/arkworks/algebra/poly-benches/benches/fft.rs @@ -0,0 +1,138 @@ +extern crate criterion; + +use ark_ff::FftField; +use ark_poly::{polynomial::univariate::DensePolynomial, polynomial::UVPolynomial}; +use ark_poly::{EvaluationDomain, MixedRadixEvaluationDomain, Radix2EvaluationDomain}; +use ark_poly_benches::size_range; +use ark_test_curves::bls12_381::Fr as bls12_381_fr; +use ark_test_curves::mnt4_753::Fq as mnt6_753_fr; +use criterion::BenchmarkId; +use criterion::{criterion_group, criterion_main, Bencher, Criterion}; + +// degree bounds to benchmark on +// 
e.g. degree bound of 2^{15}, means we do an FFT for a degree (2^{15} - 1) polynomial +const BENCHMARK_MIN_DEGREE: usize = 1 << 15; +const BENCHMARK_MAX_DEGREE_BLS12_381: usize = 1 << 22; +const BENCHMARK_MAX_DEGREE_MNT6_753: usize = 1 << 17; +const BENCHMARK_LOG_INTERVAL_DEGREE: usize = 1; + +const ENABLE_RADIX2_BENCHES: bool = true; +const ENABLE_MIXED_RADIX_BENCHES: bool = true; + +// returns vec![2^{min}, 2^{min + interval}, ..., 2^{max}], where: +// interval = BENCHMARK_LOG_INTERVAL_DEGREE +// min = ceil(log_2(BENCHMARK_MIN_DEGREE)) +// max = ceil(log_2(BENCHMARK_MAX_DEGREE)) +fn default_size_range_bls12_381() -> Vec { + size_range( + BENCHMARK_LOG_INTERVAL_DEGREE, + BENCHMARK_MIN_DEGREE, + BENCHMARK_MAX_DEGREE_BLS12_381, + ) +} + +fn default_size_range_mnt6_753() -> Vec { + size_range( + BENCHMARK_LOG_INTERVAL_DEGREE, + BENCHMARK_MIN_DEGREE, + BENCHMARK_MAX_DEGREE_MNT6_753, + ) +} + +fn setup_bench( + c: &mut Criterion, + name: &str, + bench_fn: fn(&mut Bencher, &usize), + size_range: &[usize], +) { + let mut group = c.benchmark_group(name); + for degree in size_range.iter() { + group.bench_with_input(BenchmarkId::from_parameter(degree), degree, bench_fn); + } + group.finish(); +} + +fn fft_common_setup>(degree: usize) -> (D, Vec) { + let mut rng = &mut ark_std::test_rng(); + let domain = D::new(degree).unwrap(); + let a = DensePolynomial::::rand(degree - 1, &mut rng) + .coeffs() + .to_vec(); + (domain, a) +} + +fn bench_fft_in_place>(b: &mut Bencher, degree: &usize) { + // Per benchmark setup + let (domain, mut a) = fft_common_setup::(*degree); + b.iter(|| { + // Per benchmark iteration + domain.fft_in_place(&mut a); + }); +} + +fn bench_ifft_in_place>(b: &mut Bencher, degree: &usize) { + // Per benchmark setup + let (domain, mut a) = fft_common_setup::(*degree); + b.iter(|| { + // Per benchmark iteration + domain.ifft_in_place(&mut a); + }); +} + +fn bench_coset_fft_in_place>(b: &mut Bencher, degree: &usize) { + // Per benchmark setup + let (domain, mut a) = fft_common_setup::(*degree); + b.iter(|| { + // Per benchmark iteration + domain.coset_fft_in_place(&mut a); + }); +} + +fn bench_coset_ifft_in_place>(b: &mut Bencher, degree: &usize) { + // Per benchmark setup + let (domain, mut a) = fft_common_setup::(*degree); + b.iter(|| { + // Per benchmark iteration + domain.coset_ifft_in_place(&mut a); + }); +} + +fn fft_benches>( + c: &mut Criterion, + name: &'static str, + size_range: &[usize], +) { + let cur_name = format!("{:?} - subgroup_fft_in_place", name.clone()); + setup_bench(c, &cur_name, bench_fft_in_place::, size_range); + let cur_name = format!("{:?} - subgroup_ifft_in_place", name.clone()); + setup_bench(c, &cur_name, bench_ifft_in_place::, size_range); + let cur_name = format!("{:?} - coset_fft_in_place", name.clone()); + setup_bench(c, &cur_name, bench_coset_fft_in_place::, size_range); + let cur_name = format!("{:?} - coset_ifft_in_place", name.clone()); + setup_bench(c, &cur_name, bench_coset_ifft_in_place::, size_range); +} + +fn bench_bls12_381(c: &mut Criterion) { + let name = "bls12_381 - radix2"; + if ENABLE_RADIX2_BENCHES { + fft_benches::>( + c, + name, + &default_size_range_bls12_381(), + ); + } +} + +fn bench_mnt6_753(c: &mut Criterion) { + let name = "mnt6_753 - mixed radix"; + if ENABLE_MIXED_RADIX_BENCHES { + fft_benches::>( + c, + name, + &default_size_range_mnt6_753(), + ); + } +} + +criterion_group!(benches, bench_bls12_381, bench_mnt6_753); +criterion_main!(benches); diff --git a/arkworks/algebra/poly-benches/benches/sparse_multilinear.rs 
b/arkworks/algebra/poly-benches/benches/sparse_multilinear.rs new file mode 100644 index 00000000..b5ff9e17 --- /dev/null +++ b/arkworks/algebra/poly-benches/benches/sparse_multilinear.rs @@ -0,0 +1,91 @@ +#[macro_use] +extern crate criterion; + +use ark_ff::Field; +use ark_poly::{MultilinearExtension, SparseMultilinearExtension}; +use ark_std::ops::Range; +use ark_std::test_rng; +use ark_test_curves::bls12_381; +use criterion::{black_box, BenchmarkId, Criterion}; + +const NUM_VARIABLES_RANGE: Range = 12..23; + +fn arithmetic_op_bench(c: &mut Criterion) { + let mut rng = test_rng(); + + let mut group = c.benchmark_group("Add"); + for nv in NUM_VARIABLES_RANGE { + let num_nonzero_entries = 1 << (nv / 2); + group.bench_with_input( + BenchmarkId::new("add", num_nonzero_entries), + &num_nonzero_entries, + |b, &num_nonzero_entries| { + let poly1 = SparseMultilinearExtension::::rand_with_config( + nv, + num_nonzero_entries, + &mut rng, + ); + let poly2 = SparseMultilinearExtension::::rand_with_config( + nv, + num_nonzero_entries, + &mut rng, + ); + b.iter(|| black_box(&poly1 + &poly2)) + }, + ); + } + group.finish(); + + let mut group = c.benchmark_group("Sub"); + for nv in NUM_VARIABLES_RANGE { + let num_nonzero_entries = 1 << (nv / 2); + group.bench_with_input( + BenchmarkId::new("sub", num_nonzero_entries), + &num_nonzero_entries, + |b, &num_nonzero_entries| { + let poly1 = SparseMultilinearExtension::::rand_with_config( + nv, + num_nonzero_entries, + &mut rng, + ); + let poly2 = SparseMultilinearExtension::::rand_with_config( + nv, + num_nonzero_entries, + &mut rng, + ); + b.iter(|| black_box(&poly1 - &poly2)) + }, + ); + } + group.finish(); +} + +fn evaluation_op_bench(c: &mut Criterion) { + let mut rng = test_rng(); + let mut group = c.benchmark_group("Evaluate"); + for nv in NUM_VARIABLES_RANGE { + let num_nonzero_entries = 1 << (nv / 2); + group.bench_with_input( + BenchmarkId::new("evaluate", num_nonzero_entries), + &num_nonzero_entries, + |b, &num_nonzero_entries| { + let poly = SparseMultilinearExtension::::rand_with_config( + nv, + num_nonzero_entries, + &mut rng, + ); + let point: Vec<_> = (0..nv).map(|_| F::rand(&mut rng)).collect(); + b.iter(|| black_box(poly.evaluate(&point).unwrap())) + }, + ); + } + group.finish(); +} + +fn bench_bls381(c: &mut Criterion) { + arithmetic_op_bench::(c); + evaluation_op_bench::(c); +} + +criterion_group!(benches, bench_bls381); +criterion_main!(benches); diff --git a/arkworks/algebra/poly-benches/src/lib.rs b/arkworks/algebra/poly-benches/src/lib.rs new file mode 100644 index 00000000..98f4234f --- /dev/null +++ b/arkworks/algebra/poly-benches/src/lib.rs @@ -0,0 +1,16 @@ +use ark_std::cmp::min; + +// Utility function for getting a vector of degrees to benchmark on. 
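A small sketch of what the `size_range` utility (continued in the hunk below) returns; the exact values assume the implementation that follows:

```rust
use ark_poly_benches::size_range;

fn main() {
    // Sizes start at the next power of two >= min_degree and then
    // grow by a factor of 2^log_interval, clamped at max_degree.
    assert_eq!(
        size_range(1, 1 << 15, 1 << 17),
        vec![1 << 15, 1 << 16, 1 << 17]
    );
}
```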
+// returns vec![2^{min}, 2^{min + interval}, ..., 2^{max}], where:
+// interval = log_interval
+// min = ceil(log_2(min_degree))
+// max = ceil(log_2(max_degree))
+pub fn size_range(log_interval: usize, min_degree: usize, max_degree: usize) -> Vec<usize> {
+    let mut to_ret = vec![min_degree.next_power_of_two()];
+    let interval = 1 << log_interval;
+    while *to_ret.last().unwrap() < max_degree {
+        let next_elem = min(max_degree, interval * to_ret.last().unwrap());
+        to_ret.push(next_elem);
+    }
+    to_ret
+}
diff --git a/arkworks/algebra/poly/Cargo.toml b/arkworks/algebra/poly/Cargo.toml
new file mode 100644
index 00000000..5d448d1a
--- /dev/null
+++ b/arkworks/algebra/poly/Cargo.toml
@@ -0,0 +1,29 @@
+[package]
+name = "ark-poly"
+version = "0.3.0"
+authors = [ "arkworks contributors" ]
+description = "A library for efficient polynomial arithmetic via FFTs over finite fields"
+homepage = "https://arkworks.rs"
+repository = "https://github.com/arkworks-rs/algebra"
+documentation = "https://docs.rs/ark-poly/"
+keywords = ["cryptography", "finite-fields", "fft", "polynomials"]
+categories = ["cryptography"]
+include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"]
+license = "MIT/Apache-2.0"
+edition = "2018"
+
+[dependencies]
+ark-ff = { version = "^0.3.0", path = "../ff", default-features = false }
+ark-serialize = { version = "^0.3.0", path = "../serialize", default-features = false, features = ["derive"] }
+ark-std = { path = "../../std", version = "^0.3.0", default-features = false }
+rayon = { version = "1", optional = true }
+derivative = { version = "2", default-features = false, features = [ "use_core" ] }
+hashbrown = { version = "0.11.1" }
+
+[dev-dependencies]
+ark-test-curves = { path = "../test-curves", default-features = false, features = [ "bls12_381_curve", "bn384_small_two_adicity_curve" ] }
+
+[features]
+default = []
+std = [ "ark-std/std", "ark-ff/std" ]
+parallel = [ "std", "ark-ff/parallel", "rayon", "ark-std/parallel" ]
diff --git a/arkworks/algebra/poly/LICENSE-APACHE b/arkworks/algebra/poly/LICENSE-APACHE
new file mode 120000
index 00000000..965b606f
--- /dev/null
+++ b/arkworks/algebra/poly/LICENSE-APACHE
@@ -0,0 +1 @@
+../LICENSE-APACHE
\ No newline at end of file
diff --git a/arkworks/algebra/poly/LICENSE-MIT b/arkworks/algebra/poly/LICENSE-MIT
new file mode 120000
index 00000000..76219eb7
--- /dev/null
+++ b/arkworks/algebra/poly/LICENSE-MIT
@@ -0,0 +1 @@
+../LICENSE-MIT
\ No newline at end of file
diff --git a/arkworks/algebra/poly/README.md b/arkworks/algebra/poly/README.md
new file mode 100644
index 00000000..171a816f
--- /dev/null
+++ b/arkworks/algebra/poly/README.md
@@ -0,0 +1,61 @@
+<h1 align="center">ark-poly</h1>
+
+This crate provides traits and implementations for polynomials, FFT-friendly subsets of a field (dubbed "domains"), and FFTs for these domains.
+
+### Polynomials
+
+The `polynomial` module provides the following traits for defining polynomials in coefficient form:
+
+- [`Polynomial`](./src/polynomial/mod.rs#L16):
+Requires implementors to support common operations on polynomials,
+such as `Add`, `Sub`, `Zero`, evaluation at a point, degree, etc.,
+and defines methods to serialize to and from the coefficient representation of the polynomial.
+- [`UVPolynomial`](./src/polynomial/mod.rs#L43):
+Specifies that a `Polynomial` is actually a *univariate* polynomial.
+- [`MVPolynomial`](./src/polynomial/mod.rs#L59):
+Specifies that a `Polynomial` is actually a *multivariate* polynomial.
+
+This crate also provides the following data structures that implement these traits:
+
+- [`univariate/DensePolynomial`](./src/polynomial/univariate/dense.rs#L22):
+Represents degree-`d` univariate polynomials via a list of `d + 1` coefficients.
+This struct implements the [`UVPolynomial`](./src/polynomial/mod.rs#L43) trait.
+- [`univariate/SparsePolynomial`](./src/polynomial/univariate/sparse.rs#L15):
+Represents degree-`d` univariate polynomials via a list containing all non-zero monomials.
+It should only be used when most coefficients of the polynomial are zero.
+This struct implements the [`Polynomial`](./src/polynomial/mod.rs#L16) trait
+(but *not* the `UVPolynomial` trait).
+- [`multivariate/SparsePolynomial`](./src/polynomial/multivariate/sparse.rs#L21):
+Represents multivariate polynomials via a list containing all non-zero monomials.
+
+This crate also provides the [`univariate/DenseOrSparsePolynomial`](./src/polynomial/univariate/mod.rs#L16) enum, which allows the user to abstract over the type of the underlying univariate polynomial (dense or sparse).
+
+### Evaluations
+
+The `evaluations` module provides data structures to represent univariate polynomials in Lagrange form.
+
+- [`univariate/Evaluations`](./src/evaluations/univariate/mod.rs#L18):
+Represents a univariate polynomial in evaluation form, which can be used for FFTs.
+
+The `evaluations` module also provides the following trait for defining multivariate polynomials in Lagrange form:
+
+- [`multivariate/multilinear/MultilinearExtension`](./src/evaluations/multivariate/multilinear/mod.rs#L23):
+Specifies a multilinear polynomial evaluated over the boolean hypercube.
+
+This crate provides the following data structures that implement this trait:
+
+- [`multivariate/multilinear/DenseMultilinearExtension`](./src/evaluations/multivariate/multilinear/dense.rs#L17):
+Represents a multilinear extension via a list of evaluations over the boolean hypercube.
+
+- [`multivariate/multilinear/SparseMultilinearExtension`](./src/evaluations/multivariate/multilinear/sparse.rs#L20):
+Represents a multilinear extension via a list of its non-zero evaluations over the boolean hypercube.
+
+### Domains
+
+TODO
diff --git a/arkworks/algebra/poly/src/domain/general.rs b/arkworks/algebra/poly/src/domain/general.rs
new file mode 100644
index 00000000..cc0630a6
--- /dev/null
+++ b/arkworks/algebra/poly/src/domain/general.rs
@@ -0,0 +1,319 @@
+//! This module contains a `GeneralEvaluationDomain` for
+//! performing various kinds of polynomial arithmetic on top of
+//! an FFT-friendly finite field.
+//!
+//! It is a wrapper around specific implementations of `EvaluationDomain` that
+//! automatically chooses the most efficient implementation
+//!
depending on the number of coefficients and the two-adicity of the prime. + +pub use crate::domain::utils::Elements; +use crate::domain::{ + DomainCoeff, EvaluationDomain, MixedRadixEvaluationDomain, Radix2EvaluationDomain, +}; +use ark_ff::{FftField, FftParameters}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError}; +use ark_std::{ + io::{Read, Write}, + vec::Vec, +}; + +/// Defines a domain over which finite field (I)FFTs can be performed. +/// Generally tries to build a radix-2 domain and falls back to a mixed-radix +/// domain if the radix-2 multiplicative subgroup is too small. +#[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] +pub enum GeneralEvaluationDomain { + /// Radix-2 domain + Radix2(Radix2EvaluationDomain), + /// Mixed-radix domain + MixedRadix(MixedRadixEvaluationDomain), +} + +macro_rules! map { + ($self:expr, $f1:ident $(, $x:expr)*) => { + match $self { + Self::Radix2(domain) => EvaluationDomain::$f1(domain, $($x)*), + Self::MixedRadix(domain) => EvaluationDomain::$f1(domain, $($x)*), + } + } +} + +impl CanonicalSerialize for GeneralEvaluationDomain { + fn serialize(&self, mut writer: W) -> Result<(), SerializationError> { + let type_id = match self { + GeneralEvaluationDomain::Radix2(_) => 0u8, + GeneralEvaluationDomain::MixedRadix(_) => 1u8, + }; + type_id.serialize(&mut writer)?; + + match self { + GeneralEvaluationDomain::Radix2(domain) => domain.serialize(&mut writer), + GeneralEvaluationDomain::MixedRadix(domain) => domain.serialize(&mut writer), + } + } + + fn serialized_size(&self) -> usize { + let type_id = match self { + GeneralEvaluationDomain::Radix2(_) => 0u8, + GeneralEvaluationDomain::MixedRadix(_) => 1u8, + }; + + type_id.serialized_size() + + match self { + GeneralEvaluationDomain::Radix2(domain) => domain.serialized_size(), + GeneralEvaluationDomain::MixedRadix(domain) => domain.serialized_size(), + } + } + + fn serialize_uncompressed(&self, mut writer: W) -> Result<(), SerializationError> { + let type_id = match self { + GeneralEvaluationDomain::Radix2(_) => 0u8, + GeneralEvaluationDomain::MixedRadix(_) => 1u8, + }; + type_id.serialize_uncompressed(&mut writer)?; + + match self { + GeneralEvaluationDomain::Radix2(domain) => domain.serialize_uncompressed(&mut writer), + GeneralEvaluationDomain::MixedRadix(domain) => { + domain.serialize_uncompressed(&mut writer) + } + } + } + + fn serialize_unchecked(&self, mut writer: W) -> Result<(), SerializationError> { + let type_id = match self { + GeneralEvaluationDomain::Radix2(_) => 0u8, + GeneralEvaluationDomain::MixedRadix(_) => 1u8, + }; + type_id.serialize_unchecked(&mut writer)?; + + match self { + GeneralEvaluationDomain::Radix2(domain) => domain.serialize_unchecked(&mut writer), + GeneralEvaluationDomain::MixedRadix(domain) => domain.serialize_unchecked(&mut writer), + } + } + + fn uncompressed_size(&self) -> usize { + let type_id = match self { + GeneralEvaluationDomain::Radix2(_) => 0u8, + GeneralEvaluationDomain::MixedRadix(_) => 1u8, + }; + + type_id.uncompressed_size() + + match self { + GeneralEvaluationDomain::Radix2(domain) => domain.uncompressed_size(), + GeneralEvaluationDomain::MixedRadix(domain) => domain.uncompressed_size(), + } + } +} + +impl CanonicalDeserialize for GeneralEvaluationDomain { + fn deserialize(mut reader: R) -> Result { + let type_id = u8::deserialize(&mut reader)?; + + if type_id == 0u8 { + Ok(Self::Radix2(Radix2EvaluationDomain::::deserialize( + &mut reader, + )?)) + } else if type_id == 1u8 { + Ok(Self::MixedRadix( + 
MixedRadixEvaluationDomain::::deserialize(&mut reader)?, + )) + } else { + Err(SerializationError::InvalidData) + } + } + + fn deserialize_uncompressed(mut reader: R) -> Result { + let type_id = u8::deserialize_uncompressed(&mut reader)?; + + if type_id == 0u8 { + Ok(Self::Radix2( + Radix2EvaluationDomain::::deserialize_uncompressed(&mut reader)?, + )) + } else if type_id == 1u8 { + Ok(Self::MixedRadix( + MixedRadixEvaluationDomain::::deserialize_uncompressed(&mut reader)?, + )) + } else { + Err(SerializationError::InvalidData) + } + } + + fn deserialize_unchecked(mut reader: R) -> Result { + let type_id = u8::deserialize_unchecked(&mut reader)?; + + if type_id == 0u8 { + Ok(Self::Radix2( + Radix2EvaluationDomain::::deserialize_unchecked(&mut reader)?, + )) + } else if type_id == 1u8 { + Ok(Self::MixedRadix( + MixedRadixEvaluationDomain::::deserialize_unchecked(&mut reader)?, + )) + } else { + Err(SerializationError::InvalidData) + } + } +} + +impl EvaluationDomain for GeneralEvaluationDomain { + type Elements = GeneralElements; + + /// Construct a domain that is large enough for evaluations of a polynomial + /// having `num_coeffs` coefficients. + /// + /// If the field specifies a small subgroup for a mixed-radix FFT and + /// the radix-2 FFT cannot be constructed, this method tries + /// constructing a mixed-radix FFT instead. + fn new(num_coeffs: usize) -> Option { + let domain = Radix2EvaluationDomain::new(num_coeffs); + if let Some(domain) = domain { + return Some(GeneralEvaluationDomain::Radix2(domain)); + } + + if F::FftParams::SMALL_SUBGROUP_BASE.is_some() { + return Some(GeneralEvaluationDomain::MixedRadix( + MixedRadixEvaluationDomain::new(num_coeffs)?, + )); + } + + None + } + + fn compute_size_of_domain(num_coeffs: usize) -> Option { + let domain_size = Radix2EvaluationDomain::::compute_size_of_domain(num_coeffs); + if let Some(domain_size) = domain_size { + return Some(domain_size); + } + + if F::FftParams::SMALL_SUBGROUP_BASE.is_some() { + return Some(MixedRadixEvaluationDomain::::compute_size_of_domain( + num_coeffs, + )?); + } + + None + } + + #[inline] + fn size(&self) -> usize { + map!(self, size) + } + + #[inline] + fn fft_in_place>(&self, coeffs: &mut Vec) { + map!(self, fft_in_place, coeffs) + } + + #[inline] + fn ifft_in_place>(&self, evals: &mut Vec) { + map!(self, ifft_in_place, evals) + } + + #[inline] + fn coset_fft_in_place>(&self, coeffs: &mut Vec) { + map!(self, coset_fft_in_place, coeffs) + } + + #[inline] + fn coset_ifft_in_place>(&self, evals: &mut Vec) { + map!(self, coset_ifft_in_place, evals) + } + + #[inline] + fn evaluate_all_lagrange_coefficients(&self, tau: F) -> Vec { + map!(self, evaluate_all_lagrange_coefficients, tau) + } + + #[inline] + fn vanishing_polynomial(&self) -> crate::univariate::SparsePolynomial { + map!(self, vanishing_polynomial) + } + + #[inline] + fn evaluate_vanishing_polynomial(&self, tau: F) -> F { + map!(self, evaluate_vanishing_polynomial, tau) + } + + /// Returns the `i`-th element of the domain. + fn element(&self, i: usize) -> F { + map!(self, element, i) + } + + /// Return an iterator over the elements of the domain. + fn elements(&self) -> GeneralElements { + GeneralElements(map!(self, elements)) + } +} + +/// A generalized version of an iterator over the elements of a domain. 
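To make the dispatch and the tagged serialization concrete, here is a minimal round-trip sketch, assuming the BLS12-381 scalar field from `ark-test-curves` (a size-8 request selects the `Radix2` variant, so the tag byte is 0):

```rust
use ark_ff::UniformRand;
use ark_poly::{EvaluationDomain, GeneralEvaluationDomain};
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
use ark_test_curves::bls12_381::Fr;

fn main() {
    let rng = &mut ark_std::test_rng();
    let domain = GeneralEvaluationDomain::<Fr>::new(8).unwrap();
    let coeffs: Vec<Fr> = (0..8).map(|_| Fr::rand(rng)).collect();
    // FFT to evaluations and back: the IFFT returns the original inputs.
    let evals = domain.fft(&coeffs);
    assert_eq!(domain.ifft(&evals), coeffs);
    // Serialization writes the 1-byte variant tag (0 = Radix2) first.
    let mut bytes = Vec::new();
    domain.serialize(&mut bytes).unwrap();
    let de = GeneralEvaluationDomain::<Fr>::deserialize(&bytes[..]).unwrap();
    assert_eq!(de, domain);
}
```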
+pub struct GeneralElements(Elements); + +impl Iterator for GeneralElements { + type Item = F; + + #[inline] + fn next(&mut self) -> Option { + self.0.next() + } +} + +#[cfg(test)] +mod tests { + use crate::polynomial::Polynomial; + use crate::{EvaluationDomain, GeneralEvaluationDomain}; + use ark_ff::Zero; + use ark_std::rand::Rng; + use ark_std::test_rng; + use ark_test_curves::bls12_381::Fr; + use ark_test_curves::bn384_small_two_adicity::Fr as BNFr; + + #[test] + fn vanishing_polynomial_evaluation() { + let rng = &mut test_rng(); + for coeffs in 0..10 { + let domain = GeneralEvaluationDomain::::new(coeffs).unwrap(); + let z = domain.vanishing_polynomial(); + for _ in 0..100 { + let point = rng.gen(); + assert_eq!( + z.evaluate(&point), + domain.evaluate_vanishing_polynomial(point) + ) + } + } + + for coeffs in 15..17 { + let domain = GeneralEvaluationDomain::::new(coeffs).unwrap(); + let z = domain.vanishing_polynomial(); + for _ in 0..100 { + let point = rng.gen(); + assert_eq!( + z.evaluate(&point), + domain.evaluate_vanishing_polynomial(point) + ) + } + } + } + + #[test] + fn vanishing_polynomial_vanishes_on_domain() { + for coeffs in 0..1000 { + let domain = GeneralEvaluationDomain::::new(coeffs).unwrap(); + let z = domain.vanishing_polynomial(); + for point in domain.elements() { + assert!(z.evaluate(&point).is_zero()) + } + } + } + + #[test] + fn size_of_elements() { + for coeffs in 1..10 { + let size = 1 << coeffs; + let domain = GeneralEvaluationDomain::::new(size).unwrap(); + let domain_size = domain.size(); + assert_eq!(domain_size, domain.elements().count()); + } + } +} diff --git a/arkworks/algebra/poly/src/domain/mixed_radix.rs b/arkworks/algebra/poly/src/domain/mixed_radix.rs new file mode 100644 index 00000000..7851e53a --- /dev/null +++ b/arkworks/algebra/poly/src/domain/mixed_radix.rs @@ -0,0 +1,503 @@ +//! This module contains a `MixedRadixEvaluationDomain` for +//! performing various kinds of polynomial arithmetic on top of +//! fields that are FFT-friendly but do not have high-enough +//! two-adicity to perform the FFT efficiently, i.e. the multiplicative +//! subgroup `G` generated by `F::TWO_ADIC_ROOT_OF_UNITY` is not large enough. +//! `MixedRadixEvaluationDomain` resolves +//! this issue by using a larger subgroup obtained by combining +//! `G` with another subgroup of size +//! `F::SMALL_SUBGROUP_BASE^(F::SMALL_SUBGROUP_BASE_ADICITY)`, +//! to obtain a subgroup generated by `F::LARGE_SUBGROUP_ROOT_OF_UNITY`. + +pub use crate::domain::utils::Elements; +use crate::domain::{ + utils::{best_fft, bitreverse}, + DomainCoeff, EvaluationDomain, +}; +use ark_ff::{fields::utils::k_adicity, FftField, FftParameters}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError}; +use ark_std::{ + cmp::min, + convert::TryFrom, + fmt, + io::{Read, Write}, + vec::Vec, +}; +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +/// Defines a domain over which finite field (I)FFTs can be performed. Works +/// only for fields that have a multiplicative subgroup of size that is +/// a power-of-2 and another small subgroup over a different base defined. +#[derive(Copy, Clone, Hash, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] +pub struct MixedRadixEvaluationDomain { + /// The size of the domain. + pub size: u64, + /// `log_2(self.size)`. + pub log_size_of_group: u32, + /// Size of the domain as a field element. + pub size_as_field_element: F, + /// Inverse of the size in the field. + pub size_inv: F, + /// A generator of the subgroup. 
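A small construction sketch for the mixed-radix case, assuming the `bn384_small_two_adicity` test field that this module's own tests use; its parameters define a small subgroup, so domain sizes of the form `2^a * q^b` are available:

```rust
use ark_poly::{EvaluationDomain, MixedRadixEvaluationDomain};
use ark_test_curves::bn384_small_two_adicity::Fq as Fr;

fn main() {
    // The domain size is the smallest supported 2^a * q^b >= num_coeffs.
    let domain = MixedRadixEvaluationDomain::<Fr>::new(5).unwrap();
    assert!(domain.size() >= 5);
    assert_eq!(
        MixedRadixEvaluationDomain::<Fr>::compute_size_of_domain(5),
        Some(domain.size())
    );
}
```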
+ pub group_gen: F, + /// Inverse of the generator of the subgroup. + pub group_gen_inv: F, + /// Multiplicative generator of the finite field. + pub generator_inv: F, +} + +impl fmt::Debug for MixedRadixEvaluationDomain { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "Mixed-radix multiplicative subgroup of size {}", + self.size + ) + } +} + +impl EvaluationDomain for MixedRadixEvaluationDomain { + type Elements = Elements; + + /// Construct a domain that is large enough for evaluations of a polynomial + /// having `num_coeffs` coefficients. + fn new(num_coeffs: usize) -> Option { + let small_subgroup_base = F::FftParams::SMALL_SUBGROUP_BASE?; + + // Compute the best size of our evaluation domain. + let num_coeffs = best_mixed_domain_size::(num_coeffs); + + // Compute the size of our evaluation domain + let q = usize::try_from(small_subgroup_base).unwrap(); + let q_adicity = k_adicity(q, num_coeffs); + let q_part = q.pow(q_adicity); + + let two_adicity = k_adicity(2, num_coeffs); + let two_part = 1 << two_adicity; + + let size = u64::try_from(num_coeffs).unwrap(); + let log_size_of_group = two_adicity; + + if num_coeffs != q_part * two_part { + return None; + } + + // Compute the generator for the multiplicative subgroup. + // It should be the num_coeffs root of unity. + let group_gen = F::get_root_of_unity(num_coeffs)?; + // Check that it is indeed the requested root of unity. + debug_assert_eq!(group_gen.pow([size]), F::one()); + let size_as_field_element = F::from(size); + let size_inv = size_as_field_element.inverse()?; + + Some(MixedRadixEvaluationDomain { + size, + log_size_of_group, + size_as_field_element, + size_inv, + group_gen, + group_gen_inv: group_gen.inverse()?, + generator_inv: F::multiplicative_generator().inverse()?, + }) + } + + fn compute_size_of_domain(num_coeffs: usize) -> Option { + let small_subgroup_base = F::FftParams::SMALL_SUBGROUP_BASE?; + + // Compute the best size of our evaluation domain. 
+ let num_coeffs = best_mixed_domain_size::(num_coeffs); + + let q = usize::try_from(small_subgroup_base).unwrap(); + let q_adicity = k_adicity(q, num_coeffs); + let q_part = q.pow(q_adicity); + + let two_adicity = k_adicity(2, num_coeffs); + let two_part = 1 << two_adicity; + + if num_coeffs == q_part * two_part { + Some(num_coeffs) + } else { + None + } + } + + #[inline] + fn size(&self) -> usize { + usize::try_from(self.size).unwrap() + } + + #[inline] + fn fft_in_place>(&self, coeffs: &mut Vec) { + coeffs.resize(self.size(), T::zero()); + best_fft( + coeffs, + self.group_gen, + self.log_size_of_group, + serial_mixed_radix_fft::, + ) + } + + #[inline] + fn ifft_in_place>(&self, evals: &mut Vec) { + evals.resize(self.size(), T::zero()); + best_fft( + evals, + self.group_gen_inv, + self.log_size_of_group, + serial_mixed_radix_fft::, + ); + ark_std::cfg_iter_mut!(evals).for_each(|val| *val *= self.size_inv); + } + + #[inline] + fn coset_ifft_in_place>(&self, evals: &mut Vec) { + self.ifft_in_place(evals); + Self::distribute_powers(evals, self.generator_inv); + } + + fn evaluate_all_lagrange_coefficients(&self, tau: F) -> Vec { + // Evaluate all Lagrange polynomials + let size = self.size(); + let t_size = tau.pow(&[self.size]); + let one = F::one(); + if t_size.is_one() { + let mut u = vec![F::zero(); size]; + let mut omega_i = one; + for u_i in u.iter_mut().take(size) { + if omega_i == tau { + *u_i = one; + break; + } + omega_i *= &self.group_gen; + } + u + } else { + use ark_ff::fields::batch_inversion; + + let mut l = (t_size - one) * self.size_inv; + let mut r = one; + let mut u = vec![F::zero(); size]; + let mut ls = vec![F::zero(); size]; + for i in 0..size { + u[i] = tau - r; + ls[i] = l; + l *= &self.group_gen; + r *= &self.group_gen; + } + + batch_inversion(u.as_mut_slice()); + + ark_std::cfg_iter_mut!(u) + .zip(ls) + .for_each(|(tau_minus_r, l)| { + *tau_minus_r = l * *tau_minus_r; + }); + + u + } + } + + fn vanishing_polynomial(&self) -> crate::univariate::SparsePolynomial { + let coeffs = vec![(0, -F::one()), (self.size(), F::one())]; + crate::univariate::SparsePolynomial::from_coefficients_vec(coeffs) + } + + /// This evaluates the vanishing polynomial for this domain at tau. + /// For multiplicative subgroups, this polynomial is `z(X) = X^self.size - + /// 1`. + fn evaluate_vanishing_polynomial(&self, tau: F) -> F { + tau.pow(&[self.size]) - F::one() + } + + /// Returns the `i`-th element of the domain, where elements are ordered by + /// their power of the generator which they correspond to. + /// e.g. the `i`-th element is g^i + fn element(&self, i: usize) -> F { + // TODO: Consider precomputed exponentiation tables if we need this to be faster. + self.group_gen.pow(&[i as u64]) + } + + /// Return an iterator over the elements of the domain. + fn elements(&self) -> Elements { + Elements { + cur_elem: F::one(), + cur_pow: 0, + size: self.size, + group_gen: self.group_gen, + } + } +} + +fn mixed_radix_fft_permute( + two_adicity: u32, + q_adicity: u32, + q: usize, + n: usize, + mut i: usize, +) -> usize { + // This is the permutation obtained by splitting into 2 groups two_adicity times + // and then q groups q_adicity many times. It can be efficiently described + // as follows i = 2^0 b_0 + 2^1 b_1 + ... + 2^{two_adicity - 1} + // b_{two_adicity - 1} + 2^two_adicity ( x_0 + q^1 x_1 + .. + + // q^{q_adicity-1} x_{q_adicity-1}) We want to return + // j = b_0 (n/2) + b_1 (n/ 2^2) + ... + b_{two_adicity-1} (n/ 2^two_adicity) + // + x_0 (n / 2^two_adicity / q) + .. 
+ x_{q_adicity-1} (n / 2^two_adicity / + // q^q_adicity) + let mut res = 0; + let mut shift = n; + + for _ in 0..two_adicity { + shift /= 2; + res += (i % 2) * shift; + i /= 2; + } + + for _ in 0..q_adicity { + shift /= q; + res += (i % q) * shift; + i /= q; + } + + res +} + +fn best_mixed_domain_size(min_size: usize) -> usize { + let mut best = usize::max_value(); + let small_subgroup_base_adicity = F::FftParams::SMALL_SUBGROUP_BASE_ADICITY.unwrap(); + let small_subgroup_base = usize::try_from(F::FftParams::SMALL_SUBGROUP_BASE.unwrap()).unwrap(); + + for b in 0..=small_subgroup_base_adicity { + let mut r = small_subgroup_base.pow(b); + + let mut two_adicity = 0; + while r < min_size { + r *= 2; + two_adicity += 1; + } + + if two_adicity <= F::FftParams::TWO_ADICITY { + best = min(best, r); + } + } + + best +} + +pub(crate) fn serial_mixed_radix_fft, F: FftField>( + a: &mut [T], + omega: F, + two_adicity: u32, +) { + // Conceptually, this FFT first splits into 2 sub-arrays two_adicity many times, + // and then splits into q sub-arrays q_adicity many times. + + let n = a.len(); + let q = usize::try_from(F::FftParams::SMALL_SUBGROUP_BASE.unwrap()).unwrap(); + + let q_adicity = k_adicity(q, n); + let q_part = q.pow(q_adicity); + let two_part = 1 << two_adicity; + + assert_eq!(n, q_part * two_part); + + let mut m = 1; // invariant: m = 2^{s-1} + + if q_adicity > 0 { + // If we're using the other radix, we have to do two things differently than in + // the radix 2 case. 1. Applying the index permutation is a bit more + // complicated. It isn't an involution (like it is in the radix 2 case) + // so we need to remember which elements we've moved as we go along + // and can't use the trick of just swapping when processing the first element of + // a 2-cycle. + // + // 2. We need to do q_adicity many merge passes, each of which is a bit more + // complicated than the specialized q=2 case. + + // Applying the permutation + let mut seen = vec![false; n]; + for k in 0..n { + let mut i = k; + let mut a_i = a[i]; + while !seen[i] { + let dest = mixed_radix_fft_permute(two_adicity, q_adicity, q, n, i); + + let a_dest = a[dest]; + a[dest] = a_i; + + seen[i] = true; + + a_i = a_dest; + i = dest; + } + } + + let omega_q = omega.pow(&[(n / q) as u64]); + let mut qth_roots = Vec::with_capacity(q); + qth_roots.push(F::one()); + for i in 1..q { + qth_roots.push(qth_roots[i - 1] * omega_q); + } + + let mut terms = vec![T::zero(); q - 1]; + + // Doing the q_adicity passes. 
+ for _ in 0..q_adicity { + let w_m = omega.pow(&[(n / (q * m)) as u64]); + let mut k = 0; + while k < n { + let mut w_j = F::one(); // w_j is omega_m ^ j + for j in 0..m { + let base_term = a[k + j]; + let mut w_j_i = w_j; + for i in 1..q { + terms[i - 1] = a[k + j + i * m]; + terms[i - 1] *= w_j_i; + w_j_i *= w_j; + } + + for i in 0..q { + a[k + j + i * m] = base_term; + for l in 1..q { + let mut tmp = terms[l - 1]; + tmp *= qth_roots[(i * l) % q]; + a[k + j + i * m] += tmp; + } + } + + w_j *= w_m; + } + + k += q * m; + } + m *= q; + } + } else { + // swapping in place (from Storer's book) + for k in 0..n { + let rk = bitreverse(k as u32, two_adicity) as usize; + if k < rk { + a.swap(k, rk); + } + } + } + + for _ in 0..two_adicity { + // w_m is 2^s-th root of unity now + let w_m = omega.pow(&[(n / (2 * m)) as u64]); + + let mut k = 0; + while k < n { + let mut w = F::one(); + for j in 0..m { + let mut t = a[(k + m) + j]; + t *= w; + a[(k + m) + j] = a[k + j]; + a[(k + m) + j] -= t; + a[k + j] += t; + w *= w_m; + } + k += 2 * m; + } + m *= 2; + } +} + +#[cfg(test)] +mod tests { + use crate::polynomial::Polynomial; + use crate::{EvaluationDomain, MixedRadixEvaluationDomain}; + use ark_ff::{Field, Zero}; + use ark_std::rand::Rng; + use ark_std::test_rng; + use ark_test_curves::bn384_small_two_adicity::Fq as Fr; + + #[test] + fn vanishing_polynomial_evaluation() { + let rng = &mut test_rng(); + for coeffs in 0..12 { + let domain = MixedRadixEvaluationDomain::::new(coeffs).unwrap(); + let z = domain.vanishing_polynomial(); + for _ in 0..100 { + let point: Fr = rng.gen(); + assert_eq!( + z.evaluate(&point), + domain.evaluate_vanishing_polynomial(point) + ) + } + } + } + + #[test] + fn vanishing_polynomial_vanishes_on_domain() { + for coeffs in 0..1000 { + let domain = MixedRadixEvaluationDomain::::new(coeffs).unwrap(); + let z = domain.vanishing_polynomial(); + for point in domain.elements() { + assert!(z.evaluate(&point).is_zero()) + } + } + } + + #[test] + fn size_of_elements() { + for coeffs in 1..12 { + let size = 1 << coeffs; + let domain = MixedRadixEvaluationDomain::::new(size).unwrap(); + let domain_size = domain.size(); + assert_eq!(domain_size, domain.elements().count()); + } + } + + #[test] + fn elements_contents() { + for coeffs in 1..12 { + let size = 1 << coeffs; + let domain = MixedRadixEvaluationDomain::::new(size).unwrap(); + for (i, element) in domain.elements().enumerate() { + assert_eq!(element, domain.group_gen.pow([i as u64])); + } + } + } + + #[test] + #[cfg(feature = "parallel")] + fn parallel_fft_consistency() { + use super::serial_mixed_radix_fft; + use crate::domain::utils::parallel_fft; + use ark_ff::PrimeField; + use ark_std::{test_rng, vec::Vec}; + use ark_test_curves::bn384_small_two_adicity::Fq as Fr; + use core::cmp::min; + + fn test_consistency(rng: &mut R, max_coeffs: u32) { + for _ in 0..5 { + for log_d in 0..max_coeffs { + let d = 1 << log_d; + + let mut v1 = (0..d).map(|_| F::rand(rng)).collect::>(); + let mut v2 = v1.clone(); + + let domain = MixedRadixEvaluationDomain::new(v1.len()).unwrap(); + + for log_cpus in log_d..min(log_d + 1, 3) { + parallel_fft::( + &mut v1, + domain.group_gen, + log_d, + log_cpus, + serial_mixed_radix_fft::, + ); + serial_mixed_radix_fft::(&mut v2, domain.group_gen, log_d); + + assert_eq!(v1, v2); + } + } + } + } + + let rng = &mut test_rng(); + + test_consistency::(rng, 16); + } +} diff --git a/arkworks/algebra/poly/src/domain/mod.rs b/arkworks/algebra/poly/src/domain/mod.rs new file mode 100644 index 00000000..8e5a1cb2 --- 
/dev/null +++ b/arkworks/algebra/poly/src/domain/mod.rs @@ -0,0 +1,258 @@ +//! This module contains an `EvaluationDomain` abstraction for +//! performing various kinds of polynomial arithmetic on top of +//! fields that are friendly to fast-fourier-transforms (FFTs). +//! +//! A field is FFT-friendly if it contains enough +//! roots of unity to perform the FFT in O(n log n) time. +//! These roots of unity comprise the domain over which +//! polynomial arithmetic is performed. + +use ark_ff::FftField; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use ark_std::rand::Rng; +use ark_std::{fmt, hash, vec::Vec}; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +pub mod general; +pub mod mixed_radix; +pub mod radix2; +pub(crate) mod utils; + +pub use general::GeneralEvaluationDomain; +pub use mixed_radix::MixedRadixEvaluationDomain; +pub use radix2::Radix2EvaluationDomain; + +/// Defines a domain over which finite field (I)FFTs can be performed. The +/// size of the supported FFT depends on the size of the multiplicative +/// subgroup. For efficiency, we recommend that the field has at least one large +/// subgroup generated by a root of unity. +pub trait EvaluationDomain: + Copy + Clone + hash::Hash + Eq + PartialEq + fmt::Debug + CanonicalSerialize + CanonicalDeserialize +{ + /// The type of the elements iterator. + type Elements: Iterator + Sized; + + /// Sample an element that is *not* in the domain. + fn sample_element_outside_domain(&self, rng: &mut R) -> F { + let mut t = F::rand(rng); + while self.evaluate_vanishing_polynomial(t).is_zero() { + t = F::rand(rng); + } + t + } + + /// Construct a domain that is large enough for evaluations of a polynomial + /// having `num_coeffs` coefficients. + fn new(num_coeffs: usize) -> Option; + + /// Return the size of a domain that is large enough for evaluations of a + /// polynomial having `num_coeffs` coefficients. + fn compute_size_of_domain(num_coeffs: usize) -> Option; + + /// Return the size of `self`. + fn size(&self) -> usize; + + /// Return the size of `self` as a field element. + fn size_as_field_element(&self) -> F { + F::from(self.size() as u64) + } + + /// Compute a FFT. + #[inline] + fn fft>(&self, coeffs: &[T]) -> Vec { + let mut coeffs = coeffs.to_vec(); + self.fft_in_place(&mut coeffs); + coeffs + } + + /// Compute a FFT, modifying the vector in place. + fn fft_in_place>(&self, coeffs: &mut Vec); + + /// Compute a IFFT. + #[inline] + fn ifft>(&self, evals: &[T]) -> Vec { + let mut evals = evals.to_vec(); + self.ifft_in_place(&mut evals); + evals + } + + /// Compute a IFFT, modifying the vector in place. + fn ifft_in_place>(&self, evals: &mut Vec); + + /// Multiply the `i`-th element of `coeffs` with `g^i`. + fn distribute_powers>(coeffs: &mut [T], g: F) { + Self::distribute_powers_and_mul_by_const(coeffs, g, F::one()); + } + + /// Multiply the `i`-th element of `coeffs` with `c*g^i`. + #[cfg(not(feature = "parallel"))] + fn distribute_powers_and_mul_by_const>(coeffs: &mut [T], g: F, c: F) { + // invariant: pow = c*g^i at the ith iteration of the loop + let mut pow = c; + coeffs.iter_mut().for_each(|coeff| { + *coeff *= pow; + pow *= &g + }) + } + + /// Multiply the `i`-th element of `coeffs` with `c*g^i`. 
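The trait's `fft`/`ifft` above combine with pointwise products (see `mul_polynomials_in_evaluation_domain` later in this trait) to give a fast polynomial product. A sketch, assuming the BLS12-381 scalar field from `ark-test-curves`:

```rust
use ark_ff::UniformRand;
use ark_poly::polynomial::univariate::DensePolynomial;
use ark_poly::{EvaluationDomain, GeneralEvaluationDomain, Polynomial, UVPolynomial};
use ark_test_curves::bls12_381::Fr;

fn main() {
    let rng = &mut ark_std::test_rng();
    let a = DensePolynomial::<Fr>::rand(5, rng);
    let b = DensePolynomial::<Fr>::rand(6, rng);
    // deg(a*b) = 11, so the domain must hold at least 12 coefficients;
    // the constructor rounds this up to a subgroup of size 16.
    let domain = GeneralEvaluationDomain::<Fr>::new(12).unwrap();
    let ab_evals = domain.mul_polynomials_in_evaluation_domain(
        &domain.fft(a.coeffs()),
        &domain.fft(b.coeffs()),
    );
    let ab = DensePolynomial::from_coefficients_vec(domain.ifft(&ab_evals));
    // Spot-check the product at a random point.
    let x = Fr::rand(rng);
    assert_eq!(ab.evaluate(&x), a.evaluate(&x) * b.evaluate(&x));
}
```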
+ #[cfg(feature = "parallel")] + fn distribute_powers_and_mul_by_const>(coeffs: &mut [T], g: F, c: F) { + use ark_std::cmp::max; + let min_parallel_chunk_size = 1024; + let num_cpus_available = rayon::current_num_threads(); + let num_elem_per_thread = max(coeffs.len() / num_cpus_available, min_parallel_chunk_size); + + ark_std::cfg_chunks_mut!(coeffs, num_elem_per_thread) + .enumerate() + .for_each(|(i, chunk)| { + let offset = c * g.pow([(i * num_elem_per_thread) as u64]); + let mut pow = offset; + chunk.iter_mut().for_each(|coeff| { + *coeff *= pow; + pow *= &g + }) + }); + } + + /// Compute a FFT over a coset of the domain. + #[inline] + fn coset_fft>(&self, coeffs: &[T]) -> Vec { + let mut coeffs = coeffs.to_vec(); + self.coset_fft_in_place(&mut coeffs); + coeffs + } + + /// Compute a FFT over a coset of the domain, modifying the input vector + /// in place. + #[inline] + fn coset_fft_in_place>(&self, coeffs: &mut Vec) { + Self::distribute_powers(coeffs, F::multiplicative_generator()); + self.fft_in_place(coeffs); + } + + /// Compute a IFFT over a coset of the domain. + #[inline] + fn coset_ifft>(&self, evals: &[T]) -> Vec { + let mut evals = evals.to_vec(); + self.coset_ifft_in_place(&mut evals); + evals + } + + /// Compute a IFFT over a coset of the domain, modifying the input vector in + /// place. + #[inline] + fn coset_ifft_in_place>(&self, evals: &mut Vec) { + self.ifft_in_place(evals); + Self::distribute_powers(evals, F::multiplicative_generator().inverse().unwrap()); + } + + /// Evaluate all the lagrange polynomials defined by this domain at the + /// point `tau`. This is computed in time O(|domain|). + /// Then given the evaluations of a degree d polynomial P over this domain, + /// where d < |domain|, `P(tau)` can be computed as + /// `P(tau) = sum_{i in [|Domain|]} L_{i, Domain}(tau) * P(g^i)`. + /// `L_{i, Domain}` is the value of the i-th lagrange coefficient + /// in the returned vector. + fn evaluate_all_lagrange_coefficients(&self, tau: F) -> Vec; + + /// Return the sparse vanishing polynomial. + fn vanishing_polynomial(&self) -> crate::univariate::SparsePolynomial; + + /// This evaluates the vanishing polynomial for this domain at tau. + fn evaluate_vanishing_polynomial(&self, tau: F) -> F; + + /// Returns the `i`-th element of the domain. + fn element(&self, i: usize) -> F; + + /// Return an iterator over the elements of the domain. + fn elements(&self) -> Self::Elements; + + /// The target polynomial is the zero polynomial in our + /// evaluation domain, so we must perform division over + /// a coset. + fn divide_by_vanishing_poly_on_coset_in_place(&self, evals: &mut [F]) { + let i = self + .evaluate_vanishing_polynomial(F::multiplicative_generator()) + .inverse() + .unwrap(); + + ark_std::cfg_iter_mut!(evals).for_each(|eval| *eval *= &i); + } + + /// Given an index which assumes the first elements of this domain are the + /// elements of another (sub)domain, + /// this returns the actual index into this domain. + fn reindex_by_subdomain(&self, other: Self, index: usize) -> usize { + assert!(self.size() >= other.size()); + // Let this subgroup be G, and the subgroup we're re-indexing by be S. + // Since its a subgroup, the 0th element of S is at index 0 in G, the first + // element of S is at index |G|/|S|, the second at 2*|G|/|S|, etc. 
+ // Thus for an index i that corresponds to S, the index in G is i*|G|/|S| + let period = self.size() / other.size(); + if index < other.size() { + index * period + } else { + // Let i now be the index of this element in G \ S + // Let x be the number of elements in G \ S, for every element in S. Then x = + // (|G|/|S| - 1). At index i in G \ S, the number of elements in S + // that appear before the index in G to which i corresponds to, is + // floor(i / x) + 1. The +1 is because index 0 of G is S_0, so the + // position is offset by at least one. The floor(i / x) term is + // because after x elements in G \ S, there is one more element from S + // that will have appeared in G. + let i = index - other.size(); + let x = period - 1; + i + (i / x) + 1 + } + } + + /// Perform O(n) multiplication of two polynomials that are presented by + /// their evaluations in the domain. + /// Returns the evaluations of the product over the domain. + /// + /// Assumes that the domain is large enough to allow for successful + /// interpolation after multiplication. + #[must_use] + fn mul_polynomials_in_evaluation_domain(&self, self_evals: &[F], other_evals: &[F]) -> Vec { + assert_eq!(self_evals.len(), other_evals.len()); + let mut result = self_evals.to_vec(); + + ark_std::cfg_iter_mut!(result) + .zip(other_evals) + .for_each(|(a, b)| *a *= b); + + result + } +} + +/// Types that can be FFT-ed must implement this trait. +pub trait DomainCoeff: + Copy + + Send + + Sync + + core::ops::Add + + core::ops::Sub + + core::ops::AddAssign + + core::ops::SubAssign + + ark_ff::Zero + + core::ops::MulAssign +{ +} + +impl DomainCoeff for T +where + F: FftField, + T: Copy + + Send + + Sync + + core::ops::Add + + core::ops::Sub + + core::ops::AddAssign + + core::ops::SubAssign + + ark_ff::Zero + + core::ops::MulAssign, +{ +} diff --git a/arkworks/algebra/poly/src/domain/radix2/fft.rs b/arkworks/algebra/poly/src/domain/radix2/fft.rs new file mode 100644 index 00000000..8450d8f9 --- /dev/null +++ b/arkworks/algebra/poly/src/domain/radix2/fft.rs @@ -0,0 +1,307 @@ +// The code below is a port of the excellent library of https://github.com/kwantam/fffft by Riad S. Wahby +// to the arkworks APIs + +use crate::domain::utils::compute_powers_serial; +use crate::domain::{radix2::*, DomainCoeff}; +use ark_ff::FftField; +use ark_std::{cfg_chunks_mut, vec::Vec}; +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +#[derive(PartialEq, Eq, Debug)] +enum FFTOrder { + /// Both the input and the output of the FFT must be in-order. + II, + /// The input of the FFT must be in-order, but the output does not have to be. + IO, + /// The input of the FFT can be out of order, but the output must be in-order. 
+ OI, +} + +impl Radix2EvaluationDomain { + pub(crate) fn in_order_fft_in_place>(&self, x_s: &mut [T]) { + self.fft_helper_in_place(x_s, FFTOrder::II) + } + + pub(crate) fn in_order_ifft_in_place>(&self, x_s: &mut [T]) { + self.ifft_helper_in_place(x_s, FFTOrder::II); + ark_std::cfg_iter_mut!(x_s).for_each(|val| *val *= self.size_inv); + } + + pub(crate) fn in_order_coset_ifft_in_place>(&self, x_s: &mut [T]) { + self.ifft_helper_in_place(x_s, FFTOrder::II); + let coset_shift = self.generator_inv; + Self::distribute_powers_and_mul_by_const(x_s, coset_shift, self.size_inv); + } + + fn fft_helper_in_place>(&self, x_s: &mut [T], ord: FFTOrder) { + use FFTOrder::*; + + let log_len = ark_std::log2(x_s.len()); + + if ord == OI { + self.oi_helper(x_s, self.group_gen); + } else { + self.io_helper(x_s, self.group_gen); + } + + if ord == II { + derange(x_s, log_len); + } + } + + // Handles doing an IFFT with handling of being in order and out of order. + // The results here must all be divided by |x_s|, + // which is left up to the caller to do. + fn ifft_helper_in_place>(&self, x_s: &mut [T], ord: FFTOrder) { + use FFTOrder::*; + + let log_len = ark_std::log2(x_s.len()); + + if ord == II { + derange(x_s, log_len); + } + + if ord == IO { + self.io_helper(x_s, self.group_gen_inv); + } else { + self.oi_helper(x_s, self.group_gen_inv); + } + } + + /// Computes the first `self.size / 2` roots of unity for the entire domain. + /// e.g. for the domain [1, g, g^2, ..., g^{n - 1}], it computes + // [1, g, g^2, ..., g^{(n/2) - 1}] + #[cfg(not(feature = "parallel"))] + pub(super) fn roots_of_unity(&self, root: F) -> Vec { + compute_powers_serial((self.size as usize) / 2, root) + } + + /// Computes the first `self.size / 2` roots of unity. + #[cfg(feature = "parallel")] + pub(super) fn roots_of_unity(&self, root: F) -> Vec { + // TODO: check if this method can replace parallel compute powers. + let log_size = ark_std::log2(self.size as usize); + // early exit for short inputs + if log_size <= LOG_ROOTS_OF_UNITY_PARALLEL_SIZE { + compute_powers_serial((self.size as usize) / 2, root) + } else { + let mut temp = root; + // w, w^2, w^4, w^8, ..., w^(2^(log_size - 1)) + let log_powers: Vec = (0..(log_size - 1)) + .map(|_| { + let old_value = temp; + temp.square_in_place(); + old_value + }) + .collect(); + + // allocate the return array and start the recursion + let mut powers = vec![F::zero(); 1 << (log_size - 1)]; + Self::roots_of_unity_recursive(&mut powers, &log_powers); + powers + } + } + + #[cfg(feature = "parallel")] + fn roots_of_unity_recursive(out: &mut [F], log_powers: &[F]) { + assert_eq!(out.len(), 1 << log_powers.len()); + // base case: just compute the powers sequentially, + // g = log_powers[0], out = [1, g, g^2, ...] + if log_powers.len() <= LOG_ROOTS_OF_UNITY_PARALLEL_SIZE as usize { + out[0] = F::one(); + for idx in 1..out.len() { + out[idx] = out[idx - 1] * log_powers[0]; + } + return; + } + + // recursive case: + // 1. split log_powers in half + let (lr_lo, lr_hi) = log_powers.split_at((1 + log_powers.len()) / 2); + let mut scr_lo = vec![F::default(); 1 << lr_lo.len()]; + let mut scr_hi = vec![F::default(); 1 << lr_hi.len()]; + // 2. compute each half individually + rayon::join( + || Self::roots_of_unity_recursive(&mut scr_lo, lr_lo), + || Self::roots_of_unity_recursive(&mut scr_hi, lr_hi), + ); + // 3. recombine halves + // At this point, out is a blank slice. 
+ out.par_chunks_mut(scr_lo.len()) + .zip(&scr_hi) + .for_each(|(out_chunk, scr_hi)| { + for (out_elem, scr_lo) in out_chunk.iter_mut().zip(&scr_lo) { + *out_elem = *scr_hi * scr_lo; + } + }); + } + + #[inline(always)] + fn butterfly_fn_io>(((lo, hi), root): ((&mut T, &mut T), &F)) { + let neg = *lo - *hi; + *lo += *hi; + *hi = neg; + *hi *= *root; + } + + #[inline(always)] + fn butterfly_fn_oi>(((lo, hi), root): ((&mut T, &mut T), &F)) { + *hi *= *root; + let neg = *lo - *hi; + *lo += *hi; + *hi = neg; + } + + fn apply_butterfly, G: Fn(((&mut T, &mut T), &F)) + Copy + Sync + Send>( + g: G, + xi: &mut [T], + roots: &[F], + step: usize, + chunk_size: usize, + num_chunks: usize, + max_threads: usize, + gap: usize, + ) { + cfg_chunks_mut!(xi, chunk_size).for_each(|cxi| { + let (lo, hi) = cxi.split_at_mut(gap); + // If the chunk is sufficiently big that parallelism helps, + // we parallelize the butterfly operation within the chunk. + + if gap > MIN_GAP_SIZE_FOR_PARALLELISATION && num_chunks < max_threads { + cfg_iter_mut!(lo) + .zip(hi) + .zip(cfg_iter!(roots).step_by(step)) + .for_each(g); + } else { + lo.iter_mut() + .zip(hi) + .zip(roots.iter().step_by(step)) + .for_each(g); + } + }); + } + + fn io_helper>(&self, xi: &mut [T], root: F) { + let mut roots = self.roots_of_unity(root); + let mut step = 1; + let mut first = true; + + #[cfg(feature = "parallel")] + let max_threads = rayon::current_num_threads(); + #[cfg(not(feature = "parallel"))] + let max_threads = 1; + + let mut gap = xi.len() / 2; + while gap > 0 { + // each butterfly cluster uses 2*gap positions + let chunk_size = 2 * gap; + let num_chunks = xi.len() / chunk_size; + + // Only compact roots to achieve cache locality/compactness if + // the roots lookup is done a significant amount of times + // Which also implies a large lookup stride. + if num_chunks >= MIN_NUM_CHUNKS_FOR_COMPACTION { + if !first { + roots = cfg_into_iter!(roots).step_by(step * 2).collect() + } + step = 1; + roots.shrink_to_fit(); + } else { + step = num_chunks; + } + first = false; + + Self::apply_butterfly( + Self::butterfly_fn_io, + xi, + &roots[..], + step, + chunk_size, + num_chunks, + max_threads, + gap, + ); + + gap /= 2; + } + } + + fn oi_helper>(&self, xi: &mut [T], root: F) { + let roots_cache = self.roots_of_unity(root); + + // The `cmp::min` is only necessary for the case where + // `MIN_NUM_CHUNKS_FOR_COMPACTION = 1`. Else, notice that we compact + // the roots cache by a stride of at least `MIN_NUM_CHUNKS_FOR_COMPACTION`. + + let compaction_max_size = core::cmp::min( + roots_cache.len() / 2, + roots_cache.len() / MIN_NUM_CHUNKS_FOR_COMPACTION, + ); + let mut compacted_roots = vec![F::default(); compaction_max_size]; + + #[cfg(feature = "parallel")] + let max_threads = rayon::current_num_threads(); + #[cfg(not(feature = "parallel"))] + let max_threads = 1; + + let mut gap = 1; + while gap < xi.len() { + // each butterfly cluster uses 2*gap positions + let chunk_size = 2 * gap; + let num_chunks = xi.len() / chunk_size; + + // Only compact roots to achieve cache locality/compactness if + // the roots lookup is done a significant amount of times + // Which also implies a large lookup stride. 
+ let (roots, step) = if num_chunks >= MIN_NUM_CHUNKS_FOR_COMPACTION && gap < xi.len() / 2 + { + cfg_iter_mut!(compacted_roots[..gap]) + .zip(cfg_iter!(roots_cache[..(gap * num_chunks)]).step_by(num_chunks)) + .for_each(|(a, b)| *a = *b); + (&compacted_roots[..gap], 1) + } else { + (&roots_cache[..], num_chunks) + }; + + Self::apply_butterfly( + Self::butterfly_fn_oi, + xi, + roots, + step, + chunk_size, + num_chunks, + max_threads, + gap, + ); + + gap *= 2; + } + } +} + +/// The minimum number of chunks at which root compaction +/// is beneficial. +const MIN_NUM_CHUNKS_FOR_COMPACTION: usize = 1 << 7; + +/// The minimum size of a chunk at which parallelization of `butterfly`s is beneficial. +/// This value was chosen empirically. +const MIN_GAP_SIZE_FOR_PARALLELISATION: usize = 1 << 10; + +// minimum size at which to parallelize. +#[cfg(feature = "parallel")] +const LOG_ROOTS_OF_UNITY_PARALLEL_SIZE: u32 = 7; + +#[inline] +fn bitrev(a: u64, log_len: u32) -> u64 { + a.reverse_bits() >> (64 - log_len) +} + +fn derange(xi: &mut [T], log_len: u32) { + for idx in 1..(xi.len() as u64 - 1) { + let ridx = bitrev(idx, log_len); + if idx < ridx { + xi.swap(idx as usize, ridx as usize); + } + } +} diff --git a/arkworks/algebra/poly/src/domain/radix2/mod.rs b/arkworks/algebra/poly/src/domain/radix2/mod.rs new file mode 100644 index 00000000..2eeebf86 --- /dev/null +++ b/arkworks/algebra/poly/src/domain/radix2/mod.rs @@ -0,0 +1,489 @@ +//! This module defines `Radix2EvaluationDomain`, an `EvaluationDomain` +//! for performing various kinds of polynomial arithmetic on top of +//! fields that are FFT-friendly. `Radix2EvaluationDomain` supports +//! FFTs of size at most `2^F::TWO_ADICITY`. + +pub use crate::domain::utils::Elements; +use crate::domain::{DomainCoeff, EvaluationDomain}; +use ark_ff::{FftField, FftParameters}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError}; +use ark_std::{ + convert::TryFrom, + fmt, + io::{Read, Write}, + vec::Vec, +}; + +mod fft; + +/// Defines a domain over which finite field (I)FFTs can be performed. Works +/// only for fields that have a large multiplicative subgroup of size that is +/// a power-of-2. +#[derive(Copy, Clone, Hash, Eq, PartialEq, CanonicalSerialize, CanonicalDeserialize)] +pub struct Radix2EvaluationDomain { + /// The size of the domain. + pub size: u64, + /// `log_2(self.size)`. + pub log_size_of_group: u32, + /// Size of the domain as a field element. + pub size_as_field_element: F, + /// Inverse of the size in the field. + pub size_inv: F, + /// A generator of the subgroup. + pub group_gen: F, + /// Inverse of the generator of the subgroup. + pub group_gen_inv: F, + /// Multiplicative generator of the finite field. + pub generator_inv: F, +} + +impl fmt::Debug for Radix2EvaluationDomain { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Radix-2 multiplicative subgroup of size {}", self.size) + } +} + +impl EvaluationDomain for Radix2EvaluationDomain { + type Elements = Elements; + + /// Construct a domain that is large enough for evaluations of a polynomial + /// having `num_coeffs` coefficients. 
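The radix-2 constructor below rounds `num_coeffs` up to the next power of two and rejects sizes beyond the field's two-adicity. A quick sanity-check sketch, assuming the BLS12-381 scalar field from `ark-test-curves`:

```rust
use ark_poly::{EvaluationDomain, Radix2EvaluationDomain};
use ark_test_curves::bls12_381::Fr;

fn main() {
    // 12 coefficients round up to a domain of size 16 = 2^4.
    let domain = Radix2EvaluationDomain::<Fr>::new(12).unwrap();
    assert_eq!(domain.size(), 16);
    assert_eq!(
        Radix2EvaluationDomain::<Fr>::compute_size_of_domain(12),
        Some(16)
    );
}
```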
+    fn new(num_coeffs: usize) -> Option<Self> {
+        // Compute the size of our evaluation domain
+        let size = if num_coeffs.is_power_of_two() {
+            num_coeffs as u64
+        } else {
+            num_coeffs.next_power_of_two() as u64
+        };
+        let log_size_of_group = size.trailing_zeros();
+
+        // libfqfft uses a strict inequality (>) here; see
+        // https://github.com/scipr-lab/libfqfft/blob/e0183b2cef7d4c5deb21a6eaf3fe3b586d738fe0/libfqfft/evaluation_domain/domains/basic_radix2_domain.tcc#L33
+        if log_size_of_group > F::FftParams::TWO_ADICITY {
+            return None;
+        }
+
+        // Compute the generator for the multiplicative subgroup.
+        // It should be the 2^(log_size_of_group) root of unity.
+        let group_gen = F::get_root_of_unity(usize::try_from(size).unwrap())?;
+        // Check that it is indeed the 2^(log_size_of_group) root of unity.
+        debug_assert_eq!(group_gen.pow([size]), F::one());
+        let size_as_field_element = F::from(size);
+        let size_inv = size_as_field_element.inverse()?;
+
+        Some(Radix2EvaluationDomain {
+            size,
+            log_size_of_group,
+            size_as_field_element,
+            size_inv,
+            group_gen,
+            group_gen_inv: group_gen.inverse()?,
+            generator_inv: F::multiplicative_generator().inverse()?,
+        })
+    }
+
+    fn compute_size_of_domain(num_coeffs: usize) -> Option<usize> {
+        let size = num_coeffs.next_power_of_two();
+        if size.trailing_zeros() > F::FftParams::TWO_ADICITY {
+            None
+        } else {
+            Some(size)
+        }
+    }
+
+    #[inline]
+    fn size(&self) -> usize {
+        usize::try_from(self.size).unwrap()
+    }
+
+    #[inline]
+    fn fft_in_place<T: DomainCoeff<F>>(&self, coeffs: &mut Vec<T>) {
+        coeffs.resize(self.size(), T::zero());
+        self.in_order_fft_in_place(&mut *coeffs)
+    }
+
+    #[inline]
+    fn ifft_in_place<T: DomainCoeff<F>>(&self, evals: &mut Vec<T>) {
+        evals.resize(self.size(), T::zero());
+        self.in_order_ifft_in_place(&mut *evals);
+    }
+
+    #[inline]
+    fn coset_ifft_in_place<T: DomainCoeff<F>>(&self, evals: &mut Vec<T>) {
+        evals.resize(self.size(), T::zero());
+        self.in_order_coset_ifft_in_place(&mut *evals);
+    }
+
+    fn evaluate_all_lagrange_coefficients(&self, tau: F) -> Vec<F> {
+        // Evaluate all Lagrange polynomials at tau to get the lagrange coefficients.
+        // Define the following as
+        // - H: The coset we are in, with generator g and offset h
+        // - m: The size of the coset H
+        // - Z_H: The vanishing polynomial for H. Z_H(x) = prod_{i in m} (x - hg^i) = x^m - h^m
+        // - v_i: A sequence of values, where v_0 = 1/(m * h^(m-1)), and v_{i + 1} = g * v_i
+        //
+        // We then compute L_{i,H}(tau) as `L_{i,H}(tau) = Z_H(tau) * v_i / (tau - h g^i)`
+        //
+        // However, if tau is in H, both the numerator and denominator equal 0
+        // when i corresponds to the value tau equals, and the coefficient is 0 everywhere else.
+        // We handle this case separately, and we can easily detect it by checking whether the vanishing poly is 0.
+        let size = self.size();
+        // TODO: Make this use the vanishing polynomial
+        let z_h_at_tau = tau.pow(&[self.size]) - F::one();
+        let domain_offset = F::one();
+        if z_h_at_tau.is_zero() {
+            // In this case, we know that tau = hg^i, for some value i.
+            // The i-th lagrange coefficient is then simply 1,
+            // and all other lagrange coefficients are 0.
+            // Thus we find i by brute force.
+            let mut u = vec![F::zero(); size];
+            let mut omega_i = domain_offset;
+            for u_i in u.iter_mut().take(size) {
+                if omega_i == tau {
+                    *u_i = F::one();
+                    break;
+                }
+                omega_i *= &self.group_gen;
+            }
+            u
+        } else {
+            // In this case we have to compute `Z_H(tau) * v_i / (tau - h g^i)`
+            // for i in 0..size
+            // We actually compute this by computing (Z_H(tau) * v_i)^{-1} * (tau - h g^i)
+            // and then batch inverting to get the correct lagrange coefficients.
+            // We let `l_i = (Z_H(tau) * v_i)^-1` and `r_i = tau - h g^i`
+            // Notice that since Z_H(tau) is i-independent,
+            // and v_i = g * v_{i-1}, it follows that
+            // l_i = g^-1 * l_{i-1}
+            // TODO: consider caching the computation of l_i to save N multiplications
+            use ark_ff::fields::batch_inversion;
+
+            // v_0_inv = m * h^(m-1)
+            let v_0_inv = F::from(self.size) * domain_offset.pow(&[self.size - 1]);
+            let mut l_i = z_h_at_tau.inverse().unwrap() * v_0_inv;
+            let mut negative_cur_elem = -domain_offset;
+            let mut lagrange_coefficients_inverse = vec![F::zero(); size];
+            for i in 0..size {
+                let r_i = tau + negative_cur_elem;
+                lagrange_coefficients_inverse[i] = l_i * r_i;
+                // Increment l_i and negative_cur_elem
+                l_i *= &self.group_gen_inv;
+                negative_cur_elem *= &self.group_gen;
+            }
+
+            // Invert the lagrange coefficients inverse, to get the actual coefficients,
+            // and return these
+            batch_inversion(lagrange_coefficients_inverse.as_mut_slice());
+            lagrange_coefficients_inverse
+        }
+    }
+
+    fn vanishing_polynomial(&self) -> crate::univariate::SparsePolynomial<F> {
+        let coeffs = vec![(0, -F::one()), (self.size(), F::one())];
+        crate::univariate::SparsePolynomial::from_coefficients_vec(coeffs)
+    }
+
+    /// This evaluates the vanishing polynomial for this domain at tau.
+    /// For multiplicative subgroups, this polynomial is `z(X) = X^self.size - 1`.
+    fn evaluate_vanishing_polynomial(&self, tau: F) -> F {
+        tau.pow(&[self.size]) - F::one()
+    }
+
+    /// Returns the `i`-th element of the domain, where elements are ordered by
+    /// their power of the generator which they correspond to.
+    /// e.g. the `i`-th element is g^i
+    fn element(&self, i: usize) -> F {
+        // TODO: Consider precomputed exponentiation tables if we need this to be faster.
+        self.group_gen.pow(&[i as u64])
+    }
+
+    /// Return an iterator over the elements of the domain.
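+    ///
+    /// Editor's sketch (not in the original docs; `Fr` is a placeholder for a
+    /// concrete FFT-friendly field): elements come out in the order 1, g, g^2, ...
+    ///
+    /// ```ignore
+    /// let domain = Radix2EvaluationDomain::<Fr>::new(4).unwrap();
+    /// let elems: Vec<Fr> = domain.elements().collect();
+    /// assert_eq!(elems[0], Fr::one());
+    /// assert_eq!(elems[1], domain.group_gen);
+    /// assert_eq!(elems.len(), 4);
+    /// ```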
+    fn elements(&self) -> Elements<F> {
+        Elements {
+            cur_elem: F::one(),
+            cur_pow: 0,
+            size: self.size,
+            group_gen: self.group_gen,
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::domain::Vec;
+    use crate::polynomial::{univariate::*, Polynomial, UVPolynomial};
+    use crate::{EvaluationDomain, Radix2EvaluationDomain};
+    use ark_ff::{FftField, Field, One, UniformRand, Zero};
+    use ark_std::rand::Rng;
+    use ark_std::test_rng;
+    use ark_test_curves::bls12_381::Fr;
+
+    #[test]
+    fn vanishing_polynomial_evaluation() {
+        let rng = &mut test_rng();
+        for coeffs in 0..10 {
+            let domain = Radix2EvaluationDomain::<Fr>::new(coeffs).unwrap();
+            let z = domain.vanishing_polynomial();
+            for _ in 0..100 {
+                let point: Fr = rng.gen();
+                assert_eq!(
+                    z.evaluate(&point),
+                    domain.evaluate_vanishing_polynomial(point)
+                )
+            }
+        }
+    }
+
+    #[test]
+    fn vanishing_polynomial_vanishes_on_domain() {
+        for coeffs in 0..1000 {
+            let domain = Radix2EvaluationDomain::<Fr>::new(coeffs).unwrap();
+            let z = domain.vanishing_polynomial();
+            for point in domain.elements() {
+                assert!(z.evaluate(&point).is_zero())
+            }
+        }
+    }
+
+    #[test]
+    fn size_of_elements() {
+        for coeffs in 1..10 {
+            let size = 1 << coeffs;
+            let domain = Radix2EvaluationDomain::<Fr>::new(size).unwrap();
+            let domain_size = domain.size();
+            assert_eq!(domain_size, domain.elements().count());
+        }
+    }
+
+    #[test]
+    fn elements_contents() {
+        for coeffs in 1..10 {
+            let size = 1 << coeffs;
+            let domain = Radix2EvaluationDomain::<Fr>::new(size).unwrap();
+            for (i, element) in domain.elements().enumerate() {
+                assert_eq!(element, domain.group_gen.pow([i as u64]));
+            }
+        }
+    }
+
+    /// Test that lagrange interpolation for a random polynomial at a random point works.
+    #[test]
+    fn non_systematic_lagrange_coefficients_test() {
+        for domain_dim in 1..10 {
+            let domain_size = 1 << domain_dim;
+            let domain = Radix2EvaluationDomain::<Fr>::new(domain_size).unwrap();
+            // Get random pt + lagrange coefficients
+            let rand_pt = Fr::rand(&mut test_rng());
+            let lagrange_coeffs = domain.evaluate_all_lagrange_coefficients(rand_pt);
+
+            // Sample the random polynomial, evaluate it over the domain and the random point.
+            let rand_poly = DensePolynomial::<Fr>::rand(domain_size - 1, &mut test_rng());
+            let poly_evals = domain.fft(rand_poly.coeffs());
+            let actual_eval = rand_poly.evaluate(&rand_pt);
+
+            // Do lagrange interpolation, and compare against the actual evaluation
+            let mut interpolated_eval = Fr::zero();
+            for i in 0..domain_size {
+                interpolated_eval += lagrange_coeffs[i] * poly_evals[i];
+            }
+            assert_eq!(actual_eval, interpolated_eval);
+        }
+    }
+
+    /// Test that the lagrange coefficients for a point in the domain are correct
+    #[test]
+    fn systematic_lagrange_coefficients_test() {
+        // This runs in time O(N^2) in the domain size, so keep the domain dimension low.
+        // We generate lagrange coefficients for each element in the domain.
+        for domain_dim in 1..5 {
+            let domain_size = 1 << domain_dim;
+            let domain = Radix2EvaluationDomain::<Fr>::new(domain_size).unwrap();
+            let all_domain_elements: Vec<Fr> = domain.elements().collect();
+            for i in 0..domain_size {
+                let lagrange_coeffs =
+                    domain.evaluate_all_lagrange_coefficients(all_domain_elements[i]);
+                for j in 0..domain_size {
+                    // Lagrange coefficient for the evaluation point, which should be 1
+                    if i == j {
+                        assert_eq!(lagrange_coeffs[j], Fr::one());
+                    } else {
+                        assert_eq!(lagrange_coeffs[j], Fr::zero());
+                    }
+                }
+            }
+        }
+    }
+
+    #[test]
+    fn test_fft_correctness() {
+        // Tests that the ffts output the correct result.
+        // This assumes a correct polynomial evaluation-at-a-point procedure.
+        // It tests consistency of FFT/IFFT, and coset_fft/coset_ifft,
+        // along with testing that each individual evaluation is correct.
+
+        // Runs in time O(degree^2)
+        let log_degree = 5;
+        let degree = 1 << log_degree;
+        let rand_poly = DensePolynomial::<Fr>::rand(degree - 1, &mut test_rng());
+
+        for log_domain_size in log_degree..(log_degree + 2) {
+            let domain_size = 1 << log_domain_size;
+            let domain = Radix2EvaluationDomain::<Fr>::new(domain_size).unwrap();
+            let poly_evals = domain.fft(&rand_poly.coeffs);
+            let poly_coset_evals = domain.coset_fft(&rand_poly.coeffs);
+            for (i, x) in domain.elements().enumerate() {
+                let coset_x = Fr::multiplicative_generator() * x;
+
+                assert_eq!(poly_evals[i], rand_poly.evaluate(&x));
+                assert_eq!(poly_coset_evals[i], rand_poly.evaluate(&coset_x));
+            }
+
+            let rand_poly_from_subgroup =
+                DensePolynomial::from_coefficients_vec(domain.ifft(&poly_evals));
+            let rand_poly_from_coset =
+                DensePolynomial::from_coefficients_vec(domain.coset_ifft(&poly_coset_evals));
+
+            assert_eq!(
+                rand_poly, rand_poly_from_subgroup,
+                "degree = {}, domain size = {}",
+                degree, domain_size
+            );
+            assert_eq!(
+                rand_poly, rand_poly_from_coset,
+                "degree = {}, domain size = {}",
+                degree, domain_size
+            );
+        }
+    }
+
+    #[test]
+    fn test_roots_of_unity() {
+        // Tests that the roots of unity result is the same as domain.elements()
+        let max_degree = 10;
+        for log_domain_size in 0..max_degree {
+            let domain_size = 1 << log_domain_size;
+            let domain = Radix2EvaluationDomain::<Fr>::new(domain_size).unwrap();
+            let actual_roots = domain.roots_of_unity(domain.group_gen);
+            for &value in &actual_roots {
+                assert!(domain.evaluate_vanishing_polynomial(value).is_zero());
+            }
+            let expected_roots_elements = domain.elements();
+            for (expected, &actual) in expected_roots_elements.zip(&actual_roots) {
+                assert_eq!(expected, actual);
+            }
+            assert_eq!(actual_roots.len(), domain_size / 2);
+        }
+    }
+
+    #[test]
+    #[cfg(feature = "parallel")]
+    fn parallel_fft_consistency() {
+        use ark_std::{test_rng, vec::Vec};
+        use ark_test_curves::bls12_381::Fr;
+
+        // This implements the Cooley-Tukey FFT, derived from libfqfft
+        // The libfqfft implementation uses pseudocode from [CLRS 2nd Ed., p. 864].
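+        // Editor's note (not in the original source): the serial reference
+        // below uses the standard radix-2 split. Writing the input polynomial
+        // as A(x) = E(x^2) + x * O(x^2), for 0 <= j < n/2 we have
+        //   A(w^j)       = E(w^{2j}) + w^j * O(w^{2j})
+        //   A(w^{j+n/2}) = E(w^{2j}) - w^j * O(w^{2j})   (since w^{n/2} = -1),
+        // and the bit-reversal pass places the coefficients where this
+        // recursion, unrolled iteratively, expects to find them.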
+        fn serial_radix2_fft(a: &mut [Fr], omega: Fr, log_n: u32) {
+            use ark_std::convert::TryFrom;
+            let n = u32::try_from(a.len())
+                .expect("cannot perform FFTs on vectors of len > (1 << 32)");
+            assert_eq!(n, 1 << log_n);
+
+            // swap coefficients in place
+            for k in 0..n {
+                let rk = crate::domain::utils::bitreverse(k, log_n);
+                if k < rk {
+                    a.swap(rk as usize, k as usize);
+                }
+            }
+
+            let mut m = 1;
+            for _i in 1..=log_n {
+                // w_m is 2^i-th root of unity
+                let w_m = omega.pow(&[(n / (2 * m)) as u64]);
+
+                let mut k = 0;
+                while k < n {
+                    // w = w_m^j at the start of every loop iteration
+                    let mut w = Fr::one();
+                    for j in 0..m {
+                        let mut t = a[(k + j + m) as usize];
+                        t *= w;
+                        let mut tmp = a[(k + j) as usize];
+                        tmp -= t;
+                        a[(k + j + m) as usize] = tmp;
+                        a[(k + j) as usize] += t;
+                        w *= &w_m;
+                    }
+
+                    k += 2 * m;
+                }
+
+                m *= 2;
+            }
+        }
+
+        fn serial_radix2_ifft(a: &mut [Fr], omega: Fr, log_n: u32) {
+            serial_radix2_fft(a, omega.inverse().unwrap(), log_n);
+            let domain_size_inv = Fr::from(a.len() as u64).inverse().unwrap();
+            for coeff in a.iter_mut() {
+                *coeff *= Fr::from(domain_size_inv);
+            }
+        }
+
+        fn serial_radix2_coset_fft(a: &mut [Fr], omega: Fr, log_n: u32) {
+            let coset_shift = Fr::multiplicative_generator();
+            let mut cur_pow = Fr::one();
+            for coeff in a.iter_mut() {
+                *coeff *= cur_pow;
+                cur_pow *= coset_shift;
+            }
+            serial_radix2_fft(a, omega, log_n);
+        }
+
+        fn serial_radix2_coset_ifft(a: &mut [Fr], omega: Fr, log_n: u32) {
+            serial_radix2_ifft(a, omega, log_n);
+            let coset_shift = Fr::multiplicative_generator().inverse().unwrap();
+            let mut cur_pow = Fr::one();
+            for coeff in a.iter_mut() {
+                *coeff *= cur_pow;
+                cur_pow *= coset_shift;
+            }
+        }
+
+        fn test_consistency<R: Rng>(rng: &mut R, max_coeffs: u32) {
+            for _ in 0..5 {
+                for log_d in 0..max_coeffs {
+                    let d = 1 << log_d;
+
+                    let expected_poly = (0..d).map(|_| Fr::rand(rng)).collect::<Vec<_>>();
+                    let mut expected_vec = expected_poly.clone();
+                    let mut actual_vec = expected_vec.clone();
+
+                    let domain = Radix2EvaluationDomain::new(d).unwrap();
+
+                    serial_radix2_fft(&mut expected_vec, domain.group_gen, log_d);
+                    domain.fft_in_place(&mut actual_vec);
+                    assert_eq!(expected_vec, actual_vec);
+
+                    serial_radix2_ifft(&mut expected_vec, domain.group_gen, log_d);
+                    domain.ifft_in_place(&mut actual_vec);
+                    assert_eq!(expected_vec, actual_vec);
+                    assert_eq!(expected_vec, expected_poly);
+
+                    serial_radix2_coset_fft(&mut expected_vec, domain.group_gen, log_d);
+                    domain.coset_fft_in_place(&mut actual_vec);
+                    assert_eq!(expected_vec, actual_vec);
+
+                    serial_radix2_coset_ifft(&mut expected_vec, domain.group_gen, log_d);
+                    domain.coset_ifft_in_place(&mut actual_vec);
+                    assert_eq!(expected_vec, actual_vec);
+                }
+            }
+        }
+
+        let rng = &mut test_rng();
+
+        test_consistency(rng, 10);
+    }
+}
diff --git a/arkworks/algebra/poly/src/domain/utils.rs b/arkworks/algebra/poly/src/domain/utils.rs
new file mode 100644
index 00000000..3199a572
--- /dev/null
+++ b/arkworks/algebra/poly/src/domain/utils.rs
@@ -0,0 +1,207 @@
+use crate::domain::DomainCoeff;
+use ark_ff::{FftField, Field};
+use ark_std::vec::Vec;
+#[cfg(feature = "parallel")]
+use rayon::prelude::*;
+
+// minimum size of a parallelized chunk
+#[allow(unused)]
+#[cfg(feature = "parallel")]
+const MIN_PARALLEL_CHUNK_SIZE: usize = 1 << 7;
+
+#[inline]
+pub(crate) fn bitreverse(mut n: u32, l: u32) -> u32 {
+    let mut r = 0;
+    for _ in 0..l {
+        r = (r << 1) | (n & 1);
+        n >>= 1;
+    }
+    r
+}
+
+pub(crate) fn compute_powers_serial<F: Field>(size: usize, root: F) -> Vec<F> {
+    compute_powers_and_mul_by_const_serial(size, root, F::one())
+}
+
+pub(crate) fn compute_powers_and_mul_by_const_serial<F: Field>(
+    size: usize,
+    root: F,
+    c: F,
+) -> Vec<F> {
+    let mut value = c;
+    (0..size)
+        .map(|_| {
+            let old_value = value;
+            value *= root;
+            old_value
+        })
+        .collect()
+}
+
+#[allow(unused)]
+#[cfg(feature = "parallel")]
+pub(crate) fn compute_powers<F: Field>(size: usize, g: F) -> Vec<F> {
+    if size < MIN_PARALLEL_CHUNK_SIZE {
+        return compute_powers_serial(size, g);
+    }
+    // compute the number of threads we will be using.
+    use ark_std::cmp::{max, min};
+    let num_cpus_available = rayon::current_num_threads();
+    let num_elem_per_thread = max(size / num_cpus_available, MIN_PARALLEL_CHUNK_SIZE);
+    let num_cpus_used = size / num_elem_per_thread;
+
+    // Split up the powers to compute across each thread evenly.
+    let res: Vec<F> = (0..num_cpus_used)
+        .into_par_iter()
+        .flat_map(|i| {
+            let offset = g.pow(&[(i * num_elem_per_thread) as u64]);
+            // Compute the size that this chunk's output should be
+            // (num_elem_per_thread, unless there are fewer than num_elem_per_thread elements remaining)
+            let num_elements_to_compute = min(size - i * num_elem_per_thread, num_elem_per_thread);
+            let res = compute_powers_and_mul_by_const_serial(num_elements_to_compute, g, offset);
+            res
+        })
+        .collect();
+    res
+}
+
+#[cfg(feature = "parallel")]
+fn log2_floor(num: usize) -> u32 {
+    if num == 0 {
+        0
+    } else {
+        1usize.leading_zeros() - num.leading_zeros()
+    }
+}
+
+#[cfg(feature = "parallel")]
+pub(crate) fn best_fft<T: DomainCoeff<F>, F: FftField>(
+    a: &mut [T],
+    omega: F,
+    log_n: u32,
+    serial_fft: fn(&mut [T], F, u32),
+) {
+    let num_cpus = rayon::current_num_threads();
+    let log_cpus = log2_floor(num_cpus);
+    if log_n <= log_cpus {
+        serial_fft(a, omega, log_n);
+    } else {
+        parallel_fft(a, omega, log_n, log_cpus, serial_fft);
+    }
+}
+
+#[cfg(not(feature = "parallel"))]
+#[inline]
+pub(crate) fn best_fft<T: DomainCoeff<F>, F: FftField>(
+    a: &mut [T],
+    omega: F,
+    log_n: u32,
+    serial_fft: fn(&mut [T], F, u32),
+) {
+    serial_fft(a, omega, log_n)
+}
+
+#[cfg(feature = "parallel")]
+pub(crate) fn parallel_fft<T: DomainCoeff<F>, F: FftField>(
+    a: &mut [T],
+    omega: F,
+    log_n: u32,
+    log_cpus: u32,
+    serial_fft: fn(&mut [T], F, u32),
+) {
+    assert!(log_n >= log_cpus);
+    // For documentation purposes, comments explain things
+    // as though `a` is a polynomial that we are trying to evaluate.
+
+    // Partition `a` equally into the number of threads.
+    // each partition is then of size m / num_threads.
+    let m = a.len();
+    let num_threads = 1 << (log_cpus as usize);
+    let num_cosets = num_threads;
+    assert_eq!(m % num_threads, 0);
+    let coset_size = m / num_threads;
+
+    // We first compute the FFT non-mutatively in tmp,
+    // and then shuffle it back into a.
+    // The evaluations are going to be arranged in cosets, each of size |a| / num_threads.
+    // so the first coset is (1, g^{num_cosets}, g^{2*num_cosets}, etc.)
+    // the second coset is (g, g^{1 + num_cosets}, g^{1 + 2*num_cosets}, etc.)
+    // These are cosets with generator g^{num_cosets}, and varying shifts.
+    let mut tmp = vec![vec![T::zero(); coset_size]; num_cosets];
+    let new_omega = omega.pow(&[num_cosets as u64]);
+    let new_two_adicity = ark_ff::utils::k_adicity(2, coset_size);
+
+    // For each coset, we first build a polynomial of degree |coset size|,
+    // whose evaluations over coset k will agree with the evaluations of a over the coset.
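+    // (Editor's illustration with hypothetical sizes, not in the original
+    // source: with m = 8 and num_threads = 2, coset 0 is {1, g^2, g^4, g^6}
+    // and coset 1 is its shift {g, g^3, g^5, g^7}; both are cosets of the
+    // subgroup generated by new_omega = g^2.)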
+    // Denote the kth such polynomial as poly_k
+    tmp.par_iter_mut()
+        .enumerate()
+        .for_each(|(k, kth_poly_coeffs)| {
+            // Shuffle into a sub-FFT
+            let omega_k = omega.pow(&[k as u64]);
+            let omega_step = omega.pow(&[(k * coset_size) as u64]);
+
+            let mut elt = F::one();
+            // Construct kth_poly_coeffs, which is a polynomial whose evaluations on this coset
+            // should equal the evaluations of a on this coset.
+            // `kth_poly_coeffs[i] = sum_{c in num_cosets} g^{k * (i + c * |coset|)} * a[i + c * |coset|]`
+            // Where c represents the index of the coset being considered.
+            // multiplying by g^{k*i} corresponds to the shift for just being in a different coset.
+            //
+            // TODO: Come back and improve the speed, and make this a more 'normal' Cooley-Tukey.
+            // This appears to be an FFT of the polynomial
+            // `P(x) = sum_{c in |coset|} a[i + c |coset|] * x^c`
+            // onto this coset.
+            // However this is being evaluated in time O(N) instead of time O(|coset|log(|coset|)).
+            // If this understanding is the case, it's not doing standard Cooley-Tukey.
+            // At the moment, this has time complexity of at least 2*N field mul's per thread,
+            // so we will be getting pretty bad parallelism.
+            // Exact complexity per thread atm is `2N + (N/num threads)log(N/num threads)` field muls.
+            // Compare to the serial time complexity of N log(N) field muls, with log(N) in [15, 25].
+            for i in 0..coset_size {
+                for c in 0..num_threads {
+                    let idx = i + (c * coset_size);
+                    // t = the value of a corresponding to the ith element of the c-th coset.
+                    let mut t = a[idx];
+                    // elt = g^{k * idx}
+                    t *= elt;
+                    kth_poly_coeffs[i] += t;
+                    elt *= &omega_step;
+                }
+                elt *= &omega_k;
+            }
+
+            // Perform sub-FFT
+            // Since the sub-FFT is mutative, after this point
+            // `kth_poly_coeffs` should be renamed `kth_coset_evals`
+            serial_fft(kth_poly_coeffs, new_omega, new_two_adicity);
+        });
+
+    // shuffle the values computed above into a
+    // The evaluations of a should be ordered as (1, g, g^2, ...)
+    a.iter_mut()
+        .enumerate()
+        .for_each(|(i, a)| *a = tmp[i % num_cosets][i / num_cosets]);
+}
+
+/// An iterator over the elements of a domain.
+pub struct Elements<F: FftField> {
+    pub(crate) cur_elem: F,
+    pub(crate) cur_pow: u64,
+    pub(crate) size: u64,
+    pub(crate) group_gen: F,
+}
+
+impl<F: FftField> Iterator for Elements<F> {
+    type Item = F;
+    fn next(&mut self) -> Option<F> {
+        if self.cur_pow == self.size {
+            None
+        } else {
+            let cur_elem = self.cur_elem;
+            self.cur_elem *= &self.group_gen;
+            self.cur_pow += 1;
+            Some(cur_elem)
+        }
+    }
+}
diff --git a/arkworks/algebra/poly/src/evaluations/mod.rs b/arkworks/algebra/poly/src/evaluations/mod.rs
new file mode 100644
index 00000000..4d645338
--- /dev/null
+++ b/arkworks/algebra/poly/src/evaluations/mod.rs
@@ -0,0 +1,2 @@
+pub mod multivariate;
+pub mod univariate;
diff --git a/arkworks/algebra/poly/src/evaluations/multivariate/mod.rs b/arkworks/algebra/poly/src/evaluations/multivariate/mod.rs
new file mode 100644
index 00000000..7e411e29
--- /dev/null
+++ b/arkworks/algebra/poly/src/evaluations/multivariate/mod.rs
@@ -0,0 +1,4 @@
+pub mod multilinear;
+pub use multilinear::{
+    DenseMultilinearExtension, MultilinearExtension, SparseMultilinearExtension,
+};
diff --git a/arkworks/algebra/poly/src/evaluations/multivariate/multilinear/dense.rs b/arkworks/algebra/poly/src/evaluations/multivariate/multilinear/dense.rs
new file mode 100644
index 00000000..2baf1372
--- /dev/null
+++ b/arkworks/algebra/poly/src/evaluations/multivariate/multilinear/dense.rs
@@ -0,0 +1,393 @@
+//!
Multilinear polynomial represented in dense evaluation form. + +use crate::evaluations::multivariate::multilinear::{swap_bits, MultilinearExtension}; +use ark_ff::{Field, Zero}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write}; +use ark_std::fmt; +use ark_std::fmt::Formatter; +use ark_std::ops::{Add, AddAssign, Index, Neg, Sub, SubAssign}; +use ark_std::rand::Rng; +use ark_std::slice::{Iter, IterMut}; +use ark_std::vec::Vec; +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +/// Stores a multilinear polynomial in dense evaluation form. +#[derive(Clone, PartialEq, Eq, Hash, Default, CanonicalSerialize, CanonicalDeserialize)] +pub struct DenseMultilinearExtension { + /// The evaluation over {0,1}^`num_vars` + pub evaluations: Vec, + /// Number of variables + pub num_vars: usize, +} + +impl DenseMultilinearExtension { + /// Construct a new polynomial from a list of evaluations where the index + /// represents a point in {0,1}^`num_vars` in little endian form. For example, `0b1011` represents `P(1,1,0,1)` + pub fn from_evaluations_slice(num_vars: usize, evaluations: &[F]) -> Self { + Self::from_evaluations_vec(num_vars, evaluations.to_vec()) + } + + /// Construct a new polynomial from a list of evaluations where the index + /// represents a point in {0,1}^`num_vars` in little endian form. For example, `0b1011` represents `P(1,1,0,1)` + pub fn from_evaluations_vec(num_vars: usize, evaluations: Vec) -> Self { + // assert that the number of variables matches the size of evaluations + assert_eq!( + evaluations.len(), + 1 << num_vars, + "The size of evaluations should be 2^num_vars." + ); + + Self { + num_vars, + evaluations, + } + } + /// Relabel the point inplace by switching `k` scalars from position `a` to position `b`, and from position `b` to position `a` in vector. 
+ /// + /// This function turns `P(x_1,...,x_a,...,x_{a+k - 1},...,x_b,...,x_{b+k - 1},...,x_n)` + /// to `P(x_1,...,x_b,...,x_{b+k - 1},...,x_a,...,x_{a+k - 1},...,x_n)` + pub fn relabel_inplace(&mut self, mut a: usize, mut b: usize, k: usize) { + // enforce order of a and b + if a > b { + ark_std::mem::swap(&mut a, &mut b); + } + assert!( + a + k < self.num_vars && b + k < self.num_vars, + "invalid relabel argument" + ); + if a == b || k == 0 { + return; + } + assert!(a + k <= b, "overlapped swap window is not allowed"); + for i in 0..self.evaluations.len() { + let j = swap_bits(i, a, b, k); + if i < j { + self.evaluations.swap(i, j); + } + } + } + + /// Returns an iterator that iterates over the evaluations over {0,1}^`num_vars` + pub fn iter(&self) -> Iter<'_, F> { + self.evaluations.iter() + } + + /// Returns a mutable iterator that iterates over the evaluations over {0,1}^`num_vars` + pub fn iter_mut(&mut self) -> IterMut<'_, F> { + self.evaluations.iter_mut() + } +} + +impl MultilinearExtension for DenseMultilinearExtension { + fn num_vars(&self) -> usize { + self.num_vars + } + + fn evaluate(&self, point: &[F]) -> Option { + if point.len() == self.num_vars { + Some(self.fix_variables(point)[0]) + } else { + None + } + } + + fn rand(num_vars: usize, rng: &mut R) -> Self { + Self::from_evaluations_vec( + num_vars, + (0..(1 << num_vars)).map(|_| F::rand(rng)).collect(), + ) + } + + fn relabel(&self, a: usize, b: usize, k: usize) -> Self { + let mut copied = self.clone(); + copied.relabel_inplace(a, b, k); + copied + } + + fn fix_variables(&self, partial_point: &[F]) -> Self { + assert!( + partial_point.len() <= self.num_vars, + "invalid size of partial point" + ); + let mut poly = self.evaluations.to_vec(); + let nv = self.num_vars; + let dim = partial_point.len(); + // evaluate single variable of partial point from left to right + for i in 1..dim + 1 { + let r = partial_point[i - 1]; + for b in 0..(1 << (nv - i)) { + poly[b] = poly[b << 1] * (F::one() - r) + poly[(b << 1) + 1] * r; + } + } + Self::from_evaluations_slice(nv - dim, &poly[..(1 << (nv - dim))]) + } + + fn to_evaluations(&self) -> Vec { + self.evaluations.to_vec() + } +} + +impl Index for DenseMultilinearExtension { + type Output = F; + + /// Returns the evaluation of the polynomial at a point represented by index. + /// + /// Index represents a vector in {0,1}^`num_vars` in little endian form. For example, `0b1011` represents `P(1,1,0,1)` + /// + /// For dense multilinear polynomial, `index` takes constant time. 
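+    ///
+    /// Editor's sketch (hypothetical `evals`, not in the original docs):
+    ///
+    /// ```ignore
+    /// // evals[i] is the value at the point whose little-endian bits are i
+    /// let poly = DenseMultilinearExtension::from_evaluations_slice(2, &evals);
+    /// assert_eq!(poly[0b01], evals[1]); // P(1, 0)
+    /// ```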
+ fn index(&self, index: usize) -> &Self::Output { + &self.evaluations[index] + } +} + +impl Add for DenseMultilinearExtension { + type Output = DenseMultilinearExtension; + + fn add(self, other: DenseMultilinearExtension) -> Self { + &self + &other + } +} + +impl<'a, 'b, F: Field> Add<&'a DenseMultilinearExtension> for &'b DenseMultilinearExtension { + type Output = DenseMultilinearExtension; + + fn add(self, rhs: &'a DenseMultilinearExtension) -> Self::Output { + // handle constant zero case + if rhs.is_zero() { + return self.clone(); + } + if self.is_zero() { + return rhs.clone(); + } + assert_eq!(self.num_vars, rhs.num_vars); + let result: Vec = cfg_iter!(self.evaluations) + .zip(cfg_iter!(rhs.evaluations)) + .map(|(a, b)| *a + *b) + .collect(); + + Self::Output::from_evaluations_vec(self.num_vars, result) + } +} + +impl AddAssign for DenseMultilinearExtension { + fn add_assign(&mut self, other: Self) { + *self = &*self + &other; + } +} + +impl<'a, 'b, F: Field> AddAssign<&'a DenseMultilinearExtension> + for DenseMultilinearExtension +{ + fn add_assign(&mut self, other: &'a DenseMultilinearExtension) { + *self = &*self + other; + } +} + +impl<'a, 'b, F: Field> AddAssign<(F, &'a DenseMultilinearExtension)> + for DenseMultilinearExtension +{ + fn add_assign(&mut self, (f, other): (F, &'a DenseMultilinearExtension)) { + let other = Self { + num_vars: other.num_vars, + evaluations: cfg_iter!(other.evaluations).map(|x| f * x).collect(), + }; + *self = &*self + &other; + } +} + +impl Neg for DenseMultilinearExtension { + type Output = DenseMultilinearExtension; + + fn neg(self) -> Self::Output { + Self::Output { + num_vars: self.num_vars, + evaluations: cfg_iter!(self.evaluations).map(|x| -*x).collect(), + } + } +} + +impl Sub for DenseMultilinearExtension { + type Output = DenseMultilinearExtension; + + fn sub(self, other: DenseMultilinearExtension) -> Self { + &self - &other + } +} + +impl<'a, 'b, F: Field> Sub<&'a DenseMultilinearExtension> for &'b DenseMultilinearExtension { + type Output = DenseMultilinearExtension; + + fn sub(self, rhs: &'a DenseMultilinearExtension) -> Self::Output { + self + &rhs.clone().neg() + } +} + +impl SubAssign for DenseMultilinearExtension { + fn sub_assign(&mut self, other: Self) { + *self = &*self - &other; + } +} + +impl<'a, 'b, F: Field> SubAssign<&'a DenseMultilinearExtension> + for DenseMultilinearExtension +{ + fn sub_assign(&mut self, other: &'a DenseMultilinearExtension) { + *self = &*self - other; + } +} + +impl fmt::Debug for DenseMultilinearExtension { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { + write!(f, "DenseML(nv = {}, evaluations = [", self.num_vars)?; + for i in 0..ark_std::cmp::min(4, self.evaluations.len()) { + write!(f, "{:?} ", self.evaluations[i])?; + } + if self.evaluations.len() < 4 { + write!(f, "])")?; + } else { + write!(f, "...])")?; + } + Ok(()) + } +} + +impl Zero for DenseMultilinearExtension { + fn zero() -> Self { + Self { + num_vars: 0, + evaluations: vec![F::zero()], + } + } + + fn is_zero(&self) -> bool { + self.num_vars == 0 && self.evaluations[0].is_zero() + } +} + +#[cfg(test)] +mod tests { + use crate::DenseMultilinearExtension; + use crate::MultilinearExtension; + use ark_ff::{Field, Zero}; + use ark_std::ops::Neg; + use ark_std::vec::Vec; + use ark_std::{test_rng, UniformRand}; + use ark_test_curves::bls12_381::Fr; + + /// utility: evaluate multilinear extension (in form of data array) at a random point + fn evaluate_data_array(data: &[F], point: &[F]) -> F { + if data.len() != (1 << 
point.len()) { + panic!("Data size mismatch with number of variables. ") + } + + let nv = point.len(); + let mut a = data.to_vec(); + + for i in 1..nv + 1 { + let r = point[i - 1]; + for b in 0..(1 << (nv - i)) { + a[b] = a[b << 1] * (F::one() - r) + a[(b << 1) + 1] * r; + } + } + a[0] + } + + #[test] + fn evaluate_at_a_point() { + let mut rng = test_rng(); + let poly = DenseMultilinearExtension::rand(10, &mut rng); + for _ in 0..10 { + let point: Vec<_> = (0..10).map(|_| Fr::rand(&mut rng)).collect(); + assert_eq!( + evaluate_data_array(&poly.evaluations, &point), + poly.evaluate(&point).unwrap() + ) + } + } + + #[test] + fn relabel_polynomial() { + let mut rng = test_rng(); + for _ in 0..20 { + let mut poly = DenseMultilinearExtension::rand(10, &mut rng); + let mut point: Vec<_> = (0..10).map(|_| Fr::rand(&mut rng)).collect(); + + let expected = poly.evaluate(&point); + + poly.relabel_inplace(2, 2, 1); // should have no effect + assert_eq!(expected, poly.evaluate(&point)); + + poly.relabel_inplace(3, 4, 1); // should switch 3 and 4 + point.swap(3, 4); + assert_eq!(expected, poly.evaluate(&point)); + + poly.relabel_inplace(7, 5, 1); + point.swap(7, 5); + assert_eq!(expected, poly.evaluate(&point)); + + poly.relabel_inplace(2, 5, 3); + point.swap(2, 5); + point.swap(3, 6); + point.swap(4, 7); + assert_eq!(expected, poly.evaluate(&point)); + + poly.relabel_inplace(7, 0, 2); + point.swap(0, 7); + point.swap(1, 8); + assert_eq!(expected, poly.evaluate(&point)); + } + } + + #[test] + fn arithmetic() { + const NV: usize = 10; + let mut rng = test_rng(); + for _ in 0..20 { + let point: Vec<_> = (0..NV).map(|_| Fr::rand(&mut rng)).collect(); + let poly1 = DenseMultilinearExtension::rand(NV, &mut rng); + let poly2 = DenseMultilinearExtension::rand(NV, &mut rng); + let v1 = poly1.evaluate(&point).unwrap(); + let v2 = poly2.evaluate(&point).unwrap(); + // test add + assert_eq!((&poly1 + &poly2).evaluate(&point).unwrap(), v1 + v2); + // test sub + assert_eq!((&poly1 - &poly2).evaluate(&point).unwrap(), v1 - v2); + // test negate + assert_eq!(poly1.clone().neg().evaluate(&point).unwrap(), -v1); + // test add assign + { + let mut poly1 = poly1.clone(); + poly1 += &poly2; + assert_eq!(poly1.evaluate(&point).unwrap(), v1 + v2) + } + // test sub assign + { + let mut poly1 = poly1.clone(); + poly1 -= &poly2; + assert_eq!(poly1.evaluate(&point).unwrap(), v1 - v2) + } + // test add assign with scalar + { + let mut poly1 = poly1.clone(); + let scalar = Fr::rand(&mut rng); + poly1 += (scalar, &poly2); + assert_eq!(poly1.evaluate(&point).unwrap(), v1 + scalar * v2) + } + // test additive identity + { + assert_eq!(&poly1 + &DenseMultilinearExtension::zero(), poly1); + assert_eq!(&DenseMultilinearExtension::zero() + &poly1, poly1); + { + let mut poly1_cloned = poly1.clone(); + poly1_cloned += &DenseMultilinearExtension::zero(); + assert_eq!(&poly1_cloned, &poly1); + let mut zero = DenseMultilinearExtension::zero(); + let scalar = Fr::rand(&mut rng); + zero += (scalar, &poly1); + assert_eq!(zero.evaluate(&point).unwrap(), scalar * v1); + } + } + } + } +} diff --git a/arkworks/algebra/poly/src/evaluations/multivariate/multilinear/mod.rs b/arkworks/algebra/poly/src/evaluations/multivariate/multilinear/mod.rs new file mode 100644 index 00000000..2f355007 --- /dev/null +++ b/arkworks/algebra/poly/src/evaluations/multivariate/multilinear/mod.rs @@ -0,0 +1,71 @@ +mod dense; +mod sparse; + +pub use dense::DenseMultilinearExtension; +pub use sparse::SparseMultilinearExtension; + +use ark_std::fmt::Debug; +use 
ark_std::hash::Hash;
+use ark_std::ops::{Add, AddAssign, Index, Neg, SubAssign};
+use ark_std::vec::Vec;
+
+use ark_ff::{Field, Zero};
+
+use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
+use ark_std::rand::Rng;
+
+/// This trait describes an interface for the multilinear extension
+/// of an array.
+/// The latter is a multilinear polynomial represented in terms of its evaluations over
+/// the domain {0,1}^`num_vars` (i.e. the Boolean hypercube).
+///
+/// Index represents a point, which is a vector in {0,1}^`num_vars` in little endian form. For example, `0b1011` represents `P(1,1,0,1)`
+pub trait MultilinearExtension<F: Field>:
+    Sized
+    + Clone
+    + Debug
+    + Hash
+    + PartialEq
+    + Eq
+    + Add
+    + Neg
+    + Zero
+    + CanonicalSerialize
+    + CanonicalDeserialize
+    + for<'a> AddAssign<&'a Self>
+    + for<'a> AddAssign<(F, &'a Self)>
+    + for<'a> SubAssign<&'a Self>
+    + Index<usize, Output = F>
+{
+    /// Returns the number of variables in `self`
+    fn num_vars(&self) -> usize;
+
+    /// Evaluates `self` at the point given by the slice `point`.
+    /// If the number of variables does not match, return `None`.
+    fn evaluate(&self, point: &[F]) -> Option<F>;
+
+    /// Outputs an `l`-variate multilinear extension where the evaluations are sampled uniformly at random.
+    fn rand<R: Rng>(num_vars: usize, rng: &mut R) -> Self;
+
+    /// Relabel the point by swapping the `k` scalars at positions `a..a+k` with
+    /// those at positions `b..b+k` in the point vector.
+    ///
+    /// This function turns `P(x_1,...,x_a,...,x_{a+k - 1},...,x_b,...,x_{b+k - 1},...,x_n)`
+    /// to `P(x_1,...,x_b,...,x_{b+k - 1},...,x_a,...,x_{a+k - 1},...,x_n)`
+    fn relabel(&self, a: usize, b: usize, k: usize) -> Self;
+
+    /// Reduce the number of variables of `self` by fixing the first `partial_point.len()` variables at `partial_point`.
+    fn fix_variables(&self, partial_point: &[F]) -> Self;
+
+    /// Returns a list of evaluations over the domain, which is the boolean hypercube.
+    fn to_evaluations(&self) -> Vec<F>;
+}
+
+/// swap the bits of `x` at positions `a..a+n` with those at positions `b..b+n`, in little endian order
+pub(crate) fn swap_bits(x: usize, a: usize, b: usize, n: usize) -> usize {
+    let a_bits = (x >> a) & ((1usize << n) - 1);
+    let b_bits = (x >> b) & ((1usize << n) - 1);
+    let local_xor_mask = a_bits ^ b_bits;
+    let global_xor_mask = (local_xor_mask << a) | (local_xor_mask << b);
+    x ^ global_xor_mask
+}
diff --git a/arkworks/algebra/poly/src/evaluations/multivariate/multilinear/sparse.rs b/arkworks/algebra/poly/src/evaluations/multivariate/multilinear/sparse.rs
new file mode 100644
index 00000000..659fc65a
--- /dev/null
+++ b/arkworks/algebra/poly/src/evaluations/multivariate/multilinear/sparse.rs
@@ -0,0 +1,591 @@
+//! Multilinear polynomial represented in sparse evaluation form.
+
+use crate::evaluations::multivariate::multilinear::swap_bits;
+use crate::{DenseMultilinearExtension, MultilinearExtension};
+use ark_ff::{Field, Zero};
+use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write};
+use ark_std::collections::BTreeMap;
+use ark_std::fmt::{Debug, Formatter};
+use ark_std::iter::FromIterator;
+use ark_std::ops::{Add, AddAssign, Index, Neg, Sub, SubAssign};
+use ark_std::rand::Rng;
+use ark_std::vec::Vec;
+use ark_std::{fmt, UniformRand};
+use hashbrown::HashMap;
+#[cfg(feature = "parallel")]
+use rayon::prelude::*;
+
+/// Stores a multilinear polynomial in sparse evaluation form.
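+///
+/// Only the nonzero evaluations over {0,1}^`num_vars` are stored, keyed by
+/// their little-endian index. Editor's construction sketch (`v` is a
+/// hypothetical field element, not from the original docs):
+///
+/// ```ignore
+/// // P vanishes everywhere except at index 3 = 0b11, i.e. P(1, 1) = v
+/// let poly = SparseMultilinearExtension::from_evaluations(2, &[(3, v)]);
+/// ```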
+#[derive(Clone, PartialEq, Eq, Hash, Default, CanonicalSerialize, CanonicalDeserialize)] +pub struct SparseMultilinearExtension { + /// tuples of index and value + pub evaluations: BTreeMap, + /// number of variables + pub num_vars: usize, + zero: F, +} + +impl SparseMultilinearExtension { + pub fn from_evaluations<'a>( + num_vars: usize, + evaluations: impl IntoIterator, + ) -> Self { + let bit_mask = 1 << num_vars; + // check + let evaluations = evaluations.into_iter(); + let evaluations: Vec<_> = evaluations + .map(|(i, v): &(usize, F)| { + assert!(*i < bit_mask, "index out of range"); + (*i, *v) + }) + .collect(); + + Self { + evaluations: tuples_to_treemap(&evaluations), + num_vars, + zero: F::zero(), + } + } + + /// Outputs an `l`-variate multilinear extension where value of evaluations are sampled uniformly at random. + /// The number of nonzero entries is `num_nonzero_entries` and indices of those nonzero entries are distributed uniformly at random. + /// + /// Note that this function uses rejection sampling. As number of nonzero entries approach `2 ^ num_vars`, + /// sampling will be very slow due to large number of collisions. + pub fn rand_with_config( + num_vars: usize, + num_nonzero_entries: usize, + rng: &mut R, + ) -> Self { + assert!(num_nonzero_entries <= (1 << num_vars)); + + let mut map = HashMap::new(); + for _ in 0..num_nonzero_entries { + let mut index = usize::rand(rng) & ((1 << num_vars) - 1); + while let Some(_) = map.get(&index) { + index = usize::rand(rng) & ((1 << num_vars) - 1); + } + map.entry(index).or_insert(F::rand(rng)); + } + let mut buf = Vec::new(); + for (arg, v) in map.iter() { + if *v != F::zero() { + buf.push((*arg, *v)); + } + } + let evaluations = hashmap_to_treemap(&map); + Self { + num_vars, + evaluations, + zero: F::zero(), + } + } + + /// Convert the sparse multilinear polynomial to dense form. + pub fn to_dense_multilinear_extension(&self) -> DenseMultilinearExtension { + let mut evaluations: Vec<_> = (0..(1 << self.num_vars)).map(|_| F::zero()).collect(); + for (&i, &v) in self.evaluations.iter() { + evaluations[i] = v; + } + DenseMultilinearExtension::from_evaluations_vec(self.num_vars, evaluations) + } +} + +/// utility: precompute f(x) = eq(g,x) +fn precompute_eq(g: &[F]) -> Vec { + let dim = g.len(); + let mut dp = Vec::with_capacity(1 << dim); + dp.resize(1 << dim, F::zero()); + dp[0] = F::one() - g[0]; + dp[1] = g[0]; + for i in 1..dim { + let dp_prev = (&dp[0..(1 << i)]).to_vec(); + for b in 0..(1 << i) { + dp[b] = dp_prev[b] * (F::one() - g[i]); + dp[b + (1 << i)] = dp_prev[b] * g[i]; + } + } + dp +} + +impl MultilinearExtension for SparseMultilinearExtension { + fn num_vars(&self) -> usize { + self.num_vars + } + + fn evaluate(&self, point: &[F]) -> Option { + if point.len() == self.num_vars { + Some(self.fix_variables(&point)[0]) + } else { + None + } + } + + /// Outputs an `l`-variate multilinear extension where value of evaluations are sampled uniformly at random. + /// The number of nonzero entries is `sqrt(2^num_vars)` and indices of those nonzero entries are distributed uniformly at random. 
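+    ///
+    /// Editor's note (not in the original docs): for `num_vars = 16` this
+    /// means `1 << 8 = 256` nonzero entries, e.g.
+    ///
+    /// ```ignore
+    /// let poly = SparseMultilinearExtension::<Fr>::rand(16, &mut rng);
+    /// assert_eq!(poly.evaluations.len(), 1 << 8);
+    /// ```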
+ fn rand(num_vars: usize, rng: &mut R) -> Self { + Self::rand_with_config(num_vars, 1 << (num_vars / 2), rng) + } + + fn relabel(&self, mut a: usize, mut b: usize, k: usize) -> Self { + if a > b { + // swap + let t = a; + a = b; + b = t; + } + // sanity check + assert!( + a + k < self.num_vars && b + k < self.num_vars, + "invalid relabel argument" + ); + if a == b || k == 0 { + return self.clone(); + } + assert!(a + k <= b, "overlapped swap window is not allowed"); + let ev: Vec<_> = cfg_iter!(self.evaluations) + .map(|(&i, &v)| (swap_bits(i, a, b, k), v)) + .collect(); + Self { + num_vars: self.num_vars, + evaluations: tuples_to_treemap(&ev), + zero: F::zero(), + } + } + + fn fix_variables(&self, partial_point: &[F]) -> Self { + let dim = partial_point.len(); + assert!(dim <= self.num_vars, "invalid partial point dimension"); + + let window = ark_std::log2(self.evaluations.len()) as usize; + let mut point = partial_point; + let mut last = treemap_to_hashmap(&self.evaluations); + + // batch evaluation + while !point.is_empty() { + let focus_length = if window > 0 && point.len() > window { + window + } else { + point.len() + }; + let focus = &point[..focus_length]; + point = &point[focus_length..]; + let pre = precompute_eq(focus); + let dim = focus.len(); + let mut result = HashMap::new(); + for src_entry in last.iter() { + let old_idx = *src_entry.0; + let gz = pre[old_idx & ((1 << dim) - 1)]; + let new_idx = old_idx >> dim; + let dst_entry = result.entry(new_idx).or_insert(F::zero()); + *dst_entry += gz * src_entry.1; + } + last = result; + } + let evaluations = hashmap_to_treemap(&last); + Self { + num_vars: self.num_vars - dim, + evaluations, + zero: F::zero(), + } + } + + fn to_evaluations(&self) -> Vec { + let mut evaluations: Vec<_> = (0..1 << self.num_vars).map(|_| F::zero()).collect(); + self.evaluations + .iter() + .map(|(&i, &v)| evaluations[i] = v) + .last(); + evaluations + } +} + +impl Index for SparseMultilinearExtension { + type Output = F; + + /// Returns the evaluation of the polynomial at a point represented by index. + /// + /// Index represents a vector in {0,1}^`num_vars` in little endian form. For example, `0b1011` represents `P(1,1,0,1)` + /// + /// For Sparse multilinear polynomial, Lookup_evaluation takes log time to the size of polynomial. 
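+    ///
+    /// Editor's sketch (hypothetical `v`): indices absent from the map read
+    /// back as zero.
+    ///
+    /// ```ignore
+    /// let poly = SparseMultilinearExtension::from_evaluations(8, &[(5, v)]);
+    /// assert_eq!(poly[5], v);
+    /// assert_eq!(poly[6], Fr::zero()); // not stored, hence zero
+    /// ```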
+ fn index(&self, index: usize) -> &Self::Output { + if let Some(v) = self.evaluations.get(&index) { + v + } else { + &self.zero + } + } +} + +impl Add for SparseMultilinearExtension { + type Output = SparseMultilinearExtension; + + fn add(self, other: SparseMultilinearExtension) -> Self { + &self + &other + } +} + +impl<'a, 'b, F: Field> Add<&'a SparseMultilinearExtension> + for &'b SparseMultilinearExtension +{ + type Output = SparseMultilinearExtension; + + fn add(self, rhs: &'a SparseMultilinearExtension) -> Self::Output { + // handle zero case + if self.is_zero() { + return rhs.clone(); + } + if rhs.is_zero() { + return self.clone(); + } + + assert_eq!( + rhs.num_vars, self.num_vars, + "trying to add non-zero polynomial with different number of variables" + ); + // simply merge the evaluations + let mut evaluations = HashMap::new(); + for (&i, &v) in self.evaluations.iter().chain(rhs.evaluations.iter()) { + *(evaluations.entry(i).or_insert(F::zero())) += v; + } + let evaluations: Vec<_> = evaluations + .into_iter() + .filter(|(_, v)| !v.is_zero()) + .collect(); + + Self::Output { + evaluations: tuples_to_treemap(&evaluations), + num_vars: self.num_vars, + zero: F::zero(), + } + } +} + +impl AddAssign for SparseMultilinearExtension { + fn add_assign(&mut self, other: Self) { + *self = &*self + &other; + } +} + +impl<'a, 'b, F: Field> AddAssign<&'a SparseMultilinearExtension> + for SparseMultilinearExtension +{ + fn add_assign(&mut self, other: &'a SparseMultilinearExtension) { + *self = &*self + other; + } +} + +impl<'a, 'b, F: Field> AddAssign<(F, &'a SparseMultilinearExtension)> + for SparseMultilinearExtension +{ + fn add_assign(&mut self, (f, other): (F, &'a SparseMultilinearExtension)) { + if !self.is_zero() && !other.is_zero() { + assert_eq!( + other.num_vars, self.num_vars, + "trying to add non-zero polynomial with different number of variables" + ); + } + let ev: Vec<_> = cfg_iter!(other.evaluations) + .map(|(i, v)| (*i, f * v)) + .collect(); + let other = Self { + num_vars: other.num_vars, + evaluations: tuples_to_treemap(&ev), + zero: F::zero(), + }; + *self += &other; + } +} + +impl Neg for SparseMultilinearExtension { + type Output = SparseMultilinearExtension; + + fn neg(self) -> Self::Output { + let ev: Vec<_> = cfg_iter!(self.evaluations) + .map(|(i, v)| (*i, -*v)) + .collect(); + Self::Output { + num_vars: self.num_vars, + evaluations: tuples_to_treemap(&ev), + zero: F::zero(), + } + } +} + +impl Sub for SparseMultilinearExtension { + type Output = SparseMultilinearExtension; + + fn sub(self, other: SparseMultilinearExtension) -> Self { + &self - &other + } +} + +impl<'a, 'b, F: Field> Sub<&'a SparseMultilinearExtension> + for &'b SparseMultilinearExtension +{ + type Output = SparseMultilinearExtension; + + fn sub(self, rhs: &'a SparseMultilinearExtension) -> Self::Output { + self + &rhs.clone().neg() + } +} + +impl SubAssign for SparseMultilinearExtension { + fn sub_assign(&mut self, other: Self) { + *self = &*self - &other; + } +} + +impl<'a, 'b, F: Field> SubAssign<&'a SparseMultilinearExtension> + for SparseMultilinearExtension +{ + fn sub_assign(&mut self, other: &'a SparseMultilinearExtension) { + *self = &*self - other; + } +} + +impl Zero for SparseMultilinearExtension { + fn zero() -> Self { + Self { + num_vars: 0, + evaluations: tuples_to_treemap(&Vec::new()), + zero: F::zero(), + } + } + + fn is_zero(&self) -> bool { + self.num_vars == 0 && self.evaluations.is_empty() + } +} + +impl Debug for SparseMultilinearExtension { + fn fmt(&self, f: &mut Formatter<'_>) 
-> Result<(), fmt::Error> { + write!( + f, + "SparseMultilinearPolynomial(num_vars = {}, evaluations = [", + self.num_vars + )?; + let mut ev_iter = self.evaluations.iter(); + for _ in 0..ark_std::cmp::min(8, self.evaluations.len()) { + write!(f, "{:?}", ev_iter.next())?; + } + if self.evaluations.len() > 8 { + write!(f, "...")?; + } + write!(f, "])")?; + Ok(()) + } +} + +/// Utility: Convert tuples to hashmap. +fn tuples_to_treemap(tuples: &[(usize, F)]) -> BTreeMap { + BTreeMap::from_iter(tuples.iter().map(|(i, v)| (*i, *v))) +} + +fn treemap_to_hashmap(map: &BTreeMap) -> HashMap { + HashMap::from_iter(map.iter().map(|(i, v)| (*i, *v))) +} + +fn hashmap_to_treemap(map: &HashMap) -> BTreeMap { + BTreeMap::from_iter(map.iter().map(|(i, v)| (*i, *v))) +} + +#[cfg(test)] +mod tests { + use crate::evaluations::multivariate::multilinear::MultilinearExtension; + use crate::SparseMultilinearExtension; + use ark_ff::{One, Zero}; + use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; + use ark_std::ops::Neg; + use ark_std::vec::Vec; + use ark_std::{test_rng, UniformRand}; + use ark_test_curves::bls12_381::Fr; + /// Some sanity test to ensure random sparse polynomial make sense. + #[test] + fn random_poly() { + const NV: usize = 16; + + let mut rng = test_rng(); + // two random poly should be different + let poly1 = SparseMultilinearExtension::::rand(NV, &mut rng); + let poly2 = SparseMultilinearExtension::::rand(NV, &mut rng); + assert_ne!(poly1, poly2); + // test sparsity + assert!( + ((1 << (NV / 2)) >> 1) <= poly1.evaluations.len() + && poly1.evaluations.len() <= ((1 << (NV / 2)) << 1), + "polynomial size out of range: expected: [{},{}] ,actual: {}", + ((1 << (NV / 2)) >> 1), + ((1 << (NV / 2)) << 1), + poly1.evaluations.len() + ); + } + + #[test] + /// Test if sparse multilinear polynomial evaluates correctly. + /// This function assumes dense multilinear polynomial functions correctly. 
+ fn evaluate() { + const NV: usize = 12; + let mut rng = test_rng(); + for _ in 0..20 { + let sparse = SparseMultilinearExtension::::rand(NV, &mut rng); + let dense = sparse.to_dense_multilinear_extension(); + let point: Vec<_> = (0..NV).map(|_| Fr::rand(&mut rng)).collect(); + assert_eq!(sparse.evaluate(&point), dense.evaluate(&point)); + let sparse_partial = sparse.fix_variables(&point[..3].to_vec()); + let dense_partial = dense.fix_variables(&point[..3].to_vec()); + let point2: Vec<_> = (0..(NV - 3)).map(|_| Fr::rand(&mut rng)).collect(); + assert_eq!( + sparse_partial.evaluate(&point2), + dense_partial.evaluate(&point2) + ); + } + } + + #[test] + fn evaluate_edge_cases() { + // test constant polynomial + let mut rng = test_rng(); + let ev1 = Fr::rand(&mut rng); + let poly1 = SparseMultilinearExtension::from_evaluations(0, &vec![(0, ev1)]); + assert_eq!(poly1.evaluate(&vec![]).unwrap(), ev1); + + // test single-variate polynomial + let ev2 = vec![Fr::rand(&mut rng), Fr::rand(&mut rng)]; + let poly2 = + SparseMultilinearExtension::from_evaluations(1, &vec![(0, ev2[0]), (1, ev2[1])]); + + let x = Fr::rand(&mut rng); + assert_eq!( + poly2.evaluate(&vec![x]).unwrap(), + x * ev2[1] + (Fr::one() - x) * ev2[0] + ); + + // test single-variate polynomial with one entry missing + let ev3 = Fr::rand(&mut rng); + let poly2 = SparseMultilinearExtension::from_evaluations(1, &vec![(1, ev3)]); + + let x = Fr::rand(&mut rng); + assert_eq!(poly2.evaluate(&vec![x]).unwrap(), x * ev3); + } + + #[test] + fn index() { + let mut rng = test_rng(); + let points = vec![ + (11, Fr::rand(&mut rng)), + (117, Fr::rand(&mut rng)), + (213, Fr::rand(&mut rng)), + (255, Fr::rand(&mut rng)), + ]; + let poly = SparseMultilinearExtension::from_evaluations(8, &points); + points + .into_iter() + .map(|(i, v)| assert_eq!(poly[i], v)) + .last(); + assert_eq!(poly[0], Fr::zero()); + assert_eq!(poly[1], Fr::zero()); + } + + #[test] + fn arithmetic() { + const NV: usize = 18; + let mut rng = test_rng(); + for _ in 0..20 { + let point: Vec<_> = (0..NV).map(|_| Fr::rand(&mut rng)).collect(); + let poly1 = SparseMultilinearExtension::rand(NV, &mut rng); + let poly2 = SparseMultilinearExtension::rand(NV, &mut rng); + let v1 = poly1.evaluate(&point).unwrap(); + let v2 = poly2.evaluate(&point).unwrap(); + // test add + assert_eq!((&poly1 + &poly2).evaluate(&point).unwrap(), v1 + v2); + // test sub + assert_eq!((&poly1 - &poly2).evaluate(&point).unwrap(), v1 - v2); + // test negate + assert_eq!(poly1.clone().neg().evaluate(&point).unwrap(), -v1); + // test add assign + { + let mut poly1 = poly1.clone(); + poly1 += &poly2; + assert_eq!(poly1.evaluate(&point).unwrap(), v1 + v2) + } + // test sub assign + { + let mut poly1 = poly1.clone(); + poly1 -= &poly2; + assert_eq!(poly1.evaluate(&point).unwrap(), v1 - v2) + } + // test add assign with scalar + { + let mut poly1 = poly1.clone(); + let scalar = Fr::rand(&mut rng); + poly1 += (scalar, &poly2); + assert_eq!(poly1.evaluate(&point).unwrap(), v1 + scalar * v2) + } + // test additive identity + { + assert_eq!(&poly1 + &SparseMultilinearExtension::zero(), poly1); + assert_eq!(&SparseMultilinearExtension::zero() + &poly1, poly1); + { + let mut poly1_cloned = poly1.clone(); + poly1_cloned += &SparseMultilinearExtension::zero(); + assert_eq!(&poly1_cloned, &poly1); + let mut zero = SparseMultilinearExtension::zero(); + let scalar = Fr::rand(&mut rng); + zero += (scalar, &poly1); + assert_eq!(zero.evaluate(&point).unwrap(), scalar * v1); + } + } + } + } + + #[test] + fn relabel() { + let mut 
rng = test_rng(); + for _ in 0..20 { + let mut poly = SparseMultilinearExtension::rand(10, &mut rng); + let mut point: Vec<_> = (0..10).map(|_| Fr::rand(&mut rng)).collect(); + + let expected = poly.evaluate(&point).unwrap(); + + poly = poly.relabel(2, 2, 1); // should have no effect + assert_eq!(expected, poly.evaluate(&point).unwrap()); + + poly = poly.relabel(3, 4, 1); // should switch 3 and 4 + point.swap(3, 4); + assert_eq!(expected, poly.evaluate(&point).unwrap()); + + poly = poly.relabel(7, 5, 1); + point.swap(7, 5); + assert_eq!(expected, poly.evaluate(&point).unwrap()); + + poly = poly.relabel(2, 5, 3); + point.swap(2, 5); + point.swap(3, 6); + point.swap(4, 7); + assert_eq!(expected, poly.evaluate(&point).unwrap()); + + poly = poly.relabel(7, 0, 2); + point.swap(0, 7); + point.swap(1, 8); + assert_eq!(expected, poly.evaluate(&point).unwrap()); + } + } + + #[test] + fn serialize() { + let mut rng = test_rng(); + for _ in 0..20 { + let mut buf = Vec::new(); + let poly = SparseMultilinearExtension::::rand(10, &mut rng); + let point: Vec<_> = (0..10).map(|_| Fr::rand(&mut rng)).collect(); + let expected = poly.evaluate(&point); + + poly.serialize(&mut buf).unwrap(); + + let poly2: SparseMultilinearExtension = + SparseMultilinearExtension::deserialize(&buf[..]).unwrap(); + assert_eq!(poly2.evaluate(&point), expected); + } + } +} diff --git a/arkworks/algebra/poly/src/evaluations/univariate/mod.rs b/arkworks/algebra/poly/src/evaluations/univariate/mod.rs new file mode 100644 index 00000000..ffb06cba --- /dev/null +++ b/arkworks/algebra/poly/src/evaluations/univariate/mod.rs @@ -0,0 +1,157 @@ +//! A univariate polynomial represented in evaluations form. + +use crate::univariate::DensePolynomial; +use crate::{EvaluationDomain, GeneralEvaluationDomain, UVPolynomial}; +use ark_ff::{batch_inversion, FftField}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError}; +use ark_std::{ + io::{Read, Write}, + ops::{Add, AddAssign, Div, DivAssign, Index, Mul, MulAssign, Sub, SubAssign}, + vec::Vec, +}; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +/// Stores a UV polynomial in evaluation form. +#[derive(Clone, PartialEq, Eq, Hash, Debug, CanonicalSerialize, CanonicalDeserialize)] +pub struct Evaluations = GeneralEvaluationDomain> { + /// The evaluations of a polynomial over the domain `D` + pub evals: Vec, + #[doc(hidden)] + domain: D, +} + +impl> Evaluations { + /// Construct `Self` from evaluations and a domain. 
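+    ///
+    /// Editor's usage sketch (field and values are assumptions, not from the
+    /// original docs):
+    ///
+    /// ```ignore
+    /// let domain = GeneralEvaluationDomain::<Fr>::new(4).unwrap();
+    /// let evals = Evaluations::from_vec_and_domain(vec![Fr::one(); 4], domain);
+    /// // a constant evaluation vector interpolates to a constant polynomial
+    /// assert_eq!(evals.interpolate().coeffs(), &[Fr::one()][..]);
+    /// ```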
+ pub fn from_vec_and_domain(evals: Vec, domain: D) -> Self { + Self { evals, domain } + } + + /// Interpolate a polynomial from a list of evaluations + pub fn interpolate_by_ref(&self) -> DensePolynomial { + DensePolynomial::from_coefficients_vec(self.domain.ifft(&self.evals)) + } + + /// Interpolate a polynomial from a list of evaluations + pub fn interpolate(self) -> DensePolynomial { + let Self { mut evals, domain } = self; + domain.ifft_in_place(&mut evals); + DensePolynomial::from_coefficients_vec(evals) + } + + /// Return the domain `self` is defined over + pub fn domain(&self) -> D { + self.domain + } +} + +impl> Index for Evaluations { + type Output = F; + + fn index(&self, index: usize) -> &F { + &self.evals[index] + } +} + +impl<'a, 'b, F: FftField, D: EvaluationDomain> Mul<&'a Evaluations> + for &'b Evaluations +{ + type Output = Evaluations; + + #[inline] + fn mul(self, other: &'a Evaluations) -> Evaluations { + let mut result = self.clone(); + result *= other; + result + } +} + +impl<'a, F: FftField, D: EvaluationDomain> MulAssign<&'a Evaluations> + for Evaluations +{ + #[inline] + fn mul_assign(&mut self, other: &'a Evaluations) { + assert_eq!(self.domain, other.domain, "domains are unequal"); + ark_std::cfg_iter_mut!(self.evals) + .zip(&other.evals) + .for_each(|(a, b)| *a *= b); + } +} + +impl<'a, 'b, F: FftField, D: EvaluationDomain> Add<&'a Evaluations> + for &'b Evaluations +{ + type Output = Evaluations; + + #[inline] + fn add(self, other: &'a Evaluations) -> Evaluations { + let mut result = self.clone(); + result += other; + result + } +} + +impl<'a, F: FftField, D: EvaluationDomain> AddAssign<&'a Evaluations> + for Evaluations +{ + #[inline] + fn add_assign(&mut self, other: &'a Evaluations) { + assert_eq!(self.domain, other.domain, "domains are unequal"); + ark_std::cfg_iter_mut!(self.evals) + .zip(&other.evals) + .for_each(|(a, b)| *a += b); + } +} + +impl<'a, 'b, F: FftField, D: EvaluationDomain> Sub<&'a Evaluations> + for &'b Evaluations +{ + type Output = Evaluations; + + #[inline] + fn sub(self, other: &'a Evaluations) -> Evaluations { + let mut result = self.clone(); + result -= other; + result + } +} + +impl<'a, F: FftField, D: EvaluationDomain> SubAssign<&'a Evaluations> + for Evaluations +{ + #[inline] + fn sub_assign(&mut self, other: &'a Evaluations) { + assert_eq!(self.domain, other.domain, "domains are unequal"); + ark_std::cfg_iter_mut!(self.evals) + .zip(&other.evals) + .for_each(|(a, b)| *a -= b); + } +} + +impl<'a, 'b, F: FftField, D: EvaluationDomain> Div<&'a Evaluations> + for &'b Evaluations +{ + type Output = Evaluations; + + #[inline] + fn div(self, other: &'a Evaluations) -> Evaluations { + let mut result = self.clone(); + result /= other; + result + } +} + +impl<'a, F: FftField, D: EvaluationDomain> DivAssign<&'a Evaluations> + for Evaluations +{ + #[inline] + fn div_assign(&mut self, other: &'a Evaluations) { + assert_eq!(self.domain, other.domain, "domains are unequal"); + let mut other_copy = other.clone(); + batch_inversion(other_copy.evals.as_mut_slice()); + ark_std::cfg_iter_mut!(self.evals) + .zip(&other_copy.evals) + .for_each(|(a, b)| *a *= b); + } +} diff --git a/arkworks/algebra/poly/src/lib.rs b/arkworks/algebra/poly/src/lib.rs new file mode 100644 index 00000000..f37c4e1e --- /dev/null +++ b/arkworks/algebra/poly/src/lib.rs @@ -0,0 +1,33 @@ +//! This crate implements functions for manipulating polynomials over finite +//! fields, including FFTs. 
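+//!
+//! Editor's end-to-end sketch (field name and RNG are assumptions; this
+//! mirrors the crate's own tests rather than the original module docs):
+//!
+//! ```ignore
+//! use ark_poly::{EvaluationDomain, Radix2EvaluationDomain, UVPolynomial};
+//! use ark_poly::univariate::DensePolynomial;
+//!
+//! let domain = Radix2EvaluationDomain::<Fr>::new(8).unwrap();
+//! let poly = DensePolynomial::<Fr>::rand(7, &mut rng);
+//! let evals = domain.fft(poly.coeffs()); // coefficients -> evaluations
+//! let coeffs = domain.ifft(&evals);      // and back
+//! assert_eq!(&coeffs[..], poly.coeffs());
+//! ```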
+#![cfg_attr(not(feature = "std"), no_std)]
+#![warn(unused, future_incompatible, nonstandard_style, rust_2018_idioms)]
+#![forbid(unsafe_code)]
+#![allow(
+    clippy::many_single_char_names,
+    clippy::suspicious_op_assign_impl,
+    clippy::suspicious_arithmetic_impl
+)]
+
+#[macro_use]
+extern crate derivative;
+
+#[macro_use]
+extern crate ark_std;
+
+pub mod domain;
+
+pub mod evaluations;
+pub mod polynomial;
+
+pub use domain::{
+    EvaluationDomain, GeneralEvaluationDomain, MixedRadixEvaluationDomain, Radix2EvaluationDomain,
+};
+pub use evaluations::multivariate::multilinear::{
+    DenseMultilinearExtension, MultilinearExtension, SparseMultilinearExtension,
+};
+pub use evaluations::univariate::Evaluations;
+pub use polynomial::{multivariate, univariate, MVPolynomial, Polynomial, UVPolynomial};
+
+#[cfg(test)]
+mod test;
diff --git a/arkworks/algebra/poly/src/polynomial/mod.rs b/arkworks/algebra/poly/src/polynomial/mod.rs
new file mode 100644
index 00000000..ccc29651
--- /dev/null
+++ b/arkworks/algebra/poly/src/polynomial/mod.rs
@@ -0,0 +1,80 @@
+//! Modules for working with univariate or multivariate polynomials.
+use ark_ff::{Field, Zero};
+use ark_serialize::*;
+use ark_std::rand::Rng;
+use ark_std::{
+    fmt::Debug,
+    hash::Hash,
+    ops::{Add, AddAssign, Neg, SubAssign},
+    vec::Vec,
+};
+
+pub mod multivariate;
+pub mod univariate;
+
+/// Describes the common interface for univariate and multivariate polynomials
+pub trait Polynomial<F: Field>:
+    Sized
+    + Clone
+    + Debug
+    + Hash
+    + PartialEq
+    + Eq
+    + Add
+    + Neg<Output = Self>
+    + Zero
+    + CanonicalSerialize
+    + CanonicalDeserialize
+    + for<'a> AddAssign<&'a Self>
+    + for<'a> AddAssign<(F, &'a Self)>
+    + for<'a> SubAssign<&'a Self>
+{
+    /// The type of evaluation points for this polynomial.
+    type Point: Sized + Clone + Ord + Debug + Sync + Hash;
+
+    /// Returns the total degree of the polynomial
+    fn degree(&self) -> usize;
+
+    /// Evaluates `self` at the given `point` in `Self::Point`.
+    fn evaluate(&self, point: &Self::Point) -> F;
+}
+
+/// Describes the interface for univariate polynomials
+pub trait UVPolynomial<F: Field>: Polynomial<F, Point = F> {
+    /// Constructs a new polynomial from a list of coefficients.
+    fn from_coefficients_slice(coeffs: &[F]) -> Self;
+
+    /// Constructs a new polynomial from a list of coefficients.
+    fn from_coefficients_vec(coeffs: Vec<F>) -> Self;
+
+    /// Returns the coefficients of `self`
+    fn coeffs(&self) -> &[F];
+
+    /// Returns a univariate polynomial of degree `d` where each
+    /// coefficient is sampled uniformly at random.
+    fn rand<R: Rng>(d: usize, rng: &mut R) -> Self;
+}
+
+/// Describes the interface for multivariate polynomials
+pub trait MVPolynomial<F: Field>: Polynomial<F, Point = Vec<F>> {
+    /// The type of the terms of `self`
+    type Term: multivariate::Term;
+
+    /// Constructs a new polynomial from a list of tuples of the form `(coeff, Self::Term)`
+    fn from_coefficients_slice(num_vars: usize, terms: &[(F, Self::Term)]) -> Self {
+        Self::from_coefficients_vec(num_vars, terms.to_vec())
+    }
+
+    /// Constructs a new polynomial from a list of tuples of the form `(coeff, Self::Term)`
+    fn from_coefficients_vec(num_vars: usize, terms: Vec<(F, Self::Term)>) -> Self;
+
+    /// Returns the terms of `self` as a list of tuples of the form `(coeff, Self::Term)`
+    fn terms(&self) -> &[(F, Self::Term)];
+
+    /// Returns the number of variables in `self`
+    fn num_vars(&self) -> usize;
+
+    /// Outputs an `l`-variate polynomial which is the sum of `l` `d`-degree univariate
+    /// polynomials where each coefficient is sampled uniformly at random.
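+    /// The result therefore has total degree at most `d`, since every
+    /// randomly sampled term is a power of a single variable.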
+ fn rand(d: usize, num_vars: usize, rng: &mut R) -> Self; +} diff --git a/arkworks/algebra/poly/src/polynomial/multivariate/mod.rs b/arkworks/algebra/poly/src/polynomial/multivariate/mod.rs new file mode 100644 index 00000000..a40e6946 --- /dev/null +++ b/arkworks/algebra/poly/src/polynomial/multivariate/mod.rs @@ -0,0 +1,170 @@ +//! Work with sparse multivariate polynomials. +use ark_ff::Field; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError}; +use ark_std::{ + cmp::Ordering, + fmt::{Debug, Error, Formatter}, + hash::Hash, + io::{Read, Write}, + ops::Deref, + vec::Vec, +}; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +mod sparse; +pub use sparse::SparsePolynomial; + +/// Describes the interface for a term (monomial) of a multivariate polynomial. +pub trait Term: + Clone + + PartialOrd + + Ord + + PartialEq + + Eq + + Hash + + Default + + Debug + + Deref + + Send + + Sync + + CanonicalSerialize + + CanonicalDeserialize +{ + /// Create a new `Term` from a list of tuples of the form `(variable, power)` + fn new(term: Vec<(usize, usize)>) -> Self; + + /// Returns the total degree of `self`. This is the sum of all variable powers in `self` + fn degree(&self) -> usize; + + /// Returns a list of variables in `self` + fn vars(&self) -> Vec; + + /// Returns a list of the powers of each variable in `self` + fn powers(&self) -> Vec; + + /// Returns whether `self` is a constant + fn is_constant(&self) -> bool; + + /// Evaluates `self` at the point `p`. + fn evaluate(&self, p: &[F]) -> F; +} + +/// Stores a term (monomial) in a multivariate polynomial. +/// Each element is of the form `(variable, power)`. +#[derive(Clone, PartialEq, Eq, Hash, Default, CanonicalSerialize, CanonicalDeserialize)] +pub struct SparseTerm(Vec<(usize, usize)>); + +impl SparseTerm { + /// Sums the powers of any duplicated variables. Assumes `term` is sorted. + fn combine(term: &[(usize, usize)]) -> Vec<(usize, usize)> { + let mut term_dedup: Vec<(usize, usize)> = Vec::new(); + for (var, pow) in term { + match term_dedup.last_mut() { + Some(prev) => { + if prev.0 == *var { + prev.1 += pow; + continue; + } + } + _ => {} + }; + term_dedup.push((*var, *pow)); + } + term_dedup + } +} + +impl Term for SparseTerm { + /// Create a new `Term` from a list of tuples of the form `(variable, power)` + fn new(mut term: Vec<(usize, usize)>) -> Self { + // Remove any terms with power 0 + term.retain(|(_, pow)| *pow != 0); + // If there are more than one variables, make sure they are + // in order and combine any duplicates + if term.len() > 1 { + term.sort_by(|(v1, _), (v2, _)| v1.cmp(v2)); + term = Self::combine(&term); + } + Self(term) + } + + /// Returns the sum of all variable powers in `self` + fn degree(&self) -> usize { + self.iter().fold(0, |sum, acc| sum + acc.1) + } + + /// Returns a list of variables in `self` + fn vars(&self) -> Vec { + self.iter().map(|(v, _)| *v).collect() + } + + /// Returns a list of variable powers in `self` + fn powers(&self) -> Vec { + self.iter().map(|(_, p)| *p).collect() + } + + /// Returns whether `self` is a constant + fn is_constant(&self) -> bool { + self.len() == 0 + } + + /// Evaluates `self` at the given `point` in the field. 
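+    ///
+    /// # Example
+    ///
+    /// An illustrative sketch (assuming the bls12_381 `Fr` field from
+    /// `ark-test-curves`): the term `x_0 * x_2^2` at the point `(2, 3, 4)`
+    /// evaluates to `2 * 4^2 = 32`.
+    ///
+    /// ```
+    /// use ark_poly::multivariate::{SparseTerm, Term};
+    /// use ark_test_curves::bls12_381::Fr;
+    ///
+    /// let term = SparseTerm::new(vec![(0, 1), (2, 2)]);
+    /// let point = vec![Fr::from(2u64), Fr::from(3u64), Fr::from(4u64)];
+    /// assert_eq!(term.evaluate(&point), Fr::from(32u64));
+    /// ```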
+ fn evaluate(&self, point: &[F]) -> F { + cfg_into_iter!(self) + .map(|(var, power)| point[*var].pow(&[*power as u64])) + .product() + } +} + +impl Debug for SparseTerm { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { + for variable in self.iter() { + if variable.1 == 1 { + write!(f, " * x_{}", variable.0)?; + } else { + write!(f, " * x_{}^{}", variable.0, variable.1)?; + } + } + Ok(()) + } +} + +impl Deref for SparseTerm { + type Target = [(usize, usize)]; + + fn deref(&self) -> &[(usize, usize)] { + &self.0 + } +} + +impl PartialOrd for SparseTerm { + /// Sort by total degree. If total degree is equal then ordering + /// is given by exponent weight in lower-numbered variables + /// ie. `x_1 > x_2`, `x_1^2 > x_1 * x_2`, etc. + fn partial_cmp(&self, other: &Self) -> Option { + if self.degree() != other.degree() { + Some(self.degree().cmp(&other.degree())) + } else { + // Iterate through all variables and return the corresponding ordering + // if they differ in variable numbering or power + for (cur, other) in self.iter().zip(other.iter()) { + if other.0 == cur.0 { + if cur.1 != other.1 { + return Some((cur.1).cmp(&other.1)); + } + } else { + return Some((other.0).cmp(&cur.0)); + } + } + Some(Ordering::Equal) + } + } +} + +impl Ord for SparseTerm { + fn cmp(&self, other: &Self) -> Ordering { + self.partial_cmp(other).unwrap() + } +} diff --git a/arkworks/algebra/poly/src/polynomial/multivariate/sparse.rs b/arkworks/algebra/poly/src/polynomial/multivariate/sparse.rs new file mode 100644 index 00000000..a5f48695 --- /dev/null +++ b/arkworks/algebra/poly/src/polynomial/multivariate/sparse.rs @@ -0,0 +1,419 @@ +//! A sparse multivariate polynomial represented in coefficient form. +use crate::{ + multivariate::{SparseTerm, Term}, + MVPolynomial, Polynomial, +}; +use ark_ff::{Field, Zero}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError}; +use ark_std::rand::Rng; +use ark_std::{ + cmp::Ordering, + fmt, + io::{Read, Write}, + ops::{Add, AddAssign, Neg, Sub, SubAssign}, + vec::Vec, +}; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +/// Stores a sparse multivariate polynomial in coefficient form. +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative(Clone, PartialEq, Eq, Hash, Default)] +pub struct SparsePolynomial { + /// The number of variables the polynomial supports + #[derivative(PartialEq = "ignore")] + pub num_vars: usize, + /// List of each term along with its coefficient + pub terms: Vec<(F, T)>, +} + +impl SparsePolynomial { + fn remove_zeros(&mut self) { + self.terms.retain(|(c, _)| !c.is_zero()); + } +} + +impl Polynomial for SparsePolynomial { + type Point = Vec; + + /// Returns the total degree of the polynomial + fn degree(&self) -> usize { + self.terms + .iter() + .map(|(_, term)| term.degree()) + .max() + .unwrap_or(0) + } + + /// Evaluates `self` at the given `point` in `Self::Point`. + fn evaluate(&self, point: &Vec) -> F { + assert!(point.len() >= self.num_vars, "Invalid evaluation domain"); + if self.is_zero() { + return F::zero(); + } + cfg_into_iter!(&self.terms) + .map(|(coeff, term)| *coeff * term.evaluate(point)) + .sum() + } +} + +impl MVPolynomial for SparsePolynomial { + /// Returns the number of variables in `self` + fn num_vars(&self) -> usize { + self.num_vars + } + + /// Outputs an `l`-variate polynomial which is the sum of `l` `d`-degree + /// univariate polynomials where each coefficient is sampled uniformly at random. 
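+    ///
+    /// # Example
+    ///
+    /// An illustrative sketch (assuming the bls12_381 `Fr` field from
+    /// `ark-test-curves`, as in this crate's tests):
+    ///
+    /// ```
+    /// use ark_poly::{multivariate::{SparsePolynomial, SparseTerm}, MVPolynomial, Polynomial};
+    /// use ark_std::test_rng;
+    /// use ark_test_curves::bls12_381::Fr;
+    ///
+    /// let rng = &mut test_rng();
+    /// let poly = SparsePolynomial::<Fr, SparseTerm>::rand(2, 3, rng);
+    /// assert_eq!(poly.num_vars(), 3);
+    /// assert!(poly.degree() <= 2);
+    /// ```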
+ fn rand(d: usize, l: usize, rng: &mut R) -> Self { + let mut random_terms = Vec::new(); + random_terms.push((F::rand(rng), SparseTerm::new(vec![]))); + for var in 0..l { + for deg in 1..=d { + random_terms.push((F::rand(rng), SparseTerm::new(vec![(var, deg)]))); + } + } + Self::from_coefficients_vec(l, random_terms) + } + + type Term = SparseTerm; + + /// Constructs a new polynomial from a list of tuples of the form `(coeff, Self::Term)` + fn from_coefficients_vec(num_vars: usize, mut terms: Vec<(F, SparseTerm)>) -> Self { + // Ensure that terms are in ascending order. + terms.sort_by(|(_, t1), (_, t2)| t1.cmp(t2)); + // If any terms are duplicated, add them together + let mut terms_dedup: Vec<(F, SparseTerm)> = Vec::new(); + for term in terms { + if let Some(prev) = terms_dedup.last_mut() { + if prev.1 == term.1 { + *prev = (prev.0 + term.0, prev.1.clone()); + continue; + } + }; + // Assert correct number of indeterminates + assert!( + term.1.iter().all(|(var, _)| *var < num_vars), + "Invalid number of indeterminates" + ); + terms_dedup.push(term); + } + let mut result = Self { + num_vars, + terms: terms_dedup, + }; + // Remove any terms with zero coefficients + result.remove_zeros(); + result + } + + /// Returns the terms of a `self` as a list of tuples of the form `(coeff, Self::Term)` + fn terms(&self) -> &[(F, Self::Term)] { + self.terms.as_slice() + } +} + +impl Add for SparsePolynomial { + type Output = SparsePolynomial; + + fn add(self, other: SparsePolynomial) -> Self { + &self + &other + } +} + +impl<'a, 'b, F: Field, T: Term> Add<&'a SparsePolynomial> for &'b SparsePolynomial { + type Output = SparsePolynomial; + + fn add(self, other: &'a SparsePolynomial) -> SparsePolynomial { + let mut result = Vec::new(); + let mut cur_iter = self.terms.iter().peekable(); + let mut other_iter = other.terms.iter().peekable(); + // Since both polynomials are sorted, iterate over them in ascending order, + // combining any common terms + loop { + // Peek at iterators to decide which to take from + let which = match (cur_iter.peek(), other_iter.peek()) { + (Some(cur), Some(other)) => Some((cur.1).cmp(&other.1)), + (Some(_), None) => Some(Ordering::Less), + (None, Some(_)) => Some(Ordering::Greater), + (None, None) => None, + }; + // Push the smallest element to the `result` coefficient vec + let smallest = match which { + Some(Ordering::Less) => cur_iter.next().unwrap().clone(), + Some(Ordering::Equal) => { + let other = other_iter.next().unwrap(); + let cur = cur_iter.next().unwrap(); + (cur.0 + other.0, cur.1.clone()) + } + Some(Ordering::Greater) => other_iter.next().unwrap().clone(), + None => break, + }; + result.push(smallest); + } + // Remove any zero terms + result.retain(|(c, _)| !c.is_zero()); + SparsePolynomial { + num_vars: core::cmp::max(self.num_vars, other.num_vars), + terms: result, + } + } +} + +impl<'a, 'b, F: Field, T: Term> AddAssign<&'a SparsePolynomial> for SparsePolynomial { + fn add_assign(&mut self, other: &'a SparsePolynomial) { + *self = &*self + other; + } +} + +impl<'a, 'b, F: Field, T: Term> AddAssign<(F, &'a SparsePolynomial)> + for SparsePolynomial +{ + fn add_assign(&mut self, (f, other): (F, &'a SparsePolynomial)) { + let other = Self { + num_vars: other.num_vars, + terms: other + .terms + .iter() + .map(|(coeff, term)| (*coeff * &f, term.clone())) + .collect(), + }; + // Note the call to `Add` will remove also any duplicates + *self = &*self + &other; + } +} + +impl Neg for SparsePolynomial { + type Output = SparsePolynomial; + + #[inline] + fn neg(mut self) 
-> SparsePolynomial { + for coeff in &mut self.terms { + (*coeff).0 = -coeff.0; + } + self + } +} + +impl<'a, 'b, F: Field, T: Term> Sub<&'a SparsePolynomial> for &'b SparsePolynomial { + type Output = SparsePolynomial; + + #[inline] + fn sub(self, other: &'a SparsePolynomial) -> SparsePolynomial { + let neg_other = other.clone().neg(); + self + &neg_other + } +} + +impl<'a, 'b, F: Field, T: Term> SubAssign<&'a SparsePolynomial> for SparsePolynomial { + #[inline] + fn sub_assign(&mut self, other: &'a SparsePolynomial) { + *self = &*self - other; + } +} + +impl fmt::Debug for SparsePolynomial { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + for (coeff, term) in self.terms.iter().filter(|(c, _)| !c.is_zero()) { + if term.is_constant() { + write!(f, "\n{:?}", coeff)?; + } else { + write!(f, "\n{:?} {:?}", coeff, term)?; + } + } + Ok(()) + } +} + +impl Zero for SparsePolynomial { + /// Returns the zero polynomial. + fn zero() -> Self { + Self { + num_vars: 0, + terms: Vec::new(), + } + } + + /// Checks if the given polynomial is zero. + fn is_zero(&self) -> bool { + self.terms.is_empty() || self.terms.iter().all(|(c, _)| c.is_zero()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use ark_ff::{Field, UniformRand, Zero}; + use ark_std::test_rng; + use ark_test_curves::bls12_381::Fr; + + // TODO: Make tests generic over term type + + /// Generate random `l`-variate polynomial of maximum individual degree `d` + fn rand_poly(l: usize, d: usize, rng: &mut R) -> SparsePolynomial { + let mut random_terms = Vec::new(); + let num_terms = rng.gen_range(1..1000); + // For each term, randomly select up to `l` variables with degree + // in [1,d] and random coefficient + random_terms.push((Fr::rand(rng), SparseTerm::new(vec![]))); + for _ in 1..num_terms { + let term = (0..l) + .map(|i| { + if rng.gen_bool(0.5) { + Some((i, rng.gen_range(1..(d + 1)))) + } else { + None + } + }) + .filter(|t| t.is_some()) + .map(|t| t.unwrap()) + .collect(); + let coeff = Fr::rand(rng); + random_terms.push((coeff, SparseTerm::new(term))); + } + SparsePolynomial::from_coefficients_slice(l, &random_terms) + } + + /// Perform a naive n^2 multiplication of `self` by `other`. 
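+    /// For example, `(2 + x_0) * x_1 = 2*x_1 + x_0*x_1`: every pairwise
+    /// product of terms is collected, and `from_coefficients_slice` then
+    /// merges duplicate terms and drops zero coefficients.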
+ fn naive_mul( + cur: &SparsePolynomial, + other: &SparsePolynomial, + ) -> SparsePolynomial { + if cur.is_zero() || other.is_zero() { + SparsePolynomial::zero() + } else { + let mut result_terms = Vec::new(); + for (cur_coeff, cur_term) in cur.terms.iter() { + for (other_coeff, other_term) in other.terms.iter() { + let mut term = cur_term.0.clone(); + term.extend(other_term.0.clone()); + result_terms.push((*cur_coeff * *other_coeff, SparseTerm::new(term))); + } + } + SparsePolynomial::from_coefficients_slice(cur.num_vars, result_terms.as_slice()) + } + } + + #[test] + fn add_polynomials() { + let rng = &mut test_rng(); + let max_degree = 10; + for a_var_count in 1..20 { + for b_var_count in 1..20 { + let p1 = rand_poly(a_var_count, max_degree, rng); + let p2 = rand_poly(b_var_count, max_degree, rng); + let res1 = &p1 + &p2; + let res2 = &p2 + &p1; + assert_eq!(res1, res2); + } + } + } + + #[test] + fn sub_polynomials() { + let rng = &mut test_rng(); + let max_degree = 10; + for a_var_count in 1..20 { + for b_var_count in 1..20 { + let p1 = rand_poly(a_var_count, max_degree, rng); + let p2 = rand_poly(b_var_count, max_degree, rng); + let res1 = &p1 - &p2; + let res2 = &p2 - &p1; + assert_eq!(&res1 + &p2, p1); + assert_eq!(res1, -res2); + } + } + } + + #[test] + fn evaluate_polynomials() { + let rng = &mut test_rng(); + let max_degree = 10; + for var_count in 1..20 { + let p = rand_poly(var_count, max_degree, rng); + let mut point = Vec::with_capacity(var_count); + for _ in 0..var_count { + point.push(Fr::rand(rng)); + } + let mut total = Fr::zero(); + for (coeff, term) in p.terms.iter() { + let mut summand = *coeff; + for var in term.iter() { + let eval = point.get(var.0).unwrap(); + summand *= eval.pow(&[var.1 as u64]); + } + total += summand; + } + assert_eq!(p.evaluate(&point), total); + } + } + + #[test] + fn add_and_evaluate_polynomials() { + let rng = &mut test_rng(); + let max_degree = 10; + for a_var_count in 1..20 { + for b_var_count in 1..20 { + let p1 = rand_poly(a_var_count, max_degree, rng); + let p2 = rand_poly(b_var_count, max_degree, rng); + let mut point = Vec::new(); + for _ in 0..core::cmp::max(a_var_count, b_var_count) { + point.push(Fr::rand(rng)); + } + // Evaluate both polynomials at a given point + let eval1 = p1.evaluate(&point); + let eval2 = p2.evaluate(&point); + // Add polynomials + let sum = &p1 + &p2; + // Evaluate result at same point + let eval3 = sum.evaluate(&point); + assert_eq!(eval1 + eval2, eval3); + } + } + } + + #[test] + /// This is just to make sure naive_mul works as expected + fn mul_polynomials_fixed() { + let a = SparsePolynomial::from_coefficients_slice( + 4, + &[ + ("2".parse().unwrap(), SparseTerm(vec![])), + ("4".parse().unwrap(), SparseTerm(vec![(0, 1), (1, 2)])), + ("8".parse().unwrap(), SparseTerm(vec![(0, 1), (0, 1)])), + ("1".parse().unwrap(), SparseTerm(vec![(3, 0)])), + ], + ); + let b = SparsePolynomial::from_coefficients_slice( + 4, + &[ + ("1".parse().unwrap(), SparseTerm(vec![(0, 1), (1, 2)])), + ("2".parse().unwrap(), SparseTerm(vec![(2, 1)])), + ("1".parse().unwrap(), SparseTerm(vec![(3, 1)])), + ], + ); + let result = naive_mul(&a, &b); + let expected = SparsePolynomial::from_coefficients_slice( + 4, + &[ + ("3".parse().unwrap(), SparseTerm(vec![(0, 1), (1, 2)])), + ("6".parse().unwrap(), SparseTerm(vec![(2, 1)])), + ("3".parse().unwrap(), SparseTerm(vec![(3, 1)])), + ("4".parse().unwrap(), SparseTerm(vec![(0, 2), (1, 4)])), + ( + "8".parse().unwrap(), + SparseTerm(vec![(0, 1), (1, 2), (2, 1)]), + ), + ( + 
"4".parse().unwrap(), + SparseTerm(vec![(0, 1), (1, 2), (3, 1)]), + ), + ("8".parse().unwrap(), SparseTerm(vec![(0, 3), (1, 2)])), + ("16".parse().unwrap(), SparseTerm(vec![(0, 2), (2, 1)])), + ("8".parse().unwrap(), SparseTerm(vec![(0, 2), (3, 1)])), + ], + ); + assert_eq!(expected, result); + } +} diff --git a/arkworks/algebra/poly/src/polynomial/univariate/dense.rs b/arkworks/algebra/poly/src/polynomial/univariate/dense.rs new file mode 100644 index 00000000..f4967174 --- /dev/null +++ b/arkworks/algebra/poly/src/polynomial/univariate/dense.rs @@ -0,0 +1,845 @@ +//! A dense univariate polynomial represented in coefficient form. +use crate::univariate::DenseOrSparsePolynomial; +use crate::{univariate::SparsePolynomial, Polynomial, UVPolynomial}; +use crate::{EvaluationDomain, Evaluations, GeneralEvaluationDomain}; +use ark_ff::{FftField, Field, Zero}; +use ark_serialize::*; +use ark_std::rand::Rng; +use ark_std::{ + fmt, + ops::{Add, AddAssign, Deref, DerefMut, Div, Mul, Neg, Sub, SubAssign}, + vec::Vec, +}; + +#[cfg(feature = "parallel")] +use ark_std::cmp::max; +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +/// Stores a polynomial in coefficient form. +#[derive(Clone, PartialEq, Eq, Hash, Default, CanonicalSerialize, CanonicalDeserialize)] +pub struct DensePolynomial { + /// The coefficient of `x^i` is stored at location `i` in `self.coeffs`. + pub coeffs: Vec, +} + +impl Polynomial for DensePolynomial { + type Point = F; + + /// Returns the total degree of the polynomial + fn degree(&self) -> usize { + if self.is_zero() { + 0 + } else { + assert!(self.coeffs.last().map_or(false, |coeff| !coeff.is_zero())); + self.coeffs.len() - 1 + } + } + + /// Evaluates `self` at the given `point` in `Self::Point`. + fn evaluate(&self, point: &F) -> F { + if self.is_zero() { + return F::zero(); + } else if point.is_zero() { + return self.coeffs[0]; + } + self.internal_evaluate(point) + } +} + +#[cfg(feature = "parallel")] +// Set some minimum number of field elements to be worked on per thread +// to avoid per-thread costs dominating parallel execution time. +const MIN_ELEMENTS_PER_THREAD: usize = 16; + +impl DensePolynomial { + #[inline] + // Horner's method for polynomial evaluation + fn horner_evaluate(poly_coeffs: &[F], point: &F) -> F { + poly_coeffs + .iter() + .rfold(F::zero(), move |result, coeff| result * point + coeff) + } + + #[cfg(not(feature = "parallel"))] + fn internal_evaluate(&self, point: &F) -> F { + Self::horner_evaluate(&self.coeffs, point) + } + + #[cfg(feature = "parallel")] + fn internal_evaluate(&self, point: &F) -> F { + // Horners method - parallel method + // compute the number of threads we will be using. + let num_cpus_available = rayon::current_num_threads(); + let num_coeffs = self.coeffs.len(); + let num_elem_per_thread = max(num_coeffs / num_cpus_available, MIN_ELEMENTS_PER_THREAD); + + // run Horners method on each thread as follows: + // 1) Split up the coefficients across each thread evenly. + // 2) Do polynomial evaluation via horner's method for the thread's coefficeints + // 3) Scale the result point^{thread coefficient start index} + // Then obtain the final polynomial evaluation by summing each threads result. 
+ let result = self + .coeffs + .par_chunks(num_elem_per_thread) + .enumerate() + .map(|(i, chunk)| { + let mut thread_result = Self::horner_evaluate(&chunk, point); + thread_result *= point.pow(&[(i * num_elem_per_thread) as u64]); + thread_result + }) + .sum(); + result + } +} + +impl UVPolynomial for DensePolynomial { + /// Constructs a new polynomial from a list of coefficients. + fn from_coefficients_slice(coeffs: &[F]) -> Self { + Self::from_coefficients_vec(coeffs.to_vec()) + } + + /// Constructs a new polynomial from a list of coefficients. + fn from_coefficients_vec(coeffs: Vec) -> Self { + let mut result = Self { coeffs }; + // While there are zeros at the end of the coefficient vector, pop them off. + result.truncate_leading_zeros(); + // Check that either the coefficients vec is empty or that the last coeff is + // non-zero. + assert!(result.coeffs.last().map_or(true, |coeff| !coeff.is_zero())); + result + } + + /// Returns the coefficients of `self` + fn coeffs(&self) -> &[F] { + &self.coeffs + } + + /// Outputs a univariate polynomial of degree `d` where + /// each coefficient is sampled uniformly at random. + fn rand(d: usize, rng: &mut R) -> Self { + let mut random_coeffs = Vec::new(); + for _ in 0..=d { + random_coeffs.push(F::rand(rng)); + } + Self::from_coefficients_vec(random_coeffs) + } +} + +impl DensePolynomial { + /// Multiply `self` by the vanishing polynomial for the domain `domain`. + /// Returns the result of the multiplication. + pub fn mul_by_vanishing_poly>(&self, domain: D) -> DensePolynomial { + let mut shifted = vec![F::zero(); domain.size()]; + shifted.extend_from_slice(&self.coeffs); + cfg_iter_mut!(shifted) + .zip(&self.coeffs) + .for_each(|(s, c)| *s -= c); + DensePolynomial::from_coefficients_vec(shifted) + } + + /// Divide `self` by the vanishing polynomial for the domain `domain`. + /// Returns the quotient and remainder of the division. + pub fn divide_by_vanishing_poly>( + &self, + domain: D, + ) -> Option<(DensePolynomial, DensePolynomial)> { + let self_poly = DenseOrSparsePolynomial::from(self); + let vanishing_poly = DenseOrSparsePolynomial::from(domain.vanishing_polynomial()); + self_poly.divide_with_q_and_r(&vanishing_poly) + } +} + +impl DensePolynomial { + fn truncate_leading_zeros(&mut self) { + while self.coeffs.last().map_or(false, |c| c.is_zero()) { + self.coeffs.pop(); + } + } + + /// Perform a naive n^2 multiplication of `self` by `other`. + pub fn naive_mul(&self, other: &Self) -> Self { + if self.is_zero() || other.is_zero() { + DensePolynomial::zero() + } else { + let mut result = vec![F::zero(); self.degree() + other.degree() + 1]; + for (i, self_coeff) in self.coeffs.iter().enumerate() { + for (j, other_coeff) in other.coeffs.iter().enumerate() { + result[i + j] += &(*self_coeff * other_coeff); + } + } + DensePolynomial::from_coefficients_vec(result) + } + } +} + +impl DensePolynomial { + /// Evaluate `self` over `domain`. + pub fn evaluate_over_domain_by_ref>( + &self, + domain: D, + ) -> Evaluations { + let poly: DenseOrSparsePolynomial<'_, F> = self.into(); + DenseOrSparsePolynomial::::evaluate_over_domain(poly, domain) + } + + /// Evaluate `self` over `domain`. 
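+    ///
+    /// # Example
+    ///
+    /// An illustrative sketch (assuming the bls12_381 `Fr` field from
+    /// `ark-test-curves`): the result stores `p(x)` for each domain element
+    /// `x`, in the domain's iteration order.
+    ///
+    /// ```
+    /// use ark_poly::{univariate::DensePolynomial, EvaluationDomain,
+    ///                GeneralEvaluationDomain, Polynomial, UVPolynomial};
+    /// use ark_test_curves::bls12_381::Fr;
+    ///
+    /// // p(x) = 1 + 2x
+    /// let p = DensePolynomial::from_coefficients_slice(&[Fr::from(1u64), Fr::from(2u64)]);
+    /// let domain = GeneralEvaluationDomain::<Fr>::new(4).unwrap();
+    /// let evals = p.clone().evaluate_over_domain(domain);
+    /// for (i, x) in domain.elements().enumerate() {
+    ///     assert_eq!(evals[i], p.evaluate(&x));
+    /// }
+    /// ```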
+ pub fn evaluate_over_domain>(self, domain: D) -> Evaluations { + let poly: DenseOrSparsePolynomial<'_, F> = self.into(); + DenseOrSparsePolynomial::::evaluate_over_domain(poly, domain) + } +} + +impl fmt::Debug for DensePolynomial { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + for (i, coeff) in self.coeffs.iter().enumerate().filter(|(_, c)| !c.is_zero()) { + if i == 0 { + write!(f, "\n{:?}", coeff)?; + } else if i == 1 { + write!(f, " + \n{:?} * x", coeff)?; + } else { + write!(f, " + \n{:?} * x^{}", coeff, i)?; + } + } + Ok(()) + } +} + +impl Deref for DensePolynomial { + type Target = [F]; + + fn deref(&self) -> &[F] { + &self.coeffs + } +} + +impl DerefMut for DensePolynomial { + fn deref_mut(&mut self) -> &mut [F] { + &mut self.coeffs + } +} + +impl Add for DensePolynomial { + type Output = DensePolynomial; + + fn add(self, other: DensePolynomial) -> Self { + &self + &other + } +} + +impl<'a, 'b, F: Field> Add<&'a DensePolynomial> for &'b DensePolynomial { + type Output = DensePolynomial; + + fn add(self, other: &'a DensePolynomial) -> DensePolynomial { + let mut result = if self.is_zero() { + other.clone() + } else if other.is_zero() { + self.clone() + } else if self.degree() >= other.degree() { + let mut result = self.clone(); + result + .coeffs + .iter_mut() + .zip(&other.coeffs) + .for_each(|(a, b)| { + *a += b; + }); + result + } else { + let mut result = other.clone(); + result + .coeffs + .iter_mut() + .zip(&self.coeffs) + .for_each(|(a, b)| { + *a += b; + }); + result + }; + result.truncate_leading_zeros(); + result + } +} + +impl<'a, 'b, F: Field> Add<&'a SparsePolynomial> for &'b DensePolynomial { + type Output = DensePolynomial; + + #[inline] + fn add(self, other: &'a SparsePolynomial) -> DensePolynomial { + let result = if self.is_zero() { + other.clone().into() + } else if other.is_zero() { + self.clone() + } else { + let mut result = self.clone(); + // If `other` has higher degree than `self`, create a dense vector + // storing the upper coefficients of the addition + let mut upper_coeffs = match other.degree() > result.degree() { + true => vec![F::zero(); other.degree() - result.degree()], + false => Vec::new(), + }; + for (pow, coeff) in other.iter() { + if *pow <= result.degree() { + result.coeffs[*pow] += coeff; + } else { + upper_coeffs[*pow - result.degree() - 1] = *coeff; + } + } + result.coeffs.extend(upper_coeffs); + result + }; + result + } +} + +impl<'a, 'b, F: Field> AddAssign<&'a DensePolynomial> for DensePolynomial { + fn add_assign(&mut self, other: &'a DensePolynomial) { + if self.is_zero() { + self.coeffs.truncate(0); + self.coeffs.extend_from_slice(&other.coeffs); + } else if other.is_zero() { + } else if self.degree() >= other.degree() { + self.coeffs + .iter_mut() + .zip(&other.coeffs) + .for_each(|(a, b)| { + *a += b; + }); + } else { + // Add the necessary number of zero coefficients. + self.coeffs.resize(other.coeffs.len(), F::zero()); + self.coeffs + .iter_mut() + .zip(&other.coeffs) + .for_each(|(a, b)| { + *a += b; + }); + self.truncate_leading_zeros(); + } + } +} + +impl<'a, 'b, F: Field> AddAssign<(F, &'a DensePolynomial)> for DensePolynomial { + fn add_assign(&mut self, (f, other): (F, &'a DensePolynomial)) { + if self.is_zero() { + self.coeffs.truncate(0); + self.coeffs.extend_from_slice(&other.coeffs); + self.coeffs.iter_mut().for_each(|c| *c *= &f); + return; + } else if other.is_zero() { + return; + } else if self.degree() >= other.degree() { + } else { + // Add the necessary number of zero coefficients. 
+ self.coeffs.resize(other.coeffs.len(), F::zero()); + } + self.coeffs + .iter_mut() + .zip(&other.coeffs) + .for_each(|(a, b)| { + *a += &(f * b); + }); + // If the leading coefficient ends up being zero, pop it off. + // This can happen if they were the same degree, or if a + // polynomial's coefficients were constructed with leading zeros. + self.truncate_leading_zeros(); + } +} + +impl<'a, F: Field> AddAssign<&'a SparsePolynomial> for DensePolynomial { + #[inline] + fn add_assign(&mut self, other: &'a SparsePolynomial) { + if self.is_zero() { + self.coeffs.truncate(0); + self.coeffs.resize(other.degree() + 1, F::zero()); + + for (i, coeff) in other.iter() { + self.coeffs[*i] = *coeff; + } + return; + } else if other.is_zero() { + return; + } else { + // If `other` has higher degree than `self`, create a dense vector + // storing the upper coefficients of the addition + let mut upper_coeffs = match other.degree() > self.degree() { + true => vec![F::zero(); other.degree() - self.degree()], + false => Vec::new(), + }; + for (pow, coeff) in other.iter() { + if *pow <= self.degree() { + self.coeffs[*pow] += coeff; + } else { + upper_coeffs[*pow - self.degree() - 1] = *coeff; + } + } + self.coeffs.extend(upper_coeffs); + } + } +} + +impl Neg for DensePolynomial { + type Output = DensePolynomial; + + #[inline] + fn neg(mut self) -> DensePolynomial { + self.coeffs.iter_mut().for_each(|coeff| { + *coeff = -*coeff; + }); + self + } +} + +impl<'a, 'b, F: Field> Sub<&'a DensePolynomial> for &'b DensePolynomial { + type Output = DensePolynomial; + + #[inline] + fn sub(self, other: &'a DensePolynomial) -> DensePolynomial { + let mut result = if self.is_zero() { + let mut result = other.clone(); + result.coeffs.iter_mut().for_each(|c| *c = -(*c)); + result + } else if other.is_zero() { + self.clone() + } else if self.degree() >= other.degree() { + let mut result = self.clone(); + result + .coeffs + .iter_mut() + .zip(&other.coeffs) + .for_each(|(a, b)| *a -= b); + result + } else { + let mut result = self.clone(); + result.coeffs.resize(other.coeffs.len(), F::zero()); + result + .coeffs + .iter_mut() + .zip(&other.coeffs) + .for_each(|(a, b)| *a -= b); + result + }; + result.truncate_leading_zeros(); + result + } +} + +impl<'a, 'b, F: Field> Sub<&'a SparsePolynomial> for &'b DensePolynomial { + type Output = DensePolynomial; + + #[inline] + fn sub(self, other: &'a SparsePolynomial) -> DensePolynomial { + let result = if self.is_zero() { + let result = other.clone(); + result.neg().into() + } else if other.is_zero() { + self.clone() + } else { + let mut result = self.clone(); + // If `other` has higher degree than `self`, create a dense vector + // storing the upper coefficients of the subtraction + let mut upper_coeffs = match other.degree() > result.degree() { + true => vec![F::zero(); other.degree() - result.degree()], + false => Vec::new(), + }; + for (pow, coeff) in other.iter() { + if *pow <= result.degree() { + result.coeffs[*pow] -= coeff; + } else { + upper_coeffs[*pow - result.degree() - 1] = -*coeff; + } + } + result.coeffs.extend(upper_coeffs); + result + }; + result + } +} + +impl<'a, 'b, F: Field> SubAssign<&'a DensePolynomial> for DensePolynomial { + #[inline] + fn sub_assign(&mut self, other: &'a DensePolynomial) { + if self.is_zero() { + self.coeffs.resize(other.coeffs.len(), F::zero()); + } else if other.is_zero() { + return; + } else if self.degree() >= other.degree() { + } else { + // Add the necessary number of zero coefficients. 
+ self.coeffs.resize(other.coeffs.len(), F::zero()); + } + self.coeffs + .iter_mut() + .zip(&other.coeffs) + .for_each(|(a, b)| { + *a -= b; + }); + // If the leading coefficient ends up being zero, pop it off. + // This can happen if they were the same degree, or if other's + // coefficients were constructed with leading zeros. + self.truncate_leading_zeros(); + } +} + +impl<'a, F: Field> SubAssign<&'a SparsePolynomial> for DensePolynomial { + #[inline] + fn sub_assign(&mut self, other: &'a SparsePolynomial) { + if self.is_zero() { + self.coeffs.truncate(0); + self.coeffs.resize(other.degree() + 1, F::zero()); + + for (i, coeff) in other.iter() { + self.coeffs[*i] = (*coeff).neg(); + } + return; + } else if other.is_zero() { + return; + } else { + // If `other` has higher degree than `self`, create a dense vector + // storing the upper coefficients of the subtraction + let mut upper_coeffs = match other.degree() > self.degree() { + true => vec![F::zero(); other.degree() - self.degree()], + false => Vec::new(), + }; + for (pow, coeff) in other.iter() { + if *pow <= self.degree() { + self.coeffs[*pow] -= coeff; + } else { + upper_coeffs[*pow - self.degree() - 1] = -*coeff; + } + } + self.coeffs.extend(upper_coeffs); + } + } +} + +impl<'a, 'b, F: Field> Div<&'a DensePolynomial> for &'b DensePolynomial { + type Output = DensePolynomial; + + #[inline] + fn div(self, divisor: &'a DensePolynomial) -> DensePolynomial { + let a = DenseOrSparsePolynomial::from(self); + let b = DenseOrSparsePolynomial::from(divisor); + a.divide_with_q_and_r(&b).expect("division failed").0 + } +} + +impl<'a, 'b, F: Field> Mul for &'b DensePolynomial { + type Output = DensePolynomial; + + #[inline] + fn mul(self, elem: F) -> DensePolynomial { + if self.is_zero() || elem.is_zero() { + DensePolynomial::zero() + } else { + let mut result = self.clone(); + cfg_iter_mut!(result).for_each(|e| { + *e *= elem; + }); + result + } + } +} + +/// Performs O(nlogn) multiplication of polynomials if F is smooth. +impl<'a, 'b, F: FftField> Mul<&'a DensePolynomial> for &'b DensePolynomial { + type Output = DensePolynomial; + + #[inline] + fn mul(self, other: &'a DensePolynomial) -> DensePolynomial { + if self.is_zero() || other.is_zero() { + DensePolynomial::zero() + } else { + let domain = GeneralEvaluationDomain::new(self.coeffs.len() + other.coeffs.len()) + .expect("field is not smooth enough to construct domain"); + let mut self_evals = self.evaluate_over_domain_by_ref(domain); + let other_evals = other.evaluate_over_domain_by_ref(domain); + self_evals *= &other_evals; + self_evals.interpolate() + } + } +} + +impl Zero for DensePolynomial { + /// Returns the zero polynomial. + fn zero() -> Self { + Self { coeffs: Vec::new() } + } + + /// Checks if the given polynomial is zero. 
+ fn is_zero(&self) -> bool { + self.coeffs.is_empty() || self.coeffs.iter().all(|coeff| coeff.is_zero()) + } +} + +#[cfg(test)] +mod tests { + use crate::polynomial::univariate::*; + use crate::{EvaluationDomain, GeneralEvaluationDomain}; + use ark_ff::{Field, One, UniformRand, Zero}; + use ark_std::{rand::Rng, test_rng}; + use ark_test_curves::bls12_381::Fr; + + fn rand_sparse_poly(degree: usize, rng: &mut R) -> SparsePolynomial { + // Initialize coeffs so that its guaranteed to have a x^{degree} term + let mut coeffs = vec![(degree, Fr::rand(rng))]; + for i in 0..degree { + if !rng.gen_bool(0.8) { + coeffs.push((i, Fr::rand(rng))); + } + } + SparsePolynomial::from_coefficients_vec(coeffs) + } + + #[test] + fn double_polynomials_random() { + let rng = &mut test_rng(); + for degree in 0..70 { + let p = DensePolynomial::::rand(degree, rng); + let p_double = &p + &p; + let p_quad = &p_double + &p_double; + assert_eq!(&(&(&p + &p) + &p) + &p, p_quad); + } + } + + #[test] + fn add_polynomials() { + let rng = &mut test_rng(); + for a_degree in 0..70 { + for b_degree in 0..70 { + let p1 = DensePolynomial::::rand(a_degree, rng); + let p2 = DensePolynomial::::rand(b_degree, rng); + let res1 = &p1 + &p2; + let res2 = &p2 + &p1; + assert_eq!(res1, res2); + } + } + } + + #[test] + fn add_sparse_polynomials() { + let rng = &mut test_rng(); + for a_degree in 0..70 { + for b_degree in 0..70 { + let p1 = DensePolynomial::::rand(a_degree, rng); + let p2 = rand_sparse_poly(b_degree, rng); + let res = &p1 + &p2; + assert_eq!(res, &p1 + &Into::>::into(p2)); + } + } + } + + #[test] + fn add_assign_sparse_polynomials() { + let rng = &mut test_rng(); + for a_degree in 0..70 { + for b_degree in 0..70 { + let p1 = DensePolynomial::::rand(a_degree, rng); + let p2 = rand_sparse_poly(b_degree, rng); + + let mut res = p1.clone(); + res += &p2; + assert_eq!(res, &p1 + &Into::>::into(p2)); + } + } + } + + #[test] + fn add_polynomials_with_mul() { + let rng = &mut test_rng(); + for a_degree in 0..70 { + for b_degree in 0..70 { + let mut p1 = DensePolynomial::rand(a_degree, rng); + let p2 = DensePolynomial::rand(b_degree, rng); + let f = Fr::rand(rng); + let f_p2 = DensePolynomial::from_coefficients_vec( + p2.coeffs.iter().map(|c| f * c).collect(), + ); + let res2 = &f_p2 + &p1; + p1 += (f, &p2); + let res1 = p1; + assert_eq!(res1, res2); + } + } + } + + #[test] + fn sub_polynomials() { + let rng = &mut test_rng(); + for a_degree in 0..70 { + for b_degree in 0..70 { + let p1 = DensePolynomial::::rand(a_degree, rng); + let p2 = DensePolynomial::::rand(b_degree, rng); + let res1 = &p1 - &p2; + let res2 = &p2 - &p1; + assert_eq!(&res1 + &p2, p1); + assert_eq!(res1, -res2); + } + } + } + + #[test] + fn sub_sparse_polynomials() { + let rng = &mut test_rng(); + for a_degree in 0..70 { + for b_degree in 0..70 { + let p1 = DensePolynomial::::rand(a_degree, rng); + let p2 = rand_sparse_poly(b_degree, rng); + let res = &p1 - &p2; + assert_eq!(res, &p1 - &Into::>::into(p2)); + } + } + } + + #[test] + fn sub_assign_sparse_polynomials() { + let rng = &mut test_rng(); + for a_degree in 0..70 { + for b_degree in 0..70 { + let p1 = DensePolynomial::::rand(a_degree, rng); + let p2 = rand_sparse_poly(b_degree, rng); + + let mut res = p1.clone(); + res -= &p2; + assert_eq!(res, &p1 - &Into::>::into(p2)); + } + } + } + + #[test] + fn polynomial_additive_identity() { + // Test adding polynomials with its negative equals 0 + let mut rng = test_rng(); + for degree in 0..70 { + let poly = DensePolynomial::::rand(degree, &mut rng); + let neg = 
-poly.clone(); + let result = poly + neg; + assert!(result.is_zero()); + assert_eq!(result.degree(), 0); + + // Test with SubAssign trait + let poly = DensePolynomial::::rand(degree, &mut rng); + let mut result = poly.clone(); + result -= &poly; + assert!(result.is_zero()); + assert_eq!(result.degree(), 0); + } + } + + #[test] + fn divide_polynomials_fixed() { + let dividend = DensePolynomial::from_coefficients_slice(&[ + "4".parse().unwrap(), + "8".parse().unwrap(), + "5".parse().unwrap(), + "1".parse().unwrap(), + ]); + let divisor = DensePolynomial::from_coefficients_slice(&[Fr::one(), Fr::one()]); // Construct a monic linear polynomial. + let result = ÷nd / &divisor; + let expected_result = DensePolynomial::from_coefficients_slice(&[ + "4".parse().unwrap(), + "4".parse().unwrap(), + "1".parse().unwrap(), + ]); + assert_eq!(expected_result, result); + } + + #[test] + fn divide_polynomials_random() { + let rng = &mut test_rng(); + + for a_degree in 0..50 { + for b_degree in 0..50 { + let dividend = DensePolynomial::::rand(a_degree, rng); + let divisor = DensePolynomial::::rand(b_degree, rng); + if let Some((quotient, remainder)) = DenseOrSparsePolynomial::divide_with_q_and_r( + &(÷nd).into(), + &(&divisor).into(), + ) { + assert_eq!(dividend, &(&divisor * "ient) + &remainder) + } + } + } + } + + #[test] + fn evaluate_polynomials() { + let rng = &mut test_rng(); + for a_degree in 0..70 { + let p = DensePolynomial::rand(a_degree, rng); + let point: Fr = Fr::rand(rng); + let mut total = Fr::zero(); + for (i, coeff) in p.coeffs.iter().enumerate() { + total += &(point.pow(&[i as u64]) * coeff); + } + assert_eq!(p.evaluate(&point), total); + } + } + + #[test] + fn mul_random_element() { + let rng = &mut test_rng(); + for degree in 0..70 { + let a = DensePolynomial::::rand(degree, rng); + let e = Fr::rand(rng); + assert_eq!( + &a * e, + a.naive_mul(&DensePolynomial::from_coefficients_slice(&[e])) + ) + } + } + + #[test] + fn mul_polynomials_random() { + let rng = &mut test_rng(); + for a_degree in 0..70 { + for b_degree in 0..70 { + let a = DensePolynomial::::rand(a_degree, rng); + let b = DensePolynomial::::rand(b_degree, rng); + assert_eq!(&a * &b, a.naive_mul(&b)) + } + } + } + + #[test] + fn mul_by_vanishing_poly() { + let rng = &mut test_rng(); + for size in 1..10 { + let domain = GeneralEvaluationDomain::new(1 << size).unwrap(); + for degree in 0..70 { + let p = DensePolynomial::::rand(degree, rng); + let ans1 = p.mul_by_vanishing_poly(domain); + let ans2 = &p * &domain.vanishing_polynomial().into(); + assert_eq!(ans1, ans2); + } + } + } + + #[test] + fn test_leading_zero() { + let n = 10; + let rand_poly = DensePolynomial::rand(n, &mut test_rng()); + let coefficients = rand_poly.coeffs.clone(); + let leading_coefficient: Fr = coefficients[n]; + + let negative_leading_coefficient = -leading_coefficient; + let inverse_leading_coefficient = leading_coefficient.inverse().unwrap(); + + let mut inverse_coefficients = coefficients.clone(); + inverse_coefficients[n] = inverse_leading_coefficient; + + let mut negative_coefficients = coefficients; + negative_coefficients[n] = negative_leading_coefficient; + + let negative_poly = DensePolynomial::from_coefficients_vec(negative_coefficients); + let inverse_poly = DensePolynomial::from_coefficients_vec(inverse_coefficients); + + let x = &inverse_poly * &rand_poly; + assert_eq!(x.degree(), 2 * n); + assert!(!x.coeffs.last().unwrap().is_zero()); + + let y = &negative_poly + &rand_poly; + assert_eq!(y.degree(), n - 1); + 
assert!(!y.coeffs.last().unwrap().is_zero()); + } +} diff --git a/arkworks/algebra/poly/src/polynomial/univariate/mod.rs b/arkworks/algebra/poly/src/polynomial/univariate/mod.rs new file mode 100644 index 00000000..ae415d29 --- /dev/null +++ b/arkworks/algebra/poly/src/polynomial/univariate/mod.rs @@ -0,0 +1,164 @@ +//! Work with sparse and dense polynomials. + +use crate::{EvaluationDomain, Evaluations, Polynomial, UVPolynomial}; +use ark_ff::{FftField, Field, Zero}; +use ark_std::{borrow::Cow, convert::TryInto, vec::Vec}; +use DenseOrSparsePolynomial::*; + +mod dense; +mod sparse; + +pub use dense::DensePolynomial; +pub use sparse::SparsePolynomial; + +/// Represents either a sparse polynomial or a dense one. +#[derive(Clone)] +pub enum DenseOrSparsePolynomial<'a, F: Field> { + /// Represents the case where `self` is a sparse polynomial + SPolynomial(Cow<'a, SparsePolynomial>), + /// Represents the case where `self` is a dense polynomial + DPolynomial(Cow<'a, DensePolynomial>), +} + +impl<'a, F: 'a + Field> From> for DenseOrSparsePolynomial<'a, F> { + fn from(other: DensePolynomial) -> Self { + DPolynomial(Cow::Owned(other)) + } +} + +impl<'a, F: 'a + Field> From<&'a DensePolynomial> for DenseOrSparsePolynomial<'a, F> { + fn from(other: &'a DensePolynomial) -> Self { + DPolynomial(Cow::Borrowed(other)) + } +} + +impl<'a, F: 'a + Field> From> for DenseOrSparsePolynomial<'a, F> { + fn from(other: SparsePolynomial) -> Self { + SPolynomial(Cow::Owned(other)) + } +} + +impl<'a, F: Field> From<&'a SparsePolynomial> for DenseOrSparsePolynomial<'a, F> { + fn from(other: &'a SparsePolynomial) -> Self { + SPolynomial(Cow::Borrowed(other)) + } +} + +impl<'a, F: Field> Into> for DenseOrSparsePolynomial<'a, F> { + fn into(self) -> DensePolynomial { + match self { + DPolynomial(p) => p.into_owned(), + SPolynomial(p) => p.into_owned().into(), + } + } +} + +impl<'a, F: 'a + Field> TryInto> for DenseOrSparsePolynomial<'a, F> { + type Error = (); + + fn try_into(self) -> Result, ()> { + match self { + SPolynomial(p) => Ok(p.into_owned()), + _ => Err(()), + } + } +} + +impl<'a, F: Field> DenseOrSparsePolynomial<'a, F> { + /// Checks if the given polynomial is zero. + pub fn is_zero(&self) -> bool { + match self { + SPolynomial(s) => s.is_zero(), + DPolynomial(d) => d.is_zero(), + } + } + + /// Return the degree of `self. + pub fn degree(&self) -> usize { + match self { + SPolynomial(s) => s.degree(), + DPolynomial(d) => d.degree(), + } + } + + #[inline] + fn leading_coefficient(&self) -> Option<&F> { + match self { + SPolynomial(p) => p.last().map(|(_, c)| c), + DPolynomial(p) => p.last(), + } + } + + #[inline] + fn iter_with_index(&self) -> Vec<(usize, F)> { + match self { + SPolynomial(p) => p.to_vec(), + DPolynomial(p) => p.iter().cloned().enumerate().collect(), + } + } + + /// Divide self by another (sparse or dense) polynomial, and returns the + /// quotient and remainder. + pub fn divide_with_q_and_r( + &self, + divisor: &Self, + ) -> Option<(DensePolynomial, DensePolynomial)> { + if self.is_zero() { + Some((DensePolynomial::zero(), DensePolynomial::zero())) + } else if divisor.is_zero() { + panic!("Dividing by zero polynomial") + } else if self.degree() < divisor.degree() { + Some((DensePolynomial::zero(), self.clone().into())) + } else { + // Now we know that self.degree() >= divisor.degree(); + let mut quotient = vec![F::zero(); self.degree() - divisor.degree() + 1]; + let mut remainder: DensePolynomial = self.clone().into(); + // Can unwrap here because we know self is not zero. 
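+            // Standard long division: each pass cancels the current leading term
+            // of `remainder` by subtracting
+            //   (remainder's leading coeff / divisor's leading coeff) * x^(degree gap) * divisor,
+            // and records that factor as the corresponding quotient coefficient.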
+ let divisor_leading_inv = divisor.leading_coefficient().unwrap().inverse().unwrap(); + while !remainder.is_zero() && remainder.degree() >= divisor.degree() { + let cur_q_coeff = *remainder.coeffs.last().unwrap() * divisor_leading_inv; + let cur_q_degree = remainder.degree() - divisor.degree(); + quotient[cur_q_degree] = cur_q_coeff; + + for (i, div_coeff) in divisor.iter_with_index() { + remainder[cur_q_degree + i] -= &(cur_q_coeff * div_coeff); + } + while let Some(true) = remainder.coeffs.last().map(|c| c.is_zero()) { + remainder.coeffs.pop(); + } + } + Some((DensePolynomial::from_coefficients_vec(quotient), remainder)) + } + } +} +impl<'a, F: 'a + FftField> DenseOrSparsePolynomial<'a, F> { + /// Construct `Evaluations` by evaluating a polynomial over the domain + /// `domain`. + pub fn evaluate_over_domain>( + poly: impl Into, + domain: D, + ) -> Evaluations { + let poly = poly.into(); + poly.eval_over_domain_helper(domain) + } + + fn eval_over_domain_helper>(self, domain: D) -> Evaluations { + match self { + SPolynomial(Cow::Borrowed(s)) => { + let evals = domain.elements().map(|elem| s.evaluate(&elem)).collect(); + Evaluations::from_vec_and_domain(evals, domain) + } + SPolynomial(Cow::Owned(s)) => { + let evals = domain.elements().map(|elem| s.evaluate(&elem)).collect(); + Evaluations::from_vec_and_domain(evals, domain) + } + DPolynomial(Cow::Borrowed(d)) => { + Evaluations::from_vec_and_domain(domain.fft(&d.coeffs), domain) + } + DPolynomial(Cow::Owned(mut d)) => { + domain.fft_in_place(&mut d.coeffs); + Evaluations::from_vec_and_domain(d.coeffs, domain) + } + } + } +} diff --git a/arkworks/algebra/poly/src/polynomial/univariate/sparse.rs b/arkworks/algebra/poly/src/polynomial/univariate/sparse.rs new file mode 100644 index 00000000..3e68dd32 --- /dev/null +++ b/arkworks/algebra/poly/src/polynomial/univariate/sparse.rs @@ -0,0 +1,509 @@ +//! A sparse polynomial represented in coefficient form. +use crate::polynomial::Polynomial; +use crate::univariate::{DenseOrSparsePolynomial, DensePolynomial}; +use crate::{EvaluationDomain, Evaluations, UVPolynomial}; +use ark_ff::{FftField, Field, Zero}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError}; +use ark_std::{ + collections::BTreeMap, + fmt, + io::{Read, Write}, + ops::{Add, AddAssign, Deref, DerefMut, Mul, Neg, SubAssign}, + vec::Vec, +}; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +/// Stores a sparse polynomial in coefficient form. +#[derive(Clone, PartialEq, Eq, Hash, Default, CanonicalSerialize, CanonicalDeserialize)] +pub struct SparsePolynomial { + /// The coefficient a_i of `x^i` is stored as (i, a_i) in `self.coeffs`. + /// the entries in `self.coeffs` *must* be sorted in increasing order of + /// `i`. + coeffs: Vec<(usize, F)>, +} + +impl fmt::Debug for SparsePolynomial { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + for (i, coeff) in self.coeffs.iter().filter(|(_, c)| !c.is_zero()) { + if *i == 0 { + write!(f, "\n{:?}", coeff)?; + } else if *i == 1 { + write!(f, " + \n{:?} * x", coeff)?; + } else { + write!(f, " + \n{:?} * x^{}", coeff, i)?; + } + } + Ok(()) + } +} + +impl Deref for SparsePolynomial { + type Target = [(usize, F)]; + + fn deref(&self) -> &[(usize, F)] { + &self.coeffs + } +} + +impl DerefMut for SparsePolynomial { + fn deref_mut(&mut self) -> &mut [(usize, F)] { + &mut self.coeffs + } +} + +impl Polynomial for SparsePolynomial { + type Point = F; + + /// Returns the degree of the polynomial. 
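+    ///
+    /// # Example
+    ///
+    /// An illustrative sketch (assuming the bls12_381 `Fr` field from
+    /// `ark-test-curves`): for `3 + 5*x^17` the degree is the largest stored
+    /// exponent.
+    ///
+    /// ```
+    /// use ark_poly::{univariate::SparsePolynomial, Polynomial};
+    /// use ark_test_curves::bls12_381::Fr;
+    ///
+    /// let p = SparsePolynomial::from_coefficients_slice(
+    ///     &[(0, Fr::from(3u64)), (17, Fr::from(5u64))],
+    /// );
+    /// assert_eq!(p.degree(), 17);
+    /// ```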
+ fn degree(&self) -> usize { + if self.is_zero() { + 0 + } else { + assert!(self.coeffs.last().map_or(false, |(_, c)| !c.is_zero())); + self.coeffs.last().unwrap().0 + } + } + + /// Evaluates `self` at the given `point` in the field. + fn evaluate(&self, point: &F) -> F { + if self.is_zero() { + return F::zero(); + } + + // We need floor(log2(deg)) + 1 powers, starting from the 0th power p^2^0 = p + let num_powers = 0usize.leading_zeros() - self.degree().leading_zeros(); + let mut powers_of_2 = Vec::with_capacity(num_powers as usize); + + let mut p = *point; + powers_of_2.push(p); + for _ in 1..num_powers { + p.square_in_place(); + powers_of_2.push(p); + } + // compute all coeff * point^{i} and then sum the results + let total = self + .coeffs + .iter() + .map(|(i, c)| { + debug_assert_eq!( + F::pow_with_table(&powers_of_2[..], &[*i as u64]).unwrap(), + point.pow(&[*i as u64]), + "pows not equal" + ); + *c * F::pow_with_table(&powers_of_2[..], &[*i as u64]).unwrap() + }) + .sum(); + total + } +} + +impl Add for SparsePolynomial { + type Output = SparsePolynomial; + + fn add(self, other: SparsePolynomial) -> Self { + &self + &other + } +} + +impl<'a, 'b, F: Field> Add<&'a SparsePolynomial> for &'b SparsePolynomial { + type Output = SparsePolynomial; + + fn add(self, other: &'a SparsePolynomial) -> SparsePolynomial { + if self.is_zero() { + return other.clone(); + } else if other.is_zero() { + return self.clone(); + } + // Single pass add algorithm (merging two sorted sets) + let mut result = SparsePolynomial::::zero(); + // our current index in each vector + let mut self_index = 0; + let mut other_index = 0; + loop { + // if we've reached the end of one vector, just append the other vector to our result. + if self_index == self.coeffs.len() && other_index == other.coeffs.len() { + return result; + } else if self_index == self.coeffs.len() { + result.append_coeffs(&other.coeffs[other_index..]); + return result; + } else if other_index == other.coeffs.len() { + result.append_coeffs(&self.coeffs[self_index..]); + return result; + } + + // Get the current degree / coeff for each + let (self_term_degree, self_term_coeff) = self.coeffs[self_index]; + let (other_term_degree, other_term_coeff) = other.coeffs[other_index]; + // add the lower degree term to our sorted set. 
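+            // Invariant: `result.coeffs` stays sorted by degree, because we always
+            // take the smaller of the two current degrees; equal degrees are
+            // combined, and dropped when their sum is zero.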
+ if self_term_degree < other_term_degree { + result.coeffs.push((self_term_degree, self_term_coeff)); + self_index += 1; + } else if self_term_degree == other_term_degree { + let term_sum = self_term_coeff + other_term_coeff; + if !term_sum.is_zero() { + result + .coeffs + .push((self_term_degree, self_term_coeff + other_term_coeff)); + } + self_index += 1; + other_index += 1; + } else { + result.coeffs.push((other_term_degree, other_term_coeff)); + other_index += 1; + } + } + } +} + +impl<'a, 'b, F: Field> AddAssign<&'a SparsePolynomial> for SparsePolynomial { + // TODO: Reduce number of clones + fn add_assign(&mut self, other: &'a SparsePolynomial) { + self.coeffs = (self.clone() + other.clone()).coeffs; + } +} + +impl<'a, 'b, F: Field> AddAssign<(F, &'a SparsePolynomial)> for SparsePolynomial { + // TODO: Reduce number of clones + fn add_assign(&mut self, (f, other): (F, &'a SparsePolynomial)) { + self.coeffs = (self.clone() + other.clone()).coeffs; + for i in 0..self.coeffs.len() { + self.coeffs[i].1 *= f; + } + } +} + +impl Neg for SparsePolynomial { + type Output = SparsePolynomial; + + #[inline] + fn neg(mut self) -> SparsePolynomial { + for (_, coeff) in &mut self.coeffs { + *coeff = -*coeff; + } + self + } +} + +impl<'a, 'b, F: Field> SubAssign<&'a SparsePolynomial> for SparsePolynomial { + // TODO: Reduce number of clones + #[inline] + fn sub_assign(&mut self, other: &'a SparsePolynomial) { + let self_copy = -self.clone(); + self.coeffs = (self_copy + other.clone()).coeffs; + } +} + +impl<'a, 'b, F: Field> Mul for &'b SparsePolynomial { + type Output = SparsePolynomial; + + #[inline] + fn mul(self, elem: F) -> SparsePolynomial { + if self.is_zero() || elem.is_zero() { + SparsePolynomial::zero() + } else { + let mut result = self.clone(); + cfg_iter_mut!(result).for_each(|e| { + (*e).1 *= elem; + }); + result + } + } +} + +impl Zero for SparsePolynomial { + /// Returns the zero polynomial. + fn zero() -> Self { + Self { coeffs: Vec::new() } + } + + /// Checks if the given polynomial is zero. + fn is_zero(&self) -> bool { + self.coeffs.is_empty() || self.coeffs.iter().all(|(_, c)| c.is_zero()) + } +} + +impl SparsePolynomial { + /// Constructs a new polynomial from a list of coefficients. + pub fn from_coefficients_slice(coeffs: &[(usize, F)]) -> Self { + Self::from_coefficients_vec(coeffs.to_vec()) + } + + /// Constructs a new polynomial from a list of coefficients. + pub fn from_coefficients_vec(mut coeffs: Vec<(usize, F)>) -> Self { + // While there are zeros at the end of the coefficient vector, pop them off. + while coeffs.last().map_or(false, |(_, c)| c.is_zero()) { + coeffs.pop(); + } + // Ensure that coeffs are in ascending order. + coeffs.sort_by(|(c1, _), (c2, _)| c1.cmp(c2)); + // Check that either the coefficients vec is empty or that the last coeff is + // non-zero. + assert!(coeffs.last().map_or(true, |(_, c)| !c.is_zero())); + + Self { coeffs } + } + + /// Perform a naive n^2 multiplication of `self` by `other`. + #[allow(clippy::or_fun_call)] + pub fn mul(&self, other: &Self) -> Self { + if self.is_zero() || other.is_zero() { + SparsePolynomial::zero() + } else { + let mut result = BTreeMap::new(); + for (i, self_coeff) in self.coeffs.iter() { + for (j, other_coeff) in other.coeffs.iter() { + let cur_coeff = result.entry(i + j).or_insert(F::zero()); + *cur_coeff += &(*self_coeff * other_coeff); + } + } + let result = result.into_iter().collect::>(); + SparsePolynomial::from_coefficients_vec(result) + } + } + + // append append_coeffs to self. 
+ // Correctness relies on the lowest degree term in append_coeffs + // being higher than self.degree() + fn append_coeffs(&mut self, append_coeffs: &[(usize, F)]) { + assert!(append_coeffs.len() == 0 || self.degree() < append_coeffs[0].0); + for (i, elem) in append_coeffs.iter() { + self.coeffs.push((*i, *elem)); + } + } +} + +impl SparsePolynomial { + /// Evaluate `self` over `domain`. + pub fn evaluate_over_domain_by_ref>( + &self, + domain: D, + ) -> Evaluations { + let poly: DenseOrSparsePolynomial<'_, F> = self.into(); + DenseOrSparsePolynomial::::evaluate_over_domain(poly, domain) + } + + /// Evaluate `self` over `domain`. + pub fn evaluate_over_domain>(self, domain: D) -> Evaluations { + let poly: DenseOrSparsePolynomial<'_, F> = self.into(); + DenseOrSparsePolynomial::::evaluate_over_domain(poly, domain) + } +} + +impl Into> for SparsePolynomial { + fn into(self) -> DensePolynomial { + let mut other = vec![F::zero(); self.degree() + 1]; + for (i, coeff) in self.coeffs { + other[i] = coeff; + } + DensePolynomial::from_coefficients_vec(other) + } +} + +impl From> for SparsePolynomial { + fn from(dense_poly: DensePolynomial) -> SparsePolynomial { + let coeffs = dense_poly.coeffs(); + let mut sparse_coeffs = Vec::<(usize, F)>::new(); + for i in 0..coeffs.len() { + if !coeffs[i].is_zero() { + sparse_coeffs.push((i, coeffs[i])); + } + } + SparsePolynomial::from_coefficients_vec(sparse_coeffs) + } +} + +#[cfg(test)] +mod tests { + use crate::polynomial::Polynomial; + use crate::univariate::{DensePolynomial, SparsePolynomial}; + use crate::{EvaluationDomain, GeneralEvaluationDomain}; + use ark_ff::{UniformRand, Zero}; + use ark_std::cmp::max; + use ark_std::ops::Mul; + use ark_std::rand::Rng; + use ark_std::test_rng; + use ark_test_curves::bls12_381::Fr; + + // probability of rand sparse polynomial having a particular coefficient be 0 + const ZERO_COEFF_PROBABILITY: f64 = 0.8f64; + + fn rand_sparse_poly(degree: usize, rng: &mut R) -> SparsePolynomial { + // Initialize coeffs so that its guaranteed to have a x^{degree} term + let mut coeffs = vec![(degree, Fr::rand(rng))]; + for i in 0..degree { + if !rng.gen_bool(ZERO_COEFF_PROBABILITY) { + coeffs.push((i, Fr::rand(rng))); + } + } + SparsePolynomial::from_coefficients_vec(coeffs) + } + + #[test] + fn evaluate_at_point() { + let mut rng = test_rng(); + // Test evaluation at point by comparing against DensePolynomial + for degree in 0..60 { + let sparse_poly = rand_sparse_poly(degree, &mut rng); + let dense_poly: DensePolynomial = sparse_poly.clone().into(); + let pt = Fr::rand(&mut rng); + assert_eq!(sparse_poly.evaluate(&pt), dense_poly.evaluate(&pt)); + } + } + + #[test] + fn add_polynomial() { + // Test adding polynomials by comparing against dense polynomial + let mut rng = test_rng(); + for degree_a in 0..20 { + let sparse_poly_a = rand_sparse_poly(degree_a, &mut rng); + let dense_poly_a: DensePolynomial = sparse_poly_a.clone().into(); + for degree_b in 0..20 { + let sparse_poly_b = rand_sparse_poly(degree_b, &mut rng); + let dense_poly_b: DensePolynomial = sparse_poly_b.clone().into(); + + // Test Add trait + let sparse_sum = sparse_poly_a.clone() + sparse_poly_b.clone(); + assert_eq!( + sparse_sum.degree(), + max(degree_a, degree_b), + "degree_a = {}, degree_b = {}", + degree_a, + degree_b + ); + let actual_dense_sum: DensePolynomial = sparse_sum.into(); + let expected_dense_sum = dense_poly_a.clone() + dense_poly_b; + assert_eq!( + actual_dense_sum, expected_dense_sum, + "degree_a = {}, degree_b = {}", + degree_a, degree_b + ); + 
// Test AddAssign Trait + let mut sparse_add_assign_sum = sparse_poly_a.clone(); + sparse_add_assign_sum += &sparse_poly_b; + let actual_add_assign_dense_sum: DensePolynomial = sparse_add_assign_sum.into(); + assert_eq!( + actual_add_assign_dense_sum, expected_dense_sum, + "degree_a = {}, degree_b = {}", + degree_a, degree_b + ); + } + } + } + + #[test] + fn polynomial_additive_identity() { + // Test adding polynomials with its negative equals 0 + let mut rng = test_rng(); + for degree in 0..70 { + // Test with Neg trait + let sparse_poly = rand_sparse_poly(degree, &mut rng); + let neg = -sparse_poly.clone(); + assert!((sparse_poly + neg).is_zero()); + + // Test with SubAssign trait + let sparse_poly = rand_sparse_poly(degree, &mut rng); + let mut result = sparse_poly.clone(); + result -= &sparse_poly; + assert!(result.is_zero()); + } + } + + #[test] + fn mul_random_element() { + let rng = &mut test_rng(); + for degree in 0..20 { + let a = rand_sparse_poly(degree, rng); + let e = Fr::rand(rng); + assert_eq!( + &a * e, + a.mul(&SparsePolynomial::from_coefficients_slice(&[(0, e)])) + ) + } + } + + #[test] + fn mul_polynomial() { + // Test multiplying polynomials over their domains, and over the native representation. + // The expected result is obtained by comparing against dense polynomial + let mut rng = test_rng(); + for degree_a in 0..20 { + let sparse_poly_a = rand_sparse_poly(degree_a, &mut rng); + let dense_poly_a: DensePolynomial = sparse_poly_a.clone().into(); + for degree_b in 0..20 { + let sparse_poly_b = rand_sparse_poly(degree_b, &mut rng); + let dense_poly_b: DensePolynomial = sparse_poly_b.clone().into(); + + // Test multiplying the polynomials over their native representation + let sparse_prod = sparse_poly_a.mul(&sparse_poly_b); + assert_eq!( + sparse_prod.degree(), + degree_a + degree_b, + "degree_a = {}, degree_b = {}", + degree_a, + degree_b + ); + let dense_prod = dense_poly_a.naive_mul(&dense_poly_b); + assert_eq!(sparse_prod.degree(), dense_prod.degree()); + assert_eq!( + sparse_prod, + SparsePolynomial::::from(dense_prod), + "degree_a = {}, degree_b = {}", + degree_a, + degree_b + ); + + // Test multiplying the polynomials over their evaluations and interpolating + let domain = GeneralEvaluationDomain::new(sparse_prod.degree() + 1).unwrap(); + let poly_a_evals = sparse_poly_a.evaluate_over_domain_by_ref(domain); + let poly_b_evals = sparse_poly_b.evaluate_over_domain_by_ref(domain); + let poly_prod_evals = sparse_prod.evaluate_over_domain_by_ref(domain); + assert_eq!(poly_a_evals.mul(&poly_b_evals), poly_prod_evals); + } + } + } + + #[test] + fn evaluate_over_domain() { + // Test that polynomial evaluation over a domain, and interpolation returns the same poly. 
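Before the randomized test below, here is that same round trip distilled to a single fixed polynomial (an illustrative sketch using only APIs defined in this diff):

```rust
use ark_ff::One;
use ark_poly::{
    univariate::{DensePolynomial, SparsePolynomial},
    EvaluationDomain, GeneralEvaluationDomain,
};
use ark_test_curves::bls12_381::Fr;

fn main() {
    // p(x) = 1 + x^4 has degree 4, so a domain of size 8 >= 5 determines it uniquely.
    let p = SparsePolynomial::from_coefficients_vec(vec![(0, Fr::one()), (4, Fr::one())]);
    let domain = GeneralEvaluationDomain::<Fr>::new(8).unwrap();

    // Evaluate over the domain, then interpolate back into coefficient form.
    let evals = p.evaluate_over_domain_by_ref(domain);
    let interpolated: DensePolynomial<Fr> = evals.interpolate();

    // Interpolation recovers the same polynomial, in dense form.
    let dense: DensePolynomial<Fr> = p.into();
    assert_eq!(interpolated, dense);
}
```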
+        let mut rng = test_rng();
+        for poly_degree_dim in 0..5 {
+            let poly_degree = (1 << poly_degree_dim) - 1;
+            let sparse_poly = rand_sparse_poly(poly_degree, &mut rng);
+
+            for domain_dim in poly_degree_dim..(poly_degree_dim + 2) {
+                let domain_size = 1 << domain_dim;
+                let domain = GeneralEvaluationDomain::new(domain_size).unwrap();
+
+                let sparse_evals = sparse_poly.evaluate_over_domain_by_ref(domain);
+
+                // Test that interpolation works, by checking against DensePolynomial.
+                let dense_poly: DensePolynomial<Fr> = sparse_poly.clone().into();
+                let dense_evals = dense_poly.clone().evaluate_over_domain(domain);
+                assert_eq!(
+                    sparse_evals.clone().interpolate(),
+                    dense_evals.clone().interpolate(),
+                    "poly_degree_dim = {}, domain_dim = {}",
+                    poly_degree_dim,
+                    domain_dim
+                );
+                assert_eq!(
+                    sparse_evals.interpolate(),
+                    dense_poly,
+                    "poly_degree_dim = {}, domain_dim = {}",
+                    poly_degree_dim,
+                    domain_dim
+                );
+                // Consistency check that the dense polynomial's interpolation is correct.
+                assert_eq!(
+                    dense_evals.interpolate(),
+                    dense_poly,
+                    "poly_degree_dim = {}, domain_dim = {}",
+                    poly_degree_dim,
+                    domain_dim
+                );
+            }
+        }
+    }
+}
diff --git a/arkworks/algebra/poly/src/test.rs b/arkworks/algebra/poly/src/test.rs
new file mode 100644
index 00000000..58483f01
--- /dev/null
+++ b/arkworks/algebra/poly/src/test.rs
@@ -0,0 +1,57 @@
+use crate::domain::*;
+use ark_ff::{PrimeField, UniformRand};
+use ark_std::test_rng;
+use ark_test_curves::bls12_381::{Fr, G1Projective};
+use ark_test_curves::bn384_small_two_adicity::Fr as BNFr;
+
+// Test that the FFT and iFFT (and their coset variants) on each domain
+// compose to the identity.
+#[test]
+fn fft_composition() {
+    fn test_fft_composition<
+        F: PrimeField,
+        T: DomainCoeff<F> + UniformRand + core::fmt::Debug + Eq,
+        R: ark_std::rand::Rng,
+        D: EvaluationDomain<F>,
+    >(
+        rng: &mut R,
+        max_coeffs: usize,
+    ) {
+        for coeffs in 0..max_coeffs {
+            let coeffs = 1 << coeffs;
+
+            let domain = D::new(coeffs).unwrap();
+
+            let mut v = vec![];
+            for _ in 0..coeffs {
+                v.push(T::rand(rng));
+            }
+            // Fill up with zeros.
+            v.resize(domain.size(), T::zero());
+            let mut v2 = v.clone();
+
+            domain.ifft_in_place(&mut v2);
+            domain.fft_in_place(&mut v2);
+            assert_eq!(v, v2, "ifft(fft(.)) != identity");
+
+            domain.fft_in_place(&mut v2);
+            domain.ifft_in_place(&mut v2);
+            assert_eq!(v, v2, "fft(ifft(.)) != identity");
+
+            domain.coset_ifft_in_place(&mut v2);
+            domain.coset_fft_in_place(&mut v2);
+            assert_eq!(v, v2, "coset_fft(coset_ifft(.)) != identity");
+
+            domain.coset_fft_in_place(&mut v2);
+            domain.coset_ifft_in_place(&mut v2);
+            assert_eq!(v, v2, "coset_ifft(coset_fft(.)) != identity");
+        }
+    }
+
+    let rng = &mut test_rng();
+
+    test_fft_composition::<Fr, Fr, _, GeneralEvaluationDomain<Fr>>(rng, 10);
+    test_fft_composition::<Fr, G1Projective, _, GeneralEvaluationDomain<Fr>>(rng, 10);
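Each identity the harness checks, e.g. `ifft(fft(v)) == v`, can be seen on a single concrete vector once the input is padded to the domain size; a minimal illustrative sketch using the `GeneralEvaluationDomain` from this diff:

```rust
use ark_ff::{UniformRand, Zero};
use ark_poly::{EvaluationDomain, GeneralEvaluationDomain};
use ark_std::test_rng;
use ark_test_curves::bls12_381::Fr;

fn main() {
    let rng = &mut test_rng();
    let domain = GeneralEvaluationDomain::<Fr>::new(4).unwrap();

    // Random coefficients, padded with zeros up to the domain size.
    let mut v: Vec<Fr> = (0..3).map(|_| Fr::rand(rng)).collect();
    v.resize(domain.size(), Fr::zero());
    let original = v.clone();

    // FFT followed by inverse FFT is the identity on the padded vector.
    domain.fft_in_place(&mut v);
    domain.ifft_in_place(&mut v);
    assert_eq!(v, original);

    // The coset variants compose to the identity as well.
    domain.coset_fft_in_place(&mut v);
    domain.coset_ifft_in_place(&mut v);
    assert_eq!(v, original);
}
```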
+    // This will result in a mixed-radix domain being used.
+    test_fft_composition::<BNFr, BNFr, _, GeneralEvaluationDomain<BNFr>>(rng, 12);
+}
diff --git a/arkworks/algebra/rustfmt.toml b/arkworks/algebra/rustfmt.toml
new file mode 100644
index 00000000..71712138
--- /dev/null
+++ b/arkworks/algebra/rustfmt.toml
@@ -0,0 +1,9 @@
+reorder_imports = true
+wrap_comments = true
+normalize_comments = true
+use_try_shorthand = true
+match_block_trailing_comma = true
+use_field_init_shorthand = true
+edition = "2018"
+condense_wildcard_suffixes = true
+merge_imports = true
diff --git a/arkworks/algebra/scripts/install-hook.sh b/arkworks/algebra/scripts/install-hook.sh
new file mode 100755
index 00000000..eafcf818
--- /dev/null
+++ b/arkworks/algebra/scripts/install-hook.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+# This script will install the provided directory ../.hooks as the hook
+# directory for the present repo. See there for hooks, including a pre-commit
+# hook that runs rustfmt on files before a commit.
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+HOOKS_DIR="${DIR}/../.hooks"
+
+git config core.hooksPath "$HOOKS_DIR"
diff --git a/arkworks/algebra/scripts/linkify_changelog.py b/arkworks/algebra/scripts/linkify_changelog.py
new file mode 100644
index 00000000..f6f018b0
--- /dev/null
+++ b/arkworks/algebra/scripts/linkify_changelog.py
@@ -0,0 +1,30 @@
+import fileinput
+import os
+import re
+import sys
+
+# Set this to the name of the repo, if you don't want it to be read from the filesystem.
+# It assumes the changelog file is in the root of the repo.
+repo_name = ""
+
+# This script goes through the provided file, and replaces any "- \#<number>"
+# with the valid markdown-formatted link to it, e.g.
+# "- [\#<number>](https://github.com/arkworks-rs/template/pull/<number>)".
+# Note that if the number is for an issue, github will auto-redirect you when you click the link.
+# It is safe to run the script multiple times in succession.
+# +# Example usage $ python3 linkify_changelog.py ../CHANGELOG.md +changelog_path = sys.argv[1] +if repo_name == "": + path = os.path.abspath(changelog_path) + components = path.split(os.path.sep) + repo_name = components[-2] + +for line in fileinput.input(inplace=True): + line = re.sub( + r"\- #([0-9]*)", + r"- [\#\1](https://github.com/arkworks-rs/" + repo_name + r"/pull/\1)", + line.rstrip(), + ) + # edits the current file + print(line) diff --git a/arkworks/algebra/scripts/test_vectors.py b/arkworks/algebra/scripts/test_vectors.py new file mode 100644 index 00000000..31c575fb --- /dev/null +++ b/arkworks/algebra/scripts/test_vectors.py @@ -0,0 +1,51 @@ +def generate_from_bytes_mod_order_test_vector(modulus): + def gen_vector(number): + byte_arr = convert_int_to_byte_vec(number) + # s = str(number % modulus) + # return "(" + byte_arr + ", \"" + s + "\")," + return byte_arr + "," + + data = ["vec!["] + + small_values_to_test = [0, 1, 255, 256, 256 * 256 + 255] + modulus_bits = int((len(bin(modulus)[2:]) + 7) / 8) * 8 + values_to_test = small_values_to_test + [ + modulus >> 8, + (modulus >> 8) + 1, + modulus - 1, + modulus, + modulus + 1, + modulus * 2, + modulus * 256, + 17 + (1 << modulus_bits), + 19 + (1 << modulus_bits) + modulus, + 81 + (1 << modulus_bits) * 256 + modulus, + ] + + for i in values_to_test: + data += ["// " + str(i)] + data += [gen_vector(i)] + + data += ["];"] + return "\n".join(data) + + +def convert_int_to_byte_vec(number): + s = bin(number)[2:] + num_bytes = int((len(s) + 7) / 8) + s = s.zfill(num_bytes * 8) + + byte_arr = [] + for i in range(num_bytes): + byte = s[i * 8 : (i + 1) * 8] + i = int(byte, 2) + byte_arr += [str(i) + "u8"] + + data = ", ".join(byte_arr) + return "vec![" + data + "]" + + +bls12_fr_mod = ( + 52435875175126190479447740508185965837690552500527637822603658699938581184513 +) +print(generate_from_bytes_mod_order_test_vector(bls12_fr_mod)) diff --git a/arkworks/algebra/serialize-derive/Cargo.toml b/arkworks/algebra/serialize-derive/Cargo.toml new file mode 100644 index 00000000..79cbc0c8 --- /dev/null +++ b/arkworks/algebra/serialize-derive/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "ark-serialize-derive" +version = "0.3.0" +authors = [ "arkworks Contributors" ] +description = "A library for deriving serialization traits for the arkworks ecosystem" +homepage = "https://arkworks.rs" +repository = "https://github.com/arkworks-rs/algebra" +documentation = "https://docs.rs/algebra/" +keywords = ["cryptography", "finite-fields", "elliptic-curves", "serialization"] +categories = ["cryptography"] +include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +license = "MIT/Apache-2.0" +edition = "2018" + +################################# Dependencies ################################ + +[lib] +proc-macro = true + +[dependencies] +proc-macro2 = "1.0" +syn = "1.0" +quote = "1.0" diff --git a/arkworks/algebra/serialize-derive/LICENSE-APACHE b/arkworks/algebra/serialize-derive/LICENSE-APACHE new file mode 120000 index 00000000..965b606f --- /dev/null +++ b/arkworks/algebra/serialize-derive/LICENSE-APACHE @@ -0,0 +1 @@ +../LICENSE-APACHE \ No newline at end of file diff --git a/arkworks/algebra/serialize-derive/LICENSE-MIT b/arkworks/algebra/serialize-derive/LICENSE-MIT new file mode 120000 index 00000000..76219eb7 --- /dev/null +++ b/arkworks/algebra/serialize-derive/LICENSE-MIT @@ -0,0 +1 @@ +../LICENSE-MIT \ No newline at end of file diff --git a/arkworks/algebra/serialize-derive/src/lib.rs 
b/arkworks/algebra/serialize-derive/src/lib.rs new file mode 100644 index 00000000..c0e13487 --- /dev/null +++ b/arkworks/algebra/serialize-derive/src/lib.rs @@ -0,0 +1,266 @@ +#![warn(unused, future_incompatible, nonstandard_style, rust_2018_idioms)] +#![forbid(unsafe_code)] + +use proc_macro2::TokenStream; +use syn::{parse_macro_input, Data, DeriveInput, Index, Type}; + +use quote::{quote, ToTokens}; + +#[proc_macro_derive(CanonicalSerialize)] +pub fn derive_canonical_serialize(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let ast = parse_macro_input!(input as DeriveInput); + proc_macro::TokenStream::from(impl_canonical_serialize(&ast)) +} + +fn impl_serialize_field( + serialize_body: &mut Vec, + serialized_size_body: &mut Vec, + serialize_uncompressed_body: &mut Vec, + serialize_unchecked_body: &mut Vec, + uncompressed_size_body: &mut Vec, + idents: &mut Vec>, + ty: &Type, +) { + // Check if type is a tuple. + match ty { + Type::Tuple(tuple) => { + for (i, elem_ty) in tuple.elems.iter().enumerate() { + let index = Index::from(i); + idents.push(Box::new(index)); + impl_serialize_field( + serialize_body, + serialized_size_body, + serialize_uncompressed_body, + serialize_unchecked_body, + uncompressed_size_body, + idents, + elem_ty, + ); + idents.pop(); + } + } + _ => { + serialize_body + .push(quote! { CanonicalSerialize::serialize(&self.#(#idents).*, &mut writer)?; }); + serialized_size_body + .push(quote! { size += CanonicalSerialize::serialized_size(&self.#(#idents).*); }); + serialize_uncompressed_body.push( + quote! { CanonicalSerialize::serialize_uncompressed(&self.#(#idents).*, &mut writer)?; }, + ); + serialize_unchecked_body.push( + quote! { CanonicalSerialize::serialize_unchecked(&self.#(#idents).*, &mut writer)?; }, + ); + uncompressed_size_body.push( + quote! { size += CanonicalSerialize::uncompressed_size(&self.#(#idents).*); }, + ); + } + } +} + +fn impl_canonical_serialize(ast: &syn::DeriveInput) -> TokenStream { + let name = &ast.ident; + + let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl(); + + let mut serialize_body = Vec::::new(); + let mut serialized_size_body = Vec::::new(); + let mut serialize_uncompressed_body = Vec::::new(); + let mut serialize_unchecked_body = Vec::::new(); + let mut uncompressed_size_body = Vec::::new(); + + match ast.data { + Data::Struct(ref data_struct) => { + for (i, field) in data_struct.fields.iter().enumerate() { + let mut idents = Vec::>::new(); + match field.ident { + None => { + let index = Index::from(i); + idents.push(Box::new(index)); + } + Some(ref ident) => { + idents.push(Box::new(ident.clone())); + } + } + + impl_serialize_field( + &mut serialize_body, + &mut serialized_size_body, + &mut serialize_uncompressed_body, + &mut serialize_unchecked_body, + &mut uncompressed_size_body, + &mut idents, + &field.ty, + ); + } + } + _ => panic!( + "Serialize can only be derived for structs, {} is not a struct", + name + ), + }; + + let gen = quote! 
{ + impl #impl_generics CanonicalSerialize for #name #ty_generics #where_clause { + #[allow(unused_mut, unused_variables)] + fn serialize(&self, mut writer: W) -> Result<(), SerializationError> { + #(#serialize_body)* + Ok(()) + } + #[allow(unused_mut, unused_variables)] + fn serialized_size(&self) -> usize { + let mut size = 0; + #(#serialized_size_body)* + size + } + #[allow(unused_mut, unused_variables)] + fn serialize_uncompressed(&self, mut writer: W) -> Result<(), SerializationError> { + #(#serialize_uncompressed_body)* + Ok(()) + } + + #[allow(unused_mut, unused_variables)] + fn serialize_unchecked(&self, mut writer: W) -> Result<(), SerializationError> { + #(#serialize_unchecked_body)* + Ok(()) + } + #[allow(unused_mut, unused_variables)] + fn uncompressed_size(&self) -> usize { + let mut size = 0; + #(#uncompressed_size_body)* + size + } + } + }; + gen +} + +#[proc_macro_derive(CanonicalDeserialize)] +pub fn derive_canonical_deserialize(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let ast = parse_macro_input!(input as DeriveInput); + proc_macro::TokenStream::from(impl_canonical_deserialize(&ast)) +} + +/// Returns three TokenStreams, one for the compressed deserialize, one for the +/// uncompressed, and one for the unchecked. +fn impl_deserialize_field(ty: &Type) -> (TokenStream, TokenStream, TokenStream) { + // Check if type is a tuple. + match ty { + Type::Tuple(tuple) => { + let mut compressed_fields = Vec::new(); + let mut uncompressed_fields = Vec::new(); + let mut unchecked_fields = Vec::new(); + for elem_ty in tuple.elems.iter() { + let (compressed, uncompressed, unchecked) = impl_deserialize_field(elem_ty); + compressed_fields.push(compressed); + uncompressed_fields.push(uncompressed); + unchecked_fields.push(unchecked); + } + ( + quote! { (#(#compressed_fields)*), }, + quote! { (#(#uncompressed_fields)*), }, + quote! { (#(#unchecked_fields)*), }, + ) + } + _ => ( + quote! { CanonicalDeserialize::deserialize(&mut reader)?, }, + quote! { CanonicalDeserialize::deserialize_uncompressed(&mut reader)?, }, + quote! { CanonicalDeserialize::deserialize_unchecked(&mut reader)?, }, + ), + } +} + +fn impl_canonical_deserialize(ast: &syn::DeriveInput) -> TokenStream { + let name = &ast.ident; + + let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl(); + + let deserialize_body; + let deserialize_uncompressed_body; + let deserialize_unchecked_body; + + match ast.data { + Data::Struct(ref data_struct) => { + let mut tuple = false; + let mut compressed_field_cases = Vec::::new(); + let mut uncompressed_field_cases = Vec::::new(); + let mut unchecked_field_cases = Vec::::new(); + for field in data_struct.fields.iter() { + match &field.ident { + None => { + tuple = true; + let (compressed, uncompressed, unchecked) = + impl_deserialize_field(&field.ty); + compressed_field_cases.push(compressed); + uncompressed_field_cases.push(uncompressed); + unchecked_field_cases.push(unchecked); + } + // struct field without len_type + Some(ident) => { + let (compressed_field, uncompressed_field, unchecked_field) = + impl_deserialize_field(&field.ty); + compressed_field_cases.push(quote! { #ident: #compressed_field }); + uncompressed_field_cases.push(quote! { #ident: #uncompressed_field }); + unchecked_field_cases.push(quote! 
{ #ident: #unchecked_field }); + } + } + } + + if tuple { + deserialize_body = quote!({ + Ok(#name ( + #(#compressed_field_cases)* + )) + }); + deserialize_uncompressed_body = quote!({ + Ok(#name ( + #(#uncompressed_field_cases)* + )) + }); + deserialize_unchecked_body = quote!({ + Ok(#name ( + #(#unchecked_field_cases)* + )) + }); + } else { + deserialize_body = quote!({ + Ok(#name { + #(#compressed_field_cases)* + }) + }); + deserialize_uncompressed_body = quote!({ + Ok(#name { + #(#uncompressed_field_cases)* + }) + }); + deserialize_unchecked_body = quote!({ + Ok(#name { + #(#unchecked_field_cases)* + }) + }); + } + } + _ => panic!( + "Deserialize can only be derived for structs, {} is not a Struct", + name + ), + }; + + let gen = quote! { + impl #impl_generics CanonicalDeserialize for #name #ty_generics #where_clause { + #[allow(unused_mut,unused_variables)] + fn deserialize(mut reader: R) -> Result { + #deserialize_body + } + #[allow(unused_mut,unused_variables)] + fn deserialize_uncompressed(mut reader: R) -> Result { + #deserialize_uncompressed_body + } + + #[allow(unused_mut,unused_variables)] + fn deserialize_unchecked(mut reader: R) -> Result { + #deserialize_unchecked_body + } + } + }; + gen +} diff --git a/arkworks/algebra/serialize/Cargo.toml b/arkworks/algebra/serialize/Cargo.toml new file mode 100644 index 00000000..bdf3f7da --- /dev/null +++ b/arkworks/algebra/serialize/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "ark-serialize" +version = "0.3.0" +authors = [ "arkworks contributors" ] +description = "A library for serializing types in the arkworks ecosystem" +homepage = "https://arkworks.rs" +repository = "https://github.com/arkworks-rs/algebra" +documentation = "https://docs.rs/ark-serialize/" +keywords = ["cryptography", "serialization" ] +categories = ["cryptography"] +include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +license = "MIT/Apache-2.0" +edition = "2018" + +[dependencies] +ark-serialize-derive = { version = "^0.3.0", path = "../serialize-derive", optional = true } +ark-std = { path = "../../std", version = "^0.3.0", default-features = false } +digest = { version = "0.9", default-features = false } + +[dev-dependencies] +sha2 = { version = "0.9.3", default-features = false} +sha3 = { version = "0.9.1", default-features = false} +blake2 = { version = "0.9.1", default-features = false} + +[features] +default = [] +std = [ "ark-std/std", ] +derive = [ "ark-serialize-derive" ] diff --git a/arkworks/algebra/serialize/LICENSE-APACHE b/arkworks/algebra/serialize/LICENSE-APACHE new file mode 120000 index 00000000..965b606f --- /dev/null +++ b/arkworks/algebra/serialize/LICENSE-APACHE @@ -0,0 +1 @@ +../LICENSE-APACHE \ No newline at end of file diff --git a/arkworks/algebra/serialize/LICENSE-MIT b/arkworks/algebra/serialize/LICENSE-MIT new file mode 120000 index 00000000..76219eb7 --- /dev/null +++ b/arkworks/algebra/serialize/LICENSE-MIT @@ -0,0 +1 @@ +../LICENSE-MIT \ No newline at end of file diff --git a/arkworks/algebra/serialize/src/error.rs b/arkworks/algebra/serialize/src/error.rs new file mode 100644 index 00000000..f727ea9e --- /dev/null +++ b/arkworks/algebra/serialize/src/error.rs @@ -0,0 +1,37 @@ +use ark_std::{fmt, io}; + +/// This is an error that could occur during serialization +#[derive(Debug)] +pub enum SerializationError { + /// During serialization, we didn't have enough space to write extra info. + NotEnoughSpace, + /// During serialization, the data was invalid. 
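Downstream, the two derives defined above are consumed as field-by-field implementations; tuple fields are expanded recursively, element by element. A minimal usage sketch (it assumes `ark-serialize` is built with its `derive` feature, and `Header` is a hypothetical struct, not part of this diff):

```rust
use ark_serialize::*;

#[derive(CanonicalSerialize, CanonicalDeserialize, PartialEq, Debug)]
struct Header {
    version: u64,
    // Tuple fields are handled recursively by the derive.
    range: (u64, u64),
}

fn main() {
    let h = Header { version: 1, range: (10, 20) };

    // serialized_size is the sum of the field sizes: 8 + (8 + 8) bytes.
    let mut bytes = vec![0u8; h.serialized_size()];
    h.serialize(&mut bytes[..]).unwrap();
    assert_eq!(bytes.len(), 24);

    let back = Header::deserialize(&bytes[..]).unwrap();
    assert_eq!(h, back);
}
```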
+    InvalidData,
+    /// During serialization, non-empty flags were given where none were
+    /// expected.
+    UnexpectedFlags,
+    /// During serialization, we encountered an I/O error.
+    IoError(io::Error),
+}
+
+impl ark_std::error::Error for SerializationError {}
+
+impl From<io::Error> for SerializationError {
+    fn from(e: io::Error) -> SerializationError {
+        SerializationError::IoError(e)
+    }
+}
+
+impl fmt::Display for SerializationError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+        match self {
+            SerializationError::NotEnoughSpace => write!(
+                f,
+                "the last byte does not have enough space to encode the extra info bits"
+            ),
+            SerializationError::InvalidData => write!(f, "the input buffer contained invalid data"),
+            SerializationError::UnexpectedFlags => write!(f, "the call expects empty flags"),
+            SerializationError::IoError(err) => write!(f, "I/O error: {:?}", err),
+        }
+    }
+}
diff --git a/arkworks/algebra/serialize/src/flags.rs b/arkworks/algebra/serialize/src/flags.rs
new file mode 100644
index 00000000..8de49aaf
--- /dev/null
+++ b/arkworks/algebra/serialize/src/flags.rs
@@ -0,0 +1,195 @@
+/// Represents metadata to be appended to an object's serialization. For
+/// example, when serializing elliptic curve points, one can
+/// use a `Flag` to represent whether the serialization is the point
+/// at infinity, or whether the `y` coordinate is positive or not.
+/// These bits will be appended to the end of the point's serialization,
+/// or included in a new byte, depending on space available.
+///
+/// This is meant to be provided to `CanonicalSerializeWithFlags` and
+/// `CanonicalDeserializeWithFlags`.
+pub trait Flags: Default + Clone + Copy + Sized {
+    /// The number of bits required to encode `Self`.
+    /// This should be at most 8.
+    const BIT_SIZE: usize;
+
+    // Returns a bit mask corresponding to `self`.
+    // For example, if `Self` contains two variants, there are just two possible
+    // bit masks: `0` and `1 << 7`.
+    fn u8_bitmask(&self) -> u8;
+
+    // Tries to read `Self` from `value`. Should return `None` if the `Self::BIT_SIZE`
+    // most-significant bits of `value` do not correspond to those generated by
+    // `u8_bitmask`.
+    //
+    // That is, this method ignores all but the top `Self::BIT_SIZE` bits, and
+    // decides whether these top bits correspond to a bitmask output by `u8_bitmask`.
+    fn from_u8(value: u8) -> Option<Self>;
+
+    // Convenience method that reads `Self` from `value`, just like `Self::from_u8`, but
+    // additionally zeroes out the bits corresponding to the resulting flag in `value`.
+    // If `Self::from_u8(*value)` would return `None`, then this method should
+    // *not* modify `value`.
+    fn from_u8_remove_flags(value: &mut u8) -> Option<Self> {
+        let flags = Self::from_u8(*value);
+        if let Some(f) = flags {
+            *value &= !f.u8_bitmask();
+        }
+        flags
+    }
+}
+
+/// Flags to be encoded into the serialization.
+#[derive(Default, Clone, Copy, PartialEq, Eq)]
+pub struct EmptyFlags;
+
+impl Flags for EmptyFlags {
+    const BIT_SIZE: usize = 0;
+
+    #[inline]
+    fn u8_bitmask(&self) -> u8 {
+        0
+    }
+
+    #[inline]
+    fn from_u8(value: u8) -> Option<Self> {
+        if (value >> 7) == 0 {
+            Some(EmptyFlags)
+        } else {
+            None
+        }
+    }
+}
+
+/// Flags to be encoded into the serialization.
+/// The default flags (empty) should not change the binary representation.
+#[derive(Clone, Copy, PartialEq, Eq)] +pub enum SWFlags { + Infinity, + PositiveY, + NegativeY, +} + +impl SWFlags { + #[inline] + pub fn infinity() -> Self { + SWFlags::Infinity + } + + #[inline] + pub fn from_y_sign(is_positive: bool) -> Self { + if is_positive { + SWFlags::PositiveY + } else { + SWFlags::NegativeY + } + } + + #[inline] + pub fn is_infinity(&self) -> bool { + matches!(self, SWFlags::Infinity) + } + + #[inline] + pub fn is_positive(&self) -> Option { + match self { + SWFlags::Infinity => None, + SWFlags::PositiveY => Some(true), + SWFlags::NegativeY => Some(false), + } + } +} + +impl Default for SWFlags { + #[inline] + fn default() -> Self { + // NegativeY doesn't change the serialization + SWFlags::NegativeY + } +} + +impl Flags for SWFlags { + const BIT_SIZE: usize = 2; + + #[inline] + fn u8_bitmask(&self) -> u8 { + let mut mask = 0; + match self { + SWFlags::Infinity => mask |= 1 << 6, + SWFlags::PositiveY => mask |= 1 << 7, + _ => (), + } + mask + } + + #[inline] + fn from_u8(value: u8) -> Option { + let x_sign = (value >> 7) & 1 == 1; + let is_infinity = (value >> 6) & 1 == 1; + match (x_sign, is_infinity) { + // This is invalid because we only want *one* way to serialize + // the point at infinity. + (true, true) => None, + (false, true) => Some(SWFlags::Infinity), + (true, false) => Some(SWFlags::PositiveY), + (false, false) => Some(SWFlags::NegativeY), + } + } +} + +/// Flags to be encoded into the serialization. +/// The default flags (empty) should not change the binary representation. +#[derive(Clone, Copy, PartialEq, Eq)] +pub enum EdwardsFlags { + PositiveY, + NegativeY, +} + +impl EdwardsFlags { + #[inline] + pub fn from_y_sign(is_positive: bool) -> Self { + if is_positive { + EdwardsFlags::PositiveY + } else { + EdwardsFlags::NegativeY + } + } + + #[inline] + pub fn is_positive(&self) -> bool { + match self { + EdwardsFlags::PositiveY => true, + EdwardsFlags::NegativeY => false, + } + } +} + +impl Default for EdwardsFlags { + #[inline] + fn default() -> Self { + // NegativeY doesn't change the serialization + EdwardsFlags::NegativeY + } +} + +impl Flags for EdwardsFlags { + const BIT_SIZE: usize = 1; + + #[inline] + fn u8_bitmask(&self) -> u8 { + let mut mask = 0; + if let Self::PositiveY = self { + mask |= 1 << 7; + } + mask + } + + #[inline] + fn from_u8(value: u8) -> Option { + let x_sign = (value >> 7) & 1 == 1; + if x_sign { + Some(Self::PositiveY) + } else { + Some(Self::NegativeY) + } + } +} diff --git a/arkworks/algebra/serialize/src/lib.rs b/arkworks/algebra/serialize/src/lib.rs new file mode 100644 index 00000000..382da809 --- /dev/null +++ b/arkworks/algebra/serialize/src/lib.rs @@ -0,0 +1,1108 @@ +#![cfg_attr(not(feature = "std"), no_std)] +#![warn(unused, future_incompatible, nonstandard_style, rust_2018_idioms)] +#![forbid(unsafe_code)] +mod error; +mod flags; + +pub use ark_std::io::{Read, Write}; +use ark_std::{ + borrow::{Cow, ToOwned}, + collections::{BTreeMap, BTreeSet}, + convert::TryFrom, + rc::Rc, + string::String, + vec::Vec, +}; +pub use error::*; +pub use flags::*; + +#[cfg(feature = "derive")] +#[doc(hidden)] +pub use ark_serialize_derive::*; + +use digest::{generic_array::GenericArray, Digest}; + +/// Serializer in little endian format allowing to encode flags. +pub trait CanonicalSerializeWithFlags: CanonicalSerialize { + /// Serializes `self` and `flags` into `writer`. + fn serialize_with_flags( + &self, + writer: W, + flags: F, + ) -> Result<(), SerializationError>; + + /// Serializes `self` and `flags` into `writer`. 
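The bit layout chosen by `EmptyFlags` and `SWFlags` in `flags.rs` above (bit 7 for the `y` sign, bit 6 for the point at infinity) can be exercised directly through the `Flags` API; an illustrative sketch:

```rust
use ark_serialize::{EmptyFlags, Flags, SWFlags};

fn main() {
    // EmptyFlags occupies zero bits and only accepts bytes whose top bit is clear.
    assert_eq!(EmptyFlags::BIT_SIZE, 0);
    assert!(EmptyFlags::from_u8(0b1000_0000).is_none());

    // SWFlags uses the two most significant bits: bit 7 for the y sign,
    // bit 6 for the point at infinity.
    assert_eq!(SWFlags::from_y_sign(true).u8_bitmask(), 1 << 7);
    assert_eq!(SWFlags::infinity().u8_bitmask(), 1 << 6);

    // Setting both bits is rejected: infinity must have a unique encoding.
    assert!(SWFlags::from_u8(0b1100_0000).is_none());

    // from_u8_remove_flags strips the flag bits from the byte in place.
    let mut byte = 0b1000_0101u8;
    let f = SWFlags::from_u8_remove_flags(&mut byte).unwrap();
    assert_eq!(f.is_positive(), Some(true));
    assert_eq!(byte, 0b0000_0101);
}
```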
+ fn serialized_size_with_flags(&self) -> usize; +} + +/// Serializer in little endian format. +/// The serialization format must be 'length-extension' safe. +/// e.g. if T implements Canonical Serialize and Deserialize, +/// then for all strings `x, y`, if `a = T::deserialize(Reader(x))` and `a` is not an error, +/// then it must be the case that `a = T::deserialize(Reader(x || y))`, +/// and that both readers read the same number of bytes. +/// +/// This trait can be derived if all fields of a struct implement +/// `CanonicalSerialize` and the `derive` feature is enabled. +/// +/// # Example +/// ``` +/// // The `derive` feature must be set for the derivation to work. +/// use ark_serialize::*; +/// +/// # #[cfg(feature = "derive")] +/// #[derive(CanonicalSerialize)] +/// struct TestStruct { +/// a: u64, +/// b: (u64, (u64, u64)), +/// } +/// ``` +/// +/// If your code depends on `algebra` instead, the example works analogously +/// when importing `algebra::serialize::*`. +pub trait CanonicalSerialize { + /// Serializes `self` into `writer`. + /// It is left up to a particular type for how it strikes the + /// serialization efficiency vs compression tradeoff. + /// For standard types (e.g. `bool`, lengths, etc.) typically an uncompressed + /// form is used, whereas for algebraic types compressed forms are used. + /// + /// Particular examples of interest: + /// `bool` - 1 byte encoding + /// uints - Direct encoding + /// Length prefixing (for any container implemented by default) - 8 byte encoding + /// Elliptic curves - compressed point encoding + fn serialize(&self, writer: W) -> Result<(), SerializationError>; + + fn serialized_size(&self) -> usize; + + /// Serializes `self` into `writer` without compression. + #[inline] + fn serialize_uncompressed(&self, writer: W) -> Result<(), SerializationError> { + self.serialize(writer) + } + + /// Serializes `self` into `writer` without compression, and without + /// performing validity checks. Should be used *only* when there is no + /// danger of adversarial manipulation of the output. + #[inline] + fn serialize_unchecked(&self, writer: W) -> Result<(), SerializationError> { + self.serialize_uncompressed(writer) + } + + #[inline] + fn uncompressed_size(&self) -> usize { + self.serialized_size() + } +} + +// This private struct works around Serialize taking the pre-existing +// std::io::Write instance of most digest::Digest implementations by value +struct HashMarshaller<'a, H: Digest>(&'a mut H); + +impl<'a, H: Digest> ark_std::io::Write for HashMarshaller<'a, H> { + #[inline] + fn write(&mut self, buf: &[u8]) -> ark_std::io::Result { + Digest::update(self.0, buf); + Ok(buf.len()) + } + + #[inline] + fn flush(&mut self) -> ark_std::io::Result<()> { + Ok(()) + } +} + +/// The CanonicalSerialize induces a natural way to hash the +/// corresponding value, of which this is the convenience trait. 
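The "length-extension safe" property stated in the `CanonicalSerialize` docs above is concrete enough to check by hand: appending bytes to a valid encoding must not change what is read. An illustrative sketch:

```rust
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};

fn main() {
    let data: Vec<u64> = vec![1, 2, 3];

    let mut bytes = vec![0u8; data.serialized_size()];
    data.serialize(&mut bytes[..]).unwrap();

    // Append trailing garbage; a length-extension-safe format ignores it.
    let mut extended = bytes.clone();
    extended.extend_from_slice(&[0xde, 0xad, 0xbe, 0xef]);

    let a = Vec::<u64>::deserialize(&bytes[..]).unwrap();
    let b = Vec::<u64>::deserialize(&extended[..]).unwrap();
    assert_eq!(a, b);
}
```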
+pub trait CanonicalSerializeHashExt: CanonicalSerialize { + fn hash(&self) -> GenericArray::OutputSize> { + let mut hasher = H::new(); + self.serialize(HashMarshaller(&mut hasher)) + .expect("HashMarshaller::flush should be infaillible!"); + hasher.finalize() + } + + fn hash_uncompressed(&self) -> GenericArray::OutputSize> { + let mut hasher = H::new(); + self.serialize_uncompressed(HashMarshaller(&mut hasher)) + .expect("HashMarshaller::flush should be infaillible!"); + hasher.finalize() + } +} + +/// CanonicalSerializeHashExt is a (blanket) extension trait of +/// CanonicalSerialize +impl CanonicalSerializeHashExt for T {} + +/// Deserializer in little endian format allowing flags to be encoded. +pub trait CanonicalDeserializeWithFlags: Sized { + /// Reads `Self` and `Flags` from `reader`. + /// Returns empty flags by default. + fn deserialize_with_flags( + reader: R, + ) -> Result<(Self, F), SerializationError>; +} + +/// Deserializer in little endian format. +/// This trait can be derived if all fields of a struct implement +/// `CanonicalDeserialize` and the `derive` feature is enabled. +/// +/// # Example +/// ``` +/// // The `derive` feature must be set for the derivation to work. +/// use ark_serialize::*; +/// +/// # #[cfg(feature = "derive")] +/// #[derive(CanonicalDeserialize)] +/// struct TestStruct { +/// a: u64, +/// b: (u64, (u64, u64)), +/// } +/// ``` +/// +/// If your code depends on `algebra` instead, the example works analogously +/// when importing `algebra::serialize::*`. +pub trait CanonicalDeserialize: Sized { + /// Reads `Self` from `reader`. + fn deserialize(reader: R) -> Result; + + /// Reads `Self` from `reader` without compression. + #[inline] + fn deserialize_uncompressed(reader: R) -> Result { + Self::deserialize(reader) + } + + /// Reads `self` from `reader` without compression, and without performing + /// validity checks. Should be used *only* when the input is trusted. + #[inline] + fn deserialize_unchecked(reader: R) -> Result { + Self::deserialize_uncompressed(reader) + } +} + +// Macro for implementing serialize for u8, u16, u32, u64 +macro_rules! impl_uint { + ($ty: ident) => { + impl CanonicalSerialize for $ty { + #[inline] + fn serialize(&self, mut writer: W) -> Result<(), SerializationError> { + Ok(writer.write_all(&self.to_le_bytes())?) + } + + #[inline] + fn serialized_size(&self) -> usize { + core::mem::size_of::<$ty>() + } + } + + impl CanonicalDeserialize for $ty { + #[inline] + fn deserialize(mut reader: R) -> Result { + let mut bytes = [0u8; core::mem::size_of::<$ty>()]; + reader.read_exact(&mut bytes)?; + Ok($ty::from_le_bytes(bytes)) + } + } + }; +} + +impl_uint!(u8); +impl_uint!(u16); +impl_uint!(u32); +impl_uint!(u64); + +// Serialize usize with 8 bytes +impl CanonicalSerialize for usize { + #[inline] + fn serialize(&self, mut writer: W) -> Result<(), SerializationError> { + Ok(writer.write_all(&(*self as u64).to_le_bytes())?) + } + + #[inline] + fn serialized_size(&self) -> usize { + core::mem::size_of::() + } +} + +impl CanonicalDeserialize for usize { + #[inline] + fn deserialize(mut reader: R) -> Result { + let mut bytes = [0u8; core::mem::size_of::()]; + reader.read_exact(&mut bytes)?; + usize::try_from(u64::from_le_bytes(bytes)).map_err(|_| SerializationError::InvalidData) + } +} + +// Implement Serialization for `String` +// It is serialized by obtaining its byte representation as a Vec and +// serializing that. This yields an end serialization of +// `string.len() || string_bytes`. 
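Concretely, the `string.len() || string_bytes` layout described above is an 8-byte little-endian length prefix followed by the raw UTF-8 bytes; invalid UTF-8 after the prefix is rejected at deserialization. An illustrative sketch:

```rust
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};

fn main() {
    let s = String::from("arkworks");

    let mut bytes = vec![0u8; s.serialized_size()];
    s.serialize(&mut bytes[..]).unwrap();

    // 8-byte u64 length prefix, then the 8 bytes of "arkworks".
    assert_eq!(bytes.len(), 8 + 8);
    assert_eq!(bytes[..8], 8u64.to_le_bytes());
    assert_eq!(bytes[8..], b"arkworks"[..]);

    // Round-trips back through deserialize.
    let back = String::deserialize(&bytes[..]).unwrap();
    assert_eq!(back, s);
}
```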
+impl CanonicalSerialize for String { + #[inline] + fn serialize(&self, mut writer: W) -> Result<(), SerializationError> { + self.clone().into_bytes().serialize(&mut writer) + } + + #[inline] + fn serialized_size(&self) -> usize { + self.clone().into_bytes().serialized_size() + } +} + +impl CanonicalDeserialize for String { + #[inline] + fn deserialize(mut reader: R) -> Result { + String::from_utf8(Vec::::deserialize(&mut reader)?) + .map_err(|_| SerializationError::InvalidData) + } +} + +impl CanonicalSerialize for [T] { + #[inline] + fn serialize(&self, mut writer: W) -> Result<(), SerializationError> { + let len = self.len() as u64; + len.serialize(&mut writer)?; + for item in self.iter() { + item.serialize(&mut writer)?; + } + Ok(()) + } + + #[inline] + fn serialized_size(&self) -> usize { + 8 + self + .iter() + .map(|item| item.serialized_size()) + .sum::() + } + + #[inline] + fn serialize_uncompressed(&self, mut writer: W) -> Result<(), SerializationError> { + let len = self.len() as u64; + len.serialize(&mut writer)?; + for item in self.iter() { + item.serialize_uncompressed(&mut writer)?; + } + Ok(()) + } + + #[inline] + fn serialize_unchecked(&self, mut writer: W) -> Result<(), SerializationError> { + let len = self.len() as u64; + len.serialize(&mut writer)?; + for item in self.iter() { + item.serialize_unchecked(&mut writer)?; + } + Ok(()) + } + + #[inline] + fn uncompressed_size(&self) -> usize { + 8 + self + .iter() + .map(|item| item.uncompressed_size()) + .sum::() + } +} + +impl CanonicalSerialize for Vec { + #[inline] + fn serialize(&self, writer: W) -> Result<(), SerializationError> { + self.as_slice().serialize(writer) + } + + #[inline] + fn serialized_size(&self) -> usize { + self.as_slice().serialized_size() + } + + #[inline] + fn serialize_uncompressed(&self, writer: W) -> Result<(), SerializationError> { + self.as_slice().serialize_uncompressed(writer) + } + + #[inline] + fn serialize_unchecked(&self, writer: W) -> Result<(), SerializationError> { + self.as_slice().serialize_unchecked(writer) + } + + #[inline] + fn uncompressed_size(&self) -> usize { + self.as_slice().uncompressed_size() + } +} + +impl CanonicalDeserialize for Vec { + #[inline] + fn deserialize(mut reader: R) -> Result { + let len = u64::deserialize(&mut reader)?; + let mut values = Vec::new(); + for _ in 0..len { + values.push(T::deserialize(&mut reader)?); + } + Ok(values) + } + + #[inline] + fn deserialize_uncompressed(mut reader: R) -> Result { + let len = u64::deserialize(&mut reader)?; + let mut values = Vec::new(); + for _ in 0..len { + values.push(T::deserialize_uncompressed(&mut reader)?); + } + Ok(values) + } + + #[inline] + fn deserialize_unchecked(mut reader: R) -> Result { + let len = u64::deserialize(&mut reader)?; + let mut values = Vec::new(); + for _ in 0..len { + values.push(T::deserialize_unchecked(&mut reader)?); + } + Ok(values) + } +} + +#[inline] +pub fn buffer_bit_byte_size(modulus_bits: usize) -> (usize, usize) { + let byte_size = buffer_byte_size(modulus_bits); + ((byte_size * 8), byte_size) +} + +/// Converts the number of bits required to represent a number +/// into the number of bytes required to represent it. +#[inline] +pub const fn buffer_byte_size(modulus_bits: usize) -> usize { + (modulus_bits + 7) / 8 +} + +// Implement Serialization for tuples +macro_rules! 
impl_tuple { + ($( $ty: ident : $no: tt, )*) => { + impl<$($ty, )*> CanonicalSerialize for ($($ty,)*) where + $($ty: CanonicalSerialize,)* + { + #[inline] + fn serialize(&self, mut _writer: W) -> Result<(), SerializationError> { + $(self.$no.serialize(&mut _writer)?;)* + Ok(()) + } + + #[inline] + fn serialized_size(&self) -> usize { + [$( + self.$no.serialized_size(), + )*].iter().sum() + } + + #[inline] + fn serialize_uncompressed(&self, mut _writer: W) -> Result<(), SerializationError> { + $(self.$no.serialize_uncompressed(&mut _writer)?;)* + Ok(()) + } + + #[inline] + fn serialize_unchecked(&self, mut _writer: W) -> Result<(), SerializationError> { + $(self.$no.serialize_unchecked(&mut _writer)?;)* + Ok(()) + } + + #[inline] + fn uncompressed_size(&self) -> usize { + [$( + self.$no.uncompressed_size(), + )*].iter().sum() + } + } + + impl<$($ty, )*> CanonicalDeserialize for ($($ty,)*) where + $($ty: CanonicalDeserialize,)* + { + #[inline] + fn deserialize(mut _reader: R) -> Result { + Ok(($( + $ty::deserialize(&mut _reader)?, + )*)) + } + + #[inline] + fn deserialize_uncompressed(mut _reader: R) -> Result { + Ok(($( + $ty::deserialize_uncompressed(&mut _reader)?, + )*)) + } + + #[inline] + fn deserialize_unchecked(mut _reader: R) -> Result { + Ok(($( + $ty::deserialize_unchecked(&mut _reader)?, + )*)) + } + } + } +} + +impl_tuple!(); +impl_tuple!(A:0, B:1,); +impl_tuple!(A:0, B:1, C:2,); +impl_tuple!(A:0, B:1, C:2, D:3,); + +// No-op +impl CanonicalSerialize for core::marker::PhantomData { + #[inline] + fn serialize(&self, _writer: W) -> Result<(), SerializationError> { + Ok(()) + } + + #[inline] + fn serialized_size(&self) -> usize { + 0 + } +} + +impl CanonicalDeserialize for core::marker::PhantomData { + #[inline] + fn deserialize(_reader: R) -> Result { + Ok(core::marker::PhantomData) + } +} + +// Serialize cow objects by serializing the underlying object. +impl<'a, T: CanonicalSerialize + ToOwned> CanonicalSerialize for Cow<'a, T> { + #[inline] + fn serialize(&self, writer: W) -> Result<(), SerializationError> { + self.as_ref().serialize(writer) + } + + #[inline] + fn serialized_size(&self) -> usize { + self.as_ref().serialized_size() + } + + #[inline] + fn serialize_uncompressed(&self, writer: W) -> Result<(), SerializationError> { + self.as_ref().serialize_uncompressed(writer) + } + + #[inline] + fn serialize_unchecked(&self, writer: W) -> Result<(), SerializationError> { + self.as_ref().serialize_unchecked(writer) + } + + fn uncompressed_size(&self) -> usize { + self.as_ref().uncompressed_size() + } +} + +impl<'a, T> CanonicalDeserialize for Cow<'a, T> +where + T: ToOwned, + ::Owned: CanonicalDeserialize, +{ + #[inline] + fn deserialize(reader: R) -> Result { + Ok(Cow::Owned(::Owned::deserialize(reader)?)) + } + + #[inline] + fn deserialize_uncompressed(reader: R) -> Result { + Ok(Cow::Owned(::Owned::deserialize_uncompressed( + reader, + )?)) + } + + #[inline] + fn deserialize_unchecked(reader: R) -> Result { + Ok(Cow::Owned(::Owned::deserialize_unchecked( + reader, + )?)) + } +} + +// If Option is None, serialize as serialize(False). 
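The `Option<T>` encoding (a one-byte `bool` tag, followed by the payload only for `Some`) can be seen concretely; an illustrative sketch:

```rust
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};

fn main() {
    let none: Option<u64> = None;
    let some: Option<u64> = Some(7);

    // None is a single 0u8.
    let mut none_bytes = vec![0u8; none.serialized_size()];
    none.serialize(&mut none_bytes[..]).unwrap();
    assert_eq!(none_bytes, [0u8]);

    // Some(x) is 1u8 followed by the encoding of x.
    let mut some_bytes = vec![0u8; some.serialized_size()];
    some.serialize(&mut some_bytes[..]).unwrap();
    assert_eq!(some_bytes.len(), 1 + 8);
    assert_eq!(some_bytes[0], 1u8);

    assert_eq!(Option::<u64>::deserialize(&some_bytes[..]).unwrap(), Some(7));
}
```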
+// If its Some, serialize as serialize(True) || serialize(T) +impl CanonicalSerialize for Option { + #[inline] + fn serialize(&self, mut writer: W) -> Result<(), SerializationError> { + self.is_some().serialize(&mut writer)?; + if let Some(item) = self { + item.serialize(&mut writer)?; + } + + Ok(()) + } + + #[inline] + fn serialized_size(&self) -> usize { + self.is_some().serialized_size() + + if let Some(item) = self { + item.serialized_size() + } else { + 0 + } + } + + #[inline] + fn serialize_uncompressed(&self, mut writer: W) -> Result<(), SerializationError> { + self.is_some().serialize_uncompressed(&mut writer)?; + if let Some(item) = self { + item.serialize_uncompressed(&mut writer)?; + } + + Ok(()) + } + + #[inline] + fn uncompressed_size(&self) -> usize { + self.is_some().uncompressed_size() + + if let Some(item) = self { + item.uncompressed_size() + } else { + 0 + } + } + + #[inline] + fn serialize_unchecked(&self, mut writer: W) -> Result<(), SerializationError> { + self.is_some().serialize_unchecked(&mut writer)?; + if let Some(item) = self { + item.serialize_unchecked(&mut writer)?; + } + + Ok(()) + } +} + +impl CanonicalDeserialize for Option { + #[inline] + fn deserialize(mut reader: R) -> Result { + let is_some = bool::deserialize(&mut reader)?; + let data = if is_some { + Some(T::deserialize(&mut reader)?) + } else { + None + }; + + Ok(data) + } + + #[inline] + fn deserialize_uncompressed(mut reader: R) -> Result { + let is_some = bool::deserialize_uncompressed(&mut reader)?; + let data = if is_some { + Some(T::deserialize_uncompressed(&mut reader)?) + } else { + None + }; + + Ok(data) + } + + #[inline] + fn deserialize_unchecked(mut reader: R) -> Result { + let is_some = bool::deserialize_unchecked(&mut reader)?; + let data = if is_some { + Some(T::deserialize_unchecked(&mut reader)?) 
+ } else { + None + }; + + Ok(data) + } +} + +// Implement Serialization for `Rc` +impl CanonicalSerialize for Rc { + #[inline] + fn serialize(&self, mut writer: W) -> Result<(), SerializationError> { + self.as_ref().serialize(&mut writer) + } + + #[inline] + fn serialized_size(&self) -> usize { + self.as_ref().serialized_size() + } + + #[inline] + fn serialize_uncompressed(&self, mut writer: W) -> Result<(), SerializationError> { + self.as_ref().serialize_uncompressed(&mut writer) + } + + #[inline] + fn uncompressed_size(&self) -> usize { + self.as_ref().uncompressed_size() + } + + #[inline] + fn serialize_unchecked(&self, mut writer: W) -> Result<(), SerializationError> { + self.as_ref().serialize_unchecked(&mut writer) + } +} + +impl CanonicalDeserialize for Rc { + #[inline] + fn deserialize(mut reader: R) -> Result { + Ok(Rc::new(T::deserialize(&mut reader)?)) + } + + #[inline] + fn deserialize_uncompressed(mut reader: R) -> Result { + Ok(Rc::new(T::deserialize_uncompressed(&mut reader)?)) + } + + #[inline] + fn deserialize_unchecked(mut reader: R) -> Result { + Ok(Rc::new(T::deserialize_unchecked(&mut reader)?)) + } +} + +// Serialize boolean with a full byte +impl CanonicalSerialize for bool { + #[inline] + fn serialize(&self, writer: W) -> Result<(), SerializationError> { + (*self as u8).serialize(writer) + } + + #[inline] + fn serialized_size(&self) -> usize { + 1 + } +} + +impl CanonicalDeserialize for bool { + #[inline] + fn deserialize(reader: R) -> Result { + let val = u8::deserialize(reader)?; + if val == 0 { + return Ok(false); + } else if val == 1 { + return Ok(true); + } + + Err(SerializationError::InvalidData) + } + + #[inline] + fn deserialize_unchecked(reader: R) -> Result { + Ok(u8::deserialize(reader)? == 1) + } +} + +// Serialize BTreeMap as `len(map) || key 1 || value 1 || ... 
|| key n || value n` +impl CanonicalSerialize for BTreeMap +where + K: CanonicalSerialize, + V: CanonicalSerialize, +{ + fn serialize(&self, mut writer: W) -> Result<(), SerializationError> { + let len = self.len() as u64; + len.serialize(&mut writer)?; + for (k, v) in self.iter() { + k.serialize(&mut writer)?; + v.serialize(&mut writer)?; + } + Ok(()) + } + + fn serialized_size(&self) -> usize { + 8 + self + .iter() + .map(|(k, v)| k.serialized_size() + v.serialized_size()) + .sum::() + } + + fn serialize_uncompressed(&self, mut writer: W) -> Result<(), SerializationError> { + let len = self.len() as u64; + len.serialize_uncompressed(&mut writer)?; + for (k, v) in self.iter() { + k.serialize_uncompressed(&mut writer)?; + v.serialize_uncompressed(&mut writer)?; + } + Ok(()) + } + + fn serialize_unchecked(&self, mut writer: W) -> Result<(), SerializationError> { + let len = self.len() as u64; + len.serialize_unchecked(&mut writer)?; + for (k, v) in self.iter() { + k.serialize_unchecked(&mut writer)?; + v.serialize_unchecked(&mut writer)?; + } + Ok(()) + } + + fn uncompressed_size(&self) -> usize { + 8 + self + .iter() + .map(|(k, v)| k.uncompressed_size() + v.uncompressed_size()) + .sum::() + } +} + +impl CanonicalDeserialize for BTreeMap +where + K: Ord + CanonicalDeserialize, + V: CanonicalDeserialize, +{ + fn deserialize(mut reader: R) -> Result { + let len = u64::deserialize(&mut reader)?; + let mut map = BTreeMap::new(); + for _ in 0..len { + map.insert(K::deserialize(&mut reader)?, V::deserialize(&mut reader)?); + } + Ok(map) + } + + fn deserialize_uncompressed(mut reader: R) -> Result { + let len = u64::deserialize_uncompressed(&mut reader)?; + let mut map = BTreeMap::new(); + for _ in 0..len { + map.insert( + K::deserialize_uncompressed(&mut reader)?, + V::deserialize_uncompressed(&mut reader)?, + ); + } + Ok(map) + } + + fn deserialize_unchecked(mut reader: R) -> Result { + let len = u64::deserialize_unchecked(&mut reader)?; + let mut map = BTreeMap::new(); + for _ in 0..len { + map.insert( + K::deserialize_unchecked(&mut reader)?, + V::deserialize_unchecked(&mut reader)?, + ); + } + Ok(map) + } +} + +// Serialize BTreeSet as `len(set) || value_1 || ... || value_n`. 
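Both container encodings share the 8-byte length prefix followed by the entries in key order; a sketch with a concrete `BTreeMap` (illustrative only):

```rust
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
use ark_std::collections::BTreeMap;

fn main() {
    let mut map = BTreeMap::new();
    map.insert(1u64, 42u8);
    map.insert(2u64, 43u8);

    // Layout: 8-byte length, then each (key, value) pair in key order.
    assert_eq!(map.serialized_size(), 8 + 2 * (8 + 1));

    let mut bytes = vec![0u8; map.serialized_size()];
    map.serialize(&mut bytes[..]).unwrap();
    let back = BTreeMap::<u64, u8>::deserialize(&bytes[..]).unwrap();
    assert_eq!(map, back);
}
```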
+impl CanonicalSerialize for BTreeSet { + fn serialize(&self, mut writer: W) -> Result<(), SerializationError> { + let len = self.len() as u64; + len.serialize(&mut writer)?; + for elem in self.iter() { + elem.serialize(&mut writer)?; + } + Ok(()) + } + + fn serialized_size(&self) -> usize { + 8 + self + .iter() + .map(|elem| elem.serialized_size()) + .sum::() + } + + fn serialize_uncompressed(&self, mut writer: W) -> Result<(), SerializationError> { + let len = self.len() as u64; + len.serialize_uncompressed(&mut writer)?; + for elem in self.iter() { + elem.serialize_uncompressed(&mut writer)?; + } + Ok(()) + } + + fn serialize_unchecked(&self, mut writer: W) -> Result<(), SerializationError> { + let len = self.len() as u64; + len.serialize_unchecked(&mut writer)?; + for elem in self.iter() { + elem.serialize_unchecked(&mut writer)?; + } + Ok(()) + } + + fn uncompressed_size(&self) -> usize { + 8 + self + .iter() + .map(|elem| elem.uncompressed_size()) + .sum::() + } +} + +impl CanonicalDeserialize for BTreeSet { + fn deserialize(mut reader: R) -> Result { + let len = u64::deserialize(&mut reader)?; + let mut set = BTreeSet::new(); + for _ in 0..len { + set.insert(T::deserialize(&mut reader)?); + } + Ok(set) + } + + fn deserialize_uncompressed(mut reader: R) -> Result { + let len = u64::deserialize_uncompressed(&mut reader)?; + let mut set = BTreeSet::new(); + for _ in 0..len { + set.insert(T::deserialize_uncompressed(&mut reader)?); + } + Ok(set) + } + + fn deserialize_unchecked(mut reader: R) -> Result { + let len = u64::deserialize_unchecked(&mut reader)?; + let mut set = BTreeSet::new(); + for _ in 0..len { + set.insert(T::deserialize_unchecked(&mut reader)?); + } + Ok(set) + } +} + +#[cfg(test)] +mod test { + use super::*; + use ark_std::rand::RngCore; + use ark_std::vec; + + #[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug)] + struct Dummy; + + impl CanonicalSerialize for Dummy { + #[inline] + fn serialize(&self, mut writer: W) -> Result<(), SerializationError> { + 100u8.serialize(&mut writer) + } + + #[inline] + fn serialized_size(&self) -> usize { + 100u8.serialized_size() + } + + #[inline] + fn serialize_uncompressed( + &self, + mut writer: W, + ) -> Result<(), SerializationError> { + (&[100u8, 200u8]).serialize_uncompressed(&mut writer) + } + + #[inline] + fn uncompressed_size(&self) -> usize { + (&[100u8, 200u8]).uncompressed_size() + } + + #[inline] + fn serialize_unchecked(&self, mut writer: W) -> Result<(), SerializationError> { + (&[100u8, 200u8]).serialize_unchecked(&mut writer) + } + } + + impl CanonicalDeserialize for Dummy { + #[inline] + fn deserialize(mut reader: R) -> Result { + let result = u8::deserialize(&mut reader)?; + assert_eq!(result, 100u8); + Ok(Dummy) + } + + #[inline] + fn deserialize_uncompressed(mut reader: R) -> Result { + let result = Vec::::deserialize_uncompressed(&mut reader)?; + assert_eq!(result.as_slice(), &[100u8, 200u8]); + + Ok(Dummy) + } + + #[inline] + fn deserialize_unchecked(mut reader: R) -> Result { + let result = Vec::::deserialize_unchecked(&mut reader)?; + assert_eq!(result.as_slice(), &[100u8, 200u8]); + + Ok(Dummy) + } + } + + fn test_serialize< + T: PartialEq + core::fmt::Debug + CanonicalSerialize + CanonicalDeserialize, + >( + data: T, + ) { + let mut serialized = vec![0; data.serialized_size()]; + data.serialize(&mut serialized[..]).unwrap(); + let de = T::deserialize(&serialized[..]).unwrap(); + assert_eq!(data, de); + + let mut serialized = vec![0; data.uncompressed_size()]; + data.serialize_uncompressed(&mut 
serialized[..]).unwrap(); + let de = T::deserialize_uncompressed(&serialized[..]).unwrap(); + assert_eq!(data, de); + + let mut serialized = vec![0; data.uncompressed_size()]; + data.serialize_unchecked(&mut serialized[..]).unwrap(); + let de = T::deserialize_unchecked(&serialized[..]).unwrap(); + assert_eq!(data, de); + } + + fn test_hash(data: T) { + let h1 = data.hash::(); + + let mut hash = H::new(); + let mut serialized = vec![0; data.serialized_size()]; + data.serialize(&mut serialized[..]).unwrap(); + hash.update(&serialized); + let h2 = hash.finalize(); + + assert_eq!(h1, h2); + + let h3 = data.hash_uncompressed::(); + + let mut hash = H::new(); + serialized = vec![0; data.uncompressed_size()]; + data.serialize_uncompressed(&mut serialized[..]).unwrap(); + hash.update(&serialized); + let h4 = hash.finalize(); + + assert_eq!(h3, h4); + } + + // Serialize T, randomly mutate the data, and deserialize it. + // Ensure it fails. + // Up to the caller to provide a valid mutation criterion + // to ensure that this test always fails. + // This method requires a concrete instance of the data to be provided, + // to get the serialized size. + fn ensure_non_malleable_encoding< + T: PartialEq + core::fmt::Debug + CanonicalSerialize + CanonicalDeserialize, + >( + data: T, + valid_mutation: fn(&[u8]) -> bool, + ) { + let mut r = ark_std::test_rng(); + let mut serialized = vec![0; data.serialized_size()]; + r.fill_bytes(&mut serialized); + while !valid_mutation(&serialized) { + r.fill_bytes(&mut serialized); + } + let de = T::deserialize(&serialized[..]); + assert!(de.is_err()); + + let mut serialized = vec![0; data.uncompressed_size()]; + r.fill_bytes(&mut serialized); + while !valid_mutation(&serialized) { + r.fill_bytes(&mut serialized); + } + let de = T::deserialize_uncompressed(&serialized[..]); + assert!(de.is_err()); + } + + #[test] + fn test_vec() { + test_serialize(vec![1u64, 2, 3, 4, 5]); + test_serialize(Vec::::new()); + } + + #[test] + fn test_uint() { + test_serialize(192830918usize); + test_serialize(192830918u64); + test_serialize(192830918u32); + test_serialize(22313u16); + test_serialize(123u8); + } + + #[test] + fn test_string() { + test_serialize(String::from("arkworks")); + } + + #[test] + fn test_tuple() { + test_serialize(()); + test_serialize((123u64, Dummy)); + test_serialize((123u64, 234u32, Dummy)); + } + + #[test] + fn test_tuple_vec() { + test_serialize(vec![ + (Dummy, Dummy, Dummy), + (Dummy, Dummy, Dummy), + (Dummy, Dummy, Dummy), + ]); + test_serialize(vec![ + (86u8, 98u64, Dummy), + (86u8, 98u64, Dummy), + (86u8, 98u64, Dummy), + ]); + } + + #[test] + fn test_option() { + test_serialize(Some(Dummy)); + test_serialize(None::); + + test_serialize(Some(10u64)); + test_serialize(None::); + } + + #[test] + fn test_rc() { + test_serialize(Rc::new(Dummy)); + } + + #[test] + fn test_bool() { + test_serialize(true); + test_serialize(false); + + let valid_mutation = |data: &[u8]| -> bool { + return data.len() == 1 && data[0] > 1; + }; + for _ in 0..10 { + ensure_non_malleable_encoding(true, valid_mutation); + ensure_non_malleable_encoding(false, valid_mutation); + } + } + + #[test] + fn test_btreemap() { + let mut map = BTreeMap::new(); + map.insert(0u64, Dummy); + map.insert(5u64, Dummy); + test_serialize(map); + let mut map = BTreeMap::new(); + map.insert(10u64, vec![1u8, 2u8, 3u8]); + map.insert(50u64, vec![4u8, 5u8, 6u8]); + test_serialize(map); + } + + #[test] + fn test_btreeset() { + let mut set = BTreeSet::new(); + set.insert(Dummy); + set.insert(Dummy); + 
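The non-malleability property that `ensure_non_malleable_encoding` above probes with random mutations can be pinned down for `bool` directly; an illustrative sketch (note that `deserialize_unchecked` intentionally skips the validity check):

```rust
use ark_serialize::CanonicalDeserialize;

fn main() {
    // bool uses a full byte, but only 0 and 1 are valid encodings,
    // so a mutated byte is rejected rather than silently accepted.
    assert_eq!(bool::deserialize(&[0u8][..]).unwrap(), false);
    assert_eq!(bool::deserialize(&[1u8][..]).unwrap(), true);
    assert!(bool::deserialize(&[2u8][..]).is_err());

    // deserialize_unchecked skips the check and maps 2 to false.
    assert_eq!(bool::deserialize_unchecked(&[2u8][..]).unwrap(), false);
}
```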
diff --git a/arkworks/algebra/test-curves/Cargo.toml b/arkworks/algebra/test-curves/Cargo.toml
new file mode 100644
index 00000000..1222ef98
--- /dev/null
+++ b/arkworks/algebra/test-curves/Cargo.toml
@@ -0,0 +1,40 @@
+[package]
+name = "ark-test-curves"
+version = "0.3.0"
+authors = [ "arkworks contributors" ]
+description = "A library for testing ark-ec & ark-poly"
+homepage = "https://arkworks.rs"
+repository = "https://github.com/arkworks-rs/algebra"
+documentation = "https://docs.rs/ark-test-curves/"
+keywords = ["cryptography", "serialization" ]
+categories = ["cryptography"]
+include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"]
+license = "MIT/Apache-2.0"
+edition = "2018"
+
+[dependencies]
+ark-std = { version = "^0.3.0", default-features = false }
+ark-ff = { version = "^0.3.0", path = "../ff", default-features = false }
+ark-ec = { version = "^0.3.0", path = "../ec", default-features = false }
+
+[dev-dependencies]
+ark-serialize = { version = "^0.3.0", path = "../serialize", default-features = false }
+ark-algebra-test-templates = { version = "^0.3.0", path = "../test-templates", default-features = false }
+
+[features]
+default = []
+
+asm = [ "ark-ff/asm" ]
+
+parallel = [ "ark-ff/parallel", "ark-ec/parallel", "ark-std/parallel" ]
+
+bls12_381_scalar_field = []
+bls12_381_curve = [ "bls12_381_scalar_field" ]
+
+mnt4_753_scalar_field = []
+mnt4_753_base_field = []
+mnt4_753_curve = [ "mnt4_753_scalar_field", "mnt4_753_base_field" ]
+
+bn384_small_two_adicity_scalar_field = []
+bn384_small_two_adicity_base_field = []
+bn384_small_two_adicity_curve = [ "bn384_small_two_adicity_scalar_field", "bn384_small_two_adicity_base_field" ]
diff --git a/arkworks/algebra/test-curves/LICENSE-APACHE b/arkworks/algebra/test-curves/LICENSE-APACHE
new file mode 120000
index 00000000..965b606f
--- /dev/null
+++ b/arkworks/algebra/test-curves/LICENSE-APACHE
@@ -0,0 +1 @@
+../LICENSE-APACHE
\ No newline at end of file
diff --git a/arkworks/algebra/test-curves/LICENSE-MIT b/arkworks/algebra/test-curves/LICENSE-MIT
new file mode 120000
index 00000000..76219eb7
--- /dev/null
+++ b/arkworks/algebra/test-curves/LICENSE-MIT
@@ -0,0 +1 @@
+../LICENSE-MIT
\ No newline at end of file
diff --git a/arkworks/algebra/test-curves/src/bls12_381/fq.rs b/arkworks/algebra/test-curves/src/bls12_381/fq.rs
new file mode 100644
index 00000000..0db87501
--- /dev/null
+++ b/arkworks/algebra/test-curves/src/bls12_381/fq.rs
@@ -0,0 +1,115 @@
+use ark_ff::{
+    biginteger::BigInteger384 as BigInteger,
+    field_new,
+    fields::{FftParameters, Fp384, Fp384Parameters, FpParameters},
+};
+
+pub type Fq = Fp384<FqParameters>;
+
+pub struct FqParameters;
+
+impl Fp384Parameters for FqParameters {}
+impl FftParameters for FqParameters {
+    type BigInt = BigInteger;
+
+    const TWO_ADICITY: u32 = 1;
+
+    #[rustfmt::skip]
+    const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([
+        0x43f5fffffffcaaae,
+        0x32b7fff2ed47fffd,
+        0x7e83a49a2e99d69,
+        0xeca8f3318332bb7a,
+        0xef148d1ea0f4c069,
+        0x40ab3263eff0206,
+    ]);
+}
+impl FpParameters for FqParameters {
+    /// MODULUS = 4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787
+    #[rustfmt::skip]
+    const MODULUS: BigInteger = BigInteger([
+        0xb9feffffffffaaab,
+        0x1eabfffeb153ffff,
+        0x6730d2a0f6b0f624,
+        0x64774b84f38512bf,
+        0x4b1ba7b6434bacd7,
+        0x1a0111ea397fe69a,
+    ]);
+
+    const MODULUS_BITS: u32 = 381;
+
+    const CAPACITY: u32 = Self::MODULUS_BITS - 1;
+
+    const REPR_SHAVE_BITS: u32 = 3;
+
+    /// R = 3380320199399472671518931668520476396067793891014375699959770179129436917079669831430077592723774664465579537268733
+    #[rustfmt::skip]
+    const R: BigInteger = BigInteger([
+        0x760900000002fffd,
+        0xebf4000bc40c0002,
+        0x5f48985753c758ba,
+        0x77ce585370525745,
+        0x5c071a97a256ec6d,
+        0x15f65ec3fa80e493,
+    ]);
+
+    #[rustfmt::skip]
+    const R2: BigInteger = BigInteger([
+        0xf4df1f341c341746,
+        0xa76e6a609d104f1,
+        0x8de5476c4c95b6d5,
+        0x67eb88a9939d83c0,
+        0x9a793e85b519952d,
+        0x11988fe592cae3aa,
+    ]);
+
+    const INV: u64 = 0x89f3fffcfffcfffd;
+
+    /// GENERATOR = 2
+    /// Encoded in Montgomery form, so the value is
+    /// 2 * R % q = 2758230843577277949620073511305048635578704962089743514587482222134842183668501798417467556318533664893264801977679
+    #[rustfmt::skip]
+    const GENERATOR: BigInteger = BigInteger([
+        0x321300000006554f,
+        0xb93c0018d6c40005,
+        0x57605e0db0ddbb51,
+        0x8b256521ed1f9bcb,
+        0x6cf28d7901622c03,
+        0x11ebab9dbb81e28c,
+    ]);
+
+    #[rustfmt::skip]
+    const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        0xdcff7fffffffd555,
+        0xf55ffff58a9ffff,
+        0xb39869507b587b12,
+        0xb23ba5c279c2895f,
+        0x258dd3db21a5d66b,
+        0xd0088f51cbff34d,
+    ]);
+
+    /// T and T_MINUS_ONE_DIV_TWO, where MODULUS - 1 = 2^S * T
+    /// For T coprime to 2
+    #[rustfmt::skip]
+    const T: BigInteger = BigInteger([
+        0xdcff7fffffffd555,
+        0xf55ffff58a9ffff,
+        0xb39869507b587b12,
+        0xb23ba5c279c2895f,
+        0x258dd3db21a5d66b,
+        0xd0088f51cbff34d,
+    ]);
+
+    #[rustfmt::skip]
+    const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        0xee7fbfffffffeaaa,
+        0x7aaffffac54ffff,
+        0xd9cc34a83dac3d89,
+        0xd91dd2e13ce144af,
+        0x92c6e9ed90d2eb35,
+        0x680447a8e5ff9a6,
+    ]);
+}
+
+pub const FQ_ONE: Fq = field_new!(Fq, "1");
+pub const FQ_ZERO: Fq = field_new!(Fq, "0");
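`FqParameters` above is a standard Montgomery-arithmetic setup: `R = 2^384 mod q`, `R2 = R^2 mod q`, and `INV = -q^{-1} mod 2^64`. `INV` depends only on the lowest limb of the modulus, so it can be recomputed with Hensel/Newton lifting in plain `u64` arithmetic. A sketch with no arkworks dependency; the limb and the expected constant are copied from the listing above:

    // Hensel/Newton lifting: if x ≡ q⁻¹ (mod 2^k), then x(2 − q·x) ≡ q⁻¹ (mod 2^2k).
    fn inv_mod_2_64(q: u64) -> u64 {
        assert!(q & 1 == 1, "modulus must be odd");
        let mut x: u64 = 1; // correct mod 2
        for _ in 0..6 {
            x = x.wrapping_mul(2u64.wrapping_sub(q.wrapping_mul(x)));
        }
        x
    }

    fn main() {
        // Lowest limb of the BLS12-381 base-field modulus, from the listing above.
        let q0: u64 = 0xb9feffffffffaaab;
        let inv = inv_mod_2_64(q0);
        assert_eq!(q0.wrapping_mul(inv), 1);
        // INV is its negation, matching the constant in FqParameters.
        assert_eq!(inv.wrapping_neg(), 0x89f3fffcfffcfffd);
    }

Each iteration doubles the number of correct low bits (1 → 2 → 4 → … → 64), which is why a fixed count of six steps suffices for any odd 64-bit limb.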
diff --git a/arkworks/algebra/test-curves/src/bls12_381/fr.rs b/arkworks/algebra/test-curves/src/bls12_381/fr.rs
new file mode 100644
index 00000000..7604c678
--- /dev/null
+++ b/arkworks/algebra/test-curves/src/bls12_381/fr.rs
@@ -0,0 +1,100 @@
+use ark_ff::{
+    biginteger::BigInteger256 as BigInteger,
+    fields::{FftParameters, Fp256, Fp256Parameters, FpParameters},
+};
+
+pub type Fr = Fp256<FrParameters>;
+
+pub struct FrParameters;
+
+impl Fp256Parameters for FrParameters {}
+impl FftParameters for FrParameters {
+    type BigInt = BigInteger;
+
+    const TWO_ADICITY: u32 = 32;
+
+    #[rustfmt::skip]
+    const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([
+        0xb9b58d8c5f0e466a,
+        0x5b1b4c801819d7ec,
+        0xaf53ae352a31e64,
+        0x5bf3adda19e9b27b,
+    ]);
+}
+impl FpParameters for FrParameters {
+    /// MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513
+    #[rustfmt::skip]
+    const MODULUS: BigInteger = BigInteger([
+        0xffffffff00000001,
+        0x53bda402fffe5bfe,
+        0x3339d80809a1d805,
+        0x73eda753299d7d48,
+    ]);
+
+    const MODULUS_BITS: u32 = 255;
+
+    const CAPACITY: u32 = Self::MODULUS_BITS - 1;
+
+    const REPR_SHAVE_BITS: u32 = 1;
+
+    /// R = 10920338887063814464675503992315976177888879664585288394250266608035967270910
+    #[rustfmt::skip]
+    const R: BigInteger = BigInteger([
+        0x1fffffffe,
+        0x5884b7fa00034802,
+        0x998c4fefecbc4ff5,
+        0x1824b159acc5056f,
+    ]);
+
+    #[rustfmt::skip]
+    const R2: BigInteger = BigInteger([
+        0xc999e990f3f29c6d,
+        0x2b6cedcb87925c23,
+        0x5d314967254398f,
+        0x748d9d99f59ff11,
+    ]);
+
+    const INV: u64 = 0xfffffffeffffffff;
+
+    /// GENERATOR = 7
+    /// Encoded in Montgomery form, so the value here is
+    /// 7 * R % q = 24006497034320510773280787438025867407531605151569380937148207556313189711857
+    #[rustfmt::skip]
+    const GENERATOR: BigInteger = BigInteger([
+        0xefffffff1,
+        0x17e363d300189c0f,
+        0xff9c57876f8457b0,
+        0x351332208fc5a8c4,
+    ]);
+
+    #[rustfmt::skip]
+    const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        0x7fffffff80000000,
+        0xa9ded2017fff2dff,
+        0x199cec0404d0ec02,
+        0x39f6d3a994cebea4,
+    ]);
+
+    // T and T_MINUS_ONE_DIV_TWO, where MODULUS - 1 = 2^S * T
+    // For T coprime to 2
+
+    // T = (MODULUS - 1) / 2^S =
+    // 12208678567578594777604504606729831043093128246378069236549469339647
+    #[rustfmt::skip]
+    const T: BigInteger = BigInteger([
+        0xfffe5bfeffffffff,
+        0x9a1d80553bda402,
+        0x299d7d483339d808,
+        0x73eda753,
+    ]);
+
+    // (T - 1) / 2 =
+    // 6104339283789297388802252303364915521546564123189034618274734669823
+    #[rustfmt::skip]
+    const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        0x7fff2dff7fffffff,
+        0x4d0ec02a9ded201,
+        0x94cebea4199cec04,
+        0x39f6d3a9,
+    ]);
+}
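`FrParameters` encodes the 2-adic structure FFTs rely on: `MODULUS - 1 = 2^32 * T` with `T` odd, and `TWO_ADIC_ROOT_OF_UNITY = GENERATOR^T` has exact order `2^32` (the generic `fft_field_test` later in this diff checks precisely this). The recipe is size-independent; here it is over a word-sized toy prime, `2^31 - 2^27 + 1`, with `31` assumed to be a generator and re-verified in code — none of these toy constants come from this repository:

    fn pow_mod(mut b: u64, mut e: u64, m: u64) -> u64 {
        let mut acc = 1u64;
        b %= m;
        while e > 0 {
            if e & 1 == 1 {
                acc = acc * b % m;
            }
            b = b * b % m;
            e >>= 1;
        }
        acc
    }

    fn main() {
        // Toy FFT-friendly prime: p − 1 = 2^27 · 3 · 5, so TWO_ADICITY = 27.
        let p: u64 = (1 << 31) - (1 << 27) + 1; // 2013265921
        let (s, t) = (27u32, 15u64);
        assert_eq!(p - 1, (1u64 << s) * t);

        // Verify the assumed generator: no prime q dividing p − 1 may
        // satisfy g^((p−1)/q) = 1.
        let g = 31u64;
        for &q in [2u64, 3, 5].iter() {
            assert_ne!(pow_mod(g, (p - 1) / q, p), 1);
        }

        // Same recipe as TWO_ADIC_ROOT_OF_UNITY: g^t has exact order 2^s.
        let root = pow_mod(g, t, p);
        assert_eq!(pow_mod(root, 1 << s, p), 1);
        assert_ne!(pow_mod(root, 1 << (s - 1), p), 1);
    }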
diff --git a/arkworks/algebra/test-curves/src/bls12_381/g1.rs b/arkworks/algebra/test-curves/src/bls12_381/g1.rs
new file mode 100644
index 00000000..0e8391f9
--- /dev/null
+++ b/arkworks/algebra/test-curves/src/bls12_381/g1.rs
@@ -0,0 +1,78 @@
+use crate::bls12_381::*;
+use ark_ec::{
+    models::{ModelParameters, SWModelParameters},
+    short_weierstrass_jacobian::*,
+};
+use ark_ff::{field_new, Zero};
+
+pub type G1Affine = GroupAffine<Parameters>;
+pub type G1Projective = GroupProjective<Parameters>;
+
+#[derive(Clone, Default, PartialEq, Eq)]
+pub struct Parameters;
+
+impl ModelParameters for Parameters {
+    type BaseField = Fq;
+    type ScalarField = Fr;
+}
+
+impl SWModelParameters for Parameters {
+    /// COEFF_A = 0
+    const COEFF_A: Fq = field_new!(Fq, "0");
+
+    /// COEFF_B = 4
+    #[rustfmt::skip]
+    const COEFF_B: Fq = field_new!(Fq, "4");
+
+    /// COFACTOR = (x - 1)^2 / 3 = 76329603384216526031706109802092473003
+    const COFACTOR: &'static [u64] = &[0x8c00aaab0000aaab, 0x396c8c005555e156];
+
+    /// COFACTOR_INV = COFACTOR^{-1} mod r
+    /// = 52435875175126190458656871551744051925719901746859129887267498875565241663483
+    #[rustfmt::skip]
+    const COFACTOR_INV: Fr = field_new!(Fr, "52435875175126190458656871551744051925719901746859129887267498875565241663483");
+
+    /// AFFINE_GENERATOR_COEFFS = (G1_GENERATOR_X, G1_GENERATOR_Y)
+    const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) =
+        (G1_GENERATOR_X, G1_GENERATOR_Y);
+
+    #[inline(always)]
+    fn mul_by_a(_: &Self::BaseField) -> Self::BaseField {
+        Self::BaseField::zero()
+    }
+}
+
+/// G1_GENERATOR_X =
+/// 3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507
+#[rustfmt::skip]
+pub const G1_GENERATOR_X: Fq = field_new!(Fq, "3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507");
+
+/// G1_GENERATOR_Y =
+/// 1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569
+#[rustfmt::skip]
+pub const G1_GENERATOR_Y: Fq = field_new!(Fq, "1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569");
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use ark_ec::ProjectiveCurve;
+    use ark_std::UniformRand;
+
+    #[test]
+    fn batch_normalization() {
+        let mut rng = ark_std::test_rng();
+
+        let mut g_s = [G1Projective::zero(); 100];
+        for i in 0..100 {
+            g_s[i] = G1Projective::rand(&mut rng);
+        }
+
+        let mut g_s_affine_naive = [G1Affine::zero(); 100];
+        for (i, g) in g_s.iter().enumerate() {
+            g_s_affine_naive[i] = g.into_affine();
+        }
+
+        let g_s_affine_fast = G1Projective::batch_normalization_into_affine(&g_s);
+        assert_eq!(g_s_affine_naive.as_ref(), g_s_affine_fast.as_slice());
+    }
+}
diff --git a/arkworks/algebra/test-curves/src/bls12_381/mod.rs b/arkworks/algebra/test-curves/src/bls12_381/mod.rs
new file mode 100644
index 00000000..5485e3a6
--- /dev/null
+++ b/arkworks/algebra/test-curves/src/bls12_381/mod.rs
@@ -0,0 +1,15 @@
+pub mod fr;
+pub use fr::*;
+
+#[cfg(feature = "bls12_381_curve")]
+pub mod fq;
+#[cfg(feature = "bls12_381_curve")]
+pub mod g1;
+
+#[cfg(feature = "bls12_381_curve")]
+pub use fq::*;
+#[cfg(feature = "bls12_381_curve")]
+pub use g1::*;
+
+#[cfg(test)]
+mod tests;
diff --git a/arkworks/algebra/test-curves/src/bls12_381/tests.rs b/arkworks/algebra/test-curves/src/bls12_381/tests.rs
new file mode 100644
index 00000000..05852c38
--- /dev/null
+++ b/arkworks/algebra/test-curves/src/bls12_381/tests.rs
@@ -0,0 +1,54 @@
+#![allow(unused_imports)]
+use ark_ec::{models::SWModelParameters, AffineCurve, PairingEngine, ProjectiveCurve};
+use ark_ff::{One, UniformRand, Zero};
+
+use crate::bls12_381::{g1, Fq, FqParameters, Fr, G1Affine, G1Projective};
+use ark_algebra_test_templates::{curves::*, fields::*, groups::*};
+use ark_std::rand::Rng;
+
+pub(crate) const ITERATIONS: usize = 5;
+
+#[test]
+fn test_fr() {
+    let mut rng = ark_std::test_rng();
+    for _ in 0..ITERATIONS {
+        let a: Fr = UniformRand::rand(&mut rng);
+        let b: Fr = UniformRand::rand(&mut rng);
+        field_test(a, b);
+        primefield_test::<Fr>();
+        sqrt_field_test(b);
+    }
+}
+
+#[test]
+fn test_fq() {
+    let mut rng = ark_std::test_rng();
+    for _ in 0..ITERATIONS {
+        let a: Fq = UniformRand::rand(&mut rng);
+        let b: Fq = UniformRand::rand(&mut rng);
+        field_test(a, b);
+        primefield_test::<Fq>();
+        sqrt_field_test(a);
+    }
+}
+
+#[test]
+fn test_g1_projective_curve() {
+    curve_tests::<G1Projective>();
+    sw_tests::<g1::Parameters>();
+}
+
+#[test]
+fn test_g1_projective_group() {
+    let mut rng = ark_std::test_rng();
+    let a: G1Projective = rng.gen();
+    let b: G1Projective = rng.gen();
+    group_test(a, b);
+}
+
+#[test]
+fn test_g1_generator() {
+    let generator = G1Affine::prime_subgroup_generator();
+    assert!(generator.is_on_curve());
+    assert!(generator.is_in_correct_subgroup_assuming_on_curve());
+}
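The compressed round trips behind `sw_tests` (invoked in `test_g1_projective_curve` above, defined later in this diff) store only `x` plus a sign flag for `y`; deserialization re-solves `y^2 = x^3 + b` and uses the flag to pick a root. When `p ≡ 3 (mod 4)` the square root is a single exponentiation. A toy sketch with a made-up small prime and curve coefficient — not BLS12-381 arithmetic:

    fn pow_mod(mut b: u64, mut e: u64, m: u64) -> u64 {
        let mut acc = 1u64;
        b %= m;
        while e > 0 {
            if e & 1 == 1 {
                acc = acc * b % m;
            }
            b = b * b % m;
            e >>= 1;
        }
        acc
    }

    fn main() {
        // Toy short-Weierstrass curve y² = x³ + 4 over F_10007, 10007 ≡ 3 (mod 4).
        let (p, b) = (10007u64, 4u64);

        // Find any affine point: scan x until x³ + b is a square.
        let (x, y) = (0..p)
            .find_map(|x| {
                let rhs = (pow_mod(x, 3, p) + b) % p;
                let y = pow_mod(rhs, (p + 1) / 4, p); // candidate sqrt for p ≡ 3 (mod 4)
                (y * y % p == rhs).then(|| (x, y))
            })
            .unwrap();

        // "Compress": keep x plus the parity of y as the sign flag.
        let (cx, sign) = (x, y & 1);

        // "Decompress": solve for y again and pick the root matching the flag.
        let rhs = (pow_mod(cx, 3, p) + b) % p;
        let root = pow_mod(rhs, (p + 1) / 4, p);
        let y_back = if root & 1 == sign { root } else { p - root };

        assert_eq!((cx, y_back), (x, y));
    }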
diff --git a/arkworks/algebra/test-curves/src/bn384_small_two_adicity/fq.rs b/arkworks/algebra/test-curves/src/bn384_small_two_adicity/fq.rs
new file mode 100644
index 00000000..b1b3d770
--- /dev/null
+++ b/arkworks/algebra/test-curves/src/bn384_small_two_adicity/fq.rs
@@ -0,0 +1,137 @@
+use ark_ff::{
+    biginteger::BigInteger384 as BigInteger,
+    fields::{FftParameters, Fp384, Fp384Parameters, FpParameters},
+};
+
+pub type Fq = Fp384<FqParameters>;
+
+pub struct FqParameters;
+
+impl Fp384Parameters for FqParameters {}
+impl FftParameters for FqParameters {
+    type BigInt = BigInteger;
+
+    const TWO_ADICITY: u32 = 12;
+
+    // TWO_ADIC_ROOT_OF_UNITY = GENERATOR ^ t =
+    // 4563474743154071393992783416618298946273483760389666561454590580850277486490043009369759159902206584965352075028870
+    // t is defined below
+    // This number needs to be in the Montgomery residue form.
+    // I.e., write TWO_ADIC_ROOT_OF_UNITY * R
+    // = 31697142653270303559937416477969693485777517469743380851550419037088206541495408586849687582005649424923407072789
+    const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([
+        11341361458440748565u64,
+        11432203502797436080u64,
+        15207341022279519515u64,
+        47188373187644751u64,
+        123916096934654777u64,
+        14839576327112111u64,
+    ]);
+
+    const SMALL_SUBGROUP_BASE: Option<u32> = Some(3);
+    const SMALL_SUBGROUP_BASE_ADICITY: Option<u32> = Some(2);
+
+    // LARGE_SUBGROUP_ROOT_OF_UNITY = GENERATOR ^ (t * 3 ^ 2) =
+    // 203100967768496856767841701771526315192814992286543641883928020883407386213917566206874176054653008117753458021037
+    // I.e., write LARGE_SUBGROUP_ROOT_OF_UNITY * R
+    // = 81125788721017958531970004711554176763707237538772656640376499392204495132484005854811881368159718832226698073199
+    const LARGE_SUBGROUP_ROOT_OF_UNITY: Option<Self::BigInt> = Some(BigInteger([
+        6225018931355915375u64,
+        180290822448891806u64,
+        14465855242330424160u64,
+        8575642455718703211u64,
+        8320153096556229121u64,
+        37980468681094481u64,
+    ]));
+}
+
+impl FpParameters for FqParameters {
+    /// MODULUS = 5945877603251831796258517492029536515488649313567122628447476625319762940580461319088175968449723373773214087057409
+    const MODULUS: BigInteger = BigInteger([
+        2340831834029625345u64,
+        7249631296803227205u64,
+        16747242270977641452u64,
+        15205557732015452966u64,
+        15076886007691743306u64,
+        2783667458303802095u64,
+    ]);
+
+    const MODULUS_BITS: u32 = 382;
+
+    const CAPACITY: u32 = Self::MODULUS_BITS - 1;
+
+    const REPR_SHAVE_BITS: u32 = 2;
+
+    // R = 2^{384} % MODULUS
+    // R = 3726740576883488434727935147966394712147843389062710897263433652327144128014442696885210444186575398167343467962362
+    const R: BigInteger = BigInteger([
+        4401753069531799546u64,
+        11842444440309291617u64,
+        10197010816391460981u64,
+        1000373976455040278u64,
+        1772404322397298239u64,
+        1744739323886739041u64,
+    ]);
+
+    // R2 = R * R % MODULUS
+    // R2 = 3383647891563276668075677154877236888682502454192504651186644086866057738042913461398173437153800906409349899530047
+    const R2: BigInteger = BigInteger([
+        16517710552441204543u64,
+        4787934104620433613u64,
+        12185556526193827174u64,
+        10815510726684521116u64,
+        3531299847928964248u64,
+        1584114432653590388u64,
+    ]);
+
+    // INV = -(MODULUS)^{-1} mod 2^64
+    const INV: u64 = 887568002135035903u64;
+
+    // GENERATOR = 7
+    // This number needs to be in the Montgomery residue form.
+    // I.e., write 7 * R % MODULUS =
+    // 2303673625177091858061476067646616923080306469170485767054129065010957133779253601843769235507134292078547927506898
+    const GENERATOR: BigInteger = BigInteger([
+        3002200076894543826u64,
+        17005097747533029268u64,
+        4390106630829661061u64,
+        1520619128252124930u64,
+        7439518447142769294u64,
+        1078505433991964904u64,
+    ]);
+
+    // (mod - 1) / 2 = 2972938801625915898129258746014768257744324656783561314223738312659881470290230659544087984224861686886607043528704
+    const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        10393787953869588480u64,
+        3624815648401613602u64,
+        8373621135488820726u64,
+        7602778866007726483u64,
+        16761815040700647461u64,
+        1391833729151901047u64,
+    ]);
+
+    // T and T_MINUS_ONE_DIV_TWO, where MODULUS - 1 = 2^S * T
+    // S = 12
+
+    /// T = (MODULUS - 1) / 2^S =
+    /// 1451630274231404247133427122077523563351721023820098297960809722978457749165151689230511711047295745550101095473
+    const T: BigInteger = BigInteger([
+        7228848894076625969u64,
+        13746755992250574892u64,
+        3633989981855682676u64,
+        336978666793584539u64,
+        12609256237382989921u64,
+        679606313062451u64,
+    ]);
+
+    /// (T - 1) / 2 =
+    /// 72581513711570212356671356103876178167586051191004914898040486148922887458257584461525585552364787277505054773
+    const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        3614424447038312984u64,
+        6873377996125287446u64,
+        11040367027782617146u64,
+        9391861370251568077u64,
+        15528000155546270768u64,
+        339803156531225u64,
+    ]);
+}
diff --git a/arkworks/algebra/test-curves/src/bn384_small_two_adicity/fr.rs b/arkworks/algebra/test-curves/src/bn384_small_two_adicity/fr.rs
new file mode 100644
index 00000000..d9eb2bad
--- /dev/null
+++ b/arkworks/algebra/test-curves/src/bn384_small_two_adicity/fr.rs
@@ -0,0 +1,138 @@
+use ark_ff::{
+    biginteger::BigInteger384 as BigInteger,
+    fields::{FftParameters, Fp384, Fp384Parameters, FpParameters},
+};
+
+pub type Fr = Fp384<FrParameters>;
+
+pub struct FrParameters;
+
+pub const FR_ONE: Fr = ark_ff::field_new!(Fr, "1");
+
+impl Fp384Parameters for FrParameters {}
+impl FftParameters for FrParameters {
+    type BigInt = BigInteger;
+
+    const TWO_ADICITY: u32 = 12;
+
+    // TWO_ADIC_ROOT_OF_UNITY = GENERATOR ^ T =
+    // 1685271986666084262778868986067286870708440243287855288358961780551611799713704250599068248127477556627411635786779
+    // t is defined below
+    // This number needs to be in the Montgomery residue form.
+    // I.e., write TWO_ADIC_ROOT_OF_UNITY * R
+    // = 1539563187696293616856158973955665088899482868488546332850378941921984564611273075190849188323241758701638100060070
+    const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([
+        13480433396127238054u64,
+        1703594782943735056u64,
+        8417751128359587317u64,
+        11248980344203883641u64,
+        4705241879150942070u64,
+        720773657239228462u64,
+    ]);
+
+    const SMALL_SUBGROUP_BASE: Option<u32> = Some(3);
+    const SMALL_SUBGROUP_BASE_ADICITY: Option<u32> = Some(2);
+
+    // LARGE_SUBGROUP_ROOT_OF_UNITY = GENERATOR ^ (T * 3 ^ 2)
+    // = 3524614118565436050820346784762407349815771892452866211429575895239855511309348587252928054123237406857164753350910
+    // This number needs to be in the Montgomery residue form.
+    // I.e., write LARGE_SUBGROUP_ROOT_OF_UNITY * R
+    // = 2243640460791708394678669425369274565832631199871689254948845545672204516674256702673568780556211844984291473787379
+    const LARGE_SUBGROUP_ROOT_OF_UNITY: Option<Self::BigInt> = Some(BigInteger([
+        16448235414327691763u64,
+        6101416213029103415u64,
+        1714659905601749299u64,
+        18157817127906248745u64,
+        7986749858655934624u64,
+        1050399849371924948u64,
+    ]));
+}
+impl FpParameters for FrParameters {
+    /// MODULUS = 5945877603251831796258517492029536515488649313567122628445038208291596545947608789992834434053176523624102324539393
+    const MODULUS: BigInteger = BigInteger([
+        17382266338285916161u64,
+        13339389119208890949u64,
+        9581378667081472421u64,
+        15205557732015452966u64,
+        15076886007691743306u64,
+        2783667458303802095u64,
+    ]);
+
+    const MODULUS_BITS: u32 = 382;
+
+    const CAPACITY: u32 = Self::MODULUS_BITS - 1;
+
+    const REPR_SHAVE_BITS: u32 = 2;
+
+    // R = 2^{384} % MODULUS
+    // 3726740576883488434727935147966394712147843389062710897278064154496142495811557871457259650565856499062014043070458
+    const R: BigInteger = BigInteger([
+        6386866412541812730u64,
+        12197385653294412380u64,
+        16298704292349371933u64,
+        1000373976455040280u64,
+        1772404322397298239u64,
+        1744739323886739041u64,
+    ]);
+
+    // R2 = R * R % MODULUS
+    // 743374565348571412572717835265798450620415736052604204514879248137607002524418551738269123252154404143816216787227
+    const R2: BigInteger = BigInteger([
+        7278302575398336795u64,
+        13899913090107078051u64,
+        14214418478611586731u64,
+        17879031161354349451u64,
+        934436771375522906u64,
+        348023912527199718u64,
+    ]);
+
+    // INV = -(MODULUS)^{-1} mod 2^64
+    const INV: u64 = 5652841145273880575;
+
+    // GENERATOR = 5
+    // This number needs to be in the Montgomery residue form.
+    // Here, write 5 * R = 796070074661946784864123263743364014273269004612186601055206147605922841214962987307794950669752924437763241734111
+    const GENERATOR: BigInteger = BigInteger([
+        16681021195270418399u64,
+        2522016835135837435u64,
+        15855897313083339171u64,
+        14725428907357497352u64,
+        524851736330364506u64,
+        372694244522288918u64,
+    ]);
+
+    // (mod - 1) / 2 = 2972938801625915898129258746014768257744324656783561314222519104145798272973804394996417217026588261812051162269696
+    const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        17914505205997733888u64,
+        15893066596459221282u64,
+        4790689333540736210u64,
+        7602778866007726483u64,
+        16761815040700647461u64,
+        1391833729151901047u64,
+    ]);
+
+    // T and T_MINUS_ONE_DIV_TWO, where MODULUS - 1 = 2^S * T
+    // S = 12
+
+    /// T = (MODULUS - 1) / 2^S =
+    /// 1451630274231404247133427122077523563351721023820098297960214406321190562975490427244344344251263799712915606577
+    const T: BigInteger = BigInteger([
+        2620835100870003761u64,
+        11122644166774436482u64,
+        3632240503436762713u64,
+        336978666793584539u64,
+        12609256237382989921u64,
+        679606313062451u64,
+    ]);
+
+    /// (T - 1) / 2 =
+    /// 725815137115702123566713561038761781675860511910049148980107203160595281487745213622172172125631899856457803288
+    const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        1310417550435001880u64,
+        14784694120241994049u64,
+        11039492288573157164u64,
+        9391861370251568077u64,
+        15528000155546270768u64,
+        339803156531225u64,
+    ]);
+}
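`SMALL_SUBGROUP_BASE = 3` with adicity 2 records that `MODULUS - 1 = 2^12 * 3^2 * T`, so evaluation domains of size `2^i * 3^j` exist alongside the usual radix-2 ones, and `LARGE_SUBGROUP_ROOT_OF_UNITY` generates the largest of them. The same bookkeeping over a toy prime (97, chosen because 96 = 2^5 * 3; the generator is found by search rather than assumed):

    fn pow_mod(mut b: u64, mut e: u64, m: u64) -> u64 {
        let mut acc = 1u64;
        b %= m;
        while e > 0 {
            if e & 1 == 1 {
                acc = acc * b % m;
            }
            b = b * b % m;
            e >>= 1;
        }
        acc
    }

    fn main() {
        // Toy prime: p − 1 = 96 = 2^5 · 3 — TWO_ADICITY = 5,
        // SMALL_SUBGROUP_BASE = 3 with adicity 1, and T = 1.
        let p = 97u64;
        let (s, base, adicity) = (5u32, 3u64, 1u32);
        assert_eq!(p - 1, (1 << s) * base.pow(adicity));

        // Find a generator by search: g^((p−1)/q) ≠ 1 for every prime q | p − 1.
        let g = (2..p)
            .find(|&g| pow_mod(g, (p - 1) / 2, p) != 1 && pow_mod(g, (p - 1) / 3, p) != 1)
            .unwrap();

        // Analog of LARGE_SUBGROUP_ROOT_OF_UNITY: order exactly 2^5 · 3 = 96.
        let root = g; // T = 1 here, so g^T = g
        assert_eq!(pow_mod(root, 96, p), 1);
        assert_ne!(pow_mod(root, 48, p), 1);
        assert_ne!(pow_mod(root, 32, p), 1);

        // Any mixed domain size 2^i · 3^j dividing 96 then has a root of unity.
        let size = 24u64; // 2^3 · 3
        let w = pow_mod(g, (p - 1) / size, p);
        assert_eq!(pow_mod(w, size, p), 1);
    }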
diff --git a/arkworks/algebra/test-curves/src/bn384_small_two_adicity/g1.rs b/arkworks/algebra/test-curves/src/bn384_small_two_adicity/g1.rs
new file mode 100644
index 00000000..aadf0815
--- /dev/null
+++ b/arkworks/algebra/test-curves/src/bn384_small_two_adicity/g1.rs
@@ -0,0 +1,47 @@
+use ark_ec::{
+    models::{ModelParameters, SWModelParameters},
+    short_weierstrass_jacobian::*,
+};
+use ark_ff::{field_new, Zero};
+
+use crate::bn384_small_two_adicity::{Fq, Fr};
+
+pub type G1Affine = GroupAffine<Parameters>;
+pub type G1Projective = GroupProjective<Parameters>;
+
+#[derive(Clone, Default, PartialEq, Eq)]
+pub struct Parameters;
+
+impl ModelParameters for Parameters {
+    type BaseField = Fq;
+    type ScalarField = Fr;
+}
+
+impl SWModelParameters for Parameters {
+    /// COEFF_A = 0
+    const COEFF_A: Fq = field_new!(Fq, "0");
+
+    /// COEFF_B = 17
+    const COEFF_B: Fq = field_new!(Fq, "17");
+
+    /// COFACTOR = 1
+    const COFACTOR: &'static [u64] = &[0x1];
+
+    /// COFACTOR_INV = COFACTOR^{-1} mod r = 1
+    const COFACTOR_INV: Fr = field_new!(Fr, "1");
+
+    /// AFFINE_GENERATOR_COEFFS = (G1_GENERATOR_X, G1_GENERATOR_Y)
+    const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) =
+        (G1_GENERATOR_X, G1_GENERATOR_Y);
+
+    #[inline(always)]
+    fn mul_by_a(_: &Self::BaseField) -> Self::BaseField {
+        Self::BaseField::zero()
+    }
+}
+
+/// G1_GENERATOR_X = -1
+pub const G1_GENERATOR_X: Fq = field_new!(Fq, "-1");
+
+/// G1_GENERATOR_Y = 4
+pub const G1_GENERATOR_Y: Fq = field_new!(Fq, "4");
diff --git a/arkworks/algebra/test-curves/src/bn384_small_two_adicity/mod.rs b/arkworks/algebra/test-curves/src/bn384_small_two_adicity/mod.rs
new file mode 100644
index 00000000..da8e6a6a
--- /dev/null
+++ b/arkworks/algebra/test-curves/src/bn384_small_two_adicity/mod.rs
@@ -0,0 +1,17 @@
+#[cfg(feature = "bn384_small_two_adicity_base_field")]
+pub mod fq;
+#[cfg(feature = "bn384_small_two_adicity_base_field")]
+pub use fq::*;
+
+#[cfg(feature = "bn384_small_two_adicity_scalar_field")]
+pub mod fr;
+#[cfg(feature = "bn384_small_two_adicity_scalar_field")]
+pub use fr::*;
+
+#[cfg(feature = "bn384_small_two_adicity_curve")]
+pub mod g1;
+#[cfg(feature = "bn384_small_two_adicity_curve")]
+pub use g1::*;
+
+#[cfg(test)]
+mod tests;
diff --git a/arkworks/algebra/test-curves/src/bn384_small_two_adicity/tests.rs b/arkworks/algebra/test-curves/src/bn384_small_two_adicity/tests.rs
new file mode 100644
index 00000000..6268718f
--- /dev/null
+++ b/arkworks/algebra/test-curves/src/bn384_small_two_adicity/tests.rs
@@ -0,0 +1,54 @@
+#![allow(unused_imports)]
+use ark_ec::{models::SWModelParameters, AffineCurve, PairingEngine, ProjectiveCurve};
+use ark_ff::{One, UniformRand, Zero};
+use ark_std::rand::Rng;
+
+use crate::bn384_small_two_adicity::{g1, Fq, FqParameters, Fr, G1Affine, G1Projective};
+use ark_algebra_test_templates::{curves::*, fields::*, groups::*};
+
+pub(crate) const ITERATIONS: usize = 5;
+
+#[test]
+fn test_fr() {
+    let mut rng = ark_std::test_rng();
+    for _ in 0..ITERATIONS {
+        let a: Fr = UniformRand::rand(&mut rng);
+        let b: Fr = UniformRand::rand(&mut rng);
+        field_test(a, b);
+        primefield_test::<Fr>();
+        sqrt_field_test(b);
+    }
+}
+
+#[test]
+fn test_fq() {
+    let mut rng = ark_std::test_rng();
+    for _ in 0..ITERATIONS {
+        let a: Fq = UniformRand::rand(&mut rng);
+        let b: Fq = UniformRand::rand(&mut rng);
+        field_test(a, b);
+        primefield_test::<Fq>();
+        sqrt_field_test(a);
+    }
+}
+
+#[test]
+fn test_g1_projective_curve() {
+    curve_tests::<G1Projective>();
+    sw_tests::<g1::Parameters>();
+}
+
+#[test]
+fn test_g1_projective_group() {
+    let mut rng = ark_std::test_rng();
+    let a: G1Projective = rng.gen();
+    let b: G1Projective = rng.gen();
+    group_test(a, b);
+}
+
+#[test]
+fn test_g1_generator() {
+    let generator = G1Affine::prime_subgroup_generator();
+    assert!(generator.is_on_curve());
+    assert!(generator.is_in_correct_subgroup_assuming_on_curve());
+}
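This curve has `COFACTOR = 1`, but the generic `curve_tests` template (later in this diff) still checks that `mul_by_cofactor_inv().mul_by_cofactor()` is the identity; that hinges on `COFACTOR_INV = COFACTOR^{-1} mod r`, an extended-Euclid computation. A sketch with made-up `h` and `r` — not constants from any curve here:

    // Modular inverse via the extended Euclidean algorithm.
    fn mod_inv(a: i64, m: i64) -> i64 {
        let (mut old_r, mut r) = (a, m);
        let (mut old_x, mut x) = (1i64, 0i64);
        while r != 0 {
            let q = old_r / r;
            let t = old_r - q * r;
            old_r = r;
            r = t;
            let t = old_x - q * x;
            old_x = x;
            x = t;
        }
        assert_eq!(old_r, 1, "inputs must be coprime");
        old_x.rem_euclid(m)
    }

    fn main() {
        // Made-up cofactor h and subgroup order r (coprime).
        let (h, r) = (6i64, 1009i64);
        let h_inv = mod_inv(h, r);
        assert_eq!(h * h_inv % r, 1);

        // For any scalar k: (k·h⁻¹)·h ≡ k (mod r) — the scalar identity behind
        // the mul_by_cofactor_inv().mul_by_cofactor() round trip on points.
        for &k in [0i64, 1, 2, 500, 1008].iter() {
            assert_eq!(k * h_inv % r * h % r, k % r);
        }
    }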
diff --git a/arkworks/algebra/test-curves/src/lib.rs b/arkworks/algebra/test-curves/src/lib.rs
new file mode 100644
index 00000000..068d0540
--- /dev/null
+++ b/arkworks/algebra/test-curves/src/lib.rs
@@ -0,0 +1,18 @@
+#![no_std]
+
+#[cfg(any(feature = "bls12_381_scalar_field", feature = "bls12_381_curve"))]
+pub mod bls12_381;
+
+#[cfg(any(
+    feature = "mnt4_753_scalar_field",
+    feature = "mnt4_753_base_field",
+    feature = "mnt4_753_curve"
+))]
+pub mod mnt4_753;
+
+#[cfg(any(
+    feature = "bn384_small_two_adicity_scalar_field",
+    feature = "bn384_small_two_adicity_base_field",
+    feature = "bn384_small_two_adicity_curve"
+))]
+pub mod bn384_small_two_adicity;
diff --git a/arkworks/algebra/test-curves/src/mnt4_753/fq.rs b/arkworks/algebra/test-curves/src/mnt4_753/fq.rs
new file mode 100644
index 00000000..8b7d3f54
--- /dev/null
+++ b/arkworks/algebra/test-curves/src/mnt4_753/fq.rs
@@ -0,0 +1,170 @@
+use ark_ff::{
+    biginteger::BigInteger768 as BigInteger,
+    fields::{FftParameters, Fp768, Fp768Parameters, FpParameters},
+};
+
+pub type Fq = Fp768<FqParameters>;
+
+pub struct FqParameters;
+
+impl Fp768Parameters for FqParameters {}
+impl FftParameters for FqParameters {
+    type BigInt = BigInteger;
+
+    const TWO_ADICITY: u32 = 15;
+
+    const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([
+        0x3b079c7556ac378,
+        0x2c8c74d04a3f00d4,
+        0xd3b001061b90d4cf,
+        0x946e77514891b0e6,
+        0x79caec8ad6dc9ea1,
+        0xbefd780edc81435d,
+        0xe093d4dca630b154,
+        0x43a0f673199f1c12,
+        0x92276c78436253ff,
+        0xe249d1cf014fcd24,
+        0x96f36471fb7c3ec5,
+        0x1080b8906b7c4,
+    ]);
+
+    const SMALL_SUBGROUP_BASE: Option<u32> = Some(5);
+    const SMALL_SUBGROUP_BASE_ADICITY: Option<u32> = Some(2);
+    /// LARGE_SUBGROUP_ROOT_OF_UNITY =
+    /// 12249458902762217747626832919710926618510011455364963726393752854649914979954138109976331601455448780251166045203053508523342111624583986869301658366625356826888785691823710598470775453742133593634524619429629803955083254436531
+    const LARGE_SUBGROUP_ROOT_OF_UNITY: Option<Self::BigInt> = Some(BigInteger([
+        8926681816978929800,
+        10873079436792120119,
+        6519893728366769435,
+        7899277225737766970,
+        8416573500933450083,
+        12951641800297678468,
+        7093775028595490583,
+        14327009285082556021,
+        18228411097456927576,
+        2823658094446565457,
+        1708328092507553067,
+        109589007594791,
+    ]));
+}
+impl FpParameters for FqParameters {
+    /// MODULUS = 41898490967918953402344214791240637128170709919953949071783502921025352812571106773058893763790338921418070971888253786114353726529584385201591605722013126468931404347949840543007986327743462853720628051692141265303114721689601
+    const MODULUS: BigInteger = BigInteger([
+        0x5e9063de245e8001,
+        0xe39d54522cdd119f,
+        0x638810719ac425f0,
+        0x685acce9767254a4,
+        0xb80f0da5cb537e38,
+        0xb117e776f218059d,
+        0x99d124d9a15af79d,
+        0x7fdb925e8a0ed8d,
+        0x5eb7e8f96c97d873,
+        0xb7f997505b8fafed,
+        0x10229022eee2cdad,
+        0x1c4c62d92c411,
+    ]);
+
+    const MODULUS_BITS: u32 = 753;
+
+    const CAPACITY: u32 = Self::MODULUS_BITS - 1;
+
+    const REPR_SHAVE_BITS: u32 = 15;
+
+    const R: BigInteger = BigInteger([
+        0x98a8ecabd9dc6f42,
+        0x91cd31c65a034686,
+        0x97c3e4a0cd14572e,
+        0x79589819c788b601,
+        0xed269c942108976f,
+        0x1e0f4d8acf031d68,
+        0x320c3bb713338559,
+        0x598b4302d2f00a62,
+        0x4074c9cbfd8ca621,
+        0xfa47edb3865e88c,
+        0x95455fb31ff9a195,
+        0x7b479ec8e242,
+    ]);
+
+    const R2: BigInteger = BigInteger([
+        0x84717088cfd190c8,
+        0xc7d9ff8e7df03c0a,
+        0xa24bea56242b3507,
+        0xa896a656a0714c7d,
+        0x80a46659ff6f3ddf,
+        0x2f47839ef88d7ce8,
+        0xa8c86d4604a3b597,
+        0xe03c79cac4f7ef07,
+        0x2505daf1f4a81245,
+        0x8e4605754c381723,
+        0xb081f15bcbfdacaf,
+        0x2a33e89cb485,
+    ]);
+
+    const INV: u64 = 0xf2044cfbe45e7fff;
+
+    const GENERATOR: BigInteger = BigInteger([
+        0xa8f627f0e629635e,
+        0x202afce346c36872,
+        0x85e1ece733493254,
+        0x6d76e610664ac389,
+        0xdf542f3f04441585,
+        0x3aa4885bf6d4dd80,
+        0xeb8b63c1c0fffc74,
+        0xd2488e985f6cfa4e,
+        0xcce1c2a623f7a66a,
+        0x2a060f4d5085b19a,
+        0xa9111a596408842f,
+        0x11ca8d50bf627,
+    ]);
+
+    const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        0xaf4831ef122f4000,
+        0x71ceaa29166e88cf,
+        0x31c40838cd6212f8,
+        0x342d6674bb392a52,
+        0xdc0786d2e5a9bf1c,
+        0xd88bf3bb790c02ce,
+        0xcce8926cd0ad7bce,
+        0x83fedc92f45076c6,
+        0xaf5bf47cb64bec39,
+        0xdbfccba82dc7d7f6,
+        0x88114811777166d6,
+        0xe26316c96208,
+    ]);
+
+    // T and T_MINUS_ONE_DIV_TWO, where MODULUS - 1 = 2^S * T
+
+    /// T = (MODULUS - 1) / 2^S =
+    /// 1278640471433073529124274133033466709233725278318907137200424283478556909563327233064541435662546964154604216671394463687571830033251476599169665701965732619291119517454523942352538645255842982596454713491581459512424155325
+    const T: BigInteger = BigInteger([
+        0x233ebd20c7bc48bd,
+        0x4be1c73aa8a459ba,
+        0xa948c71020e33588,
+        0xfc70d0b599d2ece4,
+        0xb3b701e1b4b96a6,
+        0xef3b622fceede430,
+        0xdb1b33a249b342b5,
+        0xb0e60ffb724bd141,
+        0x5fdabd6fd1f2d92f,
+        0x9b5b6ff32ea0b71f,
+        0x882220452045ddc5,
+        0x3898c5b25,
+    ]);
+
+    /// (T - 1) / 2 =
+    /// 639320235716536764562137066516733354616862639159453568600212141739278454781663616532270717831273482077302108335697231843785915016625738299584832850982866309645559758727261971176269322627921491298227356745790729756212077662
+    const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        0x119f5e9063de245e,
+        0x25f0e39d54522cdd,
+        0x54a4638810719ac4,
+        0x7e38685acce97672,
+        0x59db80f0da5cb53,
+        0xf79db117e776f218,
+        0xed8d99d124d9a15a,
+        0xd87307fdb925e8a0,
+        0xafed5eb7e8f96c97,
+        0xcdadb7f997505b8f,
+        0xc41110229022eee2,
+        0x1c4c62d92,
+    ]);
+}
diff --git a/arkworks/algebra/test-curves/src/mnt4_753/fr.rs b/arkworks/algebra/test-curves/src/mnt4_753/fr.rs
new file mode 100644
index 00000000..dbf42e98
--- /dev/null
+++ b/arkworks/algebra/test-curves/src/mnt4_753/fr.rs
@@ -0,0 +1,153 @@
+use ark_ff::{
+    biginteger::BigInteger768 as BigInteger,
+    fields::{FftParameters, Fp768, Fp768Parameters, FpParameters},
+};
+
+pub type Fr = Fp768<FrParameters>;
+
+pub struct FrParameters;
+
+pub const FR_ONE: Fr = ark_ff::field_new!(Fr, "1");
+
+impl Fp768Parameters for FrParameters {}
+impl FftParameters for FrParameters {
+    type BigInt = BigInteger;
+
+    const TWO_ADICITY: u32 = 30;
+
+    const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([
+        0x307f66b297671883,
+        0xd72a7f2b1e645f4e,
+        0x67079daa9a902283,
+        0xf33f7620a86c668b,
+        0x8878570d66464c12,
+        0xa557af5b524f522b,
+        0x5fafa3f6ef19319d,
+        0x1eb9e04110a65629,
+        0x3f96feb3c639a0b0,
+        0x4d4fe37df3ffd732,
+        0xadc831bd55bcf3e9,
+        0x1b9f32a8bd6ab,
+    ]);
+}
+impl FpParameters for FrParameters {
+    /// MODULUS = 41898490967918953402344214791240637128170709919953949071783502921025352812571106773058893763790338921418070971888458477323173057491593855069696241854796396165721416325350064441470418137846398469611935719059908164220784476160001
+    const MODULUS: BigInteger = BigInteger([
+        0xd90776e240000001,
+        0x4ea099170fa13a4f,
+        0xd6c381bc3f005797,
+        0xb9dff97634993aa4,
+        0x3eebca9429212636,
+        0xb26c5c28c859a99b,
+        0x99d124d9a15af79d,
+        0x7fdb925e8a0ed8d,
+        0x5eb7e8f96c97d873,
+        0xb7f997505b8fafed,
+        0x10229022eee2cdad,
+        0x1c4c62d92c411,
+    ]);
+
+    const MODULUS_BITS: u32 = 753;
+
+    const CAPACITY: u32 = Self::MODULUS_BITS - 1;
+
+    const REPR_SHAVE_BITS: u32 = 15;
+
+    const R: BigInteger = BigInteger([
+        0xb99680147fff6f42,
+        0x4eb16817b589cea8,
+        0xa1ebd2d90c79e179,
+        0xf725caec549c0da,
+        0xab0c4ee6d3e6dad4,
+        0x9fbca908de0ccb62,
+        0x320c3bb713338498,
+        0x598b4302d2f00a62,
+        0x4074c9cbfd8ca621,
+        0xfa47edb3865e88c,
+        0x95455fb31ff9a195,
+        0x7b479ec8e242,
+    ]);
+
+    const R2: BigInteger = BigInteger([
+        0x3f9c69c7b7f4c8d1,
+        0x70a50fa9ee48d127,
+        0xcdbe6702009569cb,
+        0x6bd8c6c6c49edc38,
+        0x7955876cc35ee94e,
+        0xc7285529be54a3f4,
+        0xded52121ecec77cf,
+        0x99be80f2ee12ee8e,
+        0xc8a0ff01493bdcef,
+        0xacc27988f3d9a316,
+        0xd9e817a8fb44b3c9,
+        0x5b58037e0e4,
+    ]);
+
+    const INV: u64 = 0xc90776e23fffffff;
+
+    const GENERATOR: BigInteger = BigInteger([
+        0xeee0a5d37ff6635e,
+        0xff458536cfa1cff4,
+        0x659af978d8169ab0,
+        0x1f1841c24780e3f1,
+        0x602213036dcfef3a,
+        0xd1d5c8f39d72db20,
+        0xeb8b63c1c0ffefab,
+        0xd2488e985f6cfa4e,
+        0xcce1c2a623f7a66a,
+        0x2a060f4d5085b19a,
+        0xa9111a596408842f,
+        0x11ca8d50bf627,
+    ]);
+
+    const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        0xec83bb7120000000,
+        0xa7504c8b87d09d27,
+        0x6b61c0de1f802bcb,
+        0x5ceffcbb1a4c9d52,
+        0x9f75e54a1490931b,
+        0xd9362e14642cd4cd,
+        0xcce8926cd0ad7bce,
+        0x83fedc92f45076c6,
+        0xaf5bf47cb64bec39,
+        0xdbfccba82dc7d7f6,
+        0x88114811777166d6,
+        0xe26316c96208,
+    ]);
+
+    // T and T_MINUS_ONE_DIV_TWO, where MODULUS - 1 = 2^S * T
+
+    /// T = (MODULUS - 1) / 2^S =
+    /// 39021010480745652133919498688765463538626870065884617224134041854204007249857398469987226430131438115069708760723898631821547688442835449306011425196003537779414482717728302293895201885929702287178426719326440397855625
+    const T: BigInteger = BigInteger([
+        0x3e84e93f641ddb89,
+        0xfc015e5d3a82645c,
+        0xd264ea935b0e06f0,
+        0xa48498dae77fe5d8,
+        0x2166a66cfbaf2a50,
+        0x856bde76c9b170a3,
+        0xa283b63667449366,
+        0xb25f61cc1ff6e497,
+        0x6e3ebfb57adfa3e5,
+        0xbb8b36b6dfe65d41,
+        0xb64b1044408a408b,
+        0x71318,
+    ]);
+
+    /// (T - 1) / 2 =
+    /// 19510505240372826066959749344382731769313435032942308612067020927102003624928699234993613215065719057534854380361949315910773844221417724653005712598001768889707241358864151146947600942964851143589213359663220198927812
+    const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        0x1f42749fb20eedc4,
+        0x7e00af2e9d41322e,
+        0x69327549ad870378,
+        0x52424c6d73bff2ec,
+        0x90b353367dd79528,
+        0x42b5ef3b64d8b851,
+        0xd141db1b33a249b3,
+        0xd92fb0e60ffb724b,
+        0xb71f5fdabd6fd1f2,
+        0xddc59b5b6ff32ea0,
+        0x5b25882220452045,
+        0x3898c,
+    ]);
+}
diff --git a/arkworks/algebra/test-curves/src/mnt4_753/g1.rs b/arkworks/algebra/test-curves/src/mnt4_753/g1.rs
new file mode 100644
index 00000000..71863ead
--- /dev/null
+++ b/arkworks/algebra/test-curves/src/mnt4_753/g1.rs
@@ -0,0 +1,52 @@
+use ark_ec::{
+    models::{ModelParameters, SWModelParameters},
+    short_weierstrass_jacobian::*,
+};
+use ark_ff::field_new;
+
+use crate::mnt4_753::{Fq, Fr, FR_ONE};
+
+pub type G1Affine = GroupAffine<Parameters>;
+pub type G1Projective = GroupProjective<Parameters>;
+
+#[derive(Clone, Default, PartialEq, Eq)]
+pub struct Parameters;
+
+impl ModelParameters for Parameters {
+    type BaseField = Fq;
+    type ScalarField = Fr;
+}
+
+impl SWModelParameters for Parameters {
+    /// COEFF_A = 2
+    #[rustfmt::skip]
+    const COEFF_A: Fq = field_new!(Fq, "2");
+
+    /// COEFF_B = 0x01373684A8C9DCAE7A016AC5D7748D3313CD8E39051C596560835DF0C9E50A5B59B882A92C78DC537E51A16703EC9855C77FC3D8BB21C8D68BB8CFB9DB4B8C8FBA773111C36C8B1B4E8F1ECE940EF9EAAD265458E06372009C9A0491678EF4
+    /// = 28798803903456388891410036793299405764940372360099938340752576406393880372126970068421383312482853541572780087363938442377933706865252053507077543420534380486492786626556269083255657125025963825610840222568694137138741554679540
+    #[rustfmt::skip]
+    const COEFF_B: Fq = field_new!(Fq, "28798803903456388891410036793299405764940372360099938340752576406393880372126970068421383312482853541572780087363938442377933706865252053507077543420534380486492786626556269083255657125025963825610840222568694137138741554679540");
+
+    /// COFACTOR = 1
+    const COFACTOR: &'static [u64] = &[1];
+
+    /// COFACTOR^(-1) mod r =
+    /// 1
+    #[rustfmt::skip]
+    const COFACTOR_INV: Fr = FR_ONE;
+
+    /// AFFINE_GENERATOR_COEFFS = (G1_GENERATOR_X, G1_GENERATOR_Y)
+    const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) =
+        (G1_GENERATOR_X, G1_GENERATOR_Y);
+}
+
+// Generator of G1
+// X = 7790163481385331313124631546957228376128961350185262705123068027727518350362064426002432450801002268747950550964579198552865939244360469674540925037890082678099826733417900510086646711680891516503232107232083181010099241949569,
+// Y = 6913648190367314284606685101150155872986263667483624713540251048208073654617802840433842931301128643140890502238233930290161632176167186761333725658542781350626799660920481723757654531036893265359076440986158843531053720994648,
+/// G1_GENERATOR_X =
+#[rustfmt::skip]
+pub const G1_GENERATOR_X: Fq = field_new!(Fq, "7790163481385331313124631546957228376128961350185262705123068027727518350362064426002432450801002268747950550964579198552865939244360469674540925037890082678099826733417900510086646711680891516503232107232083181010099241949569");
+
+/// G1_GENERATOR_Y =
+#[rustfmt::skip]
+pub const G1_GENERATOR_Y: Fq = field_new!(Fq, "6913648190367314284606685101150155872986263667483624713540251048208073654617802840433842931301128643140890502238233930290161632176167186761333725658542781350626799660920481723757654531036893265359076440986158843531053720994648");
diff --git a/arkworks/algebra/test-curves/src/mnt4_753/mod.rs b/arkworks/algebra/test-curves/src/mnt4_753/mod.rs
new file mode 100644
index 00000000..e3d286cb
--- /dev/null
+++ b/arkworks/algebra/test-curves/src/mnt4_753/mod.rs
@@ -0,0 +1,14 @@
+#[cfg(feature = "mnt4_753_base_field")]
+pub mod fq;
+#[cfg(feature = "mnt4_753_base_field")]
+pub use fq::*;
+
+#[cfg(feature = "mnt4_753_scalar_field")]
+pub mod fr;
+#[cfg(feature = "mnt4_753_scalar_field")]
+pub use fr::*;
+
+#[cfg(feature = "mnt4_753_curve")]
+pub mod g1;
+#[cfg(feature = "mnt4_753_curve")]
+pub use g1::*;
"../ff", default-features = false } +ark-ec = { version = "^0.3.0", path = "../ec", default-features = false } + +[features] +default = [] +std = [ "ark-std/std", "ark-ff/std", "ark-serialize/std", "ark-ec/std" ] diff --git a/arkworks/algebra/test-templates/LICENSE-APACHE b/arkworks/algebra/test-templates/LICENSE-APACHE new file mode 120000 index 00000000..965b606f --- /dev/null +++ b/arkworks/algebra/test-templates/LICENSE-APACHE @@ -0,0 +1 @@ +../LICENSE-APACHE \ No newline at end of file diff --git a/arkworks/algebra/test-templates/LICENSE-MIT b/arkworks/algebra/test-templates/LICENSE-MIT new file mode 120000 index 00000000..76219eb7 --- /dev/null +++ b/arkworks/algebra/test-templates/LICENSE-MIT @@ -0,0 +1 @@ +../LICENSE-MIT \ No newline at end of file diff --git a/arkworks/algebra/test-templates/src/curves.rs b/arkworks/algebra/test-templates/src/curves.rs new file mode 100644 index 00000000..b7487148 --- /dev/null +++ b/arkworks/algebra/test-templates/src/curves.rs @@ -0,0 +1,562 @@ +#![allow(unused)] +use ark_ec::twisted_edwards_extended::GroupProjective; +use ark_ec::wnaf::WnafContext; +use ark_ec::{ + AffineCurve, MontgomeryModelParameters, ProjectiveCurve, SWModelParameters, TEModelParameters, +}; +use ark_ff::{Field, One, PrimeField, UniformRand, Zero}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SWFlags, SerializationError}; +use ark_std::{io::Cursor, vec::Vec}; + +pub const ITERATIONS: usize = 10; + +fn random_addition_test() { + let mut rng = ark_std::test_rng(); + + for _ in 0..ITERATIONS { + let a = G::rand(&mut rng); + let b = G::rand(&mut rng); + let c = G::rand(&mut rng); + let a_affine = a.into_affine(); + let b_affine = b.into_affine(); + let c_affine = c.into_affine(); + + // a + a should equal the doubling + { + let mut aplusa = a; + aplusa.add_assign(&a); + + let mut aplusamixed = a; + aplusamixed.add_assign_mixed(&a.into_affine()); + + let mut adouble = a; + adouble.double_in_place(); + + assert_eq!(aplusa, adouble); + assert_eq!(aplusa, aplusamixed); + } + + let mut tmp = vec![G::zero(); 6]; + + // (a + b) + c + tmp[0] = (a + &b) + &c; + + // a + (b + c) + tmp[1] = a + &(b + &c); + + // (a + c) + b + tmp[2] = (a + &c) + &b; + + // Mixed addition + + // (a + b) + c + tmp[3] = a_affine.into_projective(); + tmp[3].add_assign_mixed(&b_affine); + tmp[3].add_assign_mixed(&c_affine); + + // a + (b + c) + tmp[4] = b_affine.into_projective(); + tmp[4].add_assign_mixed(&c_affine); + tmp[4].add_assign_mixed(&a_affine); + + // (a + c) + b + tmp[5] = a_affine.into_projective(); + tmp[5].add_assign_mixed(&c_affine); + tmp[5].add_assign_mixed(&b_affine); + + // Comparisons + for i in 0..6 { + for j in 0..6 { + if tmp[i] != tmp[j] { + println!("{} \n{}", tmp[i], tmp[j]); + } + assert_eq!(tmp[i], tmp[j], "Associativity failed {} {}", i, j); + assert_eq!( + tmp[i].into_affine(), + tmp[j].into_affine(), + "Associativity failed" + ); + } + + assert!(tmp[i] != a); + assert!(tmp[i] != b); + assert!(tmp[i] != c); + + assert!(a != tmp[i]); + assert!(b != tmp[i]); + assert!(c != tmp[i]); + } + } +} + +fn random_multiplication_test() { + let mut rng = ark_std::test_rng(); + + for _ in 0..ITERATIONS { + let mut a = G::rand(&mut rng); + let mut b = G::rand(&mut rng); + let a_affine = a.into_affine(); + let b_affine = b.into_affine(); + + let s = G::ScalarField::rand(&mut rng); + + // s ( a + b ) + let mut tmp1 = a; + tmp1.add_assign(&b); + tmp1.mul_assign(s); + + // s ( a + b) using wNAF for several window values in [2,5] + for w in 2..=5 { + let mut tmp4 = a + &b; + let 
context = WnafContext::new(w); + assert_eq!(tmp1, context.mul(tmp4, &s)); + + if w > 2 { + let bad_context = WnafContext::new(w - 1); + let bad_table = bad_context.table(tmp4); + assert_eq!(context.mul_with_table(&bad_table, &s), None); + } + } + + // sa + sb + a.mul_assign(s); + b.mul_assign(s); + + let mut tmp2 = a; + tmp2.add_assign(&b); + + // Affine multiplication + let mut tmp3 = a_affine.mul(s.into_repr()); + tmp3.add_assign(&b_affine.mul(s.into_repr())); + + assert_eq!(tmp1, tmp2); + assert_eq!(tmp1, tmp3); + } +} + +fn random_doubling_test() { + let mut rng = ark_std::test_rng(); + + for _ in 0..ITERATIONS { + let mut a = G::rand(&mut rng); + let mut b = G::rand(&mut rng); + + // 2(a + b) + let mut tmp1 = a; + tmp1.add_assign(&b); + tmp1.double_in_place(); + + // 2a + 2b + a.double_in_place(); + b.double_in_place(); + + let mut tmp2 = a; + tmp2.add_assign(&b); + + let mut tmp3 = a; + tmp3.add_assign_mixed(&b.into_affine()); + + assert_eq!(tmp1, tmp2); + assert_eq!(tmp1, tmp3); + } +} + +fn random_negation_test() { + let mut rng = ark_std::test_rng(); + + for _ in 0..ITERATIONS { + let r = G::rand(&mut rng); + + let s = G::ScalarField::rand(&mut rng); + let sneg = -s; + assert!((s + &sneg).is_zero()); + + let mut t1 = r; + t1.mul_assign(s); + + let mut t2 = r; + t2.mul_assign(sneg); + + let mut t3 = t1; + t3.add_assign(&t2); + assert!(t3.is_zero()); + + let mut t4 = t1; + t4.add_assign_mixed(&t2.into_affine()); + assert!(t4.is_zero()); + + t1 = -t1; + assert_eq!(t1, t2); + } +} + +fn random_transformation_test() { + let mut rng = ark_std::test_rng(); + + for _ in 0..ITERATIONS { + let g = G::rand(&mut rng); + let g_affine = g.into_affine(); + let g_projective = g_affine.into_projective(); + assert_eq!(g, g_projective); + } + + // Batch normalization + for _ in 0..10 { + let mut v = (0..ITERATIONS) + .map(|_| G::rand(&mut rng).double()) + .collect::>(); + + use ark_std::rand::distributions::{Distribution, Uniform}; + let between = Uniform::from(0..ITERATIONS); + // Sprinkle in some normalized points + for _ in 0..5 { + v[between.sample(&mut rng)] = G::zero(); + } + for _ in 0..5 { + let s = between.sample(&mut rng); + v[s] = v[s].into_affine().into_projective(); + } + + let expected_v = v + .iter() + .map(|v| v.into_affine().into_projective()) + .collect::>(); + G::batch_normalization(&mut v); + + for i in &v { + assert!(i.is_normalized()); + } + + assert_eq!(v, expected_v); + } +} + +pub fn curve_tests() { + let mut rng = ark_std::test_rng(); + + // Negation edge case with zero. + { + let z = -G::zero(); + assert!(z.is_zero()); + } + + // Doubling edge case with zero. 
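`random_multiplication_test` above cross-checks `WnafContext` against plain scalar multiplication. The width-`w` NAF recoding underneath rewrites the scalar into digits that are zero or odd with `|d| < 2^(w-1)`, so one precomputed table of odd multiples covers every nonzero digit. A standalone sketch of the recoding over plain integers — `WnafContext`'s internal representation may differ:

    // Width-w NAF: every digit is zero or odd with |d| < 2^(w−1).
    fn wnaf(mut k: i64, w: u32) -> Vec<i64> {
        let modulus = 1i64 << w;
        let mut digits = Vec::new();
        while k != 0 {
            if k & 1 == 1 {
                // Signed remainder of k in (−2^(w−1), 2^(w−1)).
                let mut d = k % modulus;
                if d >= modulus / 2 {
                    d -= modulus;
                }
                digits.push(d);
                k -= d;
            } else {
                digits.push(0);
            }
            k >>= 1;
        }
        digits
    }

    fn main() {
        let (k, w) = (5869i64, 4u32);
        let digits = wnaf(k, w);

        // Reconstruction: k = Σ dᵢ · 2^i.
        let back: i64 = digits.iter().enumerate().map(|(i, d)| d << i).sum();
        assert_eq!(back, k);

        // Each nonzero digit is odd and indexes a table of odd multiples
        // ±1·P, ±3·P, …, ±(2^(w−1) − 1)·P.
        assert!(digits
            .iter()
            .all(|&d| d == 0 || (d % 2 != 0 && d.abs() < 1 << (w - 1))));
    }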
+pub fn curve_tests<G: ProjectiveCurve>() {
+    let mut rng = ark_std::test_rng();
+
+    // Negation edge case with zero.
+    {
+        let z = -G::zero();
+        assert!(z.is_zero());
+    }
+
+    // Doubling edge case with zero.
+    {
+        let mut z = -G::zero();
+        z.double_in_place();
+        assert!(z.is_zero());
+    }
+
+    // Addition edge cases with zero
+    {
+        let mut r = G::rand(&mut rng);
+        let rcopy = r;
+        r.add_assign(&G::zero());
+        assert_eq!(r, rcopy);
+        r.add_assign_mixed(&G::Affine::zero());
+        assert_eq!(r, rcopy);
+
+        let mut z = G::zero();
+        z.add_assign(&G::zero());
+        assert!(z.is_zero());
+        z.add_assign_mixed(&G::Affine::zero());
+        assert!(z.is_zero());
+
+        let mut z2 = z;
+        z2.add_assign(&r);
+
+        z.add_assign_mixed(&r.into_affine());
+
+        assert_eq!(z, z2);
+        assert_eq!(z, r);
+    }
+
+    // Transformations
+    {
+        let a = G::rand(&mut rng);
+        let b = a.into_affine().into_projective();
+        let c = a
+            .into_affine()
+            .into_projective()
+            .into_affine()
+            .into_projective();
+        assert_eq!(a, b);
+        assert_eq!(b, c);
+    }
+
+    // Test COFACTOR and COFACTOR_INV
+    {
+        let a = G::rand(&mut rng);
+        let b = a.into_affine();
+        let c = b.mul_by_cofactor_inv().mul_by_cofactor();
+        assert_eq!(b, c);
+    }
+
+    random_addition_test::<G>();
+    random_multiplication_test::<G>();
+    random_doubling_test::<G>();
+    random_negation_test::<G>();
+    random_transformation_test::<G>();
+}
+
+pub fn sw_tests<P: SWModelParameters>() {
+    sw_curve_serialization_test::<P>();
+    sw_from_random_bytes::<P>();
+    sw_affine_sum_test::<P>();
+}
+
+pub fn sw_from_random_bytes<P: SWModelParameters>() {
+    use ark_ec::models::short_weierstrass_jacobian::{GroupAffine, GroupProjective};
+
+    let buf_size = GroupAffine::<P>::zero().serialized_size();
+
+    let mut rng = ark_std::test_rng();
+
+    for _ in 0..ITERATIONS {
+        let a = GroupProjective::<P>::rand(&mut rng);
+        let mut a = a.into_affine();
+        {
+            let mut serialized = vec![0; buf_size];
+            let mut cursor = Cursor::new(&mut serialized[..]);
+            a.serialize(&mut cursor).unwrap();
+
+            let mut cursor = Cursor::new(&serialized[..]);
+            let p1 = GroupAffine::<P>::deserialize(&mut cursor).unwrap();
+            let p2 = GroupAffine::<P>::from_random_bytes(&serialized).unwrap();
+            assert_eq!(p1, p2);
+        }
+    }
+}
+
+pub fn sw_curve_serialization_test<P: SWModelParameters>() {
+    use ark_ec::models::short_weierstrass_jacobian::{GroupAffine, GroupProjective};
+
+    let buf_size = GroupAffine::<P>::zero().serialized_size();
+
+    let mut rng = ark_std::test_rng();
+
+    for _ in 0..ITERATIONS {
+        let a = GroupProjective::<P>::rand(&mut rng);
+        let mut a = a.into_affine();
+        {
+            let mut serialized = vec![0; buf_size];
+            let mut cursor = Cursor::new(&mut serialized[..]);
+            a.serialize(&mut cursor).unwrap();
+
+            let mut cursor = Cursor::new(&serialized[..]);
+            let b = GroupAffine::<P>::deserialize(&mut cursor).unwrap();
+            assert_eq!(a, b);
+        }
+
+        {
+            a.y = -a.y;
+            let mut serialized = vec![0; buf_size];
+            let mut cursor = Cursor::new(&mut serialized[..]);
+            a.serialize(&mut cursor).unwrap();
+            let mut cursor = Cursor::new(&serialized[..]);
+            let b = GroupAffine::<P>::deserialize(&mut cursor).unwrap();
+            assert_eq!(a, b);
+        }
+
+        {
+            let a = GroupAffine::<P>::zero();
+            let mut serialized = vec![0; buf_size];
+            let mut cursor = Cursor::new(&mut serialized[..]);
+            a.serialize(&mut cursor).unwrap();
+            let mut cursor = Cursor::new(&serialized[..]);
+            let b = GroupAffine::<P>::deserialize(&mut cursor).unwrap();
+            assert_eq!(a, b);
+        }
+
+        {
+            let a = GroupAffine::<P>::zero();
+            let mut serialized = vec![0; buf_size - 1];
+            let mut cursor = Cursor::new(&mut serialized[..]);
+            a.serialize(&mut cursor).unwrap_err();
+        }
+
+        {
+            let serialized = vec![0; buf_size - 1];
+            let mut cursor = Cursor::new(&serialized[..]);
+            GroupAffine::<P>::deserialize(&mut cursor).unwrap_err();
+        }
+
+        {
+            let mut serialized = vec![0; a.uncompressed_size()];
+            let mut cursor = Cursor::new(&mut serialized[..]);
+            a.serialize_uncompressed(&mut cursor).unwrap();
+
+            let mut cursor = Cursor::new(&serialized[..]);
+            let b = GroupAffine::<P>::deserialize_uncompressed(&mut cursor).unwrap();
+            assert_eq!(a, b);
+        }
+
+        {
+            a.y = -a.y;
+            let mut serialized = vec![0; a.uncompressed_size()];
+            let mut cursor = Cursor::new(&mut serialized[..]);
+            a.serialize_uncompressed(&mut cursor).unwrap();
+            let mut cursor = Cursor::new(&serialized[..]);
+            let b = GroupAffine::<P>::deserialize_uncompressed(&mut cursor).unwrap();
+            assert_eq!(a, b);
+        }
+
+        {
+            let a = GroupAffine::<P>::zero();
+            let mut serialized = vec![0; a.uncompressed_size()];
+            let mut cursor = Cursor::new(&mut serialized[..]);
+            a.serialize_uncompressed(&mut cursor).unwrap();
+            let mut cursor = Cursor::new(&serialized[..]);
+            let b = GroupAffine::<P>::deserialize_uncompressed(&mut cursor).unwrap();
+            assert_eq!(a, b);
+        }
+    }
+}
+
+pub fn sw_affine_sum_test<P: SWModelParameters>() {
+    use ark_ec::models::short_weierstrass_jacobian::{GroupAffine, GroupProjective};
+
+    let mut rng = ark_std::test_rng();
+
+    for _ in 0..ITERATIONS {
+        let mut test_vec = Vec::new();
+        for _ in 0..10 {
+            test_vec.push(GroupProjective::<P>::rand(&mut rng).into_affine());
+        }
+
+        let sum_computed: GroupAffine<P> = test_vec.iter().sum();
+        let mut sum_expected = GroupAffine::zero();
+        for p in test_vec.iter() {
+            sum_expected += &p;
+        }
+
+        assert_eq!(sum_computed, sum_expected);
+    }
+}
+
+pub fn montgomery_conversion_test<P>()
+where
+    P: TEModelParameters,
+{
+    // A = 2 * (a + d) / (a - d)
+    let a = P::BaseField::one().double()
+        * &(P::COEFF_A + &P::COEFF_D)
+        * &(P::COEFF_A - &P::COEFF_D).inverse().unwrap();
+    // B = 4 / (a - d)
+    let b = P::BaseField::one().double().double() * &(P::COEFF_A - &P::COEFF_D).inverse().unwrap();
+
+    assert_eq!(a, P::MontgomeryModelParameters::COEFF_A);
+    assert_eq!(b, P::MontgomeryModelParameters::COEFF_B);
+}
+
+pub fn edwards_tests<P: TEModelParameters>()
+where
+    P::BaseField: PrimeField,
+{
+    edwards_curve_serialization_test::<P>();
+    edwards_from_random_bytes::<P>();
+}
+
+pub fn edwards_from_random_bytes<P: TEModelParameters>()
+where
+    P::BaseField: PrimeField,
+{
+    use ark_ec::models::twisted_edwards_extended::{GroupAffine, GroupProjective};
+    use ark_ff::{to_bytes, ToBytes};
+
+    let buf_size = GroupAffine::<P>::zero().serialized_size();
+
+    let mut rng = ark_std::test_rng();
+
+    for _ in 0..ITERATIONS {
+        let a = GroupProjective::<P>::rand(&mut rng);
+        let mut a = a.into_affine();
+        {
+            let mut serialized = vec![0; buf_size];
+            let mut cursor = Cursor::new(&mut serialized[..]);
+            a.serialize(&mut cursor).unwrap();
+
+            let mut cursor = Cursor::new(&serialized[..]);
+            let p1 = GroupAffine::<P>::deserialize(&mut cursor).unwrap();
+            let p2 = GroupAffine::<P>::from_random_bytes(&serialized).unwrap();
+            assert_eq!(p1, p2);
+        }
+    }
+
+    for _ in 0..ITERATIONS {
+        let mut biginteger =
+            <<GroupAffine<P> as AffineCurve>::BaseField as PrimeField>::BigInt::rand(&mut rng);
+        let mut bytes = to_bytes![biginteger].unwrap();
+        let mut g = GroupAffine::<P>::from_random_bytes(&bytes);
+        while g.is_none() {
+            bytes.iter_mut().for_each(|i| *i = i.wrapping_sub(1));
+            g = GroupAffine::<P>::from_random_bytes(&bytes);
+        }
+        let _g = g.unwrap();
+    }
+}
+
+pub fn edwards_curve_serialization_test<P: TEModelParameters>() {
+    use ark_ec::models::twisted_edwards_extended::{GroupAffine, GroupProjective};
+
+    let buf_size = GroupAffine::<P>::zero().serialized_size();
+
+    let mut rng = ark_std::test_rng();
+
+    for _ in 0..ITERATIONS {
+        let a = GroupProjective::<P>::rand(&mut rng);
+        let a = a.into_affine();
+        {
+            let mut serialized = vec![0; buf_size];
+            let mut cursor = Cursor::new(&mut serialized[..]);
+            a.serialize(&mut cursor).unwrap();
+
+            let mut cursor = Cursor::new(&serialized[..]);
+            let b = GroupAffine::<P>::deserialize(&mut cursor).unwrap();
+            assert_eq!(a, b);
+        }
+
+        {
+            let a = GroupAffine::<P>::zero();
+            let mut serialized = vec![0; buf_size];
+            let mut cursor = Cursor::new(&mut serialized[..]);
+            a.serialize(&mut cursor).unwrap();
+            let mut cursor = Cursor::new(&serialized[..]);
+            let b = GroupAffine::<P>::deserialize(&mut cursor).unwrap();
+            assert_eq!(a, b);
+        }
+
+        {
+            let a = GroupAffine::<P>::zero();
+            let mut serialized = vec![0; buf_size - 1];
+            let mut cursor = Cursor::new(&mut serialized[..]);
+            a.serialize(&mut cursor).unwrap_err();
+        }
+
+        {
+            let serialized = vec![0; buf_size - 1];
+            let mut cursor = Cursor::new(&serialized[..]);
+            GroupAffine::<P>::deserialize(&mut cursor).unwrap_err();
+        }
+
+        {
+            let mut serialized = vec![0; a.uncompressed_size()];
+            let mut cursor = Cursor::new(&mut serialized[..]);
+            a.serialize_uncompressed(&mut cursor).unwrap();
+
+            let mut cursor = Cursor::new(&serialized[..]);
+            let b = GroupAffine::<P>::deserialize_uncompressed(&mut cursor).unwrap();
+            assert_eq!(a, b);
+        }
+
+        {
+            let a = GroupAffine::<P>::zero();
+            let mut serialized = vec![0; a.uncompressed_size()];
+            let mut cursor = Cursor::new(&mut serialized[..]);
+            a.serialize_uncompressed(&mut cursor).unwrap();
+            let mut cursor = Cursor::new(&serialized[..]);
+            let b = GroupAffine::<P>::deserialize_uncompressed(&mut cursor).unwrap();
+            assert_eq!(a, b);
+        }
+    }
+}
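`batch_normalization`, exercised by `random_transformation_test` above, amortizes the single expensive field inversion across all points using Montgomery's product trick: multiply everything together, invert once, then unwind with multiplications. The trick in isolation over a toy prime field, with Fermat exponentiation standing in for the field inverse:

    fn pow_mod(mut b: u64, mut e: u64, m: u64) -> u64 {
        let mut acc = 1u64;
        b %= m;
        while e > 0 {
            if e & 1 == 1 {
                acc = acc * b % m;
            }
            b = b * b % m;
            e >>= 1;
        }
        acc
    }

    // Montgomery's trick: n inverses for the price of one inversion
    // plus about 3(n − 1) multiplications.
    fn batch_invert(xs: &[u64], p: u64) -> Vec<u64> {
        // Prefix products: pre[i] = x₀ · … · x_{i−1} (mod p).
        let mut pre = Vec::with_capacity(xs.len() + 1);
        pre.push(1u64);
        for &x in xs {
            pre.push(pre.last().unwrap() * x % p);
        }
        // One inversion of the grand product (Fermat: x^(p−2) = x⁻¹).
        let mut acc = pow_mod(*pre.last().unwrap(), p - 2, p);
        // Unwind from the back: xᵢ⁻¹ = pre[i] · (x₀ · … · xᵢ)⁻¹.
        let mut out = vec![0u64; xs.len()];
        for i in (0..xs.len()).rev() {
            out[i] = acc * pre[i] % p;
            acc = acc * xs[i] % p;
        }
        out
    }

    fn main() {
        let p = 10007u64; // made-up prime
        let xs: Vec<u64> = (1..=100).collect();
        for (x, xi) in xs.iter().zip(batch_invert(&xs, p)) {
            assert_eq!(x * xi % p, 1);
        }
    }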
diff --git a/arkworks/algebra/test-templates/src/fields.rs b/arkworks/algebra/test-templates/src/fields.rs
new file mode 100644
index 00000000..5732ad19
--- /dev/null
+++ b/arkworks/algebra/test-templates/src/fields.rs
@@ -0,0 +1,458 @@
+#![allow(unused)]
+#![allow(clippy::eq_op)]
+use ark_ff::fields::{FftField, FftParameters, Field, LegendreSymbol, PrimeField, SquareRootField};
+use ark_serialize::{buffer_bit_byte_size, Flags, SWFlags};
+use ark_std::io::Cursor;
+use ark_std::rand::Rng;
+
+pub const ITERATIONS: u32 = 40;
+
+fn random_negation_tests<F: Field, R: Rng>(rng: &mut R) {
+    for _ in 0..ITERATIONS {
+        let a = F::rand(rng);
+        let mut b = -a;
+        b += &a;
+
+        assert!(b.is_zero());
+    }
+}
+
+fn random_addition_tests<F: Field, R: Rng>(rng: &mut R) {
+    for _ in 0..ITERATIONS {
+        let a = F::rand(rng);
+        let b = F::rand(rng);
+        let c = F::rand(rng);
+
+        let t0 = (a + &b) + &c; // (a + b) + c
+
+        let t1 = (a + &c) + &b; // (a + c) + b
+
+        let t2 = (b + &c) + &a; // (b + c) + a
+
+        assert_eq!(t0, t1);
+        assert_eq!(t1, t2);
+    }
+}
+
+fn random_subtraction_tests<F: Field, R: Rng>(rng: &mut R) {
+    for _ in 0..ITERATIONS {
+        let a = F::rand(rng);
+        let b = F::rand(rng);
+
+        let t0 = a - &b; // (a - b)
+
+        let mut t1 = b; // (b - a)
+        t1 -= &a;
+
+        let mut t2 = t0; // (a - b) + (b - a) = 0
+        t2 += &t1;
+
+        assert!(t2.is_zero());
+    }
+}
+
+fn random_multiplication_tests<F: Field, R: Rng>(rng: &mut R) {
+    for _ in 0..ITERATIONS {
+        let a = F::rand(rng);
+        let b = F::rand(rng);
+        let c = F::rand(rng);
+
+        let mut t0 = a; // (a * b) * c
+        t0 *= &b;
+        t0 *= &c;
+
+        let mut t1 = a; // (a * c) * b
+        t1 *= &c;
+        t1 *= &b;
+
+        let mut t2 = b; // (b * c) * a
+        t2 *= &c;
+        t2 *= &a;
+
+        assert_eq!(t0, t1);
+        assert_eq!(t1, t2);
+    }
+}
+
+fn random_inversion_tests<F: Field, R: Rng>(rng: &mut R) {
+    assert!(F::zero().inverse().is_none());
+
+    for _ in 0..ITERATIONS {
+        let mut a = F::rand(rng);
+        let b = a.inverse().map(|b| {
+            a *= &b;
+            assert_eq!(a, F::one());
+        });
+    }
+}
+
+fn random_doubling_tests<F: Field, R: Rng>(rng: &mut R) {
+    for _ in 0..ITERATIONS {
+        let mut a = F::rand(rng);
+        let mut b = a;
+        a += &b;
+        b.double_in_place();
+
+        assert_eq!(a, b);
+    }
+}
+
+fn random_squaring_tests<F: Field, R: Rng>(rng: &mut R) {
+    for _ in 0..ITERATIONS {
+        let mut a = F::rand(rng);
+        let mut b = a;
+        a *= &b;
+        b.square_in_place();
+
+        assert_eq!(a, b);
+    }
+}
+
+fn random_expansion_tests<F: Field, R: Rng>(rng: &mut R) {
+    for _ in 0..ITERATIONS {
+        // Compare (a + b)(c + d) and (a*c + b*c + a*d + b*d)
+
+        let a = F::rand(rng);
+        let b = F::rand(rng);
+        let c = F::rand(rng);
+        let d = F::rand(rng);
+
+        let mut t0 = a;
+        t0 += &b;
+        let mut t1 = c;
+        t1 += &d;
+        t0 *= &t1;
+
+        let mut t2 = a;
+        t2 *= &c;
+        let mut t3 = b;
+        t3 *= &c;
+        let mut t4 = a;
+        t4 *= &d;
+        let mut t5 = b;
+        t5 *= &d;
+
+        t2 += &t3;
+        t2 += &t4;
+        t2 += &t5;
+
+        assert_eq!(t0, t2);
+    }
+
+    for _ in 0..ITERATIONS {
+        // Compare (a + b)c and (a*c + b*c)
+
+        let a = F::rand(rng);
+        let b = F::rand(rng);
+        let c = F::rand(rng);
+
+        let t0 = (a + &b) * &c;
+        let t2 = a * &c + &(b * &c);
+
+        assert_eq!(t0, t2);
+    }
+}
+
+fn random_field_tests<F: Field>() {
+    let mut rng = ark_std::test_rng();
+
+    random_negation_tests::<F, _>(&mut rng);
+    random_addition_tests::<F, _>(&mut rng);
+    random_subtraction_tests::<F, _>(&mut rng);
+    random_multiplication_tests::<F, _>(&mut rng);
+    random_inversion_tests::<F, _>(&mut rng);
+    random_doubling_tests::<F, _>(&mut rng);
+    random_squaring_tests::<F, _>(&mut rng);
+    random_expansion_tests::<F, _>(&mut rng);
+
+    assert!(F::zero().is_zero());
+    {
+        let z = -F::zero();
+        assert!(z.is_zero());
+    }
+
+    assert!(F::zero().inverse().is_none());
+
+    // Multiplication by zero
+    {
+        let a = F::rand(&mut rng) * &F::zero();
+        assert!(a.is_zero());
+    }
+
+    // Addition by zero
+    {
+        let mut a = F::rand(&mut rng);
+        let copy = a;
+        a += &F::zero();
+        assert_eq!(a, copy);
+    }
+}
+
+fn random_sqrt_tests<F: SquareRootField>() {
+    let mut rng = ark_std::test_rng();
+
+    for _ in 0..ITERATIONS {
+        let a = F::rand(&mut rng);
+        let b = a.square();
+        assert_eq!(b.legendre(), LegendreSymbol::QuadraticResidue);
+
+        let b = b.sqrt().unwrap();
+        assert!(a == b || a == -b);
+    }
+
+    let mut c = F::one();
+    for _ in 0..ITERATIONS {
+        let mut b = c.square();
+        assert_eq!(b.legendre(), LegendreSymbol::QuadraticResidue);
+
+        b = b.sqrt().unwrap();
+
+        if b != c {
+            b = -b;
+        }
+
+        assert_eq!(b, c);
+
+        c += &F::one();
+    }
+}
+
+pub fn from_str_test<F: PrimeField>() {
+    {
+        let mut rng = ark_std::test_rng();
+
+        for _ in 0..ITERATIONS {
+            let n: u64 = rng.gen();
+
+            let a = F::from_str(&ark_std::format!("{}", n))
+                .map_err(|_| ())
+                .unwrap();
+            let b = F::from(n);
+
+            assert_eq!(a, b);
+        }
+    }
+
+    assert!(F::from_str("").is_err());
+    assert!(F::from_str("0").map_err(|_| ()).unwrap().is_zero());
+    assert!(F::from_str("00").is_err());
+    assert!(F::from_str("00000000000").is_err());
+}
+
+pub fn field_test<F: Field>(a: F, b: F) {
+    let zero = F::zero();
+    assert_eq!(zero, zero);
+    assert_eq!(zero.is_zero(), true);
+    assert_eq!(zero.is_one(), false);
+
+    let one = F::one();
+    assert_eq!(one, one);
+    assert_eq!(one.is_zero(), false);
+    assert_eq!(one.is_one(), true);
+    assert_eq!(zero + &one, one);
+
+    let two = one + &one;
+    assert_eq!(two, two);
+    assert_ne!(zero, two);
+    assert_ne!(one, two);
+
+    // a == a
+    assert_eq!(a, a);
+    // a + 0 = a
+    assert_eq!(a + &zero, a);
+    // a - 0 = a
+    assert_eq!(a - &zero, a);
+    // a - a = 0
+    assert_eq!(a - &a, zero);
+    // 0 - a = -a
+    assert_eq!(zero - &a, -a);
+    // a.double() = a + a
+    assert_eq!(a.double(), a + &a);
+    // b.double() = b + b
+    assert_eq!(b.double(), b + &b);
+    // a + b = b + a
+    assert_eq!(a + &b, b + &a);
+    // a - b = -(b - a)
+    assert_eq!(a - &b, -(b - &a));
+    // (a + b) + a = a + (b + a)
+    assert_eq!((a + &b) + &a, a + &(b + &a));
+    // (a + b).double() = (a + b) + (b + a)
+    assert_eq!((a + &b).double(), (a + &b) + &(b + &a));
+
+    // a * 0 = 0
+    assert_eq!(a * &zero, zero);
+    // a * 1 = a
+    assert_eq!(a * &one, a);
+    // a * 2 = a.double()
+    assert_eq!(a * &two, a.double());
+    // a * a^-1 = 1
+    assert_eq!(a * &a.inverse().unwrap(), one);
+    // a * a = a^2
+    assert_eq!(a * &a, a.square());
+    // a * a * a = a^3
+    assert_eq!(a * &(a * &a), a.pow([0x3, 0x0, 0x0, 0x0]));
+    // a * b = b * a
+    assert_eq!(a * &b, b * &a);
+    // (a * b) * a = a * (b * a)
+    assert_eq!((a * &b) * &a, a * &(b * &a));
+    // (a + b)^2 = a^2 + 2ab + b^2
+    assert_eq!(
+        (a + &b).square(),
+        a.square() + &((a * &b) + &(a * &b)) + &b.square()
+    );
+    // (a - b)^2 = (-(b - a))^2
+    assert_eq!((a - &b).square(), (-(b - &a)).square());
+    random_field_tests::<F>();
+}
+
+pub fn fft_field_test<F: FftField>() {
+    assert_eq!(
+        F::two_adic_root_of_unity().pow([1 << F::FftParams::TWO_ADICITY]),
+        F::one()
+    );
+
+    if let Some(small_subgroup_base) = F::FftParams::SMALL_SUBGROUP_BASE {
+        let small_subgroup_base_adicity = F::FftParams::SMALL_SUBGROUP_BASE_ADICITY.unwrap();
+        let large_subgroup_root_of_unity = F::large_subgroup_root_of_unity().unwrap();
+        let pow = (1 << F::FftParams::TWO_ADICITY)
+            * (small_subgroup_base as u64).pow(small_subgroup_base_adicity);
+        assert_eq!(large_subgroup_root_of_unity.pow([pow]), F::one());
+
+        for i in 0..F::FftParams::TWO_ADICITY {
+            for j in 0..small_subgroup_base_adicity {
+                use core::convert::TryFrom;
+                let size = usize::try_from(1 << i as usize).unwrap()
+                    * usize::try_from((small_subgroup_base as u64).pow(j)).unwrap();
+                let root = F::get_root_of_unity(size).unwrap();
+                assert_eq!(root.pow([size as u64]), F::one());
+            }
+        }
+    } else {
+        for i in 0..F::FftParams::TWO_ADICITY {
+            let size = 1 << i;
+            let root = F::get_root_of_unity(size).unwrap();
+            assert_eq!(root.pow([size as u64]), F::one());
+        }
+    }
+}
+
+pub fn primefield_test<F: PrimeField>() {
+    from_str_test::<F>();
+    let one = F::one();
+    assert_eq!(F::from_repr(one.into_repr()).unwrap(), one);
+
+    fft_field_test::<F>();
+}
+
+pub fn sqrt_field_test<F: SquareRootField>(elem: F) {
+    let square = elem.square();
+    let sqrt = square.sqrt().unwrap();
+    assert!(sqrt == elem || sqrt == -elem);
+    if let Some(sqrt) = elem.sqrt() {
+        assert!(sqrt.square() == elem || sqrt.square() == -elem);
+    }
+    random_sqrt_tests::<F>();
+}
+
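`sqrt_field_test` above deliberately accepts either root, since `sqrt(a^2)` is only defined up to sign. For `p ≡ 3 (mod 4)` a square root is one exponentiation by `(p + 1) / 4`, which makes the ambiguity easy to observe in a toy field (made-up small prime):

    fn pow_mod(mut b: u64, mut e: u64, m: u64) -> u64 {
        let mut acc = 1u64;
        b %= m;
        while e > 0 {
            if e & 1 == 1 {
                acc = acc * b % m;
            }
            b = b * b % m;
            e >>= 1;
        }
        acc
    }

    fn main() {
        let p = 10007u64;
        assert_eq!(p % 4, 3);

        for a in 1..1000u64 {
            let sq = a * a % p;
            // Euler's criterion: a square has Legendre symbol +1.
            assert_eq!(pow_mod(sq, (p - 1) / 2, p), 1);

            // The recovered root is only defined up to sign, exactly the
            // ambiguity sqrt_field_test tolerates.
            let root = pow_mod(sq, (p + 1) / 4, p);
            assert!(root == a || root == p - a);
        }
    }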
j in 0..small_subgroup_base_adicity { + use core::convert::TryFrom; + let size = usize::try_from(1 << i as usize).unwrap() + * usize::try_from((small_subgroup_base as u64).pow(j)).unwrap(); + let root = F::get_root_of_unity(size).unwrap(); + assert_eq!(root.pow([size as u64]), F::one()); + } + } + } else { + for i in 0..F::FftParams::TWO_ADICITY { + let size = 1 << i; + let root = F::get_root_of_unity(size).unwrap(); + assert_eq!(root.pow([size as u64]), F::one()); + } + } +} + +pub fn primefield_test() { + from_str_test::(); + let one = F::one(); + assert_eq!(F::from_repr(one.into_repr()).unwrap(), one); + + fft_field_test::(); +} + +pub fn sqrt_field_test(elem: F) { + let square = elem.square(); + let sqrt = square.sqrt().unwrap(); + assert!(sqrt == elem || sqrt == -elem); + if let Some(sqrt) = elem.sqrt() { + assert!(sqrt.square() == elem || sqrt.square() == -elem); + } + random_sqrt_tests::(); +} + +pub fn frobenius_test>(characteristic: C, maxpower: usize) { + let mut rng = ark_std::test_rng(); + + for _ in 0..ITERATIONS { + let a = F::rand(&mut rng); + + let mut a_0 = a; + a_0.frobenius_map(0); + assert_eq!(a, a_0); + + let mut a_q = a.pow(&characteristic); + for power in 1..maxpower { + let mut a_qi = a; + a_qi.frobenius_map(power); + assert_eq!(a_qi, a_q, "failed on power {}", power); + + a_q = a_q.pow(&characteristic); + } + } +} + +pub fn field_serialization_test(buf_size: usize) { + let mut rng = ark_std::test_rng(); + + for _ in 0..ITERATIONS { + let a = F::rand(&mut rng); + { + let mut serialized = vec![0u8; buf_size]; + let mut cursor = Cursor::new(&mut serialized[..]); + a.serialize(&mut cursor).unwrap(); + + let mut cursor = Cursor::new(&serialized[..]); + let b = F::deserialize(&mut cursor).unwrap(); + assert_eq!(a, b); + } + + { + let mut serialized = vec![0u8; a.uncompressed_size()]; + let mut cursor = Cursor::new(&mut serialized[..]); + a.serialize_uncompressed(&mut cursor).unwrap(); + + let mut cursor = Cursor::new(&serialized[..]); + let b = F::deserialize_uncompressed(&mut cursor).unwrap(); + assert_eq!(a, b); + } + + { + let mut serialized = vec![0u8; buf_size]; + let mut cursor = Cursor::new(&mut serialized[..]); + a.serialize_with_flags(&mut cursor, SWFlags::from_y_sign(true)) + .unwrap(); + let mut cursor = Cursor::new(&serialized[..]); + let (b, flags) = F::deserialize_with_flags::<_, SWFlags>(&mut cursor).unwrap(); + assert_eq!(flags.is_positive(), Some(true)); + assert!(!flags.is_infinity()); + assert_eq!(a, b); + } + + #[derive(Default, Clone, Copy, Debug)] + struct DummyFlags; + impl Flags for DummyFlags { + const BIT_SIZE: usize = 200; + + fn u8_bitmask(&self) -> u8 { + 0 + } + + fn from_u8(_value: u8) -> Option { + Some(DummyFlags) + } + } + + use ark_serialize::SerializationError; + { + let mut serialized = vec![0; buf_size]; + assert!(if let SerializationError::NotEnoughSpace = a + .serialize_with_flags(&mut &mut serialized[..], DummyFlags) + .unwrap_err() + { + true + } else { + false + }); + assert!(if let SerializationError::NotEnoughSpace = + F::deserialize_with_flags::<_, DummyFlags>(&mut &serialized[..]).unwrap_err() + { + true + } else { + false + }); + } + + { + let mut serialized = vec![0; buf_size - 1]; + let mut cursor = Cursor::new(&mut serialized[..]); + a.serialize(&mut cursor).unwrap_err(); + + let mut cursor = Cursor::new(&serialized[..]); + F::deserialize(&mut cursor).unwrap_err(); + } + } +} diff --git a/arkworks/algebra/test-templates/src/groups.rs b/arkworks/algebra/test-templates/src/groups.rs new file mode 100644 index 
00000000..8651b2c7 --- /dev/null +++ b/arkworks/algebra/test-templates/src/groups.rs @@ -0,0 +1,75 @@ +#![allow(unused)] +#![allow(clippy::eq_op)] +use ark_ec::group::Group; +use ark_ff::{One, UniformRand, Zero}; + +pub fn group_test(a: G, mut b: G) { + let mut rng = ark_std::test_rng(); + let zero = G::zero(); + let fr_zero = G::ScalarField::zero(); + let fr_one = G::ScalarField::one(); + let fr_two = fr_one + &fr_one; + + assert_eq!(zero, zero); + assert_eq!(zero.is_zero(), true); + assert_eq!(a.mul(&fr_one), a); + assert_eq!(a.mul(&fr_two), a + &a); + assert_eq!(a.mul(&fr_zero), zero); + assert_eq!(a.mul(&fr_zero) - &a, -a); + assert_eq!(a.mul(&fr_one) - &a, zero); + assert_eq!(a.mul(&fr_two) - &a, a); + + // a == a + assert_eq!(a, a); + // a + 0 = a + assert_eq!(a + &zero, a); + // a - 0 = a + assert_eq!(a - &zero, a); + // a - a = 0 + assert_eq!(a - &a, zero); + // 0 - a = -a + assert_eq!(zero - &a, -a); + // a.double() = a + a + assert_eq!(a.double(), a + &a); + // b.double() = b + b + assert_eq!(b.double(), b + &b); + // a + b = b + a + assert_eq!(a + &b, b + &a); + // a - b = -(b - a) + assert_eq!(a - &b, -(b - &a)); + // (a + b) + a = a + (b + a) + assert_eq!((a + &b) + &a, a + &(b + &a)); + // (a + b).double() = (a + b) + (b + a) + assert_eq!((a + &b).double(), (a + &b) + &(b + &a)); + + // Check that double_in_place and double give the same result + let original_b = b; + b.double_in_place(); + assert_eq!(original_b.double(), b); + + let fr_rand1 = G::ScalarField::rand(&mut rng); + let fr_rand2 = G::ScalarField::rand(&mut rng); + let a_rand1 = a.mul(&fr_rand1); + let a_rand2 = a.mul(&fr_rand2); + let fr_three = fr_two + &fr_rand1; + let a_two = a.mul(&fr_two); + assert_eq!(a_two, a.double(), "(a * 2) != a.double()"); + let a_six = a.mul(&(fr_three * &fr_two)); + assert_eq!(a_two.mul(&fr_three), a_six, "(a * 2) * 3 != a * (2 * 3)"); + + assert_eq!( + a_rand1.mul(&fr_rand2), + a_rand2.mul(&fr_rand1), + "(a * r1) * r2 != (a * r2) * r1" + ); + assert_eq!( + a_rand2.mul(&fr_rand1), + a.mul(&(fr_rand1 * &fr_rand2)), + "(a * r2) * r1 != a * (r1 * r2)" + ); + assert_eq!( + a_rand1.mul(&fr_rand2), + a.mul(&(fr_rand1 * &fr_rand2)), + "(a * r1) * r2 != a * (r1 * r2)" + ); +} diff --git a/arkworks/algebra/test-templates/src/lib.rs b/arkworks/algebra/test-templates/src/lib.rs new file mode 100644 index 00000000..53f90130 --- /dev/null +++ b/arkworks/algebra/test-templates/src/lib.rs @@ -0,0 +1,4 @@ +pub mod curves; +pub mod fields; +pub mod groups; +pub mod msm; diff --git a/arkworks/algebra/test-templates/src/msm.rs b/arkworks/algebra/test-templates/src/msm.rs new file mode 100644 index 00000000..7d06a1c7 --- /dev/null +++ b/arkworks/algebra/test-templates/src/msm.rs @@ -0,0 +1,33 @@ +use ark_ec::{msm::VariableBaseMSM, AffineCurve, ProjectiveCurve}; +use ark_ff::{PrimeField, UniformRand, Zero}; + +fn naive_var_base_msm( + bases: &[G], + scalars: &[::BigInt], +) -> G::Projective { + let mut acc = G::Projective::zero(); + + for (base, scalar) in bases.iter().zip(scalars.iter()) { + acc += &base.mul(*scalar); + } + acc +} + +pub fn test_var_base_msm() { + const SAMPLES: usize = 1 << 10; + + let mut rng = ark_std::test_rng(); + + let v = (0..SAMPLES - 1) + .map(|_| G::ScalarField::rand(&mut rng).into_repr()) + .collect::>(); + let g = (0..SAMPLES) + .map(|_| G::Projective::rand(&mut rng)) + .collect::>(); + let g = ::batch_normalization_into_affine(&g); + + let naive = naive_var_base_msm(g.as_slice(), v.as_slice()); + let fast = VariableBaseMSM::multi_scalar_mul(g.as_slice(), v.as_slice()); + + 
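+    // The optimized variable-base MSM (a bucketed, Pippenger-style algorithm)
+    // must agree with the naive per-element sum computed above.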
assert_eq!(naive.into_affine(), fast.into_affine()); +} diff --git a/arkworks/crypto-primitives/.github/.markdownlint.yml b/arkworks/crypto-primitives/.github/.markdownlint.yml new file mode 100644 index 00000000..936fc62e --- /dev/null +++ b/arkworks/crypto-primitives/.github/.markdownlint.yml @@ -0,0 +1,14 @@ +# See https://github.com/DavidAnson/markdownlint#rules--aliases for list of markdown lint codes +default: true +# MD01 lint blocks having header's incrementing by more than # at a time. +MD001: false +MD007: { indent: 4 } +# MD013 blocks long lines +MD013: false +MD024: { siblings_only: true } +MD025: false +# MD033 lint blocks HTML in MD +MD033: false +# MD036 no-emphasis-as-heading +MD036: false +MD041: false diff --git a/arkworks/crypto-primitives/.github/PULL_REQUEST_TEMPLATE.md b/arkworks/crypto-primitives/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..dcb2f6b0 --- /dev/null +++ b/arkworks/crypto-primitives/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,26 @@ + + +## Description + + + +closes: #XXXX + +--- + +Before we can merge this PR, please make sure that all the following items have been +checked off. If any of the checklist items are not applicable, please leave them but +write a little note why. + +- [ ] Targeted PR against correct branch (main) +- [ ] Linked to Github issue with discussion and accepted design OR have an explanation in the PR that describes this work. +- [ ] Wrote unit tests +- [ ] Updated relevant documentation in the code +- [ ] Added a relevant changelog entry to the `Pending` section in `CHANGELOG.md` +- [ ] Re-reviewed `Files changed` in the Github PR explorer diff --git a/arkworks/crypto-primitives/.github/workflows/ci.yml b/arkworks/crypto-primitives/.github/workflows/ci.yml new file mode 100644 index 00000000..234fe807 --- /dev/null +++ b/arkworks/crypto-primitives/.github/workflows/ci.yml @@ -0,0 +1,121 @@ +name: CI +on: + pull_request: + push: + branches: + - master +env: + RUST_BACKTRACE: 1 + +jobs: + style: + name: Check Style + runs-on: ubuntu-latest + steps: + + - name: Checkout + uses: actions/checkout@v1 + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + components: rustfmt + + - name: cargo fmt --check + uses: actions-rs/cargo@v1 + with: + command: fmt + args: --all -- --check + + test: + name: Test + runs-on: ubuntu-latest + env: + RUSTFLAGS: -Dwarnings + strategy: + matrix: + rust: + - stable + - nightly + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Install Rust (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: ${{ matrix.rust }} + override: true + + - uses: actions/cache@v2 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: Check examples + uses: actions-rs/cargo@v1 + with: + command: check + args: --examples --all + + - name: Check examples with all features on stable + uses: actions-rs/cargo@v1 + with: + command: check + args: --examples --all-features --all + if: matrix.rust == 'stable' + + - name: Check benchmarks on nightly + uses: actions-rs/cargo@v1 + with: + command: check + args: --all-features --examples --all --benches + if: matrix.rust == 'nightly' + + - name: Test + uses: actions-rs/cargo@v1 + with: + command: test + args: "--all \ + --all-features \ + --exclude cp-benches " + + check_no_std: + name: Check no_std + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: 
actions/checkout@v2 + + - name: Install Rust (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + target: thumbv6m-none-eabi + override: true + + - name: Install Rust ARM64 (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + target: aarch64-unknown-none + override: true + + - uses: actions/cache@v2 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: crypto-primitives + run: | + cargo build --no-default-features --features=r1cs --target aarch64-unknown-none + cargo check --examples --no-default-features --features=r1cs --target aarch64-unknown-none diff --git a/arkworks/crypto-primitives/.github/workflows/linkify_changelog.yml b/arkworks/crypto-primitives/.github/workflows/linkify_changelog.yml new file mode 100644 index 00000000..2c64bf2e --- /dev/null +++ b/arkworks/crypto-primitives/.github/workflows/linkify_changelog.yml @@ -0,0 +1,20 @@ +name: Linkify Changelog + +on: + workflow_dispatch + +jobs: + linkify: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Add links + run: python3 scripts/linkify_changelog.py CHANGELOG.md + - name: Commit + run: | + git config user.name github-actions + git config user.email github-actions@github.com + git add . + git commit -m "Linkify Changelog" + git push \ No newline at end of file diff --git a/arkworks/crypto-primitives/.github/workflows/mdlinter.yml b/arkworks/crypto-primitives/.github/workflows/mdlinter.yml new file mode 100644 index 00000000..50cb2214 --- /dev/null +++ b/arkworks/crypto-primitives/.github/workflows/mdlinter.yml @@ -0,0 +1,34 @@ +name: Lint +on: + push: + branches: + - master + paths: + - "**.md" + pull_request: + paths: + - "**.md" + +jobs: + build: + name: Markdown linter + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Lint Code Base + uses: docker://github/super-linter:latest + env: + LINTER_RULES_PATH: .github + VALIDATE_ALL_CODEBASE: true + DEFAULT_BRANCH: master + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + VALIDATE_MD: true + MARKDOWN_CONFIG_FILE: .markdownlint.yml + VALIDATE_PROTOBUF: false + VALIDATE_JSCPD: false + # use Python Pylint as the only linter to avoid conflicts + VALIDATE_PYTHON_BLACK: false + VALIDATE_PYTHON_FLAKE8: false + VALIDATE_PYTHON_ISORT: false + VALIDATE_PYTHON_MYPY: false \ No newline at end of file diff --git a/arkworks/crypto-primitives/.gitignore b/arkworks/crypto-primitives/.gitignore new file mode 100644 index 00000000..448a8bb6 --- /dev/null +++ b/arkworks/crypto-primitives/.gitignore @@ -0,0 +1,12 @@ +target +Cargo.lock +.DS_Store +.idea +*.iml +*.ipynb_checkpoints +*.pyc +*.sage.py +params +*.swp +*.swo + diff --git a/arkworks/crypto-primitives/CHANGELOG.md b/arkworks/crypto-primitives/CHANGELOG.md new file mode 100644 index 00000000..be04a566 --- /dev/null +++ b/arkworks/crypto-primitives/CHANGELOG.md @@ -0,0 +1,45 @@ +# CHANGELOG + +## Pending + +### Breaking changes + +### Features + +### Improvements + +### Bug fixes + +## v0.3.0 + +### Breaking changes + +- [\#30](https://github.com/arkworks-rs/crypto-primitives/pull/30) Refactor the Merkle tree to separate the leaf hash and two-to-one hash. + +### Features + +- [\#38](https://github.com/arkworks-rs/crypto-primitives/pull/38) Add a signature verification trait `SigVerifyGadget`. 
+- [\#44](https://github.com/arkworks-rs/crypto-primitives/pull/44) Add basic ElGamal encryption gadgets. +- [\#48](https://github.com/arkworks-rs/crypto-primitives/pull/48) Add `CanonicalSerialize` and `CanonicalDeserialize` to `Path` and `CRH` outputs. + +### Improvements + +### Bug fixes + +## v0.2.0 + +### Breaking changes + +### Features + +- [\#2](https://github.com/arkworks-rs/crypto-primitives/pull/2) Add the `SNARK` gadget traits. +- [\#3](https://github.com/arkworks-rs/crypto-primitives/pull/3) Add unchecked allocation for `ProofVar` and `VerifyingKeyVar`. +- [\#4](https://github.com/arkworks-rs/crypto-primitives/pull/4) Add `verifier_size` to `SNARKGadget`. +- [\#6](https://github.com/arkworks-rs/crypto-primitives/pull/6) Add `IntoIterator` for SNARK input gadgets. +- [\#28](https://github.com/arkworks-rs/crypto-primitives/pull/28) Adds Poseidon CRH w/ constraints. + +### Improvements + +### Bug fixes + +## v0.1.0 (Initial release of arkworks/crypto-primitives) diff --git a/arkworks/crypto-primitives/Cargo.toml b/arkworks/crypto-primitives/Cargo.toml new file mode 100644 index 00000000..c33ca328 --- /dev/null +++ b/arkworks/crypto-primitives/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "ark-crypto-primitives" +version = "0.3.0" +authors = [ "arkworks contributors" ] +description = "A library of useful cryptographic primitives" +homepage = "https://arkworks.rs" +repository = "https://github.com/arkworks-rs/crypto-primitives" +documentation = "https://docs.rs/ark-crypto-primitives/" +keywords = [ "r1cs", "pedersen", "blake2s", "snark", "schnorr" ] +categories = ["cryptography"] +include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +license = "MIT/Apache-2.0" +edition = "2018" + +################################# Dependencies ################################ + +[dependencies] +ark-ff = { path = "../algebra/ff", version = "^0.3.0", default-features = false } +ark-ec = { path = "../algebra/ec", version = "^0.3.0", default-features = false } +ark-std = { path = "../std", version = "^0.3.0", default-features = false } +ark-relations = { path = "../snark/relations", version = "^0.3.0", default-features = false } +ark-serialize = { path = "../algebra/serialize", version = "^0.3.0", default-features = false, features = [ "derive" ] } + +blake2 = { version = "0.9", default-features = false } +digest = "0.9" + +ark-r1cs-std = { path = "../r1cs-std", version = "^0.3.0", optional = true, default-features = false } +ark-snark = { path = "../snark/snark", version = "^0.3.0", default-features = false } + +ark-nonnative-field = { path = "../nonnative", version = "^0.3.0", optional = true, default-features = false } + +rayon = { version = "1.0", optional = true } +derivative = { version = "2.0", features = ["use_core"] } +tracing = { version = "0.1", default-features = false, features = [ "attributes" ], optional = true } + +[features] +default = ["std"] +std = [ "ark-ff/std", "ark-ec/std", "ark-std/std", "ark-relations/std" ] +print-trace = [ "ark-std/print-trace" ] +parallel = [ "std", "rayon", "ark-ec/parallel", "ark-std/parallel", "ark-ff/parallel" ] +r1cs = [ "ark-r1cs-std", "tracing", "ark-nonnative-field" ] + +[dev-dependencies] +ark-ed-on-bls12-381 = { version = "^0.3.0", default-features = false, features = [ "r1cs" ] } +ark-bls12-377 = { version = "^0.3.0", default-features = false, features = [ "curve", "r1cs" ] } +ark-mnt4-298 = { version = "^0.3.0", default-features = false, features = [ 
"curve", "r1cs" ] } +ark-mnt6-298 = { version = "^0.3.0", default-features = false, features = [ "r1cs" ] } diff --git a/arkworks/crypto-primitives/LICENSE-APACHE b/arkworks/crypto-primitives/LICENSE-APACHE new file mode 100644 index 00000000..16fe87b0 --- /dev/null +++ b/arkworks/crypto-primitives/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/arkworks/crypto-primitives/LICENSE-MIT b/arkworks/crypto-primitives/LICENSE-MIT new file mode 100644 index 00000000..72dc60d8 --- /dev/null +++ b/arkworks/crypto-primitives/LICENSE-MIT @@ -0,0 +1,19 @@ +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/arkworks/crypto-primitives/README.md b/arkworks/crypto-primitives/README.md new file mode 100644 index 00000000..9231a000 --- /dev/null +++ b/arkworks/crypto-primitives/README.md @@ -0,0 +1,56 @@ +

+# ark-crypto-primitives
+
+The arkworks ecosystem consists of Rust libraries for designing and working with __zero-knowledge succinct non-interactive arguments (zkSNARKs)__. This repository contains efficient implementations of cryptographic primitives such as collision-resistant hash functions, hiding commitments, pseudo-random functions, signatures, and, optionally, R1CS constraints for these.
+
+This library is released under the MIT License and the Apache v2 License (see [License](#license)).
+
+**WARNING:** This is an academic proof-of-concept prototype, and in particular has not received careful code review. This implementation is NOT ready for production use.
+
+## Build guide
+
+The library compiles on the `stable` toolchain of the Rust compiler. To install the latest version of Rust, first install `rustup` by following the instructions [here](https://rustup.rs/), or via your platform's package manager. Once `rustup` is installed, install the Rust toolchain by invoking:
+
+```bash
+rustup install stable
+```
+
+After that, use `cargo`, the standard Rust build tool, to build the library:
+
+```bash
+git clone https://github.com/arkworks-rs/crypto-primitives.git
+cargo build --release
+```
+
+This library comes with unit tests for each of the provided crates. Run the tests with:
+
+```bash
+cargo test
+```
+
+## License
+
+This library is licensed under either of the following licenses, at your discretion.
+
+* Apache License Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or [apache.org license link](http://www.apache.org/licenses/LICENSE-2.0))
+* MIT license ([LICENSE-MIT](LICENSE-MIT) or [opensource.org license link](http://opensource.org/licenses/MIT))
+
+Unless you explicitly state otherwise, any contribution submitted for inclusion in this library by you shall be dual licensed as above (as defined in the Apache v2 License), without any additional terms or conditions.
+
+## Acknowledgements
+
+This work was supported by:
+a Google Faculty Award;
+the National Science Foundation;
+the UC Berkeley Center for Long-Term Cybersecurity;
+and donations from the Ethereum Foundation, the Interchain Foundation, and Qtum.
+
+An earlier version of this library was developed as part of the paper *"[ZEXE: Enabling Decentralized Private Computation][zexe]"*.
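+
+## Example
+
+The following minimal sketch shows how to commit to a few bytes with the Pedersen commitment scheme; the curve (`ark-ed-on-bls12-381`) and the small window parameters are illustrative choices borrowed from this crate's test suite, not the only valid configuration:
+
+```rust
+use ark_crypto_primitives::commitment::{pedersen, CommitmentScheme};
+use ark_ed_on_bls12_381::{EdwardsProjective as JubJub, Fr};
+use ark_std::UniformRand;
+
+#[derive(Clone, PartialEq, Eq, Hash)]
+struct Window;
+
+impl pedersen::Window for Window {
+    // 4-bit windows, 8 windows: commits to up to 32 bits of input.
+    const WINDOW_SIZE: usize = 4;
+    const NUM_WINDOWS: usize = 8;
+}
+
+type Comm = pedersen::Commitment<JubJub, Window>;
+
+fn main() {
+    let rng = &mut ark_std::test_rng();
+    // One-time sampling of the commitment generators.
+    let params = Comm::setup(rng).unwrap();
+    // Fresh randomness hides the committed input.
+    let r = pedersen::Randomness(Fr::rand(rng));
+    let commitment = Comm::commit(&params, &[1u8; 4], &r).unwrap();
+    println!("committed to 4 bytes: {:?}", commitment);
+}
+```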
+
+[zexe]: https://ia.cr/2018/962
diff --git a/arkworks/crypto-primitives/cp-benches/Cargo.toml b/arkworks/crypto-primitives/cp-benches/Cargo.toml
new file mode 100644
index 00000000..7dbde8ae
--- /dev/null
+++ b/arkworks/crypto-primitives/cp-benches/Cargo.toml
@@ -0,0 +1,43 @@
+[package]
+name = "cp-benches"
+version = "0.3.0"
+authors = [ "arkworks contributors" ]
+description = "Benchmarks for cryptographic primitives that are used by Zexe"
+repository = "https://github.com/arkworks-rs/crypto-primitives"
+keywords = ["r1cs", "groth16", "gm17", "pedersen", "blake2s"]
+categories = ["cryptography"]
+license = "MIT/Apache-2.0"
+publish = false
+edition = "2018"
+
+################################# Dependencies ################################
+
+[dev-dependencies]
+ark-crypto-primitives = { path = "../" }
+ark-ed-on-bls12-377 = { version = "^0.3.0", default-features = false }
+ark-std = { version = "^0.3.0", default-features = false }
+
+blake2 = { version = "0.9", default-features = false }
+criterion = "0.3.1"
+
+################################# Benchmarks ##################################
+
+[[bench]]
+name = "pedersen_crh"
+path = "benches/crypto_primitives/crh.rs"
+harness = false
+
+[[bench]]
+name = "pedersen_comm"
+path = "benches/crypto_primitives/comm.rs"
+harness = false
+
+[[bench]]
+name = "blake2s_prf"
+path = "benches/crypto_primitives/prf.rs"
+harness = false
+
+[[bench]]
+name = "schnorr_sig"
+path = "benches/crypto_primitives/signature.rs"
+harness = false
diff --git a/arkworks/crypto-primitives/cp-benches/benches/crypto_primitives/comm.rs b/arkworks/crypto-primitives/cp-benches/benches/crypto_primitives/comm.rs
new file mode 100644
index 00000000..25a78a2a
--- /dev/null
+++ b/arkworks/crypto-primitives/cp-benches/benches/crypto_primitives/comm.rs
@@ -0,0 +1,52 @@
+#[macro_use]
+extern crate criterion;
+
+use ark_crypto_primitives::commitment::{pedersen::*, CommitmentScheme};
+use ark_ed_on_bls12_377::EdwardsProjective as Edwards;
+use ark_std::UniformRand;
+use criterion::Criterion;
+
+#[derive(Clone, PartialEq, Eq, Hash)]
+pub struct CommWindow;
+
+impl Window for CommWindow {
+    const WINDOW_SIZE: usize = 250;
+    const NUM_WINDOWS: usize = 8;
+}
+
+fn pedersen_comm_setup(c: &mut Criterion) {
+    c.bench_function("Pedersen Commitment Setup", move |b| {
+        b.iter(|| {
+            let mut rng = &mut ark_std::test_rng();
+            Commitment::<Edwards, CommWindow>::setup(&mut rng).unwrap()
+        })
+    });
+}
+
+fn pedersen_comm_eval(c: &mut Criterion) {
+    let mut rng = &mut ark_std::test_rng();
+    let parameters = Commitment::<Edwards, CommWindow>::setup(&mut rng).unwrap();
+    let input = vec![5u8; 128];
+    c.bench_function("Pedersen Commitment Eval", move |b| {
+        b.iter(|| {
+            let rng = &mut ark_std::test_rng();
+            let commitment_randomness = Randomness::rand(rng);
+            Commitment::<Edwards, CommWindow>::commit(&parameters, &input, &commitment_randomness)
+                .unwrap()
+        })
+    });
+}
+
+criterion_group! {
+    name = comm_setup;
+    config = Criterion::default().sample_size(10);
+    targets = pedersen_comm_setup
+}
+
+criterion_group!
{ + name = comm_eval; + config = Criterion::default().sample_size(10); + targets = pedersen_comm_eval +} + +criterion_main!(comm_setup, comm_eval); diff --git a/arkworks/crypto-primitives/cp-benches/benches/crypto_primitives/crh.rs b/arkworks/crypto-primitives/cp-benches/benches/crypto_primitives/crh.rs new file mode 100644 index 00000000..2196ef86 --- /dev/null +++ b/arkworks/crypto-primitives/cp-benches/benches/crypto_primitives/crh.rs @@ -0,0 +1,49 @@ +#[macro_use] +extern crate criterion; + +use ark_crypto_primitives::crh::{ + pedersen::{Window, CRH as PedersenCRH}, + CRH, +}; +use ark_ed_on_bls12_377::EdwardsProjective as Edwards; +use criterion::Criterion; + +#[derive(Clone, PartialEq, Eq, Hash)] +pub struct HashWindow; + +impl Window for HashWindow { + const WINDOW_SIZE: usize = 250; + const NUM_WINDOWS: usize = 8; +} + +fn pedersen_crh_setup(c: &mut Criterion) { + c.bench_function("Pedersen CRH Setup", move |b| { + b.iter(|| { + let mut rng = &mut ark_std::test_rng(); + PedersenCRH::::setup(&mut rng).unwrap() + }) + }); +} + +fn pedersen_crh_eval(c: &mut Criterion) { + let mut rng = &mut ark_std::test_rng(); + let parameters = PedersenCRH::::setup(&mut rng).unwrap(); + let input = vec![5u8; 128]; + c.bench_function("Pedersen CRH Eval", move |b| { + b.iter(|| PedersenCRH::::evaluate(¶meters, &input).unwrap()) + }); +} + +criterion_group! { + name = crh_setup; + config = Criterion::default().sample_size(10); + targets = pedersen_crh_setup +} + +criterion_group! { + name = crh_eval; + config = Criterion::default().sample_size(10); + targets = pedersen_crh_eval +} + +criterion_main!(crh_setup, crh_eval); diff --git a/arkworks/crypto-primitives/cp-benches/benches/crypto_primitives/prf.rs b/arkworks/crypto-primitives/cp-benches/benches/crypto_primitives/prf.rs new file mode 100644 index 00000000..4c2eb274 --- /dev/null +++ b/arkworks/crypto-primitives/cp-benches/benches/crypto_primitives/prf.rs @@ -0,0 +1,23 @@ +#[macro_use] +extern crate criterion; + +use ark_crypto_primitives::prf::*; +use ark_std::rand::Rng; +use criterion::Criterion; + +fn blake2s_prf_eval(c: &mut Criterion) { + let rng = &mut ark_std::test_rng(); + let input: [u8; 32] = rng.gen(); + let seed: [u8; 32] = rng.gen(); + c.bench_function("Blake2s PRF Eval", move |b| { + b.iter(|| Blake2s::evaluate(&seed, &input).unwrap()) + }); +} + +criterion_group! 
{ + name = prf_eval; + config = Criterion::default().sample_size(50); + targets = blake2s_prf_eval +} + +criterion_main!(prf_eval); diff --git a/arkworks/crypto-primitives/cp-benches/benches/crypto_primitives/signature.rs b/arkworks/crypto-primitives/cp-benches/benches/crypto_primitives/signature.rs new file mode 100644 index 00000000..7af2cab7 --- /dev/null +++ b/arkworks/crypto-primitives/cp-benches/benches/crypto_primitives/signature.rs @@ -0,0 +1,89 @@ +#[macro_use] +extern crate criterion; + +use ark_crypto_primitives::signature::{schnorr::*, SignatureScheme}; +use ark_ed_on_bls12_377::EdwardsProjective as Edwards; +use ark_std::rand::Rng; +use blake2::Blake2s; +use criterion::Criterion; + +type SchnorrEdwards = Schnorr; +fn schnorr_signature_setup(c: &mut Criterion) { + c.bench_function("SchnorrEdwards: Setup", move |b| { + b.iter(|| { + let mut rng = &mut ark_std::test_rng(); + SchnorrEdwards::setup(&mut rng).unwrap() + }) + }); +} + +fn schnorr_signature_keygen(c: &mut Criterion) { + let mut rng = &mut ark_std::test_rng(); + let parameters = SchnorrEdwards::setup(&mut rng).unwrap(); + + c.bench_function("SchnorrEdwards: KeyGen", move |b| { + b.iter(|| { + let mut rng = &mut ark_std::test_rng(); + SchnorrEdwards::keygen(¶meters, &mut rng).unwrap() + }) + }); +} + +fn schnorr_signature_sign(c: &mut Criterion) { + let mut rng = &mut ark_std::test_rng(); + let parameters = SchnorrEdwards::setup(&mut rng).unwrap(); + let (_, sk) = SchnorrEdwards::keygen(¶meters, &mut rng).unwrap(); + let message = [100u8; 128]; + + c.bench_function("SchnorrEdwards: Sign", move |b| { + b.iter(|| { + let mut rng = &mut ark_std::test_rng(); + SchnorrEdwards::sign(¶meters, &sk, &message, &mut rng).unwrap() + }) + }); +} + +fn schnorr_signature_verify(c: &mut Criterion) { + let mut rng = &mut ark_std::test_rng(); + let parameters = SchnorrEdwards::setup(&mut rng).unwrap(); + let (pk, sk) = SchnorrEdwards::keygen(¶meters, &mut rng).unwrap(); + let message = [100u8; 128]; + let signature = SchnorrEdwards::sign(¶meters, &sk, &message, &mut rng).unwrap(); + + c.bench_function("SchnorrEdwards: Verify", move |b| { + b.iter(|| SchnorrEdwards::verify(¶meters, &pk, &message, &signature).unwrap()) + }); +} + +fn schnorr_signature_randomize_pk(c: &mut Criterion) { + let mut rng = &mut ark_std::test_rng(); + let parameters = SchnorrEdwards::setup(&mut rng).unwrap(); + let (pk, _) = SchnorrEdwards::keygen(¶meters, &mut rng).unwrap(); + let randomness: [u8; 32] = rng.gen(); + + c.bench_function("SchnorrEdwards: Randomize PubKey", move |b| { + b.iter(|| SchnorrEdwards::randomize_public_key(¶meters, &pk, &randomness).unwrap()) + }); +} + +fn schnorr_signature_randomize_signature(c: &mut Criterion) { + let mut rng = &mut ark_std::test_rng(); + let parameters = SchnorrEdwards::setup(&mut rng).unwrap(); + let (_, sk) = SchnorrEdwards::keygen(¶meters, &mut rng).unwrap(); + let randomness: [u8; 32] = rng.gen(); + let message = [100u8; 128]; + let signature = SchnorrEdwards::sign(¶meters, &sk, &message, &mut rng).unwrap(); + + c.bench_function("SchnorrEdwards: Randomize Signature", move |b| { + b.iter(|| { + SchnorrEdwards::randomize_signature(¶meters, &signature, &randomness).unwrap() + }) + }); +} +criterion_group! 
{ + name = schnorr_sig; + config = Criterion::default().sample_size(20); + targets = schnorr_signature_setup, schnorr_signature_keygen, schnorr_signature_sign, + schnorr_signature_verify, schnorr_signature_randomize_pk, schnorr_signature_randomize_signature +} +criterion_main!(schnorr_sig); diff --git a/arkworks/crypto-primitives/scripts/linkify_changelog.py b/arkworks/crypto-primitives/scripts/linkify_changelog.py new file mode 100644 index 00000000..1d85f290 --- /dev/null +++ b/arkworks/crypto-primitives/scripts/linkify_changelog.py @@ -0,0 +1,30 @@ +import fileinput +import os +import re +import sys + +# Set this to the name of the repo, if you don't want it to be read from the filesystem. +# It assumes the changelog file is in the root of the repo. +repo_name = "" + +# This script goes through the provided file, and replaces any " \#", +# with the valid mark down formatted link to it. e.g. +# " [\#number](https://github.com/arkworks-rs/template/pull/) +# Note that if the number is for a an issue, github will auto-redirect you when you click the link. +# It is safe to run the script multiple times in succession. +# +# Example usage $ python3 linkify_changelog.py ../CHANGELOG.md +changelog_path = sys.argv[1] +if repo_name == "": + path = os.path.abspath(changelog_path) + components = path.split(os.path.sep) + repo_name = components[-2] + +for line in fileinput.input(inplace=True): + line = re.sub( + r"\- #([0-9]*)", + r"- [\#\1](https://github.com/arkworks-rs/" + repo_name + r"/pull/\1)", + line.rstrip(), + ) + # edits the current file + print(line) \ No newline at end of file diff --git a/arkworks/crypto-primitives/src/commitment/blake2s/constraints.rs b/arkworks/crypto-primitives/src/commitment/blake2s/constraints.rs new file mode 100644 index 00000000..4659faae --- /dev/null +++ b/arkworks/crypto-primitives/src/commitment/blake2s/constraints.rs @@ -0,0 +1,132 @@ +use ark_relations::r1cs::{Namespace, SynthesisError}; + +use crate::{ + commitment::{blake2s, CommitmentGadget}, + prf::blake2s::constraints::{evaluate_blake2s, OutputVar}, + Vec, +}; +use ark_ff::{Field, PrimeField}; +use ark_r1cs_std::prelude::*; + +use core::borrow::Borrow; + +#[derive(Clone)] +pub struct ParametersVar; + +#[derive(Clone)] +pub struct RandomnessVar(pub Vec>); + +pub struct CommGadget; + +impl CommitmentGadget for CommGadget { + type OutputVar = OutputVar; + type ParametersVar = ParametersVar; + type RandomnessVar = RandomnessVar; + + #[tracing::instrument(target = "r1cs", skip(input, r))] + fn commit( + _: &Self::ParametersVar, + input: &[UInt8], + r: &Self::RandomnessVar, + ) -> Result { + let mut input_bits = Vec::with_capacity(512); + for byte in input.iter().chain(r.0.iter()) { + input_bits.extend_from_slice(&byte.to_bits_le()?); + } + let mut result = Vec::new(); + for int in evaluate_blake2s(&input_bits)?.into_iter() { + let chunk = int.to_bytes()?; + result.extend_from_slice(&chunk); + } + Ok(OutputVar(result)) + } +} + +impl AllocVar<(), ConstraintF> for ParametersVar { + #[tracing::instrument(target = "r1cs", skip(_cs, _f))] + fn new_variable>( + _cs: impl Into>, + _f: impl FnOnce() -> Result, + _mode: AllocationMode, + ) -> Result { + Ok(ParametersVar) + } +} + +impl AllocVar<[u8; 32], ConstraintF> for RandomnessVar { + #[tracing::instrument(target = "r1cs", skip(cs, f))] + fn new_variable>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let bytes = f().map(|b| *b.borrow()).unwrap_or([0u8; 32]); + match mode { + AllocationMode::Constant 
=> Ok(Self(UInt8::constant_vec(&bytes))), + AllocationMode::Input => UInt8::new_input_vec(cs, &bytes).map(Self), + AllocationMode::Witness => UInt8::new_witness_vec(cs, &bytes).map(Self), + } + } +} + +#[cfg(test)] +mod test { + use crate::commitment::{ + blake2s::{ + constraints::{CommGadget, RandomnessVar}, + Commitment, + }, + CommitmentGadget, CommitmentScheme, + }; + use ark_ed_on_bls12_381::Fq as Fr; + use ark_r1cs_std::prelude::*; + use ark_relations::r1cs::ConstraintSystem; + use ark_std::rand::Rng; + + #[test] + fn commitment_gadget_test() { + let cs = ConstraintSystem::::new_ref(); + + let input = [1u8; 32]; + + let rng = &mut ark_std::test_rng(); + + type TestCOMM = Commitment; + type TestCOMMGadget = CommGadget; + + let mut randomness = [0u8; 32]; + rng.fill(&mut randomness); + + let parameters = (); + let primitive_result = Commitment::commit(¶meters, &input, &randomness).unwrap(); + + let mut input_var = vec![]; + for byte in &input { + input_var.push(UInt8::new_witness(cs.clone(), || Ok(*byte)).unwrap()); + } + + let mut randomness_var = vec![]; + for r_byte in randomness.iter() { + randomness_var.push(UInt8::new_witness(cs.clone(), || Ok(r_byte)).unwrap()); + } + let randomness_var = RandomnessVar(randomness_var); + + let parameters_var = + >::ParametersVar::new_witness( + ark_relations::ns!(cs, "gadget_parameters"), + || Ok(¶meters), + ) + .unwrap(); + let result_var = >::commit( + ¶meters_var, + &input_var, + &randomness_var, + ) + .unwrap(); + + for i in 0..32 { + assert_eq!(primitive_result[i], result_var.0[i].value().unwrap()); + } + assert!(cs.is_satisfied().unwrap()); + } +} diff --git a/arkworks/crypto-primitives/src/commitment/blake2s/mod.rs b/arkworks/crypto-primitives/src/commitment/blake2s/mod.rs new file mode 100644 index 00000000..4c118b0b --- /dev/null +++ b/arkworks/crypto-primitives/src/commitment/blake2s/mod.rs @@ -0,0 +1,33 @@ +use super::CommitmentScheme; +use crate::Error; +use ark_std::rand::Rng; +use blake2::Blake2s as b2s; +use digest::Digest; + +pub struct Commitment; + +#[cfg(feature = "r1cs")] +pub mod constraints; + +impl CommitmentScheme for Commitment { + type Parameters = (); + type Randomness = [u8; 32]; + type Output = [u8; 32]; + + fn setup(_: &mut R) -> Result { + Ok(()) + } + + fn commit( + _: &Self::Parameters, + input: &[u8], + r: &Self::Randomness, + ) -> Result { + let mut h = b2s::new(); + h.update(input); + h.update(r.as_ref()); + let mut result = [0u8; 32]; + result.copy_from_slice(&h.finalize()); + Ok(result) + } +} diff --git a/arkworks/crypto-primitives/src/commitment/constraints.rs b/arkworks/crypto-primitives/src/commitment/constraints.rs new file mode 100644 index 00000000..5124e5a5 --- /dev/null +++ b/arkworks/crypto-primitives/src/commitment/constraints.rs @@ -0,0 +1,23 @@ +use crate::commitment::CommitmentScheme; +use ark_ff::Field; +use ark_r1cs_std::prelude::*; +use ark_relations::r1cs::SynthesisError; +use core::fmt::Debug; + +pub trait CommitmentGadget { + type OutputVar: EqGadget + + ToBytesGadget + + AllocVar + + R1CSVar + + Clone + + Sized + + Debug; + type ParametersVar: AllocVar + Clone; + type RandomnessVar: AllocVar + Clone; + + fn commit( + parameters: &Self::ParametersVar, + input: &[UInt8], + r: &Self::RandomnessVar, + ) -> Result; +} diff --git a/arkworks/crypto-primitives/src/commitment/injective_map/constraints.rs b/arkworks/crypto-primitives/src/commitment/injective_map/constraints.rs new file mode 100644 index 00000000..7a1ce3cb --- /dev/null +++ 
b/arkworks/crypto-primitives/src/commitment/injective_map/constraints.rs @@ -0,0 +1,60 @@ +use crate::commitment::{ + injective_map::{InjectiveMap, PedersenCommCompressor}, + pedersen::{ + constraints::{CommGadget, ParametersVar, RandomnessVar}, + Window, + }, +}; + +pub use crate::crh::injective_map::constraints::InjectiveMapGadget; +use ark_ec::ProjectiveCurve; +use ark_ff::{Field, PrimeField}; +use ark_r1cs_std::{ + groups::{CurveVar, GroupOpsBounds}, + uint8::UInt8, +}; +use ark_relations::r1cs::SynthesisError; + +use ark_std::marker::PhantomData; + +type ConstraintF = <::BaseField as Field>::BasePrimeField; + +pub struct CommitmentCompressorGadget +where + C: ProjectiveCurve, + I: InjectiveMap, + W: Window, + GG: CurveVar>, + IG: InjectiveMapGadget, + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, +{ + _compressor: PhantomData, + _compressor_gadget: PhantomData, + _comm: PhantomData>, +} + +impl + crate::commitment::CommitmentGadget, ConstraintF> + for CommitmentCompressorGadget +where + C: ProjectiveCurve, + I: InjectiveMap, + GG: CurveVar>, + ConstraintF: PrimeField, + IG: InjectiveMapGadget, + W: Window, + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, +{ + type OutputVar = IG::OutputVar; + type ParametersVar = ParametersVar; + type RandomnessVar = RandomnessVar>; + + fn commit( + parameters: &Self::ParametersVar, + input: &[UInt8>], + r: &Self::RandomnessVar, + ) -> Result { + let result = CommGadget::::commit(parameters, input, r)?; + IG::evaluate(&result) + } +} diff --git a/arkworks/crypto-primitives/src/commitment/injective_map/mod.rs b/arkworks/crypto-primitives/src/commitment/injective_map/mod.rs new file mode 100644 index 00000000..c62ad12f --- /dev/null +++ b/arkworks/crypto-primitives/src/commitment/injective_map/mod.rs @@ -0,0 +1,44 @@ +use crate::Error; +use ark_std::marker::PhantomData; + +use super::{pedersen, CommitmentScheme}; +pub use crate::crh::injective_map::InjectiveMap; +use ark_ec::ProjectiveCurve; +use ark_std::rand::Rng; + +#[cfg(feature = "r1cs")] +pub mod constraints; + +pub struct PedersenCommCompressor, W: pedersen::Window> { + _group: PhantomData, + _compressor: PhantomData, + _comm: pedersen::Commitment, +} + +impl, W: pedersen::Window> CommitmentScheme + for PedersenCommCompressor +{ + type Output = I::Output; + type Parameters = pedersen::Parameters; + type Randomness = pedersen::Randomness; + + fn setup(rng: &mut R) -> Result { + let time = start_timer!(|| format!("PedersenCompressor::Setup")); + let params = pedersen::Commitment::::setup(rng); + end_timer!(time); + params + } + + fn commit( + parameters: &Self::Parameters, + input: &[u8], + randomness: &Self::Randomness, + ) -> Result { + let eval_time = start_timer!(|| "PedersenCompressor::Eval"); + let result = I::injective_map(&pedersen::Commitment::::commit( + parameters, input, randomness, + )?)?; + end_timer!(eval_time); + Ok(result) + } +} diff --git a/arkworks/crypto-primitives/src/commitment/mod.rs b/arkworks/crypto-primitives/src/commitment/mod.rs new file mode 100644 index 00000000..0b365b27 --- /dev/null +++ b/arkworks/crypto-primitives/src/commitment/mod.rs @@ -0,0 +1,30 @@ +use ark_ff::UniformRand; +use ark_std::rand::Rng; +use ark_std::{fmt::Debug, hash::Hash}; + +use ark_ff::bytes::ToBytes; + +pub mod blake2s; +pub mod injective_map; +pub mod pedersen; + +#[cfg(feature = "r1cs")] +pub mod constraints; +#[cfg(feature = "r1cs")] +pub use constraints::*; + +use crate::Error; + +pub trait CommitmentScheme { + type Output: ToBytes + Clone + Default + Eq + Hash + Debug; + type Parameters: Clone; 
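+    // `Randomness` must be sampled fresh (and kept secret) for each commitment;
+    // reusing or fixing it forfeits the hiding property of the scheme.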
+ type Randomness: Clone + ToBytes + Default + Eq + UniformRand + Debug; + + fn setup(r: &mut R) -> Result; + + fn commit( + parameters: &Self::Parameters, + input: &[u8], + r: &Self::Randomness, + ) -> Result; +} diff --git a/arkworks/crypto-primitives/src/commitment/pedersen/constraints.rs b/arkworks/crypto-primitives/src/commitment/pedersen/constraints.rs new file mode 100644 index 00000000..83ddcab1 --- /dev/null +++ b/arkworks/crypto-primitives/src/commitment/pedersen/constraints.rs @@ -0,0 +1,203 @@ +use crate::{ + commitment::pedersen::{Commitment, Parameters, Randomness}, + crh::pedersen::Window, + Vec, +}; +use ark_ec::ProjectiveCurve; +use ark_ff::{ + fields::{Field, PrimeField}, + to_bytes, Zero, +}; +use ark_relations::r1cs::{Namespace, SynthesisError}; + +use ark_r1cs_std::prelude::*; +use core::{borrow::Borrow, marker::PhantomData}; + +type ConstraintF = <::BaseField as Field>::BasePrimeField; + +#[derive(Derivative)] +#[derivative(Clone(bound = "C: ProjectiveCurve, GG: CurveVar>"))] +pub struct ParametersVar>> +where + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, +{ + params: Parameters, + #[doc(hidden)] + _group_var: PhantomData, +} + +#[derive(Clone, Debug)] +pub struct RandomnessVar(Vec>); + +pub struct CommGadget>, W: Window> +where + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, +{ + #[doc(hidden)] + _curve: PhantomData<*const C>, + #[doc(hidden)] + _group_var: PhantomData<*const GG>, + #[doc(hidden)] + _window: PhantomData<*const W>, +} + +impl crate::commitment::CommitmentGadget, ConstraintF> + for CommGadget +where + C: ProjectiveCurve, + GG: CurveVar>, + W: Window, + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, + ConstraintF: PrimeField, +{ + type OutputVar = GG; + type ParametersVar = ParametersVar; + type RandomnessVar = RandomnessVar>; + + #[tracing::instrument(target = "r1cs", skip(parameters, r))] + fn commit( + parameters: &Self::ParametersVar, + input: &[UInt8>], + r: &Self::RandomnessVar, + ) -> Result { + assert!((input.len() * 8) <= (W::WINDOW_SIZE * W::NUM_WINDOWS)); + + let mut padded_input = input.to_vec(); + // Pad if input length is less than `W::WINDOW_SIZE * W::NUM_WINDOWS`. + if (input.len() * 8) < W::WINDOW_SIZE * W::NUM_WINDOWS { + let current_length = input.len(); + for _ in current_length..((W::WINDOW_SIZE * W::NUM_WINDOWS) / 8) { + padded_input.push(UInt8::constant(0u8)); + } + } + + assert_eq!(padded_input.len() * 8, W::WINDOW_SIZE * W::NUM_WINDOWS); + assert_eq!(parameters.params.generators.len(), W::NUM_WINDOWS); + + // Allocate new variable for commitment output. 
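+        // The commitment g^m is computed as a multi-scalar multiplication of the
+        // precomputed window generators by the little-endian bits of the input.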
+ let input_in_bits: Vec> = padded_input + .iter() + .flat_map(|byte| byte.to_bits_le().unwrap()) + .collect(); + let input_in_bits = input_in_bits.chunks(W::WINDOW_SIZE); + let mut result = + GG::precomputed_base_multiscalar_mul_le(¶meters.params.generators, input_in_bits)?; + + // Compute h^r + let rand_bits: Vec<_> = + r.0.iter() + .flat_map(|byte| byte.to_bits_le().unwrap()) + .collect(); + result.precomputed_base_scalar_mul_le( + rand_bits + .iter() + .zip(¶meters.params.randomness_generator), + )?; + + Ok(result) + } +} + +impl AllocVar, ConstraintF> for ParametersVar +where + C: ProjectiveCurve, + GG: CurveVar>, + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, +{ + fn new_variable>>( + _cs: impl Into>>, + f: impl FnOnce() -> Result, + _mode: AllocationMode, + ) -> Result { + let params = f()?.borrow().clone(); + Ok(ParametersVar { + params, + _group_var: PhantomData, + }) + } +} + +impl AllocVar, F> for RandomnessVar +where + C: ProjectiveCurve, + F: PrimeField, +{ + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let r = to_bytes![&f().map(|b| b.borrow().0).unwrap_or(C::ScalarField::zero())].unwrap(); + match mode { + AllocationMode::Constant => Ok(Self(UInt8::constant_vec(&r))), + AllocationMode::Input => UInt8::new_input_vec(cs, &r).map(Self), + AllocationMode::Witness => UInt8::new_witness_vec(cs, &r).map(Self), + } + } +} + +#[cfg(test)] +mod test { + use ark_ed_on_bls12_381::{constraints::EdwardsVar, EdwardsProjective as JubJub, Fq, Fr}; + use ark_std::{test_rng, UniformRand}; + + use crate::{ + commitment::{ + pedersen::{constraints::CommGadget, Commitment, Randomness}, + CommitmentGadget, CommitmentScheme, + }, + crh::pedersen, + }; + use ark_r1cs_std::prelude::*; + use ark_relations::r1cs::ConstraintSystem; + + #[test] + fn commitment_gadget_test() { + let cs = ConstraintSystem::::new_ref(); + + #[derive(Clone, PartialEq, Eq, Hash)] + pub(super) struct Window; + + impl pedersen::Window for Window { + const WINDOW_SIZE: usize = 4; + const NUM_WINDOWS: usize = 8; + } + + let input = [1u8; 4]; + + let rng = &mut test_rng(); + + type TestCOMM = Commitment; + type TestCOMMGadget = CommGadget; + + let randomness = Randomness(Fr::rand(rng)); + + let parameters = Commitment::::setup(rng).unwrap(); + let primitive_result = + Commitment::::commit(¶meters, &input, &randomness).unwrap(); + + let mut input_var = vec![]; + for input_byte in input.iter() { + input_var.push(UInt8::new_witness(cs.clone(), || Ok(*input_byte)).unwrap()); + } + + let randomness_var = + >::RandomnessVar::new_witness( + ark_relations::ns!(cs, "gadget_randomness"), + || Ok(&randomness), + ) + .unwrap(); + let parameters_var = + >::ParametersVar::new_witness( + ark_relations::ns!(cs, "gadget_parameters"), + || Ok(¶meters), + ) + .unwrap(); + let result_var = + TestCOMMGadget::commit(¶meters_var, &input_var, &randomness_var).unwrap(); + + let primitive_result = primitive_result; + assert_eq!(primitive_result, result_var.value().unwrap()); + assert!(cs.is_satisfied().unwrap()); + } +} diff --git a/arkworks/crypto-primitives/src/commitment/pedersen/mod.rs b/arkworks/crypto-primitives/src/commitment/pedersen/mod.rs new file mode 100644 index 00000000..2a7c9657 --- /dev/null +++ b/arkworks/crypto-primitives/src/commitment/pedersen/mod.rs @@ -0,0 +1,120 @@ +use crate::{Error, Vec}; +use ark_ec::ProjectiveCurve; +use ark_ff::{bytes::ToBytes, BitIteratorLE, Field, FpParameters, PrimeField, ToConstraintField}; +use ark_std::io::{Result as IoResult, Write}; +use 
ark_std::marker::PhantomData; +use ark_std::rand::Rng; +use ark_std::UniformRand; + +use super::CommitmentScheme; + +pub use crate::crh::pedersen::Window; +use crate::crh::{pedersen, CRH}; + +#[cfg(feature = "r1cs")] +pub mod constraints; + +#[derive(Clone)] +pub struct Parameters { + pub randomness_generator: Vec, + pub generators: Vec>, +} + +pub struct Commitment { + group: PhantomData, + window: PhantomData, +} + +#[derive(Derivative)] +#[derivative(Clone, PartialEq, Debug, Eq, Default)] +pub struct Randomness(pub C::ScalarField); + +impl UniformRand for Randomness { + #[inline] + fn rand(rng: &mut R) -> Self { + Randomness(UniformRand::rand(rng)) + } +} + +impl ToBytes for Randomness { + fn write(&self, writer: W) -> IoResult<()> { + self.0.write(writer) + } +} + +impl CommitmentScheme for Commitment { + type Parameters = Parameters; + type Randomness = Randomness; + type Output = C::Affine; + + fn setup(rng: &mut R) -> Result { + let time = start_timer!(|| format!( + "PedersenCOMM::Setup: {} {}-bit windows; {{0,1}}^{{{}}} -> C", + W::NUM_WINDOWS, + W::WINDOW_SIZE, + W::NUM_WINDOWS * W::WINDOW_SIZE + )); + let num_powers = ::Params::MODULUS_BITS as usize; + let randomness_generator = pedersen::CRH::::generator_powers(num_powers, rng); + let generators = pedersen::CRH::::create_generators(rng); + end_timer!(time); + + Ok(Self::Parameters { + randomness_generator, + generators, + }) + } + + fn commit( + parameters: &Self::Parameters, + input: &[u8], + randomness: &Self::Randomness, + ) -> Result { + let commit_time = start_timer!(|| "PedersenCOMM::Commit"); + // If the input is too long, return an error. + if input.len() > W::WINDOW_SIZE * W::NUM_WINDOWS { + panic!("incorrect input length: {:?}", input.len()); + } + // Pad the input to the necessary length. + let mut padded_input = Vec::with_capacity(input.len()); + let mut input = input; + if (input.len() * 8) < W::WINDOW_SIZE * W::NUM_WINDOWS { + padded_input.extend_from_slice(input); + let padded_length = (W::WINDOW_SIZE * W::NUM_WINDOWS) / 8; + padded_input.resize(padded_length, 0u8); + input = padded_input.as_slice(); + } + assert_eq!(parameters.generators.len(), W::NUM_WINDOWS); + + // Invoke Pedersen CRH here, to prevent code duplication. + + let crh_parameters = pedersen::Parameters { + generators: parameters.generators.clone(), + }; + let mut result: C = pedersen::CRH::::evaluate(&crh_parameters, &input)?.into(); + let randomize_time = start_timer!(|| "Randomize"); + + // Compute h^r. 
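+ // `randomness_generator[i]` stores 2^i * H (successive doublings produced by
+ // `generator_powers`), so summing the powers selected by the little-endian
+ // bits of `r` yields r * H using additions only.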
+ for (bit, power) in BitIteratorLE::new(randomness.0.into_repr()) + .into_iter() + .zip(¶meters.randomness_generator) + { + if bit { + result += power + } + } + end_timer!(randomize_time); + end_timer!(commit_time); + + Ok(result.into()) + } +} + +impl> + ToConstraintField for Parameters +{ + #[inline] + fn to_field_elements(&self) -> Option> { + Some(Vec::new()) + } +} diff --git a/arkworks/crypto-primitives/src/crh/bowe_hopwood/constraints.rs b/arkworks/crypto-primitives/src/crh/bowe_hopwood/constraints.rs new file mode 100644 index 00000000..795d0adc --- /dev/null +++ b/arkworks/crypto-primitives/src/crh/bowe_hopwood/constraints.rs @@ -0,0 +1,174 @@ +use core::{borrow::Borrow, marker::PhantomData}; + +use crate::{ + crh::{ + bowe_hopwood::{Parameters, CHUNK_SIZE, CRH}, + pedersen::Window, + CRHGadget as CRGGadgetTrait, + }, + Vec, +}; +use ark_ec::{ModelParameters, TEModelParameters}; +use ark_ff::Field; +use ark_r1cs_std::{ + alloc::AllocVar, groups::curves::twisted_edwards::AffineVar, prelude::*, uint8::UInt8, +}; +use ark_relations::r1cs::{Namespace, SynthesisError}; + +use ark_r1cs_std::bits::boolean::Boolean; + +type ConstraintF
<P> = <<P as ModelParameters>
::BaseField as Field>::BasePrimeField; + +#[derive(Derivative)] +#[derivative(Clone(bound = "P: TEModelParameters, W: Window"))] +pub struct ParametersVar { + params: Parameters
<P>
, + #[doc(hidden)] + _window: PhantomData, +} + +pub struct CRHGadget>> +where + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + #[doc(hidden)] + _params: PhantomData
<P>
, + #[doc(hidden)] + _base_field: PhantomData, +} + +impl CRGGadgetTrait, ConstraintF
<P>
> for CRHGadget +where + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, + F: FieldVar>, + F: TwoBitLookupGadget, TableConstant = P::BaseField> + + ThreeBitCondNegLookupGadget, TableConstant = P::BaseField>, + P: TEModelParameters, + W: Window, +{ + type OutputVar = AffineVar; + type ParametersVar = ParametersVar; + + #[tracing::instrument(target = "r1cs", skip(parameters, input))] + fn evaluate( + parameters: &Self::ParametersVar, + input: &[UInt8>], + ) -> Result { + // Pad the input if it is not the current length. + let mut input_in_bits: Vec> = input + .iter() + .flat_map(|byte| byte.to_bits_le().unwrap()) + .collect(); + if (input_in_bits.len()) % CHUNK_SIZE != 0 { + let current_length = input_in_bits.len(); + for _ in 0..(CHUNK_SIZE - current_length % CHUNK_SIZE) { + input_in_bits.push(Boolean::constant(false)); + } + } + assert!(input_in_bits.len() % CHUNK_SIZE == 0); + assert_eq!(parameters.params.generators.len(), W::NUM_WINDOWS); + for generators in parameters.params.generators.iter() { + assert_eq!(generators.len(), W::WINDOW_SIZE); + } + + // Allocate new variable for the result. + let input_in_bits = input_in_bits + .chunks(W::WINDOW_SIZE * CHUNK_SIZE) + .map(|x| x.chunks(CHUNK_SIZE).collect::>()) + .collect::>(); + let result = AffineVar::precomputed_base_3_bit_signed_digit_scalar_mul( + ¶meters.params.generators, + &input_in_bits, + )?; + + Ok(result) + } +} + +impl AllocVar, ConstraintF
<P>
> for ParametersVar +where + P: TEModelParameters, + W: Window, +{ + #[tracing::instrument(target = "r1cs", skip(_cs, f))] + fn new_variable>>( + _cs: impl Into>>, + f: impl FnOnce() -> Result, + _mode: AllocationMode, + ) -> Result { + let params = f()?.borrow().clone(); + Ok(ParametersVar { + params, + _window: PhantomData, + }) + } +} + +#[cfg(test)] +mod test { + use ark_std::rand::Rng; + + use crate::crh::bowe_hopwood; + use crate::crh::pedersen; + use crate::{CRHGadget, CRH}; + use ark_ec::ProjectiveCurve; + use ark_ed_on_bls12_381::{constraints::FqVar, EdwardsParameters, Fq as Fr}; + use ark_r1cs_std::{alloc::AllocVar, uint8::UInt8, R1CSVar}; + use ark_relations::r1cs::{ConstraintSystem, ConstraintSystemRef}; + use ark_std::test_rng; + + type TestCRH = bowe_hopwood::CRH; + type TestCRHGadget = bowe_hopwood::constraints::CRHGadget; + + #[derive(Clone, PartialEq, Eq, Hash)] + pub(super) struct Window; + + impl pedersen::Window for Window { + const WINDOW_SIZE: usize = 63; + const NUM_WINDOWS: usize = 8; + } + + fn generate_input( + cs: ConstraintSystemRef, + rng: &mut R, + ) -> ([u8; 189], Vec>) { + let mut input = [1u8; 189]; + rng.fill_bytes(&mut input); + + let mut input_bytes = vec![]; + for byte in input.iter() { + input_bytes.push(UInt8::new_witness(cs.clone(), || Ok(byte)).unwrap()); + } + (input, input_bytes) + } + + #[test] + fn test_native_equality() { + let rng = &mut test_rng(); + let cs = ConstraintSystem::::new_ref(); + + let (input, input_var) = generate_input(cs.clone(), rng); + println!("number of constraints for input: {}", cs.num_constraints()); + + let parameters = TestCRH::setup(rng).unwrap(); + let primitive_result = TestCRH::evaluate(¶meters, &input).unwrap(); + + let parameters_var = >::ParametersVar::new_witness( + ark_relations::ns!(cs, "parameters_var"), + || Ok(¶meters), + ) + .unwrap(); + println!( + "number of constraints for input + params: {}", + cs.num_constraints() + ); + + let result_var = TestCRHGadget::evaluate(¶meters_var, &input_var).unwrap(); + + println!("number of constraints total: {}", cs.num_constraints()); + + let primitive_result = primitive_result.into_affine(); + assert_eq!(primitive_result, result_var.value().unwrap().into_affine()); + assert!(cs.is_satisfied().unwrap()); + } +} diff --git a/arkworks/crypto-primitives/src/crh/bowe_hopwood/mod.rs b/arkworks/crypto-primitives/src/crh/bowe_hopwood/mod.rs new file mode 100644 index 00000000..d798e972 --- /dev/null +++ b/arkworks/crypto-primitives/src/crh/bowe_hopwood/mod.rs @@ -0,0 +1,198 @@ +use crate::{Error, Vec}; +use ark_std::rand::Rng; +use ark_std::{ + fmt::{Debug, Formatter, Result as FmtResult}, + marker::PhantomData, +}; +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +use super::pedersen; +use crate::crh::CRH as CRHTrait; +use ark_ec::{ + twisted_edwards_extended::GroupProjective as TEProjective, ProjectiveCurve, TEModelParameters, +}; +use ark_ff::{biginteger::BigInteger, fields::PrimeField}; +use ark_std::cfg_chunks; +use ark_std::UniformRand; + +#[cfg(feature = "r1cs")] +pub mod constraints; + +pub const CHUNK_SIZE: usize = 3; + +#[derive(Derivative)] +#[derivative(Clone(bound = ""), Default(bound = ""))] +pub struct Parameters { + pub generators: Vec>>, +} + +pub struct CRH { + group: PhantomData
<P>
, + window: PhantomData, +} + +impl CRH { + pub fn create_generators(rng: &mut R) -> Vec>> { + let mut generators = Vec::new(); + for _ in 0..W::NUM_WINDOWS { + let mut generators_for_segment = Vec::new(); + let mut base = TEProjective::rand(rng); + for _ in 0..W::WINDOW_SIZE { + generators_for_segment.push(base); + for _ in 0..4 { + base.double_in_place(); + } + } + generators.push(generators_for_segment); + } + generators + } +} + +impl CRHTrait for CRH { + const INPUT_SIZE_BITS: usize = pedersen::CRH::, W>::INPUT_SIZE_BITS; + type Output = TEProjective
<P>
; + type Parameters = Parameters
<P>
; + + fn setup(rng: &mut R) -> Result { + fn calculate_num_chunks_in_segment() -> usize { + let upper_limit = F::modulus_minus_one_div_two(); + let mut c = 0; + let mut range = F::BigInt::from(2_u64); + while range < upper_limit { + range.muln(4); + c += 1; + } + + c + } + + let maximum_num_chunks_in_segment = calculate_num_chunks_in_segment::(); + if W::WINDOW_SIZE > maximum_num_chunks_in_segment { + panic!( + "Bowe-Hopwood-PedersenCRH hash must have a window size resulting in scalars < (p-1)/2, \ + maximum segment size is {}", + maximum_num_chunks_in_segment + ); + } + + let time = start_timer!(|| format!( + "Bowe-Hopwood-PedersenCRH::Setup: {} segments of {} 3-bit chunks; {{0,1}}^{{{}}} -> P", + W::NUM_WINDOWS, + W::WINDOW_SIZE, + W::WINDOW_SIZE * W::NUM_WINDOWS * CHUNK_SIZE + )); + let generators = Self::create_generators(rng); + end_timer!(time); + Ok(Self::Parameters { generators }) + } + + fn evaluate(parameters: &Self::Parameters, input: &[u8]) -> Result { + let eval_time = start_timer!(|| "BoweHopwoodPedersenCRH::Eval"); + + if (input.len() * 8) > W::WINDOW_SIZE * W::NUM_WINDOWS * CHUNK_SIZE { + panic!( + "incorrect input length {:?} for window params {:?}x{:?}x{}", + input.len(), + W::WINDOW_SIZE, + W::NUM_WINDOWS, + CHUNK_SIZE, + ); + } + + let mut padded_input = Vec::with_capacity(input.len()); + let input = pedersen::bytes_to_bits(input); + // Pad the input if it is not the current length. + padded_input.extend_from_slice(&input); + if input.len() % CHUNK_SIZE != 0 { + let remaining = CHUNK_SIZE - input.len() % CHUNK_SIZE; + padded_input.extend_from_slice(&vec![false; remaining]); + } + + assert_eq!(padded_input.len() % CHUNK_SIZE, 0); + + assert_eq!( + parameters.generators.len(), + W::NUM_WINDOWS, + "Incorrect pp of size {:?} for window params {:?}x{:?}x{}", + parameters.generators.len(), + W::WINDOW_SIZE, + W::NUM_WINDOWS, + CHUNK_SIZE, + ); + for generators in parameters.generators.iter() { + assert_eq!(generators.len(), W::WINDOW_SIZE); + } + assert_eq!(CHUNK_SIZE, 3); + + // Compute sum of h_i^{sum of + // (1-2*c_{i,j,2})*(1+c_{i,j,0}+2*c_{i,j,1})*2^{4*(j-1)} for all j in segment} + // for all i. Described in section 5.4.1.7 in the Zcash protocol + // specification. + + let result = cfg_chunks!(padded_input, W::WINDOW_SIZE * CHUNK_SIZE) + .zip(¶meters.generators) + .map(|(segment_bits, segment_generators)| { + cfg_chunks!(segment_bits, CHUNK_SIZE) + .zip(segment_generators) + .map(|(chunk_bits, generator)| { + let mut encoded = *generator; + if chunk_bits[0] { + encoded += generator; + } + if chunk_bits[1] { + encoded += &generator.double(); + } + if chunk_bits[2] { + encoded = -encoded; + } + encoded + }) + .sum::>() + }) + .sum::>(); + + end_timer!(eval_time); + + Ok(result) + } +} + +impl Debug for Parameters
<P>
{ + fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { + writeln!(f, "Bowe-Hopwood-Pedersen Hash Parameters {{")?; + for (i, g) in self.generators.iter().enumerate() { + writeln!(f, "\t Generator {}: {:?}", i, g)?; + } + writeln!(f, "}}") + } +} + +#[cfg(test)] +mod test { + use crate::{ + crh::{bowe_hopwood, pedersen::Window}, + CRH, + }; + use ark_ed_on_bls12_381::EdwardsParameters; + use ark_std::test_rng; + + #[test] + fn test_simple_bh() { + #[derive(Clone)] + struct TestWindow {} + impl Window for TestWindow { + const WINDOW_SIZE: usize = 63; + const NUM_WINDOWS: usize = 8; + } + + let rng = &mut test_rng(); + let params = as CRH>::setup(rng).unwrap(); + let _ = as CRH>::evaluate( + ¶ms, + &[1, 2, 3], + ) + .unwrap(); + } +} diff --git a/arkworks/crypto-primitives/src/crh/constraints.rs b/arkworks/crypto-primitives/src/crh/constraints.rs new file mode 100644 index 00000000..dddcd621 --- /dev/null +++ b/arkworks/crypto-primitives/src/crh/constraints.rs @@ -0,0 +1,44 @@ +use ark_ff::Field; +use core::fmt::Debug; + +use crate::crh::{TwoToOneCRH, CRH}; +use ark_relations::r1cs::SynthesisError; + +use ark_r1cs_std::prelude::*; + +pub trait CRHGadget: Sized { + type OutputVar: EqGadget + + ToBytesGadget + + CondSelectGadget + + AllocVar + + R1CSVar + + Debug + + Clone + + Sized; + + type ParametersVar: AllocVar + Clone; + + fn evaluate( + parameters: &Self::ParametersVar, + input: &[UInt8], + ) -> Result; +} + +pub trait TwoToOneCRHGadget: Sized { + type OutputVar: EqGadget + + ToBytesGadget + + CondSelectGadget + + AllocVar + + R1CSVar + + Debug + + Clone + + Sized; + + type ParametersVar: AllocVar + Clone; + + fn evaluate( + parameters: &Self::ParametersVar, + left_input: &[UInt8], + right_input: &[UInt8], + ) -> Result; +} diff --git a/arkworks/crypto-primitives/src/crh/injective_map/constraints.rs b/arkworks/crypto-primitives/src/crh/injective_map/constraints.rs new file mode 100644 index 00000000..f80d49fe --- /dev/null +++ b/arkworks/crypto-primitives/src/crh/injective_map/constraints.rs @@ -0,0 +1,123 @@ +use core::{fmt::Debug, marker::PhantomData}; + +use crate::crh::{ + constraints, + injective_map::{InjectiveMap, PedersenCRHCompressor, TECompressor}, + pedersen::{constraints as ped_constraints, Window}, +}; + +use ark_ec::{ + models::{ModelParameters, TEModelParameters}, + twisted_edwards_extended::GroupProjective as TEProjective, + ProjectiveCurve, +}; +use ark_ff::fields::{Field, PrimeField, SquareRootField}; +use ark_r1cs_std::{ + fields::fp::FpVar, + groups::{curves::twisted_edwards::AffineVar as TEVar, CurveVar}, + prelude::*, +}; +use ark_relations::r1cs::SynthesisError; + +type ConstraintF = <::BaseField as Field>::BasePrimeField; + +pub trait InjectiveMapGadget< + C: ProjectiveCurve, + I: InjectiveMap, + GG: CurveVar>, +> where + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, +{ + type OutputVar: EqGadget> + + ToBytesGadget> + + CondSelectGadget> + + AllocVar> + + R1CSVar, Value = I::Output> + + Debug + + Clone + + Sized; + + fn evaluate(ge: &GG) -> Result; +} + +pub struct TECompressorGadget; + +impl InjectiveMapGadget, TECompressor, TEVar>> + for TECompressorGadget +where + F: PrimeField + SquareRootField, + P: TEModelParameters + ModelParameters, +{ + type OutputVar = FpVar; + + fn evaluate(ge: &TEVar>) -> Result { + Ok(ge.x.clone()) + } +} + +pub struct PedersenCRHCompressorGadget +where + C: ProjectiveCurve, + I: InjectiveMap, + W: Window, + GG: CurveVar>, + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, + IG: InjectiveMapGadget, +{ + #[doc(hidden)] + _compressor: 
PhantomData, + #[doc(hidden)] + _compressor_gadget: PhantomData, + #[doc(hidden)] + _crh: ped_constraints::CRHGadget, +} + +impl constraints::CRHGadget, ConstraintF> + for PedersenCRHCompressorGadget +where + C: ProjectiveCurve, + I: InjectiveMap, + GG: CurveVar>, + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, + IG: InjectiveMapGadget, + W: Window, +{ + type OutputVar = IG::OutputVar; + type ParametersVar = ped_constraints::CRHParametersVar; + + #[tracing::instrument(target = "r1cs", skip(parameters, input))] + fn evaluate( + parameters: &Self::ParametersVar, + input: &[UInt8>], + ) -> Result { + let result = ped_constraints::CRHGadget::::evaluate(parameters, input)?; + IG::evaluate(&result) + } +} + +impl constraints::TwoToOneCRHGadget, ConstraintF> + for PedersenCRHCompressorGadget +where + C: ProjectiveCurve, + I: InjectiveMap, + GG: CurveVar>, + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, + IG: InjectiveMapGadget, + W: Window, +{ + type OutputVar = IG::OutputVar; + type ParametersVar = ped_constraints::CRHParametersVar; + + #[tracing::instrument(target = "r1cs", skip(parameters))] + fn evaluate( + parameters: &Self::ParametersVar, + left_input: &[UInt8>], + right_input: &[UInt8>], + ) -> Result { + // assume equality of left and right length + assert_eq!(left_input.len(), right_input.len()); + let result = + ped_constraints::CRHGadget::::evaluate(parameters, left_input, right_input)?; + IG::evaluate(&result) + } +} diff --git a/arkworks/crypto-primitives/src/crh/injective_map/mod.rs b/arkworks/crypto-primitives/src/crh/injective_map/mod.rs new file mode 100644 index 00000000..13bbe774 --- /dev/null +++ b/arkworks/crypto-primitives/src/crh/injective_map/mod.rs @@ -0,0 +1,98 @@ +use crate::{CryptoError, Error}; +use ark_ff::bytes::ToBytes; +use ark_std::rand::Rng; +use ark_std::{fmt::Debug, hash::Hash, marker::PhantomData}; + +use super::{pedersen, TwoToOneCRH, CRH}; +use ark_ec::{ + models::{ModelParameters, TEModelParameters}, + twisted_edwards_extended::{GroupAffine as TEAffine, GroupProjective as TEProjective}, + ProjectiveCurve, +}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; + +#[cfg(feature = "r1cs")] +pub mod constraints; + +pub trait InjectiveMap { + type Output: ToBytes + + Clone + + Eq + + Hash + + Default + + Debug + + CanonicalSerialize + + CanonicalDeserialize; + + fn injective_map(ge: &C::Affine) -> Result; +} + +pub struct TECompressor; + +impl InjectiveMap> for TECompressor { + type Output =
<P as ModelParameters>
::BaseField; + + fn injective_map(ge: &TEAffine
<P>
) -> Result { + debug_assert!(ge.is_in_correct_subgroup_assuming_on_curve()); + Ok(ge.x) + } +} + +pub struct PedersenCRHCompressor, W: pedersen::Window> { + _group: PhantomData, + _compressor: PhantomData, + _crh: pedersen::CRH, +} + +impl, W: pedersen::Window> CRH + for PedersenCRHCompressor +{ + const INPUT_SIZE_BITS: usize = pedersen::CRH::::INPUT_SIZE_BITS; + type Output = I::Output; + type Parameters = pedersen::Parameters; + + fn setup(rng: &mut R) -> Result { + let time = start_timer!(|| format!("PedersenCRHCompressor::Setup")); + let params = as CRH>::setup(rng); + end_timer!(time); + params + } + + fn evaluate(parameters: &Self::Parameters, input: &[u8]) -> Result { + let eval_time = start_timer!(|| "PedersenCRHCompressor::Eval"); + let result = I::injective_map(& as CRH>::evaluate(parameters, input)?)?; + end_timer!(eval_time); + Ok(result) + } +} + +impl, W: pedersen::Window> TwoToOneCRH + for PedersenCRHCompressor +{ + const LEFT_INPUT_SIZE_BITS: usize = pedersen::CRH::::LEFT_INPUT_SIZE_BITS; + const RIGHT_INPUT_SIZE_BITS: usize = pedersen::CRH::::RIGHT_INPUT_SIZE_BITS; + type Output = I::Output; + type Parameters = pedersen::Parameters; + + fn setup(r: &mut R) -> Result { + as TwoToOneCRH>::setup(r) + } + + /// A simple implementation method: just concat the left input and right input together + /// + /// `evaluate` requires that `left_input` and `right_input` are of equal length. + fn evaluate( + parameters: &Self::Parameters, + left_input: &[u8], + right_input: &[u8], + ) -> Result { + let eval_time = start_timer!(|| "PedersenCRHCompressor::Eval"); + let result = I::injective_map(& as TwoToOneCRH>::evaluate( + parameters, + left_input, + right_input, + )?)?; + end_timer!(eval_time); + Ok(result) + } +} diff --git a/arkworks/crypto-primitives/src/crh/mod.rs b/arkworks/crypto-primitives/src/crh/mod.rs new file mode 100644 index 00000000..41196d1d --- /dev/null +++ b/arkworks/crypto-primitives/src/crh/mod.rs @@ -0,0 +1,67 @@ +#![allow(clippy::upper_case_acronyms)] + +use ark_ff::bytes::ToBytes; +use ark_std::hash::Hash; +use ark_std::rand::Rng; + +pub mod bowe_hopwood; +pub mod injective_map; +pub mod pedersen; +pub mod poseidon; + +use crate::Error; + +#[cfg(feature = "r1cs")] +pub mod constraints; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +#[cfg(feature = "r1cs")] +pub use constraints::*; + +/// Interface to CRH. Note that in this release, while all implementations of `CRH` have fixed length, +/// variable length CRH may also implement this trait in future. +pub trait CRH { + const INPUT_SIZE_BITS: usize; + + type Output: ToBytes + + Clone + + Eq + + core::fmt::Debug + + Hash + + Default + + CanonicalSerialize + + CanonicalDeserialize; + type Parameters: Clone + Default; + + fn setup(r: &mut R) -> Result; + fn evaluate(parameters: &Self::Parameters, input: &[u8]) -> Result; +} + +pub trait TwoToOneCRH { + /// The bit size of the left input. + const LEFT_INPUT_SIZE_BITS: usize; + /// The bit size of the right input. + const RIGHT_INPUT_SIZE_BITS: usize; + + type Output: ToBytes + + Clone + + Eq + + core::fmt::Debug + + Hash + + Default + + CanonicalSerialize + + CanonicalDeserialize; + type Parameters: Clone + Default; + + fn setup(r: &mut R) -> Result; + /// Evaluates this CRH on the left and right inputs. + /// + /// # Panics + /// + /// If `left_input.len() != Self::LEFT_INPUT_SIZE_BITS`, or if + /// `right_input.len() != Self::RIGHT_INPUT_SIZE_BITS`, then this method panics. 
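+ ///
+ /// Illustrative call shape (`H`, `params`, `left`, and `right` are
+ /// placeholders for a concrete implementation and its byte inputs):
+ ///
+ /// ```ignore
+ /// let digest = <H as TwoToOneCRH>::evaluate(&params, &left, &right)?;
+ /// ```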
+ fn evaluate( + parameters: &Self::Parameters, + left_input: &[u8], + right_input: &[u8], + ) -> Result; +} diff --git a/arkworks/crypto-primitives/src/crh/pedersen/constraints.rs b/arkworks/crypto-primitives/src/crh/pedersen/constraints.rs new file mode 100644 index 00000000..aec9987a --- /dev/null +++ b/arkworks/crypto-primitives/src/crh/pedersen/constraints.rs @@ -0,0 +1,212 @@ +use crate::{ + crh::{ + pedersen::{Parameters, Window, CRH}, + CRHGadget as CRHGadgetTrait, + }, + Vec, +}; +use ark_ec::ProjectiveCurve; +use ark_ff::Field; +use ark_r1cs_std::prelude::*; +use ark_relations::r1cs::{Namespace, SynthesisError}; + +use crate::crh::TwoToOneCRHGadget; +use core::{borrow::Borrow, marker::PhantomData}; + +#[derive(Derivative)] +#[derivative(Clone(bound = "C: ProjectiveCurve, GG: CurveVar>"))] +pub struct CRHParametersVar>> +where + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, +{ + params: Parameters, + #[doc(hidden)] + _group_g: PhantomData, +} + +type ConstraintF = <::BaseField as Field>::BasePrimeField; +pub struct CRHGadget>, W: Window> +where + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, +{ + #[doc(hidden)] + _group: PhantomData<*const C>, + #[doc(hidden)] + _group_var: PhantomData<*const GG>, + #[doc(hidden)] + _window: PhantomData<*const W>, +} + +impl CRHGadgetTrait, ConstraintF> for CRHGadget +where + C: ProjectiveCurve, + GG: CurveVar>, + W: Window, + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, +{ + type OutputVar = GG; + type ParametersVar = CRHParametersVar; + + #[tracing::instrument(target = "r1cs", skip(parameters, input))] + fn evaluate( + parameters: &Self::ParametersVar, + input: &[UInt8>], + ) -> Result { + let mut padded_input = input.to_vec(); + // Pad the input if it is not the current length. + if input.len() * 8 < W::WINDOW_SIZE * W::NUM_WINDOWS { + let current_length = input.len(); + for _ in current_length..(W::WINDOW_SIZE * W::NUM_WINDOWS / 8) { + padded_input.push(UInt8::constant(0u8)); + } + } + assert_eq!(padded_input.len() * 8, W::WINDOW_SIZE * W::NUM_WINDOWS); + assert_eq!(parameters.params.generators.len(), W::NUM_WINDOWS); + + // Allocate new variable for the result. 
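+ // Each WINDOW_SIZE-bit chunk below indexes the precomputed powers
+ // 2^j * G_i of its window generator, so the whole hash is a single
+ // fixed-base multiscalar multiplication over the input bits.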
+ let input_in_bits: Vec> = padded_input + .iter() + .flat_map(|b| b.to_bits_le().unwrap()) + .collect(); + let input_in_bits = input_in_bits.chunks(W::WINDOW_SIZE); + let result = + GG::precomputed_base_multiscalar_mul_le(¶meters.params.generators, input_in_bits)?; + Ok(result) + } +} + +impl TwoToOneCRHGadget, ConstraintF> for CRHGadget +where + C: ProjectiveCurve, + GG: CurveVar>, + W: Window, + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, +{ + type OutputVar = GG; + type ParametersVar = CRHParametersVar; + + #[tracing::instrument(target = "r1cs", skip(parameters))] + fn evaluate( + parameters: &Self::ParametersVar, + left_input: &[UInt8>], + right_input: &[UInt8>], + ) -> Result { + // assume equality of left and right length + assert_eq!(left_input.len(), right_input.len()); + let chained_input: Vec<_> = left_input + .to_vec() + .into_iter() + .chain(right_input.to_vec().into_iter()) + .collect(); + >::evaluate(parameters, &chained_input) + } +} + +impl AllocVar, ConstraintF> for CRHParametersVar +where + C: ProjectiveCurve, + GG: CurveVar>, + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, +{ + #[tracing::instrument(target = "r1cs", skip(_cs, f))] + fn new_variable>>( + _cs: impl Into>>, + f: impl FnOnce() -> Result, + _mode: AllocationMode, + ) -> Result { + let params = f()?.borrow().clone(); + Ok(CRHParametersVar { + params, + _group_g: PhantomData, + }) + } +} + +#[cfg(test)] +mod test { + use crate::crh::{pedersen, CRHGadget, TwoToOneCRH, TwoToOneCRHGadget, CRH}; + use ark_ed_on_bls12_381::{constraints::EdwardsVar, EdwardsProjective as JubJub, Fq as Fr}; + use ark_r1cs_std::prelude::*; + use ark_relations::r1cs::{ConstraintSystem, ConstraintSystemRef}; + use ark_std::rand::Rng; + use ark_std::test_rng; + + type TestCRH = pedersen::CRH; + type TestCRHGadget = pedersen::constraints::CRHGadget; + + #[derive(Clone, PartialEq, Eq, Hash)] + pub(super) struct Window; + + impl pedersen::Window for Window { + const WINDOW_SIZE: usize = 128; + const NUM_WINDOWS: usize = 8; + } + + fn generate_u8_input( + cs: ConstraintSystemRef, + size: usize, + rng: &mut R, + ) -> (Vec, Vec>) { + let mut input = vec![1u8; size]; + rng.fill_bytes(&mut input); + + let mut input_bytes = vec![]; + for byte in input.iter() { + input_bytes.push(UInt8::new_witness(cs.clone(), || Ok(byte)).unwrap()); + } + (input, input_bytes) + } + + #[test] + fn test_native_equality() { + let rng = &mut test_rng(); + let cs = ConstraintSystem::::new_ref(); + + let (input, input_var) = generate_u8_input(cs.clone(), 128, rng); + + let parameters = ::setup(rng).unwrap(); + let primitive_result = ::evaluate(¶meters, &input).unwrap(); + + let parameters_var = pedersen::constraints::CRHParametersVar::new_constant( + ark_relations::ns!(cs, "CRH Parameters"), + ¶meters, + ) + .unwrap(); + + let result_var = + >::evaluate(¶meters_var, &input_var).unwrap(); + + let primitive_result = primitive_result; + assert_eq!(primitive_result, result_var.value().unwrap()); + assert!(cs.is_satisfied().unwrap()); + } + + #[test] + fn test_naive_two_to_one_equality() { + let rng = &mut test_rng(); + let cs = ConstraintSystem::::new_ref(); + + let (left_input, left_input_var) = generate_u8_input(cs.clone(), 64, rng); + let (right_input, right_input_var) = generate_u8_input(cs.clone(), 64, rng); + let parameters = ::setup(rng).unwrap(); + let primitive_result = + ::evaluate(¶meters, &left_input, &right_input).unwrap(); + + let parameters_var = pedersen::constraints::CRHParametersVar::new_constant( + ark_relations::ns!(cs, "CRH Parameters"), + ¶meters, + ) + 
.unwrap(); + + let result_var = >::evaluate( + ¶meters_var, + &left_input_var, + &right_input_var, + ) + .unwrap(); + + let primitive_result = primitive_result; + assert_eq!(primitive_result, result_var.value().unwrap()); + assert!(cs.is_satisfied().unwrap()); + } +} diff --git a/arkworks/crypto-primitives/src/crh/pedersen/mod.rs b/arkworks/crypto-primitives/src/crh/pedersen/mod.rs new file mode 100644 index 00000000..a5bb201d --- /dev/null +++ b/arkworks/crypto-primitives/src/crh/pedersen/mod.rs @@ -0,0 +1,189 @@ +use crate::{Error, Vec}; +use ark_std::rand::Rng; +use ark_std::{ + fmt::{Debug, Formatter, Result as FmtResult}, + marker::PhantomData, +}; +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +use crate::crh::{TwoToOneCRH, CRH as CRHTrait}; +use ark_ec::ProjectiveCurve; +use ark_ff::{Field, ToConstraintField}; +use ark_std::cfg_chunks; + +#[cfg(feature = "r1cs")] +pub mod constraints; + +pub trait Window: Clone { + const WINDOW_SIZE: usize; + const NUM_WINDOWS: usize; +} + +#[derive(Clone, Default)] +pub struct Parameters { + pub generators: Vec>, +} + +pub struct CRH { + group: PhantomData, + window: PhantomData, +} + +impl CRH { + pub fn create_generators(rng: &mut R) -> Vec> { + let mut generators_powers = Vec::new(); + for _ in 0..W::NUM_WINDOWS { + generators_powers.push(Self::generator_powers(W::WINDOW_SIZE, rng)); + } + generators_powers + } + + pub fn generator_powers(num_powers: usize, rng: &mut R) -> Vec { + let mut cur_gen_powers = Vec::with_capacity(num_powers); + let mut base = C::rand(rng); + for _ in 0..num_powers { + cur_gen_powers.push(base); + base.double_in_place(); + } + cur_gen_powers + } +} + +impl CRHTrait for CRH { + const INPUT_SIZE_BITS: usize = W::WINDOW_SIZE * W::NUM_WINDOWS; + type Output = C::Affine; + type Parameters = Parameters; + + fn setup(rng: &mut R) -> Result { + let time = start_timer!(|| format!( + "PedersenCRH::Setup: {} {}-bit windows; {{0,1}}^{{{}}} -> C", + W::NUM_WINDOWS, + W::WINDOW_SIZE, + W::NUM_WINDOWS * W::WINDOW_SIZE + )); + let generators = Self::create_generators(rng); + end_timer!(time); + Ok(Self::Parameters { generators }) + } + + fn evaluate(parameters: &Self::Parameters, input: &[u8]) -> Result { + let eval_time = start_timer!(|| "PedersenCRH::Eval"); + + if (input.len() * 8) > W::WINDOW_SIZE * W::NUM_WINDOWS { + panic!( + "incorrect input length {:?} for window params {:?}✕{:?}", + input.len(), + W::WINDOW_SIZE, + W::NUM_WINDOWS + ); + } + + let mut padded_input = Vec::with_capacity(input.len()); + let mut input = input; + // Pad the input if it is not the current length. + if (input.len() * 8) < W::WINDOW_SIZE * W::NUM_WINDOWS { + padded_input.extend_from_slice(input); + let padded_length = (W::WINDOW_SIZE * W::NUM_WINDOWS) / 8; + padded_input.resize(padded_length, 0u8); + input = padded_input.as_slice(); + } + + assert_eq!( + parameters.generators.len(), + W::NUM_WINDOWS, + "Incorrect pp of size {:?}✕{:?} for window params {:?}✕{:?}", + parameters.generators[0].len(), + parameters.generators.len(), + W::WINDOW_SIZE, + W::NUM_WINDOWS + ); + + // Compute sum of h_i^{m_i} for all i. 
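+ // Written additively: result = sum_{i,j} b_{i,j} * (2^j * G_i), where
+ // b_{i,j} is the j-th input bit of window i and generators[i][j] = 2^j * G_i.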
+ let bits = bytes_to_bits(input); + let result = cfg_chunks!(bits, W::WINDOW_SIZE) + .zip(¶meters.generators) + .map(|(bits, generator_powers)| { + let mut encoded = C::zero(); + for (bit, base) in bits.iter().zip(generator_powers.iter()) { + if *bit { + encoded += base; + } + } + encoded + }) + .sum::(); + + end_timer!(eval_time); + + Ok(result.into()) + } +} + +impl TwoToOneCRH for CRH { + const LEFT_INPUT_SIZE_BITS: usize = W::WINDOW_SIZE * W::NUM_WINDOWS / 2; + const RIGHT_INPUT_SIZE_BITS: usize = Self::LEFT_INPUT_SIZE_BITS; + type Output = C::Affine; + type Parameters = Parameters; + + fn setup(r: &mut R) -> Result { + ::setup(r) + } + + /// A simple implementation method: just concat the left input and right input together + /// + /// `evaluate` requires that `left_input` and `right_input` are of equal length. + fn evaluate( + parameters: &Self::Parameters, + left_input: &[u8], + right_input: &[u8], + ) -> Result { + assert_eq!( + left_input.len(), + right_input.len(), + "left and right input should be of equal length" + ); + // check overflow + + debug_assert!(left_input.len() * 8 <= Self::LEFT_INPUT_SIZE_BITS); + + let mut buffer = vec![0u8; (Self::LEFT_INPUT_SIZE_BITS + Self::RIGHT_INPUT_SIZE_BITS) / 8]; + + buffer + .iter_mut() + .zip(left_input.iter().chain(right_input.iter())) + .for_each(|(b, l_b)| *b = *l_b); + + ::evaluate(parameters, &buffer) + } +} + +pub fn bytes_to_bits(bytes: &[u8]) -> Vec { + let mut bits = Vec::with_capacity(bytes.len() * 8); + for byte in bytes { + for i in 0..8 { + let bit = (*byte >> i) & 1; + bits.push(bit == 1) + } + } + bits +} + +impl Debug for Parameters { + fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { + writeln!(f, "Pedersen Hash Parameters {{")?; + for (i, g) in self.generators.iter().enumerate() { + writeln!(f, "\t Generator {}: {:?}", i, g)?; + } + writeln!(f, "}}") + } +} + +impl> + ToConstraintField for Parameters +{ + #[inline] + fn to_field_elements(&self) -> Option> { + Some(Vec::new()) + } +} diff --git a/arkworks/crypto-primitives/src/crh/poseidon/constraints.rs b/arkworks/crypto-primitives/src/crh/poseidon/constraints.rs new file mode 100644 index 00000000..f051a5ed --- /dev/null +++ b/arkworks/crypto-primitives/src/crh/poseidon/constraints.rs @@ -0,0 +1,298 @@ +use super::sbox::constraints::SboxConstraints; +use super::PoseidonRoundParams; +use super::{Poseidon, CRH}; +use crate::CRHGadget as CRHGadgetTrait; +use ark_ff::PrimeField; +use ark_r1cs_std::fields::fp::FpVar; +use ark_r1cs_std::uint8::UInt8; +use ark_r1cs_std::ToConstraintFieldGadget; +use ark_r1cs_std::{alloc::AllocVar, fields::FieldVar, prelude::*}; +use ark_relations::r1cs::{Namespace, SynthesisError}; +use ark_std::vec::Vec; + +use crate::crh::TwoToOneCRHGadget; +use ark_std::borrow::ToOwned; +use ark_std::marker::PhantomData; +use core::borrow::Borrow; + +#[derive(Derivative, Clone)] +pub struct PoseidonRoundParamsVar> { + params: Poseidon, +} + +pub struct CRHGadget> { + field: PhantomData, + params: PhantomData>, +} + +impl> PoseidonRoundParamsVar { + fn permute(&self, input: Vec>) -> Result>, SynthesisError> { + let width = P::WIDTH; + assert_eq!(input.len(), width); + + let full_rounds_beginning = P::FULL_ROUNDS_BEGINNING; + let partial_rounds = P::PARTIAL_ROUNDS; + let full_rounds_end = P::FULL_ROUNDS_END; + + let mut input_vars: Vec> = input; + + let mut round_keys_offset = 0; + + // ------------ First rounds with full SBox begin -------------------- + + for _k in 0..full_rounds_beginning { + // TODO: Check if Scalar::default() can be replaced by 
FpVar::one() or FpVar::zero() + let mut sbox_outputs: Vec> = vec![FpVar::::one(); width]; + + // Substitution (S-box) layer + for i in 0..width { + let round_key = self.params.round_keys[round_keys_offset]; + sbox_outputs[i] = P::SBOX + .synthesize_sbox(input_vars[i].clone(), round_key)? + .into(); + + round_keys_offset += 1; + } + + // TODO: Check if Scalar::default() can be replaced by FpVar::one() + let mut next_input_vars: Vec> = vec![FpVar::::one(); width]; + + self.apply_linear_layer( + width, + sbox_outputs, + &mut next_input_vars, + &self.params.mds_matrix, + ); + + for i in 0..width { + // replace input_vars with next_input_vars + input_vars[i] = next_input_vars.remove(0); + } + } + + // ------------ First rounds with full SBox begin -------------------- + + // ------------ Middle rounds with partial SBox begin -------------------- + + for _k in full_rounds_beginning..(full_rounds_beginning + partial_rounds) { + let mut sbox_outputs: Vec> = vec![FpVar::::one(); width]; + + // Substitution (S-box) layer + for i in 0..width { + let round_key = self.params.round_keys[round_keys_offset]; + + // apply Sbox to only 1 element of the state. + // Here the last one is chosen but the choice is arbitrary. + if i == width - 1 { + sbox_outputs[i] = P::SBOX + .synthesize_sbox(input_vars[i].clone(), round_key)? + .into(); + } else { + sbox_outputs[i] = input_vars[i].clone() + round_key; + } + + round_keys_offset += 1; + } + + // Linear layer + // TODO: Check if Scalar::default() can be replaced by FpVar::one() + let mut next_input_vars: Vec> = vec![FpVar::::one(); width]; + + self.apply_linear_layer( + width, + sbox_outputs, + &mut next_input_vars, + &self.params.mds_matrix, + ); + + for i in 0..width { + // replace input_vars with simplified next_input_vars + input_vars[i] = next_input_vars.remove(0); + } + } + + // ------------ Middle rounds with partial SBox end -------------------- + + // ------------ Last rounds with full SBox begin -------------------- + + for _k in (full_rounds_beginning + partial_rounds) + ..(full_rounds_beginning + partial_rounds + full_rounds_end) + { + // TODO: Check if Scalar::default() can be replaced by FpVar::one() + let mut sbox_outputs: Vec> = vec![FpVar::::one(); width]; + + // Substitution (S-box) layer + for i in 0..width { + let round_key = self.params.round_keys[round_keys_offset]; + sbox_outputs[i] = P::SBOX + .synthesize_sbox(input_vars[i].clone(), round_key)? + .into(); + + round_keys_offset += 1; + } + + // Linear layer + // TODO: Check if Scalar::default() can be replaced by FpVar::one() + let mut next_input_vars: Vec> = vec![FpVar::::one(); width]; + + self.apply_linear_layer( + width, + sbox_outputs, + &mut next_input_vars, + &self.params.mds_matrix, + ); + + for i in 0..width { + // replace input_vars with next_input_vars + input_vars[i] = next_input_vars.remove(0); + } + } + + // ------------ Last rounds with full SBox end -------------------- + + Ok(input_vars) + } + + fn apply_linear_layer( + &self, + width: usize, + sbox_outs: Vec>, + next_inputs: &mut Vec>, + mds_matrix: &Vec>, + ) { + for j in 0..width { + for i in 0..width { + next_inputs[i] = next_inputs[i].clone() + + sbox_outs[j].clone() * &FpVar::::Constant(mds_matrix[i][j]); + } + } + } + + fn hash_2( + &self, + xl: FpVar, + xr: FpVar, + statics: Vec>, + ) -> Result, SynthesisError> { + let width = P::WIDTH; + // Only 2 inputs to the permutation are set to the input of this hash + // function. 
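+ // For the 2-to-1 case (WIDTH = 6) the permutation state is laid out as
+ //     [0, xl, xr, PADDING_CONST, 0, 0],
+ // with the zeros and the padding constant supplied via `statics`; the
+ // digest is read from position 1 of the permuted state.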
+ assert_eq!(statics.len(), width - 2); + + // Always keep the 1st input as 0 + let mut inputs = vec![statics[0].to_owned()]; + inputs.push(xl); + inputs.push(xr); + + // statics correspond to committed variables with values as PADDING_CONST + // and 0s and randomness as 0 + for i in 1..statics.len() { + inputs.push(statics[i].to_owned()); + } + let permutation_output = self.permute(inputs)?; + Ok(permutation_output[1].clone()) + } + + fn hash_4( + &self, + input: &[FpVar], + statics: Vec>, + ) -> Result, SynthesisError> { + assert_eq!(input.len(), 4); + let width = P::WIDTH; + // Only 4 inputs to the permutation are set to the input of this hash + // function. + assert_eq!(statics.len(), width - 4); + // Always keep the 1st input as 0 + let mut inputs = vec![statics[0].to_owned()]; + inputs.push(input[0].clone()); + inputs.push(input[1].clone()); + inputs.push(input[2].clone()); + inputs.push(input[3].clone()); + + // statics correspond to committed variables with values as PADDING_CONST + // and 0s and randomness as 0 + for i in 1..statics.len() { + inputs.push(statics[i].to_owned()); + } + + let permutation_output = self.permute(inputs)?; + Ok(permutation_output[1].to_owned()) + } +} + +// https://github.com/arkworks-rs/r1cs-std/blob/master/src/bits/uint8.rs#L343 +impl> CRHGadgetTrait, F> for CRHGadget { + type OutputVar = FpVar; + type ParametersVar = PoseidonRoundParamsVar; + + fn evaluate( + parameters: &Self::ParametersVar, + input: &[UInt8], + ) -> Result { + let f_var_vec: Vec> = input.to_constraint_field()?; + + // Choice is arbitrary + let padding_const: F = F::from(101u32); + let zero_const: F = F::zero(); + + let statics = match f_var_vec.len() { + 2 => { + vec![ + FpVar::::Constant(zero_const), + FpVar::::Constant(padding_const), + FpVar::::Constant(zero_const), + FpVar::::Constant(zero_const), + ] + } + 4 => { + vec![ + FpVar::::Constant(zero_const), + FpVar::::Constant(padding_const), + ] + } + _ => panic!("incorrect number (elements) for poseidon hash"), + }; + + let result = match f_var_vec.len() { + 2 => parameters.hash_2(f_var_vec[0].clone(), f_var_vec[1].clone(), statics), + 4 => parameters.hash_4(&f_var_vec, statics), + _ => panic!("incorrect number (elements) for poseidon hash"), + }; + Ok(result.unwrap_or(Self::OutputVar::zero())) + } +} + +impl> TwoToOneCRHGadget, F> for CRHGadget { + type OutputVar = FpVar; + type ParametersVar = PoseidonRoundParamsVar; + + fn evaluate( + parameters: &Self::ParametersVar, + left_input: &[UInt8], + right_input: &[UInt8], + ) -> Result { + // assume equality of left and right length + assert_eq!(left_input.len(), right_input.len()); + let chained_input: Vec<_> = left_input + .to_vec() + .into_iter() + .chain(right_input.to_vec().into_iter()) + .collect(); + >::evaluate(parameters, &chained_input) + } +} + +impl> AllocVar, F> + for PoseidonRoundParamsVar +{ + #[tracing::instrument(target = "r1cs", skip(_cs, f))] + fn new_variable>>( + _cs: impl Into>, + f: impl FnOnce() -> Result, + _mode: AllocationMode, + ) -> Result { + let params = f()?.borrow().clone(); + Ok(Self { params }) + } +} diff --git a/arkworks/crypto-primitives/src/crh/poseidon/mod.rs b/arkworks/crypto-primitives/src/crh/poseidon/mod.rs new file mode 100644 index 00000000..d118700c --- /dev/null +++ b/arkworks/crypto-primitives/src/crh/poseidon/mod.rs @@ -0,0 +1,226 @@ +use crate::crh::poseidon::sbox::PoseidonSbox; +use crate::{Error, Vec, CRH as CRHTrait}; +use ark_std::marker::PhantomData; +use ark_std::rand::Rng; + +use crate::crh::TwoToOneCRH; +use 
ark_ff::fields::PrimeField; +use ark_ff::ToConstraintField; + +pub mod sbox; + +#[cfg(feature = "r1cs")] +pub mod constraints; + +// Choice is arbitrary +pub const PADDING_CONST: u64 = 101; +pub const ZERO_CONST: u64 = 0; + +pub trait PoseidonRoundParams: Default + Clone { + /// The size of the permutation, in field elements. + const WIDTH: usize; + /// Number of full SBox rounds in beginning + const FULL_ROUNDS_BEGINNING: usize; + /// Number of full SBox rounds in end + const FULL_ROUNDS_END: usize; + /// Number of partial rounds + const PARTIAL_ROUNDS: usize; + /// The S-box to apply in the sub words layer. + const SBOX: PoseidonSbox; +} + +/// The Poseidon permutation. +#[derive(Default, Clone)] +pub struct Poseidon { + pub params: P, + /// The round key constants + pub round_keys: Vec, + /// The MDS matrix to apply in the mix layer. + pub mds_matrix: Vec>, +} + +impl> Poseidon { + fn permute(&self, input: &[F]) -> Vec { + let width = P::WIDTH; + assert_eq!(input.len(), width); + + let full_rounds_beginning = P::FULL_ROUNDS_BEGINNING; + let partial_rounds = P::PARTIAL_ROUNDS; + let full_rounds_end = P::FULL_ROUNDS_END; + + let mut current_state = input.to_vec(); + let mut current_state_temp = vec![F::zero(); width]; + + let mut round_keys_offset = 0; + + // full Sbox rounds + for _ in 0..full_rounds_beginning { + // Sbox layer + for i in 0..width { + current_state[i] += self.round_keys[round_keys_offset]; + current_state[i] = P::SBOX.apply_sbox(current_state[i]); + round_keys_offset += 1; + } + + // linear layer + for j in 0..width { + for i in 0..width { + current_state_temp[i] += current_state[j] * self.mds_matrix[i][j]; + } + } + + // Output of this round becomes input to next round + for i in 0..width { + current_state[i] = current_state_temp[i]; + current_state_temp[i] = F::zero(); + } + } + + // middle partial Sbox rounds + for _ in full_rounds_beginning..(full_rounds_beginning + partial_rounds) { + for i in 0..width { + current_state[i] += &self.round_keys[round_keys_offset]; + round_keys_offset += 1; + } + + // partial Sbox layer, apply Sbox to only 1 element of the state. + // Here the last one is chosen but the choice is arbitrary. + current_state[width - 1] = P::SBOX.apply_sbox(current_state[width - 1]); + + // linear layer + for j in 0..width { + for i in 0..width { + current_state_temp[i] += current_state[j] * self.mds_matrix[i][j]; + } + } + + // Output of this round becomes input to next round + for i in 0..width { + current_state[i] = current_state_temp[i]; + current_state_temp[i] = F::zero(); + } + } + + // last full Sbox rounds + for _ in full_rounds_beginning + partial_rounds + ..(full_rounds_beginning + partial_rounds + full_rounds_end) + { + // Sbox layer + for i in 0..width { + current_state[i] += self.round_keys[round_keys_offset]; + current_state[i] = P::SBOX.apply_sbox(current_state[i]); + round_keys_offset += 1; + } + + // linear layer + for j in 0..width { + for i in 0..width { + current_state_temp[i] += current_state[j] * self.mds_matrix[i][j]; + } + } + + // Output of this round becomes input to next round + for i in 0..width { + current_state[i] = current_state_temp[i]; + current_state_temp[i] = F::zero(); + } + } + + // Finally the current_state becomes the output + current_state + } + + pub fn hash_2(&self, xl: F, xr: F) -> F { + // Only 2 inputs to the permutation are set to the input of this hash + // function, one is set to the padding constant and rest are 0. 
Always keep + // the 1st input as 0 + let input = vec![ + F::from(ZERO_CONST), + xl, + xr, + F::from(PADDING_CONST), + F::from(ZERO_CONST), + F::from(ZERO_CONST), + ]; + + // Never take the first output + self.permute(&input)[1] + } + + pub fn hash_4(&self, inputs: [F; 4]) -> F { + // Only 4 inputs to the permutation are set to the input of this hash + // function, one is set to the padding constant and one is set to 0. Always + // keep the 1st input as 0 + let input = vec![ + F::from(ZERO_CONST), + inputs[0], + inputs[1], + inputs[2], + inputs[3], + F::from(PADDING_CONST), + ]; + + // Never take the first output + self.permute(&input)[1] + } +} + +pub struct CRH> { + field: PhantomData, + params: PhantomData
<P>
, +} + +impl> CRHTrait for CRH { + const INPUT_SIZE_BITS: usize = 32; + type Output = F; + type Parameters = Poseidon; + + fn setup(_rng: &mut R) -> Result { + // automatic generation of parameters are not implemented yet + // therefore, the developers must specify the parameters themselves + unimplemented!() + } + + // https://github.com/arkworks-rs/algebra/blob/master/ff/src/to_field_vec.rs + fn evaluate(parameters: &Self::Parameters, input: &[u8]) -> Result { + let eval_time = start_timer!(|| "PoseidonCRH::Eval"); + let elts: Vec = input.to_field_elements().unwrap_or_default(); + let result = match elts.len() { + 2 => parameters.hash_2(elts[0], elts[1]), + 4 => parameters.hash_4([elts[0], elts[1], elts[2], elts[3]]), + _ => panic!("incorrect number of windows (elements) for poseidon hash"), + }; + + end_timer!(eval_time); + + Ok(result) + } +} + +impl> TwoToOneCRH for CRH { + const LEFT_INPUT_SIZE_BITS: usize = Self::INPUT_SIZE_BITS / 2; + const RIGHT_INPUT_SIZE_BITS: usize = Self::INPUT_SIZE_BITS / 2; + type Output = F; + type Parameters = Poseidon; + + fn setup(rng: &mut R) -> Result { + ::setup(rng) + } + + /// A simple implementation of TwoToOneCRH by asserting left and right input has same length and chain them together. + fn evaluate( + parameters: &Self::Parameters, + left_input: &[u8], + right_input: &[u8], + ) -> Result { + assert_eq!(left_input.len(), right_input.len()); + assert!(left_input.len() * 8 <= Self::LEFT_INPUT_SIZE_BITS); + let chained: Vec<_> = left_input + .iter() + .chain(right_input.iter()) + .copied() + .collect(); + + ::evaluate(parameters, &chained) + } +} diff --git a/arkworks/crypto-primitives/src/crh/poseidon/sbox/constraints.rs b/arkworks/crypto-primitives/src/crh/poseidon/sbox/constraints.rs new file mode 100644 index 00000000..b883fefd --- /dev/null +++ b/arkworks/crypto-primitives/src/crh/poseidon/sbox/constraints.rs @@ -0,0 +1,64 @@ +use super::PoseidonSbox; +use ark_ff::PrimeField; +use ark_r1cs_std::fields::fp::FpVar; +use ark_r1cs_std::fields::FieldVar; +use ark_relations::r1cs::SynthesisError; + +pub trait SboxConstraints { + fn synthesize_sbox( + &self, + input: FpVar, + round_key: F, + ) -> Result, SynthesisError>; +} + +impl SboxConstraints for PoseidonSbox { + fn synthesize_sbox( + &self, + input_var: FpVar, + round_key: F, + ) -> Result, SynthesisError> { + match self { + PoseidonSbox::Exponentiation(val) => match val { + 3 => synthesize_exp3_sbox::(input_var, round_key), + 5 => synthesize_exp5_sbox::(input_var, round_key), + _ => synthesize_exp3_sbox::(input_var, round_key), + }, + PoseidonSbox::Inverse => synthesize_inverse_sbox::(input_var, round_key), + } + } +} + +// Allocate variables in circuit and enforce constraints when Sbox as cube +fn synthesize_exp3_sbox( + input_var: FpVar, + round_key: F, +) -> Result, SynthesisError> { + let inp_plus_const: FpVar = input_var + round_key; + let sqr = inp_plus_const.clone() * inp_plus_const.clone(); + let cube = inp_plus_const.clone() * sqr; + Ok(cube) +} + +// Allocate variables in circuit and enforce constraints when Sbox as cube +fn synthesize_exp5_sbox( + input_var: FpVar, + round_key: F, +) -> Result, SynthesisError> { + let inp_plus_const: FpVar = input_var + round_key; + let sqr = inp_plus_const.clone() * inp_plus_const.clone(); + let fourth = sqr.clone() * sqr.clone(); + let fifth = inp_plus_const.clone() * fourth; + Ok(fifth) +} + +// Allocate variables in circuit and enforce constraints when Sbox as +// inverse +fn synthesize_inverse_sbox( + input_var: FpVar, + round_key: F, +) 
-> Result, SynthesisError> { + let inp_plus_const: FpVar = input_var + round_key; + let input_inv = inp_plus_const.inverse().unwrap(); + Ok(input_inv) +} diff --git a/arkworks/crypto-primitives/src/crh/poseidon/sbox/mod.rs b/arkworks/crypto-primitives/src/crh/poseidon/sbox/mod.rs new file mode 100644 index 00000000..aa0efe8d --- /dev/null +++ b/arkworks/crypto-primitives/src/crh/poseidon/sbox/mod.rs @@ -0,0 +1,52 @@ +use ark_ff::PrimeField; + +#[cfg(feature = "r1cs")] +pub mod constraints; + +/// An S-Box that can be used with Poseidon. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub enum PoseidonSbox { + Exponentiation(usize), + Inverse, +} + +impl PoseidonSbox { + pub fn apply_sbox(&self, elem: F) -> F { + match self { + PoseidonSbox::Exponentiation(val) => { + match val { + 2 => elem * &elem, + 3 => elem * &elem * &elem, + 4 => { + let sqr = elem.square(); + sqr * &sqr.clone() + } + 5 => { + let sqr = elem.square(); + sqr * &sqr.clone() * &elem + } + 6 => { + let sqr = elem.square(); + let quad = sqr * &sqr; + sqr * &quad + } + 7 => { + let sqr = elem.square(); + let quad = sqr * &sqr; + sqr * &quad * &elem + } + 17 => { + let sqr = elem.square(); + let quad = sqr * &sqr; + let eighth = quad * &quad; + let sixteenth = eighth * &eighth; + sixteenth * &elem + } + // default to cubed + _ => elem * &elem * &elem, + } + } + PoseidonSbox::Inverse => elem.inverse().unwrap_or(F::zero()), + } + } +} diff --git a/arkworks/crypto-primitives/src/encryption/constraints.rs b/arkworks/crypto-primitives/src/encryption/constraints.rs new file mode 100644 index 00000000..de2a8f91 --- /dev/null +++ b/arkworks/crypto-primitives/src/encryption/constraints.rs @@ -0,0 +1,26 @@ +use crate::encryption::AsymmetricEncryptionScheme; + +use ark_r1cs_std::prelude::*; +use ark_relations::r1cs::SynthesisError; +use core::fmt::Debug; + +use ark_ff::fields::Field; + +pub trait AsymmetricEncryptionGadget { + type OutputVar: AllocVar + + EqGadget + + Clone + + Sized + + Debug; + type ParametersVar: AllocVar + Clone; + type PlaintextVar: AllocVar + Clone; + type PublicKeyVar: AllocVar + Clone; + type RandomnessVar: AllocVar + Clone; + + fn encrypt( + parameters: &Self::ParametersVar, + message: &Self::PlaintextVar, + randomness: &Self::RandomnessVar, + public_key: &Self::PublicKeyVar, + ) -> Result; +} diff --git a/arkworks/crypto-primitives/src/encryption/elgamal/constraints.rs b/arkworks/crypto-primitives/src/encryption/elgamal/constraints.rs new file mode 100644 index 00000000..219ec7fe --- /dev/null +++ b/arkworks/crypto-primitives/src/encryption/elgamal/constraints.rs @@ -0,0 +1,302 @@ +use ark_r1cs_std::prelude::*; +use ark_relations::r1cs::{Namespace, SynthesisError}; + +use crate::encryption::elgamal::{ + Ciphertext, ElGamal, Parameters, Plaintext, PublicKey, Randomness, +}; +use crate::encryption::AsymmetricEncryptionGadget; +use ark_ec::ProjectiveCurve; +use ark_ff::{ + fields::{Field, PrimeField}, + to_bytes, Zero, +}; +use ark_std::{borrow::Borrow, marker::PhantomData, vec::Vec}; + +pub type ConstraintF = <::BaseField as Field>::BasePrimeField; + +#[derive(Clone, Debug)] +pub struct RandomnessVar(Vec>); + +impl AllocVar, F> for RandomnessVar +where + C: ProjectiveCurve, + F: PrimeField, +{ + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let r = to_bytes![&f().map(|b| b.borrow().0).unwrap_or(C::ScalarField::zero())].unwrap(); + match mode { + AllocationMode::Constant => Ok(Self(UInt8::constant_vec(&r))), + AllocationMode::Input => 
UInt8::new_input_vec(cs, &r).map(Self), + AllocationMode::Witness => UInt8::new_witness_vec(cs, &r).map(Self), + } + } +} + +#[derive(Derivative)] +#[derivative(Clone(bound = "C: ProjectiveCurve, GG: CurveVar>"))] +pub struct ParametersVar>> +where + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, +{ + generator: GG, + #[doc(hidden)] + _curve: PhantomData, +} + +impl AllocVar, ConstraintF> for ParametersVar +where + C: ProjectiveCurve, + GG: CurveVar>, + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, +{ + fn new_variable>>( + cs: impl Into>>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let generator = GG::new_variable(cs, || f().map(|g| g.borrow().generator), mode)?; + Ok(Self { + generator, + _curve: PhantomData, + }) + } +} + +#[derive(Derivative)] +#[derivative(Clone(bound = "C: ProjectiveCurve, GG: CurveVar>"))] +pub struct PlaintextVar>> +where + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, +{ + pub plaintext: GG, + #[doc(hidden)] + _curve: PhantomData, +} + +impl AllocVar, ConstraintF> for PlaintextVar +where + C: ProjectiveCurve, + GG: CurveVar>, + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, +{ + fn new_variable>>( + cs: impl Into>>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let plaintext = GG::new_variable(cs, f, mode)?; + Ok(Self { + plaintext, + _curve: PhantomData, + }) + } +} + +#[derive(Derivative)] +#[derivative(Clone(bound = "C: ProjectiveCurve, GG: CurveVar>"))] +pub struct PublicKeyVar>> +where + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, +{ + pub pk: GG, + #[doc(hidden)] + _curve: PhantomData, +} + +impl AllocVar, ConstraintF> for PublicKeyVar +where + C: ProjectiveCurve, + GG: CurveVar>, + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, +{ + fn new_variable>>( + cs: impl Into>>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let pk = GG::new_variable(cs, f, mode)?; + Ok(Self { + pk, + _curve: PhantomData, + }) + } +} + +#[derive(Derivative, Debug)] +#[derivative(Clone(bound = "C: ProjectiveCurve, GG: CurveVar>"))] +pub struct OutputVar>> +where + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, +{ + pub c1: GG, + pub c2: GG, + #[doc(hidden)] + _curve: PhantomData, +} + +impl AllocVar, ConstraintF> for OutputVar +where + C: ProjectiveCurve, + GG: CurveVar>, + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, +{ + fn new_variable>>( + cs: impl Into>>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + let prep = f().map(|g| *g.borrow()); + let c1 = GG::new_variable(cs.clone(), || prep.map(|g| g.borrow().0), mode)?; + let c2 = GG::new_variable(cs.clone(), || prep.map(|g| g.borrow().1), mode)?; + Ok(Self { + c1, + c2, + _curve: PhantomData, + }) + } +} + +impl EqGadget> for OutputVar +where + C: ProjectiveCurve, + GC: CurveVar>, + for<'a> &'a GC: GroupOpsBounds<'a, C, GC>, +{ + #[inline] + fn is_eq(&self, other: &Self) -> Result>, SynthesisError> { + self.c1.is_eq(&other.c1)?.and(&self.c2.is_eq(&other.c2)?) 
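+ // Two ElGamal ciphertexts are equal iff both components match, so the
+ // result above is the AND of the component-wise equality checks.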
+ } +} + +pub struct ElGamalEncGadget>> +where + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, +{ + #[doc(hidden)] + _curve: PhantomData<*const C>, + _group_var: PhantomData<*const GG>, +} + +impl AsymmetricEncryptionGadget, ConstraintF> for ElGamalEncGadget +where + C: ProjectiveCurve, + GG: CurveVar>, + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, + ConstraintF: PrimeField, +{ + type OutputVar = OutputVar; + type ParametersVar = ParametersVar; + type PlaintextVar = PlaintextVar; + type PublicKeyVar = PublicKeyVar; + type RandomnessVar = RandomnessVar>; + + fn encrypt( + parameters: &Self::ParametersVar, + message: &Self::PlaintextVar, + randomness: &Self::RandomnessVar, + public_key: &Self::PublicKeyVar, + ) -> Result { + // flatten randomness to little-endian bit vector + let randomness = randomness + .0 + .iter() + .flat_map(|b| b.to_bits_le().unwrap()) + .collect::>(); + + // compute s = randomness*pk + let s = public_key.pk.clone().scalar_mul_le(randomness.iter())?; + + // compute c1 = randomness*generator + let c1 = parameters + .generator + .clone() + .scalar_mul_le(randomness.iter())?; + + // compute c2 = m + s + let c2 = message.plaintext.clone() + s; + + Ok(Self::OutputVar { + c1, + c2, + _curve: PhantomData, + }) + } +} + +#[cfg(test)] +mod test { + use crate::encryption::constraints::AsymmetricEncryptionGadget; + use ark_std::{test_rng, UniformRand}; + + use ark_ed_on_bls12_381::{constraints::EdwardsVar, EdwardsProjective as JubJub, Fq}; + + use crate::encryption::elgamal::{constraints::ElGamalEncGadget, ElGamal, Randomness}; + use crate::encryption::AsymmetricEncryptionScheme; + use ark_r1cs_std::prelude::*; + use ark_relations::r1cs::ConstraintSystem; + + #[test] + fn test_elgamal_gadget() { + let rng = &mut test_rng(); + + type MyEnc = ElGamal; + type MyGadget = ElGamalEncGadget; + + // compute primitive result + let parameters = MyEnc::setup(rng).unwrap(); + let (pk, _) = MyEnc::keygen(¶meters, rng).unwrap(); + let msg = JubJub::rand(rng).into(); + let randomness = Randomness::rand(rng); + let primitive_result = MyEnc::encrypt(¶meters, &pk, &msg, &randomness).unwrap(); + + // construct constraint system + let cs = ConstraintSystem::::new_ref(); + let randomness_var = + >::RandomnessVar::new_witness( + ark_relations::ns!(cs, "gadget_randomness"), + || Ok(&randomness), + ) + .unwrap(); + let parameters_var = + >::ParametersVar::new_constant( + ark_relations::ns!(cs, "gadget_parameters"), + ¶meters, + ) + .unwrap(); + let msg_var = + >::PlaintextVar::new_witness( + ark_relations::ns!(cs, "gadget_message"), + || Ok(&msg), + ) + .unwrap(); + let pk_var = + >::PublicKeyVar::new_witness( + ark_relations::ns!(cs, "gadget_public_key"), + || Ok(&pk), + ) + .unwrap(); + + // use gadget + let result_var = + MyGadget::encrypt(¶meters_var, &msg_var, &randomness_var, &pk_var).unwrap(); + + // check that result equals expected ciphertext in the constraint system + let expected_var = + >::OutputVar::new_input( + ark_relations::ns!(cs, "gadget_expected"), + || Ok(&primitive_result), + ) + .unwrap(); + expected_var.enforce_equal(&result_var).unwrap(); + + assert_eq!(primitive_result.0, result_var.c1.value().unwrap()); + assert_eq!(primitive_result.1, result_var.c2.value().unwrap()); + assert!(cs.is_satisfied().unwrap()); + } +} diff --git a/arkworks/crypto-primitives/src/encryption/elgamal/mod.rs b/arkworks/crypto-primitives/src/encryption/elgamal/mod.rs new file mode 100644 index 00000000..a1475bdc --- /dev/null +++ b/arkworks/crypto-primitives/src/encryption/elgamal/mod.rs @@ -0,0 +1,134 @@ 
+#[cfg(feature = "r1cs")] +pub mod constraints; + +use crate::encryption::AsymmetricEncryptionScheme; +use crate::Error; +use ark_ec::{AffineCurve, ProjectiveCurve}; +use ark_ff::{fields::PrimeField, UniformRand}; +use ark_std::marker::PhantomData; +use ark_std::rand::Rng; + +pub struct ElGamal { + _group: PhantomData, +} + +pub struct Parameters { + pub generator: C::Affine, +} + +pub type PublicKey = ::Affine; + +pub struct SecretKey(pub C::ScalarField); + +pub struct Randomness(pub C::ScalarField); + +impl UniformRand for Randomness { + #[inline] + fn rand(rng: &mut R) -> Self { + Randomness(::ScalarField::rand(rng)) + } +} + +pub type Plaintext = ::Affine; + +pub type Ciphertext = ( + ::Affine, + ::Affine, +); + +impl AsymmetricEncryptionScheme for ElGamal +where + C::ScalarField: PrimeField, +{ + type Parameters = Parameters; + type PublicKey = PublicKey; + type SecretKey = SecretKey; + type Randomness = Randomness; + type Plaintext = Plaintext; + type Ciphertext = Ciphertext; + + fn setup(rng: &mut R) -> Result { + // get a random generator + let generator = C::rand(rng).into(); + + Ok(Parameters { generator }) + } + + fn keygen( + pp: &Self::Parameters, + rng: &mut R, + ) -> Result<(Self::PublicKey, Self::SecretKey), Error> { + // get a random element from the scalar field + let secret_key: ::ScalarField = C::ScalarField::rand(rng); + + // compute secret_key*generator to derive the public key + let public_key = pp.generator.mul(secret_key.into_repr()).into(); + + Ok((public_key, SecretKey(secret_key))) + } + + fn encrypt( + pp: &Self::Parameters, + pk: &Self::PublicKey, + message: &Self::Plaintext, + r: &Self::Randomness, + ) -> Result { + // compute s = r*pk + let s = pk.mul(r.0.into_repr()).into(); + + // compute c1 = r*generator + let c1 = pp.generator.mul(r.0.into_repr()).into(); + + // compute c2 = m + s + let c2 = *message + s; + + Ok((c1, c2)) + } + + fn decrypt( + _pp: &Self::Parameters, + sk: &Self::SecretKey, + ciphertext: &Self::Ciphertext, + ) -> Result { + let c1: ::Affine = ciphertext.0; + let c2: ::Affine = ciphertext.1; + + // compute s = secret_key * c1 + let s = c1.mul(sk.0.into_repr()); + let s_inv = -s; + + // compute message = c2 - s + let m = c2 + s_inv.into_affine(); + + Ok(m) + } +} + +#[cfg(test)] +mod test { + use ark_std::{test_rng, UniformRand}; + + use ark_ed_on_bls12_381::EdwardsProjective as JubJub; + + use crate::encryption::elgamal::{ElGamal, Randomness}; + use crate::encryption::AsymmetricEncryptionScheme; + + #[test] + fn test_elgamal_encryption() { + let rng = &mut test_rng(); + + // setup and key generation + let parameters = ElGamal::::setup(rng).unwrap(); + let (pk, sk) = ElGamal::::keygen(¶meters, rng).unwrap(); + + // get a random msg and encryption randomness + let msg = JubJub::rand(rng).into(); + let r = Randomness::rand(rng); + + // encrypt and decrypt the message + let cipher = ElGamal::::encrypt(¶meters, &pk, &msg, &r).unwrap(); + let check_msg = ElGamal::::decrypt(¶meters, &sk, &cipher).unwrap(); + + assert_eq!(msg, check_msg); + } +} diff --git a/arkworks/crypto-primitives/src/encryption/mod.rs b/arkworks/crypto-primitives/src/encryption/mod.rs new file mode 100644 index 00000000..d1213a76 --- /dev/null +++ b/arkworks/crypto-primitives/src/encryption/mod.rs @@ -0,0 +1,38 @@ +#[cfg(feature = "r1cs")] +pub mod constraints; +#[cfg(feature = "r1cs")] +pub use constraints::*; + +pub mod elgamal; + +use crate::Error; +use ark_std::rand::Rng; + +pub trait AsymmetricEncryptionScheme { + type Parameters; + type PublicKey; + type SecretKey; + 
type Randomness; + type Plaintext; + type Ciphertext; + + fn setup(rng: &mut R) -> Result; + + fn keygen( + pp: &Self::Parameters, + rng: &mut R, + ) -> Result<(Self::PublicKey, Self::SecretKey), Error>; + + fn encrypt( + pp: &Self::Parameters, + pk: &Self::PublicKey, + message: &Self::Plaintext, + r: &Self::Randomness, + ) -> Result; + + fn decrypt( + pp: &Self::Parameters, + sk: &Self::SecretKey, + ciphertext: &Self::Ciphertext, + ) -> Result; +} diff --git a/arkworks/crypto-primitives/src/lib.rs b/arkworks/crypto-primitives/src/lib.rs new file mode 100644 index 00000000..3fcf7047 --- /dev/null +++ b/arkworks/crypto-primitives/src/lib.rs @@ -0,0 +1,61 @@ +#![cfg_attr(not(feature = "std"), no_std)] +#![deny( + unused, + future_incompatible, + nonstandard_style, + rust_2018_idioms, + // missing_docs +)] +#![forbid(unsafe_code)] + +#[macro_use] +extern crate ark_std; + +#[macro_use] +extern crate derivative; + +pub(crate) use ark_std::{borrow::ToOwned, boxed::Box, vec::Vec}; + +pub mod commitment; +pub mod crh; +pub mod merkle_tree; + +pub mod encryption; +pub mod prf; +pub mod signature; +pub mod snark; + +pub use self::{ + commitment::CommitmentScheme, + crh::CRH, + merkle_tree::{MerkleTree, Path}, + prf::PRF, + signature::SignatureScheme, + snark::{CircuitSpecificSetupSNARK, UniversalSetupSNARK, SNARK}, +}; + +#[cfg(feature = "r1cs")] +pub use self::{ + commitment::CommitmentGadget, crh::CRHGadget, merkle_tree::constraints::PathVar, + prf::PRFGadget, signature::SigRandomizePkGadget, snark::SNARKGadget, +}; + +pub type Error = Box; + +#[derive(Debug)] +pub enum CryptoError { + IncorrectInputLength(usize), + NotPrimeOrder, +} + +impl core::fmt::Display for CryptoError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let msg = match self { + CryptoError::IncorrectInputLength(len) => format!("input length is wrong: {}", len), + CryptoError::NotPrimeOrder => "element is not prime order".to_owned(), + }; + write!(f, "{}", msg) + } +} + +impl ark_std::error::Error for CryptoError {} diff --git a/arkworks/crypto-primitives/src/merkle_tree/constraints.rs b/arkworks/crypto-primitives/src/merkle_tree/constraints.rs new file mode 100644 index 00000000..f7e89aa6 --- /dev/null +++ b/arkworks/crypto-primitives/src/merkle_tree/constraints.rs @@ -0,0 +1,402 @@ +use crate::crh::TwoToOneCRHGadget; +use crate::merkle_tree::Config; +use crate::{CRHGadget, Path}; +use ark_ff::Field; +use ark_r1cs_std::alloc::AllocVar; +use ark_r1cs_std::boolean::Boolean; +use ark_r1cs_std::eq::EqGadget; +#[allow(unused)] +use ark_r1cs_std::prelude::*; +use ark_r1cs_std::ToBytesGadget; +use ark_relations::r1cs::{Namespace, SynthesisError}; +use ark_std::borrow::Borrow; +use ark_std::vec::Vec; +/// Represents a merkle tree path gadget. +pub struct PathVar +where + P: Config, + LeafH: CRHGadget, + TwoToOneH: TwoToOneCRHGadget, + ConstraintF: Field, +{ + /// `path[i]` is 0 (false) iff ith non-leaf node from top to bottom is left. + path: Vec>, + /// `auth_path[i]` is the entry of sibling of ith non-leaf node from top to bottom. + auth_path: Vec, + /// The sibling of leaf. + leaf_sibling: LeafH::OutputVar, + /// Is this leaf the right child? 
+ leaf_is_right_child: Boolean, +} + +impl AllocVar, ConstraintF> + for PathVar +where + P: Config, + LeafH: CRHGadget, + TwoToOneH: TwoToOneCRHGadget, + ConstraintF: Field, +{ + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + f().and_then(|val| { + let leaf_sibling = LeafH::OutputVar::new_variable( + ark_relations::ns!(cs, "leaf_sibling"), + || Ok(val.borrow().leaf_sibling_hash.clone()), + mode, + )?; + let leaf_position_bit = Boolean::new_variable( + ark_relations::ns!(cs, "leaf_position_bit"), + || Ok(val.borrow().leaf_index & 1 == 1), + mode, + )?; + let pos_list: Vec<_> = val.borrow().position_list().collect(); + let path = Vec::new_variable( + ark_relations::ns!(cs, "path_bits"), + || Ok(&pos_list[..(pos_list.len() - 1)]), + mode, + )?; + + let auth_path = Vec::new_variable( + ark_relations::ns!(cs, "auth_path_nodes"), + || Ok(&val.borrow().auth_path[..]), + mode, + )?; + Ok(PathVar { + path, + auth_path, + leaf_sibling, + leaf_is_right_child: leaf_position_bit, + }) + }) + } +} + +impl PathVar +where + P: Config, + LeafH: CRHGadget, + TwoToOneH: TwoToOneCRHGadget, + ConstraintF: Field, +{ + /// Calculate the root of the Merkle tree assuming that `leaf` is the leaf on the path defined by `self`. + fn calculate_root( + &self, + leaf_hash_params: &LeafH::ParametersVar, + two_to_one_hash_params: &TwoToOneH::ParametersVar, + leaf: &impl ToBytesGadget, + ) -> Result { + let leaf_bytes = leaf.to_bytes()?; + let claimed_leaf_hash = LeafH::evaluate(leaf_hash_params, &leaf_bytes)?; + let leaf_sibling_hash = &self.leaf_sibling; + + // calculate hash for the bottom non_leaf_layer + + // We assume that when a bit is 0, it indicates that the currently hashed value H is the left child, + // and when bit is 1, it indicates our H is the right child. + // Thus `left_hash` is sibling if the bit `leaf_is_right_child` is 1, and is leaf otherwise. + + let left_hash = self + .leaf_is_right_child + .select(leaf_sibling_hash, &claimed_leaf_hash)? + .to_bytes()?; + let right_hash = self + .leaf_is_right_child + .select(&claimed_leaf_hash, leaf_sibling_hash)? + .to_bytes()?; + + let mut curr_hash = TwoToOneH::evaluate(two_to_one_hash_params, &left_hash, &right_hash)?; + // To traverse up a MT, we iterate over the path from bottom to top (i.e. in reverse) + + // At any given bit, the bit being 0 indicates our currently hashed value is the left, + // and the bit being 1 indicates our currently hashed value is on the right. + // Thus `left_hash` is the sibling if bit is 1, and it's the computed hash if bit is 0 + for (bit, sibling) in self.path.iter().rev().zip(self.auth_path.iter().rev()) { + let left_hash = bit.select(sibling, &curr_hash)?; + let right_hash = bit.select(&curr_hash, sibling)?; + + curr_hash = TwoToOneH::evaluate( + two_to_one_hash_params, + &left_hash.to_bytes()?, + &right_hash.to_bytes()?, + )?; + } + + Ok(curr_hash) + } + + /// Check that hashing a Merkle tree path according to `self`, and + /// with `leaf` as the leaf, leads to a Merkle tree root equalling `root`. + pub fn verify_membership( + &self, + leaf_hash_params: &LeafH::ParametersVar, + two_to_one_hash_params: &TwoToOneH::ParametersVar, + root: &TwoToOneH::OutputVar, + leaf: &impl ToBytesGadget, + ) -> Result, SynthesisError> { + let expected_root = self.calculate_root(leaf_hash_params, two_to_one_hash_params, leaf)?; + Ok(expected_root.is_eq(root)?) 
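+        // (Editorial note: this returns the equality *bit* rather than enforcing it;
+        // callers that require membership must constrain the result themselves, e.g.
+        // `path.verify_membership(..)?.enforce_equal(&Boolean::TRUE)?;` — which is
+        // exactly what `update_leaf` below does.)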
+ } + + /// Check that `old_leaf` is the leaf of the Merkle tree on the path defined by + /// `self`, and then compute the new root when replacing `old_leaf` by `new_leaf`. + pub fn update_leaf( + &self, + leaf_hash_params: &LeafH::ParametersVar, + two_to_one_hash_params: &TwoToOneH::ParametersVar, + old_root: &TwoToOneH::OutputVar, + old_leaf: &impl ToBytesGadget, + new_leaf: &impl ToBytesGadget, + ) -> Result { + self.verify_membership(leaf_hash_params, two_to_one_hash_params, old_root, old_leaf)? + .enforce_equal(&Boolean::TRUE); + Ok(self.calculate_root(leaf_hash_params, two_to_one_hash_params, new_leaf)?) + } + + /// Check that `old_leaf` is the leaf of the Merkle tree on the path defined by + /// `self`, and then compute the expected new root when replacing `old_leaf` by `new_leaf`. + /// Return a boolean indicating whether expected new root equals `new_root`. + pub fn update_and_check( + &self, + leaf_hash_params: &LeafH::ParametersVar, + two_to_one_hash_params: &TwoToOneH::ParametersVar, + old_root: &TwoToOneH::OutputVar, + new_root: &TwoToOneH::OutputVar, + old_leaf: &impl ToBytesGadget, + new_leaf: &impl ToBytesGadget, + ) -> Result, SynthesisError> { + let actual_new_root = self.update_leaf( + leaf_hash_params, + two_to_one_hash_params, + old_root, + old_leaf, + new_leaf, + )?; + Ok(actual_new_root.is_eq(&new_root)?) + } +} + +#[cfg(test)] +mod tests { + use crate::crh::{pedersen, TwoToOneCRH, TwoToOneCRHGadget}; + + use crate::merkle_tree::Config; + use crate::{CRHGadget, MerkleTree, PathVar, CRH}; + use ark_ed_on_bls12_381::{constraints::EdwardsVar, EdwardsProjective as JubJub, Fq}; + #[allow(unused)] + use ark_r1cs_std::prelude::*; + #[allow(unused)] + use ark_relations::r1cs::ConstraintSystem; + + #[derive(Clone)] + pub(super) struct Window4x256; + impl pedersen::Window for Window4x256 { + const WINDOW_SIZE: usize = 4; + const NUM_WINDOWS: usize = 256; + } + + type H = pedersen::CRH; + type HG = pedersen::constraints::CRHGadget; + + struct JubJubMerkleTreeParams; + + impl Config for JubJubMerkleTreeParams { + type LeafHash = H; + type TwoToOneHash = H; + } + + type JubJubMerkleTree = MerkleTree; + + /// Generate a merkle tree, its constraints, and test its constraints + fn merkle_tree_test( + leaves: &[[u8; 30]], + use_bad_root: bool, + update_query: Option<(usize, [u8; 30])>, + ) -> () { + let mut rng = ark_std::test_rng(); + + let leaf_crh_params = ::setup(&mut rng).unwrap(); + let two_to_one_crh_params = ::setup(&mut rng).unwrap(); + let mut tree = + JubJubMerkleTree::new(&leaf_crh_params, &two_to_one_crh_params, leaves).unwrap(); + let root = tree.root(); + let cs = ConstraintSystem::::new_ref(); + for (i, leaf) in leaves.iter().enumerate() { + let proof = tree.generate_proof(i).unwrap(); + assert!(proof + .verify(&leaf_crh_params, &two_to_one_crh_params, &root, &leaf) + .unwrap()); + + // Allocate Merkle Tree Root + let root = >::OutputVar::new_witness( + ark_relations::ns!(cs, "new_digest"), + || { + if use_bad_root { + Ok(::Output::default()) + } else { + Ok(root) + } + }, + ) + .unwrap(); + + let constraints_from_digest = cs.num_constraints(); + println!("constraints from digest: {}", constraints_from_digest); + + // Allocate Parameters for CRH + let leaf_crh_params_var = >::ParametersVar::new_constant( + ark_relations::ns!(cs, "leaf_crh_parameter"), + &leaf_crh_params, + ) + .unwrap(); + let two_to_one_crh_params_var = + >::ParametersVar::new_constant( + ark_relations::ns!(cs, "two_to_one_crh_parameter"), + &two_to_one_crh_params, + ) + .unwrap(); + + let 
constraints_from_params = cs.num_constraints() - constraints_from_digest; + println!("constraints from parameters: {}", constraints_from_params); + + // Allocate Leaf + let leaf_g = UInt8::constant_vec(leaf); + + let constraints_from_leaf = + cs.num_constraints() - constraints_from_params - constraints_from_digest; + println!("constraints from leaf: {}", constraints_from_leaf); + + // Allocate Merkle Tree Path + let cw: PathVar<_, HG, HG, Fq> = + PathVar::new_witness(ark_relations::ns!(cs, "new_witness"), || Ok(&proof)).unwrap(); + // check pathvar correctness + assert_eq!(cw.leaf_sibling.value().unwrap(), proof.leaf_sibling_hash); + assert_eq!( + cw.leaf_is_right_child.value().unwrap(), + proof.leaf_index & 1 == 1 + ); + let position_list: Vec<_> = proof.position_list().collect(); + for (i, path_node) in cw.path.iter().enumerate() { + assert_eq!(path_node.value().unwrap(), position_list[i]); + } + for (i, auth_path_node) in cw.auth_path.iter().enumerate() { + assert_eq!(auth_path_node.value().unwrap(), proof.auth_path[i]) + } + + let constraints_from_path = cs.num_constraints() + - constraints_from_params + - constraints_from_digest + - constraints_from_leaf; + println!("constraints from path: {}", constraints_from_path); + let leaf_g: &[_] = leaf_g.as_slice(); + assert!(cs.is_satisfied().unwrap()); + assert!(cw + .verify_membership( + &leaf_crh_params_var, + &two_to_one_crh_params_var, + &root, + &leaf_g, + ) + .unwrap() + .value() + .unwrap()); + let setup_constraints = constraints_from_leaf + + constraints_from_digest + + constraints_from_params + + constraints_from_path; + println!( + "number of constraints: {}", + cs.num_constraints() - setup_constraints + ); + } + + assert!( + cs.is_satisfied().unwrap(), + "verification constraints not satisfied" + ); + + // check update + if let Some(update_query) = update_query { + let cs = ConstraintSystem::::new_ref(); + // allocate parameters for CRH + let leaf_crh_params_var = >::ParametersVar::new_constant( + ark_relations::ns!(cs, "leaf_crh_parameter"), + &leaf_crh_params, + ) + .unwrap(); + let two_to_one_crh_params_var = + >::ParametersVar::new_constant( + ark_relations::ns!(cs, "two_to_one_crh_parameter"), + &two_to_one_crh_params, + ) + .unwrap(); + + // allocate old leaf and new leaf + let old_leaf_var = + UInt8::new_input_vec(ark_relations::ns!(cs, "old_leaf"), &leaves[update_query.0]) + .unwrap(); + let new_leaf_var = + UInt8::new_input_vec(ark_relations::ns!(cs, "new_leaf"), &update_query.1).unwrap(); + // + // suppose the verifier already knows old root, new root, old leaf, new leaf, and the original path (so they are public) + let old_root = tree.root(); + let old_root_var = >::OutputVar::new_input( + ark_relations::ns!(cs, "old_root"), + || Ok(old_root), + ) + .unwrap(); + let old_path = tree.generate_proof(update_query.0).unwrap(); + let old_path_var: PathVar<_, HG, HG, Fq> = + PathVar::new_input(ark_relations::ns!(cs, "old_path"), || Ok(old_path)).unwrap(); + let new_root = { + tree.update(update_query.0, &update_query.1).unwrap(); + tree.root() + }; + let new_root_var = >::OutputVar::new_input( + ark_relations::ns!(cs, "old_root"), + || Ok(new_root), + ) + .unwrap(); + // verifier need to get a proof (the witness) to show the known new root is correct + assert!(old_path_var + .update_and_check( + &leaf_crh_params_var, + &two_to_one_crh_params_var, + &old_root_var, + &new_root_var, + &old_leaf_var.as_slice(), + &new_leaf_var.as_slice(), + ) + .unwrap() + .value() + .unwrap()); + assert!(cs.is_satisfied().unwrap()) + } + } + + 
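// [Editorial sketch — not part of the diff] When `cs.is_satisfied()` returns false in a
// test like the helper above, the namespace of the first failing constraint can be
// located with the same call the Blake2s PRF test further below uses:
//
//     if !cs.is_satisfied().unwrap() {
//         println!("unsatisfied: {:?}", cs.which_is_unsatisfied().unwrap());
//     }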
#[test] + fn good_root_test() { + let mut leaves = Vec::new(); + for i in 0..4u8 { + let input = [i; 30]; + leaves.push(input); + } + merkle_tree_test(&leaves, false, Some((3usize, [7u8; 30]))); + } + + #[test] + #[should_panic] + fn bad_root_test() { + let mut leaves = Vec::new(); + for i in 0..4u8 { + let input = [i; 30]; + leaves.push(input); + } + merkle_tree_test(&leaves, true, None); + } +} diff --git a/arkworks/crypto-primitives/src/merkle_tree/mod.rs b/arkworks/crypto-primitives/src/merkle_tree/mod.rs new file mode 100644 index 00000000..bcb83c00 --- /dev/null +++ b/arkworks/crypto-primitives/src/merkle_tree/mod.rs @@ -0,0 +1,554 @@ +#![allow(unused)] // temporary +#![allow(clippy::needless_range_loop)] + +use crate::crh::TwoToOneCRH; +use crate::CRH; +use ark_ff::ToBytes; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write}; +use ark_std::vec::Vec; + +#[cfg(feature = "r1cs")] +pub mod constraints; + +pub trait Config { + type LeafHash: CRH; + type TwoToOneHash: TwoToOneCRH; +} + +pub type TwoToOneDigest

<P> = <<P as Config>::TwoToOneHash as TwoToOneCRH>::Output;
+pub type LeafDigest<P> = <<P as Config>::LeafHash as CRH>::Output;
+pub type TwoToOneParam<P> = <<P as Config>::TwoToOneHash as TwoToOneCRH>::Parameters;
+pub type LeafParam<P> = <<P as Config>::LeafHash as CRH>::Parameters;
+
+/// Stores the hashes of a particular path (in order) from root to leaf.
+/// For example:
+/// ```tree_diagram
+///        [A]
+///       /   \
+///     [B]    C
+///     / \   / \
+///    D  [E] F  H
+///   ..  / \  ....
+///     [I]  J
+/// ```
+/// Suppose we want to prove I, then `leaf_sibling_hash` is J, `auth_path` is `[C, D]`
+#[derive(Clone, CanonicalSerialize, CanonicalDeserialize)]
+pub struct Path<P: Config> {
+    pub leaf_sibling_hash: LeafDigest<P>,
+    /// The sibling of each on-path node, ordered from the higher layer to the lower layer (does not include the root node).
+    pub auth_path: Vec<TwoToOneDigest<P>>,
+    /// Stores the leaf index of the node.
+    pub leaf_index: usize,
+}
+
+impl<P: Config> Path<P> {
+    /// The position of the on-path node in `leaf_and_sibling_hash` and `non_leaf_and_sibling_hash_path`.
+    /// `position[i]` is 0 (false) iff the `i`th on-path node from top to bottom is on the left.
+    ///
+    /// This function simply converts `self.leaf_index` to a boolean array in big-endian form.
+    fn position_list(&'_ self) -> impl '_ + Iterator<Item = bool> {
+        (0..self.auth_path.len() + 1)
+            .map(move |i| ((self.leaf_index >> i) & 1) != 0)
+            .rev()
+    }
+}
+
+/// Convert `computed_hash` and `sibling_hash` to bytes. `index` is the first `path.len()` bits of
+/// the position in the tree.
+///
+/// If the least significant bit of `index` is 0, then `input_1` will be left and `input_2` will be right.
+/// Otherwise, `input_1` will be right and `input_2` will be left.
+///
+/// Returns: (left, right)
+fn select_left_right_bytes<B: ToBytes>(
+    index: usize,
+    computed_hash: &B,
+    sibling_hash: &B,
+) -> Result<(Vec<u8>, Vec<u8>), crate::Error> {
+    let is_left = index & 1 == 0;
+    let mut left_bytes = ark_ff::to_bytes!(computed_hash)?;
+    let mut right_bytes = ark_ff::to_bytes!(sibling_hash)?;
+    if !is_left {
+        core::mem::swap(&mut left_bytes, &mut right_bytes);
+    }
+    Ok((left_bytes, right_bytes))
+}
+
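// [Editorial sketch — not part of the diff] The big-endian bit convention from
// `position_list` above, worked for `leaf_index = 6` in a tree whose `auth_path` has
// two entries (so three on-path positions, the last one being the leaf itself):
#[cfg(test)]
mod position_list_sketch {
    use ark_std::vec::Vec;

    #[test]
    fn big_endian_position_bits() {
        let leaf_index = 6usize;
        let levels = 3; // auth_path.len() + 1
        let bits: Vec<bool> = (0..levels)
            .map(|i| ((leaf_index >> i) & 1) != 0)
            .rev()
            .collect();
        // 6 = 0b110: the two upper on-path nodes are right children, and the
        // leaf itself is a left child (6 is even).
        assert_eq!(bits, [true, true, false]);
    }
}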

+impl<P: Config> Path<P> {
+    /// Verify that a leaf is at `self.index` of the merkle tree.
+    /// * `leaf_size`: leaf size in number of bytes
+    ///
+    /// `verify` infers the tree height by setting `tree_height = self.auth_path.len() + 2`
+    pub fn verify<L: ToBytes>(
+        &self,
+        leaf_hash_params: &LeafParam<P>,
+        two_to_one_hash_params: &TwoToOneParam<P>,
+        root_hash: &TwoToOneDigest<P>,

+        leaf: &L,
+    ) -> Result<bool, crate::Error> {
+        // calculate leaf hash
+        let claimed_leaf_hash =
+            P::LeafHash::evaluate(&leaf_hash_params, &ark_ff::to_bytes!(&leaf)?)?;
+        // check the hash along the path from bottom to root
+        let (left_bytes, right_bytes) =
+            select_left_right_bytes(self.leaf_index, &claimed_leaf_hash, &self.leaf_sibling_hash)?;
+
+        let mut curr_path_node =
+            P::TwoToOneHash::evaluate(&two_to_one_hash_params, &left_bytes, &right_bytes)?;
+
+        // we will use the `index` variable to track the position along the path
+        let mut index = self.leaf_index;
+        index >>= 1;
+
+        // Check levels between leaf level and root
+        for level in (0..self.auth_path.len()).rev() {
+            // check if the path node at this level is left or right
+            let (left_bytes, right_bytes) =
+                select_left_right_bytes(index, &curr_path_node, &self.auth_path[level])?;
+            // update curr_path_node
+            curr_path_node =
+                P::TwoToOneHash::evaluate(&two_to_one_hash_params, &left_bytes, &right_bytes)?;
+            index >>= 1;
+        }
+
+        // check if the final hash is the root
+        if &curr_path_node != root_hash {
+            return Ok(false);
+        }
+
+        Ok(true)
+    }
+}
+
+/// Defines a merkle tree data structure.
+/// This merkle tree has a runtime-fixed height and assumes the number of leaves is a power of two.
+///
+/// TODO: add an RFC-6962-compatible merkle tree in the future.
+/// For this release, padding will not be supported because of security concerns: if the leaf hash and the two-to-one hash use the same underlying
+/// CRH, a malicious prover can prove a leaf while the actual node is an inner node. In the future, we can prefix leaf hashes in different layers to
+/// solve the problem.
+#[derive(Clone)]
+pub struct MerkleTree<P: Config> {
+    /// Stores the non-leaf nodes in level order. The first element is the root node.
+    /// The `i`th node's (starting at 1) children are at indices `2*i`, `2*i+1`.
+    non_leaf_nodes: Vec<TwoToOneDigest<P>>,
+    /// Stores the hashes of the leaf nodes from left to right
+    leaf_nodes: Vec<LeafDigest<P>>,
+    /// Stores the two-to-one hash parameters
+    two_to_one_hash_param: TwoToOneParam<P>,
+    /// Stores the leaf hash parameters
+    leaf_hash_param: LeafParam<P>,
+    /// Stores the height of the MerkleTree
+    height: usize,
+}
+
+impl<P: Config> MerkleTree<P> {
+    /// Create an empty merkle tree such that all leaves are zero-filled.
+    /// Consider using a sparse merkle tree if you need the tree to be low-memory.
+    pub fn blank(
+        leaf_hash_param: &LeafParam<P>,
+        two_to_one_hash_param: &TwoToOneParam<P>,
+        height: usize,
+    ) -> Result<Self, crate::Error> {
+        let leaf = vec![0u8; P::LeafHash::INPUT_SIZE_BITS / 8];
+        let leaves = vec![leaf; 1 << (height - 1)];
+        Self::new(leaf_hash_param, two_to_one_hash_param, &leaves)
+    }
+
+    /// Returns a new merkle tree. `leaves.len()` should be a power of two.
+    pub fn new<L: ToBytes>(
+        leaf_hash_param: &LeafParam<P>,
+        two_to_one_hash_param: &TwoToOneParam<P>,
+        leaves: &[L],
+    ) -> Result<Self, crate::Error> {
+        let leaf_nodes_size = leaves.len(); // size of the leaf layer
+        assert!(
+            leaf_nodes_size.is_power_of_two(),
+            "`leaves.len()` should be a power of two"
+        );
+        let non_leaf_nodes_size = leaf_nodes_size - 1;
+
+        let tree_height = tree_height(leaf_nodes_size);
+
+        let hash_of_empty: TwoToOneDigest<P>

= P::TwoToOneHash::evaluate( + two_to_one_hash_param, + &vec![0u8; P::TwoToOneHash::LEFT_INPUT_SIZE_BITS / 8], + &vec![0u8; P::TwoToOneHash::RIGHT_INPUT_SIZE_BITS / 8], + )?; + + // initialize the merkle tree as array of nodes in level order + let mut non_leaf_nodes: Vec> = (0..non_leaf_nodes_size) + .map(|_| hash_of_empty.clone()) + .collect(); + let mut leaf_nodes: Vec> = Vec::with_capacity(leaf_nodes_size); + + // Compute the starting indices for each non-leaf level of the tree + let mut index = 0; + let mut level_indices = Vec::with_capacity(tree_height - 1); + for _ in 0..(tree_height - 1) { + level_indices.push(index); + index = left_child(index); + } + + // compute and store hash values for each leaf + for leaf in leaves.iter() { + leaf_nodes.push(P::LeafHash::evaluate( + leaf_hash_param, + &ark_ff::to_bytes!(leaf)?, + )?) + } + + // compute the hash values for the non-leaf bottom layer + { + let start_index = level_indices.pop().unwrap(); + let upper_bound = left_child(start_index); + for current_index in start_index..upper_bound { + // `left_child(current_index)` and `right_child(current_index) returns the position of + // leaf in the whole tree (represented as a list in level order). We need to shift it + // by `-upper_bound` to get the index in `leaf_nodes` list. + let left_leaf_index = left_child(current_index) - upper_bound; + let right_leaf_index = right_child(current_index) - upper_bound; + // compute hash + let left_bytes = ark_ff::to_bytes!(&leaf_nodes[left_leaf_index])?; + let right_bytes = ark_ff::to_bytes!(&leaf_nodes[right_leaf_index])?; + non_leaf_nodes[current_index] = + P::TwoToOneHash::evaluate(&two_to_one_hash_param, &left_bytes, &right_bytes)? + } + } + + // compute the hash values for nodes in every other layer in the tree + level_indices.reverse(); + for &start_index in &level_indices { + // The layer beginning `start_index` ends at `upper_bound` (exclusive). + let upper_bound = left_child(start_index); + for current_index in start_index..upper_bound { + let left_index = left_child(current_index); + let right_index = right_child(current_index); + let left_bytes = ark_ff::to_bytes!(&non_leaf_nodes[left_index])?; + let right_bytes = ark_ff::to_bytes!(&non_leaf_nodes[right_index])?; + non_leaf_nodes[current_index] = + P::TwoToOneHash::evaluate(&two_to_one_hash_param, &left_bytes, &right_bytes)? + } + } + + Ok(MerkleTree { + leaf_nodes, + non_leaf_nodes, + height: tree_height, + two_to_one_hash_param: two_to_one_hash_param.clone(), + leaf_hash_param: leaf_hash_param.clone(), + }) + } + + /// Returns the root of the Merkle tree. + pub fn root(&self) -> TwoToOneDigest

{ + self.non_leaf_nodes[0].clone() + } + + /// Returns the height of the Merkle tree. + pub fn height(&self) -> usize { + self.height + } + + /// Returns the authentication path from leaf at `index` to root. + pub fn generate_proof(&self, index: usize) -> Result, crate::Error> { + // gather basic tree information + let tree_height = tree_height(self.leaf_nodes.len()); + + // Get Leaf hash, and leaf sibling hash, + let leaf_index_in_tree = convert_index_to_last_level(index, tree_height); + let leaf_sibling_hash = if index & 1 == 0 { + // leaf is left child + self.leaf_nodes[index + 1].clone() + } else { + // leaf is right child + self.leaf_nodes[index - 1].clone() + }; + + // path.len() = `tree height - 2`, the two missing elements being the leaf sibling hash and the root + let mut path = Vec::with_capacity(tree_height - 2); + // Iterate from the bottom layer after the leaves, to the top, storing all sibling node's hash values. + let mut current_node = parent(leaf_index_in_tree).unwrap(); + while !is_root(current_node) { + let sibling_node = sibling(current_node).unwrap(); + path.push(self.non_leaf_nodes[sibling_node].clone()); + current_node = parent(current_node).unwrap(); + } + + debug_assert_eq!(path.len(), tree_height - 2); + + // we want to make path from root to bottom + path.reverse(); + + Ok(Path { + leaf_index: index, + auth_path: path, + leaf_sibling_hash, + }) + } + + /// Given the index and new leaf, return the hash of leaf and an updated path in order from root to bottom non-leaf level. + /// This does not mutate the underlying tree. + fn updated_path( + &self, + index: usize, + new_leaf: &L, + ) -> Result<(LeafDigest

, Vec>), crate::Error> { + // calculate the hash of leaf + let new_leaf_hash = + P::LeafHash::evaluate(&self.leaf_hash_param, &ark_ff::to_bytes!(&new_leaf)?)?; + + // calculate leaf sibling hash and locate its position (left or right) + let (leaf_left, leaf_right) = if index & 1 == 0 { + // leaf on left + (&new_leaf_hash, &self.leaf_nodes[index + 1]) + } else { + (&self.leaf_nodes[index - 1], &new_leaf_hash) + }; + + // calculate the updated hash at bottom non-leaf-level + let mut path_bottom_to_top = Vec::with_capacity(self.height - 1); + { + path_bottom_to_top.push(P::TwoToOneHash::evaluate( + &self.two_to_one_hash_param, + &ark_ff::to_bytes!(&leaf_left)?, + &ark_ff::to_bytes!(&leaf_right)?, + )?); + } + + // then calculate the updated hash from bottom to root + let leaf_index_in_tree = convert_index_to_last_level(index, self.height); + let mut prev_index = parent(leaf_index_in_tree).unwrap(); + while !is_root(prev_index) { + let (left_hash_bytes, right_hash_bytes) = if is_left_child(prev_index) { + ( + ark_ff::to_bytes!(path_bottom_to_top.last().unwrap())?, + ark_ff::to_bytes!(&self.non_leaf_nodes[sibling(prev_index).unwrap()])?, + ) + } else { + ( + ark_ff::to_bytes!(&self.non_leaf_nodes[sibling(prev_index).unwrap()])?, + ark_ff::to_bytes!(path_bottom_to_top.last().unwrap())?, + ) + }; + path_bottom_to_top.push(P::TwoToOneHash::evaluate( + &self.two_to_one_hash_param, + &left_hash_bytes, + &right_hash_bytes, + )?); + prev_index = parent(prev_index).unwrap(); + } + + debug_assert_eq!(path_bottom_to_top.len(), self.height - 1); + let path_top_to_bottom: Vec<_> = path_bottom_to_top.into_iter().rev().collect(); + Ok((new_leaf_hash, path_top_to_bottom)) + } + + /// Update the leaf at `index` to updated leaf. + /// ```tree_diagram + /// [A] + /// / \ + /// [B] C + /// / \ / \ + /// D [E] F H + /// .. / \ .... + /// [I] J + /// ``` + /// update(3, {new leaf}) would swap the leaf value at `[I]` and cause a recomputation of `[A]`, `[B]`, and `[E]`. + pub fn update(&mut self, index: usize, new_leaf: &L) -> Result<(), crate::Error> { + assert!(index < self.leaf_nodes.len(), "index out of range"); + let (updated_leaf_hash, mut updated_path) = self.updated_path(index, new_leaf)?; + self.leaf_nodes[index] = updated_leaf_hash; + let mut curr_index = convert_index_to_last_level(index, self.height); + for _ in 0..self.height - 1 { + curr_index = parent(curr_index).unwrap(); + self.non_leaf_nodes[curr_index] = updated_path.pop().unwrap(); + } + Ok(()) + } + + /// Update the leaf and check if the updated root is equal to `asserted_new_root`. + /// + /// Tree will not be modified if the check fails. + pub fn check_update( + &mut self, + index: usize, + new_leaf: &L, + asserted_new_root: &TwoToOneDigest

, + ) -> Result { + assert!(index < self.leaf_nodes.len(), "index out of range"); + let (updated_leaf_hash, mut updated_path) = self.updated_path(index, new_leaf)?; + if &updated_path[0] != asserted_new_root { + return Ok(false); + } + self.leaf_nodes[index] = updated_leaf_hash; + let mut curr_index = convert_index_to_last_level(index, self.height); + for _ in 0..self.height - 1 { + curr_index = parent(curr_index).unwrap(); + self.non_leaf_nodes[curr_index] = updated_path.pop().unwrap(); + } + Ok(true) + } +} + +/// Returns the height of the tree, given the number of leaves. +#[inline] +fn tree_height(num_leaves: usize) -> usize { + if num_leaves == 1 { + return 1; + } + + (ark_std::log2(num_leaves) as usize) + 1 +} +/// Returns true iff the index represents the root. +#[inline] +fn is_root(index: usize) -> bool { + index == 0 +} + +/// Returns the index of the left child, given an index. +#[inline] +fn left_child(index: usize) -> usize { + 2 * index + 1 +} + +/// Returns the index of the right child, given an index. +#[inline] +fn right_child(index: usize) -> usize { + 2 * index + 2 +} + +/// Returns the index of the sibling, given an index. +#[inline] +fn sibling(index: usize) -> Option { + if index == 0 { + None + } else if is_left_child(index) { + Some(index + 1) + } else { + Some(index - 1) + } +} + +/// Returns true iff the given index represents a left child. +#[inline] +fn is_left_child(index: usize) -> bool { + index % 2 == 1 +} + +/// Returns the index of the parent, given an index. +#[inline] +fn parent(index: usize) -> Option { + if index > 0 { + Some((index - 1) >> 1) + } else { + None + } +} + +#[inline] +fn convert_index_to_last_level(index: usize, tree_height: usize) -> usize { + index + (1 << (tree_height - 1)) - 1 +} + +#[cfg(test)] +mod tests { + use crate::{ + crh::{pedersen, *}, + merkle_tree::*, + }; + use ark_ed_on_bls12_381::EdwardsProjective as JubJub; + use ark_ff::{BigInteger256, ToBytes}; + use ark_std::{test_rng, UniformRand}; + + #[derive(Clone)] + pub(super) struct Window4x256; + impl pedersen::Window for Window4x256 { + const WINDOW_SIZE: usize = 4; + const NUM_WINDOWS: usize = 256; + } + + type H = pedersen::CRH; + + struct JubJubMerkleTreeParams; + + impl Config for JubJubMerkleTreeParams { + type LeafHash = H; + type TwoToOneHash = H; + } + type JubJubMerkleTree = MerkleTree; + + fn merkle_tree_test(leaves: &[L], update_query: &[(usize, L)]) -> () { + let mut rng = ark_std::test_rng(); + let mut leaves = leaves.to_vec(); + let leaf_crh_params = ::setup(&mut rng).unwrap(); + let two_to_one_crh_params = ::setup(&mut rng).unwrap(); + let mut tree = JubJubMerkleTree::new( + &leaf_crh_params.clone(), + &two_to_one_crh_params.clone(), + &leaves, + ) + .unwrap(); + let mut root = tree.root(); + // test merkle tree functionality without update + for (i, leaf) in leaves.iter().enumerate() { + let proof = tree.generate_proof(i).unwrap(); + assert!(proof + .verify(&leaf_crh_params, &two_to_one_crh_params, &root, &leaf) + .unwrap()); + } + + // test merkle tree update functionality + for (i, v) in update_query { + tree.update(*i, v); + leaves[*i] = v.clone(); + } + // update the root + root = tree.root(); + // verify again + for (i, leaf) in leaves.iter().enumerate() { + let proof = tree.generate_proof(i).unwrap(); + assert!(proof + .verify(&leaf_crh_params, &two_to_one_crh_params, &root, &leaf) + .unwrap()); + } + } + + #[test] + fn good_root_test() { + let mut rng = test_rng(); + + let mut leaves = Vec::new(); + for _ in 0..2u8 { + 
leaves.push(BigInteger256::rand(&mut rng)); + } + merkle_tree_test( + &leaves, + &vec![ + (0, BigInteger256::rand(&mut rng)), + (1, BigInteger256::rand(&mut rng)), + ], + ); + + let mut leaves = Vec::new(); + for _ in 0..4u8 { + leaves.push(BigInteger256::rand(&mut rng)); + } + merkle_tree_test(&leaves, &vec![(3, BigInteger256::rand(&mut rng))]); + + let mut leaves = Vec::new(); + for _ in 0..128u8 { + leaves.push(BigInteger256::rand(&mut rng)); + } + merkle_tree_test( + &leaves, + &vec![ + (2, BigInteger256::rand(&mut rng)), + (3, BigInteger256::rand(&mut rng)), + (5, BigInteger256::rand(&mut rng)), + (111, BigInteger256::rand(&mut rng)), + (127, BigInteger256::rand(&mut rng)), + ], + ); + } +} diff --git a/arkworks/crypto-primitives/src/prf/blake2s/constraints.rs b/arkworks/crypto-primitives/src/prf/blake2s/constraints.rs new file mode 100644 index 00000000..a7bd093a --- /dev/null +++ b/arkworks/crypto-primitives/src/prf/blake2s/constraints.rs @@ -0,0 +1,537 @@ +use ark_ff::PrimeField; +use ark_relations::r1cs::{ConstraintSystemRef, Namespace, SynthesisError}; + +use crate::{prf::PRFGadget, Vec}; +use ark_r1cs_std::prelude::*; + +use core::borrow::Borrow; + +// 2.1. Parameters +// The following table summarizes various parameters and their ranges: +// | BLAKE2b | BLAKE2s | +// --------------+------------------+------------------+ +// Bits in word | w = 64 | w = 32 | +// Rounds in F | r = 12 | r = 10 | +// Block bytes | bb = 128 | bb = 64 | +// Hash bytes | 1 <= nn <= 64 | 1 <= nn <= 32 | +// Key bytes | 0 <= kk <= 64 | 0 <= kk <= 32 | +// Input bytes | 0 <= ll < 2**128 | 0 <= ll < 2**64 | +// --------------+------------------+------------------+ +// G Rotation | (R1, R2, R3, R4) | (R1, R2, R3, R4) | +// constants = | (32, 24, 16, 63) | (16, 12, 8, 7) | +// --------------+------------------+------------------+ +// + +const R1: usize = 16; +const R2: usize = 12; +const R3: usize = 8; +const R4: usize = 7; + +// Round | 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 | +// ----------+-------------------------------------------------+ +// SIGMA[0] | 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 | +// SIGMA[1] | 14 10 4 8 9 15 13 6 1 12 0 2 11 7 5 3 | +// SIGMA[2] | 11 8 12 0 5 2 15 13 10 14 3 6 7 1 9 4 | +// SIGMA[3] | 7 9 3 1 13 12 11 14 2 6 5 10 4 0 15 8 | +// SIGMA[4] | 9 0 5 7 2 4 10 15 14 1 11 12 6 8 3 13 | +// SIGMA[5] | 2 12 6 10 0 11 8 3 4 13 7 5 15 14 1 9 | +// SIGMA[6] | 12 5 1 15 14 13 4 10 0 7 6 3 9 2 8 11 | +// SIGMA[7] | 13 11 7 14 12 1 3 9 5 0 15 4 8 6 2 10 | +// SIGMA[8] | 6 15 14 9 11 3 0 8 12 2 13 7 1 4 10 5 | +// SIGMA[9] | 10 2 8 4 7 6 1 5 15 11 9 14 3 12 13 0 | +// ----------+-------------------------------------------------+ +// + +const SIGMA: [[usize; 16]; 10] = [ + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], + [14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3], + [11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4], + [7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8], + [9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13], + [2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9], + [12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11], + [13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10], + [6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5], + [10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0], +]; + +// 3.1. Mixing Function G +// The G primitive function mixes two input words, "x" and "y", into +// four words indexed by "a", "b", "c", and "d" in the working vector +// v[0..15]. The full modified vector is returned. 
The rotation +// constants (R1, R2, R3, R4) are given in Section 2.1. +// FUNCTION G( v[0..15], a, b, c, d, x, y ) +// | +// | v[a] := (v[a] + v[b] + x) mod 2**w +// | v[d] := (v[d] ^ v[a]) >>> R1 +// | v[c] := (v[c] + v[d]) mod 2**w +// | v[b] := (v[b] ^ v[c]) >>> R2 +// | v[a] := (v[a] + v[b] + y) mod 2**w +// | v[d] := (v[d] ^ v[a]) >>> R3 +// | v[c] := (v[c] + v[d]) mod 2**w +// | v[b] := (v[b] ^ v[c]) >>> R4 +// | +// | RETURN v[0..15] +// | +// END FUNCTION. +// + +fn mixing_g( + v: &mut [UInt32], + a: usize, + b: usize, + c: usize, + d: usize, + x: &UInt32, + y: &UInt32, +) -> Result<(), SynthesisError> { + v[a] = UInt32::addmany(&[v[a].clone(), v[b].clone(), x.clone()])?; + v[d] = v[d].xor(&v[a])?.rotr(R1); + v[c] = UInt32::addmany(&[v[c].clone(), v[d].clone()])?; + v[b] = v[b].xor(&v[c])?.rotr(R2); + v[a] = UInt32::addmany(&[v[a].clone(), v[b].clone(), y.clone()])?; + v[d] = v[d].xor(&v[a])?.rotr(R3); + v[c] = UInt32::addmany(&[v[c].clone(), v[d].clone()])?; + v[b] = v[b].xor(&v[c])?.rotr(R4); + + Ok(()) +} + +// 3.2. Compression Function F +// Compression function F takes as an argument the state vector "h", +// message block vector "m" (last block is padded with zeros to full +// block size, if required), 2w-bit offset counter "t", and final block +// indicator flag "f". Local vector v[0..15] is used in processing. F +// returns a new state vector. The number of rounds, "r", is 12 for +// BLAKE2b and 10 for BLAKE2s. Rounds are numbered from 0 to r - 1. +// FUNCTION F( h[0..7], m[0..15], t, f ) +// | +// | // Initialize local work vector v[0..15] +// | v[0..7] := h[0..7] // First half from state. +// | v[8..15] := IV[0..7] // Second half from IV. +// | +// | v[12] := v[12] ^ (t mod 2**w) // Low word of the offset. +// | v[13] := v[13] ^ (t >> w) // High word. +// | +// | IF f = TRUE THEN // last block flag? +// | | v[14] := v[14] ^ 0xFF..FF // Invert all bits. +// | END IF. +// | +// | // Cryptographic mixing +// | FOR i = 0 TO r - 1 DO // Ten or twelve rounds. +// | | +// | | // Message word selection permutation for this round. +// | | s[0..15] := SIGMA[i mod 10][0..15] +// | | +// | | v := G( v, 0, 4, 8, 12, m[s[ 0]], m[s[ 1]] ) +// | | v := G( v, 1, 5, 9, 13, m[s[ 2]], m[s[ 3]] ) +// | | v := G( v, 2, 6, 10, 14, m[s[ 4]], m[s[ 5]] ) +// | | v := G( v, 3, 7, 11, 15, m[s[ 6]], m[s[ 7]] ) +// | | +// | | v := G( v, 0, 5, 10, 15, m[s[ 8]], m[s[ 9]] ) +// | | v := G( v, 1, 6, 11, 12, m[s[10]], m[s[11]] ) +// | | v := G( v, 2, 7, 8, 13, m[s[12]], m[s[13]] ) +// | | v := G( v, 3, 4, 9, 14, m[s[14]], m[s[15]] ) +// | | +// | END FOR +// | +// | FOR i = 0 TO 7 DO // XOR the two halves. +// | | h[i] := h[i] ^ v[i] ^ v[i + 8] +// | END FOR. +// | +// | RETURN h[0..7] // New state. +// | +// END FUNCTION. 
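// [Editorial sketch — not part of the diff] The same mixing step over plain `u32`s,
// useful for cross-checking the `mixing_g` gadget above against the BLAKE2s spec:
//
//     fn mixing_g_native(v: &mut [u32; 16], a: usize, b: usize, c: usize, d: usize, x: u32, y: u32) {
//         v[a] = v[a].wrapping_add(v[b]).wrapping_add(x);
//         v[d] = (v[d] ^ v[a]).rotate_right(16); // R1
//         v[c] = v[c].wrapping_add(v[d]);
//         v[b] = (v[b] ^ v[c]).rotate_right(12); // R2
//         v[a] = v[a].wrapping_add(v[b]).wrapping_add(y);
//         v[d] = (v[d] ^ v[a]).rotate_right(8);  // R3
//         v[c] = v[c].wrapping_add(v[d]);
//         v[b] = (v[b] ^ v[c]).rotate_right(7);  // R4
//     }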
+// + +fn blake2s_compression( + h: &mut [UInt32], + m: &[UInt32], + t: u64, + f: bool, +) -> Result<(), SynthesisError> { + assert_eq!(h.len(), 8); + assert_eq!(m.len(), 16); + + // static const uint32_t blake2s_iv[8] = + // { + // 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, + // 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19 + // }; + // + + let mut v = Vec::with_capacity(16); + v.extend_from_slice(h); + v.push(UInt32::constant(0x6A09E667)); + v.push(UInt32::constant(0xBB67AE85)); + v.push(UInt32::constant(0x3C6EF372)); + v.push(UInt32::constant(0xA54FF53A)); + v.push(UInt32::constant(0x510E527F)); + v.push(UInt32::constant(0x9B05688C)); + v.push(UInt32::constant(0x1F83D9AB)); + v.push(UInt32::constant(0x5BE0CD19)); + + assert_eq!(v.len(), 16); + + v[12] = v[12].xor(&UInt32::constant(t as u32))?; + v[13] = v[13].xor(&UInt32::constant((t >> 32) as u32))?; + + if f { + v[14] = v[14].xor(&UInt32::constant(u32::max_value()))?; + } + + for i in 0..10 { + let s = SIGMA[i % 10]; + + mixing_g(&mut v, 0, 4, 8, 12, &m[s[0]], &m[s[1]])?; + mixing_g(&mut v, 1, 5, 9, 13, &m[s[2]], &m[s[3]])?; + mixing_g(&mut v, 2, 6, 10, 14, &m[s[4]], &m[s[5]])?; + mixing_g(&mut v, 3, 7, 11, 15, &m[s[6]], &m[s[7]])?; + mixing_g(&mut v, 0, 5, 10, 15, &m[s[8]], &m[s[9]])?; + mixing_g(&mut v, 1, 6, 11, 12, &m[s[10]], &m[s[11]])?; + mixing_g(&mut v, 2, 7, 8, 13, &m[s[12]], &m[s[13]])?; + mixing_g(&mut v, 3, 4, 9, 14, &m[s[14]], &m[s[15]])?; + } + + for i in 0..8 { + h[i] = h[i].xor(&v[i])?; + h[i] = h[i].xor(&v[i + 8])?; + } + + Ok(()) +} + +// FUNCTION BLAKE2( d[0..dd-1], ll, kk, nn ) +// | +// | h[0..7] := IV[0..7] // Initialization Vector. +// | +// | // Parameter block p[0] +// | h[0] := h[0] ^ 0x01010000 ^ (kk << 8) ^ nn +// | +// | // Process padded key and data blocks +// | IF dd > 1 THEN +// | | FOR i = 0 TO dd - 2 DO +// | | | h := F( h, d[i], (i + 1) * bb, FALSE ) +// | | END FOR. +// | END IF. +// | +// | // Final block. +// | IF kk = 0 THEN +// | | h := F( h, d[dd - 1], ll, TRUE ) +// | ELSE +// | | h := F( h, d[dd - 1], ll + bb, TRUE ) +// | END IF. +// | +// | RETURN first "nn" bytes from little-endian word array h[]. +// | +// END FUNCTION. 
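// [Editorial note] For the common unkeyed, 32-byte-digest case (kk = 0, nn = 32), the
// spec line "h[0] := h[0] ^ 0x01010000 ^ (kk << 8) ^ nn" above yields the constant
// that `evaluate_blake2s` below bakes into its parameter block:
//
//     assert_eq!(0x01010000u32 ^ (0 << 8) ^ 32, 0x0101_0020);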
+// + +pub fn evaluate_blake2s( + input: &[Boolean], +) -> Result>, SynthesisError> { + assert!(input.len() % 8 == 0); + let mut parameters = [0; 8]; + parameters[0] = 0x01010000 ^ 32; + evaluate_blake2s_with_parameters(input, ¶meters) +} + +pub fn evaluate_blake2s_with_parameters( + input: &[Boolean], + parameters: &[u32; 8], +) -> Result>, SynthesisError> { + assert!(input.len() % 8 == 0); + + let mut h = Vec::with_capacity(8); + h.push(UInt32::constant(0x6A09E667).xor(&UInt32::constant(parameters[0]))?); + h.push(UInt32::constant(0xBB67AE85).xor(&UInt32::constant(parameters[1]))?); + h.push(UInt32::constant(0x3C6EF372).xor(&UInt32::constant(parameters[2]))?); + h.push(UInt32::constant(0xA54FF53A).xor(&UInt32::constant(parameters[3]))?); + h.push(UInt32::constant(0x510E527F).xor(&UInt32::constant(parameters[4]))?); + h.push(UInt32::constant(0x9B05688C).xor(&UInt32::constant(parameters[5]))?); + h.push(UInt32::constant(0x1F83D9AB).xor(&UInt32::constant(parameters[6]))?); + h.push(UInt32::constant(0x5BE0CD19).xor(&UInt32::constant(parameters[7]))?); + + let mut blocks: Vec>> = vec![]; + + for block in input.chunks(512) { + let mut this_block = Vec::with_capacity(16); + for word in block.chunks(32) { + let mut tmp = word.to_vec(); + while tmp.len() < 32 { + tmp.push(Boolean::constant(false)); + } + this_block.push(UInt32::from_bits_le(&tmp)); + } + while this_block.len() < 16 { + this_block.push(UInt32::constant(0)); + } + blocks.push(this_block); + } + + if blocks.is_empty() { + blocks.push((0..16).map(|_| UInt32::constant(0)).collect()); + } + + for (i, block) in blocks[0..blocks.len() - 1].iter().enumerate() { + blake2s_compression(&mut h, block, ((i as u64) + 1) * 64, false)?; + } + + blake2s_compression( + &mut h, + &blocks[blocks.len() - 1], + (input.len() / 8) as u64, + true, + )?; + + Ok(h) +} + +use crate::prf::Blake2s; + +pub struct Blake2sGadget; +#[derive(Clone, Debug)] +pub struct OutputVar(pub Vec>); + +impl EqGadget for OutputVar { + #[tracing::instrument(target = "r1cs")] + fn is_eq(&self, other: &Self) -> Result, SynthesisError> { + self.0.is_eq(&other.0) + } + + /// If `should_enforce == true`, enforce that `self` and `other` are equal; + /// else, enforce a vacuously true statement. + #[tracing::instrument(target = "r1cs")] + fn conditional_enforce_equal( + &self, + other: &Self, + should_enforce: &Boolean, + ) -> Result<(), SynthesisError> { + self.0.conditional_enforce_equal(&other.0, should_enforce) + } + + /// If `should_enforce == true`, enforce that `self` and `other` are not + /// equal; else, enforce a vacuously true statement. 
+ #[tracing::instrument(target = "r1cs")] + fn conditional_enforce_not_equal( + &self, + other: &Self, + should_enforce: &Boolean, + ) -> Result<(), SynthesisError> { + self.0 + .as_slice() + .conditional_enforce_not_equal(other.0.as_slice(), should_enforce) + } +} + +impl ToBytesGadget for OutputVar { + #[inline] + fn to_bytes(&self) -> Result>, SynthesisError> { + Ok(self.0.clone()) + } +} + +impl AllocVar<[u8; 32], ConstraintF> for OutputVar { + #[tracing::instrument(target = "r1cs", skip(cs, f))] + fn new_variable>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let bytes = f().map(|b| *b.borrow()).unwrap_or([0u8; 32]); + match mode { + AllocationMode::Constant => Ok(Self(UInt8::constant_vec(&bytes))), + AllocationMode::Input => UInt8::new_input_vec(cs, &bytes).map(Self), + AllocationMode::Witness => UInt8::new_witness_vec(cs, &bytes).map(Self), + } + } +} + +impl R1CSVar for OutputVar { + type Value = [u8; 32]; + + fn cs(&self) -> ConstraintSystemRef { + self.0.cs() + } + + fn value(&self) -> Result { + let mut value = [0u8; 32]; + for (val_i, self_i) in value.iter_mut().zip(&self.0) { + *val_i = self_i.value()?; + } + Ok(value) + } +} + +impl PRFGadget for Blake2sGadget { + type OutputVar = OutputVar; + + #[tracing::instrument(target = "r1cs", skip(cs))] + fn new_seed(cs: impl Into>, seed: &[u8; 32]) -> Vec> { + let ns = cs.into(); + let cs = ns.cs(); + UInt8::new_witness_vec(ark_relations::ns!(cs, "New Blake2s seed"), seed).unwrap() + } + + #[tracing::instrument(target = "r1cs", skip(seed, input))] + fn evaluate(seed: &[UInt8], input: &[UInt8]) -> Result { + assert_eq!(seed.len(), 32); + let input: Vec<_> = seed + .iter() + .chain(input) + .flat_map(|b| b.to_bits_le().unwrap()) + .collect(); + let result: Vec<_> = evaluate_blake2s(&input)? 
+ .into_iter() + .flat_map(|int| int.to_bytes().unwrap()) + .collect(); + Ok(OutputVar(result)) + } +} + +#[cfg(test)] +mod test { + use ark_ed_on_bls12_381::Fq as Fr; + use ark_std::rand::Rng; + + use crate::prf::blake2s::{constraints::evaluate_blake2s, Blake2s as B2SPRF}; + use ark_relations::r1cs::ConstraintSystem; + use blake2::VarBlake2s; + + use super::Blake2sGadget; + use ark_r1cs_std::prelude::*; + + #[test] + fn test_blake2s_constraints() { + let cs = ConstraintSystem::::new_ref(); + let input_bits: Vec<_> = (0..512) + .map(|_| { + Boolean::new_witness(ark_relations::ns!(cs, "input bit"), || Ok(true)).unwrap() + }) + .collect(); + evaluate_blake2s(&input_bits).unwrap(); + assert!(cs.is_satisfied().unwrap()); + assert_eq!(cs.num_constraints(), 21792); + } + + #[test] + fn test_blake2s_prf() { + use crate::prf::{PRFGadget, PRF}; + + let mut rng = ark_std::test_rng(); + let cs = ConstraintSystem::::new_ref(); + + let mut seed = [0u8; 32]; + rng.fill(&mut seed); + + let mut input = [0u8; 32]; + rng.fill(&mut input); + + let seed_var = Blake2sGadget::new_seed(cs.clone(), &seed); + let input_var = + UInt8::new_witness_vec(ark_relations::ns!(cs, "declare_input"), &input).unwrap(); + let out = B2SPRF::evaluate(&seed, &input).unwrap(); + let actual_out_var = >::OutputVar::new_witness( + ark_relations::ns!(cs, "declare_output"), + || Ok(out), + ) + .unwrap(); + + let output_var = Blake2sGadget::evaluate(&seed_var, &input_var).unwrap(); + output_var.enforce_equal(&actual_out_var).unwrap(); + + if !cs.is_satisfied().unwrap() { + println!( + "which is unsatisfied: {:?}", + cs.which_is_unsatisfied().unwrap() + ); + } + assert!(cs.is_satisfied().unwrap()); + } + + #[test] + fn test_blake2s_precomp_constraints() { + // Test that 512 fixed leading bits (constants) + // doesn't result in more constraints. 
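+        // (Editorial note: operations whose inputs are all `Boolean::Constant`s are
+        // folded at circuit-construction time and allocate no variables, so the
+        // all-constant first block contributes nothing and the count matches the
+        // single-block test above.)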
+ + let cs = ConstraintSystem::::new_ref(); + let mut rng = ark_std::test_rng(); + let input_bits: Vec<_> = (0..512) + .map(|_| Boolean::constant(rng.gen())) + .chain((0..512).map(|_| { + Boolean::new_witness(ark_relations::ns!(cs, "input bit"), || Ok(true)).unwrap() + })) + .collect(); + evaluate_blake2s(&input_bits).unwrap(); + assert!(cs.is_satisfied().unwrap()); + assert_eq!(cs.num_constraints(), 21792); + } + + #[test] + fn test_blake2s_constant_constraints() { + let cs = ConstraintSystem::::new_ref(); + let mut rng = ark_std::test_rng(); + let input_bits: Vec<_> = (0..512) + .map(|_| Boolean::::constant(rng.gen())) + .collect(); + evaluate_blake2s(&input_bits).unwrap(); + assert_eq!(cs.num_constraints(), 0); + } + + #[test] + fn test_blake2s() { + let mut rng = ark_std::test_rng(); + + for input_len in (0..32).chain((32..256).filter(|a| a % 8 == 0)) { + use digest::*; + let mut h = VarBlake2s::new_keyed(&[], 32); + + let data: Vec = (0..input_len).map(|_| rng.gen()).collect(); + + h.update(&data); + + let mut hash_result = Vec::with_capacity(digest::VariableOutput::output_size(&h)); + h.finalize_variable(|res| hash_result.extend_from_slice(res)); + + let cs = ConstraintSystem::::new_ref(); + + let mut input_bits = vec![]; + + for input_byte in data.into_iter() { + for bit_i in 0..8 { + let cs = ark_relations::ns!(cs, "input bit"); + + input_bits.push( + Boolean::new_witness(cs, || Ok((input_byte >> bit_i) & 1u8 == 1u8)) + .unwrap(), + ); + } + } + + let r = evaluate_blake2s(&input_bits).unwrap(); + + assert!(cs.is_satisfied().unwrap()); + + let mut s = hash_result + .iter() + .flat_map(|&byte| (0..8).map(move |i| (byte >> i) & 1u8 == 1u8)); + + for chunk in r { + for b in chunk.to_bits_le() { + match b { + Boolean::Is(b) => { + assert!(s.next().unwrap() == b.value().unwrap()); + } + Boolean::Not(b) => { + assert!(s.next().unwrap() != b.value().unwrap()); + } + Boolean::Constant(b) => { + assert!(input_len == 0); + assert!(s.next().unwrap() == b); + } + } + } + } + } + } +} diff --git a/arkworks/crypto-primitives/src/prf/blake2s/mod.rs b/arkworks/crypto-primitives/src/prf/blake2s/mod.rs new file mode 100644 index 00000000..085c3a22 --- /dev/null +++ b/arkworks/crypto-primitives/src/prf/blake2s/mod.rs @@ -0,0 +1,88 @@ +use crate::Vec; +use ark_std::convert::TryFrom; +use blake2::{Blake2s as B2s, VarBlake2s}; +use digest::Digest; + +use super::PRF; +use crate::CryptoError; + +#[cfg(feature = "r1cs")] +pub mod constraints; + +#[derive(Clone)] +pub struct Blake2s; + +impl PRF for Blake2s { + type Input = [u8; 32]; + type Output = [u8; 32]; + type Seed = [u8; 32]; + + fn evaluate(seed: &Self::Seed, input: &Self::Input) -> Result { + let eval_time = start_timer!(|| "Blake2s::Eval"); + let mut h = B2s::new(); + h.update(seed.as_ref()); + h.update(input.as_ref()); + let mut result = [0u8; 32]; + result.copy_from_slice(&h.finalize()); + end_timer!(eval_time); + Ok(result) + } +} + +#[derive(Clone)] +pub struct Blake2sWithParameterBlock { + pub digest_length: u8, + pub key_length: u8, + pub fan_out: u8, + pub depth: u8, + pub leaf_length: u32, + pub node_offset: u32, + pub xof_digest_length: u16, + pub node_depth: u8, + pub inner_length: u8, + pub salt: [u8; 8], + pub personalization: [u8; 8], +} + +impl Blake2sWithParameterBlock { + pub fn parameters(&self) -> [u32; 8] { + let mut parameters = [0; 8]; + parameters[0] = u32::from_le_bytes([ + self.digest_length, + self.key_length, + self.fan_out, + self.depth, + ]); + parameters[1] = self.leaf_length; + parameters[2] = self.node_offset; + 
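+        // (Editorial note: words 0..=3 pack the first 16 bytes of the BLAKE2s
+        // parameter block in little-endian order; word 3 below combines the 16-bit
+        // `xof_digest_length` with `node_depth` and `inner_length`.)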
parameters[3] = u32::from_le_bytes([ + self.xof_digest_length as u8, + (self.xof_digest_length >> 8) as u8, + self.node_depth, + self.inner_length, + ]); + + let salt_bytes_1 = <[u8; 4]>::try_from(&self.salt[0..4]).unwrap(); + let salt_bytes_2 = <[u8; 4]>::try_from(&self.salt[4..8]).unwrap(); + let personalization_bytes_1 = <[u8; 4]>::try_from(&self.personalization[0..4]).unwrap(); + let personalization_bytes_2 = <[u8; 4]>::try_from(&self.personalization[4..8]).unwrap(); + + parameters[4] = u32::from_le_bytes(salt_bytes_1); + parameters[5] = u32::from_le_bytes(salt_bytes_2); + parameters[6] = u32::from_le_bytes(personalization_bytes_1); + parameters[7] = u32::from_le_bytes(personalization_bytes_2); + + parameters + } + + pub fn evaluate(&self, input: &[u8]) -> Vec { + use digest::*; + let eval_time = start_timer!(|| "Blake2sWithParameterBlock::Eval"); + let mut h = VarBlake2s::with_parameter_block(&self.parameters()); + h.update(input.as_ref()); + end_timer!(eval_time); + let mut buf = Vec::with_capacity(digest::VariableOutput::output_size(&h)); + h.finalize_variable(|res| buf.extend_from_slice(res)); + buf + } +} diff --git a/arkworks/crypto-primitives/src/prf/constraints.rs b/arkworks/crypto-primitives/src/prf/constraints.rs new file mode 100644 index 00000000..bbca88c7 --- /dev/null +++ b/arkworks/crypto-primitives/src/prf/constraints.rs @@ -0,0 +1,20 @@ +use ark_ff::Field; +use core::fmt::Debug; + +use crate::{prf::PRF, Vec}; +use ark_relations::r1cs::{Namespace, SynthesisError}; + +use ark_r1cs_std::prelude::*; + +pub trait PRFGadget { + type OutputVar: EqGadget + + ToBytesGadget + + AllocVar + + R1CSVar + + Clone + + Debug; + + fn new_seed(cs: impl Into>, seed: &P::Seed) -> Vec>; + + fn evaluate(seed: &[UInt8], input: &[UInt8]) -> Result; +} diff --git a/arkworks/crypto-primitives/src/prf/mod.rs b/arkworks/crypto-primitives/src/prf/mod.rs new file mode 100644 index 00000000..261bae15 --- /dev/null +++ b/arkworks/crypto-primitives/src/prf/mod.rs @@ -0,0 +1,22 @@ +#![allow(clippy::upper_case_acronyms)] + +use ark_ff::bytes::{FromBytes, ToBytes}; +use core::{fmt::Debug, hash::Hash}; + +use crate::CryptoError; + +#[cfg(feature = "r1cs")] +pub mod constraints; +#[cfg(feature = "r1cs")] +pub use constraints::*; + +pub mod blake2s; +pub use self::blake2s::*; + +pub trait PRF { + type Input: FromBytes + Default; + type Output: ToBytes + Eq + Clone + Debug + Default + Hash; + type Seed: FromBytes + ToBytes + Clone + Default + Debug; + + fn evaluate(seed: &Self::Seed, input: &Self::Input) -> Result; +} diff --git a/arkworks/crypto-primitives/src/signature/constraints.rs b/arkworks/crypto-primitives/src/signature/constraints.rs new file mode 100644 index 00000000..a669c0ad --- /dev/null +++ b/arkworks/crypto-primitives/src/signature/constraints.rs @@ -0,0 +1,36 @@ +use ark_ff::Field; +use ark_r1cs_std::prelude::*; +use ark_relations::r1cs::SynthesisError; + +use crate::signature::SignatureScheme; + +pub trait SigVerifyGadget { + type ParametersVar: AllocVar + Clone; + + type PublicKeyVar: ToBytesGadget + AllocVar + Clone; + + type SignatureVar: ToBytesGadget + AllocVar + Clone; + + fn verify( + parameters: &Self::ParametersVar, + public_key: &Self::PublicKeyVar, + // TODO: Should we make this take in bytes or something different? 
+ message: &[UInt8], + signature: &Self::SignatureVar, + ) -> Result, SynthesisError>; +} + +pub trait SigRandomizePkGadget { + type ParametersVar: AllocVar + Clone; + + type PublicKeyVar: ToBytesGadget + + EqGadget + + AllocVar + + Clone; + + fn randomize( + parameters: &Self::ParametersVar, + public_key: &Self::PublicKeyVar, + randomness: &[UInt8], + ) -> Result; +} diff --git a/arkworks/crypto-primitives/src/signature/mod.rs b/arkworks/crypto-primitives/src/signature/mod.rs new file mode 100644 index 00000000..0b3babc9 --- /dev/null +++ b/arkworks/crypto-primitives/src/signature/mod.rs @@ -0,0 +1,104 @@ +use crate::Error; +use ark_ff::bytes::ToBytes; +use ark_std::hash::Hash; +use ark_std::rand::Rng; + +#[cfg(feature = "r1cs")] +pub mod constraints; +#[cfg(feature = "r1cs")] +pub use constraints::*; + +pub mod schnorr; + +pub trait SignatureScheme { + type Parameters: Clone + Send + Sync; + type PublicKey: ToBytes + Hash + Eq + Clone + Default + Send + Sync; + type SecretKey: ToBytes + Clone + Default; + type Signature: Clone + Default + Send + Sync; + + fn setup(rng: &mut R) -> Result; + + fn keygen( + pp: &Self::Parameters, + rng: &mut R, + ) -> Result<(Self::PublicKey, Self::SecretKey), Error>; + + fn sign( + pp: &Self::Parameters, + sk: &Self::SecretKey, + message: &[u8], + rng: &mut R, + ) -> Result; + + fn verify( + pp: &Self::Parameters, + pk: &Self::PublicKey, + message: &[u8], + signature: &Self::Signature, + ) -> Result; + + fn randomize_public_key( + pp: &Self::Parameters, + public_key: &Self::PublicKey, + randomness: &[u8], + ) -> Result; + + fn randomize_signature( + pp: &Self::Parameters, + signature: &Self::Signature, + randomness: &[u8], + ) -> Result; +} + +#[cfg(test)] +mod test { + use crate::signature::{schnorr, *}; + use ark_ec::group::Group; + use ark_ed_on_bls12_381::EdwardsProjective as JubJub; + use ark_ff::to_bytes; + use ark_std::{test_rng, UniformRand}; + use blake2::Blake2s; + + fn sign_and_verify(message: &[u8]) { + let rng = &mut test_rng(); + let parameters = S::setup::<_>(rng).unwrap(); + let (pk, sk) = S::keygen(¶meters, rng).unwrap(); + let sig = S::sign(¶meters, &sk, &message, rng).unwrap(); + assert!(S::verify(¶meters, &pk, &message, &sig).unwrap()); + } + + fn failed_verification(message: &[u8], bad_message: &[u8]) { + let rng = &mut test_rng(); + let parameters = S::setup::<_>(rng).unwrap(); + let (pk, sk) = S::keygen(¶meters, rng).unwrap(); + let sig = S::sign(¶meters, &sk, message, rng).unwrap(); + assert!(!S::verify(¶meters, &pk, bad_message, &sig).unwrap()); + } + + fn randomize_and_verify(message: &[u8], randomness: &[u8]) { + let rng = &mut test_rng(); + let parameters = S::setup::<_>(rng).unwrap(); + let (pk, sk) = S::keygen(¶meters, rng).unwrap(); + let sig = S::sign(¶meters, &sk, message, rng).unwrap(); + assert!(S::verify(¶meters, &pk, message, &sig).unwrap()); + let randomized_pk = S::randomize_public_key(¶meters, &pk, randomness).unwrap(); + let randomized_sig = S::randomize_signature(¶meters, &sig, randomness).unwrap(); + assert!(S::verify(¶meters, &randomized_pk, &message, &randomized_sig).unwrap()); + } + + #[test] + fn schnorr_signature_test() { + let message = "Hi, I am a Schnorr signature!"; + let rng = &mut test_rng(); + sign_and_verify::>(message.as_bytes()); + failed_verification::>( + message.as_bytes(), + "Bad message".as_bytes(), + ); + let random_scalar = to_bytes!(::ScalarField::rand(rng)).unwrap(); + randomize_and_verify::>( + message.as_bytes(), + &random_scalar.as_slice(), + ); + } +} diff --git 
a/arkworks/crypto-primitives/src/signature/schnorr/constraints.rs b/arkworks/crypto-primitives/src/signature/schnorr/constraints.rs new file mode 100644 index 00000000..0e86c2af --- /dev/null +++ b/arkworks/crypto-primitives/src/signature/schnorr/constraints.rs @@ -0,0 +1,159 @@ +use crate::Vec; +use ark_ec::ProjectiveCurve; +use ark_ff::Field; +use ark_r1cs_std::prelude::*; +use ark_relations::r1cs::{Namespace, SynthesisError}; + +use crate::signature::SigRandomizePkGadget; + +use core::{borrow::Borrow, marker::PhantomData}; + +use crate::signature::schnorr::{Parameters, PublicKey, Schnorr}; +use digest::Digest; + +type ConstraintF = <::BaseField as Field>::BasePrimeField; + +#[derive(Clone)] +pub struct ParametersVar>> +where + for<'a> &'a GC: GroupOpsBounds<'a, C, GC>, +{ + generator: GC, + _curve: PhantomData, +} + +#[derive(Derivative)] +#[derivative( + Debug(bound = "C: ProjectiveCurve, GC: CurveVar>"), + Clone(bound = "C: ProjectiveCurve, GC: CurveVar>") +)] +pub struct PublicKeyVar>> +where + for<'a> &'a GC: GroupOpsBounds<'a, C, GC>, +{ + pub_key: GC, + #[doc(hidden)] + _group: PhantomData<*const C>, +} + +pub struct SchnorrRandomizePkGadget>> +where + for<'a> &'a GC: GroupOpsBounds<'a, C, GC>, +{ + #[doc(hidden)] + _group: PhantomData<*const C>, + #[doc(hidden)] + _group_gadget: PhantomData<*const GC>, +} + +impl SigRandomizePkGadget, ConstraintF> + for SchnorrRandomizePkGadget +where + C: ProjectiveCurve, + GC: CurveVar>, + D: Digest + Send + Sync, + for<'a> &'a GC: GroupOpsBounds<'a, C, GC>, +{ + type ParametersVar = ParametersVar; + type PublicKeyVar = PublicKeyVar; + + #[tracing::instrument(target = "r1cs", skip(parameters, public_key, randomness))] + fn randomize( + parameters: &Self::ParametersVar, + public_key: &Self::PublicKeyVar, + randomness: &[UInt8>], + ) -> Result { + let base = parameters.generator.clone(); + let randomness = randomness + .iter() + .flat_map(|b| b.to_bits_le().unwrap()) + .collect::>(); + let rand_pk = &public_key.pub_key + &base.scalar_mul_le(randomness.iter())?; + Ok(PublicKeyVar { + pub_key: rand_pk, + _group: PhantomData, + }) + } +} + +impl AllocVar, ConstraintF> for ParametersVar +where + C: ProjectiveCurve, + GC: CurveVar>, + D: Digest, + for<'a> &'a GC: GroupOpsBounds<'a, C, GC>, +{ + fn new_variable>>( + cs: impl Into>>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let generator = GC::new_variable(cs, || f().map(|g| g.borrow().generator), mode)?; + Ok(Self { + generator, + _curve: PhantomData, + }) + } +} + +impl AllocVar, ConstraintF> for PublicKeyVar +where + C: ProjectiveCurve, + GC: CurveVar>, + for<'a> &'a GC: GroupOpsBounds<'a, C, GC>, +{ + fn new_variable>>( + cs: impl Into>>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let pub_key = GC::new_variable(cs, f, mode)?; + Ok(Self { + pub_key, + _group: PhantomData, + }) + } +} + +impl EqGadget> for PublicKeyVar +where + C: ProjectiveCurve, + GC: CurveVar>, + for<'a> &'a GC: GroupOpsBounds<'a, C, GC>, +{ + #[inline] + fn is_eq(&self, other: &Self) -> Result>, SynthesisError> { + self.pub_key.is_eq(&other.pub_key) + } + + #[inline] + fn conditional_enforce_equal( + &self, + other: &Self, + condition: &Boolean>, + ) -> Result<(), SynthesisError> { + self.pub_key + .conditional_enforce_equal(&other.pub_key, condition) + } + + #[inline] + fn conditional_enforce_not_equal( + &self, + other: &Self, + condition: &Boolean>, + ) -> Result<(), SynthesisError> { + self.pub_key + .conditional_enforce_not_equal(&other.pub_key, condition) + } +} + 
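The `randomize` gadget above enforces in-circuit the same operation that the native scheme applies to public keys. An off-circuit sketch under the arkworks 0.3 curve traits (the helper name is ours, for illustration only):

```rust
// Sketch: the native computation mirrored by SchnorrRandomizePkGadget::randomize.
// pk' = pk + r·G, so if pk = x·G then the randomized key's secret is x + r.
use ark_ec::{AffineCurve, ProjectiveCurve};
use ark_ff::PrimeField;

fn randomize_pk_native<C: ProjectiveCurve>(
    generator: C::Affine,
    pk: C::Affine,
    r: C::ScalarField,
) -> C::Affine {
    (pk.into_projective() + generator.mul(r.into_repr())).into_affine()
}
```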
+impl ToBytesGadget> for PublicKeyVar +where + C: ProjectiveCurve, + GC: CurveVar>, + for<'a> &'a GC: GroupOpsBounds<'a, C, GC>, +{ + fn to_bytes(&self) -> Result>>, SynthesisError> { + self.pub_key.to_bytes() + } +} diff --git a/arkworks/crypto-primitives/src/signature/schnorr/mod.rs b/arkworks/crypto-primitives/src/signature/schnorr/mod.rs new file mode 100644 index 00000000..62104cf3 --- /dev/null +++ b/arkworks/crypto-primitives/src/signature/schnorr/mod.rs @@ -0,0 +1,233 @@ +use crate::{Error, SignatureScheme, Vec}; +use ark_ec::{AffineCurve, ProjectiveCurve}; +use ark_ff::{ + bytes::ToBytes, + fields::{Field, PrimeField}, + to_bytes, One, ToConstraintField, UniformRand, Zero, +}; +use ark_std::io::{Result as IoResult, Write}; +use ark_std::rand::Rng; +use ark_std::{hash::Hash, marker::PhantomData}; +use digest::Digest; + +#[cfg(feature = "r1cs")] +pub mod constraints; + +pub struct Schnorr { + _group: PhantomData, + _hash: PhantomData, +} + +#[derive(Derivative)] +#[derivative(Clone(bound = "C: ProjectiveCurve, H: Digest"), Debug)] +pub struct Parameters { + _hash: PhantomData, + pub generator: C::Affine, + pub salt: [u8; 32], +} + +pub type PublicKey = ::Affine; + +#[derive(Clone, Default, Debug)] +pub struct SecretKey(pub C::ScalarField); + +impl ToBytes for SecretKey { + #[inline] + fn write(&self, writer: W) -> IoResult<()> { + self.0.write(writer) + } +} + +#[derive(Clone, Default, Debug)] +pub struct Signature { + pub prover_response: C::ScalarField, + pub verifier_challenge: C::ScalarField, +} + +impl SignatureScheme for Schnorr +where + C::ScalarField: PrimeField, +{ + type Parameters = Parameters; + type PublicKey = PublicKey; + type SecretKey = SecretKey; + type Signature = Signature; + + fn setup(rng: &mut R) -> Result { + let setup_time = start_timer!(|| "SchnorrSig::Setup"); + + let mut salt = [0u8; 32]; + rng.fill_bytes(&mut salt); + let generator = C::rand(rng).into(); + + end_timer!(setup_time); + Ok(Parameters { + _hash: PhantomData, + generator, + salt, + }) + } + + fn keygen( + parameters: &Self::Parameters, + rng: &mut R, + ) -> Result<(Self::PublicKey, Self::SecretKey), Error> { + let keygen_time = start_timer!(|| "SchnorrSig::KeyGen"); + + let secret_key = C::ScalarField::rand(rng); + let public_key = parameters.generator.mul(secret_key.into_repr()).into(); + + end_timer!(keygen_time); + Ok((public_key, SecretKey(secret_key))) + } + + fn sign( + parameters: &Self::Parameters, + sk: &Self::SecretKey, + message: &[u8], + rng: &mut R, + ) -> Result { + let sign_time = start_timer!(|| "SchnorrSig::Sign"); + // (k, e); + let (random_scalar, verifier_challenge) = loop { + // Sample a random scalar `k` from the prime scalar field. + let random_scalar: C::ScalarField = C::ScalarField::rand(rng); + // Commit to the random scalar via r := k · G. + // This is the prover's first msg in the Sigma protocol. + let prover_commitment = parameters + .generator + .mul(random_scalar.into_repr()) + .into_affine(); + + // Hash everything to get verifier challenge. 
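+            // Why this works: the signature is (s, e) with s = k - e·x and pk = x·G.
+            // The verifier recomputes r' = s·G + e·pk = (k - e·x)·G + e·(x·G) = k·G = r,
+            // so H(salt || r' || msg) equals e exactly for a valid signature.
+            // Randomization (further below) is compatible: pk -> pk + Δ·G together
+            // with s -> s - e·Δ leaves s·G + e·pk, and hence r and e, unchanged.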
+ let mut hash_input = Vec::new(); + hash_input.extend_from_slice(¶meters.salt); + hash_input.extend_from_slice(&to_bytes![prover_commitment]?); + hash_input.extend_from_slice(message); + + // Compute the supposed verifier response: e := H(salt || r || msg); + if let Some(verifier_challenge) = + C::ScalarField::from_random_bytes(&D::digest(&hash_input)) + { + break (random_scalar, verifier_challenge); + }; + }; + + // k - xe; + let prover_response = random_scalar - (verifier_challenge * sk.0); + let signature = Signature { + prover_response, + verifier_challenge, + }; + + end_timer!(sign_time); + Ok(signature) + } + + fn verify( + parameters: &Self::Parameters, + pk: &Self::PublicKey, + message: &[u8], + signature: &Self::Signature, + ) -> Result { + let verify_time = start_timer!(|| "SchnorrSig::Verify"); + + let Signature { + prover_response, + verifier_challenge, + } = signature; + let mut claimed_prover_commitment = parameters.generator.mul(prover_response.into_repr()); + let public_key_times_verifier_challenge = pk.mul(verifier_challenge.into_repr()); + claimed_prover_commitment += &public_key_times_verifier_challenge; + let claimed_prover_commitment = claimed_prover_commitment.into_affine(); + + let mut hash_input = Vec::new(); + hash_input.extend_from_slice(¶meters.salt); + hash_input.extend_from_slice(&to_bytes![claimed_prover_commitment]?); + hash_input.extend_from_slice(&message); + + let obtained_verifier_challenge = if let Some(obtained_verifier_challenge) = + C::ScalarField::from_random_bytes(&D::digest(&hash_input)) + { + obtained_verifier_challenge + } else { + return Ok(false); + }; + end_timer!(verify_time); + Ok(verifier_challenge == &obtained_verifier_challenge) + } + + fn randomize_public_key( + parameters: &Self::Parameters, + public_key: &Self::PublicKey, + randomness: &[u8], + ) -> Result { + let rand_pk_time = start_timer!(|| "SchnorrSig::RandomizePubKey"); + + let randomized_pk = *public_key; + let base = parameters.generator; + let mut encoded = C::zero(); + for bit in bytes_to_bits(randomness) + .into_iter() + .rev() + .skip_while(|b| !b) + { + encoded.double_in_place(); + if bit { + encoded.add_assign_mixed(&base) + } + } + encoded.add_assign_mixed(&randomized_pk); + + end_timer!(rand_pk_time); + + Ok(encoded.into()) + } + + fn randomize_signature( + _parameter: &Self::Parameters, + signature: &Self::Signature, + randomness: &[u8], + ) -> Result { + let rand_signature_time = start_timer!(|| "SchnorrSig::RandomizeSig"); + let Signature { + prover_response, + verifier_challenge, + } = signature; + let mut base = C::ScalarField::one(); + let mut multiplier = C::ScalarField::zero(); + for bit in bytes_to_bits(randomness) { + if bit { + multiplier += &base; + } + base.double_in_place(); + } + + let new_sig = Signature { + prover_response: *prover_response - (*verifier_challenge * multiplier), + verifier_challenge: *verifier_challenge, + }; + end_timer!(rand_signature_time); + Ok(new_sig) + } +} + +pub fn bytes_to_bits(bytes: &[u8]) -> Vec { + let mut bits = Vec::with_capacity(bytes.len() * 8); + for byte in bytes { + for i in 0..8 { + let bit = (*byte >> (8 - i - 1)) & 1; + bits.push(bit == 1); + } + } + bits +} + +impl, D: Digest> + ToConstraintField for Parameters +{ + #[inline] + fn to_field_elements(&self) -> Option> { + self.generator.into_projective().to_field_elements() + } +} diff --git a/arkworks/crypto-primitives/src/snark/constraints.rs b/arkworks/crypto-primitives/src/snark/constraints.rs new file mode 100644 index 00000000..05a76b2a --- /dev/null +++ 
b/arkworks/crypto-primitives/src/snark/constraints.rs @@ -0,0 +1,649 @@ +use ark_ff::{BigInteger, FpParameters, PrimeField}; +use ark_nonnative_field::params::{get_params, OptimizationType}; +use ark_nonnative_field::{AllocatedNonNativeFieldVar, NonNativeFieldVar}; +use ark_r1cs_std::prelude::*; +use ark_r1cs_std::{ + bits::boolean::Boolean, + fields::fp::{AllocatedFp, FpVar}, + R1CSVar, +}; +use ark_relations::r1cs::OptimizationGoal; +use ark_relations::{ + lc, ns, + r1cs::{ + ConstraintSynthesizer, ConstraintSystemRef, LinearCombination, Namespace, SynthesisError, + }, +}; +use ark_snark::{CircuitSpecificSetupSNARK, UniversalSetupSNARK, SNARK}; +use ark_std::{ + borrow::Borrow, + fmt, + marker::PhantomData, + vec::{IntoIter, Vec}, +}; + +/// This implements constraints for SNARK verifiers. +pub trait SNARKGadget> { + type ProcessedVerifyingKeyVar: AllocVar + Clone; + type VerifyingKeyVar: AllocVar + + ToBytesGadget + + Clone; + type InputVar: AllocVar, ConstraintF> + FromFieldElementsGadget + Clone; + type ProofVar: AllocVar + Clone; + + /// Information about the R1CS constraints required to check proofs relative + /// a given verification key. In the context of a LPCP-based pairing-based SNARK + /// like that of [[Groth16]](https://eprint.iacr.org/2016/260), + /// this is independent of the R1CS matrices, + /// whereas for more "complex" SNARKs like [[Marlin]](https://eprint.iacr.org/2019/1047), + /// this can encode information about the highest degree of polynomials + /// required to verify proofs. + type VerifierSize: PartialOrd + Clone + fmt::Debug; + + /// Returns information about the R1CS constraints required to check proofs relative + /// to the verification key `circuit_vk`. + fn verifier_size(circuit_vk: &S::VerifyingKey) -> Self::VerifierSize; + + /// Optionally allocates `S::Proof` in `cs` without performing + /// additional checks, such as subgroup membership checks. Use this *only* + /// if you know it is safe to do so. Such "safe" scenarios can include + /// the case where `proof` is a public input (`mode == AllocationMode::Input`), + /// and the corresponding checks are performed by the SNARK verifier outside + /// the circuit. Another example is the when `mode == AllocationMode::Constant`. + /// + /// The default implementation does not omit such checks, and just invokes + /// `Self::ProofVar::new_variable`. + #[tracing::instrument(target = "r1cs", skip(cs, f))] + fn new_proof_unchecked>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + Self::ProofVar::new_variable(cs, f, mode) + } + + /// Optionally allocates `S::VerifyingKey` in `cs` without performing + /// additional checks, such as subgroup membership checks. Use this *only* + /// if you know it is safe to do so. Such "safe" scenarios can include + /// the case where `vk` is a public input (`mode == AllocationMode::Input`), + /// and the corresponding checks are performed by the SNARK verifier outside + /// the circuit. Another example is the when `mode == AllocationMode::Constant`. + /// + /// The default implementation does not omit such checks, and just invokes + /// `Self::VerifyingKeyVar::new_variable`. 
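A compact sketch of how an implementor of this trait is consumed inside an outer circuit; only the `SNARKGadget` trait defined in this file plus `ark-r1cs-std` and `ark-snark` are assumed, and the helper name is illustrative:

```rust
// Sketch: enforce that `proof` verifies under `vk` for public input `x`,
// inside a circuit over ConstraintF. `SG` is any SNARKGadget implementor
// (e.g., a Groth16 verifier gadget); `SNARKGadget` is the trait defined above.
use ark_ff::PrimeField;
use ark_r1cs_std::prelude::*;
use ark_relations::r1cs::SynthesisError;
use ark_snark::SNARK;

fn enforce_verified<F, ConstraintF, S, SG>(
    vk: &SG::VerifyingKeyVar,
    x: &SG::InputVar,
    proof: &SG::ProofVar,
) -> Result<(), SynthesisError>
where
    F: PrimeField,
    ConstraintF: PrimeField,
    S: SNARK<F>,
    SG: SNARKGadget<F, ConstraintF, S>,
{
    // `verify` returns a Boolean<ConstraintF>; require it to be true.
    SG::verify(vk, x, proof)?.enforce_equal(&Boolean::TRUE)
}
```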
+ #[tracing::instrument(target = "r1cs", skip(cs, f))] + fn new_verification_key_unchecked>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + Self::VerifyingKeyVar::new_variable(cs, f, mode) + } + + fn verify_with_processed_vk( + circuit_pvk: &Self::ProcessedVerifyingKeyVar, + x: &Self::InputVar, + proof: &Self::ProofVar, + ) -> Result, SynthesisError>; + + fn verify( + circuit_vk: &Self::VerifyingKeyVar, + x: &Self::InputVar, + proof: &Self::ProofVar, + ) -> Result, SynthesisError>; +} + +pub trait CircuitSpecificSetupSNARKGadget< + F: PrimeField, + ConstraintF: PrimeField, + S: CircuitSpecificSetupSNARK, +>: SNARKGadget +{ +} + +pub trait UniversalSetupSNARKGadget< + F: PrimeField, + ConstraintF: PrimeField, + S: UniversalSetupSNARK, +>: SNARKGadget +{ + type BoundCircuit: From + ConstraintSynthesizer + Clone; +} + +/// Gadgets to convert elements between different fields for recursive proofs +pub trait FromFieldElementsGadget: Sized { + fn repack_input(src: &Vec) -> Vec; + fn from_field_elements(src: &Vec>) -> Result; +} + +/// Conversion of field elements by converting them to boolean sequences +/// Used by Groth16 and Gm17 +#[derive(Clone)] +pub struct BooleanInputVar { + val: Vec>>, + _snark_field_: PhantomData, +} + +impl BooleanInputVar { + pub fn new(val: Vec>>) -> Self { + Self { + val, + _snark_field_: PhantomData, + } + } +} + +impl IntoIterator for BooleanInputVar { + type Item = Vec>; + type IntoIter = IntoIter>>; + + fn into_iter(self) -> Self::IntoIter { + self.val.into_iter() + } +} + +impl AllocVar, CF> for BooleanInputVar { + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + if mode == AllocationMode::Input { + Self::new_input(cs, f) + } else { + let ns = cs.into(); + let cs = ns.cs(); + + let t = f()?; + let obj = t.borrow(); + + // convert the elements into booleans (little-endian) + let mut res = Vec::>>::new(); + for elem in obj.iter() { + let mut bits = elem.into_repr().to_bits_le(); + bits.truncate(F::size_in_bits()); + + let mut booleans = Vec::>::new(); + for bit in bits.iter() { + booleans.push(Boolean::new_variable(ns!(cs, "bit"), || Ok(*bit), mode)?); + } + + res.push(booleans); + } + + Ok(Self { + val: res, + _snark_field_: PhantomData, + }) + } + } + + fn new_input>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + + let obj = f()?; + + // Step 1: obtain the bits of the F field elements (little-endian) + let mut src_bits = Vec::::new(); + for elem in obj.borrow().iter() { + let mut bits = elem.into_repr().to_bits_le(); + bits.truncate(F::size_in_bits()); + for _ in bits.len()..F::size_in_bits() { + bits.push(false); + } + bits.reverse(); + + src_bits.append(&mut bits); + } + + // Step 2: repack the bits as CF field elements + // Deciding how many bits can be embedded, + // if CF has the same number of bits as F, but is larger, + // then it is okay to put the entire field element in. 
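+        // Worked example: for F = BLS12-377's scalar field (253-bit modulus)
+        // packed over CF = BW6-761's scalar field (a 377-bit modulus equal to
+        // BLS12-377's base field), the bit lengths differ, so capacity
+        // = 377 - 1 = 376 bits per CF element; four F inputs (4 × 253 = 1012 bits)
+        // then occupy ceil(1012 / 376) = 3 CF input elements rather than 4.
+        // When the two moduli have the same bit length, the limb-by-limb
+        // comparison below additionally checks that F's modulus is no larger
+        // than CF's before using the full bit length.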
+ let capacity = if CF::size_in_bits() == F::size_in_bits() { + let fq = <::Params as FpParameters>::MODULUS; + let fr = <::Params as FpParameters>::MODULUS; + + let fq_u64: &[u64] = fq.as_ref(); + let fr_u64: &[u64] = fr.as_ref(); + + let mut fq_not_smaller_than_fr = true; + for (left, right) in fq_u64.iter().zip(fr_u64.iter()).rev() { + if left < right { + fq_not_smaller_than_fr = false; + break; + } + + if left > right { + break; + } + } + + if fq_not_smaller_than_fr { + CF::size_in_bits() + } else { + CF::size_in_bits() - 1 + } + } else { + CF::size_in_bits() - 1 + }; + + // Step 3: allocate the CF field elements as input + let mut src_booleans = Vec::>::new(); + for chunk in src_bits.chunks(capacity) { + let elem = CF::from_repr(::BigInt::from_bits_be(chunk)).unwrap(); // big endian + + let elem_gadget = FpVar::::new_input(ns!(cs, "input"), || Ok(elem))?; + + let mut booleans = elem_gadget.to_bits_le()?; + booleans.truncate(chunk.len()); + booleans.reverse(); + + src_booleans.append(&mut booleans); + } + + // Step 4: unpack them back to bits + let res = src_booleans + .chunks(F::size_in_bits()) + .map(|f| { + let mut res = f.to_vec(); + res.reverse(); + res + }) + .collect::>>>(); + + Ok(Self { + val: res, + _snark_field_: PhantomData, + }) + } +} + +impl FromFieldElementsGadget for BooleanInputVar { + fn repack_input(src: &Vec) -> Vec { + // Step 1: obtain the bits of the F field elements + let mut src_bits = Vec::::new(); + for (_, elem) in src.iter().enumerate() { + let mut bits = elem.into_repr().to_bits_le(); + bits.truncate(F::size_in_bits()); + for _ in bits.len()..F::size_in_bits() { + bits.push(false); + } + bits.reverse(); + + src_bits.append(&mut bits); + } + + // Step 2: repack the bits as CF field elements + // Deciding how many bits can be embedded. + let capacity = if CF::size_in_bits() == F::size_in_bits() { + let fq = <::Params as FpParameters>::MODULUS; + let fr = <::Params as FpParameters>::MODULUS; + + let fq_u64: &[u64] = fq.as_ref(); + let fr_u64: &[u64] = fr.as_ref(); + + let mut fq_not_smaller_than_fr = true; + for (left, right) in fq_u64.iter().zip(fr_u64.iter()).rev() { + if left < right { + fq_not_smaller_than_fr = false; + break; + } + + if left > right { + break; + } + } + + if fq_not_smaller_than_fr { + CF::size_in_bits() + } else { + CF::size_in_bits() - 1 + } + } else { + CF::size_in_bits() - 1 + }; + + // Step 3: directly pack the bits + let mut dest = Vec::::new(); + for chunk in src_bits.chunks(capacity) { + let elem = CF::from_repr(::BigInt::from_bits_be(chunk)).unwrap(); // big endian + dest.push(elem); + } + + dest + } + + fn from_field_elements(src: &Vec>) -> Result { + // Step 1: obtain the booleans of the CF field variables + let mut src_booleans = Vec::>::new(); + for elem in src.iter() { + let mut bits = elem.to_bits_le()?; + bits.reverse(); + src_booleans.extend_from_slice(&bits); + } + + // Step 2: repack the bits as F field elements + // Deciding how many bits can be embedded. 
+ let capacity = if CF::size_in_bits() == F::size_in_bits() { + let fq = <::Params as FpParameters>::MODULUS; + let fr = <::Params as FpParameters>::MODULUS; + + let fq_u64: &[u64] = fq.as_ref(); + let fr_u64: &[u64] = fr.as_ref(); + + let mut fr_not_smaller_than_fq = true; + for (left, right) in fr_u64.iter().zip(fq_u64.iter()).rev() { + if left < right { + fr_not_smaller_than_fq = false; + break; + } + + if left > right { + break; + } + } + + if fr_not_smaller_than_fq { + F::size_in_bits() + } else { + F::size_in_bits() - 1 + } + } else { + F::size_in_bits() - 1 + }; + + // Step 3: group them based on the used capacity of F + let res = src_booleans + .chunks(capacity) + .map(|x| { + let mut res = x.to_vec(); + res.reverse(); + res + }) + .collect::>>>(); + Ok(Self { + val: res, + _snark_field_: PhantomData, + }) + } +} + +/// Conversion of field elements by allocating them as nonnative field elements +/// Used by Marlin +pub struct NonNativeFieldInputVar +where + F: PrimeField, + CF: PrimeField, +{ + pub val: Vec>, +} + +impl NonNativeFieldInputVar +where + F: PrimeField, + CF: PrimeField, +{ + pub fn new(val: Vec>) -> Self { + Self { val } + } +} + +impl IntoIterator for NonNativeFieldInputVar +where + F: PrimeField, + CF: PrimeField, +{ + type Item = NonNativeFieldVar; + type IntoIter = IntoIter>; + + fn into_iter(self) -> Self::IntoIter { + self.val.into_iter() + } +} + +impl Clone for NonNativeFieldInputVar +where + F: PrimeField, + CF: PrimeField, +{ + fn clone(&self) -> Self { + Self { + val: self.val.clone(), + } + } +} + +impl AllocVar, CF> for NonNativeFieldInputVar +where + F: PrimeField, + CF: PrimeField, +{ + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + if mode == AllocationMode::Input { + Self::new_input(cs, f) + } else { + // directly allocate them as nonnative field elements + + let ns = cs.into(); + let cs = ns.cs(); + + let t = f()?; + let obj = t.borrow(); + let mut allocated = Vec::>::new(); + + for elem in obj.iter() { + let elem_allocated = NonNativeFieldVar::::new_variable( + ns!(cs, "allocating element"), + || Ok(elem), + mode, + )?; + allocated.push(elem_allocated); + } + + Ok(Self { val: allocated }) + } + } + + fn new_input>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + ) -> Result { + // allocate the nonnative field elements by squeezing the bits like in BooleanInputVar + + let ns = cs.into(); + let cs = ns.cs(); + + let optimization_type = match cs.optimization_goal() { + OptimizationGoal::None => OptimizationType::Constraints, + OptimizationGoal::Constraints => OptimizationType::Constraints, + OptimizationGoal::Weight => OptimizationType::Weight, + }; + + let params = get_params(F::size_in_bits(), CF::size_in_bits(), optimization_type); + + let obj = f()?; + + // Step 1: use BooleanInputVar to allocate the values as bits + // This is to make sure that we are using as few elements as possible + let boolean_allocation = + BooleanInputVar::new_input(ns!(cs, "boolean"), || Ok(obj.borrow()))?; + + // Step 2: allocating the nonnative field elements as witnesses + let mut field_allocation = Vec::>::new(); + + for elem in obj.borrow().iter() { + let mut elem_allocated = AllocatedNonNativeFieldVar::::new_witness( + ns!(cs, "allocating element"), + || Ok(elem), + )?; + + // due to the consistency check below + elem_allocated.is_in_the_normal_form = true; + elem_allocated.num_of_additions_over_normal_form = CF::zero(); + + field_allocation.push(elem_allocated); + } + + // Step 3: check consistency + 
for (field_bits, field_elem) in boolean_allocation.val.iter().zip(field_allocation.iter()) { + let mut field_bits = field_bits.clone(); + field_bits.reverse(); + + let bit_per_top_limb = + F::size_in_bits() - (params.num_limbs - 1) * params.bits_per_limb; + let bit_per_non_top_limb = params.bits_per_limb; + + // must use lc to save computation + for (j, limb) in field_elem.limbs.iter().enumerate() { + let bits_slice = if j == 0 { + field_bits[0..bit_per_top_limb].to_vec() + } else { + field_bits[bit_per_top_limb + (j - 1) * bit_per_non_top_limb + ..bit_per_top_limb + j * bit_per_non_top_limb] + .to_vec() + }; + + let mut bit_sum = FpVar::::zero(); + let mut cur = CF::one(); + + for bit in bits_slice.iter().rev() { + bit_sum += as From>>::from((*bit).clone()) * cur; + cur.double_in_place(); + } + + limb.enforce_equal(&bit_sum)?; + } + } + + let mut wrapped_field_allocation = Vec::>::new(); + for field_gadget in field_allocation.iter() { + wrapped_field_allocation.push(NonNativeFieldVar::Var(field_gadget.clone())); + } + Ok(Self { + val: wrapped_field_allocation, + }) + } +} + +impl FromFieldElementsGadget for NonNativeFieldInputVar +where + F: PrimeField, + CF: PrimeField, +{ + fn repack_input(src: &Vec) -> Vec { + BooleanInputVar::repack_input(src) + } + + fn from_field_elements(src: &Vec>) -> Result { + let cs = src.cs(); + + if cs == ConstraintSystemRef::None { + // Step 1: use BooleanInputVar to convert them into booleans + let boolean_allocation = BooleanInputVar::::from_field_elements(src)?; + + // Step 2: construct the nonnative field gadgets from bits + let mut field_allocation = Vec::>::new(); + + // reconstruct the field elements and check consistency + for field_bits in boolean_allocation.val.iter() { + let mut field_bits = field_bits.clone(); + field_bits.resize(F::size_in_bits(), Boolean::::Constant(false)); + + let mut cur = F::one(); + + let mut value = F::zero(); + for bit in field_bits.iter().rev() { + if bit.value().unwrap_or_default() { + value += &cur; + } + cur.double_in_place(); + } + + field_allocation.push(NonNativeFieldVar::Constant(value)); + } + + Ok(Self { + val: field_allocation, + }) + } else { + let optimization_type = match cs.optimization_goal() { + OptimizationGoal::None => OptimizationType::Constraints, + OptimizationGoal::Constraints => OptimizationType::Constraints, + OptimizationGoal::Weight => OptimizationType::Weight, + }; + + let params = get_params(F::size_in_bits(), CF::size_in_bits(), optimization_type); + + // Step 1: use BooleanInputVar to convert them into booleans + let boolean_allocation = BooleanInputVar::::from_field_elements(src)?; + + // Step 2: construct the nonnative field gadgets from bits + let mut field_allocation = Vec::>::new(); + + // reconstruct the field elements and check consistency + for field_bits in boolean_allocation.val.iter() { + let mut field_bits = field_bits.clone(); + field_bits.resize(F::size_in_bits(), Boolean::::Constant(false)); + field_bits.reverse(); + + let mut limbs = Vec::>::new(); + + let bit_per_top_limb = + F::size_in_bits() - (params.num_limbs - 1) * params.bits_per_limb; + let bit_per_non_top_limb = params.bits_per_limb; + + // must use lc to save computation + for j in 0..params.num_limbs { + let bits_slice = if j == 0 { + field_bits[0..bit_per_top_limb].to_vec() + } else { + field_bits[bit_per_top_limb + (j - 1) * bit_per_non_top_limb + ..bit_per_top_limb + j * bit_per_non_top_limb] + .to_vec() + }; + + let mut lc = LinearCombination::::zero(); + let mut cur = CF::one(); + + let mut limb_value = 
CF::zero();
+                    for bit in bits_slice.iter().rev() {
+                        lc = &lc + bit.lc() * cur;
+
+                        if bit.value().unwrap_or_default() {
+                            limb_value += &cur;
+                        }
+
+                        cur.double_in_place();
+                    }
+
+                    let limb = AllocatedFp::<CF>::new_witness(ns!(cs, "limb"), || Ok(limb_value))?;
+                    lc = lc - limb.variable;
+                    cs.enforce_constraint(lc!(), lc!(), lc).unwrap();
+
+                    limbs.push(FpVar::from(limb));
+                }
+
+                field_allocation.push(NonNativeFieldVar::<F, CF>::Var(
+                    AllocatedNonNativeFieldVar::<F, CF> {
+                        cs: cs.clone(),
+                        limbs,
+                        num_of_additions_over_normal_form: CF::zero(),
+                        is_in_the_normal_form: true,
+                        target_phantom: PhantomData,
+                    },
+                ))
+            }
+
+            Ok(Self {
+                val: field_allocation,
+            })
+        }
+    }
+}
diff --git a/arkworks/crypto-primitives/src/snark/mod.rs b/arkworks/crypto-primitives/src/snark/mod.rs
new file mode 100644
index 00000000..967580a4
--- /dev/null
+++ b/arkworks/crypto-primitives/src/snark/mod.rs
@@ -0,0 +1,6 @@
+#[cfg(feature = "r1cs")]
+pub mod constraints;
+#[cfg(feature = "r1cs")]
+pub use constraints::*;
+
+pub use ark_snark::*;
diff --git a/arkworks/curves/.github/ISSUE_TEMPLATE/bug_report.md b/arkworks/curves/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 00000000..e01ca941
--- /dev/null
+++ b/arkworks/curves/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,25 @@
+---
+name: Bug Report
+about: Create a report to help us squash bugs!
+
+---
+
+
+## Summary of Bug
+
+
+
+## Version
+
+
+
+## Steps to Reproduce
+
+
+
diff --git a/arkworks/curves/.github/ISSUE_TEMPLATE/feature_request.md b/arkworks/curves/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 00000000..7d5ed5df
--- /dev/null
+++ b/arkworks/curves/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,35 @@
+---
+name: Feature Request
+about: Create a proposal to request a feature
+
+---
+
+
+
+## Summary
+
+
+
+## Problem Definition
+
+
+
+## Proposal
+
+
+
+____
+
+#### For Admin Use
+
+- [ ] Not duplicate issue
+- [ ] Appropriate labels applied
+- [ ] Appropriate contributors tagged
+- [ ] Contributor assigned/self-assigned
diff --git a/arkworks/curves/.github/PULL_REQUEST_TEMPLATE.md b/arkworks/curves/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 00000000..37f2f6c1
--- /dev/null
+++ b/arkworks/curves/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,26 @@
+
+
+## Description
+
+
+
+closes: #XXXX
+
+---
+
+Before we can merge this PR, please make sure that all the following items have been
+checked off. If any of the checklist items are not applicable, please leave them but
+write a little note why.
+
+- [ ] Targeted PR against correct branch (master)
+- [ ] Linked to Github issue with discussion and accepted design OR have an explanation in the PR that describes this work.
+- [ ] Wrote unit tests +- [ ] Updated relevant documentation in the code +- [ ] Added a relevant changelog entry to the `Pending` section in `CHANGELOG.md` +- [ ] Re-reviewed `Files changed` in the Github PR explorer diff --git a/arkworks/curves/.github/workflows/ci.yml b/arkworks/curves/.github/workflows/ci.yml new file mode 100644 index 00000000..fe06a7d2 --- /dev/null +++ b/arkworks/curves/.github/workflows/ci.yml @@ -0,0 +1,168 @@ +name: CI +on: + pull_request: + push: + branches: + - master +env: + RUST_BACKTRACE: 1 + +jobs: + style: + name: Check Style + runs-on: ubuntu-latest + steps: + + - name: Checkout + uses: actions/checkout@v1 + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + components: rustfmt + + - name: cargo fmt --check + uses: actions-rs/cargo@v1 + with: + command: fmt + args: --all -- --check + + check: + name: Check + runs-on: ubuntu-latest + env: + RUSTFLAGS: -Dwarnings + strategy: + matrix: + rust: + - stable + - nightly + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Install Rust (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: ${{ matrix.rust }} + override: true + + - uses: actions/cache@v2 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: Check examples + uses: actions-rs/cargo@v1 + with: + command: check + args: --examples --all + + - name: Check examples with all features on stable + uses: actions-rs/cargo@v1 + with: + command: check + args: --examples --all-features --all + if: matrix.rust == 'stable' + + - name: Check benchmarks on nightly + uses: actions-rs/cargo@v1 + with: + command: check + args: --all-features --examples --workspace --benches + if: matrix.rust == 'nightly' + + + + directories: # Job that list subdirectories + name: List directories for parallelizing tests + runs-on: ubuntu-latest + outputs: + dir: ${{ steps.set-dirs.outputs.dir }} # generate output name dir by using inner step output + steps: + - uses: actions/checkout@v2 + - id: set-dirs # Give it an id to handle to get step outputs in the outputs key above + run: echo "::set-output name=dir::$(ls -d */ | jq -R -s -c 'split("\n")[:-1]')" + # Define step output named dir base on ls command transformed to JSON thanks to jq + test: + name: Test + runs-on: ubuntu-latest + needs: [directories] # Depends on previous job + strategy: + matrix: + dir: ${{fromJson(needs.directories.outputs.dir)}} # List matrix strategy from directories dynamically + # rust: + # - stable + # - nightly + exclude: + - dir: scripts/ + - dir: curve-constraint-tests/ + - dir: curve-benches/ + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Run tests + run: | + cd ${{matrix.dir}} + cargo test --all-features + + docs: + name: Check Documentation + runs-on: ubuntu-latest + steps: + + - name: Checkout + uses: actions/checkout@v1 + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + components: rustfmt + + - name: cargo doc --all --no-deps --document-private-items --all-features + uses: actions-rs/cargo@v1 + with: + command: doc + args: --all --no-deps --document-private-items --all-features + + check_no_std: + name: Check no_std + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Install Rust (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + target: 
aarch64-unknown-none + override: true + + - uses: actions/cache@v2 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: check + uses: actions-rs/cargo@v1 + with: + command: check + args: --examples --workspace --exclude ark-curve-constraint-tests --exclude ark-curve-benches --target aarch64-unknown-none + + - name: build + uses: actions-rs/cargo@v1 + with: + command: build + args: --workspace --exclude ark-curve-constraint-tests --exclude ark-curve-benches --target aarch64-unknown-none diff --git a/arkworks/curves/.github/workflows/linkify_changelog.yml b/arkworks/curves/.github/workflows/linkify_changelog.yml new file mode 100644 index 00000000..0cbe85f1 --- /dev/null +++ b/arkworks/curves/.github/workflows/linkify_changelog.yml @@ -0,0 +1,20 @@ +name: Linkify Changelog + +on: + workflow_dispatch + +jobs: + linkify: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Add links + run: python3 scripts/linkify_changelog.py CHANGELOG.md + - name: Commit + run: | + git config user.name github-actions + git config user.email github-actions@github.com + git add . + git commit -m "Linkify Changelog" + git push \ No newline at end of file diff --git a/arkworks/curves/.gitignore b/arkworks/curves/.gitignore new file mode 100644 index 00000000..9b5e101e --- /dev/null +++ b/arkworks/curves/.gitignore @@ -0,0 +1,11 @@ +target +Cargo.lock +.DS_Store +.idea +*.iml +*.ipynb_checkpoints +*.pyc +*.sage.py +params +*.swp +*.swo diff --git a/arkworks/curves/CHANGELOG.md b/arkworks/curves/CHANGELOG.md new file mode 100644 index 00000000..0acc9f16 --- /dev/null +++ b/arkworks/curves/CHANGELOG.md @@ -0,0 +1,63 @@ +# CHANGELOG + +## Pending + +### Breaking changes + +### Features + +### Improvements + +### Bug fixes + +## v0.3.0 + +### Breaking changes + +- [\#60](https://github.com/arkworks-rs/curves/pull/60) Change the scalar group generator of `Fr` of `bls12_377` Fr from `11` to `22`. +- [\#61](https://github.com/arkworks-rs/curves/pull/61) Remove `ATE_LOOP_COUNT_IS_NEGATIVE` from BN254 curve parameter. + +### Features + +### Improvements + +### Bug fixes + +## v0.2.0 + +### Breaking changes + +- Requires all crates from `arkworks-rs/algebra` to have version `v0.2.0` or greater. + +### Features + +- [\#3](https://github.com/arkworks-rs/curves/pull/3) Add constraints for + `ark-bls12-377`, + `ark-ed-on-bls12-377`, + `ark-ed-on-bls12-381`, + `ark-ed-on-bn254`, + `ark-ed-on-cp6-782`, + `ark-ed-on-bw6-761`, + `ark-ed-on-mnt4-298`, + `ark-ed-on-mnt4-753`, + `ark-mnt4-298`, + `ark-mnt6-298`, + `ark-mnt4-753`, + `ark-mnt6-753`. +- [\#7](https://github.com/arkworks-rs/curves/pull/7) Add benchmarks for Edwards curves. +- [\#19](https://github.com/arkworks-rs/curves/pull/19) Change field constants to be provided as normal strings, instead of in Montgomery form. +- [\#53](https://github.com/arkworks-rs/curves/pull/53) Add benchmarks for Pallas and Vesta curves. + +### Improvements + +- [\#42](https://github.com/arkworks-rs/curves/pull/42) Remove the dependency of `rand_xorshift`. + +### Bug fixes + +- [\#28](https://github.com/arkworks-rs/curves/pull/28), [\#49](https://github.com/arkworks-rs/curves/pull/49) Fix broken documentation links. +- [\#38](https://github.com/arkworks-rs/curves/pull/38) Compile with `panic='abort'` in release mode, for safety of the library across FFI boundaries. 
+- [\#45](https://github.com/arkworks-rs/curves/pull/45) Fix `ark-ed-on-mnt4-753`.
+
+## v0.1.0
+
+Initial Release
diff --git a/arkworks/curves/CONTRIBUTING.md b/arkworks/curves/CONTRIBUTING.md
new file mode 100644
index 00000000..434bbbca
--- /dev/null
+++ b/arkworks/curves/CONTRIBUTING.md
@@ -0,0 +1,65 @@
+# Contributing
+
+Thank you for considering making contributions to `curves`!
+
+Contributing to this repo can be done in several forms, such as participating in discussion or proposing code changes.
+To ensure a smooth workflow for all contributors, the following general procedure for contributing has been established:
+
+1) Either open or find an issue you'd like to help with
+2) Participate in thoughtful discussion on that issue
+3) If you would like to contribute:
+   * If the issue is a feature proposal, ensure that the proposal has been accepted
+   * Ensure that nobody else has already begun working on this issue.
+     If they have, please try to contact them to collaborate
+   * If nobody has been assigned the issue and you would like to work on it, make a comment on the issue to inform the community of your intention to begin work (so we can avoid duplication of effort)
+   * We suggest using standard Github best practices for contributing: fork the repo, branch from the HEAD of `master`, make some commits on your branch, and submit a PR from the branch to `master`.
+     More detail on this is below
+   * Be sure to include a relevant changelog entry in the Pending section of CHANGELOG.md (see file for log format)
+   * If the change is breaking, we may add migration instructions.
+
+Note that for very small or clear problems (such as typos), or for well-isolated improvements, it is not required to open an issue before submitting a PR.
+But be aware that for more complex problems/features touching multiple parts of the codebase, if a PR is opened before an adequate design discussion has taken place in a Github issue, that PR runs a higher risk of being rejected.
+
+Looking for a good place to start contributing? How about checking out some good first issues?
+
+## Branch Structure
+
+`curves` has its default branch as `master`, which is where PRs are merged into. Releases will be made periodically, on no set schedule.
+All other branches should be assumed to be miscellaneous feature-development branches.
+
+All downstream users of the library should be using tagged versions of the library pulled from cargo.
+
+## How to work on a fork
+
+Please skip this section if you're familiar with contributing to open-source Github projects.
+
+First fork the repo from the Github UI, and clone it locally.
+Then, in the repo, add the repo you forked from as a new remote:
+```bash
+git remote add upstream git@github.com:arkworks-rs/curves.git
+```
+
+Then, to make a code contribution, first think of a branch name that describes your change,
+and do the following:
+```bash
+git checkout master
+git pull upstream master
+git checkout -b $NEW_BRANCH_NAME
+```
+Work as normal on that branch, and open a pull request to upstream `master` when you're done =)
+
+## Updating documentation
+
+All PRs should aim to leave the code more documented than they found it.
+Please don't assume that it's easy to infer what the code is doing,
+as that is usually not the case for these complex protocols.
+(Even when you already understand the paper!)
+
+It's often very useful to describe the high-level view of what a code block is doing,
+and either refer to the relevant section of a paper or include a short proof/argument for why it makes sense before the actual logic.
+
+## Performance improvements
+
+All performance improvements should be accompanied by benchmarks demonstrating the improvement, or it should otherwise be clear that things have improved.
+For some areas of the codebase, performance roughly follows the number of field multiplications, but there are also many areas where
+hard-to-predict, low-level system effects such as cache locality and superscalar operations become important for performance.
+Thus performance can often be non-intuitive, and can diverge from simply minimizing the number of arithmetic operations.
\ No newline at end of file
diff --git a/arkworks/curves/Cargo.toml b/arkworks/curves/Cargo.toml
new file mode 100644
index 00000000..e3c83d63
--- /dev/null
+++ b/arkworks/curves/Cargo.toml
@@ -0,0 +1,57 @@
+[workspace]
+
+members = [
+    "curve-benches",
+    "curve-constraint-tests",
+
+    "bls12_377",
+    "ed_on_bls12_377",
+
+    "bw6_761",
+    "ed_on_bw6_761",
+
+    "cp6_782",
+    "ed_on_cp6_782",
+
+    "bls12_381",
+    "ed_on_bls12_381",
+
+    "bn254",
+    "ed_on_bn254",
+
+    "mnt4_298",
+    "mnt6_298",
+    "ed_on_mnt4_298",
+
+    "mnt4_753",
+    "mnt6_753",
+    "ed_on_mnt4_753",
+
+    "pallas",
+    "vesta",
+]
+
+[profile.release]
+opt-level = 3
+lto = "thin"
+incremental = true
+panic = 'abort'
+
+[profile.bench]
+opt-level = 3
+debug = false
+rpath = false
+lto = "thin"
+incremental = true
+debug-assertions = false
+
+[profile.dev]
+opt-level = 0
+panic = 'abort'
+
+[profile.test]
+opt-level = 3
+lto = "thin"
+incremental = true
+debug-assertions = true
+debug = true
diff --git a/arkworks/curves/LICENSE-APACHE b/arkworks/curves/LICENSE-APACHE
new file mode 100644
index 00000000..16fe87b0
--- /dev/null
+++ b/arkworks/curves/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/arkworks/curves/LICENSE-MIT b/arkworks/curves/LICENSE-MIT new file mode 100644 index 00000000..72dc60d8 --- /dev/null +++ b/arkworks/curves/LICENSE-MIT @@ -0,0 +1,19 @@ +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/arkworks/curves/README.md b/arkworks/curves/README.md new file mode 100644 index 00000000..4f760ba9 --- /dev/null +++ b/arkworks/curves/README.md @@ -0,0 +1,35 @@ +# Curve implementations + +This repository contains implementations of some popular elliptic curves. The curve API implemented here matches the curve traits defined [here](https://github.com/arkworks-rs/algebra/blob/master/ec/src/lib.rs) in the [arkworks-rs/algebra](https://github.com/arkworks-rs/algebra) repository. + +### BLS12-381 and embedded curves +* [`ark-bls12-381`](bls12_381): Implements the BLS12-381 pairing-friendly curve +* [`ark-ed-on-bls12-381`](ed_on_bls12_381): Implements a Twisted Edwards curve atop the scalar field of BLS12-381 + +### BLS12-377 and related curves +* [`ark-bls12-377`](bls12_377): Implements the BLS12-377 pairing-friendly curve +* [`ark-ed-on-bls12-377`](ed_on_bls12_377): Implements a Twisted Edwards curve atop the scalar field of BLS12-377 + +* [`ark-bw6-761`](bw6_761): Implements the BW6-761 pairing-friendly curve, which is a curve whose scalar field equals the base field of BLS12-377 +* [`ark-ed-on-bw6-761`](ed_on_bw6_761): Implements a Twisted Edwards curve atop the scalar field of BW6-761 + +* [`ark-cp6-782`](cp6_782): Implements the CP6-782 pairing-friendly curve, which is a curve whose scalar field equals the base field of BLS12-377 +* [`ark-ed-on-cp6-782`](ed_on_cp6_782): Implements a Twisted Edwards curve atop the scalar field of CP6-782. This is the same curve as in `ark-ed-on-bw6-761` + +### BN254 and related curves +* [`ark-bn254`](bn254): Implements the BN254 pairing-friendly curve +* [`ark-ed-on-bn254`](ed_on_bn254): Implements a Twisted Edwards curve atop the scalar field of BN254 + +### MNT-298 cycle of curves and related curves +* [`ark-mnt4-298`](mnt4_298): Implements the MNT4-298 pairing-friendly curve. This curve forms a pairing-friendly cycle with MNT6-298 +* [`ark-mnt6-298`](mnt6_298): Implements the MNT6-298 pairing-friendly curve. This curve forms a pairing-friendly cycle with MNT4-298 +* [`ark-ed-on-mnt4-298`](ed_on_mnt4_298): Implements a Twisted Edwards curve atop the scalar field of MNT4-298 + +### MNT-753 cycle of curves and related curves +* [`ark-mnt4-753`](mnt4_753): Implements the MNT4-753 pairing-friendly curve. This curve forms a pairing-friendly cycle with MNT6-753 +* [`ark-mnt6-753`](mnt6_753): Implements the MNT6-753 pairing-friendly curve. 
This curve forms a pairing-friendly cycle with MNT4-753 +* [`ark-ed-on-mnt4-753`](ed_on_mnt4_753): Implements a Twisted Edwards curve atop the scalar field of MNT4-753 + +### [Pasta](https://electriccoin.co/blog/the-pasta-curves-for-halo-2-and-beyond/) cycle of curves +* [`ark-pallas`](pallas): Implements Pallas, a prime-order curve that forms an amicable pair with Vesta +* [`ark-vesta`](vesta): Implements Vesta, a prime-order curve that forms an amicable pair with Pallas diff --git a/arkworks/curves/bls12_377/Cargo.toml b/arkworks/curves/bls12_377/Cargo.toml new file mode 100644 index 00000000..fc9e6a66 --- /dev/null +++ b/arkworks/curves/bls12_377/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "ark-bls12-377" +version = "0.3.0" +authors = [ "arkworks contributors" ] +description = "The BLS12-377 pairing-friendly elliptic curve" +homepage = "https://arkworks.rs" +repository = "https://github.com/arkworks-rs/algebra" +documentation = "https://docs.rs/ark-bls12-377/" +keywords = ["cryptography", "finite-fields", "elliptic-curves" ] +categories = ["cryptography"] +include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +license = "MIT/Apache-2.0" +edition = "2018" + +[dependencies] +ark-ff = { path = "../../algebra/ff", version="^0.3.0", default-features = false } +ark-ec = { path = "../../algebra/ec", version="^0.3.0", default-features = false } +ark-r1cs-std = { path = "../../r1cs-std", version="^0.3.0", default-features = false, optional = true } +ark-std = { path = "../../std", version="^0.3.0", default-features = false } + +[dev-dependencies] +ark-relations = { path = "../../snark/relations", version="^0.3.0", default-features = false } +ark-serialize = { path = "../../algebra/serialize", version="^0.3.0", default-features = false } +ark-algebra-test-templates = { version="^0.3.0", default-features = false } +ark-curve-constraint-tests = { path = "../curve-constraint-tests", default-features = false } + +[features] +default = [ "curve" ] +std = [ "ark-std/std", "ark-ff/std", "ark-ec/std" ] + +curve = [ "scalar_field", "base_field" ] +scalar_field = [] +base_field = [] +r1cs = [ "base_field", "ark-r1cs-std" ] \ No newline at end of file diff --git a/arkworks/curves/bls12_377/LICENSE-APACHE b/arkworks/curves/bls12_377/LICENSE-APACHE new file mode 120000 index 00000000..965b606f --- /dev/null +++ b/arkworks/curves/bls12_377/LICENSE-APACHE @@ -0,0 +1 @@ +../LICENSE-APACHE \ No newline at end of file diff --git a/arkworks/curves/bls12_377/LICENSE-MIT b/arkworks/curves/bls12_377/LICENSE-MIT new file mode 120000 index 00000000..76219eb7 --- /dev/null +++ b/arkworks/curves/bls12_377/LICENSE-MIT @@ -0,0 +1 @@ +../LICENSE-MIT \ No newline at end of file diff --git a/arkworks/curves/bls12_377/src/constraints/curves.rs b/arkworks/curves/bls12_377/src/constraints/curves.rs new file mode 100644 index 00000000..5441eba0 --- /dev/null +++ b/arkworks/curves/bls12_377/src/constraints/curves.rs @@ -0,0 +1,29 @@ +use crate::Parameters; +use ark_r1cs_std::groups::bls12; + +/// An element of G1 in the BLS12-377 bilinear group. +pub type G1Var = bls12::G1Var; +/// An element of G2 in the BLS12-377 bilinear group. +pub type G2Var = bls12::G2Var; + +/// Represents the cached precomputation that can be performed on a G1 element +/// which enables speeding up pairing computation. +pub type G1PreparedVar = bls12::G1PreparedVar; +/// Represents the cached precomputation that can be performed on a G2 element +/// which enables speeding up pairing computation. 
+pub type G2PreparedVar = bls12::G2PreparedVar<Parameters>; + +#[test] +fn test() { + use ark_ec::models::bls12::Bls12Parameters; + ark_curve_constraint_tests::curves::sw_test::< + <Parameters as Bls12Parameters>::G1Parameters, + G1Var, + >() + .unwrap(); + ark_curve_constraint_tests::curves::sw_test::< + <Parameters as Bls12Parameters>::G2Parameters, + G2Var, + >() + .unwrap(); +} diff --git a/arkworks/curves/bls12_377/src/constraints/fields.rs b/arkworks/curves/bls12_377/src/constraints/fields.rs new file mode 100644 index 00000000..7a626e61 --- /dev/null +++ b/arkworks/curves/bls12_377/src/constraints/fields.rs @@ -0,0 +1,32 @@ +use crate::{Fq, Fq12Parameters, Fq2Parameters, Fq6Parameters}; + +use ark_r1cs_std::fields::{fp::FpVar, fp12::Fp12Var, fp2::Fp2Var, fp6_3over2::Fp6Var}; + +/// A variable that is the R1CS equivalent of `crate::Fq`. +pub type FqVar = FpVar<Fq>; + +/// A variable that is the R1CS equivalent of `crate::Fq2`. +pub type Fq2Var = Fp2Var<Fq2Parameters>; +/// A variable that is the R1CS equivalent of `crate::Fq6`. +pub type Fq6Var = Fp6Var<Fq6Parameters>; +/// A variable that is the R1CS equivalent of `crate::Fq12`. +pub type Fq12Var = Fp12Var<Fq12Parameters>; + +#[test] +fn bls12_377_field_test() { + use super::*; + use crate::{Fq, Fq12, Fq2, Fq6}; + use ark_curve_constraint_tests::fields::*; + + field_test::<_, _, FqVar>().unwrap(); + frobenius_tests::<Fq, _, FqVar>(13).unwrap(); + + field_test::<_, _, Fq2Var>().unwrap(); + frobenius_tests::<Fq2, _, Fq2Var>(13).unwrap(); + + field_test::<_, _, Fq6Var>().unwrap(); + frobenius_tests::<Fq6, _, Fq6Var>(13).unwrap(); + + field_test::<_, _, Fq12Var>().unwrap(); + frobenius_tests::<Fq12, _, Fq12Var>(13).unwrap(); +} diff --git a/arkworks/curves/bls12_377/src/constraints/mod.rs b/arkworks/curves/bls12_377/src/constraints/mod.rs new file mode 100644 index 00000000..a67838f5 --- /dev/null +++ b/arkworks/curves/bls12_377/src/constraints/mod.rs @@ -0,0 +1,163 @@ +//! This module implements the R1CS equivalent of `crate`. +//! +//! It implements field variables for `crate::{Fq, Fq2, Fq6, Fq12}`, +//! group variables for `crate::{G1, G2}`, and implements constraint +//! generation for computing `Bls12_377::pairing`. +//! +//! The field underlying these constraints is `crate::Fq`. +//! +//! # Examples +//! +//! One can perform standard algebraic operations on `FqVar`: +//! +//! ``` +//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { +//! use ark_std::UniformRand; +//! use ark_relations::r1cs::*; +//! use ark_r1cs_std::prelude::*; +//! use ark_bls12_377::{*, constraints::*}; +//! +//! let cs = ConstraintSystem::<Fq>::new_ref(); +//! // This rng is just for test purposes; do not use it +//! // in real applications. +//! let mut rng = ark_std::test_rng(); +//! +//! // Generate some random `Fq` elements. +//! let a_native = Fq::rand(&mut rng); +//! let b_native = Fq::rand(&mut rng); +//! +//! // Allocate `a_native` and `b_native` as witness variables in `cs`. +//! let a = FqVar::new_witness(ark_relations::ns!(cs, "generate_a"), || Ok(a_native))?; +//! let b = FqVar::new_witness(ark_relations::ns!(cs, "generate_b"), || Ok(b_native))?; +//! +//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any +//! // constraints or variables. +//! let a_const = FqVar::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?; +//! let b_const = FqVar::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?; +//! +//! let one = FqVar::one(); +//! let zero = FqVar::zero(); +//! +//! // Sanity check one + one = two +//! let two = &one + &one + &zero; +//! two.enforce_equal(&one.double()?)?; +//! +//! assert!(cs.is_satisfied()?); +//! +//! 
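// So far only the witness allocations and the `enforce_equal` call have +//! // touched `cs`; `cs.num_constraints()` can be queried at any point to +//! // track how large the circuit has grown. +//! +//! 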
// Check that the value of &a + &b is correct. +//! assert_eq!((&a + &b).value()?, a_native + &b_native); +//! +//! // Check that the value of &a * &b is correct. +//! assert_eq!((&a * &b).value()?, a_native * &b_native); +//! +//! // Check that operations on variables and constants are equivalent. +//! (&a + &b).enforce_equal(&(&a_const + &b_const))?; +//! assert!(cs.is_satisfied()?); +//! # Ok(()) +//! # } +//! ``` +//! +//! One can also perform standard algebraic operations on `G1Var` and `G2Var`: +//! +//! ``` +//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { +//! # use ark_std::UniformRand; +//! # use ark_relations::r1cs::*; +//! # use ark_r1cs_std::prelude::*; +//! # use ark_bls12_377::{*, constraints::*}; +//! +//! # let cs = ConstraintSystem::<Fq>::new_ref(); +//! # let mut rng = ark_std::test_rng(); +//! +//! // Generate some random `G1` elements. +//! let a_native = G1Projective::rand(&mut rng); +//! let b_native = G1Projective::rand(&mut rng); +//! +//! // Allocate `a_native` and `b_native` as witness variables in `cs`. +//! let a = G1Var::new_witness(ark_relations::ns!(cs, "a"), || Ok(a_native))?; +//! let b = G1Var::new_witness(ark_relations::ns!(cs, "b"), || Ok(b_native))?; +//! +//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any +//! // constraints or variables. +//! let a_const = G1Var::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?; +//! let b_const = G1Var::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?; +//! +//! // This returns the identity of `G1`. +//! let zero = G1Var::zero(); +//! +//! // Sanity check a + a = 2a +//! let two_a = &a + &a + &zero; +//! two_a.enforce_equal(&a.double()?)?; +//! +//! assert!(cs.is_satisfied()?); +//! +//! // Check that the value of &a + &b is correct. +//! assert_eq!((&a + &b).value()?, a_native + &b_native); +//! +//! // Check that operations on variables and constants are equivalent. +//! (&a + &b).enforce_equal(&(&a_const + &b_const))?; +//! assert!(cs.is_satisfied()?); +//! # Ok(()) +//! # } +//! ``` +//! +//! Finally, one can check pairing computations as well: +//! +//! ``` +//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { +//! # use ark_std::UniformRand; +//! # use ark_ec::PairingEngine; +//! # use ark_relations::r1cs::*; +//! # use ark_r1cs_std::prelude::*; +//! # use ark_bls12_377::{*, constraints::*}; +//! +//! # let cs = ConstraintSystem::<Fq>::new_ref(); +//! # let mut rng = ark_std::test_rng(); +//! +//! // Generate random `G1` and `G2` elements. +//! let a_native = G1Projective::rand(&mut rng); +//! let b_native = G2Projective::rand(&mut rng); +//! +//! // Allocate `a_native` and `b_native` as witness variables in `cs`. +//! let a = G1Var::new_witness(ark_relations::ns!(cs, "a"), || Ok(a_native))?; +//! let b = G2Var::new_witness(ark_relations::ns!(cs, "b"), || Ok(b_native))?; +//! +//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any +//! // constraints or variables. +//! let a_const = G1Var::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?; +//! let b_const = G2Var::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?; +//! +//! let pairing_result_native = Bls12_377::pairing(a_native, b_native); +//! +//! // Prepare `a` and `b` for pairing. +//! let a_prep = constraints::PairingVar::prepare_g1(&a)?; +//! let b_prep = constraints::PairingVar::prepare_g2(&b)?; +//! let pairing_result = constraints::PairingVar::pairing(a_prep, b_prep)?; +//! +//! 
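// `prepare_g1`/`prepare_g2` mirror the native `G1Prepared`/`G2Prepared` +//! // types: preparing `b` caches the Miller-loop line coefficients, while +//! // preparing `a` is a cheap conversion to affine form. +//! +//! 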
// Check that the pairing result is correct. +//! assert_eq!(pairing_result.value()?, pairing_result_native); +//! +//! // Check that operations on variables and constants are equivalent. +//! let a_prep_const = constraints::PairingVar::prepare_g1(&a_const)?; +//! let b_prep_const = constraints::PairingVar::prepare_g2(&b_const)?; +//! let pairing_result_const = constraints::PairingVar::pairing(a_prep_const, b_prep_const)?; +//! +//! pairing_result.enforce_equal(&pairing_result_const)?; +//! assert!(cs.is_satisfied()?); +//! # Ok(()) +//! # } +//! ``` + +mod fields; +pub use fields::*; + +#[cfg(feature = "curve")] +mod curves; +#[cfg(feature = "curve")] +mod pairing; + +#[cfg(feature = "curve")] +pub use curves::*; +#[cfg(feature = "curve")] +pub use pairing::*; diff --git a/arkworks/curves/bls12_377/src/constraints/pairing.rs b/arkworks/curves/bls12_377/src/constraints/pairing.rs new file mode 100644 index 00000000..f659f294 --- /dev/null +++ b/arkworks/curves/bls12_377/src/constraints/pairing.rs @@ -0,0 +1,10 @@ +use crate::Parameters; + +/// Specifies the constraints for computing a pairing in the BLS12-377 bilinear group. +pub type PairingVar = ark_r1cs_std::pairing::bls12::PairingVar<Parameters>; + +#[test] +fn test() { + use crate::Bls12_377; + ark_curve_constraint_tests::pairing::bilinearity_test::<Bls12_377, PairingVar>().unwrap() +} diff --git a/arkworks/curves/bls12_377/src/curves/g1.rs b/arkworks/curves/bls12_377/src/curves/g1.rs new file mode 100644 index 00000000..74f140de --- /dev/null +++ b/arkworks/curves/bls12_377/src/curves/g1.rs @@ -0,0 +1,51 @@ +use ark_ec::models::{ModelParameters, SWModelParameters}; +use ark_ff::{field_new, Zero}; + +use crate::{ + fields::{FQ_ONE, FQ_ZERO}, + Fq, Fr, +}; + +#[derive(Clone, Default, PartialEq, Eq)] +pub struct Parameters; + +impl ModelParameters for Parameters { + type BaseField = Fq; + type ScalarField = Fr; +} + +impl SWModelParameters for Parameters { + /// COEFF_A = 0 + const COEFF_A: Fq = FQ_ZERO; + + /// COEFF_B = 1 + #[rustfmt::skip] + const COEFF_B: Fq = FQ_ONE; + + /// COFACTOR = (x - 1)^2 / 3 = 30631250834960419227450344600217059328 + const COFACTOR: &'static [u64] = &[0x0, 0x170b5d4430000000]; + + /// COFACTOR_INV = COFACTOR^{-1} mod r + /// = 5285428838741532253824584287042945485047145357130994810877 + #[rustfmt::skip] + const COFACTOR_INV: Fr = field_new!(Fr, "5285428838741532253824584287042945485047145357130994810877"); + + /// AFFINE_GENERATOR_COEFFS = (G1_GENERATOR_X, G1_GENERATOR_Y) + const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = + (G1_GENERATOR_X, G1_GENERATOR_Y); + + #[inline(always)] + fn mul_by_a(_: &Self::BaseField) -> Self::BaseField { + Self::BaseField::zero() + } +} + +/// G1_GENERATOR_X = +/// 81937999373150964239938255573465948239988671502647976594219695644855304257327692006745978603320413799295628339695 +#[rustfmt::skip] +pub const G1_GENERATOR_X: Fq = field_new!(Fq, "81937999373150964239938255573465948239988671502647976594219695644855304257327692006745978603320413799295628339695"); + +/// G1_GENERATOR_Y = +/// 241266749859715473739788878240585681733927191168601896383759122102112907357779751001206799952863815012735208165030 +#[rustfmt::skip] +pub const G1_GENERATOR_Y: Fq = field_new!(Fq, "241266749859715473739788878240585681733927191168601896383759122102112907357779751001206799952863815012735208165030"); diff --git a/arkworks/curves/bls12_377/src/curves/g2.rs b/arkworks/curves/bls12_377/src/curves/g2.rs new file mode 100644 index 00000000..77cda3a6 --- /dev/null +++ 
b/arkworks/curves/bls12_377/src/curves/g2.rs @@ -0,0 +1,86 @@ +use ark_ec::models::{ModelParameters, SWModelParameters}; +use ark_ff::{field_new, Zero}; + +use crate::{fields::FQ_ZERO, g1, Fq, Fq2, Fr}; + +#[derive(Clone, Default, PartialEq, Eq)] +pub struct Parameters; + +impl ModelParameters for Parameters { + type BaseField = Fq2; + type ScalarField = Fr; +} + +impl SWModelParameters for Parameters { + /// COEFF_A = [0, 0] + #[rustfmt::skip] + const COEFF_A: Fq2 = field_new!(Fq2, + g1::Parameters::COEFF_A, + g1::Parameters::COEFF_A, + ); + + // As per https://eprint.iacr.org/2012/072.pdf, + // this curve has b' = b/i, where b is the COEFF_B of G1, and x^6 -i is + // the irreducible poly used to extend from Fp2 to Fp12. + // In our case, i = u (App A.3, T_6). + /// COEFF_B = [0, + /// 155198655607781456406391640216936120121836107652948796323930557600032281009004493664981332883744016074664192874906] + #[rustfmt::skip] + const COEFF_B: Fq2 = field_new!(Fq2, + FQ_ZERO, + field_new!(Fq, "155198655607781456406391640216936120121836107652948796323930557600032281009004493664981332883744016074664192874906"), + ); + + /// COFACTOR = + /// 7923214915284317143930293550643874566881017850177945424769256759165301436616933228209277966774092486467289478618404761412630691835764674559376407658497 + #[rustfmt::skip] + const COFACTOR: &'static [u64] = &[ + 0x0000000000000001, + 0x452217cc90000000, + 0xa0f3622fba094800, + 0xd693e8c36676bd09, + 0x8c505634fae2e189, + 0xfbb36b00e1dcc40c, + 0xddd88d99a6f6a829, + 0x26ba558ae9562a, + ]; + + /// COFACTOR_INV = COFACTOR^{-1} mod r + /// = 6764900296503390671038341982857278410319949526107311149686707033187604810669 + #[rustfmt::skip] + const COFACTOR_INV: Fr = field_new!(Fr, "6764900296503390671038341982857278410319949526107311149686707033187604810669"); + + /// AFFINE_GENERATOR_COEFFS = (G2_GENERATOR_X, G2_GENERATOR_Y) + const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = + (G2_GENERATOR_X, G2_GENERATOR_Y); + + #[inline(always)] + fn mul_by_a(_: &Self::BaseField) -> Self::BaseField { + Self::BaseField::zero() + } +} + +#[rustfmt::skip] +pub const G2_GENERATOR_X: Fq2 = field_new!(Fq2, G2_GENERATOR_X_C0, G2_GENERATOR_X_C1); +#[rustfmt::skip] +pub const G2_GENERATOR_Y: Fq2 = field_new!(Fq2, G2_GENERATOR_Y_C0, G2_GENERATOR_Y_C1); + +/// G2_GENERATOR_X_C0 = +/// 233578398248691099356572568220835526895379068987715365179118596935057653620464273615301663571204657964920925606294 +#[rustfmt::skip] +pub const G2_GENERATOR_X_C0: Fq = field_new!(Fq, "233578398248691099356572568220835526895379068987715365179118596935057653620464273615301663571204657964920925606294"); + +/// G2_GENERATOR_X_C1 = +/// 140913150380207355837477652521042157274541796891053068589147167627541651775299824604154852141315666357241556069118 +#[rustfmt::skip] +pub const G2_GENERATOR_X_C1: Fq = field_new!(Fq, "140913150380207355837477652521042157274541796891053068589147167627541651775299824604154852141315666357241556069118"); + +/// G2_GENERATOR_Y_C0 = +/// 63160294768292073209381361943935198908131692476676907196754037919244929611450776219210369229519898517858833747423 +#[rustfmt::skip] +pub const G2_GENERATOR_Y_C0: Fq = field_new!(Fq, "63160294768292073209381361943935198908131692476676907196754037919244929611450776219210369229519898517858833747423"); + +/// G2_GENERATOR_Y_C1 = +/// 149157405641012693445398062341192467754805999074082136895788947234480009303640899064710353187729182149407503257491 +#[rustfmt::skip] +pub const G2_GENERATOR_Y_C1: Fq = field_new!(Fq, 
"149157405641012693445398062341192467754805999074082136895788947234480009303640899064710353187729182149407503257491"); diff --git a/arkworks/curves/bls12_377/src/curves/mod.rs b/arkworks/curves/bls12_377/src/curves/mod.rs new file mode 100644 index 00000000..33a87fda --- /dev/null +++ b/arkworks/curves/bls12_377/src/curves/mod.rs @@ -0,0 +1,33 @@ +use crate::*; +use ark_ec::{ + bls12, + bls12::{Bls12, Bls12Parameters, TwistType}, +}; + +pub mod g1; +pub mod g2; + +#[cfg(test)] +mod tests; + +pub struct Parameters; + +impl Bls12Parameters for Parameters { + const X: &'static [u64] = &[0x8508c00000000001]; + /// `x` is positive. + const X_IS_NEGATIVE: bool = false; + const TWIST_TYPE: TwistType = TwistType::D; + type Fp = Fq; + type Fp2Params = Fq2Parameters; + type Fp6Params = Fq6Parameters; + type Fp12Params = Fq12Parameters; + type G1Parameters = g1::Parameters; + type G2Parameters = g2::Parameters; +} + +pub type Bls12_377 = Bls12; + +pub type G1Affine = bls12::G1Affine; +pub type G1Projective = bls12::G1Projective; +pub type G2Affine = bls12::G2Affine; +pub type G2Projective = bls12::G2Projective; diff --git a/arkworks/curves/bls12_377/src/curves/tests.rs b/arkworks/curves/bls12_377/src/curves/tests.rs new file mode 100644 index 00000000..6010f1c3 --- /dev/null +++ b/arkworks/curves/bls12_377/src/curves/tests.rs @@ -0,0 +1,122 @@ +#![allow(unused_imports)] +use ark_ff::{ + fields::{Field, FpParameters, PrimeField, SquareRootField}, + One, Zero, +}; +use ark_serialize::CanonicalSerialize; +use ark_std::test_rng; + +use ark_ec::{models::SWModelParameters, AffineCurve, PairingEngine, ProjectiveCurve}; +use ark_std::rand::Rng; +use core::ops::{AddAssign, MulAssign}; + +use crate::{g1, g2, Bls12_377, Fq, Fq12, Fq2, Fr, G1Affine, G1Projective, G2Affine, G2Projective}; + +use ark_algebra_test_templates::{ + curves::{curve_tests, sw_tests}, + groups::group_test, +}; + +#[test] +fn test_g1_projective_curve() { + curve_tests::(); + + sw_tests::(); +} + +#[test] +fn test_g1_projective_group() { + let mut rng = test_rng(); + let a: G1Projective = rng.gen(); + let b: G1Projective = rng.gen(); + group_test(a, b); +} + +#[test] +fn test_g1_generator() { + let generator = G1Affine::prime_subgroup_generator(); + assert!(generator.is_on_curve()); + assert!(generator.is_in_correct_subgroup_assuming_on_curve()); +} + +#[test] +fn test_g2_projective_curve() { + curve_tests::(); + + sw_tests::(); +} + +#[test] +fn test_g2_projective_group() { + let mut rng = test_rng(); + let a: G2Projective = rng.gen(); + let b: G2Projective = rng.gen(); + group_test(a, b); +} + +#[test] +fn test_g2_generator() { + let generator = G2Affine::prime_subgroup_generator(); + assert!(generator.is_on_curve()); + assert!(generator.is_in_correct_subgroup_assuming_on_curve()); +} + +#[test] +fn test_bilinearity() { + let mut rng = test_rng(); + let a: G1Projective = rng.gen(); + let b: G2Projective = rng.gen(); + let s: Fr = rng.gen(); + + let mut sa = a; + sa.mul_assign(s); + let mut sb = b; + sb.mul_assign(s); + + let ans1 = Bls12_377::pairing(sa, b); + let ans2 = Bls12_377::pairing(a, sb); + let ans3 = Bls12_377::pairing(a, b).pow(s.into_repr()); + + assert_eq!(ans1, ans2); + assert_eq!(ans2, ans3); + + assert_ne!(ans1, Fq12::one()); + assert_ne!(ans2, Fq12::one()); + assert_ne!(ans3, Fq12::one()); + + assert_eq!(ans1.pow(Fr::characteristic()), Fq12::one()); + assert_eq!(ans2.pow(Fr::characteristic()), Fq12::one()); + assert_eq!(ans3.pow(Fr::characteristic()), Fq12::one()); +} + +#[test] +fn test_g1_generator_raw() { + let mut x = 
Fq::zero(); + let mut i = 0; + loop { + // y^2 = x^3 + b + let mut rhs = x; + rhs.square_in_place(); + rhs.mul_assign(&x); + rhs.add_assign(&g1::Parameters::COEFF_B); + + if let Some(y) = rhs.sqrt() { + let p = G1Affine::new(x, if y < -y { y } else { -y }, false); + assert!(!p.is_in_correct_subgroup_assuming_on_curve()); + + let g1 = p.scale_by_cofactor(); + if !g1.is_zero() { + assert_eq!(i, 1); + let g1 = G1Affine::from(g1); + + assert!(g1.is_in_correct_subgroup_assuming_on_curve()); + + assert_eq!(g1, G1Affine::prime_subgroup_generator()); + break; + } + } + + i += 1; + x.add_assign(&Fq::one()); + } +} diff --git a/arkworks/curves/bls12_377/src/fields/fq.rs b/arkworks/curves/bls12_377/src/fields/fq.rs new file mode 100644 index 00000000..ec0a79a3 --- /dev/null +++ b/arkworks/curves/bls12_377/src/fields/fq.rs @@ -0,0 +1,118 @@ +use ark_ff::{biginteger::BigInteger384 as BigInteger, fields::*}; + +pub type Fq = Fp384<FqParameters>; + +pub struct FqParameters; + +impl Fp384Parameters for FqParameters {} +impl FftParameters for FqParameters { + type BigInt = BigInteger; + + const TWO_ADICITY: u32 = 46u32; + + #[rustfmt::skip] + const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([ + 2022196864061697551u64, + 17419102863309525423u64, + 8564289679875062096u64, + 17152078065055548215u64, + 17966377291017729567u64, + 68610905582439508u64, + ]); +} +impl FpParameters for FqParameters { + /// MODULUS = 258664426012969094010652733694893533536393512754914660539884262666720468348340822774968888139573360124440321458177 + #[rustfmt::skip] + const MODULUS: BigInteger = BigInteger([ + 0x8508c00000000001, + 0x170b5d4430000000, + 0x1ef3622fba094800, + 0x1a22d9f300f5138f, + 0xc63b05c06ca1493b, + 0x1ae3a4617c510ea, + ]); + + const MODULUS_BITS: u32 = 377; + + const CAPACITY: u32 = Self::MODULUS_BITS - 1; + + const REPR_SHAVE_BITS: u32 = 7; + + /// R = 85013442423176922659824578519796707547925331718418265885885478904210582549405549618995257669764901891699128663912 + #[rustfmt::skip] + const R: BigInteger = BigInteger([ + 202099033278250856u64, + 5854854902718660529u64, + 11492539364873682930u64, + 8885205928937022213u64, + 5545221690922665192u64, + 39800542322357402u64, + ]); + + #[rustfmt::skip] + const R2: BigInteger = BigInteger([ + 0xb786686c9400cd22, + 0x329fcaab00431b1, + 0x22a5f11162d6b46d, + 0xbfdf7d03827dc3ac, + 0x837e92f041790bf9, + 0x6dfccb1e914b88, + ]); + + const INV: u64 = 9586122913090633727u64; + + /// GENERATOR = -5 + /// Encoded in Montgomery form, so the value here is + /// (-5 * R) % q = 92261639910053574722182574790803529333160366917737991650341130812388023949653897454961487930322210790384999596794 + #[rustfmt::skip] + const GENERATOR: BigInteger = BigInteger([ + 0xfc0b8000000002fa, + 0x97d39cf6e000018b, + 0x2072420fbfa05044, + 0xcbbcbd50d97c3802, + 0xbaf1ec35813f9eb, + 0x9974a2c0945ad2, + ]); + + #[rustfmt::skip] + const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0x4284600000000000, + 0xb85aea218000000, + 0x8f79b117dd04a400, + 0x8d116cf9807a89c7, + 0x631d82e03650a49d, + 0xd71d230be28875, + ]); + + // T and T_MINUS_ONE_DIV_TWO, where MODULUS - 1 = 2^S * T + // For T coprime to 2 + + // T = (MODULUS - 1) // 2^S = + // 3675842578061421676390135839012792950148785745837396071634149488243117337281387659330802195819009059 + #[rustfmt::skip] + const T: BigInteger = BigInteger([ + 0x7510c00000021423, + 0x88bee82520005c2d, + 0x67cc03d44e3c7bcd, + 0x1701b28524ec688b, + 0xe9185f1443ab18ec, + 0x6b8, + ]); + + // (T - 1) // 2 = + // 
1837921289030710838195067919506396475074392872918698035817074744121558668640693829665401097909504529 + #[rustfmt::skip] + const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0xba88600000010a11, + 0xc45f741290002e16, + 0xb3e601ea271e3de6, + 0xb80d94292763445, + 0x748c2f8a21d58c76, + 0x35c, + ]); +} + +#[allow(dead_code)] +pub const FQ_ONE: Fq = Fq::new(FqParameters::R); +#[allow(dead_code)] +pub const FQ_ZERO: Fq = Fq::new(BigInteger([0, 0, 0, 0, 0, 0])); diff --git a/arkworks/curves/bls12_377/src/fields/fq12.rs b/arkworks/curves/bls12_377/src/fields/fq12.rs new file mode 100644 index 00000000..20ea206e --- /dev/null +++ b/arkworks/curves/bls12_377/src/fields/fq12.rs @@ -0,0 +1,74 @@ +use super::*; +use ark_ff::{field_new, fields::*}; + +pub type Fq12 = Fp12<Fq12Parameters>; + +#[derive(Clone, Copy)] +pub struct Fq12Parameters; + +impl Fp12Parameters for Fq12Parameters { + type Fp6Params = Fq6Parameters; + + const NONRESIDUE: Fq6 = field_new!(Fq6, FQ2_ZERO, FQ2_ONE, FQ2_ZERO); + + #[rustfmt::skip] + const FROBENIUS_COEFF_FP12_C1: &'static [Fq2] = &[ + // Fp2::NONRESIDUE^(((q^0) - 1) / 6) + field_new!(Fq2, FQ_ONE, FQ_ZERO), + // Fp2::NONRESIDUE^(((q^1) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "92949345220277864758624960506473182677953048909283248980960104381795901929519566951595905490535835115111760994353"), + FQ_ZERO, + ), + // Fp2::NONRESIDUE^(((q^2) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "80949648264912719408558363140637477264845294720710499478137287262712535938301461879813459410946"), + FQ_ZERO, + ), + // Fp2::NONRESIDUE^(((q^3) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "216465761340224619389371505802605247630151569547285782856803747159100223055385581585702401816380679166954762214499"), + FQ_ZERO, + ), + // Fp2::NONRESIDUE^(((q^4) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "80949648264912719408558363140637477264845294720710499478137287262712535938301461879813459410945"), + FQ_ZERO, + ), + // Fp2::NONRESIDUE^(((q^5) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "123516416119946754630746545296132064952198520638002533875843642777304321125866014634106496325844844051843001220146"), + FQ_ZERO, + ), + // Fp2::NONRESIDUE^(((q^6) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "-1"), + FQ_ZERO, + ), + // Fp2::NONRESIDUE^(((q^7) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "165715080792691229252027773188420350858440463845631411558924158284924566418821255823372982649037525009328560463824"), + FQ_ZERO, + ), + // Fp2::NONRESIDUE^(((q^8) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "258664426012969093929703085429980814127835149614277183275038967946009968870203535512256352201271898244626862047231"), + FQ_ZERO, + ), + // Fp2::NONRESIDUE^(((q^9) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "42198664672744474621281227892288285906241943207628877683080515507620245292955241189266486323192680957485559243678"), + FQ_ZERO, + ), + // Fp2::NONRESIDUE^(((q^10) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "258664426012969093929703085429980814127835149614277183275038967946009968870203535512256352201271898244626862047232"), + FQ_ZERO, + ), + // Fp2::NONRESIDUE^(((q^11) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "135148009893022339379906188398761468584194992116912126664040619889416147222474808140862391813728516072597320238031"), + FQ_ZERO, + ), + ]; +} diff --git a/arkworks/curves/bls12_377/src/fields/fq2.rs b/arkworks/curves/bls12_377/src/fields/fq2.rs new file mode 100644 index 00000000..75f09ed2 --- /dev/null +++ b/arkworks/curves/bls12_377/src/fields/fq2.rs @@ -0,0 +1,38 @@ +use super::*; +use ark_ff::{field_new, 
fields::*}; + +pub type Fq2 = Fp2<Fq2Parameters>; + +pub struct Fq2Parameters; + +impl Fp2Parameters for Fq2Parameters { + type Fp = Fq; + + /// NONRESIDUE = -5 + #[rustfmt::skip] + const NONRESIDUE: Fq = field_new!(Fq, "-5"); + + /// QUADRATIC_NONRESIDUE = U + #[rustfmt::skip] + const QUADRATIC_NONRESIDUE: (Fq, Fq) = (FQ_ZERO, FQ_ONE); + + /// Coefficients for the Frobenius automorphism. + #[rustfmt::skip] + const FROBENIUS_COEFF_FP2_C1: &'static [Fq] = &[ + // NONRESIDUE**(((q^0) - 1) / 2) + FQ_ONE, + // NONRESIDUE**(((q^1) - 1) / 2) + field_new!(Fq, "-1"), + ]; + + #[inline(always)] + fn mul_fp_by_nonresidue(fe: &Self::Fp) -> Self::Fp { + let original = fe; + let mut fe = -fe.double(); + fe.double_in_place(); + fe - original + } +} + +pub const FQ2_ZERO: Fq2 = field_new!(Fq2, FQ_ZERO, FQ_ZERO); +pub const FQ2_ONE: Fq2 = field_new!(Fq2, FQ_ONE, FQ_ZERO); diff --git a/arkworks/curves/bls12_377/src/fields/fq6.rs b/arkworks/curves/bls12_377/src/fields/fq6.rs new file mode 100644 index 00000000..83a9ead1 --- /dev/null +++ b/arkworks/curves/bls12_377/src/fields/fq6.rs @@ -0,0 +1,78 @@ +use super::*; +use ark_ff::{field_new, fields::*}; + +pub type Fq6 = Fp6<Fq6Parameters>; + +#[derive(Clone, Copy)] +pub struct Fq6Parameters; + +impl Fp6Parameters for Fq6Parameters { + type Fp2Params = Fq2Parameters; + + /// NONRESIDUE = U + #[rustfmt::skip] + const NONRESIDUE: Fq2 = field_new!(Fq2, FQ_ZERO, FQ_ONE); + + #[rustfmt::skip] + const FROBENIUS_COEFF_FP6_C1: &'static [Fq2] = &[ + // Fp2::NONRESIDUE^(((q^0) - 1) / 3) + field_new!(Fq2, FQ_ONE, FQ_ZERO), + // Fp2::NONRESIDUE^(((q^1) - 1) / 3) + field_new!(Fq2, + field_new!(Fq, "80949648264912719408558363140637477264845294720710499478137287262712535938301461879813459410946"), + FQ_ZERO, + ), + // Fp2::NONRESIDUE^(((q^2) - 1) / 3) + field_new!(Fq2, + field_new!(Fq, "80949648264912719408558363140637477264845294720710499478137287262712535938301461879813459410945"), + FQ_ZERO, + ), + // Fp2::NONRESIDUE^(((q^3) - 1) / 3) + field_new!(Fq2, field_new!(Fq, "-1"), FQ_ZERO), + // Fp2::NONRESIDUE^(((q^4) - 1) / 3) + field_new!(Fq2, + field_new!(Fq, "258664426012969093929703085429980814127835149614277183275038967946009968870203535512256352201271898244626862047231"), + FQ_ZERO, + ), + // Fp2::NONRESIDUE^(((q^5) - 1) / 3) + field_new!(Fq2, + field_new!(Fq, "258664426012969093929703085429980814127835149614277183275038967946009968870203535512256352201271898244626862047232"), + FQ_ZERO, + ), + ]; + #[rustfmt::skip] + const FROBENIUS_COEFF_FP6_C2: &'static [Fq2] = &[ + // Fp2::NONRESIDUE^((2*(q^0) - 2) / 3) + field_new!(Fq2, FQ_ONE, FQ_ZERO), + // Fp2::NONRESIDUE^((2*(q^1) - 2) / 3) + field_new!(Fq2, + field_new!(Fq, "80949648264912719408558363140637477264845294720710499478137287262712535938301461879813459410945"), + FQ_ZERO + ), + // Fp2::NONRESIDUE^((2*(q^2) - 2) / 3) + field_new!(Fq2, + field_new!(Fq, "258664426012969093929703085429980814127835149614277183275038967946009968870203535512256352201271898244626862047231"), + FQ_ZERO, + ), + // Fp2::NONRESIDUE^((2*(q^3) - 2) / 3) + field_new!(Fq2, FQ_ONE, FQ_ZERO), + // Fp2::NONRESIDUE^((2*(q^4) - 2) / 3) + field_new!(Fq2, + field_new!(Fq, "80949648264912719408558363140637477264845294720710499478137287262712535938301461879813459410945"), + FQ_ZERO, + ), + // Fp2::NONRESIDUE^((2*(q^5) - 2) / 3) + field_new!(Fq2, + field_new!(Fq, "258664426012969093929703085429980814127835149614277183275038967946009968870203535512256352201271898244626862047231"), + FQ_ZERO, + ), + ]; + + #[inline(always)] + fn mul_fp2_by_nonresidue(fe: &Fq2) -> Fq2 { + // Karatsuba 
multiplication with constant other = u. + let c0 = Fq2Parameters::mul_fp_by_nonresidue(&fe.c1); + let c1 = fe.c0; + field_new!(Fq2, c0, c1) + } +} diff --git a/arkworks/curves/bls12_377/src/fields/fr.rs b/arkworks/curves/bls12_377/src/fields/fr.rs new file mode 100644 index 00000000..0d257fe1 --- /dev/null +++ b/arkworks/curves/bls12_377/src/fields/fr.rs @@ -0,0 +1,120 @@ +//! Bls12-377 scalar field. +//! +//! Roots of unity computed from modulus and R using this sage code: +//! +//! ```ignore +//! q = 8444461749428370424248824938781546531375899335154063827935233455917409239041 +//! R = 6014086494747379908336260804527802945383293308637734276299549080986809532403 # Montgomery R +//! s = 47 +//! o = q - 1 +//! F = GF(q) +//! g = F.multiplicative_generator() +//! assert g.multiplicative_order() == o +//! g2 = g ** (o/2**s) +//! assert g2.multiplicative_order() == 2**s +//! def into_chunks(val, width, n): +//!     return [int(int(val) // (2 ** (width * i)) % 2 ** width) for i in range(n)] +//! print("Gen: ", g * R % q) +//! print("Gen: ", into_chunks(g * R % q, 64, 4)) +//! print("2-adic gen: ", into_chunks(g2 * R % q, 64, 4)) +//! ``` +use ark_ff::{biginteger::BigInteger256 as BigInteger, fields::*}; + +pub type Fr = Fp256<FrParameters>; + +pub struct FrParameters; + +impl Fp256Parameters for FrParameters {} +impl FftParameters for FrParameters { + type BigInt = BigInteger; + + const TWO_ADICITY: u32 = 47; + + #[rustfmt::skip] + const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([ + 12646347781564978760u64, + 6783048705277173164u64, + 268534165941069093u64, + 1121515446318641358u64, + ]); +} +impl FpParameters for FrParameters { + /// MODULUS = 8444461749428370424248824938781546531375899335154063827935233455917409239041 + #[rustfmt::skip] + const MODULUS: BigInteger = BigInteger([ + 725501752471715841u64, + 6461107452199829505u64, + 6968279316240510977u64, + 1345280370688173398u64, + ]); + + const MODULUS_BITS: u32 = 253; + + const CAPACITY: u32 = Self::MODULUS_BITS - 1; + + const REPR_SHAVE_BITS: u32 = 3; + + /// R = 6014086494747379908336260804527802945383293308637734276299549080986809532403 + #[rustfmt::skip] + const R: BigInteger = BigInteger([ + 9015221291577245683u64, + 8239323489949974514u64, + 1646089257421115374u64, + 958099254763297437u64, + ]); + + #[rustfmt::skip] + const R2: BigInteger = BigInteger([ + 2726216793283724667u64, + 14712177743343147295u64, + 12091039717619697043u64, + 81024008013859129u64, + ]); + + const INV: u64 = 725501752471715839u64; + + /// GENERATOR = 22 + /// Encoded in Montgomery form, so the value is + /// (22 * R) % q = 5642976643016801619665363617888466827793962762719196659561577942948671127251 + #[rustfmt::skip] + const GENERATOR: BigInteger = BigInteger([ + 2984901390528151251u64, + 10561528701063790279u64, + 5476750214495080041u64, + 898978044469942640u64, + ]); + + /// (r - 1)/2 = + /// 4222230874714185212124412469390773265687949667577031913967616727958704619520 + #[rustfmt::skip] + const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0x8508c00000000000, + 0xacd53b7f68000000, + 0x305a268f2e1bd800, + 0x955b2af4d1652ab, + ]); + + // T and T_MINUS_ONE_DIV_TWO, where r - 1 = 2^s * t + // For T coprime to 2 + + /// t = (r - 1) / 2^s = + /// 60001509534603559531609739528203892656505753216962260608619555 + #[rustfmt::skip] + const T: BigInteger = BigInteger([ + 0xedfda00000021423, + 0x9a3cb86f6002b354, + 0xcabd34594aacc168, + 0x2556, + ]); + + /// (t - 1) / 2 = + /// 
30000754767301779765804869764101946328252876608481130304309777 + #[rustfmt::skip] + const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0x76fed00000010a11, + 0x4d1e5c37b00159aa, + 0x655e9a2ca55660b4, + 0x12ab, + ]); +} diff --git a/arkworks/curves/bls12_377/src/fields/mod.rs b/arkworks/curves/bls12_377/src/fields/mod.rs new file mode 100644 index 00000000..401c07b5 --- /dev/null +++ b/arkworks/curves/bls12_377/src/fields/mod.rs @@ -0,0 +1,27 @@ +#[cfg(feature = "scalar_field")] +pub mod fr; +#[cfg(feature = "scalar_field")] +pub use self::fr::*; + +#[cfg(feature = "base_field")] +pub mod fq; +#[cfg(feature = "base_field")] +pub use self::fq::*; + +#[cfg(feature = "curve")] +pub mod fq2; +#[cfg(feature = "curve")] +pub use self::fq2::*; + +#[cfg(feature = "curve")] +pub mod fq6; +#[cfg(feature = "curve")] +pub use self::fq6::*; + +#[cfg(feature = "curve")] +pub mod fq12; +#[cfg(feature = "curve")] +pub use self::fq12::*; + +#[cfg(all(feature = "curve", test))] +mod tests; diff --git a/arkworks/curves/bls12_377/src/fields/tests.rs b/arkworks/curves/bls12_377/src/fields/tests.rs new file mode 100644 index 00000000..66760bb6 --- /dev/null +++ b/arkworks/curves/bls12_377/src/fields/tests.rs @@ -0,0 +1,531 @@ +use ark_ff::{ + biginteger::{BigInteger, BigInteger384}, + fields::{ + fp6_3over2::Fp6Parameters, FftField, FftParameters, Field, Fp2Parameters, FpParameters, + PrimeField, SquareRootField, + }, + One, UniformRand, Zero, +}; +use ark_serialize::{buffer_bit_byte_size, CanonicalSerialize}; +use ark_std::rand::Rng; +use ark_std::test_rng; +use core::{ + cmp::Ordering, + ops::{AddAssign, MulAssign, SubAssign}, +}; + +use crate::{Fq, Fq12, Fq2, Fq2Parameters, Fq6, Fq6Parameters, FqParameters, Fr}; + +use ark_algebra_test_templates::fields::*; + +pub(crate) const ITERATIONS: usize = 5; + +#[test] +fn test_fr() { + let mut rng = test_rng(); + for _ in 0..ITERATIONS { + let a: Fr = rng.gen(); + let b: Fr = rng.gen(); + field_test(a, b); + primefield_test::<Fr>(); + sqrt_field_test(b); + let byte_size = a.serialized_size(); + field_serialization_test::<Fr>(byte_size); + } +} + +#[test] +fn test_fq() { + let mut rng = test_rng(); + for _ in 0..ITERATIONS { + let a: Fq = rng.gen(); + let b: Fq = rng.gen(); + field_test(a, b); + primefield_test::<Fq>(); + sqrt_field_test(a); + let byte_size = a.serialized_size(); + let (_, buffer_size) = buffer_bit_byte_size(Fq::size_in_bits()); + assert_eq!(byte_size, buffer_size); + field_serialization_test::<Fq>(byte_size); + } +} + +#[test] +fn test_fq2() { + let mut rng = test_rng(); + for _ in 0..ITERATIONS { + let a: Fq2 = rng.gen(); + let b: Fq2 = rng.gen(); + field_test(a, b); + sqrt_field_test(a); + } + frobenius_test::<Fq2, _>(Fq::characteristic(), 13); + let byte_size = Fq2::zero().serialized_size(); + field_serialization_test::<Fq2>(byte_size); +} + +#[test] +fn test_fq6() { + let mut rng = test_rng(); + for _ in 0..ITERATIONS { + let g: Fq6 = rng.gen(); + let h: Fq6 = rng.gen(); + field_test(g, h); + } + + frobenius_test::<Fq6, _>(Fq::characteristic(), 13); + let byte_size = Fq6::zero().serialized_size(); + field_serialization_test::<Fq6>(byte_size); +} + +#[test] +fn test_fq12() { + let mut rng = test_rng(); + for _ in 0..ITERATIONS { + let g: Fq12 = rng.gen(); + let h: Fq12 = rng.gen(); + field_test(g, h); + } + frobenius_test::<Fq12, _>(Fq::characteristic(), 13); + let byte_size = Fq12::zero().serialized_size(); + field_serialization_test::<Fq12>(byte_size); +} + +#[test] +fn test_fq_repr_from() { + assert_eq!( + BigInteger384::from(100), + BigInteger384([100, 0, 0, 0, 0, 0]) + ); +} + +#[test] +fn 
test_fq_repr_is_odd() { + assert!(!BigInteger384::from(0).is_odd()); + assert!(BigInteger384::from(0).is_even()); + assert!(BigInteger384::from(1).is_odd()); + assert!(!BigInteger384::from(1).is_even()); + assert!(!BigInteger384::from(324834872).is_odd()); + assert!(BigInteger384::from(324834872).is_even()); + assert!(BigInteger384::from(324834873).is_odd()); + assert!(!BigInteger384::from(324834873).is_even()); +} + +#[test] +fn test_fq_repr_is_zero() { + assert!(BigInteger384::from(0).is_zero()); + assert!(!BigInteger384::from(1).is_zero()); + assert!(!BigInteger384([0, 0, 0, 0, 1, 0]).is_zero()); +} + +#[test] +fn test_fq_repr_num_bits() { + let mut a = BigInteger384::from(0); + assert_eq!(0, a.num_bits()); + a = BigInteger384::from(1); + for i in 1..385 { + assert_eq!(i, a.num_bits()); + a.mul2(); + } + assert_eq!(0, a.num_bits()); +} + +#[test] +fn test_fq_add_assign() { + // Test associativity + + let mut rng = test_rng(); + + for _ in 0..1000 { + // Generate a, b, c and ensure (a + b) + c == a + (b + c). + let a = Fq::rand(&mut rng); + let b = Fq::rand(&mut rng); + let c = Fq::rand(&mut rng); + + let mut tmp1 = a; + tmp1.add_assign(&b); + tmp1.add_assign(&c); + + let mut tmp2 = b; + tmp2.add_assign(&c); + tmp2.add_assign(&a); + + assert_eq!(tmp1, tmp2); + } +} + +#[test] +fn test_fq_sub_assign() { + let mut rng = test_rng(); + + for _ in 0..1000 { + // Ensure that (a - b) + (b - a) = 0. + let a = Fq::rand(&mut rng); + let b = Fq::rand(&mut rng); + + let mut tmp1 = a; + tmp1.sub_assign(&b); + + let mut tmp2 = b; + tmp2.sub_assign(&a); + + tmp1.add_assign(&tmp2); + assert!(tmp1.is_zero()); + } +} + +#[test] +fn test_fq_mul_assign() { + let mut rng = test_rng(); + + for _ in 0..1000000 { + // Ensure that (a * b) * c = a * (b * c) + let a = Fq::rand(&mut rng); + let b = Fq::rand(&mut rng); + let c = Fq::rand(&mut rng); + + let mut tmp1 = a; + tmp1.mul_assign(&b); + tmp1.mul_assign(&c); + + let mut tmp2 = b; + tmp2.mul_assign(&c); + tmp2.mul_assign(&a); + + assert_eq!(tmp1, tmp2); + } + + for _ in 0..1000000 { + // Ensure that r * (a + b + c) = r*a + r*b + r*c + + let r = Fq::rand(&mut rng); + let mut a = Fq::rand(&mut rng); + let mut b = Fq::rand(&mut rng); + let mut c = Fq::rand(&mut rng); + + let mut tmp1 = a; + tmp1.add_assign(&b); + tmp1.add_assign(&c); + tmp1.mul_assign(&r); + + a.mul_assign(&r); + b.mul_assign(&r); + c.mul_assign(&r); + + a.add_assign(&b); + a.add_assign(&c); + + assert_eq!(tmp1, a); + } +} + +#[test] +fn test_fq_squaring() { + let mut rng = test_rng(); + + for _ in 0..1000000 { + // Ensure that (a * a) = a^2 + let a = Fq::rand(&mut rng); + + let mut tmp = a; + tmp.square_in_place(); + + let mut tmp2 = a; + tmp2.mul_assign(&a); + + assert_eq!(tmp, tmp2); + } +} + +#[test] +fn test_fq_inverse() { + assert!(Fq::zero().inverse().is_none()); + + let mut rng = test_rng(); + + let one = Fq::one(); + + for _ in 0..1000 { + // Ensure that a * a^-1 = 1 + let mut a = Fq::rand(&mut rng); + let ainv = a.inverse().unwrap(); + a.mul_assign(&ainv); + assert_eq!(a, one); + } +} + +#[test] +fn test_fq_double_in_place() { + let mut rng = test_rng(); + + for _ in 0..1000 { + // Ensure doubling a is equivalent to adding a to itself. + let mut a = Fq::rand(&mut rng); + let mut b = a; + b.add_assign(&a); + a.double_in_place(); + assert_eq!(a, b); + } +} + +#[test] +fn test_fq_negate() { + { + let a = -Fq::zero(); + + assert!(a.is_zero()); + } + + let mut rng = test_rng(); + + for _ in 0..1000 { + // Ensure a + (-a) = 0. 
+ let mut a = Fq::rand(&mut rng); + let b = -a; + a.add_assign(&b); + + assert!(a.is_zero()); + } +} + +#[test] +fn test_fq_pow() { + let mut rng = test_rng(); + + for i in 0..1000 { + // Exponentiate by various small numbers and ensure the result is + // consistent with repeated multiplication. + let a = Fq::rand(&mut rng); + let target = a.pow(&[i]); + let mut c = Fq::one(); + for _ in 0..i { + c.mul_assign(&a); + } + assert_eq!(c, target); + } + + for _ in 0..1000 { + // Exponentiating by the modulus should have no effect in a prime field. + let a = Fq::rand(&mut rng); + + assert_eq!(a, a.pow(Fq::characteristic())); + } +} + +#[test] +fn test_fq_sqrt() { + let mut rng = test_rng(); + + assert_eq!(Fq::zero().sqrt().unwrap(), Fq::zero()); + + for _ in 0..1000 { + // Ensure sqrt(a^2) = a or -a + let a = Fq::rand(&mut rng); + let nega = -a; + let mut b = a; + b.square_in_place(); + + let b = b.sqrt().unwrap(); + + assert!(a == b || nega == b); + } + + for _ in 0..1000 { + // Ensure sqrt(a)^2 = a for random a + let a = Fq::rand(&mut rng); + + if let Some(mut tmp) = a.sqrt() { + tmp.square_in_place(); + + assert_eq!(a, tmp); + } + } +} + +#[test] +fn test_fq_num_bits() { + assert_eq!(FqParameters::MODULUS_BITS, 377); + assert_eq!(FqParameters::CAPACITY, 376); +} + +#[test] +fn test_fq_root_of_unity() { + assert_eq!(FqParameters::TWO_ADICITY, 46); + assert_eq!( + Fq::multiplicative_generator().pow([ + 0x7510c00000021423, + 0x88bee82520005c2d, + 0x67cc03d44e3c7bcd, + 0x1701b28524ec688b, + 0xe9185f1443ab18ec, + 0x6b8 + ]), + Fq::two_adic_root_of_unity() + ); + assert_eq!( + Fq::two_adic_root_of_unity().pow([1 << FqParameters::TWO_ADICITY]), + Fq::one() + ); + assert!(Fq::multiplicative_generator().sqrt().is_none()); +} + +#[test] +fn test_fq_ordering() { + // BigInteger384's ordering is well-tested, but we still need to make sure the + // Fq elements aren't being compared in Montgomery form. 
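+ // `Fq::from(BigInteger384)` takes a canonical (non-Montgomery) integer + // representation, so consecutive integers must compare in natural order.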
+ for i in 0..100 { + assert!(Fq::from(BigInteger384::from(i + 1)) > Fq::from(BigInteger384::from(i))); + } +} + +#[test] +fn test_fq_legendre() { + use ark_ff::fields::LegendreSymbol::*; + + assert_eq!(QuadraticResidue, Fq::one().legendre()); + assert_eq!(Zero, Fq::zero().legendre()); + assert_eq!( + QuadraticResidue, + Fq::from(BigInteger384::from(4)).legendre() + ); + assert_eq!( + QuadraticNonResidue, + Fq::from(BigInteger384::from(5)).legendre() + ); +} + +#[test] +fn test_fq2_ordering() { + let mut a = Fq2::new(Fq::zero(), Fq::zero()); + let mut b = a.clone(); + + assert!(a.cmp(&b) == Ordering::Equal); + b.c0.add_assign(&Fq::one()); + assert!(a.cmp(&b) == Ordering::Less); + a.c0.add_assign(&Fq::one()); + assert!(a.cmp(&b) == Ordering::Equal); + b.c1.add_assign(&Fq::one()); + assert!(a.cmp(&b) == Ordering::Less); + a.c0.add_assign(&Fq::one()); + assert!(a.cmp(&b) == Ordering::Less); + a.c1.add_assign(&Fq::one()); + assert!(a.cmp(&b) == Ordering::Greater); + b.c0.add_assign(&Fq::one()); + assert!(a.cmp(&b) == Ordering::Equal); +} + +#[test] +fn test_fq2_basics() { + assert_eq!(Fq2::new(Fq::zero(), Fq::zero(),), Fq2::zero()); + assert_eq!(Fq2::new(Fq::one(), Fq::zero(),), Fq2::one()); + assert!(Fq2::zero().is_zero()); + assert!(!Fq2::one().is_zero()); + assert!(!Fq2::new(Fq::zero(), Fq::one(),).is_zero()); +} + +#[test] +fn test_fq2_legendre() { + use ark_ff::fields::LegendreSymbol::*; + + assert_eq!(Zero, Fq2::zero().legendre()); + // i^2 = -1 + let mut m1 = -Fq2::one(); + assert_eq!(QuadraticResidue, m1.legendre()); + m1 = Fq6Parameters::mul_fp2_by_nonresidue(&m1); + assert_eq!(QuadraticNonResidue, m1.legendre()); +} + +#[test] +fn test_fq2_mul_nonresidue() { + let mut rng = test_rng(); + + let nqr = Fq2::new(Fq::zero(), Fq::one()); + + let quadratic_non_residue = Fq2::new( + Fq2Parameters::QUADRATIC_NONRESIDUE.0, + Fq2Parameters::QUADRATIC_NONRESIDUE.1, + ); + for _ in 0..1000 { + let mut a = Fq2::rand(&mut rng); + let mut b = a; + a = quadratic_non_residue * &a; + b.mul_assign(&nqr); + + assert_eq!(a, b); + } +} + +#[test] +fn test_fq6_mul_by_1() { + let mut rng = test_rng(); + + for _ in 0..1000 { + let c1 = Fq2::rand(&mut rng); + let mut a = Fq6::rand(&mut rng); + let mut b = a; + + a.mul_by_1(&c1); + b.mul_assign(&Fq6::new(Fq2::zero(), c1, Fq2::zero())); + + assert_eq!(a, b); + } +} + +#[test] +fn test_fq6_mul_by_01() { + let mut rng = test_rng(); + + for _ in 0..1000 { + let c0 = Fq2::rand(&mut rng); + let c1 = Fq2::rand(&mut rng); + let mut a = Fq6::rand(&mut rng); + let mut b = a; + + a.mul_by_01(&c0, &c1); + b.mul_assign(&Fq6::new(c0, c1, Fq2::zero())); + + assert_eq!(a, b); + } +} + +#[test] +fn test_fq12_mul_by_014() { + let mut rng = test_rng(); + + for _ in 0..1000 { + let c0 = Fq2::rand(&mut rng); + let c1 = Fq2::rand(&mut rng); + let c5 = Fq2::rand(&mut rng); + let mut a = Fq12::rand(&mut rng); + let mut b = a; + + a.mul_by_014(&c0, &c1, &c5); + b.mul_assign(&Fq12::new( + Fq6::new(c0, c1, Fq2::zero()), + Fq6::new(Fq2::zero(), c5, Fq2::zero()), + )); + + assert_eq!(a, b); + } +} + +#[test] +fn test_fq12_mul_by_034() { + let mut rng = test_rng(); + + for _ in 0..1000 { + let c0 = Fq2::rand(&mut rng); + let c3 = Fq2::rand(&mut rng); + let c4 = Fq2::rand(&mut rng); + let mut a = Fq12::rand(&mut rng); + let mut b = a; + + a.mul_by_034(&c0, &c3, &c4); + b.mul_assign(&Fq12::new( + Fq6::new(c0, Fq2::zero(), Fq2::zero()), + Fq6::new(c3, c4, Fq2::zero()), + )); + + assert_eq!(a, b); + } +} diff --git a/arkworks/curves/bls12_377/src/lib.rs b/arkworks/curves/bls12_377/src/lib.rs 
new file mode 100644 index 00000000..2659771c --- /dev/null +++ b/arkworks/curves/bls12_377/src/lib.rs @@ -0,0 +1,38 @@ +#![cfg_attr(not(feature = "std"), no_std)] +#![deny( + warnings, + unused, + future_incompatible, + nonstandard_style, + rust_2018_idioms +)] +#![forbid(unsafe_code)] + +//! This library implements the BLS12_377 curve generated in [\[BCGMMW20, “Zexe”\]](https://eprint.iacr.org/2018/962). +//! The name denotes that it is a Barreto--Lynn--Scott curve of embedding degree 12, +//! defined over a 377-bit (prime) field. The main feature of this curve is that +//! both the scalar field and the base field are highly 2-adic. +//! (This is in contrast to the BLS12_381 curve for which only the scalar field is highly 2-adic.) +//! +//! +//! Curve information: +//! * Base field: q = 258664426012969094010652733694893533536393512754914660539884262666720468348340822774968888139573360124440321458177 +//! * Scalar field: r = 8444461749428370424248824938781546531375899335154063827935233455917409239041 +//! * valuation(q - 1, 2) = 46 +//! * valuation(r - 1, 2) = 47 +//! * G1 curve equation: y^2 = x^3 + 1 +//! * G2 curve equation: y^2 = x^3 + B, where +//! * B = Fq2(0, 155198655607781456406391640216936120121836107652948796323930557600032281009004493664981332883744016074664192874906) + +#[cfg(feature = "curve")] +mod curves; + +mod fields; + +#[cfg(feature = "r1cs")] +pub mod constraints; + +#[cfg(feature = "curve")] +pub use curves::*; + +pub use fields::*; diff --git a/arkworks/curves/bls12_381/Cargo.toml b/arkworks/curves/bls12_381/Cargo.toml new file mode 100644 index 00000000..baf2c8b6 --- /dev/null +++ b/arkworks/curves/bls12_381/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "ark-bls12-381" +version = "0.3.0" +authors = [ "arkworks contributors" ] +description = "The BLS12-381 pairing-friendly elliptic curve" +homepage = "https://arkworks.rs" +repository = "https://github.com/arkworks-rs/algebra" +documentation = "https://docs.rs/ark-bls12-381/" +keywords = ["cryptography", "finite-fields", "elliptic-curves" ] +categories = ["cryptography"] +include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +license = "MIT/Apache-2.0" +edition = "2018" + +[dependencies] +ark-ff = { version="^0.3.0", default-features = false } +ark-ec = { version="^0.3.0", default-features = false } +ark-std = { version="^0.3.0", default-features = false } + +[dev-dependencies] +ark-serialize = { version="^0.3.0", default-features = false } +ark-algebra-test-templates = { version="^0.3.0", default-features = false } + +[features] +default = [ "curve" ] +std = [ "ark-std/std", "ark-ff/std", "ark-ec/std" ] + +curve = [ "scalar_field" ] +scalar_field = [] diff --git a/arkworks/curves/bls12_381/LICENSE-APACHE b/arkworks/curves/bls12_381/LICENSE-APACHE new file mode 120000 index 00000000..965b606f --- /dev/null +++ b/arkworks/curves/bls12_381/LICENSE-APACHE @@ -0,0 +1 @@ +../LICENSE-APACHE \ No newline at end of file diff --git a/arkworks/curves/bls12_381/LICENSE-MIT b/arkworks/curves/bls12_381/LICENSE-MIT new file mode 120000 index 00000000..76219eb7 --- /dev/null +++ b/arkworks/curves/bls12_381/LICENSE-MIT @@ -0,0 +1 @@ +../LICENSE-MIT \ No newline at end of file diff --git a/arkworks/curves/bls12_381/src/curves/g1.rs b/arkworks/curves/bls12_381/src/curves/g1.rs new file mode 100644 index 00000000..2cc63acf --- /dev/null +++ b/arkworks/curves/bls12_381/src/curves/g1.rs @@ -0,0 +1,53 @@ +use crate::*; +use ark_ec::{ + bls12, + models::{ModelParameters, SWModelParameters}, +}; +use 
ark_ff::{field_new, Zero}; + +pub type G1Affine = bls12::G1Affine<Parameters>; +pub type G1Projective = bls12::G1Projective<Parameters>; + +#[derive(Clone, Default, PartialEq, Eq)] +pub struct Parameters; + +impl ModelParameters for Parameters { + type BaseField = Fq; + type ScalarField = Fr; +} + +impl SWModelParameters for Parameters { + /// COEFF_A = 0 + const COEFF_A: Fq = field_new!(Fq, "0"); + + /// COEFF_B = 4 + #[rustfmt::skip] + const COEFF_B: Fq = field_new!(Fq, "4"); + + /// COFACTOR = (x - 1)^2 / 3 = 76329603384216526031706109802092473003 + const COFACTOR: &'static [u64] = &[0x8c00aaab0000aaab, 0x396c8c005555e156]; + + /// COFACTOR_INV = COFACTOR^{-1} mod r + /// = 52435875175126190458656871551744051925719901746859129887267498875565241663483 + #[rustfmt::skip] + const COFACTOR_INV: Fr = field_new!(Fr, "52435875175126190458656871551744051925719901746859129887267498875565241663483"); + + /// AFFINE_GENERATOR_COEFFS = (G1_GENERATOR_X, G1_GENERATOR_Y) + const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = + (G1_GENERATOR_X, G1_GENERATOR_Y); + + #[inline(always)] + fn mul_by_a(_: &Self::BaseField) -> Self::BaseField { + Self::BaseField::zero() + } +} + +/// G1_GENERATOR_X = +/// 3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507 +#[rustfmt::skip] +pub const G1_GENERATOR_X: Fq = field_new!(Fq, "3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507"); + +/// G1_GENERATOR_Y = +/// 1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569 +#[rustfmt::skip] +pub const G1_GENERATOR_Y: Fq = field_new!(Fq, "1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569"); diff --git a/arkworks/curves/bls12_381/src/curves/g2.rs b/arkworks/curves/bls12_381/src/curves/g2.rs new file mode 100644 index 00000000..f1147e69 --- /dev/null +++ b/arkworks/curves/bls12_381/src/curves/g2.rs @@ -0,0 +1,77 @@ +use crate::*; +use ark_ec::{ + bls12, + models::{ModelParameters, SWModelParameters}, +}; +use ark_ff::{field_new, Zero}; + +pub type G2Affine = bls12::G2Affine<Parameters>; +pub type G2Projective = bls12::G2Projective<Parameters>; + +#[derive(Clone, Default, PartialEq, Eq)] +pub struct Parameters; + +impl ModelParameters for Parameters { + type BaseField = Fq2; + type ScalarField = Fr; +} + +impl SWModelParameters for Parameters { + /// COEFF_A = [0, 0] + const COEFF_A: Fq2 = field_new!(Fq2, g1::Parameters::COEFF_A, g1::Parameters::COEFF_A,); + + /// COEFF_B = [4, 4] + const COEFF_B: Fq2 = field_new!(Fq2, g1::Parameters::COEFF_B, g1::Parameters::COEFF_B,); + + /// COFACTOR = (x^8 - 4 x^7 + 5 x^6) - (4 x^4 + 6 x^3 - 4 x^2 - 4 x + 13) // + /// 9 + /// = 305502333931268344200999753193121504214466019254188142667664032982267604182971884026507427359259977847832272839041616661285803823378372096355777062779109 + #[rustfmt::skip] + const COFACTOR: &'static [u64] = &[ + 0xcf1c38e31c7238e5, + 0x1616ec6e786f0c70, + 0x21537e293a6691ae, + 0xa628f1cb4d9e82ef, + 0xa68a205b2e5a7ddf, + 0xcd91de4547085aba, + 0x91d50792876a202, + 0x5d543a95414e7f1, + ]; + + /// COFACTOR_INV = COFACTOR^{-1} mod r + /// = 26652489039290660355457965112010883481355318854675681319708643586776743290055 + #[rustfmt::skip] + const COFACTOR_INV: Fr = field_new!(Fr, "26652489039290660355457965112010883481355318854675681319708643586776743290055"); + + /// AFFINE_GENERATOR_COEFFS = (G2_GENERATOR_X, G2_GENERATOR_Y) + const AFFINE_GENERATOR_COEFFS: 
(Self::BaseField, Self::BaseField) = + (G2_GENERATOR_X, G2_GENERATOR_Y); + + #[inline(always)] + fn mul_by_a(_: &Self::BaseField) -> Self::BaseField { + Self::BaseField::zero() + } +} + +pub const G2_GENERATOR_X: Fq2 = field_new!(Fq2, G2_GENERATOR_X_C0, G2_GENERATOR_X_C1); +pub const G2_GENERATOR_Y: Fq2 = field_new!(Fq2, G2_GENERATOR_Y_C0, G2_GENERATOR_Y_C1); + +/// G2_GENERATOR_X_C0 = +/// 352701069587466618187139116011060144890029952792775240219908644239793785735715026873347600343865175952761926303160 +#[rustfmt::skip] +pub const G2_GENERATOR_X_C0: Fq = field_new!(Fq, "352701069587466618187139116011060144890029952792775240219908644239793785735715026873347600343865175952761926303160"); + +/// G2_GENERATOR_X_C1 = +/// 3059144344244213709971259814753781636986470325476647558659373206291635324768958432433509563104347017837885763365758 +#[rustfmt::skip] +pub const G2_GENERATOR_X_C1: Fq = field_new!(Fq, "3059144344244213709971259814753781636986470325476647558659373206291635324768958432433509563104347017837885763365758"); + +/// G2_GENERATOR_Y_C0 = +/// 1985150602287291935568054521177171638300868978215655730859378665066344726373823718423869104263333984641494340347905 +#[rustfmt::skip] +pub const G2_GENERATOR_Y_C0: Fq = field_new!(Fq, "1985150602287291935568054521177171638300868978215655730859378665066344726373823718423869104263333984641494340347905"); + +/// G2_GENERATOR_Y_C1 = +/// 927553665492332455747201965776037880757740193453592970025027978793976877002675564980949289727957565575433344219582 +#[rustfmt::skip] +pub const G2_GENERATOR_Y_C1: Fq = field_new!(Fq, "927553665492332455747201965776037880757740193453592970025027978793976877002675564980949289727957565575433344219582"); diff --git a/arkworks/curves/bls12_381/src/curves/mod.rs b/arkworks/curves/bls12_381/src/curves/mod.rs new file mode 100644 index 00000000..ffa7ed12 --- /dev/null +++ b/arkworks/curves/bls12_381/src/curves/mod.rs @@ -0,0 +1,30 @@ +use ark_ec::bls12::{Bls12, Bls12Parameters, TwistType}; + +use crate::{Fq, Fq12Parameters, Fq2Parameters, Fq6Parameters}; + +pub mod g1; +pub mod g2; + +#[cfg(test)] +mod tests; + +pub use self::{ + g1::{G1Affine, G1Projective}, + g2::{G2Affine, G2Projective}, +}; + +pub type Bls12_381 = Bls12<Parameters>; + +pub struct Parameters; + +impl Bls12Parameters for Parameters { + const X: &'static [u64] = &[0xd201000000010000]; + const X_IS_NEGATIVE: bool = true; + const TWIST_TYPE: TwistType = TwistType::M; + type Fp = Fq; + type Fp2Params = Fq2Parameters; + type Fp6Params = Fq6Parameters; + type Fp12Params = Fq12Parameters; + type G1Parameters = self::g1::Parameters; + type G2Parameters = self::g2::Parameters; +} diff --git a/arkworks/curves/bls12_381/src/curves/tests.rs b/arkworks/curves/bls12_381/src/curves/tests.rs new file mode 100644 index 00000000..5ea217da --- /dev/null +++ b/arkworks/curves/bls12_381/src/curves/tests.rs @@ -0,0 +1,117 @@ +#![allow(unused_imports)] +use ark_ec::{models::SWModelParameters, AffineCurve, PairingEngine, ProjectiveCurve}; +use ark_ff::{ + fields::{Field, FpParameters, PrimeField, SquareRootField}, + One, Zero, +}; +use ark_serialize::CanonicalSerialize; +use ark_std::rand::Rng; +use ark_std::test_rng; +use core::ops::{AddAssign, MulAssign}; + +use crate::{g1, g2, Bls12_381, Fq, Fq12, Fq2, Fr, G1Affine, G1Projective, G2Affine, G2Projective}; +use ark_algebra_test_templates::{curves::*, groups::*}; + +#[test] +fn test_g1_projective_curve() { + curve_tests::<G1Projective>(); + + sw_tests::<g1::Parameters>(); +} + +#[test] +fn test_g1_projective_group() { + let mut rng = test_rng(); + let a: G1Projective = 
rng.gen(); + let b: G1Projective = rng.gen(); + group_test(a, b); +} + +#[test] +fn test_g1_generator() { + let generator = G1Affine::prime_subgroup_generator(); + assert!(generator.is_on_curve()); + assert!(generator.is_in_correct_subgroup_assuming_on_curve()); +} + +#[test] +fn test_g2_projective_curve() { + curve_tests::<G2Projective>(); + + sw_tests::<g2::Parameters>(); +} + +#[test] +fn test_g2_projective_group() { + let mut rng = test_rng(); + let a: G2Projective = rng.gen(); + let b: G2Projective = rng.gen(); + group_test(a, b); +} + +#[test] +fn test_g2_generator() { + let generator = G2Affine::prime_subgroup_generator(); + assert!(generator.is_on_curve()); + assert!(generator.is_in_correct_subgroup_assuming_on_curve()); +} + +#[test] +fn test_bilinearity() { + let mut rng = test_rng(); + let a: G1Projective = rng.gen(); + let b: G2Projective = rng.gen(); + let s: Fr = rng.gen(); + + let mut sa = a; + sa.mul_assign(s); + let mut sb = b; + sb.mul_assign(s); + + let ans1 = Bls12_381::pairing(sa, b); + let ans2 = Bls12_381::pairing(a, sb); + let ans3 = Bls12_381::pairing(a, b).pow(s.into_repr()); + + assert_eq!(ans1, ans2); + assert_eq!(ans2, ans3); + + assert_ne!(ans1, Fq12::one()); + assert_ne!(ans2, Fq12::one()); + assert_ne!(ans3, Fq12::one()); + + assert_eq!(ans1.pow(Fr::characteristic()), Fq12::one()); + assert_eq!(ans2.pow(Fr::characteristic()), Fq12::one()); + assert_eq!(ans3.pow(Fr::characteristic()), Fq12::one()); +} + +#[test] +fn test_g1_generator_raw() { + let mut x = Fq::zero(); + let mut i = 0; + loop { + // y^2 = x^3 + b + let mut rhs = x; + rhs.square_in_place(); + rhs.mul_assign(&x); + rhs.add_assign(&g1::Parameters::COEFF_B); + + if let Some(y) = rhs.sqrt() { + let p = G1Affine::new(x, if y < -y { y } else { -y }, false); + assert!(!p.is_in_correct_subgroup_assuming_on_curve()); + + let g1 = p.scale_by_cofactor(); + if !g1.is_zero() { + assert_eq!(i, 4); + let g1 = G1Affine::from(g1); + + assert!(g1.is_in_correct_subgroup_assuming_on_curve()); + + assert_eq!(g1, G1Affine::prime_subgroup_generator()); + break; + } + } + + i += 1; + x.add_assign(&Fq::one()); + } +} diff --git a/arkworks/curves/bls12_381/src/fields/fq.rs b/arkworks/curves/bls12_381/src/fields/fq.rs new file mode 100644 index 00000000..0db87501 --- /dev/null +++ b/arkworks/curves/bls12_381/src/fields/fq.rs @@ -0,0 +1,115 @@ +use ark_ff::{ + biginteger::BigInteger384 as BigInteger, + field_new, + fields::{FftParameters, Fp384, Fp384Parameters, FpParameters}, +}; + +pub type Fq = Fp384<FqParameters>; + +pub struct FqParameters; + +impl Fp384Parameters for FqParameters {} +impl FftParameters for FqParameters { + type BigInt = BigInteger; + + const TWO_ADICITY: u32 = 1; + + #[rustfmt::skip] + const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([ + 0x43f5fffffffcaaae, + 0x32b7fff2ed47fffd, + 0x7e83a49a2e99d69, + 0xeca8f3318332bb7a, + 0xef148d1ea0f4c069, + 0x40ab3263eff0206, + ]); +} +impl FpParameters for FqParameters { + /// MODULUS = 4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787 + #[rustfmt::skip] + const MODULUS: BigInteger = BigInteger([ + 0xb9feffffffffaaab, + 0x1eabfffeb153ffff, + 0x6730d2a0f6b0f624, + 0x64774b84f38512bf, + 0x4b1ba7b6434bacd7, + 0x1a0111ea397fe69a, + ]); + + const MODULUS_BITS: u32 = 381; + + const CAPACITY: u32 = Self::MODULUS_BITS - 1; + + const REPR_SHAVE_BITS: u32 = 3; + + /// R = 3380320199399472671518931668520476396067793891014375699959770179129436917079669831430077592723774664465579537268733 + #[rustfmt::skip] + const R: BigInteger = BigInteger([ 
0x760900000002fffd, + 0xebf4000bc40c0002, + 0x5f48985753c758ba, + 0x77ce585370525745, + 0x5c071a97a256ec6d, + 0x15f65ec3fa80e493, + ]); + + #[rustfmt::skip] + const R2: BigInteger = BigInteger([ + 0xf4df1f341c341746, + 0xa76e6a609d104f1, + 0x8de5476c4c95b6d5, + 0x67eb88a9939d83c0, + 0x9a793e85b519952d, + 0x11988fe592cae3aa, + ]); + + const INV: u64 = 0x89f3fffcfffcfffd; + + /// GENERATOR = 2 + /// Encoded in Montgomery form, so the value is + /// 2 * R % q = 2758230843577277949620073511305048635578704962089743514587482222134842183668501798417467556318533664893264801977679 + #[rustfmt::skip] + const GENERATOR: BigInteger = BigInteger([ + 0x321300000006554f, + 0xb93c0018d6c40005, + 0x57605e0db0ddbb51, + 0x8b256521ed1f9bcb, + 0x6cf28d7901622c03, + 0x11ebab9dbb81e28c, + ]); + + #[rustfmt::skip] + const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0xdcff7fffffffd555, + 0xf55ffff58a9ffff, + 0xb39869507b587b12, + 0xb23ba5c279c2895f, + 0x258dd3db21a5d66b, + 0xd0088f51cbff34d, + ]); + + /// T and T_MINUS_ONE_DIV_TWO, where MODULUS - 1 = 2^S * T + /// For T coprime to 2 + #[rustfmt::skip] + const T: BigInteger = BigInteger([ + 0xdcff7fffffffd555, + 0xf55ffff58a9ffff, + 0xb39869507b587b12, + 0xb23ba5c279c2895f, + 0x258dd3db21a5d66b, + 0xd0088f51cbff34d, + ]); + + #[rustfmt::skip] + const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0xee7fbfffffffeaaa, + 0x7aaffffac54ffff, + 0xd9cc34a83dac3d89, + 0xd91dd2e13ce144af, + 0x92c6e9ed90d2eb35, + 0x680447a8e5ff9a6, + ]); +} + +pub const FQ_ONE: Fq = field_new!(Fq, "1"); +pub const FQ_ZERO: Fq = field_new!(Fq, "0"); diff --git a/arkworks/curves/bls12_381/src/fields/fq12.rs b/arkworks/curves/bls12_381/src/fields/fq12.rs new file mode 100644 index 00000000..b21d5634 --- /dev/null +++ b/arkworks/curves/bls12_381/src/fields/fq12.rs @@ -0,0 +1,76 @@ +use crate::*; +use ark_ff::{field_new, fields::*}; + +pub type Fq12 = Fp12<Fq12Parameters>; + +#[derive(Clone, Copy)] +pub struct Fq12Parameters; + +impl Fp12Parameters for Fq12Parameters { + type Fp6Params = Fq6Parameters; + + const NONRESIDUE: Fq6 = field_new!(Fq6, FQ2_ZERO, FQ2_ONE, FQ2_ZERO); + + const FROBENIUS_COEFF_FP12_C1: &'static [Fq2] = &[ + // Fp2::NONRESIDUE^(((q^0) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "1"), + field_new!(Fq, "0"), + ), + // Fp2::NONRESIDUE^(((q^1) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "3850754370037169011952147076051364057158807420970682438676050522613628423219637725072182697113062777891589506424760"), + field_new!(Fq, "151655185184498381465642749684540099398075398968325446656007613510403227271200139370504932015952886146304766135027"), + ), + // Fp2::NONRESIDUE^(((q^2) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "793479390729215512621379701633421447060886740281060493010456487427281649075476305620758731620351"), + field_new!(Fq, "0"), + ), + // Fp2::NONRESIDUE^(((q^3) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "2973677408986561043442465346520108879172042883009249989176415018091420807192182638567116318576472649347015917690530"), + field_new!(Fq, "1028732146235106349975324479215795277384839936929757896155643118032610843298655225875571310552543014690878354869257"), + ), + // Fp2::NONRESIDUE^(((q^4) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "793479390729215512621379701633421447060886740281060493010456487427281649075476305620758731620350"), + field_new!(Fq, "0"), + ), + // Fp2::NONRESIDUE^(((q^5) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "3125332594171059424908108096204648978570118281977575435832422631601824034463382777937621250592425535493320683825557"), + field_new!(Fq, 
"877076961050607968509681729531255177986764537961432449499635504522207616027455086505066378536590128544573588734230"), + ), + // Fp2::NONRESIDUE^(((q^6) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "-1"), + field_new!(Fq, "0"), + ), + // Fp2::NONRESIDUE^(((q^7) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "151655185184498381465642749684540099398075398968325446656007613510403227271200139370504932015952886146304766135027"), + field_new!(Fq, "3850754370037169011952147076051364057158807420970682438676050522613628423219637725072182697113062777891589506424760"), + ), + // Fp2::NONRESIDUE^(((q^8) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "4002409555221667392624310435006688643935503118305586438271171395842971157480381377015405980053539358417135540939436"), + field_new!(Fq, "0"), + ), + // Fp2::NONRESIDUE^(((q^9) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "1028732146235106349975324479215795277384839936929757896155643118032610843298655225875571310552543014690878354869257"), + field_new!(Fq, "2973677408986561043442465346520108879172042883009249989176415018091420807192182638567116318576472649347015917690530"), + ), + // Fp2::NONRESIDUE^(((q^10) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "4002409555221667392624310435006688643935503118305586438271171395842971157480381377015405980053539358417135540939437"), + field_new!(Fq, "0"), + ), + // Fp2::NONRESIDUE^(((q^11) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "877076961050607968509681729531255177986764537961432449499635504522207616027455086505066378536590128544573588734230"), + field_new!(Fq, "3125332594171059424908108096204648978570118281977575435832422631601824034463382777937621250592425535493320683825557"), + ), + ]; +} diff --git a/arkworks/curves/bls12_381/src/fields/fq2.rs b/arkworks/curves/bls12_381/src/fields/fq2.rs new file mode 100644 index 00000000..917c1363 --- /dev/null +++ b/arkworks/curves/bls12_381/src/fields/fq2.rs @@ -0,0 +1,35 @@ +use crate::*; +use ark_ff::{field_new, fields::*}; + +pub type Fq2 = Fp2; + +pub struct Fq2Parameters; + +impl Fp2Parameters for Fq2Parameters { + type Fp = Fq; + + /// NONRESIDUE = -1 + #[rustfmt::skip] + const NONRESIDUE: Fq = field_new!(Fq, "-1"); + + /// QUADRATIC_NONRESIDUE = (U + 1) + #[rustfmt::skip] + const QUADRATIC_NONRESIDUE: (Fq, Fq) = (FQ_ONE, FQ_ONE); + + /// Coefficients for the Frobenius automorphism. 
+ #[rustfmt::skip] + const FROBENIUS_COEFF_FP2_C1: &'static [Fq] = &[ + // Fq(-1)**(((q^0) - 1) / 2) + field_new!(Fq, "1"), + // Fq(-1)**(((q^1) - 1) / 2) + field_new!(Fq, "-1"), + ]; + + #[inline(always)] + fn mul_fp_by_nonresidue(fp: &Self::Fp) -> Self::Fp { + -(*fp) + } +} + +pub const FQ2_ZERO: Fq2 = field_new!(Fq2, FQ_ZERO, FQ_ZERO); +pub const FQ2_ONE: Fq2 = field_new!(Fq2, FQ_ONE, FQ_ZERO); diff --git a/arkworks/curves/bls12_381/src/fields/fq6.rs b/arkworks/curves/bls12_381/src/fields/fq6.rs new file mode 100644 index 00000000..8adb4773 --- /dev/null +++ b/arkworks/curves/bls12_381/src/fields/fq6.rs @@ -0,0 +1,96 @@ +use crate::*; +use ark_ff::{field_new, fields::*}; + +pub type Fq6 = Fp6<Fq6Parameters>; + +#[derive(Clone, Copy)] +pub struct Fq6Parameters; + +impl Fp6Parameters for Fq6Parameters { + type Fp2Params = Fq2Parameters; + + /// NONRESIDUE = (U + 1) + #[rustfmt::skip] + const NONRESIDUE: Fq2 = field_new!(Fq2, + field_new!(Fq, "1"), + field_new!(Fq, "1"), + ); + + #[rustfmt::skip] + const FROBENIUS_COEFF_FP6_C1: &'static [Fq2] = &[ + // Fp2::NONRESIDUE^(((q^0) - 1) / 3) + field_new!(Fq2, + field_new!(Fq, "1"), + field_new!(Fq, "0"), + ), + // Fp2::NONRESIDUE^(((q^1) - 1) / 3) + field_new!(Fq2, + field_new!(Fq, "0"), + field_new!(Fq, "4002409555221667392624310435006688643935503118305586438271171395842971157480381377015405980053539358417135540939436"), + ), + // Fp2::NONRESIDUE^(((q^2) - 1) / 3) + field_new!(Fq2, + field_new!(Fq, "793479390729215512621379701633421447060886740281060493010456487427281649075476305620758731620350"), + field_new!(Fq, "0"), + ), + // Fp2::NONRESIDUE^(((q^3) - 1) / 3) + field_new!(Fq2, + field_new!(Fq, "0"), + field_new!(Fq, "1"), + ), + // Fp2::NONRESIDUE^(((q^4) - 1) / 3) + field_new!(Fq2, + field_new!(Fq, "4002409555221667392624310435006688643935503118305586438271171395842971157480381377015405980053539358417135540939436"), + field_new!(Fq, "0"), + ), + // Fp2::NONRESIDUE^(((q^5) - 1) / 3) + field_new!(Fq2, + field_new!(Fq, "0"), + field_new!(Fq, "793479390729215512621379701633421447060886740281060493010456487427281649075476305620758731620350"), + ), +]; + + #[rustfmt::skip] + const FROBENIUS_COEFF_FP6_C2: &'static [Fq2] = &[ + // Fq2(u + 1)**(((2q^0) - 2) / 3) + field_new!(Fq2, + field_new!(Fq, "1"), + field_new!(Fq, "0"), + ), + // Fq2(u + 1)**(((2q^1) - 2) / 3) + field_new!(Fq2, + field_new!(Fq, "4002409555221667392624310435006688643935503118305586438271171395842971157480381377015405980053539358417135540939437"), + field_new!(Fq, "0"), + ), + // Fq2(u + 1)**(((2q^2) - 2) / 3) + field_new!(Fq2, + field_new!(Fq, "4002409555221667392624310435006688643935503118305586438271171395842971157480381377015405980053539358417135540939436"), + field_new!(Fq, "0"), + ), + // Fq2(u + 1)**(((2q^3) - 2) / 3) + field_new!(Fq2, + field_new!(Fq, "-1"), + field_new!(Fq, "0"), + ), + // Fq2(u + 1)**(((2q^4) - 2) / 3) + field_new!(Fq2, + field_new!(Fq, "793479390729215512621379701633421447060886740281060493010456487427281649075476305620758731620350"), + field_new!(Fq, "0"), + ), + // Fq2(u + 1)**(((2q^5) - 2) / 3) + field_new!(Fq2, + field_new!(Fq, "793479390729215512621379701633421447060886740281060493010456487427281649075476305620758731620351"), + field_new!(Fq, "0"), + ), + ]; + + /// Multiply this element by the quadratic nonresidue 1 + u. + /// Make this generic.
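+ /// Since u^2 = -1, (c0 + c1*u) * (1 + u) = (c0 - c1) + (c0 + c1)*u, which is what the body computes without a full Fq2 multiplication.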
+ fn mul_fp2_by_nonresidue(fe: &Fq2) -> Fq2 { + let mut copy = *fe; + let t0 = copy.c0; + copy.c0 -= &fe.c1; + copy.c1 += &t0; + copy + } +} diff --git a/arkworks/curves/bls12_381/src/fields/fr.rs b/arkworks/curves/bls12_381/src/fields/fr.rs new file mode 100644 index 00000000..7604c678 --- /dev/null +++ b/arkworks/curves/bls12_381/src/fields/fr.rs @@ -0,0 +1,100 @@ +use ark_ff::{ + biginteger::BigInteger256 as BigInteger, + fields::{FftParameters, Fp256, Fp256Parameters, FpParameters}, +}; + +pub type Fr = Fp256<FrParameters>; + +pub struct FrParameters; + +impl Fp256Parameters for FrParameters {} +impl FftParameters for FrParameters { + type BigInt = BigInteger; + + const TWO_ADICITY: u32 = 32; + + #[rustfmt::skip] + const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([ + 0xb9b58d8c5f0e466a, + 0x5b1b4c801819d7ec, + 0xaf53ae352a31e64, + 0x5bf3adda19e9b27b, + ]); +} +impl FpParameters for FrParameters { + /// MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513 + #[rustfmt::skip] + const MODULUS: BigInteger = BigInteger([ + 0xffffffff00000001, + 0x53bda402fffe5bfe, + 0x3339d80809a1d805, + 0x73eda753299d7d48, + ]); + + const MODULUS_BITS: u32 = 255; + + const CAPACITY: u32 = Self::MODULUS_BITS - 1; + + const REPR_SHAVE_BITS: u32 = 1; + + /// R = 10920338887063814464675503992315976177888879664585288394250266608035967270910 + #[rustfmt::skip] + const R: BigInteger = BigInteger([ + 0x1fffffffe, + 0x5884b7fa00034802, + 0x998c4fefecbc4ff5, + 0x1824b159acc5056f, + ]); + + #[rustfmt::skip] + const R2: BigInteger = BigInteger([ + 0xc999e990f3f29c6d, + 0x2b6cedcb87925c23, + 0x5d314967254398f, + 0x748d9d99f59ff11, + ]); + + const INV: u64 = 0xfffffffeffffffff; + + /// GENERATOR = 7 + /// Encoded in Montgomery form, so the value here is + /// 7 * R % q = 24006497034320510773280787438025867407531605151569380937148207556313189711857 + #[rustfmt::skip] + const GENERATOR: BigInteger = BigInteger([ + 0xefffffff1, + 0x17e363d300189c0f, + 0xff9c57876f8457b0, + 0x351332208fc5a8c4, + ]); + + #[rustfmt::skip] + const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0x7fffffff80000000, + 0xa9ded2017fff2dff, + 0x199cec0404d0ec02, + 0x39f6d3a994cebea4, + ]); + + // T and T_MINUS_ONE_DIV_TWO, where MODULUS - 1 = 2^S * T + // For T coprime to 2 + + // T = (MODULUS - 1) / 2^S = + // 12208678567578594777604504606729831043093128246378069236549469339647 + #[rustfmt::skip] + const T: BigInteger = BigInteger([ + 0xfffe5bfeffffffff, + 0x9a1d80553bda402, + 0x299d7d483339d808, + 0x73eda753, + ]); + + // (T - 1) / 2 = + // 6104339283789297388802252303364915521546564123189034618274734669823 + #[rustfmt::skip] + const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0x7fff2dff7fffffff, + 0x4d0ec02a9ded201, + 0x94cebea4199cec04, + 0x39f6d3a9, + ]); +} diff --git a/arkworks/curves/bls12_381/src/fields/mod.rs b/arkworks/curves/bls12_381/src/fields/mod.rs new file mode 100644 index 00000000..c9c69eb2 --- /dev/null +++ b/arkworks/curves/bls12_381/src/fields/mod.rs @@ -0,0 +1,27 @@ +#[cfg(feature = "scalar_field")] +pub mod fr; +#[cfg(feature = "scalar_field")] +pub use self::fr::*; + +#[cfg(feature = "curve")] +pub mod fq; +#[cfg(feature = "curve")] +pub use self::fq::*; + +#[cfg(feature = "curve")] +pub mod fq2; +#[cfg(feature = "curve")] +pub use self::fq2::*; + +#[cfg(feature = "curve")] +pub mod fq6; +#[cfg(feature = "curve")] +pub use self::fq6::*; + +#[cfg(feature = "curve")] +pub mod fq12; +#[cfg(feature = "curve")] +pub use self::fq12::*; + +#[cfg(all(feature = "curve", feature = "std", test))]
+mod tests; diff --git a/arkworks/curves/bls12_381/src/fields/tests.rs b/arkworks/curves/bls12_381/src/fields/tests.rs new file mode 100644 index 00000000..de73b0be --- /dev/null +++ b/arkworks/curves/bls12_381/src/fields/tests.rs @@ -0,0 +1,2320 @@ +use ark_ff::{ + biginteger::{BigInteger, BigInteger384}, + fields::{ + FftField, FftParameters, Field, Fp12Parameters, Fp2Parameters, Fp6Parameters, FpParameters, + SquareRootField, + }, + One, UniformRand, Zero, +}; +use core::{ + cmp::Ordering, + ops::{AddAssign, MulAssign, SubAssign}, +}; + +use crate::{Fq, Fq12, Fq12Parameters, Fq2, Fq2Parameters, Fq6, Fq6Parameters, FqParameters, Fr}; +use ark_algebra_test_templates::fields::*; + +pub(crate) const ITERATIONS: usize = 5; + +#[test] +fn test_fr() { + let mut rng = ark_std::test_rng(); + for _ in 0..ITERATIONS { + let a: Fr = UniformRand::rand(&mut rng); + let b: Fr = UniformRand::rand(&mut rng); + field_test(a, b); + primefield_test::<Fr>(); + sqrt_field_test(b); + } +} + +#[test] +fn test_fq() { + let mut rng = ark_std::test_rng(); + for _ in 0..ITERATIONS { + let a: Fq = UniformRand::rand(&mut rng); + let b: Fq = UniformRand::rand(&mut rng); + field_test(a, b); + primefield_test::<Fq>(); + sqrt_field_test(a); + } +} + +#[test] +fn test_fq2() { + let mut rng = ark_std::test_rng(); + for _ in 0..ITERATIONS { + let a: Fq2 = UniformRand::rand(&mut rng); + let b: Fq2 = UniformRand::rand(&mut rng); + field_test(a, b); + sqrt_field_test(a); + } + frobenius_test::<Fq2, _>(Fq::characteristic(), 13); +} + +#[test] +fn test_fq6() { + let mut rng = ark_std::test_rng(); + for _ in 0..ITERATIONS { + let g: Fq6 = UniformRand::rand(&mut rng); + let h: Fq6 = UniformRand::rand(&mut rng); + field_test(g, h); + } + frobenius_test::<Fq6, _>(Fq::characteristic(), 13); +} + +#[test] +fn test_fq12() { + let mut rng = ark_std::test_rng(); + for _ in 0..ITERATIONS { + let g: Fq12 = UniformRand::rand(&mut rng); + let h: Fq12 = UniformRand::rand(&mut rng); + field_test(g, h); + } + frobenius_test::<Fq12, _>(Fq::characteristic(), 13); +} + +#[test] +fn test_negative_one() { + let neg_one = Fq::new(BigInteger384([ + 0x43f5fffffffcaaae, + 0x32b7fff2ed47fffd, + 0x7e83a49a2e99d69, + 0xeca8f3318332bb7a, + 0xef148d1ea0f4c069, + 0x40ab3263eff0206, + ])); + assert_eq!(neg_one, -Fq::one()); +} + +#[test] +fn test_frob_coeffs() { + let nqr = -Fq::one(); + + assert_eq!(Fq2Parameters::FROBENIUS_COEFF_FP2_C1[0], Fq::one()); + assert_eq!( + Fq2Parameters::FROBENIUS_COEFF_FP2_C1[1], + nqr.pow([ + 0xdcff7fffffffd555, + 0xf55ffff58a9ffff, + 0xb39869507b587b12, + 0xb23ba5c279c2895f, + 0x258dd3db21a5d66b, + 0xd0088f51cbff34d, + ]) + ); + + let nqr = Fq2::new(Fq::one(), Fq::one()); + + assert_eq!(Fq6Parameters::FROBENIUS_COEFF_FP6_C1[0], Fq2::one()); + assert_eq!( + Fq6Parameters::FROBENIUS_COEFF_FP6_C1[1], + nqr.pow([ + 0x9354ffffffffe38e, + 0xa395554e5c6aaaa, + 0xcd104635a790520c, + 0xcc27c3d6fbd7063f, + 0x190937e76bc3e447, + 0x8ab05f8bdd54cde, + ]) + ); + assert_eq!( + Fq6Parameters::FROBENIUS_COEFF_FP6_C1[2], + nqr.pow([ + 0xb78e0000097b2f68, + 0xd44f23b47cbd64e3, + 0x5cb9668120b069a9, + 0xccea85f9bf7b3d16, + 0xdba2c8d7adb356d, + 0x9cd75ded75d7429, + 0xfc65c31103284fab, + 0xc58cb9a9b249ee24, + 0xccf734c3118a2e9a, + 0xa0f4304c5a256ce6, + 0xc3f0d2f8e0ba61f8, + 0xe167e192ebca97, + ]) + ); + assert_eq!( + Fq6Parameters::FROBENIUS_COEFF_FP6_C1[3], + nqr.pow([ + 0xdbc6fcd6f35b9e06, + 0x997dead10becd6aa, + 0x9dbbd24c17206460, + 0x72b97acc6057c45e, + 0xf8e9a230bf0c628e, + 0x647ccb1885c63a7, + 0xce80264fc55bf6ee, + 0x94d8d716c3939fc4, + 0xad78f0eb77ee6ee1, +
0xd6fe49bfe57dc5f9, + 0x2656d6c15c63647, + 0xdf6282f111fa903, + 0x1bdba63e0632b4bb, + 0x6883597bcaa505eb, + 0xa56d4ec90c34a982, + 0x7e4c42823bbe90b2, + 0xf64728aa6dcb0f20, + 0x16e57e16ef152f, + ]) + ); + assert_eq!( + Fq6Parameters::FROBENIUS_COEFF_FP6_C1[4], + nqr.pow([ + 0x4649add3c71c6d90, + 0x43caa6528972a865, + 0xcda8445bbaaa0fbb, + 0xc93dea665662aa66, + 0x2863bc891834481d, + 0x51a0c3f5d4ccbed8, + 0x9210e660f90ccae9, + 0xe2bd6836c546d65e, + 0xf223abbaa7cf778b, + 0xd4f10b222cf11680, + 0xd540f5eff4a1962e, + 0xa123a1f140b56526, + 0x31ace500636a59f6, + 0x3a82bc8c8dfa57a9, + 0x648c511e217fc1f8, + 0x36c17ffd53a4558f, + 0x881bef5fd684eefd, + 0x5d648dbdc5dbb522, + 0x8fd07bf06e5e59b8, + 0x8ddec8a9acaa4b51, + 0x4cc1f8688e2def26, + 0xa74e63cb492c03de, + 0x57c968173d1349bb, + 0x253674e02a866, + ]) + ); + assert_eq!( + Fq6Parameters::FROBENIUS_COEFF_FP6_C1[5], + nqr.pow([ + 0xf896f792732eb2be, + 0x49c86a6d1dc593a1, + 0xe5b31e94581f91c3, + 0xe3da5cc0a6b20d7f, + 0x822caef950e0bfed, + 0x317ed950b9ee67cd, + 0xffd664016ee3f6cd, + 0x77d991c88810b122, + 0x62e72e635e698264, + 0x905e1a1a2d22814a, + 0xf5b7ab3a3f33d981, + 0x175871b0bc0e25dd, + 0x1e2e9a63df5c3772, + 0xe888b1f7445b149d, + 0x9551c19e5e7e2c24, + 0xecf21939a3d2d6be, + 0xd830dbfdab72dbd4, + 0x7b34af8d622d40c0, + 0x3df6d20a45671242, + 0xaf86bee30e21d98, + 0x41064c1534e5df5d, + 0xf5f6cabd3164c609, + 0xa5d14bdf2b7ee65, + 0xa718c069defc9138, + 0xdb1447e770e3110e, + 0xc1b164a9e90af491, + 0x7180441f9d251602, + 0x1fd3a5e6a9a893e, + 0x1e17b779d54d5db, + 0x3c7afafe3174, + ]) + ); + + assert_eq!(Fq6Parameters::FROBENIUS_COEFF_FP6_C2[0], Fq2::one()); + assert_eq!( + Fq6Parameters::FROBENIUS_COEFF_FP6_C2[1], + nqr.pow([ + 0x26a9ffffffffc71c, + 0x1472aaa9cb8d5555, + 0x9a208c6b4f20a418, + 0x984f87adf7ae0c7f, + 0x32126fced787c88f, + 0x11560bf17baa99bc, + ]) + ); + assert_eq!( + Fq6Parameters::FROBENIUS_COEFF_FP6_C2[2], + nqr.pow([ + 0x6f1c000012f65ed0, + 0xa89e4768f97ac9c7, + 0xb972cd024160d353, + 0x99d50bf37ef67a2c, + 0x1b74591af5b66adb, + 0x139aebbdaebae852, + 0xf8cb862206509f56, + 0x8b1973536493dc49, + 0x99ee698623145d35, + 0x41e86098b44ad9cd, + 0x87e1a5f1c174c3f1, + 0x1c2cfc325d7952f, + ]) + ); + assert_eq!( + Fq6Parameters::FROBENIUS_COEFF_FP6_C2[3], + nqr.pow([ + 0xb78df9ade6b73c0c, + 0x32fbd5a217d9ad55, + 0x3b77a4982e40c8c1, + 0xe572f598c0af88bd, + 0xf1d344617e18c51c, + 0xc8f996310b8c74f, + 0x9d004c9f8ab7eddc, + 0x29b1ae2d87273f89, + 0x5af1e1d6efdcddc3, + 0xadfc937fcafb8bf3, + 0x4cadad82b8c6c8f, + 0x1bec505e223f5206, + 0x37b74c7c0c656976, + 0xd106b2f7954a0bd6, + 0x4ada9d9218695304, + 0xfc988504777d2165, + 0xec8e5154db961e40, + 0x2dcafc2dde2a5f, + ]) + ); + assert_eq!( + Fq6Parameters::FROBENIUS_COEFF_FP6_C2[4], + nqr.pow([ + 0x8c935ba78e38db20, + 0x87954ca512e550ca, + 0x9b5088b775541f76, + 0x927bd4ccacc554cd, + 0x50c779123068903b, + 0xa34187eba9997db0, + 0x2421ccc1f21995d2, + 0xc57ad06d8a8dacbd, + 0xe44757754f9eef17, + 0xa9e2164459e22d01, + 0xaa81ebdfe9432c5d, + 0x424743e2816aca4d, + 0x6359ca00c6d4b3ed, + 0x750579191bf4af52, + 0xc918a23c42ff83f0, + 0x6d82fffaa748ab1e, + 0x1037debfad09ddfa, + 0xbac91b7b8bb76a45, + 0x1fa0f7e0dcbcb370, + 0x1bbd9153595496a3, + 0x9983f0d11c5bde4d, + 0x4e9cc796925807bc, + 0xaf92d02e7a269377, + 0x4a6ce9c0550cc, + ]) + ); + assert_eq!( + Fq6Parameters::FROBENIUS_COEFF_FP6_C2[5], + nqr.pow([ + 0xf12def24e65d657c, + 0x9390d4da3b8b2743, + 0xcb663d28b03f2386, + 0xc7b4b9814d641aff, + 0x4595df2a1c17fdb, + 0x62fdb2a173dccf9b, + 0xffacc802ddc7ed9a, + 0xefb3239110216245, + 0xc5ce5cc6bcd304c8, + 0x20bc34345a450294, + 0xeb6f56747e67b303, + 
0x2eb0e361781c4bbb, + 0x3c5d34c7beb86ee4, + 0xd11163ee88b6293a, + 0x2aa3833cbcfc5849, + 0xd9e4327347a5ad7d, + 0xb061b7fb56e5b7a9, + 0xf6695f1ac45a8181, + 0x7beda4148ace2484, + 0x15f0d7dc61c43b30, + 0x820c982a69cbbeba, + 0xebed957a62c98c12, + 0x14ba297be56fdccb, + 0x4e3180d3bdf92270, + 0xb6288fcee1c6221d, + 0x8362c953d215e923, + 0xe300883f3a4a2c05, + 0x3fa74bcd535127c, + 0x3c2f6ef3aa9abb6, + 0x78f5f5fc62e8, + ]) + ); + + assert_eq!(Fq12Parameters::FROBENIUS_COEFF_FP12_C1[0], Fq2::one()); + assert_eq!( + Fq12Parameters::FROBENIUS_COEFF_FP12_C1[1], + nqr.pow([ + 0x49aa7ffffffff1c7, + 0x51caaaa72e35555, + 0xe688231ad3c82906, + 0xe613e1eb7deb831f, + 0xc849bf3b5e1f223, + 0x45582fc5eeaa66f, + ]) + ); + assert_eq!( + Fq12Parameters::FROBENIUS_COEFF_FP12_C1[2], + nqr.pow([ + 0xdbc7000004bd97b4, + 0xea2791da3e5eb271, + 0x2e5cb340905834d4, + 0xe67542fcdfbd9e8b, + 0x86dd1646bd6d9ab6, + 0x84e6baef6baeba14, + 0x7e32e188819427d5, + 0x62c65cd4d924f712, + 0x667b9a6188c5174d, + 0x507a18262d12b673, + 0xe1f8697c705d30fc, + 0x70b3f0c975e54b, + ]) + ); + assert_eq!( + Fq12Parameters::FROBENIUS_COEFF_FP12_C1[3], + nqr.pow(vec![ + 0x6de37e6b79adcf03, + 0x4cbef56885f66b55, + 0x4edde9260b903230, + 0x395cbd66302be22f, + 0xfc74d1185f863147, + 0x323e658c42e31d3, + 0x67401327e2adfb77, + 0xca6c6b8b61c9cfe2, + 0xd6bc7875bbf73770, + 0xeb7f24dff2bee2fc, + 0x8132b6b60ae31b23, + 0x86fb1417888fd481, + 0x8dedd31f03195a5d, + 0x3441acbde55282f5, + 0x52b6a764861a54c1, + 0x3f2621411ddf4859, + 0xfb23945536e58790, + 0xb72bf0b778a97, + ]) + ); + assert_eq!( + Fq12Parameters::FROBENIUS_COEFF_FP12_C1[4], + nqr.pow(vec![ + 0xa324d6e9e38e36c8, + 0xa1e5532944b95432, + 0x66d4222ddd5507dd, + 0xe49ef5332b315533, + 0x1431de448c1a240e, + 0xa8d061faea665f6c, + 0x490873307c866574, + 0xf15eb41b62a36b2f, + 0x7911d5dd53e7bbc5, + 0x6a78859116788b40, + 0x6aa07af7fa50cb17, + 0x5091d0f8a05ab293, + 0x98d6728031b52cfb, + 0x1d415e4646fd2bd4, + 0xb246288f10bfe0fc, + 0x9b60bffea9d22ac7, + 0x440df7afeb42777e, + 0x2eb246dee2edda91, + 0xc7e83df8372f2cdc, + 0x46ef6454d65525a8, + 0x2660fc344716f793, + 0xd3a731e5a49601ef, + 0x2be4b40b9e89a4dd, + 0x129b3a7015433, + ]) + ); + assert_eq!( + Fq12Parameters::FROBENIUS_COEFF_FP12_C1[5], + nqr.pow(vec![ + 0xfc4b7bc93997595f, + 0xa4e435368ee2c9d0, + 0xf2d98f4a2c0fc8e1, + 0xf1ed2e60535906bf, + 0xc116577ca8705ff6, + 0x98bf6ca85cf733e6, + 0x7feb3200b771fb66, + 0x3becc8e444085891, + 0x31739731af34c132, + 0xc82f0d0d169140a5, + 0xfadbd59d1f99ecc0, + 0xbac38d85e0712ee, + 0x8f174d31efae1bb9, + 0x744458fba22d8a4e, + 0x4aa8e0cf2f3f1612, + 0x76790c9cd1e96b5f, + 0x6c186dfed5b96dea, + 0x3d9a57c6b116a060, + 0x1efb690522b38921, + 0x857c35f718710ecc, + 0xa083260a9a72efae, + 0xfafb655e98b26304, + 0x52e8a5ef95bf732, + 0x538c6034ef7e489c, + 0xed8a23f3b8718887, + 0x60d8b254f4857a48, + 0x38c0220fce928b01, + 0x80fe9d2f354d449f, + 0xf0bdbbceaa6aed, + 0x1e3d7d7f18ba, + ]) + ); + assert_eq!( + Fq12Parameters::FROBENIUS_COEFF_FP12_C1[6], + nqr.pow(vec![ + 0x21219610a012ba3c, + 0xa5c19ad35375325, + 0x4e9df1e497674396, + 0xfb05b717c991c6ef, + 0x4a1265bca93a32f2, + 0xd875ff2a7bdc1f66, + 0xc6d8754736c771b2, + 0x2d80c759ba5a2ae7, + 0x138a20df4b03cc1a, + 0xc22d07fe68e93024, + 0xd1dc474d3b433133, + 0xc22aa5e75044e5c, + 0xf657c6fbf9c17ebf, + 0xc591a794a58660d, + 0x2261850ee1453281, + 0xd17d3bd3b7f5efb4, + 0xf00cec8ec507d01, + 0x2a6a775657a00ae6, + 0x5f098a12ff470719, + 0x409d194e7b5c5afa, + 0x1d66478e982af5b, + 0xda425a5b5e01ca3f, + 0xf77e4f78747e903c, + 0x177d49f73732c6fc, + 0xa9618fecabe0e1f4, + 0xba5337eac90bd080, + 0x66fececdbc35d4e7, + 
0xa4cd583203d9206f, + 0x98391632ceeca596, + 0x4946b76e1236ad3f, + 0xa0dec64e60e711a1, + 0xfcb41ed3605013, + 0x8ca8f9692ae1e3a9, + 0xd3078bfc28cc1baf, + 0xf0536f764e982f82, + 0x3125f1a2656, + ]) + ); + assert_eq!( + Fq12Parameters::FROBENIUS_COEFF_FP12_C1[7], + nqr.pow(vec![ + 0x742754a1f22fdb, + 0x2a1955c2dec3a702, + 0x9747b28c796d134e, + 0xc113a0411f59db79, + 0x3bb0fa929853bfc1, + 0x28c3c25f8f6fb487, + 0xbc2b6c99d3045b34, + 0x98fb67d6badde1fd, + 0x48841d76a24d2073, + 0xd49891145fe93ae6, + 0xc772b9c8e74d4099, + 0xccf4e7b9907755bb, + 0x9cf47b25d42fd908, + 0x5616a0c347fc445d, + 0xff93b7a7ad1b8a6d, + 0xac2099256b78a77a, + 0x7804a95b02892e1c, + 0x5cf59ca7bfd69776, + 0xa7023502acd3c866, + 0xc76f4982fcf8f37, + 0x51862a5a57ac986e, + 0x38b80ed72b1b1023, + 0x4a291812066a61e1, + 0xcd8a685eff45631, + 0x3f40f708764e4fa5, + 0x8aa0441891285092, + 0x9eff60d71cdf0a9, + 0x4fdd9d56517e2bfa, + 0x1f3c80d74a28bc85, + 0x24617417c064b648, + 0x7ddda1e4385d5088, + 0xf9e132b11dd32a16, + 0xcc957cb8ef66ab99, + 0xd4f206d37cb752c5, + 0x40de343f28ad616b, + 0x8d1f24379068f0e3, + 0x6f31d7947ea21137, + 0x27311f9c32184061, + 0x9eea0664cc78ce5f, + 0x7d4151f6fea9a0da, + 0x454096fa75bd571a, + 0x4fe0f20ecb, + ]) + ); + assert_eq!( + Fq12Parameters::FROBENIUS_COEFF_FP12_C1[8], + nqr.pow(vec![ + 0x802f5720d0b25710, + 0x6714f0a258b85c7c, + 0x31394c90afdf16e, + 0xe9d2b0c64f957b19, + 0xe67c0d9c5e7903ee, + 0x3156fdc5443ea8ef, + 0x7c4c50524d88c892, + 0xc99dc8990c0ad244, + 0xd37ababf3649a896, + 0x76fe4b838ff7a20c, + 0xcf69ee2cec728db3, + 0xb83535548e5f41, + 0x371147684ccb0c23, + 0x194f6f4fa500db52, + 0xc4571dc78a4c5374, + 0xe4d46d479999ca97, + 0x76b6785a615a151c, + 0xcceb8bcea7eaf8c1, + 0x80d87a6fbe5ae687, + 0x6a97ddddb85ce85, + 0xd783958f26034204, + 0x7144506f2e2e8590, + 0x948693d377aef166, + 0x8364621ed6f96056, + 0xf021777c4c09ee2d, + 0xc6cf5e746ecd50b, + 0xa2337b7aa22743df, + 0xae753f8bbacab39c, + 0xfc782a9e34d3c1cc, + 0x21b827324fe494d9, + 0x5692ce350ed03b38, + 0xf323a2b3cd0481b0, + 0xe859c97a4ccad2e3, + 0x48434b70381e4503, + 0x46042d62e4132ed8, + 0x48c4d6f56122e2f2, + 0xf87711ab9f5c1af7, + 0xb14b7a054759b469, + 0x8eb0a96993ffa9aa, + 0x9b21fb6fc58b760c, + 0xf3abdd115d2e7d25, + 0xf7beac3d4d12409c, + 0x40a5585cce69bf03, + 0x697881e1ba22d5a8, + 0x3d6c04e6ad373fd9, + 0x849871bf627be886, + 0x550f4b9b71b28ef9, + 0x81d2e0d78, + ]) + ); + assert_eq!( + Fq12Parameters::FROBENIUS_COEFF_FP12_C1[9], + nqr.pow(vec![ + 0x4af4accf7de0b977, + 0x742485e21805b4ee, + 0xee388fbc4ac36dec, + 0x1e199da57ad178a, + 0xc27c12b292c6726a, + 0x162e6ed84505b5e8, + 0xe191683f336e09df, + 0x17deb7e8d1e0fce6, + 0xd944f19ad06f5836, + 0x4c5f5e59f6276026, + 0xf1ba9c7c148a38a8, + 0xd205fe2dba72b326, + 0x9a2cf2a4c289824e, + 0x4f47ad512c39e24d, + 0xc5894d984000ea09, + 0x2974c03ff7cf01fa, + 0xfcd243b48cb99a22, + 0x2b5150c9313ac1e8, + 0x9089f37c7fc80eda, + 0x989540cc9a7aea56, + 0x1ab1d4e337e63018, + 0x42b546c30d357e43, + 0x1c6abc04f76233d9, + 0x78b3b8d88bf73e47, + 0x151c4e4c45dc68e6, + 0x519a79c4f54397ed, + 0x93f5b51535a127c5, + 0x5fc51b6f52fa153e, + 0x2e0504f2d4a965c3, + 0xc85bd3a3da52bffe, + 0x98c60957a46a89ef, + 0x48c03b5976b91cae, + 0xc6598040a0a61438, + 0xbf0b49dc255953af, + 0xb78dff905b628ab4, + 0x68140b797ba74ab8, + 0x116cf037991d1143, + 0x2f7fe82e58acb0b8, + 0xc20bf7a8f7be5d45, + 0x86c2905c338d5709, + 0xff13a3ae6c8ace3d, + 0xb6f95e2282d08337, + 0xd49f7b313e9cbf29, + 0xf794517193a1ce8c, + 0x39641fecb596a874, + 0x411c4c4edf462fb3, + 0x3f8cd55c10cf25b4, + 0x2bdd7ea165e860b6, + 0xacd7d2cef4caa193, + 0x6558a1d09a05f96, + 0x1f52b5f5b546fc20, + 0x4ee22a5a8c250c12, + 
0xd3a63a54a205b6b3, + 0xd2ff5be8, + ]) + ); + assert_eq!( + Fq12Parameters::FROBENIUS_COEFF_FP12_C1[10], + nqr.pow(vec![ + 0xe5953a4f96cdda44, + 0x336b2d734cbc32bb, + 0x3f79bfe3cd7410e, + 0x267ae19aaa0f0332, + 0x85a9c4db78d5c749, + 0x90996b046b5dc7d8, + 0x8945eae9820afc6a, + 0x2644ddea2b036bd, + 0x39898e35ac2e3819, + 0x2574eab095659ab9, + 0x65953d51ac5ea798, + 0xc6b8c7afe6752466, + 0x40e9e993e9286544, + 0x7e0ad34ad9700ea0, + 0xac1015eba2c69222, + 0x24f057a19239b5d8, + 0x2043b48c8a3767eb, + 0x1117c124a75d7ff4, + 0x433cfd1a09fb3ce7, + 0x25b087ce4bcf7fb, + 0xbcee0dc53a3e5bdb, + 0xbffda040cf028735, + 0xf7cf103a25512acc, + 0x31d4ecda673130b9, + 0xea0906dab18461e6, + 0x5a40585a5ac3050d, + 0x803358fc14fd0eda, + 0x3678ca654eada770, + 0x7b91a1293a45e33e, + 0xcd5e5b8ea8530e43, + 0x21ae563ab34da266, + 0xecb00dad60df8894, + 0x77fe53e652facfef, + 0x9b7d1ad0b00244ec, + 0xe695df5ca73f801, + 0x23cdb21feeab0149, + 0x14de113e7ea810d9, + 0x52600cd958dac7e7, + 0xc83392c14667e488, + 0x9f808444bc1717fc, + 0x56facb4bcf7c788f, + 0x8bcad53245fc3ca0, + 0xdef661e83f27d81c, + 0x37d4ebcac9ad87e5, + 0x6fe8b24f5cdb9324, + 0xee08a26c1197654c, + 0xc98b22f65f237e9a, + 0xf54873a908ed3401, + 0x6e1cb951d41f3f3, + 0x290b2250a54e8df6, + 0x7f36d51eb1db669e, + 0xb08c7ed81a6ee43e, + 0x95e1c90fb092f680, + 0x429e4afd0e8b820, + 0x2c14a83ee87d715c, + 0xf37267575cfc8af5, + 0xb99e9afeda3c2c30, + 0x8f0f69da75792d5a, + 0x35074a85a533c73, + 0x156ed119, + ]) + ); + assert_eq!( + Fq12Parameters::FROBENIUS_COEFF_FP12_C1[11], + nqr.pow(vec![ + 0x107db680942de533, + 0x6262b24d2052393b, + 0x6136df824159ebc, + 0xedb052c9970c5deb, + 0xca813aea916c3777, + 0xf49dacb9d76c1788, + 0x624941bd372933bb, + 0xa5e60c2520638331, + 0xb38b661683411074, + 0x1d2c9af4c43d962b, + 0x17d807a0f14aa830, + 0x6e6581a51012c108, + 0x668a537e5b35e6f5, + 0x6c396cf3782dca5d, + 0x33b679d1bff536ed, + 0x736cce41805d90aa, + 0x8a562f369eb680bf, + 0x9f61aa208a11ded8, + 0x43dd89dd94d20f35, + 0xcf84c6610575c10a, + 0x9f318d49cf2fe8e6, + 0xbbc6e5f25a6e434e, + 0x6528c433d11d987b, + 0xffced71cc48c0e8a, + 0x4cbb1474f4cb2a26, + 0x66a035c0b28b7231, + 0xa6f2875faa1a82ae, + 0xdd1ea3deff818b02, + 0xe0cfdf0dcdecf701, + 0x9aefa231f2f6d23, + 0xfb251297efa06746, + 0x5a40d367df985538, + 0x1ea31d69ab506fed, + 0xc64ea8280e89a73f, + 0x969acf9f2d4496f4, + 0xe84c9181ee60c52c, + 0xc60f27fc19fc6ca4, + 0x760b33d850154048, + 0x84f69080f66c8457, + 0xc0192ba0fabf640e, + 0xd2c338765c23a3a8, + 0xa7838c20f02cec6c, + 0xb7cf01d020572877, + 0xd63ffaeba0be200a, + 0xf7492baeb5f041ac, + 0x8602c5212170d117, + 0xad9b2e83a5a42068, + 0x2461829b3ba1083e, + 0x7c34650da5295273, + 0xdc824ba800a8265a, + 0xd18d9b47836af7b2, + 0x3af78945c58cbf4d, + 0x7ed9575b8596906c, + 0x6d0c133895009a66, + 0x53bc1247ea349fe1, + 0x6b3063078d41aa7a, + 0x6184acd8cd880b33, + 0x76f4d15503fd1b96, + 0x7a9afd61eef25746, + 0xce974aadece60609, + 0x88ca59546a8ceafd, + 0x6d29391c41a0ac07, + 0x443843a60e0f46a6, + 0xa1590f62fd2602c7, + 0x536d5b15b514373f, + 0x22d582b, + ]) + ); +} + +#[test] +fn test_neg_one() { + let o = -Fq::one(); + + let thing: [u64; 6] = [ + 0x43f5fffffffcaaae, + 0x32b7fff2ed47fffd, + 0x7e83a49a2e99d69, + 0xeca8f3318332bb7a, + 0xef148d1ea0f4c069, + 0x40ab3263eff0206, + ]; + println!("{:?}", thing); + let negative_one = Fq::new(BigInteger384(thing)); + + assert_eq!(negative_one, o); +} + +#[test] +fn test_fq_repr_from() { + assert_eq!( + BigInteger384::from(100), + BigInteger384([100, 0, 0, 0, 0, 0]) + ); +} + +#[test] +fn test_fq_repr_is_odd() { + assert!(!BigInteger384::from(0).is_odd()); + assert!(BigInteger384::from(0).is_even()); + 
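+ // Parity is read off the lowest bit of the least-significant limb.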
assert!(BigInteger384::from(1).is_odd()); + assert!(!BigInteger384::from(1).is_even()); + assert!(!BigInteger384::from(324834872).is_odd()); + assert!(BigInteger384::from(324834872).is_even()); + assert!(BigInteger384::from(324834873).is_odd()); + assert!(!BigInteger384::from(324834873).is_even()); +} + +#[test] +fn test_fq_repr_is_zero() { + assert!(BigInteger384::from(0).is_zero()); + assert!(!BigInteger384::from(1).is_zero()); + assert!(!BigInteger384([0, 0, 0, 0, 1, 0]).is_zero()); +} + +#[test] +fn test_fq_repr_div2() { + let mut a = BigInteger384([ + 0x8b0ad39f8dd7482a, + 0x147221c9a7178b69, + 0x54764cb08d8a6aa0, + 0x8519d708e1d83041, + 0x41f82777bd13fdb, + 0xf43944578f9b771b, + ]); + a.div2(); + assert_eq!( + a, + BigInteger384([ + 0xc58569cfc6eba415, + 0xa3910e4d38bc5b4, + 0xaa3b265846c53550, + 0xc28ceb8470ec1820, + 0x820fc13bbde89fed, + 0x7a1ca22bc7cdbb8d, + ]) + ); + for _ in 0..10 { + a.div2(); + } + assert_eq!( + a, + BigInteger384([ + 0x6d31615a73f1bae9, + 0x54028e443934e2f1, + 0x82a8ec99611b14d, + 0xfb70a33ae11c3b06, + 0xe36083f04eef7a27, + 0x1e87288af1f36e, + ]) + ); + for _ in 0..300 { + a.div2(); + } + assert_eq!( + a, + BigInteger384([0x7288af1f36ee3608, 0x1e8, 0x0, 0x0, 0x0, 0x0]) + ); + for _ in 0..50 { + a.div2(); + } + assert_eq!(a, BigInteger384([0x7a1ca2, 0x0, 0x0, 0x0, 0x0, 0x0])); + for _ in 0..22 { + a.div2(); + } + assert_eq!(a, BigInteger384([0x1, 0x0, 0x0, 0x0, 0x0, 0x0])); + a.div2(); + assert!(a.is_zero()); +} + +#[test] +fn test_fq_repr_divn() { + let mut a = BigInteger384([ + 0xaa5cdd6172847ffd, + 0x43242c06aed55287, + 0x9ddd5b312f3dd104, + 0xc5541fd48046b7e7, + 0x16080cf4071e0b05, + 0x1225f2901aea514e, + ]); + a.divn(0); + assert_eq!( + a, + BigInteger384([ + 0xaa5cdd6172847ffd, + 0x43242c06aed55287, + 0x9ddd5b312f3dd104, + 0xc5541fd48046b7e7, + 0x16080cf4071e0b05, + 0x1225f2901aea514e, + ]) + ); + a.divn(1); + assert_eq!( + a, + BigInteger384([ + 0xd52e6eb0b9423ffe, + 0x21921603576aa943, + 0xceeead98979ee882, + 0xe2aa0fea40235bf3, + 0xb04067a038f0582, + 0x912f9480d7528a7, + ]) + ); + a.divn(50); + assert_eq!( + a, + BigInteger384([ + 0x8580d5daaa50f54b, + 0xab6625e7ba208864, + 0x83fa9008d6fcf3bb, + 0x19e80e3c160b8aa, + 0xbe52035d4a29c2c1, + 0x244, + ]) + ); + a.divn(130); + assert_eq!( + a, + BigInteger384([ + 0xa0fea40235bf3cee, + 0x4067a038f0582e2a, + 0x2f9480d7528a70b0, + 0x91, + 0x0, + 0x0, + ]) + ); + a.divn(64); + assert_eq!( + a, + BigInteger384([0x4067a038f0582e2a, 0x2f9480d7528a70b0, 0x91, 0x0, 0x0, 0x0]) + ); +} + +#[test] +fn test_fq_repr_mul2() { + let mut a = BigInteger384::from(23712937547); + a.mul2(); + assert_eq!(a, BigInteger384([0xb0acd6c96, 0x0, 0x0, 0x0, 0x0, 0x0])); + for _ in 0..60 { + a.mul2(); + } + assert_eq!( + a, + BigInteger384([0x6000000000000000, 0xb0acd6c9, 0x0, 0x0, 0x0, 0x0]) + ); + for _ in 0..300 { + a.mul2(); + } + assert_eq!( + a, + BigInteger384([0x0, 0x0, 0x0, 0x0, 0x0, 0xcd6c960000000000]) + ); + for _ in 0..17 { + a.mul2(); + } + assert_eq!( + a, + BigInteger384([0x0, 0x0, 0x0, 0x0, 0x0, 0x2c00000000000000]) + ); + for _ in 0..6 { + a.mul2(); + } + assert!(a.is_zero()); +} + +#[test] +fn test_fq_repr_num_bits() { + let mut a = BigInteger384::from(0); + assert_eq!(0, a.num_bits()); + a = BigInteger384::from(1); + for i in 1..385 { + assert_eq!(i, a.num_bits()); + a.mul2(); + } + assert_eq!(0, a.num_bits()); +} + +#[test] +fn test_fq_repr_sub_noborrow() { + let mut rng = ark_std::test_rng(); + + let mut t = BigInteger384([ + 0x827a4a08041ebd9, + 0x3c239f3dcc8f0d6b, + 0x9ab46a912d555364, + 0x196936b17b43910b, + 
0xad0eb3948a5c34fd, + 0xd56f7b5ab8b5ce8, + ]); + t.sub_noborrow(&BigInteger384([ + 0xc7867917187ca02b, + 0x5d75679d4911ffef, + 0x8c5b3e48b1a71c15, + 0x6a427ae846fd66aa, + 0x7a37e7265ee1eaf9, + 0x7c0577a26f59d5, + ])); + assert!( + t == BigInteger384([ + 0x40a12b8967c54bae, + 0xdeae37a0837d0d7b, + 0xe592c487bae374e, + 0xaf26bbc934462a61, + 0x32d6cc6e2b7a4a03, + 0xcdaf23e091c0313, + ]) + ); + + for _ in 0..1000 { + let mut a = BigInteger384::rand(&mut rng); + a.0[5] >>= 30; + let mut b = a; + for _ in 0..10 { + b.mul2(); + } + let mut c = b; + for _ in 0..10 { + c.mul2(); + } + + assert!(a < b); + assert!(b < c); + + let mut csub_ba = c; + csub_ba.sub_noborrow(&b); + csub_ba.sub_noborrow(&a); + + let mut csub_ab = c; + csub_ab.sub_noborrow(&a); + csub_ab.sub_noborrow(&b); + + assert_eq!(csub_ab, csub_ba); + } + + // Subtracting q+1 from q should produce -1 (mod 2**384) + let mut qplusone = BigInteger384([ + 0xb9feffffffffaaab, + 0x1eabfffeb153ffff, + 0x6730d2a0f6b0f624, + 0x64774b84f38512bf, + 0x4b1ba7b6434bacd7, + 0x1a0111ea397fe69a, + ]); + qplusone.sub_noborrow(&BigInteger384([ + 0xb9feffffffffaaac, + 0x1eabfffeb153ffff, + 0x6730d2a0f6b0f624, + 0x64774b84f38512bf, + 0x4b1ba7b6434bacd7, + 0x1a0111ea397fe69a, + ])); + assert_eq!( + qplusone, + BigInteger384([ + 0xffffffffffffffff, + 0xffffffffffffffff, + 0xffffffffffffffff, + 0xffffffffffffffff, + 0xffffffffffffffff, + 0xffffffffffffffff, + ]) + ); +} + +#[test] +fn test_fq_repr_add_nocarry() { + let mut rng = ark_std::test_rng(); + + let mut t = BigInteger384([ + 0x827a4a08041ebd9, + 0x3c239f3dcc8f0d6b, + 0x9ab46a912d555364, + 0x196936b17b43910b, + 0xad0eb3948a5c34fd, + 0xd56f7b5ab8b5ce8, + ]); + t.add_nocarry(&BigInteger384([ + 0xc7867917187ca02b, + 0x5d75679d4911ffef, + 0x8c5b3e48b1a71c15, + 0x6a427ae846fd66aa, + 0x7a37e7265ee1eaf9, + 0x7c0577a26f59d5, + ])); + assert!( + t == BigInteger384([ + 0xcfae1db798be8c04, + 0x999906db15a10d5a, + 0x270fa8d9defc6f79, + 0x83abb199c240f7b6, + 0x27469abae93e1ff6, + 0xdd2fd2d4dfab6be, + ]) + ); + + // Test for the associativity of addition. + for _ in 0..1000 { + let mut a = BigInteger384::rand(&mut rng); + let mut b = BigInteger384::rand(&mut rng); + let mut c = BigInteger384::rand(&mut rng); + + // Unset the first few bits, so that overflow won't occur. + a.0[5] >>= 3; + b.0[5] >>= 3; + c.0[5] >>= 3; + + let mut abc = a; + abc.add_nocarry(&b); + abc.add_nocarry(&c); + + let mut acb = a; + acb.add_nocarry(&c); + acb.add_nocarry(&b); + + let mut bac = b; + bac.add_nocarry(&a); + bac.add_nocarry(&c); + + let mut bca = b; + bca.add_nocarry(&c); + bca.add_nocarry(&a); + + let mut cab = c; + cab.add_nocarry(&a); + cab.add_nocarry(&b); + + let mut cba = c; + cba.add_nocarry(&b); + cba.add_nocarry(&a); + + assert_eq!(abc, acb); + assert_eq!(abc, bac); + assert_eq!(abc, bca); + assert_eq!(abc, cab); + assert_eq!(abc, cba); + } + + // Adding 1 to (2^384 - 1) should produce zero + let mut x = BigInteger384([ + 0xffffffffffffffff, + 0xffffffffffffffff, + 0xffffffffffffffff, + 0xffffffffffffffff, + 0xffffffffffffffff, + 0xffffffffffffffff, + ]); + x.add_nocarry(&BigInteger384::from(1)); + assert!(x.is_zero()); +} + +#[test] +fn test_fq_add_assign() { + { + // Random number + let mut tmp = Fq::new(BigInteger384([ + 0x624434821df92b69, + 0x503260c04fd2e2ea, + 0xd9df726e0d16e8ce, + 0xfbcb39adfd5dfaeb, + 0x86b8a22b0c88b112, + 0x165a2ed809e4201b, + ])); + // Test that adding zero has no effect. 
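+ // Zero is the additive identity, so neither the value nor its representation may change.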
+ tmp.add_assign(&Fq::new(BigInteger384::from(0))); + assert_eq!( + tmp, + Fq::new(BigInteger384([ + 0x624434821df92b69, + 0x503260c04fd2e2ea, + 0xd9df726e0d16e8ce, + 0xfbcb39adfd5dfaeb, + 0x86b8a22b0c88b112, + 0x165a2ed809e4201b, + ])) + ); + // Add one and test for the result. + tmp.add_assign(&Fq::new(BigInteger384::from(1))); + assert_eq!( + tmp, + Fq::new(BigInteger384([ + 0x624434821df92b6a, + 0x503260c04fd2e2ea, + 0xd9df726e0d16e8ce, + 0xfbcb39adfd5dfaeb, + 0x86b8a22b0c88b112, + 0x165a2ed809e4201b, + ])) + ); + // Add another random number that exercises the reduction. + tmp.add_assign(&Fq::new(BigInteger384([ + 0x374d8f8ea7a648d8, + 0xe318bb0ebb8bfa9b, + 0x613d996f0a95b400, + 0x9fac233cb7e4fef1, + 0x67e47552d253c52, + 0x5c31b227edf25da, + ]))); + assert_eq!( + tmp, + Fq::new(BigInteger384([ + 0xdf92c410c59fc997, + 0x149f1bd05a0add85, + 0xd3ec393c20fba6ab, + 0x37001165c1bde71d, + 0x421b41c9f662408e, + 0x21c38104f435f5b, + ])) + ); + // Add one to (q - 1) and test for the result. + tmp = Fq::new(BigInteger384([ + 0xb9feffffffffaaaa, + 0x1eabfffeb153ffff, + 0x6730d2a0f6b0f624, + 0x64774b84f38512bf, + 0x4b1ba7b6434bacd7, + 0x1a0111ea397fe69a, + ])); + tmp.add_assign(&Fq::new(BigInteger384::from(1))); + assert!(tmp.0.is_zero()); + // Add a random number to another one such that the result is q - 1 + tmp = Fq::new(BigInteger384([ + 0x531221a410efc95b, + 0x72819306027e9717, + 0x5ecefb937068b746, + 0x97de59cd6feaefd7, + 0xdc35c51158644588, + 0xb2d176c04f2100, + ])); + tmp.add_assign(&Fq::new(BigInteger384([ + 0x66ecde5bef0fe14f, + 0xac2a6cf8aed568e8, + 0x861d70d86483edd, + 0xcc98f1b7839a22e8, + 0x6ee5e2a4eae7674e, + 0x194e40737930c599, + ]))); + assert_eq!( + tmp, + Fq::new(BigInteger384([ + 0xb9feffffffffaaaa, + 0x1eabfffeb153ffff, + 0x6730d2a0f6b0f624, + 0x64774b84f38512bf, + 0x4b1ba7b6434bacd7, + 0x1a0111ea397fe69a, + ])) + ); + // Add one to the result and test for it. + tmp.add_assign(&Fq::new(BigInteger384::from(1))); + assert!(tmp.0.is_zero()); + } + + // Test associativity + + let mut rng = ark_std::test_rng(); + + for _ in 0..1000 { + // Generate a, b, c and ensure (a + b) + c == a + (b + c). + let a = Fq::rand(&mut rng); + let b = Fq::rand(&mut rng); + let c = Fq::rand(&mut rng); + + let mut tmp1 = a; + tmp1.add_assign(&b); + tmp1.add_assign(&c); + + let mut tmp2 = b; + tmp2.add_assign(&c); + tmp2.add_assign(&a); + + assert_eq!(tmp1, tmp2); + } +} + +#[test] +fn test_fq_sub_assign() { + { + // Test arbitrary subtraction that tests reduction. + let mut tmp = Fq::new(BigInteger384([ + 0x531221a410efc95b, + 0x72819306027e9717, + 0x5ecefb937068b746, + 0x97de59cd6feaefd7, + 0xdc35c51158644588, + 0xb2d176c04f2100, + ])); + tmp.sub_assign(&Fq::new(BigInteger384([ + 0x98910d20877e4ada, + 0x940c983013f4b8ba, + 0xf677dc9b8345ba33, + 0xbef2ce6b7f577eba, + 0xe1ae288ac3222c44, + 0x5968bb602790806, + ]))); + assert_eq!( + tmp, + Fq::new(BigInteger384([ + 0x748014838971292c, + 0xfd20fad49fddde5c, + 0xcf87f198e3d3f336, + 0x3d62d6e6e41883db, + 0x45a3443cd88dc61b, + 0x151d57aaf755ff94, + ])) + ); + + // Test the opposite subtraction which doesn't test reduction. 
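+ // Here the minuend is the larger operand, so no modular wrap-around occurs.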
+ tmp = Fq::new(BigInteger384([ + 0x98910d20877e4ada, + 0x940c983013f4b8ba, + 0xf677dc9b8345ba33, + 0xbef2ce6b7f577eba, + 0xe1ae288ac3222c44, + 0x5968bb602790806, + ])); + tmp.sub_assign(&Fq::new(BigInteger384([ + 0x531221a410efc95b, + 0x72819306027e9717, + 0x5ecefb937068b746, + 0x97de59cd6feaefd7, + 0xdc35c51158644588, + 0xb2d176c04f2100, + ]))); + assert_eq!( + tmp, + Fq::new(BigInteger384([ + 0x457eeb7c768e817f, + 0x218b052a117621a3, + 0x97a8e10812dd02ed, + 0x2714749e0f6c8ee3, + 0x57863796abde6bc, + 0x4e3ba3f4229e706, + ])) + ); + + // Test for sensible results with zero + tmp = Fq::new(BigInteger384::from(0)); + tmp.sub_assign(&Fq::new(BigInteger384::from(0))); + assert!(tmp.is_zero()); + + tmp = Fq::new(BigInteger384([ + 0x98910d20877e4ada, + 0x940c983013f4b8ba, + 0xf677dc9b8345ba33, + 0xbef2ce6b7f577eba, + 0xe1ae288ac3222c44, + 0x5968bb602790806, + ])); + tmp.sub_assign(&Fq::new(BigInteger384::from(0))); + assert_eq!( + tmp, + Fq::new(BigInteger384([ + 0x98910d20877e4ada, + 0x940c983013f4b8ba, + 0xf677dc9b8345ba33, + 0xbef2ce6b7f577eba, + 0xe1ae288ac3222c44, + 0x5968bb602790806, + ])) + ); + } + + let mut rng = ark_std::test_rng(); + + for _ in 0..1000 { + // Ensure that (a - b) + (b - a) = 0. + let a = Fq::rand(&mut rng); + let b = Fq::rand(&mut rng); + + let mut tmp1 = a; + tmp1.sub_assign(&b); + + let mut tmp2 = b; + tmp2.sub_assign(&a); + + tmp1.add_assign(&tmp2); + assert!(tmp1.is_zero()); + } +} + +#[test] +fn test_fq_mul_assign() { + let mut tmp = Fq::new(BigInteger384([ + 0xcc6200000020aa8a, + 0x422800801dd8001a, + 0x7f4f5e619041c62c, + 0x8a55171ac70ed2ba, + 0x3f69cc3a3d07d58b, + 0xb972455fd09b8ef, + ])); + tmp.mul_assign(&Fq::new(BigInteger384([ + 0x329300000030ffcf, + 0x633c00c02cc40028, + 0xbef70d925862a942, + 0x4f7fa2a82a963c17, + 0xdf1eb2575b8bc051, + 0x1162b680fb8e9566, + ]))); + assert!( + tmp == Fq::new(BigInteger384([ + 0x9dc4000001ebfe14, + 0x2850078997b00193, + 0xa8197f1abb4d7bf, + 0xc0309573f4bfe871, + 0xf48d0923ffaf7620, + 0x11d4b58c7a926e66, + ])) + ); + + let mut rng = ark_std::test_rng(); + + for _ in 0..1000000 { + // Ensure that (a * b) * c = a * (b * c) + let a = Fq::rand(&mut rng); + let b = Fq::rand(&mut rng); + let c = Fq::rand(&mut rng); + + let mut tmp1 = a; + tmp1.mul_assign(&b); + tmp1.mul_assign(&c); + + let mut tmp2 = b; + tmp2.mul_assign(&c); + tmp2.mul_assign(&a); + + assert_eq!(tmp1, tmp2); + } + + for _ in 0..1000000 { + // Ensure that r * (a + b + c) = r*a + r*b + r*c + + let r = Fq::rand(&mut rng); + let mut a = Fq::rand(&mut rng); + let mut b = Fq::rand(&mut rng); + let mut c = Fq::rand(&mut rng); + + let mut tmp1 = a; + tmp1.add_assign(&b); + tmp1.add_assign(&c); + tmp1.mul_assign(&r); + + a.mul_assign(&r); + b.mul_assign(&r); + c.mul_assign(&r); + + a.add_assign(&b); + a.add_assign(&c); + + assert_eq!(tmp1, a); + } +} + +#[test] +fn test_fq_squaring() { + let mut a = Fq::new(BigInteger384([ + 0xffffffffffffffff, + 0xffffffffffffffff, + 0xffffffffffffffff, + 0xffffffffffffffff, + 0xffffffffffffffff, + 0x19ffffffffffffff, + ])); + a.square_in_place(); + assert_eq!( + a, + Fq::from(BigInteger384([ + 0x1cfb28fe7dfbbb86, + 0x24cbe1731577a59, + 0xcce1d4edc120e66e, + 0xdc05c659b4e15b27, + 0x79361e5a802c6a23, + 0x24bcbe5d51b9a6f, + ])) + ); + + let mut rng = ark_std::test_rng(); + + for _ in 0..1000000 { + // Ensure that (a * a) = a^2 + let a = Fq::rand(&mut rng); + + let mut tmp = a; + tmp.square_in_place(); + + let mut tmp2 = a; + tmp2.mul_assign(&a); + + assert_eq!(tmp, tmp2); + } +} + +#[test] +fn test_fq_inverse() { + 
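+ // Zero has no multiplicative inverse, so inverse() must return None.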
assert!(Fq::zero().inverse().is_none()); + + let mut rng = ark_std::test_rng(); + + let one = Fq::one(); + + for _ in 0..1000 { + // Ensure that a * a^-1 = 1 + let mut a = Fq::rand(&mut rng); + let ainv = a.inverse().unwrap(); + a.mul_assign(&ainv); + assert_eq!(a, one); + } +} + +#[test] +fn test_fq_double_in_place() { + let mut rng = ark_std::test_rng(); + + for _ in 0..1000 { + // Ensure doubling a is equivalent to adding a to itself. + let mut a = Fq::rand(&mut rng); + let mut b = a; + b.add_assign(&a); + a.double_in_place(); + assert_eq!(a, b); + } +} + +#[test] +fn test_fq_negate() { + { + let a = -Fq::zero(); + + assert!(a.is_zero()); + } + + let mut rng = ark_std::test_rng(); + + for _ in 0..1000 { + // Ensure (a - (-a)) = 0. + let mut a = Fq::rand(&mut rng); + let b = -a; + a.add_assign(&b); + + assert!(a.is_zero()); + } +} + +#[test] +fn test_fq_pow() { + let mut rng = ark_std::test_rng(); + + for i in 0..1000 { + // Exponentiate by various small numbers and ensure it is consistent with repeated + // multiplication. + let a = Fq::rand(&mut rng); + let target = a.pow(&[i]); + let mut c = Fq::one(); + for _ in 0..i { + c.mul_assign(&a); + } + assert_eq!(c, target); + } + + for _ in 0..1000 { + // Exponentiating by the modulus should have no effect in a prime field. + let a = Fq::rand(&mut rng); + + assert_eq!(a, a.pow(Fq::characteristic())); + } +} + +#[test] +fn test_fq_sqrt() { + let mut rng = ark_std::test_rng(); + + assert_eq!(Fq::zero().sqrt().unwrap(), Fq::zero()); + + for _ in 0..1000 { + // Ensure sqrt(a^2) = a or -a + let a = Fq::rand(&mut rng); + let nega = -a; + let mut b = a; + b.square_in_place(); + + let b = b.sqrt().unwrap(); + + assert!(a == b || nega == b); + } + + for _ in 0..1000 { + // Ensure sqrt(a)^2 = a for random a + let a = Fq::rand(&mut rng); + + if let Some(mut tmp) = a.sqrt() { + tmp.square_in_place(); + + assert_eq!(a, tmp); + } + } +} + +#[test] +fn test_fq_num_bits() { + assert_eq!(FqParameters::MODULUS_BITS, 381); + assert_eq!(FqParameters::CAPACITY, 380); +} + +#[test] +fn test_fq_root_of_unity() { + assert_eq!(FqParameters::TWO_ADICITY, 1); + assert_eq!( + Fq::multiplicative_generator(), + Fq::from(BigInteger384::from(2)) + ); + assert_eq!( + Fq::multiplicative_generator().pow([ + 0xdcff7fffffffd555, + 0xf55ffff58a9ffff, + 0xb39869507b587b12, + 0xb23ba5c279c2895f, + 0x258dd3db21a5d66b, + 0xd0088f51cbff34d, + ]), + Fq::two_adic_root_of_unity() + ); + assert_eq!( + Fq::two_adic_root_of_unity().pow([1 << FqParameters::TWO_ADICITY]), + Fq::one() + ); + assert!(Fq::multiplicative_generator().sqrt().is_none()); +} + +// #[test] +// fn fq_field_tests() { +// ::tests::field::random_field_tests::<Fq>(); +// ::tests::field::random_sqrt_tests::<Fq>(); +// ::tests::field::random_frobenius_tests::<Fq, _>(Fq::char(), 13); +// ::tests::field::from_str_tests::<Fq>(); +// } + +#[test] +fn test_fq_ordering() { + // BigInteger384's ordering is well-tested, but we still need to make sure the + // Fq elements aren't being compared in Montgomery form.
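+ // In Montgomery form i is stored as i*R mod q, which does not preserve integer order.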
+ for i in 0..100 { + assert!(Fq::from(BigInteger384::from(i + 1)) > Fq::from(BigInteger384::from(i))); + } +} + +// #[test] +// fn fq_repr_tests() { +// ::tests::repr::random_repr_tests::(); +// } + +#[test] +fn test_fq_legendre() { + use ark_ff::fields::LegendreSymbol::*; + + assert_eq!(QuadraticResidue, Fq::one().legendre()); + assert_eq!(Zero, Fq::zero().legendre()); + + assert_eq!( + QuadraticNonResidue, + Fq::from(BigInteger384::from(2)).legendre() + ); + assert_eq!( + QuadraticResidue, + Fq::from(BigInteger384::from(4)).legendre() + ); + + let e = BigInteger384([ + 0x52a112f249778642, + 0xd0bedb989b7991f, + 0xdad3b6681aa63c05, + 0xf2efc0bb4721b283, + 0x6057a98f18c24733, + 0x1022c2fd122889e4, + ]); + assert_eq!(QuadraticNonResidue, Fq::from(e).legendre()); + let e = BigInteger384([ + 0x6dae594e53a96c74, + 0x19b16ca9ba64b37b, + 0x5c764661a59bfc68, + 0xaa346e9b31c60a, + 0x346059f9d87a9fa9, + 0x1d61ac6bfd5c88b, + ]); + assert_eq!(QuadraticResidue, Fq::from(e).legendre()); +} + +#[test] +fn test_fq2_ordering() { + let mut a = Fq2::new(Fq::zero(), Fq::zero()); + + let mut b = a.clone(); + + assert!(a.cmp(&b) == Ordering::Equal); + b.c0.add_assign(&Fq::one()); + assert!(a.cmp(&b) == Ordering::Less); + a.c0.add_assign(&Fq::one()); + assert!(a.cmp(&b) == Ordering::Equal); + b.c1.add_assign(&Fq::one()); + assert!(a.cmp(&b) == Ordering::Less); + a.c0.add_assign(&Fq::one()); + assert!(a.cmp(&b) == Ordering::Less); + a.c1.add_assign(&Fq::one()); + assert!(a.cmp(&b) == Ordering::Greater); + b.c0.add_assign(&Fq::one()); + assert!(a.cmp(&b) == Ordering::Equal); +} + +#[test] +fn test_fq2_basics() { + assert_eq!(Fq2::new(Fq::zero(), Fq::zero(),), Fq2::zero()); + assert_eq!(Fq2::new(Fq::one(), Fq::zero(),), Fq2::one()); + assert!(Fq2::zero().is_zero()); + assert!(!Fq2::one().is_zero()); + assert!(!Fq2::new(Fq::zero(), Fq::one(),).is_zero()); +} + +#[test] +fn test_fq2_squaring() { + let a = Fq2::new(Fq::one(), Fq::one()).square(); // u + 1 + assert_eq!(a, Fq2::new(Fq::zero(), Fq::from(BigInteger384::from(2)),)); // 2u + + let a = Fq2::new(Fq::zero(), Fq::one()).square(); // u + assert_eq!(a, { + let neg1 = -Fq::one(); + Fq2::new(neg1, Fq::zero()) + }); // -1 + + let mut a = Fq2::new( + Fq::from(BigInteger384([ + 0x9c2c6309bbf8b598, + 0x4eef5c946536f602, + 0x90e34aab6fb6a6bd, + 0xf7f295a94e58ae7c, + 0x41b76dcc1c3fbe5e, + 0x7080c5fa1d8e042, + ])), + Fq::from(BigInteger384([ + 0x38f473b3c870a4ab, + 0x6ad3291177c8c7e5, + 0xdac5a4c911a4353e, + 0xbfb99020604137a0, + 0xfc58a7b7be815407, + 0x10d1615e75250a21, + ])), + ); + a.square_in_place(); + assert_eq!( + a, + Fq2::new( + Fq::from(BigInteger384([ + 0xf262c28c538bcf68, + 0xb9f2a66eae1073ba, + 0xdc46ab8fad67ae0, + 0xcb674157618da176, + 0x4cf17b5893c3d327, + 0x7eac81369c43361, + ])), + Fq::from(BigInteger384([ + 0xc1579cf58e980cf8, + 0xa23eb7e12dd54d98, + 0xe75138bce4cec7aa, + 0x38d0d7275a9689e1, + 0x739c983042779a65, + 0x1542a61c8a8db994, + ])), + ) + ); +} + +#[test] +fn test_fq2_mul() { + let mut a = Fq2::new( + Fq::from(BigInteger384([ + 0x85c9f989e1461f03, + 0xa2e33c333449a1d6, + 0x41e461154a7354a3, + 0x9ee53e7e84d7532e, + 0x1c202d8ed97afb45, + 0x51d3f9253e2516f, + ])), + Fq::from(BigInteger384([ + 0xa7348a8b511aedcf, + 0x143c215d8176b319, + 0x4cc48081c09b8903, + 0x9533e4a9a5158be, + 0x7a5e1ecb676d65f9, + 0x180c3ee46656b008, + ])), + ); + a.mul_assign(&Fq2::new( + Fq::from(BigInteger384([ + 0xe21f9169805f537e, + 0xfc87e62e179c285d, + 0x27ece175be07a531, + 0xcd460f9f0c23e430, + 0x6c9110292bfa409, + 0x2c93a72eb8af83e, + ])), + Fq::from(BigInteger384([ + 
0x4b1c3f936d8992d4, + 0x1d2a72916dba4c8a, + 0x8871c508658d1e5f, + 0x57a06d3135a752ae, + 0x634cd3c6c565096d, + 0x19e17334d4e93558, + ])), + )); + assert_eq!( + a, + Fq2::new( + Fq::from(BigInteger384([ + 0x95b5127e6360c7e4, + 0xde29c31a19a6937e, + 0xf61a96dacf5a39bc, + 0x5511fe4d84ee5f78, + 0x5310a202d92f9963, + 0x1751afbe166e5399, + ])), + Fq::from(BigInteger384([ + 0x84af0e1bd630117a, + 0x6c63cd4da2c2aa7, + 0x5ba6e5430e883d40, + 0xc975106579c275ee, + 0x33a9ac82ce4c5083, + 0x1ef1a36c201589d, + ])), + ) + ); +} + +#[test] +fn test_fq2_inverse() { + assert!(Fq2::zero().inverse().is_none()); + + let a = Fq2::new( + Fq::from(BigInteger384([ + 0x85c9f989e1461f03, + 0xa2e33c333449a1d6, + 0x41e461154a7354a3, + 0x9ee53e7e84d7532e, + 0x1c202d8ed97afb45, + 0x51d3f9253e2516f, + ])), + Fq::from(BigInteger384([ + 0xa7348a8b511aedcf, + 0x143c215d8176b319, + 0x4cc48081c09b8903, + 0x9533e4a9a5158be, + 0x7a5e1ecb676d65f9, + 0x180c3ee46656b008, + ])), + ); + let a = a.inverse().unwrap(); + assert_eq!( + a, + Fq2::new( + Fq::from(BigInteger384([ + 0x70300f9bcb9e594, + 0xe5ecda5fdafddbb2, + 0x64bef617d2915a8f, + 0xdfba703293941c30, + 0xa6c3d8f9586f2636, + 0x1351ef01941b70c4, + ])), + Fq::from(BigInteger384([ + 0x8c39fd76a8312cb4, + 0x15d7b6b95defbff0, + 0x947143f89faedee9, + 0xcbf651a0f367afb2, + 0xdf4e54f0d3ef15a6, + 0x103bdf241afb0019, + ])), + ) + ); +} + +#[test] +fn test_fq2_addition() { + let mut a = Fq2::new( + Fq::from(BigInteger384([ + 0x2d0078036923ffc7, + 0x11e59ea221a3b6d2, + 0x8b1a52e0a90f59ed, + 0xb966ce3bc2108b13, + 0xccc649c4b9532bf3, + 0xf8d295b2ded9dc, + ])), + Fq::from(BigInteger384([ + 0x977df6efcdaee0db, + 0x946ae52d684fa7ed, + 0xbe203411c66fb3a5, + 0xb3f8afc0ee248cad, + 0x4e464dea5bcfd41e, + 0x12d1137b8a6a837, + ])), + ); + a.add_assign(&Fq2::new( + Fq::from(BigInteger384([ + 0x619a02d78dc70ef2, + 0xb93adfc9119e33e8, + 0x4bf0b99a9f0dca12, + 0x3b88899a42a6318f, + 0x986a4a62fa82a49d, + 0x13ce433fa26027f5, + ])), + Fq::from(BigInteger384([ + 0x66323bf80b58b9b9, + 0xa1379b6facf6e596, + 0x402aef1fb797e32f, + 0x2236f55246d0d44d, + 0x4c8c1800eb104566, + 0x11d6e20e986c2085, + ])), + )); + assert_eq!( + a, + Fq2::new( + Fq::from(BigInteger384([ + 0x8e9a7adaf6eb0eb9, + 0xcb207e6b3341eaba, + 0xd70b0c7b481d23ff, + 0xf4ef57d604b6bca2, + 0x65309427b3d5d090, + 0x14c715d5553f01d2, + ])), + Fq::from(BigInteger384([ + 0xfdb032e7d9079a94, + 0x35a2809d15468d83, + 0xfe4b23317e0796d5, + 0xd62fa51334f560fa, + 0x9ad265eb46e01984, + 0x1303f3465112c8bc, + ])), + ) + ); +} + +#[test] +fn test_fq2_subtraction() { + let mut a = Fq2::new( + Fq::from(BigInteger384([ + 0x2d0078036923ffc7, + 0x11e59ea221a3b6d2, + 0x8b1a52e0a90f59ed, + 0xb966ce3bc2108b13, + 0xccc649c4b9532bf3, + 0xf8d295b2ded9dc, + ])), + Fq::from(BigInteger384([ + 0x977df6efcdaee0db, + 0x946ae52d684fa7ed, + 0xbe203411c66fb3a5, + 0xb3f8afc0ee248cad, + 0x4e464dea5bcfd41e, + 0x12d1137b8a6a837, + ])), + ); + a.sub_assign(&Fq2::new( + Fq::from(BigInteger384([ + 0x619a02d78dc70ef2, + 0xb93adfc9119e33e8, + 0x4bf0b99a9f0dca12, + 0x3b88899a42a6318f, + 0x986a4a62fa82a49d, + 0x13ce433fa26027f5, + ])), + Fq::from(BigInteger384([ + 0x66323bf80b58b9b9, + 0xa1379b6facf6e596, + 0x402aef1fb797e32f, + 0x2236f55246d0d44d, + 0x4c8c1800eb104566, + 0x11d6e20e986c2085, + ])), + )); + assert_eq!( + a, + Fq2::new( + Fq::from(BigInteger384([ + 0x8565752bdb5c9b80, + 0x7756bed7c15982e9, + 0xa65a6be700b285fe, + 0xe255902672ef6c43, + 0x7f77a718021c342d, + 0x72ba14049fe9881, + ])), + Fq::from(BigInteger384([ + 0xeb4abaf7c255d1cd, + 0x11df49bc6cacc256, + 0xe52617930588c69a, + 
0xf63905f39ad8cb1f, + 0x4cd5dd9fb40b3b8f, + 0x957411359ba6e4c, + ])), + ) + ); +} + +#[test] +fn test_fq2_negation() { + let mut a = Fq2::new( + Fq::from(BigInteger384([ + 0x2d0078036923ffc7, + 0x11e59ea221a3b6d2, + 0x8b1a52e0a90f59ed, + 0xb966ce3bc2108b13, + 0xccc649c4b9532bf3, + 0xf8d295b2ded9dc, + ])), + Fq::from(BigInteger384([ + 0x977df6efcdaee0db, + 0x946ae52d684fa7ed, + 0xbe203411c66fb3a5, + 0xb3f8afc0ee248cad, + 0x4e464dea5bcfd41e, + 0x12d1137b8a6a837, + ])), + ); + a = -a; + assert_eq!( + a, + Fq2::new( + Fq::from(BigInteger384([ + 0x8cfe87fc96dbaae4, + 0xcc6615c8fb0492d, + 0xdc167fc04da19c37, + 0xab107d49317487ab, + 0x7e555df189f880e3, + 0x19083f5486a10cbd, + ])), + Fq::from(BigInteger384([ + 0x228109103250c9d0, + 0x8a411ad149045812, + 0xa9109e8f3041427e, + 0xb07e9bc405608611, + 0xfcd559cbe77bd8b8, + 0x18d400b280d93e62, + ])), + ) + ); +} + +#[test] +fn test_fq2_doubling() { + let mut a = Fq2::new( + Fq::from(BigInteger384([ + 0x2d0078036923ffc7, + 0x11e59ea221a3b6d2, + 0x8b1a52e0a90f59ed, + 0xb966ce3bc2108b13, + 0xccc649c4b9532bf3, + 0xf8d295b2ded9dc, + ])), + Fq::from(BigInteger384([ + 0x977df6efcdaee0db, + 0x946ae52d684fa7ed, + 0xbe203411c66fb3a5, + 0xb3f8afc0ee248cad, + 0x4e464dea5bcfd41e, + 0x12d1137b8a6a837, + ])), + ); + a.double_in_place(); + assert_eq!( + a, + Fq2::new( + Fq::from(BigInteger384([ + 0x5a00f006d247ff8e, + 0x23cb3d4443476da4, + 0x1634a5c1521eb3da, + 0x72cd9c7784211627, + 0x998c938972a657e7, + 0x1f1a52b65bdb3b9, + ])), + Fq::from(BigInteger384([ + 0x2efbeddf9b5dc1b6, + 0x28d5ca5ad09f4fdb, + 0x7c4068238cdf674b, + 0x67f15f81dc49195b, + 0x9c8c9bd4b79fa83d, + 0x25a226f714d506e, + ])), + ) + ); +} + +#[test] +fn test_fq2_frobenius_map() { + let mut a = Fq2::new( + Fq::from(BigInteger384([ + 0x2d0078036923ffc7, + 0x11e59ea221a3b6d2, + 0x8b1a52e0a90f59ed, + 0xb966ce3bc2108b13, + 0xccc649c4b9532bf3, + 0xf8d295b2ded9dc, + ])), + Fq::from(BigInteger384([ + 0x977df6efcdaee0db, + 0x946ae52d684fa7ed, + 0xbe203411c66fb3a5, + 0xb3f8afc0ee248cad, + 0x4e464dea5bcfd41e, + 0x12d1137b8a6a837, + ])), + ); + a.frobenius_map(0); + assert_eq!( + a, + Fq2::new( + Fq::from(BigInteger384([ + 0x2d0078036923ffc7, + 0x11e59ea221a3b6d2, + 0x8b1a52e0a90f59ed, + 0xb966ce3bc2108b13, + 0xccc649c4b9532bf3, + 0xf8d295b2ded9dc, + ])), + Fq::from(BigInteger384([ + 0x977df6efcdaee0db, + 0x946ae52d684fa7ed, + 0xbe203411c66fb3a5, + 0xb3f8afc0ee248cad, + 0x4e464dea5bcfd41e, + 0x12d1137b8a6a837, + ])), + ) + ); + a.frobenius_map(1); + assert_eq!( + a, + Fq2::new( + Fq::from(BigInteger384([ + 0x2d0078036923ffc7, + 0x11e59ea221a3b6d2, + 0x8b1a52e0a90f59ed, + 0xb966ce3bc2108b13, + 0xccc649c4b9532bf3, + 0xf8d295b2ded9dc, + ])), + Fq::from(BigInteger384([ + 0x228109103250c9d0, + 0x8a411ad149045812, + 0xa9109e8f3041427e, + 0xb07e9bc405608611, + 0xfcd559cbe77bd8b8, + 0x18d400b280d93e62, + ])), + ) + ); + a.frobenius_map(1); + assert_eq!( + a, + Fq2::new( + Fq::from(BigInteger384([ + 0x2d0078036923ffc7, + 0x11e59ea221a3b6d2, + 0x8b1a52e0a90f59ed, + 0xb966ce3bc2108b13, + 0xccc649c4b9532bf3, + 0xf8d295b2ded9dc, + ])), + Fq::from(BigInteger384([ + 0x977df6efcdaee0db, + 0x946ae52d684fa7ed, + 0xbe203411c66fb3a5, + 0xb3f8afc0ee248cad, + 0x4e464dea5bcfd41e, + 0x12d1137b8a6a837, + ])), + ) + ); + a.frobenius_map(2); + assert_eq!( + a, + Fq2::new( + Fq::from(BigInteger384([ + 0x2d0078036923ffc7, + 0x11e59ea221a3b6d2, + 0x8b1a52e0a90f59ed, + 0xb966ce3bc2108b13, + 0xccc649c4b9532bf3, + 0xf8d295b2ded9dc, + ])), + Fq::from(BigInteger384([ + 0x977df6efcdaee0db, + 0x946ae52d684fa7ed, + 0xbe203411c66fb3a5, + 
0xb3f8afc0ee248cad, + 0x4e464dea5bcfd41e, + 0x12d1137b8a6a837, + ])), + ) + ); +} + +#[test] +fn test_fq2_legendre() { + use ark_ff::fields::LegendreSymbol::*; + + assert_eq!(Zero, Fq2::zero().legendre()); + // i^2 = -1 + let mut m1 = -Fq2::one(); + assert_eq!(QuadraticResidue, m1.legendre()); + m1 = Fq6Parameters::mul_fp2_by_nonresidue(&m1); + assert_eq!(QuadraticNonResidue, m1.legendre()); +} + +#[test] +fn test_fq2_mul_nonresidue() { + let mut rng = ark_std::test_rng(); + + let nqr = Fq2::new(Fq::one(), Fq::one()); + + for _ in 0..1000 { + let mut a = Fq2::rand(&mut rng); + let mut b = a; + a = Fq6Parameters::mul_fp2_by_nonresidue(&a); + b.mul_assign(&nqr); + + assert_eq!(a, b); + } +} + +#[test] +fn test_fq6_mul_nonresidue() { + let mut rng = ark_std::test_rng(); + + let nqr = Fq6::new(Fq2::zero(), Fq2::one(), Fq2::zero()); + + for _ in 0..1000 { + let mut a = Fq6::rand(&mut rng); + let mut b = a; + a = Fq12Parameters::mul_fp6_by_nonresidue(&a); + b.mul_assign(&nqr); + + assert_eq!(a, b); + } +} + +#[test] +fn test_fq6_mul_by_1() { + let mut rng = ark_std::test_rng(); + + for _ in 0..1000 { + let c1 = Fq2::rand(&mut rng); + let mut a = Fq6::rand(&mut rng); + let mut b = a; + + a.mul_by_1(&c1); + b.mul_assign(&Fq6::new(Fq2::zero(), c1, Fq2::zero())); + + assert_eq!(a, b); + } +} + +#[test] +fn test_fq6_mul_by_01() { + let mut rng = ark_std::test_rng(); + + for _ in 0..1000 { + let c0 = Fq2::rand(&mut rng); + let c1 = Fq2::rand(&mut rng); + let mut a = Fq6::rand(&mut rng); + let mut b = a; + + a.mul_by_01(&c0, &c1); + b.mul_assign(&Fq6::new(c0, c1, Fq2::zero())); + + assert_eq!(a, b); + } +} + +#[test] +fn test_fq12_mul_by_014() { + let mut rng = ark_std::test_rng(); + + for _ in 0..1000 { + let c0 = Fq2::rand(&mut rng); + let c1 = Fq2::rand(&mut rng); + let c5 = Fq2::rand(&mut rng); + let mut a = Fq12::rand(&mut rng); + let mut b = a; + + a.mul_by_014(&c0, &c1, &c5); + b.mul_assign(&Fq12::new( + Fq6::new(c0, c1, Fq2::zero()), + Fq6::new(Fq2::zero(), c5, Fq2::zero()), + )); + + assert_eq!(a, b); + } +} diff --git a/arkworks/curves/bls12_381/src/lib.rs b/arkworks/curves/bls12_381/src/lib.rs new file mode 100644 index 00000000..a9142c28 --- /dev/null +++ b/arkworks/curves/bls12_381/src/lib.rs @@ -0,0 +1,32 @@ +#![cfg_attr(not(feature = "std"), no_std)] +#![deny( + warnings, + unused, + future_incompatible, + nonstandard_style, + rust_2018_idioms +)] +#![forbid(unsafe_code)] + +//! This library implements the BLS12_381 curve generated by [Sean Bowe](https://electriccoin.co/blog/new-snark-curve/). +//! The name denotes that it is a Barreto--Lynn--Scott curve of embedding degree 12, +//! defined over a 381-bit (prime) field. +//! This curve was intended to replace the BN254 curve to provide a higher security +//! level without incurring a large performance overhead. +//! +//! +//! Curve information: +//! * Base field: q = 4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787 +//! * Scalar field: r = 52435875175126190479447740508185965837690552500527637822603658699938581184513 +//! * valuation(q - 1, 2) = 1 +//! * valuation(r - 1, 2) = 32 +//! * G1 curve equation: y^2 = x^3 + 4 +//! 
* G2 curve equation: y^2 = x^3 + Fq2(4, 4) + +#[cfg(feature = "curve")] +mod curves; +mod fields; + +#[cfg(feature = "curve")] +pub use curves::*; +pub use fields::*; diff --git a/arkworks/curves/bn254/Cargo.toml b/arkworks/curves/bn254/Cargo.toml new file mode 100644 index 00000000..4a52b0b9 --- /dev/null +++ b/arkworks/curves/bn254/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "ark-bn254" +version = "0.3.0" +authors = [ "arkworks contributors" ] +description = "The BN254 pairing-friendly elliptic curve" +homepage = "https://arkworks.rs" +repository = "https://github.com/arkworks-rs/algebra" +documentation = "https://docs.rs/ark-bn254/" +keywords = ["cryptography", "finite-fields", "elliptic-curves" ] +categories = ["cryptography"] +include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +license = "MIT/Apache-2.0" +edition = "2018" + +[dependencies] +ark-ff = { version="^0.3.0", default-features = false } +ark-ec = { version="^0.3.0", default-features = false } +ark-std = { version="^0.3.0", default-features = false } + +[dev-dependencies] +ark-serialize = { version="^0.3.0", default-features = false } +ark-algebra-test-templates = { version="^0.3.0", default-features = false } + +[features] +default = [ "curve" ] +std = [ "ark-std/std", "ark-ff/std", "ark-ec/std" ] + +curve = [ "scalar_field" ] +scalar_field = [] diff --git a/arkworks/curves/bn254/LICENSE-APACHE b/arkworks/curves/bn254/LICENSE-APACHE new file mode 120000 index 00000000..965b606f --- /dev/null +++ b/arkworks/curves/bn254/LICENSE-APACHE @@ -0,0 +1 @@ +../LICENSE-APACHE \ No newline at end of file diff --git a/arkworks/curves/bn254/LICENSE-MIT b/arkworks/curves/bn254/LICENSE-MIT new file mode 120000 index 00000000..76219eb7 --- /dev/null +++ b/arkworks/curves/bn254/LICENSE-MIT @@ -0,0 +1 @@ +../LICENSE-MIT \ No newline at end of file diff --git a/arkworks/curves/bn254/src/curves/g1.rs b/arkworks/curves/bn254/src/curves/g1.rs new file mode 100644 index 00000000..b9a36bb7 --- /dev/null +++ b/arkworks/curves/bn254/src/curves/g1.rs @@ -0,0 +1,41 @@ +use ark_ec::models::{ModelParameters, SWModelParameters}; +use ark_ff::{field_new, Zero}; + +use crate::{Fq, Fr}; + +#[derive(Clone, Default, PartialEq, Eq)] +pub struct Parameters; + +impl ModelParameters for Parameters { + type BaseField = Fq; + type ScalarField = Fr; +} + +impl SWModelParameters for Parameters { + /// COEFF_A = 0 + const COEFF_A: Fq = field_new!(Fq, "0"); + + /// COEFF_B = 3 + const COEFF_B: Fq = field_new!(Fq, "3"); + + /// COFACTOR = 1 + const COFACTOR: &'static [u64] = &[0x1]; + + /// COFACTOR_INV = COFACTOR^{-1} mod r = 1 + const COFACTOR_INV: Fr = field_new!(Fr, "1"); + + /// AFFINE_GENERATOR_COEFFS = (G1_GENERATOR_X, G1_GENERATOR_Y) + const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = + (G1_GENERATOR_X, G1_GENERATOR_Y); + + #[inline(always)] + fn mul_by_a(_: &Self::BaseField) -> Self::BaseField { + Self::BaseField::zero() + } +} + +/// G1_GENERATOR_X = 1 +pub const G1_GENERATOR_X: Fq = field_new!(Fq, "1"); + +/// G1_GENERATOR_Y = 2 +pub const G1_GENERATOR_Y: Fq = field_new!(Fq, "2"); diff --git a/arkworks/curves/bn254/src/curves/g2.rs b/arkworks/curves/bn254/src/curves/g2.rs new file mode 100644 index 00000000..28160bea --- /dev/null +++ b/arkworks/curves/bn254/src/curves/g2.rs @@ -0,0 +1,74 @@ +use ark_ec::models::{ModelParameters, SWModelParameters}; +use ark_ff::{field_new, Zero}; + +use crate::{Fq, Fq2, Fr}; + +#[derive(Clone, Default, PartialEq, Eq)] +pub struct Parameters; + +impl ModelParameters for 
Parameters { + type BaseField = Fq2; + type ScalarField = Fr; +} + +impl SWModelParameters for Parameters { + /// COEFF_A = [0, 0] + #[rustfmt::skip] + const COEFF_A: Fq2 = field_new!(Fq2, field_new!(Fq, "0"), field_new!(Fq, "0")); + + /// COEFF_B = 3/(u+9) + /// = (19485874751759354771024239261021720505790618469301721065564631296452457478373, 266929791119991161246907387137283842545076965332900288569378510910307636690) + #[rustfmt::skip] + const COEFF_B: Fq2 = field_new!(Fq2, + field_new!(Fq, "19485874751759354771024239261021720505790618469301721065564631296452457478373"), + field_new!(Fq, "266929791119991161246907387137283842545076965332900288569378510910307636690"), + ); + + /// COFACTOR = (36 * X^4) + (36 * X^3) + (30 * X^2) + 6*X + 1 + /// = 21888242871839275222246405745257275088844257914179612981679871602714643921549 + #[rustfmt::skip] + const COFACTOR: &'static [u64] = &[ + 0x345f2299c0f9fa8d, + 0x06ceecda572a2489, + 0xb85045b68181585e, + 0x30644e72e131a029, + ]; + + /// COFACTOR_INV = COFACTOR^{-1} mod r + #[rustfmt::skip] + const COFACTOR_INV: Fr = field_new!(Fr, "10944121435919637613327163357776759465618812564592884533313067514031822496649"); + + /// AFFINE_GENERATOR_COEFFS = (G2_GENERATOR_X, G2_GENERATOR_Y) + const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = + (G2_GENERATOR_X, G2_GENERATOR_Y); + + #[inline(always)] + fn mul_by_a(_: &Self::BaseField) -> Self::BaseField { + Self::BaseField::zero() + } +} + +#[rustfmt::skip] +pub const G2_GENERATOR_X: Fq2 = field_new!(Fq2, G2_GENERATOR_X_C0, G2_GENERATOR_X_C1); +#[rustfmt::skip] +pub const G2_GENERATOR_Y: Fq2 = field_new!(Fq2, G2_GENERATOR_Y_C0, G2_GENERATOR_Y_C1); + +/// G2_GENERATOR_X_C0 = +/// 10857046999023057135944570762232829481370756359578518086990519993285655852781 +#[rustfmt::skip] +pub const G2_GENERATOR_X_C0: Fq = field_new!(Fq, "10857046999023057135944570762232829481370756359578518086990519993285655852781"); + +/// G2_GENERATOR_X_C1 = +/// 11559732032986387107991004021392285783925812861821192530917403151452391805634 +#[rustfmt::skip] +pub const G2_GENERATOR_X_C1: Fq = field_new!(Fq, "11559732032986387107991004021392285783925812861821192530917403151452391805634"); + +/// G2_GENERATOR_Y_C0 = +/// 8495653923123431417604973247489272438418190587263600148770280649306958101930 +#[rustfmt::skip] +pub const G2_GENERATOR_Y_C0: Fq = field_new!(Fq, "8495653923123431417604973247489272438418190587263600148770280649306958101930"); + +/// G2_GENERATOR_Y_C1 = +/// 4082367875863433681332203403145435568316851327593401208105741076214120093531 +#[rustfmt::skip] +pub const G2_GENERATOR_Y_C1: Fq = field_new!(Fq, "4082367875863433681332203403145435568316851327593401208105741076214120093531"); diff --git a/arkworks/curves/bn254/src/curves/mod.rs b/arkworks/curves/bn254/src/curves/mod.rs new file mode 100644 index 00000000..8d09267d --- /dev/null +++ b/arkworks/curves/bn254/src/curves/mod.rs @@ -0,0 +1,61 @@ +use crate::*; +use ark_ec::{ + bn, + bn::{Bn, BnParameters, TwistType}, +}; +use ark_ff::field_new; +pub mod g1; +pub mod g2; + +#[cfg(test)] +mod tests; + +pub struct Parameters; + +impl BnParameters for Parameters { + const X: &'static [u64] = &[4965661367192848881]; + /// `x` is positive. 
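+    // ATE_LOOP_COUNT below is the signed binary expansion of 6*X + 2, the scalar over which the optimal ate Miller loop for BN curves runs.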
+ const X_IS_NEGATIVE: bool = false; + const ATE_LOOP_COUNT: &'static [i8] = &[ + 0, 0, 0, 1, 0, 1, 0, -1, 0, 0, 1, -1, 0, 0, 1, 0, 0, 1, 1, 0, -1, 0, 0, 1, 0, -1, 0, 0, 0, + 0, 1, 1, 1, 0, 0, -1, 0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0, 1, 1, 0, 0, -1, 0, 0, 0, 1, 1, 0, + -1, 0, 0, 1, 0, 1, 1, + ]; + + const TWIST_MUL_BY_Q_X: Fq2 = field_new!( + Fq2, + field_new!( + Fq, + "21575463638280843010398324269430826099269044274347216827212613867836435027261" + ), + field_new!( + Fq, + "10307601595873709700152284273816112264069230130616436755625194854815875713954" + ), + ); + const TWIST_MUL_BY_Q_Y: Fq2 = field_new!( + Fq2, + field_new!( + Fq, + "2821565182194536844548159561693502659359617185244120367078079554186484126554" + ), + field_new!( + Fq, + "3505843767911556378687030309984248845540243509899259641013678093033130930403" + ), + ); + const TWIST_TYPE: TwistType = TwistType::D; + type Fp = Fq; + type Fp2Params = Fq2Parameters; + type Fp6Params = Fq6Parameters; + type Fp12Params = Fq12Parameters; + type G1Parameters = g1::Parameters; + type G2Parameters = g2::Parameters; +} + +pub type Bn254 = Bn<Parameters>; + +pub type G1Affine = bn::G1Affine<Parameters>; +pub type G1Projective = bn::G1Projective<Parameters>; +pub type G2Affine = bn::G2Affine<Parameters>; +pub type G2Projective = bn::G2Projective<Parameters>; diff --git a/arkworks/curves/bn254/src/curves/tests.rs b/arkworks/curves/bn254/src/curves/tests.rs new file mode 100644 index 00000000..203af4b5 --- /dev/null +++ b/arkworks/curves/bn254/src/curves/tests.rs @@ -0,0 +1,86 @@ +#![allow(unused_imports)] +use ark_ec::{models::SWModelParameters, AffineCurve, PairingEngine, ProjectiveCurve}; +use ark_ff::{ + fields::{Field, FpParameters, PrimeField, SquareRootField}, + One, Zero, +}; +use ark_serialize::CanonicalSerialize; +use ark_std::rand::Rng; +use ark_std::test_rng; +use core::ops::{AddAssign, MulAssign}; + +use crate::{g1, g2, Bn254, Fq, Fq12, Fq2, Fr, G1Affine, G1Projective, G2Affine, G2Projective}; + +use ark_algebra_test_templates::{curves::*, groups::*}; + +#[test] +fn test_g1_projective_curve() { + curve_tests::<G1Projective>(); + + sw_tests::<g1::Parameters>(); +} + +#[test] +fn test_g1_projective_group() { + let mut rng = test_rng(); + let a: G1Projective = rng.gen(); + let b: G1Projective = rng.gen(); + group_test(a, b); +} + +#[test] +fn test_g1_generator() { + let generator = G1Affine::prime_subgroup_generator(); + assert!(generator.is_on_curve()); + assert!(generator.is_in_correct_subgroup_assuming_on_curve()); +} + +#[test] +fn test_g2_projective_curve() { + curve_tests::<G2Projective>(); + + sw_tests::<g2::Parameters>(); +} + +#[test] +fn test_g2_projective_group() { + let mut rng = test_rng(); + let a: G2Projective = rng.gen(); + let b: G2Projective = rng.gen(); + group_test(a, b); +} + +#[test] +fn test_g2_generator() { + let generator = G2Affine::prime_subgroup_generator(); + assert!(generator.is_on_curve()); + assert!(generator.is_in_correct_subgroup_assuming_on_curve()); +} + +#[test] +fn test_bilinearity() { + let mut rng = test_rng(); + let a: G1Projective = rng.gen(); + let b: G2Projective = rng.gen(); + let s: Fr = rng.gen(); + + let mut sa = a; + sa.mul_assign(s); + let mut sb = b; + sb.mul_assign(s); + + let ans1 = Bn254::pairing(sa, b); + let ans2 = Bn254::pairing(a, sb); + let ans3 = Bn254::pairing(a, b).pow(s.into_repr()); + + assert_eq!(ans1, ans2); + assert_eq!(ans2, ans3); + + assert_ne!(ans1, Fq12::one()); + assert_ne!(ans2, Fq12::one()); + assert_ne!(ans3, Fq12::one()); + + assert_eq!(ans1.pow(Fr::characteristic()), Fq12::one()); + assert_eq!(ans2.pow(Fr::characteristic()), Fq12::one()); + 
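// (These powers are one because pairing outputs lie in the order-r subgroup G_T of Fq12.) +    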
assert_eq!(ans3.pow(Fr::characteristic()), Fq12::one()); +} diff --git a/arkworks/curves/bn254/src/fields/fq.rs b/arkworks/curves/bn254/src/fields/fq.rs new file mode 100644 index 00000000..b796421b --- /dev/null +++ b/arkworks/curves/bn254/src/fields/fq.rs @@ -0,0 +1,96 @@ +use ark_ff::{biginteger::BigInteger256 as BigInteger, field_new, fields::*}; + +pub type Fq = Fp256; + +pub struct FqParameters; + +impl Fp256Parameters for FqParameters {} +impl FftParameters for FqParameters { + type BigInt = BigInteger; + + const TWO_ADICITY: u32 = 1; + + #[rustfmt::skip] + const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([ + 0x68c3488912edefaa, + 0x8d087f6872aabf4f, + 0x51e1a24709081231, + 0x2259d6b14729c0fa, + ]); +} +impl FpParameters for FqParameters { + /// MODULUS = 21888242871839275222246405745257275088696311157297823662689037894645226208583 + #[rustfmt::skip] + const MODULUS: BigInteger = BigInteger([ + 0x3c208c16d87cfd47, + 0x97816a916871ca8d, + 0xb85045b68181585d, + 0x30644e72e131a029, + ]); + + const MODULUS_BITS: u32 = 254; + + const CAPACITY: u32 = Self::MODULUS_BITS - 1; + + const REPR_SHAVE_BITS: u32 = 2; + + #[rustfmt::skip] + const R: BigInteger = BigInteger([ + 0xd35d438dc58f0d9d, + 0x0a78eb28f5c70b3d, + 0x666ea36f7879462c, + 0xe0a77c19a07df2f, + ]); + + #[rustfmt::skip] + const R2: BigInteger = BigInteger([ + 0xf32cfc5b538afa89, + 0xb5e71911d44501fb, + 0x47ab1eff0a417ff6, + 0x6d89f71cab8351f, + ]); + + const INV: u64 = 9786893198990664585u64; + + // GENERATOR = 3 + #[rustfmt::skip] + const GENERATOR: BigInteger = BigInteger([ + 0x7a17caa950ad28d7, + 0x1f6ac17ae15521b9, + 0x334bea4e696bd284, + 0x2a1f6744ce179d8e, + ]); + + #[rustfmt::skip] + const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0x9e10460b6c3e7ea3, + 0xcbc0b548b438e546, + 0xdc2822db40c0ac2e, + 0x183227397098d014, + ]); + + // T and T_MINUS_ONE_DIV_TWO, where MODULUS - 1 = 2^S * T + + // T = (MODULUS - 1) // 2^S = + // 10944121435919637611123202872628637544348155578648911831344518947322613104291 + #[rustfmt::skip] + const T: BigInteger = BigInteger([ + 0x9e10460b6c3e7ea3, + 0xcbc0b548b438e546, + 0xdc2822db40c0ac2e, + 0x183227397098d014, + ]); + + // (T - 1) // 2 = + // 5472060717959818805561601436314318772174077789324455915672259473661306552145 + #[rustfmt::skip] + const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0x4f082305b61f3f51, + 0x65e05aa45a1c72a3, + 0x6e14116da0605617, + 0xc19139cb84c680a, + ]); +} + +pub const FQ_ONE: Fq = field_new!(Fq, "1"); +pub const FQ_ZERO: Fq = field_new!(Fq, "0"); diff --git a/arkworks/curves/bn254/src/fields/fq12.rs b/arkworks/curves/bn254/src/fields/fq12.rs new file mode 100644 index 00000000..9dcc521b --- /dev/null +++ b/arkworks/curves/bn254/src/fields/fq12.rs @@ -0,0 +1,77 @@ +use super::*; +use ark_ff::{field_new, fields::*}; + +pub type Fq12 = Fp12; + +#[derive(Clone, Copy)] +pub struct Fq12Parameters; + +impl Fp12Parameters for Fq12Parameters { + type Fp6Params = Fq6Parameters; + + const NONRESIDUE: Fq6 = field_new!(Fq6, FQ2_ZERO, FQ2_ONE, FQ2_ZERO); + + #[rustfmt::skip] + const FROBENIUS_COEFF_FP12_C1: &'static [Fq2] = &[ + // Fp2::NONRESIDUE^(((q^0) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "1"), + field_new!(Fq, "0"), + ), + // Fp2::NONRESIDUE^(((q^1) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "8376118865763821496583973867626364092589906065868298776909617916018768340080"), + field_new!(Fq, "16469823323077808223889137241176536799009286646108169935659301613961712198316"), + ), + // Fp2::NONRESIDUE^(((q^2) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, 
"21888242871839275220042445260109153167277707414472061641714758635765020556617"), + field_new!(Fq, "0"), + ), + // Fp2::NONRESIDUE^(((q^3) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "11697423496358154304825782922584725312912383441159505038794027105778954184319"), + field_new!(Fq, "303847389135065887422783454877609941456349188919719272345083954437860409601"), + ), + // Fp2::NONRESIDUE^(((q^4) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "21888242871839275220042445260109153167277707414472061641714758635765020556616"), + field_new!(Fq, "0"), + ), + // Fp2::NONRESIDUE^(((q^5) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "3321304630594332808241809054958361220322477375291206261884409189760185844239"), + field_new!(Fq, "5722266937896532885780051958958348231143373700109372999374820235121374419868"), + ), + // Fp2::NONRESIDUE^(((q^6) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "-1"), + field_new!(Fq, "0"), + ), + // Fp2::NONRESIDUE^(((q^7) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "13512124006075453725662431877630910996106405091429524885779419978626457868503"), + field_new!(Fq, "5418419548761466998357268504080738289687024511189653727029736280683514010267"), + ), + // Fp2::NONRESIDUE^(((q^8) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "2203960485148121921418603742825762020974279258880205651966"), + field_new!(Fq, "0"), + ), + // Fp2::NONRESIDUE^(((q^9) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "10190819375481120917420622822672549775783927716138318623895010788866272024264"), + field_new!(Fq, "21584395482704209334823622290379665147239961968378104390343953940207365798982"), + ), + // Fp2::NONRESIDUE^(((q^10) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "2203960485148121921418603742825762020974279258880205651967"), + field_new!(Fq, "0"), + ), + // Fp2::NONRESIDUE^(((q^11) - 1) / 6) + field_new!(Fq2, + field_new!(Fq, "18566938241244942414004596690298913868373833782006617400804628704885040364344"), + field_new!(Fq, "16165975933942742336466353786298926857552937457188450663314217659523851788715"), + ), + ]; +} diff --git a/arkworks/curves/bn254/src/fields/fq2.rs b/arkworks/curves/bn254/src/fields/fq2.rs new file mode 100644 index 00000000..8231c562 --- /dev/null +++ b/arkworks/curves/bn254/src/fields/fq2.rs @@ -0,0 +1,38 @@ +use super::*; +use ark_ff::{field_new, fields::*}; + +pub type Fq2 = Fp2; + +pub struct Fq2Parameters; + +impl Fp2Parameters for Fq2Parameters { + type Fp = Fq; + + /// NONRESIDUE = -1 + #[rustfmt::skip] + const NONRESIDUE: Fq = field_new!(Fq, "-1"); + + /// QUADRATIC_NONRESIDUE = U+2 + #[rustfmt::skip] + const QUADRATIC_NONRESIDUE: (Fq, Fq) = ( + field_new!(Fq, "2"), + field_new!(Fq, "1"), + ); + + /// Coefficients for the Frobenius automorphism. 
+ #[rustfmt::skip] + const FROBENIUS_COEFF_FP2_C1: &'static [Fq] = &[ + // NONRESIDUE**(((q^0) - 1) / 2) + field_new!(Fq, "1"), + // NONRESIDUE**(((q^1) - 1) / 2) + field_new!(Fq, "-1"), + ]; + + #[inline(always)] + fn mul_fp_by_nonresidue(fe: &Self::Fp) -> Self::Fp { + -(*fe) + } +} + +pub const FQ2_ZERO: Fq2 = field_new!(Fq2, FQ_ZERO, FQ_ZERO); +pub const FQ2_ONE: Fq2 = field_new!(Fq2, FQ_ONE, FQ_ZERO); diff --git a/arkworks/curves/bn254/src/fields/fq6.rs b/arkworks/curves/bn254/src/fields/fq6.rs new file mode 100644 index 00000000..09777a4b --- /dev/null +++ b/arkworks/curves/bn254/src/fields/fq6.rs @@ -0,0 +1,92 @@ +use super::*; +use ark_ff::{field_new, fields::*}; + +pub type Fq6 = Fp6; + +#[derive(Clone, Copy)] +pub struct Fq6Parameters; + +impl Fp6Parameters for Fq6Parameters { + type Fp2Params = Fq2Parameters; + + /// NONRESIDUE = U+9 + #[rustfmt::skip] + const NONRESIDUE: Fq2 = field_new!(Fq2, field_new!(Fq, "9"), field_new!(Fq, "1")); + + #[rustfmt::skip] + const FROBENIUS_COEFF_FP6_C1: &'static [Fq2] = &[ + // Fp2::NONRESIDUE^(((q^0) - 1) / 3) + field_new!(Fq2, + field_new!(Fq, "1"), + field_new!(Fq, "0"), + ), + // Fp2::NONRESIDUE^(((q^1) - 1) / 3) + field_new!(Fq2, + field_new!(Fq, "21575463638280843010398324269430826099269044274347216827212613867836435027261"), + field_new!(Fq, "10307601595873709700152284273816112264069230130616436755625194854815875713954"), + ), + // Fp2::NONRESIDUE^(((q^2) - 1) / 3) + field_new!(Fq2, + field_new!(Fq, "21888242871839275220042445260109153167277707414472061641714758635765020556616"), + field_new!(Fq, "0"), + ), + // Fp2::NONRESIDUE^(((q^3) - 1) / 3) + field_new!(Fq2, + field_new!(Fq, "3772000881919853776433695186713858239009073593817195771773381919316419345261"), + field_new!(Fq, "2236595495967245188281701248203181795121068902605861227855261137820944008926"), + ), + // Fp2::NONRESIDUE^(((q^4) - 1) / 3) + field_new!(Fq2, + field_new!(Fq, "2203960485148121921418603742825762020974279258880205651966"), + field_new!(Fq, "0"), + ), + // Fp2::NONRESIDUE^(((q^5) - 1) / 3) + field_new!(Fq2, + field_new!(Fq, "18429021223477853657660792034369865839114504446431234726392080002137598044644"), + field_new!(Fq, "9344045779998320333812420223237981029506012124075525679208581902008406485703"), + ), + ]; + #[rustfmt::skip] + const FROBENIUS_COEFF_FP6_C2: &'static [Fq2] = &[ + // Fp2::NONRESIDUE^((2*(q^0) - 2) / 3) + field_new!(Fq2, + field_new!(Fq, "1"), + field_new!(Fq, "0"), + ), + // Fp2::NONRESIDUE^((2*(q^1) - 2) / 3) + field_new!(Fq2, + field_new!(Fq, "2581911344467009335267311115468803099551665605076196740867805258568234346338"), + field_new!(Fq, "19937756971775647987995932169929341994314640652964949448313374472400716661030"), + ), + // Fp2::NONRESIDUE^((2*(q^2) - 2) / 3) + field_new!(Fq2, + field_new!(Fq, "2203960485148121921418603742825762020974279258880205651966"), + field_new!(Fq, "0"), + ), + // Fp2::NONRESIDUE^((2*(q^3) - 2) / 3) + field_new!(Fq2, + field_new!(Fq, "5324479202449903542726783395506214481928257762400643279780343368557297135718"), + field_new!(Fq, "16208900380737693084919495127334387981393726419856888799917914180988844123039"), + ), + // Fp2::NONRESIDUE^((2*(q^4) - 2) / 3) + field_new!(Fq2, + field_new!(Fq, "21888242871839275220042445260109153167277707414472061641714758635765020556616"), + field_new!(Fq, "0"), + ), + // Fp2::NONRESIDUE^((2*(q^5) - 2) / 3) + field_new!(Fq2, + field_new!(Fq, "13981852324922362344252311234282257507216387789820983642040889267519694726527"), + field_new!(Fq, 
"7629828391165209371577384193250820201684255241773809077146787135900891633097"), + ), + ]; + + #[inline(always)] + fn mul_fp2_by_nonresidue(fe: &Fq2) -> Fq2 { + // (c0+u*c1)*(9+u) = (9*c0-c1)+u*(9*c1+c0) + let mut f = *fe; + f.double_in_place().double_in_place().double_in_place(); + let c0 = f.c0 + fe.c0 + Fq2Parameters::mul_fp_by_nonresidue(&fe.c1); + let c1 = f.c1 + fe.c1 + fe.c0; + field_new!(Fq2, c0, c1) + } +} diff --git a/arkworks/curves/bn254/src/fields/fr.rs b/arkworks/curves/bn254/src/fields/fr.rs new file mode 100644 index 00000000..c756b998 --- /dev/null +++ b/arkworks/curves/bn254/src/fields/fr.rs @@ -0,0 +1,100 @@ +use ark_ff::{biginteger::BigInteger256 as BigInteger, fields::*}; + +pub type Fr = Fp256; + +pub struct FrParameters; + +impl Fp256Parameters for FrParameters {} +impl FftParameters for FrParameters { + type BigInt = BigInteger; + + const TWO_ADICITY: u32 = 28; + + #[rustfmt::skip] + const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([ + 7164790868263648668u64, + 11685701338293206998u64, + 6216421865291908056u64, + 1756667274303109607u64, + ]); +} +impl FpParameters for FrParameters { + /// MODULUS = 21888242871839275222246405745257275088548364400416034343698204186575808495617 + #[rustfmt::skip] + const MODULUS: BigInteger = BigInteger([ + 4891460686036598785u64, + 2896914383306846353u64, + 13281191951274694749u64, + 3486998266802970665u64, + ]); + + const MODULUS_BITS: u32 = 254; + + const CAPACITY: u32 = Self::MODULUS_BITS - 1; + + const REPR_SHAVE_BITS: u32 = 2; + + /// R = pow(2, 320) % MODULUS + /// = 6350874878119819312338956282401532410528162663560392320966563075034087161851 + #[rustfmt::skip] + const R: BigInteger = BigInteger([ + 12436184717236109307u64, + 3962172157175319849u64, + 7381016538464732718u64, + 1011752739694698287u64, + ]); + + /// R2 = R * R % MODULUS + /// = 944936681149208446651664254269745548490766851729442924617792859073125903783 + #[rustfmt::skip] + const R2: BigInteger = BigInteger([ + 1997599621687373223u64, + 6052339484930628067u64, + 10108755138030829701u64, + 150537098327114917u64, + ]); + + /// INV = (-MODULUS) ^ {-1} % pow(2, 64) = 14042775128853446655 + const INV: u64 = 14042775128853446655u64; + + /// GENERATOR = 5 + #[rustfmt::skip] + const GENERATOR: BigInteger = BigInteger([ + 1949230679015292902u64, + 16913946402569752895u64, + 5177146667339417225u64, + 1571765431670520771u64, + ]); + + /// (MODULUS - 1)/2 = + /// 10944121435919637611123202872628637544274182200208017171849102093287904247808 + #[rustfmt::skip] + const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0xa1f0fac9f8000000, + 0x9419f4243cdcb848, + 0xdc2822db40c0ac2e, + 0x183227397098d014, + ]); + + // T and T_MINUS_ONE_DIV_TWO, where r - 1 = 2^s * t + + /// T = (MODULUS - 1) / 2^s = + /// 81540058820840996586704275553141814055101440848469862132140264610111 + #[rustfmt::skip] + const T: BigInteger = BigInteger([ + 0x9b9709143e1f593f, + 0x181585d2833e8487, + 0x131a029b85045b68, + 0x30644e72e, + ]); + + /// (T - 1) / 2 = + /// 40770029410420498293352137776570907027550720424234931066070132305055 + #[rustfmt::skip] + const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0xcdcb848a1f0fac9f, + 0x0c0ac2e9419f4243, + 0x098d014dc2822db4, + 0x183227397, + ]); +} diff --git a/arkworks/curves/bn254/src/fields/mod.rs b/arkworks/curves/bn254/src/fields/mod.rs new file mode 100644 index 00000000..030e9387 --- /dev/null +++ b/arkworks/curves/bn254/src/fields/mod.rs @@ -0,0 +1,27 @@ +#[cfg(feature = "scalar_field")] +pub mod fr; +#[cfg(feature = "scalar_field")] +pub use 
self::fr::*; + +#[cfg(feature = "curve")] +pub mod fq; +#[cfg(feature = "curve")] +pub use self::fq::*; + +#[cfg(feature = "curve")] +pub mod fq2; +#[cfg(feature = "curve")] +pub use self::fq2::*; + +#[cfg(feature = "curve")] +pub mod fq6; +#[cfg(feature = "curve")] +pub use self::fq6::*; + +#[cfg(feature = "curve")] +pub mod fq12; +#[cfg(feature = "curve")] +pub use self::fq12::*; + +#[cfg(all(feature = "curve", test))] +mod tests; diff --git a/arkworks/curves/bn254/src/fields/tests.rs b/arkworks/curves/bn254/src/fields/tests.rs new file mode 100644 index 00000000..f78a32e5 --- /dev/null +++ b/arkworks/curves/bn254/src/fields/tests.rs @@ -0,0 +1,504 @@ +use ark_ff::{ + biginteger::{BigInteger, BigInteger256}, + fields::{ + fp6_3over2::Fp6Parameters, FftField, FftParameters, Field, FpParameters, PrimeField, + SquareRootField, + }, + One, UniformRand, Zero, +}; +use ark_serialize::{buffer_bit_byte_size, CanonicalSerialize}; +use ark_std::rand::Rng; +use ark_std::test_rng; +use core::{ + cmp::Ordering, + ops::{AddAssign, MulAssign, SubAssign}, +}; + +use crate::{Fq, Fq12, Fq2, Fq6, Fq6Parameters, FqParameters, Fr}; +use ark_algebra_test_templates::fields::*; + +pub(crate) const ITERATIONS: usize = 5; + +#[test] +fn test_fr() { + let mut rng = test_rng(); + for _ in 0..ITERATIONS { + let a: Fr = rng.gen(); + let b: Fr = rng.gen(); + field_test(a, b); + primefield_test::<Fr>(); + sqrt_field_test(b); + let byte_size = a.serialized_size(); + field_serialization_test::<Fr>(byte_size); + } +} + +#[test] +fn test_fq() { + let mut rng = test_rng(); + for _ in 0..ITERATIONS { + let a: Fq = rng.gen(); + let b: Fq = rng.gen(); + field_test(a, b); + primefield_test::<Fq>(); + sqrt_field_test(a); + let byte_size = a.serialized_size(); + let (_, buffer_size) = buffer_bit_byte_size(Fq::size_in_bits()); + assert_eq!(byte_size, buffer_size); + field_serialization_test::<Fq>(byte_size); + } +} + +#[test] +fn test_fq2() { + let mut rng = test_rng(); + for _ in 0..ITERATIONS { + let a: Fq2 = rng.gen(); + let b: Fq2 = rng.gen(); + field_test(a, b); + sqrt_field_test(a); + } + frobenius_test::<Fq2, _>(Fq::characteristic(), 13); + let byte_size = Fq2::zero().serialized_size(); + field_serialization_test::<Fq2>(byte_size); +} + +#[test] +fn test_fq6() { + let mut rng = test_rng(); + for _ in 0..ITERATIONS { + let g: Fq6 = rng.gen(); + let h: Fq6 = rng.gen(); + field_test(g, h); + } + frobenius_test::<Fq6, _>(Fq::characteristic(), 13); + let byte_size = Fq6::zero().serialized_size(); + field_serialization_test::<Fq6>(byte_size); +} + +#[test] +fn test_fq12() { + let mut rng = test_rng(); + for _ in 0..ITERATIONS { + let g: Fq12 = rng.gen(); + let h: Fq12 = rng.gen(); + field_test(g, h); + } + frobenius_test::<Fq12, _>(Fq::characteristic(), 13); + let byte_size = Fq12::zero().serialized_size(); + field_serialization_test::<Fq12>(byte_size); +} + +#[test] +fn test_fq_repr_from() { + assert_eq!(BigInteger256::from(100), BigInteger256([100, 0, 0, 0])); +} + +#[test] +fn test_fq_repr_is_odd() { + assert!(!BigInteger256::from(0).is_odd()); + assert!(BigInteger256::from(0).is_even()); + assert!(BigInteger256::from(1).is_odd()); + assert!(!BigInteger256::from(1).is_even()); + assert!(!BigInteger256::from(324834872).is_odd()); + assert!(BigInteger256::from(324834872).is_even()); + assert!(BigInteger256::from(324834873).is_odd()); + assert!(!BigInteger256::from(324834873).is_even()); +} + +#[test] +fn test_fq_repr_is_zero() { + assert!(BigInteger256::from(0).is_zero()); + assert!(!BigInteger256::from(1).is_zero()); + assert!(!BigInteger256([0, 0, 1, 0]).is_zero()); +} + +#[test] 
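+// num_bits() is the index of the highest set bit plus one; after the 256th doubling below, the value wraps to zero, so 0 bits are expected again.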
+fn test_fq_repr_num_bits() { + let mut a = BigInteger256::from(0); + assert_eq!(0, a.num_bits()); + a = BigInteger256::from(1); + for i in 1..257 { + assert_eq!(i, a.num_bits()); + a.mul2(); + } + assert_eq!(0, a.num_bits()); +} + +#[test] +fn test_fq_add_assign() { + // Test associativity + + let mut rng = ark_std::test_rng(); + + for _ in 0..1000 { + // Generate a, b, c and ensure (a + b) + c == a + (b + c). + let a = Fq::rand(&mut rng); + let b = Fq::rand(&mut rng); + let c = Fq::rand(&mut rng); + + let mut tmp1 = a; + tmp1.add_assign(&b); + tmp1.add_assign(&c); + + let mut tmp2 = b; + tmp2.add_assign(&c); + tmp2.add_assign(&a); + + assert_eq!(tmp1, tmp2); + } +} + +#[test] +fn test_fq_sub_assign() { + let mut rng = ark_std::test_rng(); + + for _ in 0..1000 { + // Ensure that (a - b) + (b - a) = 0. + let a = Fq::rand(&mut rng); + let b = Fq::rand(&mut rng); + + let mut tmp1 = a; + tmp1.sub_assign(&b); + + let mut tmp2 = b; + tmp2.sub_assign(&a); + + tmp1.add_assign(&tmp2); + assert!(tmp1.is_zero()); + } +} + +#[test] +fn test_fq_mul_assign() { + let mut rng = ark_std::test_rng(); + + for _ in 0..1000000 { + // Ensure that (a * b) * c = a * (b * c) + let a = Fq::rand(&mut rng); + let b = Fq::rand(&mut rng); + let c = Fq::rand(&mut rng); + + let mut tmp1 = a; + tmp1.mul_assign(&b); + tmp1.mul_assign(&c); + + let mut tmp2 = b; + tmp2.mul_assign(&c); + tmp2.mul_assign(&a); + + assert_eq!(tmp1, tmp2); + } + + for _ in 0..1000000 { + // Ensure that r * (a + b + c) = r*a + r*b + r*c + + let r = Fq::rand(&mut rng); + let mut a = Fq::rand(&mut rng); + let mut b = Fq::rand(&mut rng); + let mut c = Fq::rand(&mut rng); + + let mut tmp1 = a; + tmp1.add_assign(&b); + tmp1.add_assign(&c); + tmp1.mul_assign(&r); + + a.mul_assign(&r); + b.mul_assign(&r); + c.mul_assign(&r); + + a.add_assign(&b); + a.add_assign(&c); + + assert_eq!(tmp1, a); + } +} + +#[test] +fn test_fq_squaring() { + let mut rng = ark_std::test_rng(); + + for _ in 0..1000000 { + // Ensure that (a * a) = a^2 + let a = Fq::rand(&mut rng); + + let mut tmp = a; + tmp.square_in_place(); + + let mut tmp2 = a; + tmp2.mul_assign(&a); + + assert_eq!(tmp, tmp2); + } +} + +#[test] +fn test_fq_inverse() { + assert!(Fq::zero().inverse().is_none()); + + let mut rng = ark_std::test_rng(); + + let one = Fq::one(); + + for _ in 0..1000 { + // Ensure that a * a^-1 = 1 + let mut a = Fq::rand(&mut rng); + let ainv = a.inverse().unwrap(); + a.mul_assign(&ainv); + assert_eq!(a, one); + } +} + +#[test] +fn test_fq_double_in_place() { + let mut rng = ark_std::test_rng(); + + for _ in 0..1000 { + // Ensure doubling a is equivalent to adding a to itself. + let mut a = Fq::rand(&mut rng); + let mut b = a; + b.add_assign(&a); + a.double_in_place(); + assert_eq!(a, b); + } +} + +#[test] +fn test_fq_negate() { + { + let a = -Fq::zero(); + + assert!(a.is_zero()); + } + + let mut rng = ark_std::test_rng(); + + for _ in 0..1000 { + // Ensure a + (-a) = 0. + let mut a = Fq::rand(&mut rng); + let b = -a; + a.add_assign(&b); + + assert!(a.is_zero()); + } +} + +#[test] +fn test_fq_pow() { + let mut rng = ark_std::test_rng(); + + for i in 0..1000 { + // Exponentiate by various small numbers and ensure it is consistent with repeated + // multiplication. + let a = Fq::rand(&mut rng); + let target = a.pow(&[i]); + let mut c = Fq::one(); + for _ in 0..i { + c.mul_assign(&a); + } + assert_eq!(c, target); + } + + for _ in 0..1000 { + // Exponentiating by the modulus should have no effect in a prime field. 
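+        // (By Fermat's little theorem, a^q = a for every a in Fq.)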
+ let a = Fq::rand(&mut rng); + + assert_eq!(a, a.pow(Fq::characteristic())); + } +} + +#[test] +fn test_fq_sqrt() { + let mut rng = ark_std::test_rng(); + + assert_eq!(Fq::zero().sqrt().unwrap(), Fq::zero()); + + for _ in 0..1000 { + // Ensure sqrt(a^2) = a or -a + let a = Fq::rand(&mut rng); + let nega = -a; + let mut b = a; + b.square_in_place(); + + let b = b.sqrt().unwrap(); + + assert!(a == b || nega == b); + } + + for _ in 0..1000 { + // Ensure sqrt(a)^2 = a for random a + let a = Fq::rand(&mut rng); + + if let Some(mut tmp) = a.sqrt() { + tmp.square_in_place(); + + assert_eq!(a, tmp); + } + } +} + +#[test] +fn test_fq_num_bits() { + assert_eq!(FqParameters::MODULUS_BITS, 254); + assert_eq!(FqParameters::CAPACITY, 253); +} + +#[test] +fn test_fq_root_of_unity() { + assert_eq!(FqParameters::TWO_ADICITY, 1); + assert_eq!( + Fq::multiplicative_generator().pow([ + 0x9e10460b6c3e7ea3, + 0xcbc0b548b438e546, + 0xdc2822db40c0ac2e, + 0x183227397098d014, + ]), + Fq::two_adic_root_of_unity() + ); + assert_eq!( + Fq::two_adic_root_of_unity().pow([1 << FqParameters::TWO_ADICITY]), + Fq::one() + ); + assert!(Fq::multiplicative_generator().sqrt().is_none()); +} + +#[test] +fn test_fq_ordering() { + // BigInteger256's ordering is well-tested, but we still need to make sure the + // Fq elements aren't being compared in Montgomery form. + for i in 0..100 { + assert!(Fq::from(BigInteger256::from(i + 1)) > Fq::from(BigInteger256::from(i))); + } +} + +#[test] +fn test_fq_legendre() { + use ark_ff::fields::LegendreSymbol::*; + + assert_eq!(QuadraticResidue, Fq::one().legendre()); + assert_eq!(Zero, Fq::zero().legendre()); + assert_eq!( + QuadraticResidue, + Fq::from(BigInteger256::from(4)).legendre() + ); + assert_eq!( + QuadraticNonResidue, + Fq::from(BigInteger256::from(5)).legendre() + ); +} + +#[test] +fn test_fq2_ordering() { + let mut a = Fq2::new(Fq::zero(), Fq::zero()); + let mut b = a.clone(); + + assert!(a.cmp(&b) == Ordering::Equal); + b.c0.add_assign(&Fq::one()); + assert!(a.cmp(&b) == Ordering::Less); + a.c0.add_assign(&Fq::one()); + assert!(a.cmp(&b) == Ordering::Equal); + b.c1.add_assign(&Fq::one()); + assert!(a.cmp(&b) == Ordering::Less); + a.c0.add_assign(&Fq::one()); + assert!(a.cmp(&b) == Ordering::Less); + a.c1.add_assign(&Fq::one()); + assert!(a.cmp(&b) == Ordering::Greater); + b.c0.add_assign(&Fq::one()); + assert!(a.cmp(&b) == Ordering::Equal); +} + +#[test] +fn test_fq2_basics() { + assert_eq!(Fq2::new(Fq::zero(), Fq::zero(),), Fq2::zero()); + assert_eq!(Fq2::new(Fq::one(), Fq::zero(),), Fq2::one()); + assert!(Fq2::zero().is_zero()); + assert!(!Fq2::one().is_zero()); + assert!(!Fq2::new(Fq::zero(), Fq::one(),).is_zero()); +} + +#[test] +fn test_fq2_legendre() { + use ark_ff::fields::LegendreSymbol::*; + + assert_eq!(Zero, Fq2::zero().legendre()); + // i^2 = -1 + let mut m1 = -Fq2::one(); + assert_eq!(QuadraticResidue, m1.legendre()); + m1 = Fq6Parameters::mul_fp2_by_nonresidue(&m1); + assert_eq!(QuadraticNonResidue, m1.legendre()); +} + +#[test] +fn test_fq6_mul_by_1() { + let mut rng = ark_std::test_rng(); + + for _ in 0..1000 { + let c1 = Fq2::rand(&mut rng); + let mut a = Fq6::rand(&mut rng); + let mut b = a; + + a.mul_by_1(&c1); + b.mul_assign(&Fq6::new(Fq2::zero(), c1, Fq2::zero())); + + assert_eq!(a, b); + } +} + +#[test] +fn test_fq6_mul_by_01() { + let mut rng = ark_std::test_rng(); + + for _ in 0..1000 { + let c0 = Fq2::rand(&mut rng); + let c1 = Fq2::rand(&mut rng); + let mut a = Fq6::rand(&mut rng); + let mut b = a; + + a.mul_by_01(&c0, &c1); + 
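// mul_by_01 multiplies by the sparse Fq6 element c0 + c1*v (with c2 = 0); it must agree with the full product below. +        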
b.mul_assign(&Fq6::new(c0, c1, Fq2::zero())); + + assert_eq!(a, b); + } +} + +#[test] +fn test_fq12_mul_by_014() { + let mut rng = ark_std::test_rng(); + + for _ in 0..1000 { + let c0 = Fq2::rand(&mut rng); + let c1 = Fq2::rand(&mut rng); + let c5 = Fq2::rand(&mut rng); + let mut a = Fq12::rand(&mut rng); + let mut b = a; + + a.mul_by_014(&c0, &c1, &c5); + b.mul_assign(&Fq12::new( + Fq6::new(c0, c1, Fq2::zero()), + Fq6::new(Fq2::zero(), c5, Fq2::zero()), + )); + + assert_eq!(a, b); + } +} + +#[test] +fn test_fq12_mul_by_034() { + let mut rng = ark_std::test_rng(); + + for _ in 0..1000 { + let c0 = Fq2::rand(&mut rng); + let c3 = Fq2::rand(&mut rng); + let c4 = Fq2::rand(&mut rng); + let mut a = Fq12::rand(&mut rng); + let mut b = a; + + a.mul_by_034(&c0, &c3, &c4); + b.mul_assign(&Fq12::new( + Fq6::new(c0, Fq2::zero(), Fq2::zero()), + Fq6::new(c3, c4, Fq2::zero()), + )); + + assert_eq!(a, b); + } +} diff --git a/arkworks/curves/bn254/src/lib.rs b/arkworks/curves/bn254/src/lib.rs new file mode 100644 index 00000000..a2374f0a --- /dev/null +++ b/arkworks/curves/bn254/src/lib.rs @@ -0,0 +1,40 @@ +#![cfg_attr(not(feature = "std"), no_std)] +#![deny( + warnings, + unused, + future_incompatible, + nonstandard_style, + rust_2018_idioms +)] +#![forbid(unsafe_code)] + +//! This library implements the BN254 curve that was sampled as part of the [\[BCTV14\]](https://eprint.iacr.org/2013/879.pdf) paper. +//! The name denotes that it is a Barreto--Naehrig curve of embedding degree 12, +//! defined over a 254-bit (prime) field. The scalar field is highly 2-adic. +//! +//! This curve is also implemented in [libff](https://github.com/scipr-lab/libff/tree/master/libff/algebra/curves/alt_bn128) under the name `bn128`. +//! It is the same as the `bn256` curve used in Ethereum (e.g., [go-ethereum](https://github.com/ethereum/go-ethereum/tree/master/crypto/bn254/cloudflare)). +//! +//! # CAUTION +//! **This curve does not satisfy the 128-bit security level anymore.** +//! +//! +//! Curve information: +//! * Base field: q = 21888242871839275222246405745257275088696311157297823662689037894645226208583 +//! * Scalar field: r = 21888242871839275222246405745257275088548364400416034343698204186575808495617 +//! * valuation(q - 1, 2) = 1 +//! * valuation(r - 1, 2) = 28 +//! * G1 curve equation: y^2 = x^3 + 3 +//! * G2 curve equation: y^2 = x^3 + B, where +//! * B = 3/(u+9) where Fq2 is represented as Fq\[u\]/(u^2+1) +//! 
= Fq2(19485874751759354771024239261021720505790618469301721065564631296452457478373, 266929791119991161246907387137283842545076965332900288569378510910307636690) + +#[cfg(feature = "curve")] +mod curves; + +mod fields; + +#[cfg(feature = "curve")] +pub use curves::*; + +pub use fields::*; diff --git a/arkworks/curves/bw6_761/Cargo.toml b/arkworks/curves/bw6_761/Cargo.toml new file mode 100644 index 00000000..442768b8 --- /dev/null +++ b/arkworks/curves/bw6_761/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "ark-bw6-761" +version = "0.3.0" +authors = [ "arkworks contributors" ] +description = "The BW6-761 pairing-friendly elliptic curve" +homepage = "https://arkworks.rs" +repository = "https://github.com/arkworks-rs/algebra" +documentation = "https://docs.rs/ark-bw6-761/" +keywords = ["cryptography", "finite-fields", "elliptic-curves" ] +categories = ["cryptography"] +include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +license = "MIT/Apache-2.0" +edition = "2018" + +[dependencies] +ark-ff = { version="^0.3.0", default-features = false } +ark-ec = { version="^0.3.0", default-features = false } +ark-std = { version="^0.3.0", default-features = false } +ark-bls12-377 = { version="^0.3.0", path = "../bls12_377", default-features = false, features = [ "base_field" ] } + +[dev-dependencies] +ark-serialize = { version="^0.3.0", default-features = false } +ark-algebra-test-templates = { version="^0.3.0", default-features = false } + +[features] +default = [] +std = [ "ark-std/std", "ark-ff/std", "ark-ec/std", "ark-bls12-377/std" ] diff --git a/arkworks/curves/bw6_761/src/curves/g1.rs b/arkworks/curves/bw6_761/src/curves/g1.rs new file mode 100644 index 00000000..3d405d16 --- /dev/null +++ b/arkworks/curves/bw6_761/src/curves/g1.rs @@ -0,0 +1,64 @@ +use crate::{Fq, Fr}; +use ark_ec::{ + models::{ModelParameters, SWModelParameters}, + short_weierstrass_jacobian::{GroupAffine, GroupProjective}, +}; +use ark_ff::field_new; + +pub type G1Affine = GroupAffine; +pub type G1Projective = GroupProjective; + +#[derive(Clone, Default, PartialEq, Eq)] +pub struct Parameters; + +impl ModelParameters for Parameters { + type BaseField = Fq; + type ScalarField = Fr; +} + +impl SWModelParameters for Parameters { + /// COEFF_A = 0 + #[rustfmt::skip] + + const COEFF_A: Fq = field_new!(Fq, "0"); + + /// COEFF_B = -1 + #[rustfmt::skip] + const COEFF_B: Fq = field_new!(Fq, "-1"); + + /// COFACTOR = + /// 26642435879335816683987677701488073867751118270052650655942102502312977592501693353047140953112195348280268661194876 + #[rustfmt::skip] + const COFACTOR: &'static [u64] = &[ + 0x3de580000000007c, + 0x832ba4061000003b, + 0xc61c554757551c0c, + 0xc856a0853c9db94c, + 0x2c77d5ac34cb12ef, + 0xad1972339049ce76, + ]; + + /// COFACTOR^(-1) mod r = + /// 91141326767669940707819291241958318717982251277713150053234367522357946997763584490607453720072232540829942217804 + #[rustfmt::skip] + const COFACTOR_INV: Fr = field_new!(Fr, "91141326767669940707819291241958318717982251277713150053234367522357946997763584490607453720072232540829942217804"); + + /// AFFINE_GENERATOR_COEFFS = (G1_GENERATOR_X, G1_GENERATOR_Y) + const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = + (G1_GENERATOR_X, G1_GENERATOR_Y); + #[inline(always)] + fn mul_by_a(_elem: &Self::BaseField) -> Self::BaseField { + use ark_ff::Zero; + Self::BaseField::zero() + } +} + +/// G1_GENERATOR_X = +/// 
6238772257594679368032145693622812838779005809760824733138787810501188623461307351759238099287535516224314149266511977132140828635950940021790489507611754366317801811090811367945064510304504157188661901055903167026722666149426237 +#[rustfmt::skip] +pub const G1_GENERATOR_X: Fq = field_new!(Fq, "6238772257594679368032145693622812838779005809760824733138787810501188623461307351759238099287535516224314149266511977132140828635950940021790489507611754366317801811090811367945064510304504157188661901055903167026722666149426237"); + +/// G1_GENERATOR_Y = +/// 2101735126520897423911504562215834951148127555913367997162789335052900271653517958562461315794228241561913734371411178226936527683203879553093934185950470971848972085321797958124416462268292467002957525517188485984766314758624099 +#[rustfmt::skip] +pub const G1_GENERATOR_Y: Fq = field_new!(Fq, "2101735126520897423911504562215834951148127555913367997162789335052900271653517958562461315794228241561913734371411178226936527683203879553093934185950470971848972085321797958124416462268292467002957525517188485984766314758624099"); diff --git a/arkworks/curves/bw6_761/src/curves/g2.rs b/arkworks/curves/bw6_761/src/curves/g2.rs new file mode 100644 index 00000000..d908aa04 --- /dev/null +++ b/arkworks/curves/bw6_761/src/curves/g2.rs @@ -0,0 +1,64 @@ +use crate::{Fq, Fr}; +use ark_ec::{ + models::{ModelParameters, SWModelParameters}, + short_weierstrass_jacobian::{GroupAffine, GroupProjective}, +}; +use ark_ff::field_new; + +pub type G2Affine = GroupAffine; +pub type G2Projective = GroupProjective; + +#[derive(Clone, Default, PartialEq, Eq)] +pub struct Parameters; + +impl ModelParameters for Parameters { + type BaseField = Fq; + type ScalarField = Fr; +} + +impl SWModelParameters for Parameters { + /// COEFF_A = 0 + #[rustfmt::skip] + + const COEFF_A: Fq = field_new!(Fq, "0"); + + /// COEFF_B = 4 + #[rustfmt::skip] + const COEFF_B: Fq = field_new!(Fq, "4"); + + /// COFACTOR = + /// 26642435879335816683987677701488073867751118270052650655942102502312977592501693353047140953112195348280268661194869 + #[rustfmt::skip] + const COFACTOR: &'static [u64] = &[ + 0x3de5800000000075, + 0x832ba4061000003b, + 0xc61c554757551c0c, + 0xc856a0853c9db94c, + 0x2c77d5ac34cb12ef, + 0xad1972339049ce76, + ]; + + /// COFACTOR^(-1) mod r = + /// 214911522365886453591244899095480747723790054550866810551297776298664428889000553861210287833206024638187939842124 + #[rustfmt::skip] + const COFACTOR_INV: Fr = field_new!(Fr, "214911522365886453591244899095480747723790054550866810551297776298664428889000553861210287833206024638187939842124"); + + /// AFFINE_GENERATOR_COEFFS = (G2_GENERATOR_X, G2_GENERATOR_Y) + const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = + (G2_GENERATOR_X, G2_GENERATOR_Y); + #[inline(always)] + fn mul_by_a(_elem: &Self::BaseField) -> Self::BaseField { + use ark_ff::Zero; + Self::BaseField::zero() + } +} + +/// G2_GENERATOR_X = +/// 6445332910596979336035888152774071626898886139774101364933948236926875073754470830732273879639675437155036544153105017729592600560631678554299562762294743927912429096636156401171909259073181112518725201388196280039960074422214428 +#[rustfmt::skip] +pub const G2_GENERATOR_X: Fq = field_new!(Fq, "6445332910596979336035888152774071626898886139774101364933948236926875073754470830732273879639675437155036544153105017729592600560631678554299562762294743927912429096636156401171909259073181112518725201388196280039960074422214428"); + +/// G2_GENERATOR_Y = +/// 
562923658089539719386922163444547387757586534741080263946953401595155211934630598999300396317104182598044793758153214972605680357108252243146746187917218885078195819486220416605630144001533548163105316661692978285266378674355041 +#[rustfmt::skip] +pub const G2_GENERATOR_Y: Fq = field_new!(Fq, "562923658089539719386922163444547387757586534741080263946953401595155211934630598999300396317104182598044793758153214972605680357108252243146746187917218885078195819486220416605630144001533548163105316661692978285266378674355041"); diff --git a/arkworks/curves/bw6_761/src/curves/mod.rs b/arkworks/curves/bw6_761/src/curves/mod.rs new file mode 100644 index 00000000..cfc54ee4 --- /dev/null +++ b/arkworks/curves/bw6_761/src/curves/mod.rs @@ -0,0 +1,61 @@ +use crate::*; +use ark_ec::{ + bw6, + bw6::{BW6Parameters, TwistType, BW6}, +}; +use ark_ff::biginteger::BigInteger768 as BigInteger; + +pub mod g1; +pub mod g2; + +#[cfg(test)] +mod tests; + +#[derive(PartialEq, Eq)] +pub struct Parameters; + +impl BW6Parameters for Parameters { + const X: BigInteger = BigInteger([ + 0x8508c00000000001, + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + 0x0, + ]); + /// `x` is positive. + const X_IS_NEGATIVE: bool = false; + // X+1 + const ATE_LOOP_COUNT_1: &'static [u64] = &[0x8508c00000000002]; + const ATE_LOOP_COUNT_1_IS_NEGATIVE: bool = false; + // X^3-X^2-X + const ATE_LOOP_COUNT_2: &'static [i8] = &[ + -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 1, 0, 0, -1, 0, 1, 0, -1, 0, 0, 0, 0, -1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, + 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, -1, 0, 0, 0, 0, -1, 0, 0, + 1, 0, 0, 0, -1, 0, 0, -1, 0, 1, 0, -1, 0, 0, 0, 1, 0, 0, 1, 0, -1, 0, 1, 0, 1, 0, 0, 0, 1, + 0, -1, 0, -1, 0, 0, 0, 0, 0, 1, 0, 0, 1, + ]; + const ATE_LOOP_COUNT_2_IS_NEGATIVE: bool = false; + const TWIST_TYPE: TwistType = TwistType::M; + type Fp = Fq; + type Fp3Params = Fq3Parameters; + type Fp6Params = Fq6Parameters; + type G1Parameters = g1::Parameters; + type G2Parameters = g2::Parameters; +} + +pub type BW6_761 = BW6; + +pub type G1Affine = bw6::G1Affine; +pub type G1Projective = bw6::G1Projective; +pub type G2Affine = bw6::G2Affine; +pub type G2Projective = bw6::G2Projective; diff --git a/arkworks/curves/bw6_761/src/curves/tests.rs b/arkworks/curves/bw6_761/src/curves/tests.rs new file mode 100644 index 00000000..bd577372 --- /dev/null +++ b/arkworks/curves/bw6_761/src/curves/tests.rs @@ -0,0 +1,78 @@ +use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve}; +use ark_ff::{Field, One, PrimeField}; +use ark_std::rand::Rng; +use ark_std::test_rng; + +use crate::*; + +use ark_algebra_test_templates::{curves::*, groups::*}; + +#[test] +fn test_g1_projective_curve() { + curve_tests::(); + + sw_tests::(); +} + +#[test] +fn test_g1_projective_group() { + let mut rng = test_rng(); + let a: G1Projective = rng.gen(); + let b: G1Projective = rng.gen(); + group_test(a, b); +} + +#[test] +fn test_g1_generator() { + let generator = G1Affine::prime_subgroup_generator(); + assert!(generator.is_on_curve()); + assert!(generator.is_in_correct_subgroup_assuming_on_curve()); +} + +#[test] +fn test_g2_projective_curve() { + curve_tests::(); + + sw_tests::(); +} + +#[test] +fn test_g2_projective_group() { + let mut rng = test_rng(); + let a: G2Projective = 
rng.gen(); + let b: G2Projective = rng.gen(); + group_test(a, b); +} + +#[test] +fn test_g2_generator() { + let generator = G2Affine::prime_subgroup_generator(); + assert!(generator.is_on_curve()); + assert!(generator.is_in_correct_subgroup_assuming_on_curve()); +} + +#[test] +fn test_bilinearity() { + let mut rng = test_rng(); + let a: G1Projective = rng.gen(); + let b: G2Projective = rng.gen(); + let s: Fr = rng.gen(); + + let sa = a.mul(s.into_repr()); + let sb = b.mul(s.into_repr()); + + let ans1 = BW6_761::pairing(sa, b); + let ans2 = BW6_761::pairing(a, sb); + let ans3 = BW6_761::pairing(a, b).pow(s.into_repr()); + + assert_eq!(ans1, ans2); + assert_eq!(ans2, ans3); + + assert_ne!(ans1, Fq6::one()); + assert_ne!(ans2, Fq6::one()); + assert_ne!(ans3, Fq6::one()); + + assert_eq!(ans1.pow(Fr::characteristic()), Fq6::one()); + assert_eq!(ans2.pow(Fr::characteristic()), Fq6::one()); + assert_eq!(ans3.pow(Fr::characteristic()), Fq6::one()); +} diff --git a/arkworks/curves/bw6_761/src/fields/fq.rs b/arkworks/curves/bw6_761/src/fields/fq.rs new file mode 100644 index 00000000..bcd252ec --- /dev/null +++ b/arkworks/curves/bw6_761/src/fields/fq.rs @@ -0,0 +1,175 @@ +use ark_ff::{ + biginteger::BigInteger768 as BigInteger, + field_new, + fields::{FftParameters, Fp768, Fp768Parameters, FpParameters}, +}; + +pub type Fq = Fp768<FqParameters>; + +pub struct FqParameters; + +pub const FQ_ONE: Fq = field_new!(Fq, "1"); +pub const FQ_ZERO: Fq = field_new!(Fq, "0"); + +impl Fp768Parameters for FqParameters {} +impl FftParameters for FqParameters { + type BigInt = BigInteger; + + // The internal representation of this type is twelve 64-bit unsigned + // integers in little-endian order. Values are always in + // Montgomery form; i.e., an element a is stored as aR mod p, with R=2^768. + + // (MODULUS - 1) % 2^TWO_ADICITY == 0 + const TWO_ADICITY: u32 = 1; + + // least_quadratic_nonresidue(MODULUS) in Sage. 
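+    // Since TWO_ADICITY = 1 here, the required primitive 2^1-th root of unity is simply -1, encoded below in Montgomery form.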
+ #[rustfmt::skip] + const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([ + 17481284903592032950u64, + 10104133845767975835u64, + 8607375506753517913u64, + 13706168424391191299u64, + 9580010308493592354u64, + 14241333420363995524u64, + 6665632285037357566u64, + 5559902898979457045u64, + 15504799981718861253u64, + 8332096944629367896u64, + 18005297320867222879u64, + 58811391084848524u64, + ]); +} +impl FpParameters for FqParameters { + /// MODULUS = 6891450384315732539396789682275657542479668912536150109513790160209623422243491736087683183289411687640864567753786613451161759120554247759349511699125301598951605099378508850372543631423596795951899700429969112842764913119068299 + #[rustfmt::skip] + const MODULUS: BigInteger = BigInteger([ + 0xf49d00000000008b, + 0xe6913e6870000082, + 0x160cf8aeeaf0a437, + 0x98a116c25667a8f8, + 0x71dcd3dc73ebff2e, + 0x8689c8ed12f9fd90, + 0x03cebaff25b42304, + 0x707ba638e584e919, + 0x528275ef8087be41, + 0xb926186a81d14688, + 0xd187c94004faff3e, + 0x122e824fb83ce0a + ]); + + const MODULUS_BITS: u32 = 761; + + const CAPACITY: u32 = Self::MODULUS_BITS - 1; + + // gap to 64-bit machine word + const REPR_SHAVE_BITS: u32 = 7; + + // 2^768 % MODULUS + #[rustfmt::skip] + const R: BigInteger = BigInteger([ + 144959613005956565u64, + 6509995272855063783u64, + 11428286765660613342u64, + 15738672438262922740u64, + 17071399330169272331u64, + 13899911246788437003u64, + 12055474021000362245u64, + 2545351818702954755u64, + 8887388221587179644u64, + 5009280847225881135u64, + 15539704305423854047u64, + 23071597697427581u64, + ]); + + // R^2 + #[rustfmt::skip] + const R2: BigInteger = BigInteger([ + 14305184132582319705u64, + 8868935336694416555u64, + 9196887162930508889u64, + 15486798265448570248u64, + 5402985275949444416u64, + 10893197322525159598u64, + 3204916688966998390u64, + 12417238192559061753u64, + 12426306557607898622u64, + 1305582522441154384u64, + 10311846026977660324u64, + 48736111365249031u64, + ]); + + // (-1/MODULUS) % 2^64 + const INV: u64 = 744663313386281181u64; + + /// GENERATOR = 2 + // primitive_root(MODULUS) + #[rustfmt::skip] + const GENERATOR: BigInteger = BigInteger([ + 289919226011913130u64, + 13019990545710127566u64, + 4409829457611675068u64, + 13030600802816293865u64, + 15696054586628993047u64, + 9353078419867322391u64, + 5664203968291172875u64, + 5090703637405909511u64, + 17774776443174359288u64, + 10018561694451762270u64, + 12632664537138156478u64, + 46143195394855163u64, + ]); + + // (MODULUS - 1) / 2 + #[rustfmt::skip] + const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0x7a4e800000000045, + 0xf3489f3438000041, + 0x0b067c577578521b, + 0x4c508b612b33d47c, + 0x38ee69ee39f5ff97, + 0x4344e476897cfec8, + 0x81e75d7f92da1182, + 0xb83dd31c72c2748c, + 0x29413af7c043df20, + 0x5c930c3540e8a344, + 0x68c3e4a0027d7f9f, + 0x9174127dc1e705, + ]); + + // T = + // 3445725192157866269698394841137828771239834456268075054756895080104811711121745868043841591644705843820432283876893306725580879560277123879674755849562650799475802549689254425186271815711798397975949850214984556421382456559534149 + // (MODULUS - 1) / 2 ^ TWO_ADICITY + #[rustfmt::skip] + const T: BigInteger = BigInteger([ + 0x7a4e800000000045, + 0xf3489f3438000041, + 0x0b067c577578521b, + 0x4c508b612b33d47c, + 0x38ee69ee39f5ff97, + 0x4344e476897cfec8, + 0x81e75d7f92da1182, + 0xb83dd31c72c2748c, + 0x29413af7c043df20, + 0x5c930c3540e8a344, + 0x68c3e4a0027d7f9f, + 0x9174127dc1e705, + ]); + + // (T - 1)/2 = + // 
1722862596078933134849197420568914385619917228134037527378447540052405855560872934021920795822352921910216141938446653362790439780138561939837377924781325399737901274844627212593135907855899198987974925107492278210691228279767074 + #[rustfmt::skip] + const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0xbd27400000000022, + 0xf9a44f9a1c000020, + 0x05833e2bbabc290d, + 0xa62845b09599ea3e, + 0x1c7734f71cfaffcb, + 0x21a2723b44be7f64, + 0x40f3aebfc96d08c1, + 0x5c1ee98e39613a46, + 0x14a09d7be021ef90, + 0xae49861aa07451a2, + 0xb461f250013ebfcf, + 0x48ba093ee0f382, + ]); +} diff --git a/arkworks/curves/bw6_761/src/fields/fq3.rs b/arkworks/curves/bw6_761/src/fields/fq3.rs new file mode 100644 index 00000000..0c499551 --- /dev/null +++ b/arkworks/curves/bw6_761/src/fields/fq3.rs @@ -0,0 +1,97 @@ +use ark_ff::{ + field_new, + fields::fp3::{Fp3, Fp3Parameters}, +}; + +use crate::{ + fields::{FQ_ONE, FQ_ZERO}, + Fq, +}; + +pub type Fq3 = Fp3; + +pub struct Fq3Parameters; + +impl Fp3Parameters for Fq3Parameters { + type Fp = Fq; + + /// NONRESIDUE = -4 + // Fq3 = Fq\[u\]/u^3+4 + #[rustfmt::skip] + const NONRESIDUE: Fq = field_new!(Fq, "-4"); + + // (MODULUS^3 - 1) % 2^TWO_ADICITY == 0 + const TWO_ADICITY: u32 = 1; + + // (T-1)/2 with T = (MODULUS^3-1) / 2^TWO_ADICITY + #[rustfmt::skip] + const T_MINUS_ONE_DIV_TWO: &'static [u64] = &[ + 0xb5e7c000000a3eac, + 0xf79b99dbf41cf4ab, + 0xe9372b1919e55ee5, + 0xbb7bbc4936c1980b, + 0x7c0cb9d4399b36e1, + 0x73304a5507bb1ae0, + 0x92f639be8963936f, + 0x4f574ac2439ba816, + 0x670d9bd389dd29ef, + 0x606ddf900d2124f1, + 0x928fb14985ec3270, + 0x6b2f2428c5f420f3, + 0xac9ade29d5ab5fbe, + 0xec0d0434c4005822, + 0x973f10d7f3c5c108, + 0x6d5e83fc81095979, + 0xdac3e6e4e1647752, + 0x227febf93994603e, + 0x4ab8755d894167d1, + 0x4fd2d3f67d8b537a, + 0x33e196a4d5f4030a, + 0x88b51fb72092df1a, + 0xa67e5b1e8fc48316, + 0xb0855eb2a00d7dab, + 0xe875dd2da6751442, + 0x777594a243e25676, + 0x294e0f70376a85a8, + 0x83f431c7988e4f18, + 0x8e8fb6af3ca2f5f1, + 0x7297896b4b9e90f1, + 0xff38f54664d66123, + 0xb5ecf80bfff41e13, + 0x1662a3666bb8392a, + 0x07a0968e8742d3e1, + 0xf12927e564bcdfdc, + 0x5de9825a0e, + ]; + + // NONRESIDUE^T % q + #[rustfmt::skip] + const QUADRATIC_NONRESIDUE_TO_T: (Fq, Fq, Fq) = ( + field_new!(Fq, "6891450384315732539396789682275657542479668912536150109513790160209623422243491736087683183289411687640864567753786613451161759120554247759349511699125301598951605099378508850372543631423596795951899700429969112842764913119068298"), + FQ_ZERO, + FQ_ZERO, + ); + + // NQR ^ (MODULUS^i - 1)/3, i=0,1,2 with NQR = u = (0,1,0) + #[rustfmt::skip] + const FROBENIUS_COEFF_FP3_C1: &'static [Fq] = &[ + FQ_ONE, + field_new!(Fq, "4922464560225523242118178942575080391082002530232324381063048548642823052024664478336818169867474395270858391911405337707247735739826664939444490469542109391530482826728203582549674992333383150446779312029624171857054392282775648"), + field_new!(Fq, "1968985824090209297278610739700577151397666382303825728450741611566800370218827257750865013421937292370006175842381275743914023380727582819905021229583192207421122272650305267822868639090213645505120388400344940985710520836292650"), + ]; + + // NQR ^ (2*MODULUS^i - 2)/3, i=0,1,2 with NQR = u = (0,1,0) + #[rustfmt::skip] + const FROBENIUS_COEFF_FP3_C2: &'static [Fq] = &[ + FQ_ONE, + field_new!(Fq, "1968985824090209297278610739700577151397666382303825728450741611566800370218827257750865013421937292370006175842381275743914023380727582819905021229583192207421122272650305267822868639090213645505120388400344940985710520836292650"), 
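+        // Note: FROBENIUS_COEFF_FP3_C2[i] = FROBENIUS_COEFF_FP3_C1[i]^2, because (2*q^i - 2)/3 = 2 * ((q^i - 1)/3).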
+ field_new!(Fq, "4922464560225523242118178942575080391082002530232324381063048548642823052024664478336818169867474395270858391911405337707247735739826664939444490469542109391530482826728203582549674992333383150446779312029624171857054392282775648"), + ]; + + #[inline(always)] + fn mul_fp_by_nonresidue(fe: &Self::Fp) -> Self::Fp { + let original = -(*fe); + let double = original + &original; + double + &double + } +} diff --git a/arkworks/curves/bw6_761/src/fields/fq6.rs b/arkworks/curves/bw6_761/src/fields/fq6.rs new file mode 100644 index 00000000..f9351b5b --- /dev/null +++ b/arkworks/curves/bw6_761/src/fields/fq6.rs @@ -0,0 +1,27 @@ +use crate::{Fq, Fq3, Fq3Parameters, FQ_ONE, FQ_ZERO}; +use ark_ff::{ + field_new, + fields::fp6_2over3::{Fp6, Fp6Parameters}, +}; + +pub type Fq6 = Fp6; + +pub struct Fq6Parameters; + +impl Fp6Parameters for Fq6Parameters { + type Fp3Params = Fq3Parameters; + + /// NONRESIDUE = (0, 1, 0) + #[rustfmt::skip] + const NONRESIDUE: Fq3 = field_new!(Fq3, FQ_ZERO, FQ_ONE, FQ_ZERO); + + #[rustfmt::skip] + const FROBENIUS_COEFF_FP6_C1: &'static [Fq] = &[ + field_new!(Fq, "1"), + field_new!(Fq, "4922464560225523242118178942575080391082002530232324381063048548642823052024664478336818169867474395270858391911405337707247735739826664939444490469542109391530482826728203582549674992333383150446779312029624171857054392282775649"), + field_new!(Fq, "4922464560225523242118178942575080391082002530232324381063048548642823052024664478336818169867474395270858391911405337707247735739826664939444490469542109391530482826728203582549674992333383150446779312029624171857054392282775648"), + field_new!(Fq, "-1"), + field_new!(Fq, "1968985824090209297278610739700577151397666382303825728450741611566800370218827257750865013421937292370006175842381275743914023380727582819905021229583192207421122272650305267822868639090213645505120388400344940985710520836292650"), + field_new!(Fq, "1968985824090209297278610739700577151397666382303825728450741611566800370218827257750865013421937292370006175842381275743914023380727582819905021229583192207421122272650305267822868639090213645505120388400344940985710520836292651"), + ]; +} diff --git a/arkworks/curves/bw6_761/src/fields/fr.rs b/arkworks/curves/bw6_761/src/fields/fr.rs new file mode 100644 index 00000000..63c94bb4 --- /dev/null +++ b/arkworks/curves/bw6_761/src/fields/fr.rs @@ -0,0 +1 @@ +pub use ark_bls12_377::{Fq as Fr, FqParameters as FrParameters}; diff --git a/arkworks/curves/bw6_761/src/fields/mod.rs b/arkworks/curves/bw6_761/src/fields/mod.rs new file mode 100644 index 00000000..7bfd333d --- /dev/null +++ b/arkworks/curves/bw6_761/src/fields/mod.rs @@ -0,0 +1,14 @@ +pub mod fr; +pub use self::fr::*; + +pub mod fq; +pub use self::fq::*; + +pub mod fq3; +pub use self::fq3::*; + +pub mod fq6; +pub use self::fq6::*; + +#[cfg(test)] +mod tests; diff --git a/arkworks/curves/bw6_761/src/fields/tests.rs b/arkworks/curves/bw6_761/src/fields/tests.rs new file mode 100644 index 00000000..9187972b --- /dev/null +++ b/arkworks/curves/bw6_761/src/fields/tests.rs @@ -0,0 +1,52 @@ +use ark_ff::{Field, PrimeField}; +use ark_serialize::{buffer_bit_byte_size, CanonicalSerialize}; +use ark_std::rand::Rng; +use ark_std::test_rng; + +use crate::*; + +use ark_algebra_test_templates::fields::*; + +#[test] +fn test_fr() { + let mut rng = test_rng(); + let a: Fr = rng.gen(); + let b: Fr = rng.gen(); + field_test(a, b); + sqrt_field_test(a); + primefield_test::(); +} + +#[test] +fn test_fq() { + let mut rng = test_rng(); + let a: Fq = rng.gen(); + let b: Fq = 
rng.gen();
+    field_test(a, b);
+    primefield_test::<Fq>();
+    sqrt_field_test(a);
+
+    let byte_size = a.serialized_size();
+    let (_, buffer_size) = buffer_bit_byte_size(Fq::size_in_bits());
+    assert_eq!(byte_size, buffer_size);
+    field_serialization_test::<Fq>(byte_size);
+}
+
+#[test]
+fn test_fq3() {
+    let mut rng = test_rng();
+    let a: Fq3 = rng.gen();
+    let b: Fq3 = rng.gen();
+    field_test(a, b);
+    sqrt_field_test(a);
+    frobenius_test::<Fq3, _>(Fq::characteristic(), 13);
+}
+
+#[test]
+fn test_fq6() {
+    let mut rng = test_rng();
+    let a: Fq6 = rng.gen();
+    let b: Fq6 = rng.gen();
+    field_test(a, b);
+    frobenius_test::<Fq6, _>(Fq::characteristic(), 13);
+}
diff --git a/arkworks/curves/bw6_761/src/lib.rs b/arkworks/curves/bw6_761/src/lib.rs
new file mode 100644
index 00000000..117bab18
--- /dev/null
+++ b/arkworks/curves/bw6_761/src/lib.rs
@@ -0,0 +1,34 @@
+#![cfg_attr(not(feature = "std"), no_std)]
+#![deny(
+    warnings,
+    unused,
+    future_incompatible,
+    nonstandard_style,
+    rust_2018_idioms
+)]
+#![forbid(unsafe_code)]
+
+//! This library implements the BW6_761 curve generated in [\[EG20\]](https://eprint.iacr.org/2020/351).
+//! The name denotes that it is a curve generated using the Brezing--Weng method, and that
+//! its embedding degree is 6.
+//! The main feature of this curve is that the scalar field equals the base field of the BLS12_377 curve.
+//!
+//! Curve information:
+//! * Base field: q = 6891450384315732539396789682275657542479668912536150109513790160209623422243491736087683183289411687640864567753786613451161759120554247759349511699125301598951605099378508850372543631423596795951899700429969112842764913119068299
+//! * Scalar field: r = 258664426012969094010652733694893533536393512754914660539884262666720468348340822774968888139573360124440321458177
+//! * valuation(q - 1, 2) = 1
+//! * valuation(r - 1, 2) = 46
+//!
+//! G1 curve equation: y^2 = x^3 + ax + b, where
+//! * a = 0,
+//! * b = -1,
+//!
+//! G2 curve equation: y^2 = x^3 + Ax + B
+//! * A = 0
+//!
* B = 4 + +mod curves; +mod fields; + +pub use curves::*; +pub use fields::*; diff --git a/arkworks/curves/cp6_782/Cargo.toml b/arkworks/curves/cp6_782/Cargo.toml new file mode 100644 index 00000000..9f86e740 --- /dev/null +++ b/arkworks/curves/cp6_782/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "ark-cp6-782" +version = "0.3.0" +authors = [ "arkworks contributors" ] +description = "The CP6-782 pairing-friendly elliptic curve" +homepage = "https://arkworks.rs" +repository = "https://github.com/arkworks-rs/algebra" +documentation = "https://docs.rs/ark-cp6-782/" +keywords = ["cryptography", "finite-fields", "elliptic-curves" ] +categories = ["cryptography"] +include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +license = "MIT/Apache-2.0" +edition = "2018" + +[dependencies] +ark-ff = { version = "^0.3.0", default-features = false } +ark-ec = { version = "^0.3.0", default-features = false } +ark-std = { version = "^0.3.0", default-features = false } +ark-bls12-377 = { version = "^0.3.0", path = "../bls12_377", default-features = false, features = [ "base_field" ] } + +[dev-dependencies] +ark-serialize = { version = "^0.3.0", default-features = false } +ark-algebra-test-templates = { version = "^0.3.0", default-features = false } + +[features] +default = [] +std = [ "ark-std/std", "ark-ff/std", "ark-ec/std", "ark-bls12-377/std" ] diff --git a/arkworks/curves/cp6_782/src/curves/g1.rs b/arkworks/curves/cp6_782/src/curves/g1.rs new file mode 100644 index 00000000..393c942c --- /dev/null +++ b/arkworks/curves/cp6_782/src/curves/g1.rs @@ -0,0 +1,60 @@ +use ark_ec::{ + models::{ModelParameters, SWModelParameters}, + short_weierstrass_jacobian::{GroupAffine, GroupProjective}, +}; +use ark_ff::field_new; + +use crate::{Fq, Fr}; + +pub type G1Affine = GroupAffine; +pub type G1Projective = GroupProjective; + +#[derive(Clone, Default, PartialEq, Eq)] +pub struct Parameters; + +impl ModelParameters for Parameters { + type BaseField = Fq; + type ScalarField = Fr; +} + +impl SWModelParameters for Parameters { + /// COEFF_A = 5 + #[rustfmt::skip] + const COEFF_A: Fq = field_new!(Fq, "5"); + + /// COEFF_B = 17764315118651679038286329069295091506801468118146712649886336045535808055361274148466772191243305528312843236347777260247138934336850548243151534538734724191505953341403463040067571652261229308333392040104884438208594329793895206056414 + #[rustfmt::skip] + const COEFF_B: Fq = field_new!(Fq, "17764315118651679038286329069295091506801468118146712649886336045535808055361274148466772191243305528312843236347777260247138934336850548243151534538734724191505953341403463040067571652261229308333392040104884438208594329793895206056414"); + + /// COFACTOR = + /// 86482221941698704497288378992285180119495364068003923046442785886272123124361700722982503222189455144364945735564951561028 + #[rustfmt::skip] + const COFACTOR: &'static [u64] = &[ + 0x5657b9b57b942344, + 0x84f9a65f3bd54eaf, + 0x5ea4214e35cd127, + 0xe3cbcbc14ec1501d, + 0xf196cb845a3092ab, + 0x7e14627ad0e19017, + 0x217db4, + ]; + + /// COFACTOR^(-1) mod r = + /// 163276846538158998893990986356139314746223949404500031940624325017036397274793417940375498603127780919653358641788 + #[rustfmt::skip] + const COFACTOR_INV: Fr = field_new!(Fr, "163276846538158998893990986356139314746223949404500031940624325017036397274793417940375498603127780919653358641788"); + + /// AFFINE_GENERATOR_COEFFS = (G1_GENERATOR_X, G1_GENERATOR_Y) + const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = + (G1_GENERATOR_X, G1_GENERATOR_Y); +} 
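The `SWModelParameters` impl above is everything the generic short-Weierstrass machinery in `ark-ec` needs for G1. The following is a minimal sketch (not part of this patch, written against the arkworks 0.3 API) of how downstream code exercises these constants:

```rust
use ark_cp6_782::{Fr, G1Affine, G1Projective};
use ark_ec::{AffineCurve, ProjectiveCurve};
use ark_ff::PrimeField;
use ark_std::{test_rng, UniformRand};

fn main() {
    let mut rng = test_rng();
    // The subgroup generator assembled from AFFINE_GENERATOR_COEFFS above.
    let g = G1Affine::prime_subgroup_generator();
    assert!(g.is_on_curve());
    // Scalar multiplication routes through the projective representation.
    let s = Fr::rand(&mut rng);
    let sg: G1Projective = g.mul(s.into_repr());
    assert!(sg.into_affine().is_in_correct_subgroup_assuming_on_curve());
    // COFACTOR_INV is COFACTOR^(-1) mod r, so clearing the cofactor and then
    // multiplying by its inverse round-trips a point of prime order.
    assert_eq!(g.mul_by_cofactor().mul_by_cofactor_inv(), g);
}
```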
+ +/// G1_GENERATOR_X = +/// 5511163824921585887915590525772884263960974614921003940645351443740084257508990841338974915037175497689287870585840954231884082785026301437744745393958283053278991955159266640440849940136976927372133743626748847559939620888818486853646 +#[rustfmt::skip] +pub const G1_GENERATOR_X: Fq = field_new!(Fq, "5511163824921585887915590525772884263960974614921003940645351443740084257508990841338974915037175497689287870585840954231884082785026301437744745393958283053278991955159266640440849940136976927372133743626748847559939620888818486853646"); + +/// G1_GENERATOR_Y = +/// 7913123550914612057135582061699117755797758113868200992327595317370485234417808273674357776714522052694559358668442301647906991623400754234679697332299689255516547752391831738454121261248793568285885897998257357202903170202349380518443 +#[rustfmt::skip] +pub const G1_GENERATOR_Y: Fq = field_new!(Fq, "7913123550914612057135582061699117755797758113868200992327595317370485234417808273674357776714522052694559358668442301647906991623400754234679697332299689255516547752391831738454121261248793568285885897998257357202903170202349380518443"); diff --git a/arkworks/curves/cp6_782/src/curves/g2.rs b/arkworks/curves/cp6_782/src/curves/g2.rs new file mode 100644 index 00000000..f410b1cb --- /dev/null +++ b/arkworks/curves/cp6_782/src/curves/g2.rs @@ -0,0 +1,119 @@ +use ark_ec::{ + models::{ModelParameters, SWModelParameters}, + short_weierstrass_jacobian::{GroupAffine, GroupProjective}, +}; +use ark_ff::field_new; + +use crate::{Fq, Fq3, Fr, FQ_ZERO}; + +pub type G2Affine = GroupAffine; +pub type G2Projective = GroupProjective; + +#[derive(Clone, Default, PartialEq, Eq)] +pub struct Parameters; + +impl ModelParameters for Parameters { + type BaseField = Fq3; + type ScalarField = Fr; +} + +impl SWModelParameters for Parameters { + /// COEFF_A = (0, 0, COEFF_A * TWIST^2) = (0, 0, 5) + #[rustfmt::skip] + const COEFF_A: Fq3 = field_new!(Fq3, + FQ_ZERO, + FQ_ZERO, + field_new!(Fq, "5"), + ); + + /// COEFF_B = (G1::COEFF_B * TWIST^3, 0, 0) = + /// (7237353553714858194254855835825640240663090882935418626687402315497764195116318527743248304684159666286416318482685337633828994152723793439622384740540789612754127688659139509552568164770448654259255628317166934203899992395064470477612, + /// 0, 0) + #[rustfmt::skip] + const COEFF_B: Fq3 = field_new!(Fq3, + field_new!(Fq, "7237353553714858194254855835825640240663090882935418626687402315497764195116318527743248304684159666286416318482685337633828994152723793439622384740540789612754127688659139509552568164770448654259255628317166934203899992395064470477612"), + FQ_ZERO, + FQ_ZERO, + ); + + /// COFACTOR = + /// 43276679045916726782882096851503554444292580777869919574700824986947162516693702667493938255647666346010819253090121562084993205202476199057555142869892665220155573207800985012241638987472334344174208389303164492698303448192856551557283997344470334833850065978668184377503856699635686872344035470027430053642178229054516302338812152178131995800255516474185251732445975837621097393375441662426280154371264547168198834382681059556891327702516519955053315674076980350109237328216856859758931256208439575383786363605925879337208599843910819433766160937121108797819223653884174994325142959644019600 + #[rustfmt::skip] + const COFACTOR: &'static [u64] = &[ + 0x4b77fca151d50b90, + 0x8c98a12bd486d2fb, + 0x1f0c9a51593693f8, + 0x1d6f388069c063c1, + 0x556e918748f06793, + 0x2cea7dc01aae2140, + 0x4216f0595cee44d0, + 0x7a5e400154f633cf, + 0xbb74eb9b6630846b, + 0x8eb48c92998f3358, + 
0xbedd37f629e8e634, + 0xc541018fe4d10cc7, + 0x574956a099ace2c3, + 0xa597504275948226, + 0x7ecaaf050acb91f3, + 0x0f25b044f4e9c932, + 0xf8c39cbf0df97780, + 0xd8f9eda95d6abf3e, + 0xd1d80da227dd39c1, + 0x8b589c61531dbce7, + 0xfee4439281455474, + 0x9eea59baa2aeb4a1, + 0xa3b8a42c4e1e6f5a, + 0xc4b99b0d9b077d21, + 0xd09033887d09b4d2, + 0x4a86d8ebb7fdf52a, + 0xbe7ce44dd084e05d, + 0x4ed25f7ebe6c44b3, + 0xd7f8e3ef00255961, + 0xa1ad2ad61580ef78, + 0x19e70d3618ca3, + ]; + + /// COFACTOR^(-1) mod r = + /// 45586359457219724873147353901735745013467692594291916855200979604570630929674383405372210802279573887880950375598 + #[rustfmt::skip] + const COFACTOR_INV: Fr = field_new!(Fr, "45586359457219724873147353901735745013467692594291916855200979604570630929674383405372210802279573887880950375598"); + + /// AFFINE_GENERATOR_COEFFS = (G2_GENERATOR_X, G2_GENERATOR_Y) + const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = + (G2_GENERATOR_X, G2_GENERATOR_Y); +} + +const G2_GENERATOR_X: Fq3 = + field_new!(Fq3, G2_GENERATOR_X_C0, G2_GENERATOR_X_C1, G2_GENERATOR_X_C2); +const G2_GENERATOR_Y: Fq3 = + field_new!(Fq3, G2_GENERATOR_Y_C0, G2_GENERATOR_Y_C1, G2_GENERATOR_Y_C2); + +/// G2_GENERATOR_X_C0 = +/// 13426761183630949215425595811885033211332897733228446437546263564078445562454176776915160094418980045665397361295624472103734543457352048745726512354895954850428989867542989474136256025045975283415690491751906307188562464175510373683338 +#[rustfmt::skip] +pub const G2_GENERATOR_X_C0: Fq = field_new!(Fq, "13426761183630949215425595811885033211332897733228446437546263564078445562454176776915160094418980045665397361295624472103734543457352048745726512354895954850428989867542989474136256025045975283415690491751906307188562464175510373683338"); + +/// G2_GENERATOR_X_C1 = +/// 20471601555918880743198170952645906008198510944268658573129351735028343217532386920456705632337352161031960990613816401042894531220068552819818037605513359562118363589199569321421558696125646867661360498323171027455638052943806292028610 +#[rustfmt::skip] +pub const G2_GENERATOR_X_C1: Fq = field_new!(Fq, "20471601555918880743198170952645906008198510944268658573129351735028343217532386920456705632337352161031960990613816401042894531220068552819818037605513359562118363589199569321421558696125646867661360498323171027455638052943806292028610"); + +/// G2_GENERATOR_X_C2 = +/// 3905053196875761830053608605277158152930144841844497593936739534395003062685449846381431331169369910535935138116320442345524758217411779027270883193856999691582831339845600938304719916501940381093815781408183227875600753651697934495980 +#[rustfmt::skip] +pub const G2_GENERATOR_X_C2: Fq = field_new!(Fq, "3905053196875761830053608605277158152930144841844497593936739534395003062685449846381431331169369910535935138116320442345524758217411779027270883193856999691582831339845600938304719916501940381093815781408183227875600753651697934495980"); + +/// G2_GENERATOR_Y_C0 = +/// 8567517639523571619872938228644013584947463594196306323477160496987712111576624702939472765993995586889532559039169098780892505598589581147768095093536988446010255611523736706017580686335404469207486594272103717837888228343074699140243 +#[rustfmt::skip] +pub const G2_GENERATOR_Y_C0: Fq = field_new!(Fq, "8567517639523571619872938228644013584947463594196306323477160496987712111576624702939472765993995586889532559039169098780892505598589581147768095093536988446010255611523736706017580686335404469207486594272103717837888228343074699140243"); + +/// G2_GENERATOR_Y_C1 = +/// 
3890537069205870914984502594450293167889863914413852788876350245583932846980126025043974070704295857226211547108005650399870458089721518559480870503159804530091559886149680718531004778697982910253701559194337987238111062202037698927752 +#[rustfmt::skip] +pub const G2_GENERATOR_Y_C1: Fq = field_new!(Fq, "3890537069205870914984502594450293167889863914413852788876350245583932846980126025043974070704295857226211547108005650399870458089721518559480870503159804530091559886149680718531004778697982910253701559194337987238111062202037698927752"); + +/// G2_GENERATOR_Y_C2 = +/// 10936269922612615564271188303104593362724754284143779051599749016735041389483971486958818324356025479751246744831831158558101688599198721653921723013062333636402617118847009085485166284126970598561393411916461254016145116183331671450721 +#[rustfmt::skip] +pub const G2_GENERATOR_Y_C2: Fq = field_new!(Fq, "10936269922612615564271188303104593362724754284143779051599749016735041389483971486958818324356025479751246744831831158558101688599198721653921723013062333636402617118847009085485166284126970598561393411916461254016145116183331671450721"); diff --git a/arkworks/curves/cp6_782/src/curves/mod.rs b/arkworks/curves/cp6_782/src/curves/mod.rs new file mode 100644 index 00000000..186bb858 --- /dev/null +++ b/arkworks/curves/cp6_782/src/curves/mod.rs @@ -0,0 +1,217 @@ +use ark_ec::{models::SWModelParameters, PairingEngine}; +use ark_ff::{ + biginteger::BigInteger832, + field_new, + fields::{BitIteratorBE, Field}, + One, +}; + +use crate::{Fq, Fq3, Fq6, Fr, FQ_ONE, FQ_ZERO}; + +pub mod g1; +pub use self::g1::{G1Affine, G1Projective}; + +pub mod g2; +pub use self::g2::{G2Affine, G2Projective}; + +#[cfg(test)] +mod tests; + +pub type GT = Fq6; + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct CP6_782; + +impl PairingEngine for CP6_782 { + type Fr = Fr; + type G1Projective = G1Projective; + type G1Affine = G1Affine; + type G1Prepared = G1Affine; + type G2Projective = G2Projective; + type G2Affine = G2Affine; + type G2Prepared = G2Affine; + type Fq = Fq; + type Fqe = Fq3; + type Fqk = Fq6; + + fn miller_loop<'a, I>(i: I) -> Self::Fqk + where + I: IntoIterator, + { + let mut result = Self::Fqk::one(); + for &(ref p, ref q) in i { + result *= &CP6_782::ate_miller_loop(p, q); + } + result + } + + fn final_exponentiation(r: &Self::Fqk) -> Option { + Some(CP6_782::final_exponentiation(r)) + } +} + +impl CP6_782 { + pub fn ate_pairing(p: &G1Affine, q: &G2Affine) -> GT { + CP6_782::final_exponentiation(&CP6_782::ate_miller_loop(p, q)) + } + + fn ate_miller_loop(p: &G1Affine, q: &G2Affine) -> Fq6 { + let px = p.x; + let py = p.y; + let qx = q.x; + let qy = q.y; + let mut py_twist_squared = TWIST.square(); + py_twist_squared.mul_assign_by_fp(&py); + + let mut old_rx; + let mut old_ry; + let mut rx = qx; + let mut ry = qy; + let mut f = Fq6::one(); + + // The for loop is executed for all bits (EXCEPT the MSB itself) of + // cp6_782_param_p (skipping leading zeros) in MSB to LSB order + for bit in BitIteratorBE::without_leading_zeros(ATE_LOOP_COUNT).skip(1) { + old_rx = rx; + old_ry = ry; + + let old_rx_square = old_rx.square(); + let old_rx_square_3 = old_rx_square.double() + &old_rx_square; + let old_rx_square_3_a = old_rx_square_3 + &g2::Parameters::COEFF_A; + let old_ry_double_inverse = old_ry.double().inverse().unwrap(); + + let gamma = old_rx_square_3_a * &old_ry_double_inverse; + let gamma_twist = gamma * &TWIST; + let gamma_old_rx = gamma * &old_rx; + let mut gamma_twist_px = gamma_twist; + gamma_twist_px.mul_assign_by_fp(&px); + 
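+            // gamma is the tangent slope at R = (rx, ry); the next lines evaluate
+            // the doubling-step line l_{R,R} at P, packed into Fq6 as
+            // (py * TWIST^2, gamma * old_rx - old_ry - gamma * TWIST * px).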
+ let x = py_twist_squared; + let y = gamma_old_rx - &old_ry - &gamma_twist_px; + let ell_rr_at_p = Fq6::new(x, y); + + rx = gamma.square() - &old_rx.double(); + ry = gamma * &(old_rx - &rx) - &old_ry; + f = f.square() * &ell_rr_at_p; + + if bit { + old_rx = rx; + old_ry = ry; + + let gamma = (old_ry - &qy) * &((old_rx - &qx).inverse().unwrap()); + let gamma_twist = gamma * &TWIST; + let gamma_qx = gamma * &qx; + let mut gamma_twist_px = gamma_twist; + gamma_twist_px.mul_assign_by_fp(&px); + + let x = py_twist_squared; + let y = gamma_qx - &qy - &gamma_twist_px; + let ell_rq_at_p = Fq6::new(x, y); + + rx = gamma.square() - &old_rx - &qx; + ry = gamma * &(old_rx - &rx) - &old_ry; + f = f * &ell_rq_at_p; + } + } + f + } + + fn final_exponentiation(value: &Fq6) -> GT { + let value_inv = value.inverse().unwrap(); + let value_to_first_chunk = CP6_782::final_exponentiation_first(value, &value_inv); + let value_inv_to_first_chunk = CP6_782::final_exponentiation_first(&value_inv, value); + CP6_782::final_exponentiation_last(&value_to_first_chunk, &value_inv_to_first_chunk) + } + + fn final_exponentiation_first(elt: &Fq6, elt_inv: &Fq6) -> Fq6 { + // (q^3-1)*(q+1) + + // elt_q3 = elt^(q^3) + let mut elt_q3 = elt.clone(); + elt_q3.frobenius_map(3); + // elt_q3_over_elt = elt^(q^3-1) + let elt_q3_over_elt = elt_q3 * elt_inv; + // alpha = elt^((q^3-1) * q) + let mut alpha = elt_q3_over_elt.clone(); + alpha.frobenius_map(1); + // beta = elt^((q^3-1)*(q+1) + alpha * &elt_q3_over_elt + } + + fn final_exponentiation_last(elt: &Fq6, elt_inv: &Fq6) -> Fq6 { + let mut elt_q = elt.clone(); + elt_q.frobenius_map(1); + + let w1_part = elt_q.cyclotomic_exp(&FINAL_EXPONENT_LAST_CHUNK_W1); + let w0_part = if FINAL_EXPONENT_LAST_CHUNK_W0_IS_NEG { + elt_inv.cyclotomic_exp(&FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0) + } else { + elt.cyclotomic_exp(&FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0) + }; + + w1_part * &w0_part + } +} + +/// TWIST = (0, 1, 0) +pub const TWIST: Fq3 = field_new!(Fq3, FQ_ZERO, FQ_ONE, FQ_ZERO); + +/// ATE_IS_LOOP_COUNT_NEG = false +pub const ATE_IS_LOOP_COUNT_NEG: bool = false; + +/// ATE_LOOP_COUNT = +/// 506464946133393486072777102926336625944849939610982267859828541006717966526573193706126370441346337661774335955699621 +pub const ATE_LOOP_COUNT: [u64; 13] = [ + 0x55c5b9b57b942ae8, + 0x3d52287d3dfd424a, + 0xcf1ff9d6a543deb7, + 0x820c9c5711ceeebc, + 0x549a2d44305d20fe, + 0x50f5c131afd70235, + 0xab3596c8617c5792, + 0x830c728d80f9d78b, + 0x6a7223ee72023d07, + 0xbc5d176b746af026, + 0xe959283d8f526663, + 0xc4d2263babf8941f, + 0x3848, +]; + +/// FINAL_EXPONENT_LAST_CHUNK_W0_IS_NEG = true +pub const FINAL_EXPONENT_LAST_CHUNK_W0_IS_NEG: bool = true; + +/// FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0 = +/// 7000705447348627246181409558336018323010329260726930841638672011287206690002601216854775649561085256265269640040570922609783227469279331691880282815325569032149343779036142830666859805506518426649197067288711084398033 +pub const FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0: BigInteger832 = BigInteger832([ + 0xb62ef36af72855d1, + 0x676b5cef49d290fa, + 0xd17fcf3c60947427, + 0x5b93d992bc1b2849, + 0x2171887cecd072cb, + 0x879a2873f1516f4a, + 0x8cc6856bd2cdf24e, + 0xbff4fb6644d01993, + 0x5dcbeea3e31ea667, + 0x5f256f47681649f3, + 0x2355a2b0839967fe, + 0x144ed, + 0x0, +]); + +/// FINAL_EXPONENT_LAST_CHUNK_W1 = +/// 86482221941698704497288378992285180119495364068003923046442785886272123124361700722982503222189455144364945735564951562986 +pub const FINAL_EXPONENT_LAST_CHUNK_W1: BigInteger832 = BigInteger832([ + 0x5657b9b57b942aea, + 
0x84f9a65f3bd54eaf,
+    0x5ea4214e35cd127,
+    0xe3cbcbc14ec1501d,
+    0xf196cb845a3092ab,
+    0x7e14627ad0e19017,
+    0x217db4,
+    0x0,
+    0x0,
+    0x0,
+    0x0,
+    0x0,
+    0x0,
+]);
diff --git a/arkworks/curves/cp6_782/src/curves/tests.rs b/arkworks/curves/cp6_782/src/curves/tests.rs
new file mode 100644
index 00000000..25b22786
--- /dev/null
+++ b/arkworks/curves/cp6_782/src/curves/tests.rs
@@ -0,0 +1,78 @@
+use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve};
+use ark_ff::{Field, One, PrimeField};
+use ark_std::rand::Rng;
+use ark_std::test_rng;
+
+use crate::*;
+
+use ark_algebra_test_templates::{curves::*, groups::*};
+
+#[test]
+fn test_g1_projective_curve() {
+    curve_tests::<G1Projective>();
+
+    sw_tests::<g1::Parameters>();
+}
+
+#[test]
+fn test_g1_projective_group() {
+    let mut rng = test_rng();
+    let a: G1Projective = rng.gen();
+    let b: G1Projective = rng.gen();
+    group_test(a, b);
+}
+
+#[test]
+fn test_g1_generator() {
+    let generator = G1Affine::prime_subgroup_generator();
+    assert!(generator.is_on_curve());
+    assert!(generator.is_in_correct_subgroup_assuming_on_curve());
+}
+
+#[test]
+fn test_g2_projective_curve() {
+    curve_tests::<G2Projective>();
+
+    sw_tests::<g2::Parameters>();
+}
+
+#[test]
+fn test_g2_projective_group() {
+    let mut rng = test_rng();
+    let a: G2Projective = rng.gen();
+    let b: G2Projective = rng.gen();
+    group_test(a, b);
+}
+
+#[test]
+fn test_g2_generator() {
+    let generator = G2Affine::prime_subgroup_generator();
+    assert!(generator.is_on_curve());
+    assert!(generator.is_in_correct_subgroup_assuming_on_curve());
+}
+
+#[test]
+fn test_bilinearity() {
+    let mut rng = test_rng();
+    let a: G1Projective = rng.gen();
+    let b: G2Projective = rng.gen();
+    let s: Fr = rng.gen();
+
+    let sa = a.mul(s.into_repr());
+    let sb = b.mul(s.into_repr());
+
+    let ans1 = CP6_782::pairing(sa, b);
+    let ans2 = CP6_782::pairing(a, sb);
+    let ans3 = CP6_782::pairing(a, b).pow(s.into_repr());
+
+    assert_eq!(ans1, ans2);
+    assert_eq!(ans2, ans3);
+
+    assert_ne!(ans1, Fq6::one());
+    assert_ne!(ans2, Fq6::one());
+    assert_ne!(ans3, Fq6::one());
+
+    assert_eq!(ans1.pow(Fr::characteristic()), Fq6::one());
+    assert_eq!(ans2.pow(Fr::characteristic()), Fq6::one());
+    assert_eq!(ans3.pow(Fr::characteristic()), Fq6::one());
+}
diff --git a/arkworks/curves/cp6_782/src/fields/fq.rs b/arkworks/curves/cp6_782/src/fields/fq.rs
new file mode 100644
index 00000000..1962c95c
--- /dev/null
+++ b/arkworks/curves/cp6_782/src/fields/fq.rs
@@ -0,0 +1,169 @@
+use ark_ff::{
+    biginteger::BigInteger832 as BigInteger,
+    fields::{FftParameters, Fp832, Fp832Parameters, FpParameters},
+};
+
+pub type Fq = Fp832<FqParameters>;
+
+pub struct FqParameters;
+
+pub const FQ_ONE: Fq = ark_ff::field_new!(Fq, "1");
+pub const FQ_ZERO: Fq = ark_ff::field_new!(Fq, "0");
+
+impl Fp832Parameters for FqParameters {}
+impl FftParameters for FqParameters {
+    type BigInt = BigInteger;
+
+    const TWO_ADICITY: u32 = 3;
+
+    #[rustfmt::skip]
+    const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([
+        18044746167194862600u64,
+        63590321303744709u64,
+        5009346151370959890u64,
+        2859114157767503991u64,
+        8301813204852325413u64,
+        5629414263664332594u64,
+        2637340888701394641u64,
+        17433538052687852753u64,
+        2230763098934759248u64,
+        3785382115983092023u64,
+        8895511354022222370u64,
+        15792083141709071785u64,
+        1328u64,
+    ]);
+}
+impl FpParameters for FqParameters {
+    /// MODULUS =
22369874298875696930346742206501054934775599465297184582183496627646774052458024540232479018147881220178054575403841904557897715222633333372134756426301062487682326574958588001132586331462553235407484089304633076250782629492557320825577 + #[rustfmt::skip] + const MODULUS: BigInteger = BigInteger([ + 0xdace79b57b942ae9, + 0x545d85c16dfd424a, + 0xee135c065f4d26b7, + 0x9c2f764a12c4024b, + 0x1ad533049cfe6a39, + 0x52a3fb77c79c1320, + 0xab3596c8617c5792, + 0x830c728d80f9d78b, + 0x6a7223ee72023d07, + 0xbc5d176b746af026, + 0xe959283d8f526663, + 0xc4d2263babf8941f, + 0x3848, + ]); + + const MODULUS_BITS: u32 = 782; + + const CAPACITY: u32 = Self::MODULUS_BITS - 1; + + const REPR_SHAVE_BITS: u32 = 50; + + #[rustfmt::skip] + const R: BigInteger = BigInteger([ + 11190988450819017841u64, + 16170411717126802030u64, + 2265463223430229059u64, + 16946880912571045974u64, + 11155248462028513229u64, + 12855672356664541314u64, + 8489376931127408159u64, + 2655797810825538098u64, + 9648483887143916718u64, + 17514963461276738952u64, + 16777089214204267338u64, + 15649035958020076168u64, + 8659u64, + ]); + + #[rustfmt::skip] + const R2: BigInteger = BigInteger([ + 13983406830510863714u64, + 17863856572171232656u64, + 1698388424046564526u64, + 1773634430448388392u64, + 8684647957094413275u64, + 3992637317298078843u64, + 18420879196616862245u64, + 3238482510270583127u64, + 7928200707794018216u64, + 10024831010452223910u64, + 9613847725664942650u64, + 15361265984156787358u64, + 7833u64, + ]); + + const INV: u64 = 14469047335842394791u64; + + /// GENERATOR = 13 + #[rustfmt::skip] + const GENERATOR: BigInteger = BigInteger([ + 16669393626057438558u64, + 1640520694378723217u64, + 1598646156981121135u64, + 12401834967100173388u64, + 2356467520877704673u64, + 14759118825104212161u64, + 5556628239575210651u64, + 5317520392768798654u64, + 16398429955031064995u64, + 3556102264904210145u64, + 8166834915717907988u64, + 11926665585800594452u64, + 11716u64, + ]); + + #[rustfmt::skip] + const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0x6d673cdabdca1574, + 0xaa2ec2e0b6fea125, + 0xf709ae032fa6935b, + 0xce17bb2509620125, + 0xd6a99824e7f351c, + 0x2951fdbbe3ce0990, + 0xd59acb6430be2bc9, + 0xc1863946c07cebc5, + 0x353911f739011e83, + 0xde2e8bb5ba357813, + 0xf4ac941ec7a93331, + 0x6269131dd5fc4a0f, + 0x1c24, + ]); + + // (T - 1)/2 = + // 1398117143679731058146671387906315933423474966581074036386468539227923378278626533764529938634242576261128410962740119034868607201414583335758422276643816405480145410934911750070786645716409577212967755581539567265673914343284832551598 + #[rustfmt::skip] + const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0xadace79b57b942ae, + 0x7545d85c16dfd424, + 0xbee135c065f4d26b, + 0x99c2f764a12c4024, + 0x1ad533049cfe6a3, + 0x252a3fb77c79c132, + 0xbab3596c8617c579, + 0x7830c728d80f9d78, + 0x66a7223ee72023d0, + 0x3bc5d176b746af02, + 0xfe959283d8f52666, + 0x8c4d2263babf8941, + 0x384, + ]); + + // T = + // 2796234287359462116293342775812631866846949933162148072772937078455846756557253067529059877268485152522256821925480238069737214402829166671516844553287632810960290821869823500141573291432819154425935511163079134531347828686569665103197 + #[rustfmt::skip] + const T: BigInteger = BigInteger([ + 0x5b59cf36af72855d, + 0xea8bb0b82dbfa849, + 0x7dc26b80cbe9a4d6, + 0x3385eec942588049, + 0x35aa660939fcd47, + 0x4a547f6ef8f38264, + 0x7566b2d90c2f8af2, + 0xf0618e51b01f3af1, + 0xcd4e447dce4047a0, + 0x778ba2ed6e8d5e04, + 0xfd2b2507b1ea4ccc, + 0x189a44c7757f1283, + 0x709, + ]); +} diff --git 
a/arkworks/curves/cp6_782/src/fields/fq3.rs b/arkworks/curves/cp6_782/src/fields/fq3.rs new file mode 100644 index 00000000..eb242870 --- /dev/null +++ b/arkworks/curves/cp6_782/src/fields/fq3.rs @@ -0,0 +1,91 @@ +use crate::{fields::FQ_ZERO, Fq}; +use ark_ff::{ + field_new, + fields::fp3::{Fp3, Fp3Parameters}, + Field, +}; + +pub type Fq3 = Fp3; + +pub struct Fq3Parameters; + +impl Fp3Parameters for Fq3Parameters { + type Fp = Fq; + + /// NONRESIDUE = 13 + #[rustfmt::skip] + const NONRESIDUE: Fq = field_new!(Fq, "13"); + + const TWO_ADICITY: u32 = 3; + + #[rustfmt::skip] + const T_MINUS_ONE_DIV_TWO: &'static [u64] = &[ + 0x62730e2cd2029617, + 0x660647f735cb88cf, + 0x274359d60784f69d, + 0x83067194eb102629, + 0x54ea4a12a9381160, + 0xade0b24e398dac25, + 0xb476ae9f927e81cb, + 0x220fd4a9178adc3b, + 0x57e0cb9b0569745b, + 0xba15024addc8f52e, + 0x145b9bc116144ab6, + 0x6bc2260726e88b15, + 0x51da6bf151066474, + 0x9fd1b3190f6320cf, + 0x2097bfb7bf4167b0, + 0x27c35b1e7e628e09, + 0x94f80c9d623dd9bb, + 0x20bfa6d5bf31e7d3, + 0x19fb862c049d3a8, + 0xdf4c5efe04c0cec1, + 0x32c9a8abe9b50297, + 0x268d5c2076b44f0a, + 0x76027ec67b23ca21, + 0x248d61e0c45d270, + 0x419cd0d1d6be027e, + 0xbcd8dc3b1986ef18, + 0x73093d8719c862c2, + 0x651d60f8f9f6fcd9, + 0x8dabebe38a09b261, + 0xfa85b5a9e180cd3f, + 0x6a97fc618f319fb7, + 0xce08b93a5652a8e1, + 0x37525cbc4ba24cf9, + 0xb104c580df9d2150, + 0x1407c1bfe240a89d, + 0x34c96a73372daf9a, + 0x2b87fda171, + ]; + + #[rustfmt::skip] + const QUADRATIC_NONRESIDUE_TO_T: (Fq, Fq, Fq) = ( + field_new!(Fq, "5759691735434357221228070840130186543101559976323700017469395641639510585333061695996665166662748527158637897523704071820491869715512532675375604262649010727161924084052120196921150869218319839231115277876207074651754402338718419191428"), + FQ_ZERO, + FQ_ZERO, + ); + + #[rustfmt::skip] + const FROBENIUS_COEFF_FP3_C1: &'static [Fq] = &[ + field_new!(Fq, "1"), + field_new!(Fq, "2416169158604010336818399199316106389588878314690767988978701685873498866746813334102117883272276610365242925950967572554030909749205624998805208910209389668659757274773858916683688639755413288353778854399286396639505385648830027756861"), + field_new!(Fq, "19953705140271686593528343007184948545186721150606416593204794941773275185711211206130361134875604609812811649452874332003866805473427708373329547516091672819022569300184729084448897691707139947053705234905346679611277243843727293068715"), + ]; + + #[rustfmt::skip] + const FROBENIUS_COEFF_FP3_C2: &'static [Fq] = &[ + field_new!(Fq, "1"), + field_new!(Fq, "19953705140271686593528343007184948545186721150606416593204794941773275185711211206130361134875604609812811649452874332003866805473427708373329547516091672819022569300184729084448897691707139947053705234905346679611277243843727293068715"), + field_new!(Fq, "2416169158604010336818399199316106389588878314690767988978701685873498866746813334102117883272276610365242925950967572554030909749205624998805208910209389668659757274773858916683688639755413288353778854399286396639505385648830027756861"), + ]; + + #[inline(always)] + fn mul_fp_by_nonresidue(fe: &Self::Fp) -> Self::Fp { + let original = *fe; + let mut four_fe = fe.double(); + four_fe.double_in_place(); + let eight_fe = four_fe.double(); + eight_fe + &four_fe + &original + } +} diff --git a/arkworks/curves/cp6_782/src/fields/fq6.rs b/arkworks/curves/cp6_782/src/fields/fq6.rs new file mode 100644 index 00000000..07b25abe --- /dev/null +++ b/arkworks/curves/cp6_782/src/fields/fq6.rs @@ -0,0 +1,27 @@ +use crate::{Fq, Fq3, Fq3Parameters, FQ_ONE, FQ_ZERO}; +use 
ark_ff::{ + field_new, + fields::fp6_2over3::{Fp6, Fp6Parameters}, +}; + +pub type Fq6 = Fp6; + +pub struct Fq6Parameters; + +impl Fp6Parameters for Fq6Parameters { + type Fp3Params = Fq3Parameters; + + /// NONRESIDUE = (0, 1, 0). + #[rustfmt::skip] + const NONRESIDUE: Fq3 = field_new!(Fq3, FQ_ZERO, FQ_ONE, FQ_ZERO); + + #[rustfmt::skip] + const FROBENIUS_COEFF_FP6_C1: &'static [Fq] = &[ + field_new!(Fq, "1"), + field_new!(Fq, "2416169158604010336818399199316106389588878314690767988978701685873498866746813334102117883272276610365242925950967572554030909749205624998805208910209389668659757274773858916683688639755413288353778854399286396639505385648830027756862"), + field_new!(Fq, "2416169158604010336818399199316106389588878314690767988978701685873498866746813334102117883272276610365242925950967572554030909749205624998805208910209389668659757274773858916683688639755413288353778854399286396639505385648830027756861"), + field_new!(Fq, "22369874298875696930346742206501054934775599465297184582183496627646774052458024540232479018147881220178054575403841904557897715222633333372134756426301062487682326574958588001132586331462553235407484089304633076250782629492557320825576"), + field_new!(Fq, "19953705140271686593528343007184948545186721150606416593204794941773275185711211206130361134875604609812811649452874332003866805473427708373329547516091672819022569300184729084448897691707139947053705234905346679611277243843727293068715"), + field_new!(Fq, "19953705140271686593528343007184948545186721150606416593204794941773275185711211206130361134875604609812811649452874332003866805473427708373329547516091672819022569300184729084448897691707139947053705234905346679611277243843727293068716"), + ]; +} diff --git a/arkworks/curves/cp6_782/src/fields/fr.rs b/arkworks/curves/cp6_782/src/fields/fr.rs new file mode 100644 index 00000000..63c94bb4 --- /dev/null +++ b/arkworks/curves/cp6_782/src/fields/fr.rs @@ -0,0 +1 @@ +pub use ark_bls12_377::{Fq as Fr, FqParameters as FrParameters}; diff --git a/arkworks/curves/cp6_782/src/fields/mod.rs b/arkworks/curves/cp6_782/src/fields/mod.rs new file mode 100644 index 00000000..2da6aa67 --- /dev/null +++ b/arkworks/curves/cp6_782/src/fields/mod.rs @@ -0,0 +1,14 @@ +pub mod fr; +pub use self::fr::*; + +pub mod fq; +pub use self::fq::*; + +pub mod fq3; +pub use self::fq3::*; + +pub mod fq6; +pub use self::fq6::*; + +#[cfg(all(feature = "cp6_782", test))] +mod tests; diff --git a/arkworks/curves/cp6_782/src/fields/tests.rs b/arkworks/curves/cp6_782/src/fields/tests.rs new file mode 100644 index 00000000..9187972b --- /dev/null +++ b/arkworks/curves/cp6_782/src/fields/tests.rs @@ -0,0 +1,52 @@ +use ark_ff::{Field, PrimeField}; +use ark_serialize::{buffer_bit_byte_size, CanonicalSerialize}; +use ark_std::rand::Rng; +use ark_std::test_rng; + +use crate::*; + +use ark_algebra_test_templates::fields::*; + +#[test] +fn test_fr() { + let mut rng = test_rng(); + let a: Fr = rng.gen(); + let b: Fr = rng.gen(); + field_test(a, b); + sqrt_field_test(a); + primefield_test::(); +} + +#[test] +fn test_fq() { + let mut rng = test_rng(); + let a: Fq = rng.gen(); + let b: Fq = rng.gen(); + field_test(a, b); + primefield_test::(); + sqrt_field_test(a); + + let byte_size = a.serialized_size(); + let (_, buffer_size) = buffer_bit_byte_size(Fq::size_in_bits()); + assert_eq!(byte_size, buffer_size); + field_serialization_test::(byte_size); +} + +#[test] +fn test_fq3() { + let mut rng = test_rng(); + let a: Fq3 = rng.gen(); + let b: Fq3 = rng.gen(); + field_test(a, b); + sqrt_field_test(a); + 
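+    // frobenius_test checks that frobenius_map(i) agrees with raising to the
+    // q^i power, here for powers up to 13.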
frobenius_test::<Fq3, _>(Fq::characteristic(), 13);
+}
+
+#[test]
+fn test_fq6() {
+    let mut rng = test_rng();
+    let a: Fq6 = rng.gen();
+    let b: Fq6 = rng.gen();
+    field_test(a, b);
+    frobenius_test::<Fq6, _>(Fq::characteristic(), 13);
+}
diff --git a/arkworks/curves/cp6_782/src/lib.rs b/arkworks/curves/cp6_782/src/lib.rs
new file mode 100644
index 00000000..2163fb7c
--- /dev/null
+++ b/arkworks/curves/cp6_782/src/lib.rs
@@ -0,0 +1,33 @@
+#![cfg_attr(not(feature = "std"), no_std)]
+#![deny(
+    warnings,
+    unused,
+    future_incompatible,
+    nonstandard_style,
+    rust_2018_idioms
+)]
+#![forbid(unsafe_code)]
+
+//! This library implements the CP6_782 curve generated in [\[BCGMMW20, “Zexe”\]](https://eprint.iacr.org/2018/962).
+//! The name denotes that it was generated using the Cocks--Pinch method for the embedding degree 6.
+//! The main feature of this curve is that the scalar field equals the base field of the BLS12_377 curve.
+//!
+//! Curve information:
+//! * Base field: q = 22369874298875696930346742206501054934775599465297184582183496627646774052458024540232479018147881220178054575403841904557897715222633333372134756426301062487682326574958588001132586331462553235407484089304633076250782629492557320825577
+//! * Scalar field: r = 258664426012969094010652733694893533536393512754914660539884262666720468348340822774968888139573360124440321458177
+//! * valuation(q - 1, 2) = 3
+//! * valuation(r - 1, 2) = 46
+//!
+//! G1 curve equation: y^2 = x^3 + ax + b, where
+//! * a = 5,
+//! * b = 17764315118651679038286329069295091506801468118146712649886336045535808055361274148466772191243305528312843236347777260247138934336850548243151534538734724191505953341403463040067571652261229308333392040104884438208594329793895206056414,
+//!
+//! G2 curve equation: y^2 = x^3 + Ax + B
+//! * A = Fq3(0, 0, 5)
+//!
* B = Fq3(7237353553714858194254855835825640240663090882935418626687402315497764195116318527743248304684159666286416318482685337633828994152723793439622384740540789612754127688659139509552568164770448654259255628317166934203899992395064470477612, 0, 0) + +mod curves; +mod fields; + +pub use curves::*; +pub use fields::*; diff --git a/arkworks/curves/curve-benches/Cargo.toml b/arkworks/curves/curve-benches/Cargo.toml new file mode 100644 index 00000000..b16dfc0f --- /dev/null +++ b/arkworks/curves/curve-benches/Cargo.toml @@ -0,0 +1,115 @@ +[package] +name = "ark-curve-benches" +version = "0.3.0" +authors = [ + "Sean Bowe", + "Alessandro Chiesa", + "Matthew Green", + "Ian Miers", + "Pratyush Mishra", + "Howard Wu", + "Daira Hopwood" +] +description = "A benchmark library for finite fields and elliptic curves" +homepage = "https://arkworks.rs" +repository = "https://github.com/arkworks-rs/algebra" +documentation = "https://docs.rs/algebra/" +keywords = ["cryptography", "finite-fields", "elliptic-curves", "pairing"] +categories = ["cryptography"] +include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +license = "MIT/Apache-2.0" +edition = "2018" +publish = false +build = "build.rs" + +################################# Dependencies ################################ + +[dependencies] +bencher = { version = "0.1.5" } + +[dev-dependencies] +ark-std = { version = "^0.3.0", default-features = false } +ark-ec = { version = "^0.3.0", default-features = false } +ark-ff = { version = "^0.3.0", default-features = false } +ark-serialize = { version = "^0.3.0", default-features = false } + +ark-mnt4-298 = { path = "../mnt4_298" } +ark-mnt6-298 = { path = "../mnt6_298" } +ark-mnt4-753 = { path = "../mnt4_753" } +ark-mnt6-753 = { path = "../mnt6_753" } +ark-bn254 = { path = "../bn254" } +ark-bls12-377 = { path = "../bls12_377" } +ark-bls12-381 = { path = "../bls12_381" } +ark-ed-on-bls12-381 = { path = "../ed_on_bls12_381" } +ark-bw6-761 = { path = "../bw6_761" } +ark-cp6-782 = { path = "../cp6_782" } +ark-pallas = { path = "../pallas" } +ark-vesta = { path = "../vesta" } + +[features] +asm = [ "ark-ff/asm"] +parallel = [ "ark-ff/parallel", "ark-ec/parallel", ] +n_fold = [] + +[build-dependencies] +rustc_version = "0.2" + +[[bench]] +name = "bls12_377" +path = "benches/bls12_377.rs" +harness = false + +[[bench]] +name = "bls12_381" +path = "benches/bls12_381.rs" +harness = false + +[[bench]] +name = "bn254" +path = "benches/bn254.rs" +harness = false + +[[bench]] +name = "bw6_761" +path = "benches/bw6_761.rs" +harness = false + +[[bench]] +name = "cp6_782" +path = "benches/cp6_782.rs" +harness = false + +[[bench]] +name = "ed_on_bls12_381" +path = "benches/ed_on_bls12_381.rs" +harness = false + +[[bench]] +name = "mnt4_298" +path = "benches/mnt4_298.rs" +harness = false + +[[bench]] +name = "mnt6_298" +path = "benches/mnt6_298.rs" +harness = false + +[[bench]] +name = "mnt4_753" +path = "benches/mnt4_753.rs" +harness = false + +[[bench]] +name = "mnt6_753" +path = "benches/mnt6_753.rs" +harness = false + +[[bench]] +name = "pallas" +path = "benches/pallas.rs" +harness = false + +[[bench]] +name = "vesta" +path = "benches/vesta.rs" +harness = false diff --git a/arkworks/curves/curve-benches/LICENSE-APACHE b/arkworks/curves/curve-benches/LICENSE-APACHE new file mode 120000 index 00000000..965b606f --- /dev/null +++ b/arkworks/curves/curve-benches/LICENSE-APACHE @@ -0,0 +1 @@ +../LICENSE-APACHE \ No newline at end of file diff --git a/arkworks/curves/curve-benches/LICENSE-MIT 
b/arkworks/curves/curve-benches/LICENSE-MIT new file mode 120000 index 00000000..76219eb7 --- /dev/null +++ b/arkworks/curves/curve-benches/LICENSE-MIT @@ -0,0 +1 @@ +../LICENSE-MIT \ No newline at end of file diff --git a/arkworks/curves/curve-benches/benches/bls12_377.rs b/arkworks/curves/curve-benches/benches/bls12_377.rs new file mode 100644 index 00000000..96d19867 --- /dev/null +++ b/arkworks/curves/curve-benches/benches/bls12_377.rs @@ -0,0 +1,30 @@ +use ark_curve_benches::*; +use ark_std::ops::{AddAssign, MulAssign, SubAssign}; + +use ark_bls12_377::{ + fq::Fq, fq2::Fq2, fr::Fr, Bls12_377, Fq12, G1Affine, G1Projective as G1, G2Affine, + G2Projective as G2, +}; +use ark_ec::{PairingEngine, ProjectiveCurve}; +use ark_ff::{ + biginteger::{BigInteger256 as FrRepr, BigInteger384 as FqRepr}, + BigInteger, Field, PrimeField, SquareRootField, UniformRand, +}; + +mod g1 { + use super::*; + ec_bench!(G1, G1Affine); +} +mod g2 { + use super::*; + ec_bench!(G2, G2Affine); +} + +f_bench!(Fq, Fq, FqRepr, FqRepr, fq); +f_bench!(Fr, Fr, FrRepr, FrRepr, fr); +f_bench!(extension, Fq2, Fq2, fq2); +f_bench!(target, Fq12, Fq12, fq12); + +pairing_bench!(Bls12_377, Fq12); + +bencher::benchmark_main!(fq, fr, fq2, fq12, g1::group_ops, g2::group_ops, pairing); diff --git a/arkworks/curves/curve-benches/benches/bls12_381.rs b/arkworks/curves/curve-benches/benches/bls12_381.rs new file mode 100644 index 00000000..080bff05 --- /dev/null +++ b/arkworks/curves/curve-benches/benches/bls12_381.rs @@ -0,0 +1,30 @@ +use ark_curve_benches::*; +use ark_std::ops::{AddAssign, MulAssign, SubAssign}; + +use ark_bls12_381::{ + fq::Fq, fq2::Fq2, fr::Fr, Bls12_381, Fq12, G1Affine, G1Projective as G1, G2Affine, + G2Projective as G2, +}; +use ark_ec::{PairingEngine, ProjectiveCurve}; +use ark_ff::{ + biginteger::{BigInteger256 as FrRepr, BigInteger384 as FqRepr}, + BigInteger, Field, PrimeField, SquareRootField, UniformRand, +}; + +mod g1 { + use super::*; + ec_bench!(G1, G1Affine); +} +mod g2 { + use super::*; + ec_bench!(G2, G2Affine); +} + +f_bench!(Fq, Fq, FqRepr, FqRepr, fq); +f_bench!(Fr, Fr, FrRepr, FrRepr, fr); +f_bench!(extension, Fq2, Fq2, fq2); +f_bench!(target, Fq12, Fq12, fq12); + +pairing_bench!(Bls12_381, Fq12); + +bencher::benchmark_main!(fq, fr, fq2, fq12, g1::group_ops, g2::group_ops, pairing); diff --git a/arkworks/curves/curve-benches/benches/bn254.rs b/arkworks/curves/curve-benches/benches/bn254.rs new file mode 100644 index 00000000..de9249a0 --- /dev/null +++ b/arkworks/curves/curve-benches/benches/bn254.rs @@ -0,0 +1,30 @@ +use ark_curve_benches::*; +use ark_std::ops::{AddAssign, MulAssign, SubAssign}; + +use ark_bn254::{ + fq::Fq, fq2::Fq2, fr::Fr, Bn254, Fq12, G1Affine, G1Projective as G1, G2Affine, + G2Projective as G2, +}; +use ark_ec::{PairingEngine, ProjectiveCurve}; +use ark_ff::{ + biginteger::{BigInteger256 as FrRepr, BigInteger256 as FqRepr}, + BigInteger, Field, PrimeField, SquareRootField, UniformRand, +}; + +mod g1 { + use super::*; + ec_bench!(G1, G1Affine); +} +mod g2 { + use super::*; + ec_bench!(G2, G2Affine); +} + +f_bench!(Fq, Fq, FqRepr, FqRepr, fq); +f_bench!(Fr, Fr, FrRepr, FrRepr, fr); +f_bench!(extension, Fq2, Fq2, fq2); +f_bench!(target, Fq12, Fq12, fq12); + +pairing_bench!(Bn254, Fq12); + +bencher::benchmark_main!(fq, fr, fq2, fq12, g1::group_ops, g2::group_ops, pairing); diff --git a/arkworks/curves/curve-benches/benches/bw6_761.rs b/arkworks/curves/curve-benches/benches/bw6_761.rs new file mode 100644 index 00000000..311875ce --- /dev/null +++ 
b/arkworks/curves/curve-benches/benches/bw6_761.rs @@ -0,0 +1,29 @@ +use ark_curve_benches::*; +use ark_std::ops::{AddAssign, MulAssign, SubAssign}; + +use ark_bw6_761::{ + fq::Fq, fq3::Fq3, fr::Fr, Fq6, G1Affine, G1Projective as G1, G2Affine, G2Projective as G2, + BW6_761, +}; +use ark_ec::{PairingEngine, ProjectiveCurve}; +use ark_ff::{ + biginteger::{BigInteger384 as FrRepr, BigInteger768 as FqRepr}, + BigInteger, Field, PrimeField, SquareRootField, UniformRand, +}; + +mod g1 { + use super::*; + ec_bench!(G1, G1Affine); +} +mod g2 { + use super::*; + ec_bench!(G2, G2Affine); +} + +f_bench!(extension, Fq3, Fq3, fq3); +f_bench!(target, Fq6, Fq6, fq6); +f_bench!(Fq, Fq, FqRepr, FqRepr, fq); +f_bench!(Fr, Fr, FrRepr, FrRepr, fr); +pairing_bench!(BW6_761, Fq6); + +bencher::benchmark_main!(fq, fr, fq3, fq6, g1::group_ops, g2::group_ops, pairing); diff --git a/arkworks/curves/curve-benches/benches/cp6_782.rs b/arkworks/curves/curve-benches/benches/cp6_782.rs new file mode 100644 index 00000000..290563f8 --- /dev/null +++ b/arkworks/curves/curve-benches/benches/cp6_782.rs @@ -0,0 +1,29 @@ +use ark_curve_benches::*; +use ark_std::ops::{AddAssign, MulAssign, SubAssign}; + +use ark_cp6_782::{ + fq::Fq, fq3::Fq3, fr::Fr, Fq6, G1Affine, G1Projective as G1, G2Affine, G2Projective as G2, + CP6_782, +}; +use ark_ec::{PairingEngine, ProjectiveCurve}; +use ark_ff::{ + biginteger::{BigInteger384 as FrRepr, BigInteger832 as FqRepr}, + BigInteger, Field, PrimeField, SquareRootField, UniformRand, +}; + +mod g1 { + use super::*; + ec_bench!(G1, G1Affine); +} +mod g2 { + use super::*; + ec_bench!(G2, G2Affine); +} + +f_bench!(extension, Fq3, Fq3, fq3); +f_bench!(target, Fq6, Fq6, fq6); +f_bench!(Fq, Fq, FqRepr, FqRepr, fq); +f_bench!(Fr, Fr, FrRepr, FrRepr, fr); +pairing_bench!(CP6_782, Fq6); + +bencher::benchmark_main!(fq, fr, fq3, fq6, g1::group_ops, g2::group_ops, pairing); diff --git a/arkworks/curves/curve-benches/benches/ed_on_bls12_381.rs b/arkworks/curves/curve-benches/benches/ed_on_bls12_381.rs new file mode 100644 index 00000000..c3a58ba8 --- /dev/null +++ b/arkworks/curves/curve-benches/benches/ed_on_bls12_381.rs @@ -0,0 +1,19 @@ +use ark_curve_benches::*; +use ark_std::ops::{AddAssign, MulAssign, SubAssign}; + +use ark_ec::ProjectiveCurve; +use ark_ed_on_bls12_381::{fq::Fq, fr::Fr, EdwardsAffine as GAffine, EdwardsProjective as G}; +use ark_ff::{ + biginteger::{BigInteger256 as FrRepr, BigInteger256 as FqRepr}, + BigInteger, Field, PrimeField, SquareRootField, UniformRand, +}; + +mod g { + use super::*; + ec_bench!(G, GAffine); +} + +f_bench!(Fq, Fq, FqRepr, FqRepr, fq); +f_bench!(Fr, Fr, FrRepr, FrRepr, fr); + +bencher::benchmark_main!(fq, fr, g::group_ops); diff --git a/arkworks/curves/curve-benches/benches/mnt4_298.rs b/arkworks/curves/curve-benches/benches/mnt4_298.rs new file mode 100644 index 00000000..947f17bc --- /dev/null +++ b/arkworks/curves/curve-benches/benches/mnt4_298.rs @@ -0,0 +1,29 @@ +use ark_curve_benches::*; +use ark_std::ops::{AddAssign, MulAssign, SubAssign}; + +use ark_ec::{PairingEngine, ProjectiveCurve}; +use ark_ff::{ + biginteger::BigInteger320 as FqRepr, BigInteger, Field, PrimeField, SquareRootField, + UniformRand, +}; +use ark_mnt4_298::{ + fq::Fq, fq2::Fq2, fr::Fr, Fq4, G1Affine, G1Projective as G1, G2Affine, G2Projective as G2, + MNT4_298, +}; + +mod g1 { + use super::*; + ec_bench!(G1, G1Affine); +} +mod g2 { + use super::*; + ec_bench!(G2, G2Affine); +} + +f_bench!(extension, Fq2, Fq2, fq2); +f_bench!(target, Fq4, Fq4, fq4); +f_bench!(Fq, Fq, FqRepr, FqRepr, fq); 
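+// Fr is benchmarked with the same representation as Fq: both MNT4-298 moduli
+// are 298 bits wide, so BigInteger320 (imported above as FqRepr) covers each.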
+f_bench!(Fr, Fr, FqRepr, FqRepr, fr); +pairing_bench!(MNT4_298, Fq4); + +bencher::benchmark_main!(fq, fr, fq2, fq4, g1::group_ops, g2::group_ops, pairing); diff --git a/arkworks/curves/curve-benches/benches/mnt4_753.rs b/arkworks/curves/curve-benches/benches/mnt4_753.rs new file mode 100644 index 00000000..1d15bb25 --- /dev/null +++ b/arkworks/curves/curve-benches/benches/mnt4_753.rs @@ -0,0 +1,29 @@ +use ark_curve_benches::*; +use ark_std::ops::{AddAssign, MulAssign, SubAssign}; + +use ark_ec::{PairingEngine, ProjectiveCurve}; +use ark_ff::{ + biginteger::BigInteger768 as FqRepr, BigInteger, Field, PrimeField, SquareRootField, + UniformRand, +}; +use ark_mnt4_753::{ + fq::Fq, fq2::Fq2, fr::Fr, Fq4, G1Affine, G1Projective as G1, G2Affine, G2Projective as G2, + MNT4_753, +}; + +mod g1 { + use super::*; + ec_bench!(G1, G1Affine); +} +mod g2 { + use super::*; + ec_bench!(G2, G2Affine); +} + +f_bench!(extension, Fq2, Fq2, fq2); +f_bench!(target, Fq4, Fq4, fq4); +f_bench!(Fq, Fq, FqRepr, FqRepr, fq); +f_bench!(Fr, Fr, FqRepr, FqRepr, fr); +pairing_bench!(MNT4_753, Fq4); + +bencher::benchmark_main!(fq, fr, fq2, fq4, g1::group_ops, g2::group_ops, pairing); diff --git a/arkworks/curves/curve-benches/benches/mnt6_298.rs b/arkworks/curves/curve-benches/benches/mnt6_298.rs new file mode 100644 index 00000000..006d7970 --- /dev/null +++ b/arkworks/curves/curve-benches/benches/mnt6_298.rs @@ -0,0 +1,29 @@ +use ark_curve_benches::*; +use ark_std::ops::{AddAssign, MulAssign, SubAssign}; + +use ark_ec::{PairingEngine, ProjectiveCurve}; +use ark_ff::{ + biginteger::BigInteger320 as FqRepr, BigInteger, Field, PrimeField, SquareRootField, + UniformRand, +}; +use ark_mnt6_298::{ + fq::Fq, fq3::Fq3, fr::Fr, Fq6, G1Affine, G1Projective as G1, G2Affine, G2Projective as G2, + MNT6_298, +}; + +mod g1 { + use super::*; + ec_bench!(G1, G1Affine); +} +mod g2 { + use super::*; + ec_bench!(G2, G2Affine); +} + +f_bench!(extension, Fq3, Fq3, fq3); +f_bench!(target, Fq6, Fq6, fq6); +f_bench!(Fq, Fq, FqRepr, FqRepr, fq); +f_bench!(Fr, Fr, FqRepr, FqRepr, fr); +pairing_bench!(MNT6_298, Fq6); + +bencher::benchmark_main!(fq, fr, fq3, fq6, g1::group_ops, g2::group_ops, pairing); diff --git a/arkworks/curves/curve-benches/benches/mnt6_753.rs b/arkworks/curves/curve-benches/benches/mnt6_753.rs new file mode 100644 index 00000000..3310fd0c --- /dev/null +++ b/arkworks/curves/curve-benches/benches/mnt6_753.rs @@ -0,0 +1,29 @@ +use ark_curve_benches::*; +use ark_std::ops::{AddAssign, MulAssign, SubAssign}; + +use ark_ec::{PairingEngine, ProjectiveCurve}; +use ark_ff::{ + biginteger::BigInteger768 as FqRepr, BigInteger, Field, PrimeField, SquareRootField, + UniformRand, +}; +use ark_mnt6_753::{ + fq::Fq, fq3::Fq3, fr::Fr, Fq6, G1Affine, G1Projective as G1, G2Affine, G2Projective as G2, + MNT6_753, +}; + +mod g1 { + use super::*; + ec_bench!(G1, G1Affine); +} +mod g2 { + use super::*; + ec_bench!(G2, G2Affine); +} + +f_bench!(extension, Fq3, Fq3, fq3); +f_bench!(target, Fq6, Fq6, fq6); +f_bench!(Fq, Fq, FqRepr, FqRepr, fq); +f_bench!(Fr, Fr, FqRepr, FqRepr, fr); +pairing_bench!(MNT6_753, Fq6); + +bencher::benchmark_main!(fq, fr, fq3, fq6, g1::group_ops, g2::group_ops, pairing); diff --git a/arkworks/curves/curve-benches/benches/pallas.rs b/arkworks/curves/curve-benches/benches/pallas.rs new file mode 100644 index 00000000..adbb392c --- /dev/null +++ b/arkworks/curves/curve-benches/benches/pallas.rs @@ -0,0 +1,19 @@ +use ark_curve_benches::*; +use ark_std::ops::{AddAssign, MulAssign, SubAssign}; + +use ark_ec::ProjectiveCurve; +use 
ark_ff::{ + biginteger::{BigInteger256 as FrRepr, BigInteger256 as FqRepr}, + BigInteger, Field, PrimeField, SquareRootField, UniformRand, +}; +use ark_pallas::{fq::Fq, fr::Fr, Affine as GAffine, Projective as G}; + +mod g { + use super::*; + ec_bench!(G, GAffine); +} + +f_bench!(Fq, Fq, FqRepr, FqRepr, fq); +f_bench!(Fr, Fr, FrRepr, FrRepr, fr); + +bencher::benchmark_main!(fq, fr, g::group_ops); diff --git a/arkworks/curves/curve-benches/benches/vesta.rs b/arkworks/curves/curve-benches/benches/vesta.rs new file mode 100644 index 00000000..11a0fd4a --- /dev/null +++ b/arkworks/curves/curve-benches/benches/vesta.rs @@ -0,0 +1,19 @@ +use ark_curve_benches::*; +use ark_std::ops::{AddAssign, MulAssign, SubAssign}; + +use ark_ec::ProjectiveCurve; +use ark_ff::{ + biginteger::{BigInteger256 as FrRepr, BigInteger256 as FqRepr}, + BigInteger, Field, PrimeField, SquareRootField, UniformRand, +}; +use ark_vesta::{fq::Fq, fr::Fr, Affine as GAffine, Projective as G}; + +mod g { + use super::*; + ec_bench!(G, GAffine); +} + +f_bench!(Fq, Fq, FqRepr, FqRepr, fq); +f_bench!(Fr, Fr, FrRepr, FrRepr, fr); + +bencher::benchmark_main!(fq, fr, g::group_ops); diff --git a/arkworks/curves/curve-benches/build.rs b/arkworks/curves/curve-benches/build.rs new file mode 100644 index 00000000..f71f6fbd --- /dev/null +++ b/arkworks/curves/curve-benches/build.rs @@ -0,0 +1,9 @@ +extern crate rustc_version; + +use rustc_version::{version_meta, Channel}; + +fn main() { + if version_meta().expect("nightly check failed").channel == Channel::Nightly { + println!("cargo:rustc-cfg=nightly"); + } +} diff --git a/arkworks/curves/curve-benches/src/lib.rs b/arkworks/curves/curve-benches/src/lib.rs new file mode 100644 index 00000000..faa053b3 --- /dev/null +++ b/arkworks/curves/curve-benches/src/lib.rs @@ -0,0 +1,8 @@ +#![allow(unused_macros, unused_imports)] +#[macro_use] +pub mod macros; +pub use macros::*; + +#[macro_use] +pub extern crate bencher; +pub use bencher::*; diff --git a/arkworks/curves/curve-benches/src/macros/ec.rs b/arkworks/curves/curve-benches/src/macros/ec.rs new file mode 100644 index 00000000..3b402111 --- /dev/null +++ b/arkworks/curves/curve-benches/src/macros/ec.rs @@ -0,0 +1,230 @@ +#[macro_export] +macro_rules! 
ec_bench { + ($projective:ty, $affine:ty) => { + fn rand(b: &mut $crate::bencher::Bencher) { + let mut rng = ark_std::test_rng(); + b.iter(|| <$projective>::rand(&mut rng)); + } + + fn mul_assign(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let v: Vec<($projective, Fr)> = (0..SAMPLES) + .map(|_| (<$projective>::rand(&mut rng), Fr::rand(&mut rng))) + .collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count].0; + tmp *= v[count].1; + count = (count + 1) % SAMPLES; + tmp + }); + } + + fn add_assign(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let v: Vec<($projective, $projective)> = (0..SAMPLES) + .map(|_| (<$projective>::rand(&mut rng), <$projective>::rand(&mut rng))) + .collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count].0; + n_fold!(tmp, v, add_assign, count); + count = (count + 1) % SAMPLES; + tmp + }); + } + + fn sub_assign(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let v: Vec<($projective, $projective)> = (0..SAMPLES) + .map(|_| (<$projective>::rand(&mut rng), <$projective>::rand(&mut rng))) + .collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count].0; + n_fold!(tmp, v, sub_assign, count); + count = (count + 1) % SAMPLES; + tmp + }); + } + + fn double(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let v: Vec<$projective> = (0..SAMPLES) + .map(|_| <$projective>::rand(&mut rng)) + .collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count]; + n_fold!(tmp, double_in_place); + count = (count + 1) % SAMPLES; + tmp + }); + } + + fn add_assign_mixed(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let v: Vec<($projective, $affine)> = (0..SAMPLES) + .map(|_| { + ( + <$projective>::rand(&mut rng), + <$projective>::rand(&mut rng).into(), + ) + }) + .collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count].0; + n_fold!(tmp, v, add_assign_mixed, count); + count = (count + 1) % SAMPLES; + tmp + }); + } + + fn deser(b: &mut $crate::bencher::Bencher) { + use ark_ec::ProjectiveCurve; + use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let mut num_bytes = 0; + let tmp = <$projective>::rand(&mut rng).into_affine(); + let v: Vec<_> = (0..SAMPLES) + .flat_map(|_| { + let mut bytes = Vec::with_capacity(1000); + tmp.serialize(&mut bytes).unwrap(); + num_bytes = bytes.len(); + bytes + }) + .collect(); + + let mut count = 0; + b.iter(|| { + count = (count + 1) % SAMPLES; + let index = count * num_bytes; + <$affine>::deserialize(&v[index..(index + num_bytes)]).unwrap() + }); + } + + fn ser(b: &mut $crate::bencher::Bencher) { + use ark_ec::ProjectiveCurve; + use ark_serialize::CanonicalSerialize; + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let mut v: Vec<_> = (0..SAMPLES) + .map(|_| <$projective>::rand(&mut rng)) + .collect(); + let v = <$projective>::batch_normalization_into_affine(v.as_mut_slice()); + let mut bytes = Vec::with_capacity(1000); + + let mut count = 0; + b.iter(|| { + let tmp = v[count]; + count = (count + 1) % SAMPLES; + bytes.clear(); + tmp.serialize(&mut bytes) + }); + } + + fn deser_unchecked(b: &mut $crate::bencher::Bencher) { + use ark_ec::ProjectiveCurve; + use 
ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let mut num_bytes = 0; + let tmp = <$projective>::rand(&mut rng).into_affine(); + let v: Vec<_> = (0..SAMPLES) + .flat_map(|_| { + let mut bytes = Vec::with_capacity(1000); + tmp.serialize_unchecked(&mut bytes).unwrap(); + num_bytes = bytes.len(); + bytes + }) + .collect(); + + let mut count = 0; + b.iter(|| { + count = (count + 1) % SAMPLES; + let index = count * num_bytes; + <$affine>::deserialize_unchecked(&v[index..(index + num_bytes)]).unwrap() + }); + } + + fn ser_unchecked(b: &mut $crate::bencher::Bencher) { + use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let mut v: Vec<_> = (0..SAMPLES) + .map(|_| <$projective>::rand(&mut rng)) + .collect(); + let v = <$projective>::batch_normalization_into_affine(v.as_mut_slice()); + let mut bytes = Vec::with_capacity(1000); + + let mut count = 0; + b.iter(|| { + let tmp = v[count]; + count = (count + 1) % SAMPLES; + bytes.clear(); + tmp.serialize_unchecked(&mut bytes) + }); + } + + fn msm_131072(b: &mut $crate::bencher::Bencher) { + use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; + const SAMPLES: usize = 131072; + + let mut rng = ark_std::test_rng(); + + let g = <$projective>::rand(&mut rng).into_affine(); + let v: Vec<_> = (0..SAMPLES).map(|_| g).collect(); + let scalars: Vec<_> = (0..SAMPLES) + .map(|_| Fr::rand(&mut rng).into_repr()) + .collect(); + b.bench_n(1, |b| { + b.iter(|| ark_ec::msm::VariableBaseMSM::multi_scalar_mul(&v, &scalars)); + }) + } + + $crate::benchmark_group!( + group_ops, + rand, + mul_assign, + add_assign, + sub_assign, + add_assign_mixed, + double, + ser, + deser, + ser_unchecked, + deser_unchecked, + msm_131072, + ); + }; +} diff --git a/arkworks/curves/curve-benches/src/macros/field.rs b/arkworks/curves/curve-benches/src/macros/field.rs new file mode 100644 index 00000000..1343823b --- /dev/null +++ b/arkworks/curves/curve-benches/src/macros/field.rs @@ -0,0 +1,455 @@ +#[macro_export] +macro_rules! 
f_bench { + // Use this for base fields + ($f:ident, $f_type:ty, $f_repr:ident, $f_repr_type:ty, $modname:ident) => { + pub mod $modname { + use super::*; + field_common!($f, $f_type); + sqrt!($f, $f_type); + prime_field!($f, $f_type, $f_repr, $f_repr_type); + $crate::benchmark_group!( + $modname, + // common stuff + add_assign, + sub_assign, + double, + negate, + mul_assign, + square, + inverse, + ser, + deser, + ser_unchecked, + deser_unchecked, + // sqrt field stuff + sqrt, + // prime field stuff + repr_add_nocarry, + repr_sub_noborrow, + repr_num_bits, + repr_mul2, + repr_div2, + into_repr, + from_repr, + ); + } + use $modname::$modname; + }; + // use this for intermediate fields + (extension, $f:ident, $f_type:ty, $modname:ident) => { + mod $modname { + use super::*; + field_common!($f, $f_type); + sqrt!($f, $f_type); + $crate::benchmark_group!( + $modname, + // common stuff + add_assign, + sub_assign, + double, + negate, + mul_assign, + square, + inverse, + ser, + deser, + ser_unchecked, + deser_unchecked, + // sqrt field stuff + sqrt, + ); + } + use $modname::$modname; + }; + // Use this for the full extension field Fqk + (target, $f:ident, $f_type:ty, $modname:ident) => { + mod $modname { + use super::*; + field_common!($f, $f_type); + $crate::benchmark_group!( + $modname, + // common stuff + add_assign, + sub_assign, + double, + negate, + mul_assign, + square, + inverse, + ser, + deser, + ser_unchecked, + deser_unchecked, + ); + } + use $modname::$modname; + }; +} + +#[macro_export] +macro_rules! field_common { + ($f:ident, $f_type:ty) => { + fn add_assign(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let v: Vec<_> = (0..SAMPLES) + .map(|_| ($f::rand(&mut rng), $f::rand(&mut rng))) + .collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count].0; + n_fold!(tmp, v, add_assign, count); + count = (count + 1) % SAMPLES; + tmp + }); + } + + fn sub_assign(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let v: Vec<_> = (0..SAMPLES) + .map(|_| ($f::rand(&mut rng), $f::rand(&mut rng))) + .collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count].0; + n_fold!(tmp, v, sub_assign, count); + count = (count + 1) % SAMPLES; + tmp + }); + } + + fn double(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let v: Vec<$f_type> = (0..SAMPLES).map(|_| $f::rand(&mut rng)).collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count]; + n_fold!(tmp, double_in_place); + count = (count + 1) % SAMPLES; + tmp + }); + } + + fn negate(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let v: Vec<$f_type> = (0..SAMPLES).map(|_| $f::rand(&mut rng)).collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count]; + tmp = -tmp; + count = (count + 1) % SAMPLES; + tmp + }); + } + + fn mul_assign(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let v: Vec<_> = (0..SAMPLES) + .map(|_| ($f::rand(&mut rng), $f::rand(&mut rng))) + .collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count].0; + n_fold!(tmp, v, mul_assign, count); + count = (count + 1) % SAMPLES; + tmp + }); + } + + fn square(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let v: Vec<$f_type> = (0..SAMPLES).map(|_| $f::rand(&mut 
rng)).collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count]; + n_fold!(tmp, square_in_place); + count = (count + 1) % SAMPLES; + tmp + }); + } + + fn inverse(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let v: Vec<$f_type> = (0..SAMPLES).map(|_| $f::rand(&mut rng)).collect(); + + let mut count = 0; + b.iter(|| { + let tmp = v[count].inverse(); + count = (count + 1) % SAMPLES; + tmp + }); + } + + fn deser(b: &mut $crate::bencher::Bencher) { + use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let mut num_bytes = 0; + let v: Vec<_> = (0..SAMPLES) + .flat_map(|_| { + let mut bytes = Vec::with_capacity(1000); + let tmp = $f::rand(&mut rng); + tmp.serialize(&mut bytes).unwrap(); + num_bytes = bytes.len(); + bytes + }) + .collect(); + + let mut count = 0; + b.iter(|| { + count = (count + 1) % SAMPLES; + let index = count * num_bytes; + <$f_type>::deserialize(&v[index..(index + num_bytes)]).unwrap() + }); + } + + fn ser(b: &mut $crate::bencher::Bencher) { + use ark_serialize::CanonicalSerialize; + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let v: Vec<$f_type> = (0..SAMPLES).map(|_| $f::rand(&mut rng)).collect(); + let mut bytes = Vec::with_capacity(1000); + + let mut count = 0; + b.iter(|| { + let tmp = v[count]; + count = (count + 1) % SAMPLES; + bytes.clear(); + tmp.serialize(&mut bytes) + }); + } + + fn deser_unchecked(b: &mut $crate::bencher::Bencher) { + use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let mut num_bytes = 0; + let v: Vec<_> = (0..SAMPLES) + .flat_map(|_| { + let mut bytes = Vec::with_capacity(1000); + let tmp = $f::rand(&mut rng); + tmp.serialize_unchecked(&mut bytes).unwrap(); + num_bytes = bytes.len(); + bytes + }) + .collect(); + + let mut count = 0; + b.iter(|| { + count = (count + 1) % SAMPLES; + let index = count * num_bytes; + <$f_type>::deserialize_unchecked(&v[index..(index + num_bytes)]).unwrap() + }); + } + + fn ser_unchecked(b: &mut $crate::bencher::Bencher) { + use ark_serialize::CanonicalSerialize; + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let v: Vec<$f_type> = (0..SAMPLES).map(|_| $f::rand(&mut rng)).collect(); + let mut bytes = Vec::with_capacity(1000); + + let mut count = 0; + b.iter(|| { + let tmp = v[count]; + count = (count + 1) % SAMPLES; + bytes.clear(); + tmp.serialize_unchecked(&mut bytes) + }); + } + }; +} + +#[macro_export] +macro_rules! sqrt { + ($f:ident, $f_type:ty) => { + pub fn sqrt(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let v: Vec<$f_type> = (0..SAMPLES) + .map(|_| { + let mut tmp = $f::rand(&mut rng); + tmp.square_in_place(); + tmp + }) + .collect(); + + let mut count = 0; + b.iter(|| { + count = (count + 1) % SAMPLES; + v[count].sqrt() + }); + } + }; +} + +#[macro_export] +macro_rules! prime_field { + ($f:ident, $f_type:ty, $f_repr:ident, $f_repr_type:ty) => { + fn repr_add_nocarry(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let v: Vec<_> = (0..SAMPLES) + .map(|_| { + let mut tmp1 = $f_repr::rand(&mut rng); + let mut tmp2 = $f_repr::rand(&mut rng); + // Shave a few bits off to avoid overflow. 
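+ // (Each `div2` halves an operand, so three passes bound each addend below 2^253 and their sum below 2^254, keeping the 256-bit `add_nocarry` addition carry-free.)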
+ for _ in 0..3 { + tmp1.div2(); + tmp2.div2(); + } + (tmp1, tmp2) + }) + .collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count].0; + n_fold!(tmp, v, add_nocarry, count); + count = (count + 1) % SAMPLES; + tmp + }); + } + + fn repr_sub_noborrow(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let v: Vec<_> = (0..SAMPLES) + .map(|_| { + let tmp1 = $f_repr::rand(&mut rng); + let mut tmp2 = tmp1; + // Ensure tmp2 is smaller than tmp1. + for _ in 0..10 { + tmp2.div2(); + } + (tmp1, tmp2) + }) + .collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count].0; + n_fold!(tmp, v, sub_noborrow, count); + count = (count + 1) % SAMPLES; + tmp; + }); + } + + fn repr_num_bits(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let v: Vec<$f_repr_type> = (0..SAMPLES).map(|_| $f_repr::rand(&mut rng)).collect(); + + let mut count = 0; + b.iter(|| { + let tmp = v[count].num_bits(); + count = (count + 1) % SAMPLES; + tmp; + }); + } + + fn repr_mul2(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let v: Vec<$f_repr_type> = (0..SAMPLES).map(|_| $f_repr::rand(&mut rng)).collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count]; + n_fold!(tmp, mul2); + count = (count + 1) % SAMPLES; + tmp; + }); + } + + fn repr_div2(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let v: Vec<$f_repr_type> = (0..SAMPLES).map(|_| $f_repr::rand(&mut rng)).collect(); + + let mut count = 0; + b.iter(|| { + let mut tmp = v[count]; + n_fold!(tmp, div2); + count = (count + 1) % SAMPLES; + tmp; + }); + } + + fn into_repr(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let v: Vec<$f_type> = (0..SAMPLES).map(|_| $f::rand(&mut rng)).collect(); + + let mut count = 0; + b.iter(|| { + count = (count + 1) % SAMPLES; + v[count].into_repr(); + }); + } + + fn from_repr(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let v: Vec<$f_repr_type> = (0..SAMPLES) + .map(|_| $f::rand(&mut rng).into_repr()) + .collect(); + + let mut count = 0; + b.iter(|| { + count = (count + 1) % SAMPLES; + $f::from(v[count]); + }); + } + }; +} diff --git a/arkworks/curves/curve-benches/src/macros/mod.rs b/arkworks/curves/curve-benches/src/macros/mod.rs new file mode 100644 index 00000000..07857703 --- /dev/null +++ b/arkworks/curves/curve-benches/src/macros/mod.rs @@ -0,0 +1,11 @@ +#[macro_use] +mod utils; + +#[macro_use] +mod ec; + +#[macro_use] +mod field; + +#[macro_use] +mod pairing; diff --git a/arkworks/curves/curve-benches/src/macros/pairing.rs b/arkworks/curves/curve-benches/src/macros/pairing.rs new file mode 100644 index 00000000..5bd64e75 --- /dev/null +++ b/arkworks/curves/curve-benches/src/macros/pairing.rs @@ -0,0 +1,72 @@ +#[macro_export] +macro_rules! 
pairing_bench { + ($curve:ident, $pairing_field:ident) => { + fn miller_loop(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let g1s = (0..SAMPLES).map(|_| G1::rand(&mut rng)).collect::>(); + let g2s = (0..SAMPLES).map(|_| G2::rand(&mut rng)).collect::>(); + let g1s = G1::batch_normalization_into_affine(&g1s); + let g2s = G2::batch_normalization_into_affine(&g2s); + let prepared = g1s + .into_iter() + .zip(g2s) + .map(|(g1, g2)| (g1.into(), g2.into())) + .collect::::G1Prepared, + <$curve as PairingEngine>::G2Prepared, + )>>(); + let mut count = 0; + b.iter(|| { + let tmp = + $curve::miller_loop(&[(prepared[count].0.clone(), prepared[count].1.clone())]); + count = (count + 1) % SAMPLES; + tmp + }); + } + + fn final_exponentiation(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let v: Vec<_> = (0..SAMPLES) + .map(|_| { + ( + G1Affine::from(G1::rand(&mut rng)).into(), + G2Affine::from(G2::rand(&mut rng)).into(), + ) + }) + .map(|(p, q)| $curve::miller_loop(&[(p, q)])) + .collect(); + + let mut count = 0; + b.iter(|| { + let tmp = $curve::final_exponentiation(&v[count]); + count = (count + 1) % SAMPLES; + tmp + }); + } + + fn full_pairing(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + + let mut rng = ark_std::test_rng(); + + let v: Vec<(G1, G2)> = (0..SAMPLES) + .map(|_| (G1::rand(&mut rng), G2::rand(&mut rng))) + .collect(); + + let mut count = 0; + b.iter(|| { + let tmp = $curve::pairing(v[count].0, v[count].1); + count = (count + 1) % SAMPLES; + tmp + }); + } + + $crate::benchmark_group!(pairing, miller_loop, final_exponentiation, full_pairing,); + }; +} diff --git a/arkworks/curves/curve-benches/src/macros/utils.rs b/arkworks/curves/curve-benches/src/macros/utils.rs new file mode 100644 index 00000000..2968bca7 --- /dev/null +++ b/arkworks/curves/curve-benches/src/macros/utils.rs @@ -0,0 +1,36 @@ +#[macro_export] +macro_rules! n_fold { + ($tmp:ident, $v:ident, $func:ident, $count:ident) => { + $tmp.$func(&$v[$count].1); + }; + + ($tmp:ident, $func:ident) => { + $tmp.$func(); + }; +} + +/// Defines a function called `$group_name` that returns the test description +/// values for the listed functions `$function`. +#[macro_export] +macro_rules! 
benchmark_group { + ($group_name:ident, $($function:path),+) => { + pub fn $group_name() -> ::std::vec::Vec<$crate::TestDescAndFn> { + use $crate::{TestDescAndFn, TestFn, TestDesc}; + use std::borrow::Cow; + let mut benches = ::std::vec::Vec::new(); + $( + benches.push(TestDescAndFn { + desc: TestDesc { + name: Cow::from(module_path!().to_string() + "::" + stringify!($function)), + ignore: false, + }, + testfn: TestFn::StaticBenchFn($function), + }); + )+ + benches + } + }; + ($group_name:ident, $($function:path,)+) => { + benchmark_group!($group_name, $($function),+); + }; +} diff --git a/arkworks/curves/curve-constraint-tests/Cargo.toml b/arkworks/curves/curve-constraint-tests/Cargo.toml new file mode 100644 index 00000000..801fd526 --- /dev/null +++ b/arkworks/curves/curve-constraint-tests/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "ark-curve-constraint-tests" +version = "0.3.0" +authors = [ "arkworks contributors" ] +description = "A library for testing constraints for finite fields, elliptic curves, and pairings" +homepage = "https://arkworks.rs" +repository = "https://github.com/arkworks-rs/curves" +documentation = "https://docs.rs/ark-curve-constraint-tests/" +keywords = ["cryptography", "finite-fields", "elliptic-curves", "r1cs" ] +categories = ["cryptography"] +include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +license = "MIT/Apache-2.0" +edition = "2018" + +[dependencies] +ark-std = { version = "^0.3.0", default-features = false } +ark-serialize = { version = "^0.3.0", default-features = false } +ark-ff = { version = "^0.3.0", default-features = false } +ark-relations = { version = "^0.3.0", default-features = false } +ark-r1cs-std = { version = "^0.3.0", default-features = false } +ark-ec = { version = "^0.3.0", default-features = false } + +[features] +default = [] +std = [ "ark-std/std", "ark-ff/std", "ark-serialize/std", "ark-ec/std", "ark-relations/std", "ark-r1cs-std/std" ] diff --git a/arkworks/curves/curve-constraint-tests/src/lib.rs b/arkworks/curves/curve-constraint-tests/src/lib.rs new file mode 100644 index 00000000..ed2c59d3 --- /dev/null +++ b/arkworks/curves/curve-constraint-tests/src/lib.rs @@ -0,0 +1,625 @@ +#![macro_use] +extern crate ark_relations; + +pub mod fields { + use ark_ff::{BitIteratorLE, Field, UniformRand}; + use ark_r1cs_std::prelude::*; + use ark_relations::r1cs::{ConstraintSystem, SynthesisError}; + use ark_std::test_rng; + use ark_std::vec::Vec; + + pub fn field_test() -> Result<(), SynthesisError> + where + F: Field, + ConstraintF: Field, + AF: FieldVar, + AF: TwoBitLookupGadget, + for<'a> &'a AF: FieldOpsBounds<'a, F, AF>, + { + let modes = [ + AllocationMode::Input, + AllocationMode::Witness, + AllocationMode::Constant, + ]; + for &mode in &modes { + let cs = ConstraintSystem::::new_ref(); + + let mut rng = test_rng(); + let a_native = F::rand(&mut rng); + let b_native = F::rand(&mut rng); + let a = AF::new_variable(ark_relations::ns!(cs, "generate_a"), || Ok(a_native), mode)?; + let b = AF::new_variable(ark_relations::ns!(cs, "generate_b"), || Ok(b_native), mode)?; + let b_const = AF::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?; + + let zero = AF::zero(); + let zero_native = zero.value()?; + zero.enforce_equal(&zero)?; + + let one = AF::one(); + let one_native = one.value()?; + one.enforce_equal(&one)?; + + one.enforce_not_equal(&zero)?; + + let one_dup = &zero + &one; + one_dup.enforce_equal(&one)?; + + let two = &one + &one; + two.enforce_equal(&two)?; + 
two.enforce_equal(&one.double()?)?; + two.enforce_not_equal(&one)?; + two.enforce_not_equal(&zero)?; + + // a + 0 = a + let a_plus_zero = &a + &zero; + assert_eq!(a_plus_zero.value()?, a_native); + a_plus_zero.enforce_equal(&a)?; + a_plus_zero.enforce_not_equal(&a.double()?)?; + + // a - 0 = a + let a_minus_zero = &a - &zero; + assert_eq!(a_minus_zero.value()?, a_native); + a_minus_zero.enforce_equal(&a)?; + + // a - a = 0 + let a_minus_a = &a - &a; + assert_eq!(a_minus_a.value()?, zero_native); + a_minus_a.enforce_equal(&zero)?; + + // a + b = b + a + let a_b = &a + &b; + let b_a = &b + &a; + assert_eq!(a_b.value()?, a_native + &b_native); + a_b.enforce_equal(&b_a)?; + + // (a + b) + a = a + (b + a) + let ab_a = &a_b + &a; + let a_ba = &a + &b_a; + assert_eq!(ab_a.value()?, a_native + &b_native + &a_native); + ab_a.enforce_equal(&a_ba)?; + + let b_times_a_plus_b = &a_b * &b; + let b_times_b_plus_a = &b_a * &b; + assert_eq!( + b_times_a_plus_b.value()?, + b_native * &(b_native + &a_native) + ); + assert_eq!( + b_times_a_plus_b.value()?, + (b_native + &a_native) * &b_native + ); + assert_eq!( + b_times_a_plus_b.value()?, + (a_native + &b_native) * &b_native + ); + b_times_b_plus_a.enforce_equal(&b_times_a_plus_b)?; + + // a * 1 = a + assert_eq!((&a * &one).value()?, a_native * &one_native); + + // a * b = b * a + let ab = &a * &b; + let ba = &b * &a; + assert_eq!(ab.value()?, ba.value()?); + assert_eq!(ab.value()?, a_native * &b_native); + + let ab_const = &a * &b_const; + let b_const_a = &b_const * &a; + assert_eq!(ab_const.value()?, b_const_a.value()?); + assert_eq!(ab_const.value()?, ab.value()?); + assert_eq!(ab_const.value()?, a_native * &b_native); + + // (a * b) * a = a * (b * a) + let ab_a = &ab * &a; + let a_ba = &a * &ba; + assert_eq!(ab_a.value()?, a_ba.value()?); + assert_eq!(ab_a.value()?, a_native * &b_native * &a_native); + + let aa = &a * &a; + let a_squared = a.square()?; + a_squared.enforce_equal(&aa)?; + assert_eq!(aa.value()?, a_squared.value()?); + assert_eq!(aa.value()?, a_native.square()); + + let aa = &a * a_native; + a_squared.enforce_equal(&aa)?; + assert_eq!(aa.value()?, a_squared.value()?); + assert_eq!(aa.value()?, a_native.square()); + + let a_b2 = &a + b_native; + a_b.enforce_equal(&a_b2)?; + assert_eq!(a_b.value()?, a_b2.value()?); + + let a_inv = a.inverse()?; + a_inv.mul_equals(&a, &one)?; + assert_eq!(a_inv.value()?, a.value()?.inverse().unwrap()); + assert_eq!(a_inv.value()?, a_native.inverse().unwrap()); + + let a_b_inv = a.mul_by_inverse(&b)?; + a_b_inv.mul_equals(&b, &a)?; + assert_eq!(a_b_inv.value()?, a_native * b_native.inverse().unwrap()); + + // a * a * a = a^3 + let bits = BitIteratorLE::without_trailing_zeros([3u64]) + .map(Boolean::constant) + .collect::>(); + assert_eq!(a_native.pow([0x3]), a.pow_le(&bits)?.value()?); + + // a * a * a = a^3 + assert_eq!(a_native.pow([0x3]), a.pow_by_constant(&[0x3])?.value()?); + assert!(cs.is_satisfied().unwrap()); + + let mut constants = [F::zero(); 4]; + for c in &mut constants { + *c = UniformRand::rand(&mut test_rng()); + } + let bits = [ + Boolean::::constant(false), + Boolean::constant(true), + ]; + let lookup_result = AF::two_bit_lookup(&bits, constants.as_ref())?; + assert_eq!(lookup_result.value()?, constants[2]); + assert!(cs.is_satisfied().unwrap()); + + let f = F::from(1u128 << 64); + let f_bits = ark_ff::BitIteratorLE::new(&[0u64, 1u64]).collect::>(); + let fv = AF::new_variable(ark_relations::ns!(cs, "alloc u128"), || Ok(f), mode)?; + assert_eq!(fv.to_bits_le()?.value().unwrap()[..128], 
f_bits[..128]); + assert!(cs.is_satisfied().unwrap()); + + let r_native: F = UniformRand::rand(&mut test_rng()); + + let r = AF::new_variable(ark_relations::ns!(cs, "r_native"), || Ok(r_native), mode) + .unwrap(); + let _ = r.to_non_unique_bits_le()?; + assert!(cs.is_satisfied().unwrap()); + let _ = r.to_bits_le()?; + assert!(cs.is_satisfied().unwrap()); + + let bytes = r.to_non_unique_bytes()?; + assert_eq!(ark_ff::to_bytes!(r_native).unwrap(), bytes.value().unwrap()); + assert!(cs.is_satisfied().unwrap()); + let bytes = r.to_bytes()?; + assert_eq!(ark_ff::to_bytes!(r_native).unwrap(), bytes.value().unwrap()); + assert!(cs.is_satisfied().unwrap()); + + let ab_false = &a + (AF::from(Boolean::Constant(false)) * b_native); + let ab_true = &a + (AF::from(Boolean::Constant(true)) * b_native); + assert_eq!(ab_false.value()?, a_native); + assert_eq!(ab_true.value()?, a_native + &b_native); + + if !cs.is_satisfied().unwrap() { + panic!( + "Unsatisfied in mode {:?}.\n{:?}", + mode, + cs.which_is_unsatisfied().unwrap() + ); + } + assert!(cs.is_satisfied().unwrap()); + } + Ok(()) + } + + pub fn frobenius_tests(maxpower: usize) -> Result<(), SynthesisError> + where + F: Field, + ConstraintF: Field, + AF: FieldVar, + for<'a> &'a AF: FieldOpsBounds<'a, F, AF>, + { + let modes = [ + AllocationMode::Input, + AllocationMode::Witness, + AllocationMode::Constant, + ]; + for &mode in &modes { + let cs = ConstraintSystem::::new_ref(); + let mut rng = test_rng(); + for i in 0..=maxpower { + let mut a = F::rand(&mut rng); + let mut a_gadget = AF::new_variable(ark_relations::ns!(cs, "a"), || Ok(a), mode)?; + a_gadget.frobenius_map_in_place(i)?; + a.frobenius_map(i); + + assert_eq!(a_gadget.value()?, a); + } + + assert!(cs.is_satisfied().unwrap()); + } + Ok(()) + } +} + +pub mod curves { + use ark_ec::{ + short_weierstrass_jacobian::GroupProjective as SWProjective, + twisted_edwards_extended::GroupProjective as TEProjective, ProjectiveCurve, + }; + use ark_ff::{BitIteratorLE, Field, FpParameters, One, PrimeField}; + use ark_relations::r1cs::{ConstraintSystem, SynthesisError}; + use ark_std::{test_rng, vec::Vec, UniformRand}; + + use ark_r1cs_std::prelude::*; + + pub fn group_test() -> Result<(), SynthesisError> + where + C: ProjectiveCurve, + ConstraintF: Field, + GG: CurveVar, + for<'a> &'a GG: GroupOpsBounds<'a, C, GG>, + { + let modes = [ + AllocationMode::Input, + AllocationMode::Witness, + AllocationMode::Constant, + ]; + for &mode in &modes { + let cs = ConstraintSystem::::new_ref(); + + let mut rng = test_rng(); + let a_native = C::rand(&mut rng); + let b_native = C::rand(&mut rng); + let a = GG::new_variable(ark_relations::ns!(cs, "generate_a"), || Ok(a_native), mode) + .unwrap(); + let b = GG::new_variable(ark_relations::ns!(cs, "generate_b"), || Ok(b_native), mode) + .unwrap(); + + let zero = GG::zero(); + assert_eq!(zero.value()?, zero.value()?); + + // a == a + assert_eq!(a.value()?, a.value()?); + // a + 0 = a + assert_eq!((&a + &zero).value()?, a.value()?); + // a - 0 = a + assert_eq!((&a - &zero).value()?, a.value()?); + // a - a = 0 + assert_eq!((&a - &a).value()?, zero.value()?); + // a + b = b + a + let a_b = &a + &b; + let b_a = &b + &a; + assert_eq!(a_b.value()?, b_a.value()?); + a_b.enforce_equal(&b_a)?; + assert!(cs.is_satisfied().unwrap()); + + // (a + b) + a = a + (b + a) + let ab_a = &a_b + &a; + let a_ba = &a + &b_a; + assert_eq!(ab_a.value()?, a_ba.value()?); + ab_a.enforce_equal(&a_ba)?; + assert!(cs.is_satisfied().unwrap()); + + // a.double() = a + a + let a_a = &a + &a; + let mut a2 
= a.clone(); + a2.double_in_place()?; + a2.enforce_equal(&a_a)?; + assert_eq!(a2.value()?, a_native.double()); + assert_eq!(a_a.value()?, a_native.double()); + assert_eq!(a2.value()?, a_a.value()?); + assert!(cs.is_satisfied().unwrap()); + + // b.double() = b + b + let mut b2 = b.clone(); + b2.double_in_place()?; + let b_b = &b + &b; + b2.enforce_equal(&b_b)?; + assert!(cs.is_satisfied().unwrap()); + assert_eq!(b2.value()?, b_b.value()?); + + let _ = a.to_bytes()?; + assert!(cs.is_satisfied().unwrap()); + let _ = a.to_non_unique_bytes()?; + assert!(cs.is_satisfied().unwrap()); + + let _ = b.to_bytes()?; + let _ = b.to_non_unique_bytes()?; + if !cs.is_satisfied().unwrap() { + panic!( + "Unsatisfied in mode {:?}.\n{:?}", + mode, + cs.which_is_unsatisfied().unwrap() + ); + } + assert!(cs.is_satisfied().unwrap()); + + let modulus = <C::ScalarField as PrimeField>::Params::MODULUS + .as_ref() + .to_vec(); + let mut max = modulus.clone(); + for limb in &mut max { + *limb = u64::MAX; + } + + let modulus_last_limb_bits = <C::ScalarField as PrimeField>::Params::MODULUS_BITS % 64; + *max.last_mut().unwrap() >>= 64 - modulus_last_limb_bits; + let scalars = [ + C::ScalarField::rand(&mut rng).into_repr().as_ref().to_vec(), + vec![u64::rand(&mut rng)], + (-C::ScalarField::one()).into_repr().as_ref().to_vec(), + <C::ScalarField as PrimeField>::Params::MODULUS + .as_ref() + .to_vec(), + max, + vec![0; 50], + vec![1000012341233u64; 36], + ]; + + let mut input = vec![]; + + // Check scalar mul with edge cases + for scalar in scalars.iter() { + let native_result = a_native.mul(scalar); + let native_result = native_result.into_affine(); + + let scalar_bits: Vec<bool> = BitIteratorLE::new(&scalar).collect(); + input = + Vec::new_witness(ark_relations::ns!(cs, "bits"), || Ok(scalar_bits)).unwrap(); + let result = a + .scalar_mul_le(input.iter()) + .expect(&format!("Mode: {:?}", mode)); + let result_val = result.value()?.into_affine(); + assert_eq!( + result_val, native_result, + "gadget & native values are diff. after scalar mul {:?}", + scalar, + ); + assert!(cs.is_satisfied().unwrap()); + } + + let result = zero.scalar_mul_le(input.iter())?; + let result_val = result.value()?.into_affine(); + result.enforce_equal(&zero)?; + assert_eq!( + result_val, + C::zero().into_affine(), + "gadget & native values are diff. after scalar mul of zero" + ); + assert!(cs.is_satisfied().unwrap()); + } + Ok(()) + } + + pub fn sw_test<P, GG>() -> Result<(), SynthesisError> + where + P: ark_ec::SWModelParameters, + GG: CurveVar<SWProjective<P>, <P::BaseField as Field>::BasePrimeField>, + for<'a> &'a GG: GroupOpsBounds<'a, SWProjective
<P>
, GG>, + { + group_test::<SWProjective<P>, _, GG>()?; + let modes = [ + AllocationMode::Input, + AllocationMode::Witness, + AllocationMode::Constant, + ]; + for &mode in &modes { + use ark_ec::group::Group; + + let mut rng = test_rng(); + + let cs = ConstraintSystem::<<P::BaseField as Field>::BasePrimeField>::new_ref(); + + let a = SWProjective::
<P>
::rand(&mut rng); + let b = SWProjective::
<P>
::rand(&mut rng); + let a_affine = a.into_affine(); + let b_affine = b.into_affine(); + + let ns = ark_relations::ns!(cs, "allocating variables"); + let mut gadget_a = GG::new_variable(cs.clone(), || Ok(a), mode)?; + let gadget_b = GG::new_variable(cs.clone(), || Ok(b), mode)?; + let zero = GG::zero(); + drop(ns); + assert_eq!(gadget_a.value()?.into_affine().x, a_affine.x); + assert_eq!(gadget_a.value()?.into_affine().y, a_affine.y); + assert_eq!(gadget_b.value()?.into_affine().x, b_affine.x); + assert_eq!(gadget_b.value()?.into_affine().y, b_affine.y); + assert_eq!(cs.which_is_unsatisfied().unwrap(), None); + + // Check addition + let ab = a + &b; + let ab_affine = ab.into_affine(); + let gadget_ab = &gadget_a + &gadget_b; + let gadget_ba = &gadget_b + &gadget_a; + gadget_ba.enforce_equal(&gadget_ab)?; + + let ab_val = gadget_ab.value()?.into_affine(); + assert_eq!(ab_val, ab_affine, "Result of addition is unequal"); + assert!(cs.is_satisfied().unwrap()); + + let gadget_a_zero = &gadget_a + &zero; + gadget_a_zero.enforce_equal(&gadget_a)?; + + // Check doubling + let aa = Group::double(&a); + let aa_affine = aa.into_affine(); + gadget_a.double_in_place()?; + let aa_val = gadget_a.value()?.into_affine(); + assert_eq!( + aa_val, aa_affine, + "Gadget and native values are unequal after double." + ); + assert!(cs.is_satisfied().unwrap()); + + if !cs.is_satisfied().unwrap() { + panic!( + "Unsatisfied in mode {:?}.\n{:?}", + mode, + cs.which_is_unsatisfied().unwrap() + ); + } + + assert!(cs.is_satisfied().unwrap()); + } + Ok(()) + } + + pub fn te_test<P, GG>() -> Result<(), SynthesisError> + where + P: ark_ec::TEModelParameters, + GG: CurveVar<TEProjective<P>, <P::BaseField as Field>::BasePrimeField>, + for<'a> &'a GG: GroupOpsBounds<'a, TEProjective
<P>
, GG>, + { + group_test::<TEProjective<P>, _, GG>()?; + let modes = [ + AllocationMode::Input, + AllocationMode::Witness, + AllocationMode::Constant, + ]; + for &mode in &modes { + use ark_ec::group::Group; + + let mut rng = test_rng(); + + let cs = ConstraintSystem::<<P::BaseField as Field>::BasePrimeField>::new_ref(); + + let a = TEProjective::
<P>
::rand(&mut rng); + let b = TEProjective::
<P>
::rand(&mut rng); + let a_affine = a.into_affine(); + let b_affine = b.into_affine(); + + let ns = ark_relations::ns!(cs, "allocating variables"); + let mut gadget_a = GG::new_variable(cs.clone(), || Ok(a), mode)?; + let gadget_b = GG::new_variable(cs.clone(), || Ok(b), mode)?; + drop(ns); + + assert_eq!(gadget_a.value()?.into_affine().x, a_affine.x); + assert_eq!(gadget_a.value()?.into_affine().y, a_affine.y); + assert_eq!(gadget_b.value()?.into_affine().x, b_affine.x); + assert_eq!(gadget_b.value()?.into_affine().y, b_affine.y); + assert_eq!(cs.which_is_unsatisfied()?, None); + + // Check addition + let ab = a + &b; + let ab_affine = ab.into_affine(); + let gadget_ab = &gadget_a + &gadget_b; + let gadget_ba = &gadget_b + &gadget_a; + gadget_ba.enforce_equal(&gadget_ab)?; + + let ab_val = gadget_ab.value()?.into_affine(); + assert_eq!(ab_val, ab_affine, "Result of addition is unequal"); + assert!(cs.is_satisfied().unwrap()); + + // Check doubling + let aa = Group::double(&a); + let aa_affine = aa.into_affine(); + gadget_a.double_in_place()?; + let aa_val = gadget_a.value()?.into_affine(); + assert_eq!( + aa_val, aa_affine, + "Gadget and native values are unequal after double." + ); + assert!(cs.is_satisfied().unwrap()); + + if !cs.is_satisfied().unwrap() { + panic!( + "Unsatisfied in mode {:?}.\n{:?}", + mode, + cs.which_is_unsatisfied().unwrap() + ); + } + + assert!(cs.is_satisfied().unwrap()); + } + Ok(()) + } +} + +pub mod pairing { + use ark_ec::{PairingEngine, ProjectiveCurve}; + use ark_ff::{BitIteratorLE, Field, PrimeField}; + use ark_r1cs_std::prelude::*; + use ark_relations::r1cs::{ConstraintSystem, SynthesisError}; + use ark_std::{test_rng, vec::Vec, UniformRand}; + + #[allow(dead_code)] + pub fn bilinearity_test>() -> Result<(), SynthesisError> + where + for<'a> &'a P::G1Var: GroupOpsBounds<'a, E::G1Projective, P::G1Var>, + for<'a> &'a P::G2Var: GroupOpsBounds<'a, E::G2Projective, P::G2Var>, + for<'a> &'a P::GTVar: FieldOpsBounds<'a, E::Fqk, P::GTVar>, + { + let modes = [ + AllocationMode::Input, + AllocationMode::Witness, + AllocationMode::Constant, + ]; + for &mode in &modes { + let cs = ConstraintSystem::::new_ref(); + + let mut rng = test_rng(); + let a = E::G1Projective::rand(&mut rng); + let b = E::G2Projective::rand(&mut rng); + let s = E::Fr::rand(&mut rng); + + let mut sa = a; + sa *= s; + let mut sb = b; + sb *= s; + + let a_g = P::G1Var::new_variable(cs.clone(), || Ok(a.into_affine()), mode)?; + let b_g = P::G2Var::new_variable(cs.clone(), || Ok(b.into_affine()), mode)?; + let sa_g = P::G1Var::new_variable(cs.clone(), || Ok(sa.into_affine()), mode)?; + let sb_g = P::G2Var::new_variable(cs.clone(), || Ok(sb.into_affine()), mode)?; + + let mut _preparation_num_constraints = cs.num_constraints(); + let a_prep_g = P::prepare_g1(&a_g)?; + let b_prep_g = P::prepare_g2(&b_g)?; + _preparation_num_constraints = cs.num_constraints() - _preparation_num_constraints; + + let sa_prep_g = P::prepare_g1(&sa_g)?; + let sb_prep_g = P::prepare_g2(&sb_g)?; + + let (ans1_g, ans1_n) = { + let _ml_constraints = cs.num_constraints(); + let ml_g = P::miller_loop(&[sa_prep_g], &[b_prep_g.clone()])?; + let _fe_constraints = cs.num_constraints(); + let ans_g = P::final_exponentiation(&ml_g)?; + let ans_n = E::pairing(sa, b); + (ans_g, ans_n) + }; + + let (ans2_g, ans2_n) = { + let ans_g = P::pairing(a_prep_g.clone(), sb_prep_g)?; + let ans_n = E::pairing(a, sb); + (ans_g, ans_n) + }; + + let (ans3_g, ans3_n) = { + let s_iter = BitIteratorLE::without_trailing_zeros(s.into_repr()) + 
.map(Boolean::constant) + .collect::>(); + + let mut ans_g = P::pairing(a_prep_g, b_prep_g)?; + let mut ans_n = E::pairing(a, b); + ans_n = ans_n.pow(s.into_repr()); + ans_g = ans_g.pow_le(&s_iter)?; + + (ans_g, ans_n) + }; + + ans1_g.enforce_equal(&ans2_g)?; + ans2_g.enforce_equal(&ans3_g)?; + + assert_eq!(ans1_g.value()?, ans1_n, "Failed native test 1"); + assert_eq!(ans2_g.value()?, ans2_n, "Failed native test 2"); + assert_eq!(ans3_g.value()?, ans3_n, "Failed native test 3"); + + assert_eq!(ans1_n, ans2_n, "Failed ans1_native == ans2_native"); + assert_eq!(ans2_n, ans3_n, "Failed ans2_native == ans3_native"); + assert_eq!(ans1_g.value()?, ans3_g.value()?, "Failed ans1 == ans3"); + assert_eq!(ans1_g.value()?, ans2_g.value()?, "Failed ans1 == ans2"); + assert_eq!(ans2_g.value()?, ans3_g.value()?, "Failed ans2 == ans3"); + + if !cs.is_satisfied().unwrap() { + panic!( + "Unsatisfied in mode {:?}.\n{:?}", + mode, + cs.which_is_unsatisfied().unwrap() + ); + } + + assert!(cs.is_satisfied().unwrap(), "cs is not satisfied"); + } + Ok(()) + } +} diff --git a/arkworks/curves/ed_on_bls12_377/Cargo.toml b/arkworks/curves/ed_on_bls12_377/Cargo.toml new file mode 100644 index 00000000..6731491a --- /dev/null +++ b/arkworks/curves/ed_on_bls12_377/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "ark-ed-on-bls12-377" +version = "0.3.0" +authors = [ "arkworks contributors" ] +description = "A Twisted Edwards curve defined over the scalar field of the BLS12-377 curve" +homepage = "https://arkworks.rs" +repository = "https://github.com/arkworks-rs/algebra" +documentation = "https://docs.rs/ark-ed-on-bls12-377/" +keywords = ["cryptography", "finite-fields", "elliptic-curves" ] +categories = ["cryptography"] +include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +license = "MIT/Apache-2.0" +edition = "2018" + +[dependencies] +ark-ff = { path = "../../algebra/ff", version = "^0.3.0", default-features = false } +ark-ec = { path = "../../algebra/ec", version = "^0.3.0", default-features = false } +ark-std = { path = "../../std", version = "^0.3.0", default-features = false } +ark-r1cs-std = { path = "../../r1cs-std", version = "^0.3.0", default-features = false, optional = true } +ark-bls12-377 = { version = "^0.3.0", path = "../bls12_377", default-features = false, features = [ "scalar_field" ] } + +[dev-dependencies] +ark-relations = { version = "^0.3.0", default-features = false } +ark-serialize = { version = "^0.3.0", default-features = false } +ark-algebra-test-templates = { version = "^0.3.0", default-features = false } +ark-curve-constraint-tests = { path = "../curve-constraint-tests", default-features = false } + +[features] +default = [] +std = [ "ark-std/std", "ark-ff/std", "ark-ec/std", "ark-bls12-377/std" ] +r1cs = [ "ark-r1cs-std" ] diff --git a/arkworks/curves/ed_on_bls12_377/src/constraints/curves.rs b/arkworks/curves/ed_on_bls12_377/src/constraints/curves.rs new file mode 100644 index 00000000..4b15106d --- /dev/null +++ b/arkworks/curves/ed_on_bls12_377/src/constraints/curves.rs @@ -0,0 +1,12 @@ +use crate::*; +use ark_r1cs_std::groups::curves::twisted_edwards::AffineVar; + +use crate::constraints::FqVar; + +/// A variable that is the R1CS equivalent of `crate::EdwardsAffine`. 
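+/// (Concretely `AffineVar<EdwardsParameters, FqVar>`: affine twisted Edwards coordinates whose entries are `FqVar`s.)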
+pub type EdwardsVar = AffineVar; + +#[test] +fn test() { + ark_curve_constraint_tests::curves::te_test::().unwrap(); +} diff --git a/arkworks/curves/ed_on_bls12_377/src/constraints/fields.rs b/arkworks/curves/ed_on_bls12_377/src/constraints/fields.rs new file mode 100644 index 00000000..f25fa336 --- /dev/null +++ b/arkworks/curves/ed_on_bls12_377/src/constraints/fields.rs @@ -0,0 +1,10 @@ +use crate::fq::Fq; +use ark_r1cs_std::fields::fp::FpVar; + +/// A variable that is the R1CS equivalent of `crate::Fq`. +pub type FqVar = FpVar; + +#[test] +fn test() { + ark_curve_constraint_tests::fields::field_test::<_, _, FqVar>().unwrap(); +} diff --git a/arkworks/curves/ed_on_bls12_377/src/constraints/mod.rs b/arkworks/curves/ed_on_bls12_377/src/constraints/mod.rs new file mode 100644 index 00000000..889fa1e3 --- /dev/null +++ b/arkworks/curves/ed_on_bls12_377/src/constraints/mod.rs @@ -0,0 +1,107 @@ +//! This module implements the R1CS equivalent of `ark_ed_on_bls12_377`. +//! +//! It implements field variables for `crate::Fq`, +//! and group variables for `crate::GroupProjective`. +//! +//! The field underlying these constraints is `crate::Fq`. +//! +//! # Examples +//! +//! One can perform standard algebraic operations on `FqVar`: +//! +//! ``` +//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { +//! use ark_std::UniformRand; +//! use ark_relations::r1cs::*; +//! use ark_r1cs_std::prelude::*; +//! use ark_ed_on_bls12_377::{*, constraints::*}; +//! +//! let cs = ConstraintSystem::::new_ref(); +//! // This rng is just for test purposes; do not use it +//! // in real applications. +//! let mut rng = ark_std::test_rng(); +//! +//! // Generate some random `Fq` elements. +//! let a_native = Fq::rand(&mut rng); +//! let b_native = Fq::rand(&mut rng); +//! +//! // Allocate `a_native` and `b_native` as witness variables in `cs`. +//! let a = FqVar::new_witness(ark_relations::ns!(cs, "generate_a"), || Ok(a_native))?; +//! let b = FqVar::new_witness(ark_relations::ns!(cs, "generate_b"), || Ok(b_native))?; +//! +//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any +//! // constraints or variables. +//! let a_const = FqVar::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?; +//! let b_const = FqVar::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?; +//! +//! let one = FqVar::one(); +//! let zero = FqVar::zero(); +//! +//! // Sanity check one + one = two +//! let two = &one + &one + &zero; +//! two.enforce_equal(&one.double()?)?; +//! +//! assert!(cs.is_satisfied()?); +//! +//! // Check that the value of &a + &b is correct. +//! assert_eq!((&a + &b).value()?, a_native + &b_native); +//! +//! // Check that the value of &a * &b is correct. +//! assert_eq!((&a * &b).value()?, a_native * &b_native); +//! +//! // Check that operations on variables and constants are equivalent. +//! (&a + &b).enforce_equal(&(&a_const + &b_const))?; +//! assert!(cs.is_satisfied()?); +//! # Ok(()) +//! # } +//! ``` +//! +//! One can also perform standard algebraic operations on `EdwardsVar`: +//! +//! ``` +//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { +//! # use ark_std::UniformRand; +//! # use ark_relations::r1cs::*; +//! # use ark_r1cs_std::prelude::*; +//! # use ark_ed_on_bls12_377::{*, constraints::*}; +//! +//! # let cs = ConstraintSystem::::new_ref(); +//! # let mut rng = ark_std::test_rng(); +//! +//! // Generate some random `Edwards` elements. +//! let a_native = EdwardsProjective::rand(&mut rng); +//! 
let b_native = EdwardsProjective::rand(&mut rng); +//! +//! // Allocate `a_native` and `b_native` as witness variables in `cs`. +//! let a = EdwardsVar::new_witness(ark_relations::ns!(cs, "a"), || Ok(a_native))?; +//! let b = EdwardsVar::new_witness(ark_relations::ns!(cs, "b"), || Ok(b_native))?; +//! +//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any +//! // constraints or variables. +//! let a_const = EdwardsVar::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?; +//! let b_const = EdwardsVar::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?; +//! +//! // This returns the identity. +//! let zero = EdwardsVar::zero(); +//! +//! // Sanity check one + one = two +//! let two_a = &a + &a + &zero; +//! two_a.enforce_equal(&a.double()?)?; +//! +//! assert!(cs.is_satisfied()?); +//! +//! // Check that the value of &a + &b is correct. +//! assert_eq!((&a + &b).value()?, a_native + &b_native); +//! +//! // Check that operations on variables and constants are equivalent. +//! (&a + &b).enforce_equal(&(&a_const + &b_const))?; +//! assert!(cs.is_satisfied()?); +//! # Ok(()) +//! # } +//! ``` + +mod curves; +mod fields; + +pub use curves::*; +pub use fields::*; diff --git a/arkworks/curves/ed_on_bls12_377/src/curves/mod.rs b/arkworks/curves/ed_on_bls12_377/src/curves/mod.rs new file mode 100644 index 00000000..b236b8ec --- /dev/null +++ b/arkworks/curves/ed_on_bls12_377/src/curves/mod.rs @@ -0,0 +1,73 @@ +use crate::{fq::Fq, fr::Fr}; +use ark_ec::{ + models::{ModelParameters, MontgomeryModelParameters, TEModelParameters}, + twisted_edwards_extended::{GroupAffine, GroupProjective}, +}; +use ark_ff::field_new; + +#[cfg(test)] +mod tests; + +pub type EdwardsAffine = GroupAffine; +pub type EdwardsProjective = GroupProjective; + +#[derive(Clone, Default, PartialEq, Eq)] +pub struct EdwardsParameters; + +impl ModelParameters for EdwardsParameters { + type BaseField = Fq; + type ScalarField = Fr; +} + +impl TEModelParameters for EdwardsParameters { + /// COEFF_A = -1 + #[rustfmt::skip] + const COEFF_A: Fq = field_new!(Fq, "-1"); + + /// COEFF_D = 3021 + #[rustfmt::skip] + const COEFF_D: Fq = field_new!(Fq, "3021"); + + /// COFACTOR = 4 + const COFACTOR: &'static [u64] = &[4]; + + /// COFACTOR_INV = + /// 527778859339273151515551558673846658209717731602102048798421311598680340096 + #[rustfmt::skip] + const COFACTOR_INV: Fr = field_new!(Fr, "527778859339273151515551558673846658209717731602102048798421311598680340096"); + + /// Generated randomly + const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = (GENERATOR_X, GENERATOR_Y); + + type MontgomeryModelParameters = EdwardsParameters; + + /// Multiplication by `a` is just negation. + /// Is `a` 1 or -1? 
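+ /// Since `COEFF_A` above is -1, the answer is -1: negating the element implements multiplication by `a`.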
+ #[inline(always)] + fn mul_by_a(elem: &Self::BaseField) -> Self::BaseField { + -*elem + } +} + +impl MontgomeryModelParameters for EdwardsParameters { + /// COEFF_A = 0x8D26E3FADA9010A26949031ECE3971B93952AD84D4753DDEDB748DA37E8F552 + /// = 3990301581132929505568273333084066329187552697088022219156688740916631500114 + #[rustfmt::skip] + const COEFF_A: Fq = field_new!(Fq, "3990301581132929505568273333084066329187552697088022219156688740916631500114"); + /// COEFF_B = 0x9D8F71EEC83A44C3A1FBCEC6F5418E5C6154C2682B8AC231C5A3725C8170AAD + /// = 4454160168295440918680551605697480202188346638066041608778544715000777738925 + #[rustfmt::skip] + const COEFF_B: Fq = field_new!(Fq, "4454160168295440918680551605697480202188346638066041608778544715000777738925"); + + type TEModelParameters = EdwardsParameters; +} + +/// GENERATOR_X = +/// 4497879464030519973909970603271755437257548612157028181994697785683032656389, +#[rustfmt::skip] +const GENERATOR_X: Fq = field_new!(Fq, "4497879464030519973909970603271755437257548612157028181994697785683032656389"); + +/// GENERATOR_Y = +/// 4357141146396347889246900916607623952598927460421559113092863576544024487809 +#[rustfmt::skip] +const GENERATOR_Y: Fq = field_new!(Fq, "4357141146396347889246900916607623952598927460421559113092863576544024487809"); diff --git a/arkworks/curves/ed_on_bls12_377/src/curves/tests.rs b/arkworks/curves/ed_on_bls12_377/src/curves/tests.rs new file mode 100644 index 00000000..58db359a --- /dev/null +++ b/arkworks/curves/ed_on_bls12_377/src/curves/tests.rs @@ -0,0 +1,62 @@ +use ark_ec::{AffineCurve, ProjectiveCurve}; +use ark_std::rand::Rng; +use ark_std::test_rng; + +use crate::*; + +use ark_algebra_test_templates::{curves::*, groups::*}; + +#[test] +fn test_projective_curve() { + curve_tests::(); + + edwards_tests::(); +} + +#[test] +fn test_projective_group() { + let mut rng = test_rng(); + let a = rng.gen(); + let b = rng.gen(); + for _i in 0..100 { + group_test::(a, b); + } +} + +#[test] +fn test_affine_group() { + let mut rng = test_rng(); + let a: EdwardsAffine = rng.gen(); + let b: EdwardsAffine = rng.gen(); + for _i in 0..100 { + group_test::(a, b); + } +} + +#[test] +fn test_generator() { + let generator = EdwardsAffine::prime_subgroup_generator(); + assert!(generator.is_on_curve()); + assert!(generator.is_in_correct_subgroup_assuming_on_curve()); +} + +#[test] +fn test_conversion() { + let mut rng = test_rng(); + let a: EdwardsAffine = rng.gen(); + let b: EdwardsAffine = rng.gen(); + let a_b = { + use ark_ec::group::Group; + (a + &b).double().double() + }; + let a_b2 = (a.into_projective() + &b.into_projective()) + .double() + .double(); + assert_eq!(a_b, a_b2.into_affine()); + assert_eq!(a_b.into_projective(), a_b2); +} + +#[test] +fn test_montgomery_conversion() { + montgomery_conversion_test::(); +} diff --git a/arkworks/curves/ed_on_bls12_377/src/fields/fq.rs b/arkworks/curves/ed_on_bls12_377/src/fields/fq.rs new file mode 100644 index 00000000..b743999f --- /dev/null +++ b/arkworks/curves/ed_on_bls12_377/src/fields/fq.rs @@ -0,0 +1 @@ +pub use ark_bls12_377::{Fr as Fq, FrParameters as FqParameters}; diff --git a/arkworks/curves/ed_on_bls12_377/src/fields/fr.rs b/arkworks/curves/ed_on_bls12_377/src/fields/fr.rs new file mode 100644 index 00000000..2812425b --- /dev/null +++ b/arkworks/curves/ed_on_bls12_377/src/fields/fr.rs @@ -0,0 +1,83 @@ +use ark_ff::{ + biginteger::BigInteger256 as BigInteger, + fields::{FftParameters, Fp256, Fp256Parameters, FpParameters}, +}; + +pub type Fr = Fp256; + +pub struct FrParameters; + 
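+// These constants follow the Montgomery-form convention of `ark_ff` 0.3
+// (stated here as an assumption for the reader, not checked by the compiler):
+// R = 2^256 mod MODULUS, R2 = R^2 mod MODULUS (used to convert values into
+// Montgomery form), and INV = -MODULUS^{-1} mod 2^64. A minimal sanity-check
+// sketch for INV against the low limb of MODULUS:
+//
+//     let p0: u64 = 13356249993388743167; // lowest limb of MODULUS
+//     let inv: u64 = 9659935179256617473; // INV
+//     assert_eq!(p0.wrapping_mul(inv), u64::MAX); // p * -(p^{-1}) == -1 mod 2^64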
+impl Fp256Parameters for FrParameters {} +impl FftParameters for FrParameters { + type BigInt = BigInteger; + + const TWO_ADICITY: u32 = 1; + + #[rustfmt::skip] + const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([ + 15170730761708361161u64, + 13670723686578117817u64, + 12803492266614043665u64, + 50861023252832611u64, + ]); +} +impl FpParameters for FrParameters { + /// MODULUS = 2111115437357092606062206234695386632838870926408408195193685246394721360383 + #[rustfmt::skip] + const MODULUS: BigInteger = BigInteger([ + 13356249993388743167u64, + 5950279507993463550u64, + 10965441865914903552u64, + 336320092672043349u64, + ]); + + const MODULUS_BITS: u32 = 251; + + const CAPACITY: u32 = Self::MODULUS_BITS - 1; + + const REPR_SHAVE_BITS: u32 = 5; + + #[rustfmt::skip] + const R: BigInteger = BigInteger([ + 16632263305389933622u64, + 10726299895124897348u64, + 16608693673010411502u64, + 285459069419210737u64, + ]); + + #[rustfmt::skip] + const R2: BigInteger = BigInteger([ + 3987543627614508126u64, + 17742427666091596403u64, + 14557327917022607905u64, + 322810149704226881u64, + ]); + + const INV: u64 = 9659935179256617473u64; + + // 70865795004005329077606947863872807680085016823885970091001235374859923341923 + #[rustfmt::skip] + const GENERATOR: BigInteger = BigInteger([ + 11289572479685143826u64, + 11383637369941080925u64, + 2288212753973340071u64, + 82014976407880291u64, + ]); + + #[rustfmt::skip] + const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 6678124996694371583u64, + 2975139753996731775u64, + 14706092969812227584u64, + 168160046336021674u64, + ]); + + const T: BigInteger = Self::MODULUS_MINUS_ONE_DIV_TWO; + + const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0xae56bba6b0cff67f, + 0x14a4e8ebf10f22bf, + 0x660b44d1e5c37b00, + 0x12ab655e9a2ca55, + ]); +} diff --git a/arkworks/curves/ed_on_bls12_377/src/fields/mod.rs b/arkworks/curves/ed_on_bls12_377/src/fields/mod.rs new file mode 100644 index 00000000..10e71c55 --- /dev/null +++ b/arkworks/curves/ed_on_bls12_377/src/fields/mod.rs @@ -0,0 +1,8 @@ +pub mod fq; +pub mod fr; + +pub use fq::*; +pub use fr::*; + +#[cfg(all(feature = "ed_on_bls12_377", test))] +mod tests; diff --git a/arkworks/curves/ed_on_bls12_377/src/fields/tests.rs b/arkworks/curves/ed_on_bls12_377/src/fields/tests.rs new file mode 100644 index 00000000..173b5ee2 --- /dev/null +++ b/arkworks/curves/ed_on_bls12_377/src/fields/tests.rs @@ -0,0 +1,24 @@ +use ark_std::rand::Rng; +use ark_std::test_rng; + +use crate::{Fq, Fr}; + +use ark_algebra_test_templates::fields::*; + +#[test] +fn test_fr() { + let mut rng = test_rng(); + let a: Fr = rng.gen(); + let b: Fr = rng.gen(); + field_test(a, b); + primefield_test::(); +} + +#[test] +fn test_fq() { + let mut rng = test_rng(); + let a: Fq = rng.gen(); + let b: Fq = rng.gen(); + field_test(a, b); + primefield_test::(); +} diff --git a/arkworks/curves/ed_on_bls12_377/src/lib.rs b/arkworks/curves/ed_on_bls12_377/src/lib.rs new file mode 100644 index 00000000..89122791 --- /dev/null +++ b/arkworks/curves/ed_on_bls12_377/src/lib.rs @@ -0,0 +1,31 @@ +#![cfg_attr(not(feature = "std"), no_std)] +#![deny( + warnings, + unused, + future_incompatible, + nonstandard_style, + rust_2018_idioms +)] +#![forbid(unsafe_code)] + +//! This library implements a twisted Edwards curve whose base field is the scalar field of the +//! curve BLS12-377. This allows defining cryptographic primitives that use elliptic curves over +//! the scalar field of the latter curve. This curve was generated as part of the paper +//! 
[\[BCGMMW20, “Zexe”\]](https://eprint.iacr.org/2018/962). +//! +//! Curve information: +//! * Base field: q = 8444461749428370424248824938781546531375899335154063827935233455917409239041 +//! * Scalar field: r = 2111115437357092606062206234695386632838870926408408195193685246394721360383 +//! * Valuation(q - 1, 2) = 47 +//! * Valuation(r - 1, 2) = 1 +//! * Curve equation: ax^2 + y^2 =1 + dx^2y^2, where +//! * a = -1 +//! * d = 3021 + +#[cfg(feature = "r1cs")] +pub mod constraints; +mod curves; +mod fields; + +pub use curves::*; +pub use fields::*; diff --git a/arkworks/curves/ed_on_bls12_381/Cargo.toml b/arkworks/curves/ed_on_bls12_381/Cargo.toml new file mode 100644 index 00000000..c584cc1f --- /dev/null +++ b/arkworks/curves/ed_on_bls12_381/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "ark-ed-on-bls12-381" +version = "0.3.0" +authors = [ "arkworks contributors" ] +description = "A Twisted Edwards curve defined over the scalar field of the BLS12-381 curve" +homepage = "https://arkworks.rs" +repository = "https://github.com/arkworks-rs/algebra" +documentation = "https://docs.rs/ark-ed-on-bls12-381/" +keywords = ["cryptography", "finite-fields", "elliptic-curves" ] +categories = ["cryptography"] +include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +license = "MIT/Apache-2.0" +edition = "2018" + +[dependencies] +ark-ff = { version = "^0.3.0", default-features = false } +ark-ec = { version = "^0.3.0", default-features = false } +ark-std = { version = "^0.3.0", default-features = false } +ark-r1cs-std = { version = "^0.3.0", default-features = false, optional = true } +ark-bls12-381 = { version = "^0.3.0", path = "../bls12_381", default-features = false, features = [ "scalar_field" ] } + +[dev-dependencies] +ark-relations = { version = "^0.3.0", default-features = false } +ark-serialize = { version = "^0.3.0", default-features = false } +ark-algebra-test-templates = { version = "^0.3.0", default-features = false } +ark-curve-constraint-tests = { path = "../curve-constraint-tests", default-features = false } + +[features] +default = [] +std = [ "ark-std/std", "ark-ff/std", "ark-ec/std", "ark-bls12-381/std" ] +r1cs = ["ark-r1cs-std"] \ No newline at end of file diff --git a/arkworks/curves/ed_on_bls12_381/src/constraints/curves.rs b/arkworks/curves/ed_on_bls12_381/src/constraints/curves.rs new file mode 100644 index 00000000..9c9f7837 --- /dev/null +++ b/arkworks/curves/ed_on_bls12_381/src/constraints/curves.rs @@ -0,0 +1,12 @@ +use crate::*; +use ark_r1cs_std::groups::curves::twisted_edwards::AffineVar; + +use crate::constraints::FqVar; + +/// A variable that is the R1CS equivalent of `crate::EdwardsAffine`. +pub type EdwardsVar = AffineVar; + +#[test] +fn test() { + ark_curve_constraint_tests::curves::te_test::<_, EdwardsVar>().unwrap(); +} diff --git a/arkworks/curves/ed_on_bls12_381/src/constraints/fields.rs b/arkworks/curves/ed_on_bls12_381/src/constraints/fields.rs new file mode 100644 index 00000000..3f81d7a3 --- /dev/null +++ b/arkworks/curves/ed_on_bls12_381/src/constraints/fields.rs @@ -0,0 +1,9 @@ +use ark_r1cs_std::fields::fp::FpVar; + +/// A variable that is the R1CS equivalent of `crate::Fq`. 
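+/// (Concretely `FpVar<Fq>`, i.e. a native field variable over the BLS12-381 scalar field.)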
+pub type FqVar = FpVar; + +#[test] +fn test() { + ark_curve_constraint_tests::fields::field_test::<_, _, FqVar>().unwrap(); +} diff --git a/arkworks/curves/ed_on_bls12_381/src/constraints/mod.rs b/arkworks/curves/ed_on_bls12_381/src/constraints/mod.rs new file mode 100644 index 00000000..eda12f84 --- /dev/null +++ b/arkworks/curves/ed_on_bls12_381/src/constraints/mod.rs @@ -0,0 +1,107 @@ +//! This module implements the R1CS equivalent of `ark_ed_on_bls12_381`. +//! +//! It implements field variables for `crate::Fq`, +//! and group variables for `crate::GroupProjective`. +//! +//! The field underlying these constraints is `crate::Fq`. +//! +//! # Examples +//! +//! One can perform standard algebraic operations on `FqVar`: +//! +//! ``` +//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { +//! use ark_std::UniformRand; +//! use ark_relations::r1cs::*; +//! use ark_r1cs_std::prelude::*; +//! use ark_ed_on_bls12_381::{*, constraints::*}; +//! +//! let cs = ConstraintSystem::::new_ref(); +//! // This rng is just for test purposes; do not use it +//! // in real applications. +//! let mut rng = ark_std::test_rng(); +//! +//! // Generate some random `Fq` elements. +//! let a_native = Fq::rand(&mut rng); +//! let b_native = Fq::rand(&mut rng); +//! +//! // Allocate `a_native` and `b_native` as witness variables in `cs`. +//! let a = FqVar::new_witness(ark_relations::ns!(cs, "generate_a"), || Ok(a_native))?; +//! let b = FqVar::new_witness(ark_relations::ns!(cs, "generate_b"), || Ok(b_native))?; +//! +//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any +//! // constraints or variables. +//! let a_const = FqVar::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?; +//! let b_const = FqVar::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?; +//! +//! let one = FqVar::one(); +//! let zero = FqVar::zero(); +//! +//! // Sanity check one + one = two +//! let two = &one + &one + &zero; +//! two.enforce_equal(&one.double()?)?; +//! +//! assert!(cs.is_satisfied()?); +//! +//! // Check that the value of &a + &b is correct. +//! assert_eq!((&a + &b).value()?, a_native + &b_native); +//! +//! // Check that the value of &a * &b is correct. +//! assert_eq!((&a * &b).value()?, a_native * &b_native); +//! +//! // Check that operations on variables and constants are equivalent. +//! (&a + &b).enforce_equal(&(&a_const + &b_const))?; +//! assert!(cs.is_satisfied()?); +//! # Ok(()) +//! # } +//! ``` +//! +//! One can also perform standard algebraic operations on `EdwardsVar`: +//! +//! ``` +//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { +//! # use ark_std::UniformRand; +//! # use ark_relations::r1cs::*; +//! # use ark_r1cs_std::prelude::*; +//! # use ark_ed_on_bls12_381::{*, constraints::*}; +//! +//! # let cs = ConstraintSystem::::new_ref(); +//! # let mut rng = ark_std::test_rng(); +//! +//! // Generate some random `Edwards` elements. +//! let a_native = EdwardsProjective::rand(&mut rng); +//! let b_native = EdwardsProjective::rand(&mut rng); +//! +//! // Allocate `a_native` and `b_native` as witness variables in `cs`. +//! let a = EdwardsVar::new_witness(ark_relations::ns!(cs, "a"), || Ok(a_native))?; +//! let b = EdwardsVar::new_witness(ark_relations::ns!(cs, "b"), || Ok(b_native))?; +//! +//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any +//! // constraints or variables. +//! let a_const = EdwardsVar::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?; +//! 
+//! let b_const = EdwardsVar::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?;
+//!
+//! // This returns the identity of `Edwards`.
+//! let zero = EdwardsVar::zero();
+//!
+//! // Sanity check a + a = 2a
+//! let two_a = &a + &a + &zero;
+//! two_a.enforce_equal(&a.double()?)?;
+//!
+//! assert!(cs.is_satisfied()?);
+//!
+//! // Check that the value of &a + &b is correct.
+//! assert_eq!((&a + &b).value()?, a_native + &b_native);
+//!
+//! // Check that operations on variables and constants are equivalent.
+//! (&a + &b).enforce_equal(&(&a_const + &b_const))?;
+//! assert!(cs.is_satisfied()?);
+//! # Ok(())
+//! # }
+//! ```
+
+mod curves;
+mod fields;
+
+pub use curves::*;
+pub use fields::*;
diff --git a/arkworks/curves/ed_on_bls12_381/src/curves/mod.rs b/arkworks/curves/ed_on_bls12_381/src/curves/mod.rs
new file mode 100644
index 00000000..affbba01
--- /dev/null
+++ b/arkworks/curves/ed_on_bls12_381/src/curves/mod.rs
@@ -0,0 +1,86 @@
+use crate::{Fq, Fr};
+use ark_ec::{
+    models::{ModelParameters, MontgomeryModelParameters, TEModelParameters},
+    twisted_edwards_extended::{GroupAffine, GroupProjective},
+};
+use ark_ff::field_new;
+
+#[cfg(test)]
+mod tests;
+
+pub type EdwardsAffine = GroupAffine<EdwardsParameters>;
+pub type EdwardsProjective = GroupProjective<EdwardsParameters>;
+
+/// `JubJub` is a twisted Edwards curve. These curves have equations of the
+/// form: ax² + y² = 1 + dx²y²,
+/// over some base finite field Fq.
+///
+/// JubJub's curve equation: -x² + y² = 1 - (10240/10241)x²y²
+///
+/// q = 52435875175126190479447740508185965837690552500527637822603658699938581184513.
+///
+/// a = -1.
+/// d = -(10240/10241) mod q
+///   = 19257038036680949359750312669786877991949435402254120286184196891950884077233.
+///
+/// Sage script to calculate these:
+///
+/// ```text
+/// q = 52435875175126190479447740508185965837690552500527637822603658699938581184513
+/// Fq = GF(q)
+/// d = -(Fq(10240)/Fq(10241))
+/// ```
+/// These parameters and the sage script obtained from:
+///
+#[derive(Clone, Default, PartialEq, Eq)]
+pub struct EdwardsParameters;
+
+impl ModelParameters for EdwardsParameters {
+    type BaseField = Fq;
+    type ScalarField = Fr;
+}
+
+impl TEModelParameters for EdwardsParameters {
+    /// COEFF_A = -1
+    #[rustfmt::skip]
+    const COEFF_A: Fq = field_new!(Fq, "-1");
+
+    /// COEFF_D = -(10240/10241) mod q
+    #[rustfmt::skip]
+    const COEFF_D: Fq = field_new!(Fq, "19257038036680949359750312669786877991949435402254120286184196891950884077233");
+
+    /// COFACTOR = 8
+    const COFACTOR: &'static [u64] = &[8];
+
+    /// COFACTOR^(-1) mod r =
+    /// 819310549611346726241370945440405716213240158234039660170669895299022906775
+    #[rustfmt::skip]
+    const COFACTOR_INV: Fr = field_new!(Fr, "819310549611346726241370945440405716213240158234039660170669895299022906775");
+
+    /// AFFINE_GENERATOR_COEFFS = (GENERATOR_X, GENERATOR_Y)
+    const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = (GENERATOR_X, GENERATOR_Y);
+
+    type MontgomeryModelParameters = EdwardsParameters;
+
+    /// Multiplication by `a` is simply negation here.
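+    ///
+    /// A quick sanity check (a sketch relying only on the public `ark_ff`/`ark_ec`
+    /// APIs): negation agrees with `COEFF_A = -1`, and `COEFF_D` matches the
+    /// fraction documented above.
+    ///
+    /// ```
+    /// use ark_ec::models::TEModelParameters;
+    /// use ark_ed_on_bls12_381::{EdwardsParameters, Fq};
+    ///
+    /// let d = -(Fq::from(10240u64) / Fq::from(10241u64));
+    /// assert_eq!(EdwardsParameters::COEFF_D, d);
+    /// assert_eq!(EdwardsParameters::mul_by_a(&d), -d);
+    /// ```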
+    #[inline(always)]
+    fn mul_by_a(elem: &Self::BaseField) -> Self::BaseField {
+        -(*elem)
+    }
+}
+
+impl MontgomeryModelParameters for EdwardsParameters {
+    /// COEFF_A = 40962
+    #[rustfmt::skip]
+    const COEFF_A: Fq = field_new!(Fq, "40962");
+    /// COEFF_B = -40964
+    #[rustfmt::skip]
+    const COEFF_B: Fq = field_new!(Fq, "-40964");
+
+    type TEModelParameters = EdwardsParameters;
+}
+
+#[rustfmt::skip]
+const GENERATOR_X: Fq = field_new!(Fq, "8076246640662884909881801758704306714034609987455869804520522091855516602923");
+#[rustfmt::skip]
+const GENERATOR_Y: Fq = field_new!(Fq, "13262374693698910701929044844600465831413122818447359594527400194675274060458");
diff --git a/arkworks/curves/ed_on_bls12_381/src/curves/tests.rs b/arkworks/curves/ed_on_bls12_381/src/curves/tests.rs
new file mode 100644
index 00000000..1ba5d2fc
--- /dev/null
+++ b/arkworks/curves/ed_on_bls12_381/src/curves/tests.rs
@@ -0,0 +1,107 @@
+use ark_ec::{AffineCurve, ProjectiveCurve};
+use ark_ff::{bytes::FromBytes, Zero};
+use ark_std::rand::Rng;
+use ark_std::str::FromStr;
+use ark_std::test_rng;
+
+use crate::*;
+
+use ark_algebra_test_templates::{curves::*, groups::*};
+
+#[test]
+fn test_projective_curve() {
+    curve_tests::<EdwardsProjective>();
+
+    edwards_tests::<EdwardsParameters>();
+}
+
+#[test]
+fn test_projective_group() {
+    let mut rng = test_rng();
+    let a = rng.gen();
+    let b = rng.gen();
+    for _i in 0..100 {
+        group_test::<EdwardsProjective>(a, b);
+    }
+}
+
+#[test]
+fn test_affine_group() {
+    let mut rng = test_rng();
+    let a: EdwardsAffine = rng.gen();
+    let b: EdwardsAffine = rng.gen();
+    for _i in 0..100 {
+        group_test::<EdwardsAffine>(a, b);
+    }
+}
+
+#[test]
+fn test_generator() {
+    let generator = EdwardsAffine::prime_subgroup_generator();
+    assert!(generator.is_on_curve());
+    assert!(generator.is_in_correct_subgroup_assuming_on_curve());
+}
+
+#[test]
+fn test_conversion() {
+    let mut rng = test_rng();
+    let a: EdwardsAffine = rng.gen();
+    let b: EdwardsAffine = rng.gen();
+    let a_b = {
+        use ark_ec::group::Group;
+        (a + &b).double().double()
+    };
+    let a_b2 = (a.into_projective() + &b.into_projective())
+        .double()
+        .double();
+    assert_eq!(a_b, a_b2.into_affine());
+    assert_eq!(a_b.into_projective(), a_b2);
+}
+
+#[test]
+fn test_scalar_multiplication() {
+    let f1 = Fr::from_str(
+        "4691331900926794624732159288782398864809513177368446695323460897088210774597",
+    )
+    .unwrap();
+    let f2 = Fr::from_str(
+        "1305028103380024953477151132159456965337646722479526711736847301646466538045",
+    )
+    .unwrap();
+
+    let g = EdwardsAffine::from_str(
+        "(1158870117176967269192899343636553522971009777237254192973081388797299308391, \
+         36933624999642413792569726058244472742169727126562409632889593958355839948294)",
+    )
+    .unwrap();
+    let f1f2g = EdwardsAffine::from_str(
+        "(12638652891150111215300246576936483137884466359309882317048163368620501191944, \
+         38385045634663742820428406709832518145724237919360177362175527604556651918148)",
+    )
+    .unwrap();
+
+    assert!(!g.is_zero());
+    assert!(!f1f2g.is_zero());
+
+    let f1g = g.mul(f1).into_affine();
+    assert_eq!(g.mul(f1 * &f2).into_affine(), f1f2g);
+    assert_eq!(f1g.mul(f2).into_affine(), f1f2g);
+}
+
+#[test]
+fn test_bytes() {
+    let g_from_repr = EdwardsAffine::from_str(
+        "(1158870117176967269192899343636553522971009777237254192973081388797299308391, \
+         36933624999642413792569726058244472742169727126562409632889593958355839948294)",
+    )
+    .unwrap();
+
+    let g_bytes = ark_ff::to_bytes![g_from_repr].unwrap();
+    let g = EdwardsAffine::read(g_bytes.as_slice()).unwrap();
+    assert_eq!(g_from_repr, g);
+}
+
+#[test]
+fn test_montgomery_conversion() {
+    montgomery_conversion_test::<EdwardsParameters>();
+}
diff --git a/arkworks/curves/ed_on_bls12_381/src/fields/fq.rs b/arkworks/curves/ed_on_bls12_381/src/fields/fq.rs
new file mode 100644
index 00000000..46c052be
--- /dev/null
+++ b/arkworks/curves/ed_on_bls12_381/src/fields/fq.rs
@@ -0,0 +1 @@
+pub use ark_bls12_381::{Fr as Fq, FrParameters as FqParameters};
diff --git a/arkworks/curves/ed_on_bls12_381/src/fields/fr.rs b/arkworks/curves/ed_on_bls12_381/src/fields/fr.rs
new file mode 100644
index 00000000..82cd18ac
--- /dev/null
+++ b/arkworks/curves/ed_on_bls12_381/src/fields/fr.rs
@@ -0,0 +1,81 @@
+use ark_ff::{
+    biginteger::BigInteger256 as BigInteger,
+    fields::{FftParameters, Fp256, Fp256Parameters, FpParameters},
+};
+
+pub type Fr = Fp256<FrParameters>;
+
+pub struct FrParameters;
+
+impl Fp256Parameters for FrParameters {}
+impl FftParameters for FrParameters {
+    type BigInt = BigInteger;
+
+    const TWO_ADICITY: u32 = 1;
+
+    #[rustfmt::skip]
+    const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([
+        0xaa9f02ab1d6124de,
+        0xb3524a6466112932,
+        0x7342261215ac260b,
+        0x4d6b87b1da259e2,
+    ]);
+}
+impl FpParameters for FrParameters {
+    /// MODULUS = 6554484396890773809930967563523245729705921265872317281365359162392183254199.
+    #[rustfmt::skip]
+    const MODULUS: BigInteger = BigInteger([
+        0xd0970e5ed6f72cb7,
+        0xa6682093ccc81082,
+        0x6673b0101343b00,
+        0xe7db4ea6533afa9,
+    ]);
+
+    const MODULUS_BITS: u32 = 252;
+
+    const CAPACITY: u32 = Self::MODULUS_BITS - 1;
+
+    const REPR_SHAVE_BITS: u32 = 4;
+
+    #[rustfmt::skip]
+    const R: BigInteger = BigInteger([
+        0x25f80bb3b99607d9,
+        0xf315d62f66b6e750,
+        0x932514eeeb8814f4,
+        0x9a6fc6f479155c6,
+    ]);
+
+    #[rustfmt::skip]
+    const R2: BigInteger = BigInteger([
+        0x67719aa495e57731,
+        0x51b0cef09ce3fc26,
+        0x69dab7fac026e9a5,
+        0x4f6547b8d127688,
+    ]);
+
+    const INV: u64 = 0x1ba3a358ef788ef9;
+
+    #[rustfmt::skip]
+    const GENERATOR: BigInteger = BigInteger([
+        0x720b1b19d49ea8f1,
+        0xbf4aa36101f13a58,
+        0x5fa8cc968193ccbb,
+        0xe70cbdc7dccf3ac,
+    ]);
+
+    const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        7515249040934278747,
+        5995434913520945217,
+        9454073218019761536,
+        522094803716528084,
+    ]);
+
+    const T: BigInteger = Self::MODULUS_MINUS_ONE_DIV_TWO;
+
+    const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        12980996557321915181,
+        2997717456760472608,
+        4727036609009880768,
+        261047401858264042,
+    ]);
+}
diff --git a/arkworks/curves/ed_on_bls12_381/src/fields/mod.rs b/arkworks/curves/ed_on_bls12_381/src/fields/mod.rs
new file mode 100644
index 00000000..6e99b4b9
--- /dev/null
+++ b/arkworks/curves/ed_on_bls12_381/src/fields/mod.rs
@@ -0,0 +1,8 @@
+pub mod fq;
+pub mod fr;
+
+pub use fq::*;
+pub use fr::*;
+
+#[cfg(all(feature = "ed_on_bls12_381", test))]
+mod tests;
diff --git a/arkworks/curves/ed_on_bls12_381/src/fields/tests.rs b/arkworks/curves/ed_on_bls12_381/src/fields/tests.rs
new file mode 100644
index 00000000..6314166e
--- /dev/null
+++ b/arkworks/curves/ed_on_bls12_381/src/fields/tests.rs
@@ -0,0 +1,427 @@
+use crate::{Fq, Fr};
+use ark_ff::{
+    biginteger::BigInteger256 as BigInteger,
+    bytes::{FromBytes, ToBytes},
+    fields::{Field, LegendreSymbol::*, SquareRootField},
+    One, Zero,
+};
+use ark_std::test_rng;
+
+use ark_algebra_test_templates::fields::*;
+
+use ark_std::rand::Rng;
+use ark_std::str::FromStr;
+
+#[test]
+fn test_fr() {
+    let mut rng = test_rng();
+    let a: Fr = rng.gen();
+    let b: Fr = rng.gen();
+    field_test(a, b);
+    primefield_test::<Fr>();
+}
+
+#[test]
+fn test_fq() {
+    let mut rng = test_rng();
+    let a: Fq = rng.gen();
+    let b: Fq = rng.gen();
+    field_test(a, b);
+    primefield_test::<Fq>();
+}
+
+#[test]
+fn test_fq_add() {
+    let f1 = Fq::from_str(
+        "18386742314266644595564329008376577163854043021652781768352795308532764650733",
+    )
+    .unwrap();
+    let f2 = Fq::from_str(
+        "39786307610986038981023499868190793548353538256264351797285876981647142458383",
+    )
+    .unwrap();
+    let f3 = Fq::from_str(
+        "5737174750126493097140088368381404874517028777389495743035013590241325924603",
+    )
+    .unwrap();
+    assert!(!f1.is_zero());
+    assert!(!f2.is_zero());
+    assert!(!f3.is_zero());
+    assert_eq!(f1 + &f2, f3);
+}
+
+#[test]
+fn test_fq_add_one() {
+    let f1 = Fq::from_str(
+        "4946875394261337176810256604189376311946643975348516311606738923340201185904",
+    )
+    .unwrap();
+    let f2 = Fq::from_str(
+        "4946875394261337176810256604189376311946643975348516311606738923340201185905",
+    )
+    .unwrap();
+    assert!(!f1.is_zero());
+    assert!(!f2.is_zero());
+    assert_eq!(f1 + &Fq::one(), f2);
+}
+
+#[test]
+fn test_fq_mul() {
+    let f1 = Fq::from_str(
+        "24703123148064348394273033316595937198355721297494556079070134653139656190956",
+    )
+    .unwrap();
+    let f2 = Fq::from_str(
+        "38196797080882758914424853878212529985425118523754343117256179679117054302131",
+    )
+    .unwrap();
+    let f3 = Fq::from_str(
+        "38057113854472161555556064369220825628027487067886761874351491955834635348140",
+    )
+    .unwrap();
+    assert!(!f1.is_zero());
+    assert!(!f2.is_zero());
+    assert!(!f3.is_zero());
+    assert_eq!(f1 * &f2, f3);
+}
+
+#[test]
+fn test_fq_triple_mul() {
+    let f1 = Fq::from_str(
+        "23834398828139479510988224171342199299644042568628082836691700490363123893905",
+    )
+    .unwrap();
+    let f2 = Fq::from_str(
+        "48343809612844640454129919255697536258606705076971130519928764925719046689317",
+    )
+    .unwrap();
+    let f3 = Fq::from_str(
+        "22704845471524346880579660022678666462201713488283356385810726260959369106033",
+    )
+    .unwrap();
+    let f4 = Fq::from_str(
+        "18897508522635316277030308074760673440128491438505204942623624791502972539393",
+    )
+    .unwrap();
+    assert!(!f1.is_zero());
+    assert!(!f2.is_zero());
+    assert!(!f3.is_zero());
+    assert_eq!(f1 * &f2 * &f3, f4);
+}
+
+#[test]
+fn test_fq_div() {
+    let f1 = Fq::from_str(
+        "31892744363926593013886463524057935370302352424137349660481695792871889573091",
+    )
+    .unwrap();
+    let f2 = Fq::from_str(
+        "47695868328933459965610498875668250916462767196500056002116961816137113470902",
+    )
+    .unwrap();
+    let f3 = Fq::from_str(
+        "29049672724678710659792141917402891276693777283079976086581207190825261000580",
+    )
+    .unwrap();
+    assert!(!f1.is_zero());
+    assert!(!f2.is_zero());
+    assert!(!f3.is_zero());
+    assert_eq!(f1 / &f2, f3);
+}
+
+#[test]
+fn test_fq_sub() {
+    let f1 = Fq::from_str(
+        "18695869713129401390241150743745601908470616448391638969502807001833388904079",
+    )
+    .unwrap();
+    let f2 = Fq::from_str(
+        "10105476028534616828778879109836101003805485072436929139123765141153277007373",
+    )
+    .unwrap();
+    let f3 = Fq::from_str(
+        "8590393684594784561462271633909500904665131375954709830379041860680111896706",
+    )
+    .unwrap();
+    assert!(!f1.is_zero());
+    assert!(!f2.is_zero());
+    assert!(!f3.is_zero());
+    assert_eq!(f1 - &f2, f3);
+}
+
+#[test]
+fn test_fq_double_in_place() {
+    let mut f1 = Fq::from_str(
+        "29729289787452206300641229002276778748586801323231253291984198106063944136114",
+    )
+    .unwrap();
+    let f3 = Fq::from_str(
+        "7022704399778222121834717496367591659483050145934868761364737512189307087715",
+    )
+    .unwrap();
+    assert!(!f1.is_zero());
+    assert!(!f3.is_zero());
+    f1.double_in_place();
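+    // `double_in_place` computes 2·f1 (mod q). Here 2·f1 exceeds the modulus q, so
+    // the expected value f3 above is exactly 2·f1 - q.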
assert_eq!(f1, f3); +} + +#[test] +fn test_fq_double_in_place_thrice() { + let mut f1 = Fq::from_str( + "32768907806651393940832831055386272949401004221411141755415956893066040832473", + ) + .unwrap(); + let f3 = Fq::from_str( + "52407761752706389608871686410346320244445823769178582752913020344774001921732", + ) + .unwrap(); + assert!(!f1.is_zero()); + assert!(!f3.is_zero()); + f1.double_in_place(); + f1.double_in_place(); + f1.double_in_place(); + assert_eq!(f1, f3); +} + +#[test] +fn test_fq_generate_random_ed_on_bls12_381_point() { + let d = Fq::from_str( + "19257038036680949359750312669786877991949435402254120286184196891950884077233", + ) + .unwrap(); + let y = Fq::from_str( + "20269054604167148422407276086932743904275456233139568486008667107872965128512", + ) + .unwrap(); + let x2 = Fq::from_str( + "35041048504708632193693740149219726446678304552734087046982753200179718192840", + ) + .unwrap(); + + let computed_y2 = y.square(); + let y2 = Fq::from_str( + "22730681238307918419349440108285755984465605552827817317611903495170775437833", + ) + .unwrap(); + assert_eq!(y2, computed_y2); + + let computed_dy2 = d * &computed_y2; + let dy2 = Fq::from_str( + "24720347560552809545835752815204882739669031262711919770503096707526812943411", + ) + .unwrap(); + assert_eq!(dy2, computed_dy2); + + let computed_divisor = computed_dy2 + &Fq::one(); + let divisor = Fq::from_str( + "24720347560552809545835752815204882739669031262711919770503096707526812943412", + ) + .unwrap(); + assert_eq!(divisor, computed_divisor); + + let computed_x2 = (computed_y2 - &Fq::one()) / &computed_divisor; + assert_eq!(x2, computed_x2); + + let x = Fq::from_str( + "15337652609730546173818014678723269532482775720866471265774032070871608223361", + ) + .unwrap(); + let computed_x = computed_x2.sqrt().unwrap(); + assert_eq!(computed_x.square(), x2); + assert_eq!(x, computed_x); + + fn add<'a>(curr: (Fq, Fq), other: &'a (Fq, Fq)) -> (Fq, Fq) { + let y1y2 = curr.1 * &other.1; + let x1x2 = curr.0 * &other.0; + let d = Fq::from_str( + "19257038036680949359750312669786877991949435402254120286184196891950884077233", + ) + .unwrap(); + let dx1x2y1y2 = d * &y1y2 * &x1x2; + + let d1 = Fq::one() + &dx1x2y1y2; + let d2 = Fq::one() - &dx1x2y1y2; + + let x1y2 = curr.0 * &other.1; + let y1x2 = curr.1 * &other.0; + + let x = (x1y2 + &y1x2) / &d1; + let y = (y1y2 + &x1x2) / &d2; + + (x, y) + } + + let result = add((x, y), &(x, y)); + let result = add(result, &result); + let result = add(result, &result); + + let point_x = Fq::from_str( + "47259664076168047050113154262636619161204477920503059672059915868534495873964", + ) + .unwrap(); + let point_y = Fq::from_str( + "19016409245280491801573912449420132838852726543024859389273314249842195919690", + ) + .unwrap(); + assert_eq!((point_x, point_y), result); +} + +#[test] +fn test_fq_square_in_place() { + let mut f1 = Fq::from_str( + "34864651240005695523200639428464570946052769938774601449735727714436878540682", + ) + .unwrap(); + let f3 = + Fq::from_str("213133100629336594719108316042277780359104840987226496279264105585804377948") + .unwrap(); + assert!(!f1.is_zero()); + assert!(!f3.is_zero()); + f1.square_in_place(); + assert_eq!(f1, f3); +} + +#[test] +fn test_fq_sqrt() { + let f1 = Fq::from_str( + "10875927553327821418567659853801220899541454800710193788767706167237535308235", + ) + .unwrap(); + let f3 = Fq::from_str( + "10816221372957505053219354782681292880545918527618367765651802809826238616708", + ) + .unwrap(); + assert_eq!(f1.sqrt().unwrap(), f3); +} + +#[test] +fn test_fq_from_str() { + 
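+    // `BigInteger256` holds four little-endian u64 limbs, so the limbs below spell
+    // out a single 256-bit integer; `Fq::from` reduces that integer into the field,
+    // which must agree with parsing the same value from its decimal string.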
+    let f1_from_repr = Fq::from(BigInteger([
+        0xab8a2535947d1a77,
+        0x9ba74cbfda0bbcda,
+        0xe928b59724d60baf,
+        0x1cccaaeb9bb1680a,
+    ]));
+    let f1 = Fq::from_str(
+        "13026376210409056429264774981357153555336288129100724591327877625017068755575",
+    )
+    .unwrap();
+    let f2_from_repr = Fq::from(BigInteger([
+        0x97e9103775d2f35c,
+        0xbe6756b6c587544b,
+        0x6ee38c3afd88ef4b,
+        0x2bacd150f540c677,
+    ]));
+    let f2 = Fq::from_str(
+        "19754794831832707859764530223239420866832328728734160755396495950822165902172",
+    )
+    .unwrap();
+    assert_eq!(f1_from_repr, f1);
+    assert_eq!(f2_from_repr, f2);
+}
+
+#[test]
+fn test_fq_legendre() {
+    assert_eq!(QuadraticResidue, Fq::one().legendre());
+    assert_eq!(Zero, Fq::zero().legendre());
+
+    let e = BigInteger([
+        0x0dbc5349cd5664da,
+        0x8ac5b6296e3ae29d,
+        0x127cb819feceaa3b,
+        0x3a6b21fb03867191,
+    ]);
+    assert_eq!(QuadraticResidue, Fq::from(e).legendre());
+    let e = BigInteger([
+        0x96341aefd047c045,
+        0x9b5f4254500a4d65,
+        0x1ee08223b68ac240,
+        0x31d9cd545c0ec7c6,
+    ]);
+    assert_eq!(QuadraticNonResidue, Fq::from(e).legendre());
+}
+
+#[test]
+fn test_fq_bytes() {
+    let f1_from_repr = Fq::from(BigInteger([
+        0xab8a2535947d1a77,
+        0x9ba74cbfda0bbcda,
+        0xe928b59724d60baf,
+        0x1cccaaeb9bb1680a,
+    ]));
+
+    let mut f1_bytes = [0u8; 32];
+    f1_from_repr.write(f1_bytes.as_mut()).unwrap();
+
+    let f1 = Fq::read(f1_bytes.as_ref()).unwrap();
+    assert_eq!(f1_from_repr, f1);
+}
+
+#[test]
+fn test_fr_add() {
+    let f1 = Fr::from(BigInteger([
+        0xc81265fb4130fe0c,
+        0xb308836c14e22279,
+        0x699e887f96bff372,
+        0x84ecc7e76c11ad,
+    ]));
+    let f2 = Fr::from(BigInteger([
+        0x71875719b422efb8,
+        0x43658e68a93612,
+        0x9fa756be2011e833,
+        0xaa2b2cb08dac497,
+    ]));
+    let f3 = Fr::from(BigInteger([
+        0x3999bd14f553edc4,
+        0xb34be8fa7d8b588c,
+        0x945df3db6d1dba5,
+        0xb279f92f046d645,
+    ]));
+    assert_eq!(f1 + &f2, f3);
+}
+
+#[test]
+fn test_fr_mul() {
+    let f1 = Fr::from(BigInteger([
+        0xc81265fb4130fe0c,
+        0xb308836c14e22279,
+        0x699e887f96bff372,
+        0x84ecc7e76c11ad,
+    ]));
+    let f2 = Fr::from(BigInteger([
+        0x71875719b422efb8,
+        0x43658e68a93612,
+        0x9fa756be2011e833,
+        0xaa2b2cb08dac497,
+    ]));
+    let f3 = Fr::from(BigInteger([
+        0x6d6618ac6b4a8381,
+        0x5b9eb35d711ee1da,
+        0xce83310e6ac4105d,
+        0x98032e0f206320a,
+    ]));
+    assert_eq!(f1 * &f2, f3);
+}
+
+#[test]
+fn test_fr_bytes() {
+    let f1_from_repr = Fr::from(BigInteger([
+        0xc81265fb4130fe0c,
+        0xb308836c14e22279,
+        0x699e887f96bff372,
+        0x84ecc7e76c11ad,
+    ]));
+
+    let mut f1_bytes = [0u8; 32];
+    f1_from_repr.write(f1_bytes.as_mut()).unwrap();
+
+    let f1 = Fr::read(f1_bytes.as_ref()).unwrap();
+    assert_eq!(f1_from_repr, f1);
+}
+
+#[test]
+fn test_fr_from_str() {
+    let f100_from_repr = Fr::from(BigInteger([0x64, 0, 0, 0]));
+    let f100 = Fr::from_str("100").unwrap();
+    assert_eq!(f100_from_repr, f100);
+}
diff --git a/arkworks/curves/ed_on_bls12_381/src/lib.rs b/arkworks/curves/ed_on_bls12_381/src/lib.rs
new file mode 100644
index 00000000..3b429ba5
--- /dev/null
+++ b/arkworks/curves/ed_on_bls12_381/src/lib.rs
@@ -0,0 +1,31 @@
+#![cfg_attr(not(feature = "std"), no_std)]
+#![deny(
+    warnings,
+    unused,
+    future_incompatible,
+    nonstandard_style,
+    rust_2018_idioms
+)]
+#![forbid(unsafe_code)]
+
+//! This library implements a twisted Edwards curve whose base field is the scalar field of the
+//! curve BLS12-381. This allows defining cryptographic primitives that use elliptic curves over
+//! the scalar field of the latter curve. This curve was generated by Sean Bowe, and is also known
+//! as [Jubjub](https://github.com/zkcrypto/jubjub).
+//!
+//! Curve information:
+//! * Base field: q = 52435875175126190479447740508185965837690552500527637822603658699938581184513
+//! * Scalar field: r = 6554484396890773809930967563523245729705921265872317281365359162392183254199
+//! * Valuation(q - 1, 2) = 32
+//! * Valuation(r - 1, 2) = 1
+//! * Curve equation: ax^2 + y^2 = 1 + dx^2y^2, where
+//!    * a = -1
+//!    * d = -(10240/10241)
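+//!
+//! The two 2-adic valuations above can be checked directly against the exported
+//! field parameters (a small sketch; it assumes nothing beyond the re-exports in
+//! this crate and the `ark_ff::FftParameters` trait):
+//!
+//! ```
+//! use ark_ff::FftParameters;
+//! use ark_ed_on_bls12_381::{FqParameters, FrParameters};
+//!
+//! assert_eq!(FqParameters::TWO_ADICITY, 32); // Valuation(q - 1, 2)
+//! assert_eq!(FrParameters::TWO_ADICITY, 1);  // Valuation(r - 1, 2)
+//! ```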
+
+#[cfg(feature = "r1cs")]
+pub mod constraints;
+mod curves;
+mod fields;
+
+pub use curves::*;
+pub use fields::*;
diff --git a/arkworks/curves/ed_on_bn254/Cargo.toml b/arkworks/curves/ed_on_bn254/Cargo.toml
new file mode 100644
index 00000000..3cd7a72e
--- /dev/null
+++ b/arkworks/curves/ed_on_bn254/Cargo.toml
@@ -0,0 +1,31 @@
+[package]
+name = "ark-ed-on-bn254"
+version = "0.3.0"
+authors = [ "arkworks contributors" ]
+description = "A Twisted Edwards curve defined over the scalar field of the BN254 curve"
+homepage = "https://arkworks.rs"
+repository = "https://github.com/arkworks-rs/algebra"
+documentation = "https://docs.rs/ark-ed-on-bn254/"
+keywords = ["cryptography", "finite-fields", "elliptic-curves" ]
+categories = ["cryptography"]
+include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"]
+license = "MIT/Apache-2.0"
+edition = "2018"
+
+[dependencies]
+ark-ff = { version = "^0.3.0", default-features = false }
+ark-ec = { version = "^0.3.0", default-features = false }
+ark-std = { version = "^0.3.0", default-features = false }
+ark-r1cs-std = { version = "^0.3.0", default-features = false, optional = true }
+ark-bn254 = { version = "^0.3.0", path = "../bn254", default-features = false, features = [ "scalar_field" ] }
+
+[dev-dependencies]
+ark-relations = { version = "^0.3.0", default-features = false }
+ark-serialize = { version = "^0.3.0", default-features = false }
+ark-algebra-test-templates = { version = "^0.3.0", default-features = false }
+ark-curve-constraint-tests = { path = "../curve-constraint-tests", default-features = false }
+
+[features]
+default = []
+std = [ "ark-std/std", "ark-ff/std", "ark-ec/std", "ark-bn254/std" ]
+r1cs = ["ark-r1cs-std"]
diff --git a/arkworks/curves/ed_on_bn254/src/constraints/curves.rs b/arkworks/curves/ed_on_bn254/src/constraints/curves.rs
new file mode 100644
index 00000000..9c9f7837
--- /dev/null
+++ b/arkworks/curves/ed_on_bn254/src/constraints/curves.rs
@@ -0,0 +1,12 @@
+use crate::*;
+use ark_r1cs_std::groups::curves::twisted_edwards::AffineVar;
+
+use crate::constraints::FqVar;
+
+/// A variable that is the R1CS equivalent of `crate::EdwardsAffine`.
+pub type EdwardsVar = AffineVar<EdwardsParameters, FqVar>;
+
+#[test]
+fn test() {
+    ark_curve_constraint_tests::curves::te_test::<_, EdwardsVar>().unwrap();
+}
diff --git a/arkworks/curves/ed_on_bn254/src/constraints/fields.rs b/arkworks/curves/ed_on_bn254/src/constraints/fields.rs
new file mode 100644
index 00000000..3f81d7a3
--- /dev/null
+++ b/arkworks/curves/ed_on_bn254/src/constraints/fields.rs
@@ -0,0 +1,9 @@
+use ark_r1cs_std::fields::fp::FpVar;
+
+/// A variable that is the R1CS equivalent of `crate::Fq`.
+pub type FqVar = FpVar<crate::Fq>;
+
+#[test]
+fn test() {
+    ark_curve_constraint_tests::fields::field_test::<_, _, FqVar>().unwrap();
+}
diff --git a/arkworks/curves/ed_on_bn254/src/constraints/mod.rs b/arkworks/curves/ed_on_bn254/src/constraints/mod.rs
new file mode 100644
index 00000000..c61c7dc3
--- /dev/null
+++ b/arkworks/curves/ed_on_bn254/src/constraints/mod.rs
@@ -0,0 +1,107 @@
+//! This module implements the R1CS equivalent of `ark_ed_on_bn254`.
+//!
+//! It implements field variables for `crate::Fq`,
+//! and group variables for `crate::GroupProjective`.
+//!
+//! The field underlying these constraints is `crate::Fq`.
+//!
+//! # Examples
+//!
+//! One can perform standard algebraic operations on `FqVar`:
+//!
+//! ```
+//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
+//! use ark_std::UniformRand;
+//! use ark_relations::r1cs::*;
+//! use ark_r1cs_std::prelude::*;
+//! use ark_ed_on_bn254::{*, constraints::*};
+//!
+//! let cs = ConstraintSystem::<Fq>::new_ref();
+//! // This rng is just for test purposes; do not use it
+//! // in real applications.
+//! let mut rng = ark_std::test_rng();
+//!
+//! // Generate some random `Fq` elements.
+//! let a_native = Fq::rand(&mut rng);
+//! let b_native = Fq::rand(&mut rng);
+//!
+//! // Allocate `a_native` and `b_native` as witness variables in `cs`.
+//! let a = FqVar::new_witness(ark_relations::ns!(cs, "generate_a"), || Ok(a_native))?;
+//! let b = FqVar::new_witness(ark_relations::ns!(cs, "generate_b"), || Ok(b_native))?;
+//!
+//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any
+//! // constraints or variables.
+//! let a_const = FqVar::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?;
+//! let b_const = FqVar::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?;
+//!
+//! let one = FqVar::one();
+//! let zero = FqVar::zero();
+//!
+//! // Sanity check one + one = two
+//! let two = &one + &one + &zero;
+//! two.enforce_equal(&one.double()?)?;
+//!
+//! assert!(cs.is_satisfied()?);
+//!
+//! // Check that the value of &a + &b is correct.
+//! assert_eq!((&a + &b).value()?, a_native + &b_native);
+//!
+//! // Check that the value of &a * &b is correct.
+//! assert_eq!((&a * &b).value()?, a_native * &b_native);
+//!
+//! // Check that operations on variables and constants are equivalent.
+//! (&a + &b).enforce_equal(&(&a_const + &b_const))?;
+//! assert!(cs.is_satisfied()?);
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! One can also perform standard algebraic operations on `EdwardsVar`:
+//!
+//! ```
+//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
+//! # use ark_std::UniformRand;
+//! # use ark_relations::r1cs::*;
+//! # use ark_r1cs_std::prelude::*;
+//! # use ark_ed_on_bn254::{*, constraints::*};
+//!
+//! # let cs = ConstraintSystem::<Fq>::new_ref();
+//! # let mut rng = ark_std::test_rng();
+//!
+//! // Generate some random `Edwards` elements.
+//! let a_native = EdwardsProjective::rand(&mut rng);
+//! let b_native = EdwardsProjective::rand(&mut rng);
+//!
+//! // Allocate `a_native` and `b_native` as witness variables in `cs`.
+//! let a = EdwardsVar::new_witness(ark_relations::ns!(cs, "a"), || Ok(a_native))?;
+//! let b = EdwardsVar::new_witness(ark_relations::ns!(cs, "b"), || Ok(b_native))?;
+//!
+//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any
+//! // constraints or variables.
+//! let a_const = EdwardsVar::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?;
+//! let b_const = EdwardsVar::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?;
+//!
+//! // This returns the identity of `Edwards`.
+//! let zero = EdwardsVar::zero();
+//!
+//! // Sanity check a + a = 2a
+//! let two_a = &a + &a + &zero;
+//! two_a.enforce_equal(&a.double()?)?;
+//!
+//! assert!(cs.is_satisfied()?);
+//!
+//! // Check that the value of &a + &b is correct.
+//! assert_eq!((&a + &b).value()?, a_native + &b_native);
+//!
+//! // Check that operations on variables and constants are equivalent.
+//! (&a + &b).enforce_equal(&(&a_const + &b_const))?;
+//! assert!(cs.is_satisfied()?);
+//! # Ok(())
+//! # }
+//! ```
+
+mod curves;
+mod fields;
+
+pub use curves::*;
+pub use fields::*;
diff --git a/arkworks/curves/ed_on_bn254/src/curves/mod.rs b/arkworks/curves/ed_on_bn254/src/curves/mod.rs
new file mode 100644
index 00000000..69db4f33
--- /dev/null
+++ b/arkworks/curves/ed_on_bn254/src/curves/mod.rs
@@ -0,0 +1,73 @@
+use crate::{Fq, Fr};
+use ark_ec::{
+    models::{ModelParameters, MontgomeryModelParameters, TEModelParameters},
+    twisted_edwards_extended::{GroupAffine, GroupProjective},
+};
+use ark_ff::field_new;
+
+#[cfg(test)]
+mod tests;
+
+pub type EdwardsAffine = GroupAffine<EdwardsParameters>;
+pub type EdwardsProjective = GroupProjective<EdwardsParameters>;
+
+/// `Baby-JubJub` is a twisted Edwards curve. These curves have equations of the
+/// form: ax² + y² = 1 + dx²y²,
+/// over some base finite field Fq.
+///
+/// Baby-JubJub's curve equation: x² + y² = 1 + (168696/168700)x²y²
+///
+/// q = 21888242871839275222246405745257275088548364400416034343698204186575808495617
+///
+#[derive(Clone, Default, PartialEq, Eq)]
+pub struct EdwardsParameters;
+
+impl ModelParameters for EdwardsParameters {
+    type BaseField = Fq;
+    type ScalarField = Fr;
+}
+
+impl TEModelParameters for EdwardsParameters {
+    /// COEFF_A = 1
+    #[rustfmt::skip]
+    const COEFF_A: Fq = field_new!(Fq, "1");
+
+    #[inline(always)]
+    fn mul_by_a(elem: &Self::BaseField) -> Self::BaseField {
+        *elem
+    }
+
+    /// COEFF_D = 168696/168700 mod q
+    ///         = 9706598848417545097372247223557719406784115219466060233080913168975159366771
+    #[rustfmt::skip]
+    const COEFF_D: Fq = field_new!(Fq, "9706598848417545097372247223557719406784115219466060233080913168975159366771");
+
+    /// COFACTOR = 8
+    const COFACTOR: &'static [u64] = &[8];
+
+    /// COFACTOR^(-1) mod r =
+    /// 2394026564107420727433200628387514462817212225638746351800188703329891451411
+    #[rustfmt::skip]
+    const COFACTOR_INV: Fr = field_new!(Fr, "2394026564107420727433200628387514462817212225638746351800188703329891451411");
+
+    /// AFFINE_GENERATOR_COEFFS = (GENERATOR_X, GENERATOR_Y)
+    const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = (GENERATOR_X, GENERATOR_Y);
+
+    type MontgomeryModelParameters = EdwardsParameters;
+}
+
+impl MontgomeryModelParameters for EdwardsParameters {
+    /// COEFF_A = 168698
+    #[rustfmt::skip]
+    const COEFF_A: Fq = field_new!(Fq, "168698");
+    /// COEFF_B = 168700
+    #[rustfmt::skip]
+    const COEFF_B: Fq = field_new!(Fq, "168700");
+
+    type TEModelParameters = EdwardsParameters;
+}
+
+#[rustfmt::skip]
+const GENERATOR_X: Fq = field_new!(Fq, "19698561148652590122159747500897617769866003486955115824547446575314762165298");
+#[rustfmt::skip]
+const GENERATOR_Y: Fq = field_new!(Fq, "19298250018296453272277890825869354524455968081175474282777126169995084727839");
diff --git a/arkworks/curves/ed_on_bn254/src/curves/tests.rs b/arkworks/curves/ed_on_bn254/src/curves/tests.rs
new file mode 100644
index 00000000..358d89a2
--- /dev/null
+++ b/arkworks/curves/ed_on_bn254/src/curves/tests.rs
@@ -0,0 +1,107 @@
+use ark_ec::{AffineCurve, ProjectiveCurve};
+use ark_ff::{bytes::FromBytes, Zero};
+use ark_std::rand::Rng;
+use ark_std::str::FromStr;
+use ark_std::test_rng;
+
+use crate::*;
+
+use ark_algebra_test_templates::{curves::*, groups::*};
+
+#[test]
+fn test_projective_curve() {
+    curve_tests::<EdwardsProjective>();
+
+    edwards_tests::<EdwardsParameters>();
+}
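+
+// An extra sanity check (a sketch added for exposition, using only the public API):
+// the fraction documented above, 168696/168700 mod q, should equal `COEFF_D`.
+#[test]
+fn test_coeff_d_matches_documented_fraction() {
+    use ark_ec::models::TEModelParameters;
+    let d = Fq::from(168696u64) / Fq::from(168700u64);
+    assert_eq!(EdwardsParameters::COEFF_D, d);
+}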
+
+#[test]
+fn test_projective_group() {
+    let mut rng = test_rng();
+    let a = rng.gen();
+    let b = rng.gen();
+    for _i in 0..100 {
+        group_test::<EdwardsProjective>(a, b);
+    }
+}
+
+#[test]
+fn test_affine_group() {
+    let mut rng = test_rng();
+    let a: EdwardsAffine = rng.gen();
+    let b: EdwardsAffine = rng.gen();
+    for _i in 0..100 {
+        group_test::<EdwardsAffine>(a, b);
+    }
+}
+
+#[test]
+fn test_generator() {
+    let generator = EdwardsAffine::prime_subgroup_generator();
+    assert!(generator.is_on_curve());
+    assert!(generator.is_in_correct_subgroup_assuming_on_curve());
+}
+
+#[test]
+fn test_conversion() {
+    let mut rng = test_rng();
+    let a: EdwardsAffine = rng.gen();
+    let b: EdwardsAffine = rng.gen();
+    let a_b = {
+        use ark_ec::group::Group;
+        (a + &b).double().double()
+    };
+    let a_b2 = (a.into_projective() + &b.into_projective())
+        .double()
+        .double();
+    assert_eq!(a_b, a_b2.into_affine());
+    assert_eq!(a_b.into_projective(), a_b2);
+}
+
+#[test]
+fn test_scalar_multiplication() {
+    let f1 = Fr::from_str(
+        "4691331900926794624732159288782398864809513177368446695323460897088210774597",
+    )
+    .unwrap();
+    let f2 = Fr::from_str(
+        "1305028103380024953477151132159456965337646722479526711736847301646466538045",
+    )
+    .unwrap();
+
+    let g = EdwardsAffine::from_str(
+        "(15863623088992515880085393097393553694825975317405843389771115419751650972659, \
+         16950150798460657717958625567821834550301663161624707787222815936182638968203)",
+    )
+    .unwrap();
+    let f1f2g = EdwardsAffine::from_str(
+        "(20773645713088336957786354488799297695596635653208610804806657050882264237947, \
+         19987327827845206670850937090314462639017692512983955920885166014935289314257)",
+    )
+    .unwrap();
+
+    assert!(!g.is_zero());
+    assert!(!f1f2g.is_zero());
+
+    let f1g = g.mul(f1).into_affine();
+    assert_eq!(g.mul(f1 * &f2).into_affine(), f1f2g);
+    assert_eq!(f1g.mul(f2).into_affine(), f1f2g);
+}
+
+#[test]
+fn test_bytes() {
+    let g_from_repr = EdwardsAffine::from_str(
+        "(15863623088992515880085393097393553694825975317405843389771115419751650972659, \
+         16950150798460657717958625567821834550301663161624707787222815936182638968203)",
+    )
+    .unwrap();
+
+    let g_bytes = ark_ff::to_bytes![g_from_repr].unwrap();
+    let g = EdwardsAffine::read(g_bytes.as_slice()).unwrap();
+    assert_eq!(g_from_repr, g);
+}
+
+#[test]
+fn test_montgomery_conversion() {
+    montgomery_conversion_test::<EdwardsParameters>();
+}
diff --git a/arkworks/curves/ed_on_bn254/src/fields/fq.rs b/arkworks/curves/ed_on_bn254/src/fields/fq.rs
new file mode 100644
index 00000000..800a203b
--- /dev/null
+++ b/arkworks/curves/ed_on_bn254/src/fields/fq.rs
@@ -0,0 +1 @@
+pub use ark_bn254::{Fr as Fq, FrParameters as FqParameters};
diff --git a/arkworks/curves/ed_on_bn254/src/fields/fr.rs b/arkworks/curves/ed_on_bn254/src/fields/fr.rs
new file mode 100644
index 00000000..7d210f4a
--- /dev/null
+++ b/arkworks/curves/ed_on_bn254/src/fields/fr.rs
@@ -0,0 +1,87 @@
+use ark_ff::{
+    biginteger::BigInteger256 as BigInteger,
+    fields::{FftParameters, Fp256, Fp256Parameters, FpParameters},
+};
+
+pub type Fr = Fp256<FrParameters>;
+
+pub struct FrParameters;
+
+impl Fp256Parameters for FrParameters {}
+impl FftParameters for FrParameters {
+    type BigInt = BigInteger;
+
+    const TWO_ADICITY: u32 = 4;
+
+    #[rustfmt::skip]
+    const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([
+        0x1721ada8d4d27255,
+        0xcda0f5264e0e35bb,
+        0x961a936922086fe6,
+        0x1ab00857387dd52,
+    ]);
+}
+impl FpParameters for FrParameters {
+    /// MODULUS = 2736030358979909402780800718157159386076813972158567259200215660948447373041
+    #[rustfmt::skip]
+    const MODULUS: BigInteger = BigInteger([
+        0x677297dc392126f1,
+        0xab3eedb83920ee0a,
+        0x370a08b6d0302b0b,
+        0x60c89ce5c263405,
+    ]);
+
+    const MODULUS_BITS: u32 = 251;
+
+    const CAPACITY: u32 = Self::MODULUS_BITS - 1;
+
+    const REPR_SHAVE_BITS: u32 = 5;
+
+    #[rustfmt::skip]
+    const R: BigInteger = BigInteger([
+        0x073315dea08f9c76,
+        0xe7acffc6a098f24b,
+        0xf85a9201d818f015,
+        0x1f16424e1bb7724,
+    ]);
+
+    #[rustfmt::skip]
+    const R2: BigInteger = BigInteger([
+        0x35e44abee7ecb21e,
+        0x74646cacf5f84ec4,
+        0xe472df203faa158f,
+        0x445b524f1ba50a8,
+    ]);
+
+    const INV: u64 = 0x532ce5aebc48f5ef;
+
+    #[rustfmt::skip]
+    /// GENERATOR = 31
+    const GENERATOR: BigInteger = BigInteger([
+        0x3c284f376f3993d1,
+        0x08bc9d93705cf8b8,
+        0x239d5fcbd9538f3e,
+        0x5ca4836185b994b,
+    ]);
+
+    const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        0x33b94bee1c909378,
+        0xd59f76dc1c907705,
+        0x9b85045b68181585,
+        0x30644e72e131a02,
+    ]);
+
+    const T: BigInteger = BigInteger([
+        0xa677297dc392126f,
+        0xbab3eedb83920ee0,
+        0x5370a08b6d0302b0,
+        0x60c89ce5c26340,
+    ]);
+
+    const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        0x533b94bee1c90937,
+        0x5d59f76dc1c90770,
+        0x29b85045b6818158,
+        0x30644e72e131a0,
+    ]);
+}
diff --git a/arkworks/curves/ed_on_bn254/src/fields/mod.rs b/arkworks/curves/ed_on_bn254/src/fields/mod.rs
new file mode 100644
index 00000000..9e3fbaa3
--- /dev/null
+++ b/arkworks/curves/ed_on_bn254/src/fields/mod.rs
@@ -0,0 +1,8 @@
+pub mod fq;
+pub mod fr;
+
+pub use fq::*;
+pub use fr::*;
+
+#[cfg(all(feature = "ed_on_bn254", test))]
+mod tests;
diff --git a/arkworks/curves/ed_on_bn254/src/fields/tests.rs b/arkworks/curves/ed_on_bn254/src/fields/tests.rs
new file mode 100644
index 00000000..e3929872
--- /dev/null
+++ b/arkworks/curves/ed_on_bn254/src/fields/tests.rs
@@ -0,0 +1,421 @@
+use crate::{Fq, Fr};
+use ark_ff::{
+    biginteger::BigInteger256 as BigInteger,
+    bytes::{FromBytes, ToBytes},
+    fields::{Field, LegendreSymbol::*, SquareRootField},
+    One, Zero,
+};
+use ark_std::test_rng;
+
+use ark_algebra_test_templates::fields::*;
+
+use ark_std::rand::Rng;
+use ark_std::str::FromStr;
+
+#[test]
+fn test_fr() {
+    let mut rng = test_rng();
+    let a: Fr = rng.gen();
+    let b: Fr = rng.gen();
+    field_test(a, b);
+    primefield_test::<Fr>();
+}
+
+#[test]
+fn test_fq() {
+    let mut rng = test_rng();
+    let a: Fq = rng.gen();
+    let b: Fq = rng.gen();
+    field_test(a, b);
+    primefield_test::<Fq>();
+}
+
+#[test]
+fn test_fq_add() {
+    let f1 = Fq::from_str(
+        "18386742314266644595564329008376577163854043021652781768352795308532764650733",
+    )
+    .unwrap();
+    let f2 = Fq::from_str(
+        "39786307610986038981023499868190793548353538256264351797285876981647142458383",
+    )
+    .unwrap();
+    let f3 = Fq::from_str(
+        "14396564181574133132095017386052820535110852477085064878242263917028290117882",
+    )
+    .unwrap();
+    assert!(!f1.is_zero());
+    assert!(!f2.is_zero());
+    assert!(!f3.is_zero());
+    assert_eq!(f1 + &f2, f3);
+}
+
+#[test]
+fn test_fq_add_one() {
+    let f1 = Fq::from_str(
+        "4946875394261337176810256604189376311946643975348516311606738923340201185904",
+    )
+    .unwrap();
+    let f2 = Fq::from_str(
+        "4946875394261337176810256604189376311946643975348516311606738923340201185905",
+    )
+    .unwrap();
+    assert!(!f1.is_zero());
+    assert!(!f2.is_zero());
+    assert_eq!(f1 + &Fq::one(), f2);
+}
+
+#[test]
+fn test_fq_mul() {
+    let f1 = Fq::from_str(
+        "24703123148064348394273033316595937198355721297494556079070134653139656190956",
+    )
+    .unwrap();
+    let f2 = Fq::from_str(
+        "38196797080882758914424853878212529985425118523754343117256179679117054302131",
+    )
.unwrap(); + let f3 = Fq::from_str( + "1321267396236123309645330145349353750536542060403774171357889269349508194307", + ) + .unwrap(); + assert!(!f1.is_zero()); + assert!(!f2.is_zero()); + assert!(!f3.is_zero()); + assert_eq!(f1 * &f2, f3); +} + +#[test] +fn test_fq_triple_mul() { + let f1 = Fq::from_str( + "23834398828139479510988224171342199299644042568628082836691700490363123893905", + ) + .unwrap(); + let f2 = Fq::from_str( + "48343809612844640454129919255697536258606705076971130519928764925719046689317", + ) + .unwrap(); + let f3 = Fq::from_str( + "22704845471524346880579660022678666462201713488283356385810726260959369106033", + ) + .unwrap(); + let f4 = Fq::from_str( + "7747776931431194635550680695131420638163057297019399136408144301550822179875", + ) + .unwrap(); + assert!(!f1.is_zero()); + assert!(!f2.is_zero()); + assert!(!f3.is_zero()); + assert_eq!(f1 * &f2 * &f3, f4); +} + +#[test] +fn test_fq_div() { + let f1 = Fq::from_str( + "31892744363926593013886463524057935370302352424137349660481695792871889573091", + ) + .unwrap(); + let f2 = Fq::from_str( + "47695868328933459965610498875668250916462767196500056002116961816137113470902", + ) + .unwrap(); + let f3 = Fq::from_str( + "7301086967624450577859019086314322648061398679982346993011603220910508457334", + ) + .unwrap(); + assert!(!f1.is_zero()); + assert!(!f2.is_zero()); + assert!(!f3.is_zero()); + assert_eq!(f1 / &f2, f3); +} + +#[test] +fn test_fq_sub() { + let f1 = Fq::from_str( + "18695869713129401390241150743745601908470616448391638969502807001833388904079", + ) + .unwrap(); + let f2 = Fq::from_str( + "10105476028534616828778879109836101003805485072436929139123765141153277007373", + ) + .unwrap(); + let f3 = Fq::from_str( + "8590393684594784561462271633909500904665131375954709830379041860680111896706", + ) + .unwrap(); + assert!(!f1.is_zero()); + assert!(!f2.is_zero()); + assert!(!f3.is_zero()); + assert_eq!(f1 - &f2, f3); +} + +#[test] +fn test_fq_double_in_place() { + let mut f1 = Fq::from_str( + "29729289787452206300641229002276778748586801323231253291984198106063944136114", + ) + .unwrap(); + let f3 = Fq::from_str( + "15682093831225862156789646514039007320076873845630437896571987838976271280994", + ) + .unwrap(); + assert!(!f1.is_zero()); + assert!(!f3.is_zero()); + f1.double_in_place(); + assert_eq!(f1, f3); +} + +#[test] +fn test_fq_double_in_place_thrice() { + let mut f1 = Fq::from_str( + "32768907806651393940832831055386272949401004221411141755415956893066040832473", + ) + .unwrap(); + let f3 = Fq::from_str( + "21380590862979124081952185245260157621176025366712756262647409092194433207997", + ) + .unwrap(); + assert!(!f1.is_zero()); + assert!(!f3.is_zero()); + f1.double_in_place(); + f1.double_in_place(); + f1.double_in_place(); + assert_eq!(f1, f3); +} + +#[test] +fn test_fq_generate_random_ed_on_bn254_point() { + let a = Fq::from_str("168700").unwrap(); + + let d = Fq::from_str("168696").unwrap(); + let y = Fq::from_str( + "19987327827845206670850937090314462639017692512983955920885166014935289314257", + ) + .unwrap(); + let x2 = Fq::from_str( + "2144239075372598103060889495211040948751593385312551803225522963913923559328", + ) + .unwrap(); + + let computed_y2 = y.square(); + let y2 = Fq::from_str( + "11134206686211572308995578277928848431421308813024790181507137950838333998633", + ) + .unwrap(); + assert_eq!(y2, computed_y2); + + let computed_dy2 = d * &computed_y2; + let dy2 = + Fq::from_str("345576003677591687256955722467813448317229128849323754147891993737799010947") + .unwrap(); + assert_eq!(dy2, computed_dy2); 
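+    // Rearranging the curve equation a·x² + y² = 1 + d·x²·y² gives
+    //     x² = (y² - 1) / (d·y² - a),
+    // so the quantity computed next is the divisor d·y² - a.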
+ + let computed_divisor = computed_dy2 - a; + let divisor = + Fq::from_str("345576003677591687256955722467813448317229128849323754147891993737798842247") + .unwrap(); + assert_eq!(divisor, computed_divisor); + + let computed_x2 = (computed_y2 - &Fq::one()) / &computed_divisor; + assert_eq!(x2, computed_x2); + + let x = Fq::from_str( + "4801447892755635304907919953550459075619191823587157449340656925102682829025", + ) + .unwrap(); + let computed_x = computed_x2.sqrt().unwrap(); + assert_eq!(computed_x.square(), x2); + assert_eq!(x, computed_x); + + fn add<'a>(curr: (Fq, Fq), other: &'a (Fq, Fq)) -> (Fq, Fq) { + let y1y2 = curr.1 * &other.1; + let x1x2 = curr.0 * &other.0; + let a = Fq::from_str("168700").unwrap(); + let d = Fq::from_str("168696").unwrap(); + let dx1x2y1y2 = d * &y1y2 * &x1x2; + + let d1 = Fq::one() + &dx1x2y1y2; + let d2 = Fq::one() - &dx1x2y1y2; + + let x1y2 = curr.0 * &other.1; + let y1x2 = curr.1 * &other.0; + + let x = (x1y2 + &y1x2) / &d1; + let y = (y1y2 - a * &x1x2) / &d2; + + (x, y) + } + + let result = add((x, y), &(x, y)); + let result = add(result, &result); + let result = add(result, &result); + + let point_x = + Fq::from_str("380676173762867192861894055350059333852732198308367125138259398265363727587") + .unwrap(); + let point_y = Fq::from_str( + "8435074244857818446059206728316702149733931432112984450960434710303841866985", + ) + .unwrap(); + assert_eq!((point_x, point_y), result); +} + +#[test] +fn test_fq_square_in_place() { + let mut f1 = Fq::from_str( + "6060110850233386730847324622937480088943976359504617699731744947670229990461", + ) + .unwrap(); + let f3 = Fq::from_str( + "17018926051730832095053393285350575966874590491719897015583930476179087429684", + ) + .unwrap(); + assert!(!f1.is_zero()); + assert!(!f3.is_zero()); + f1.square_in_place(); + assert_eq!(f1, f3); +} + +#[test] +fn test_fq_sqrt() { + let f1 = Fq::from_str( + "5830207146824777307592559303161432403393380070279905260050870500920682305217", + ) + .unwrap(); + let f3 = Fq::from_str( + "2108183130040740552565127577293974960058698876185401671087892009247563211475", + ) + .unwrap(); + assert_eq!(f1.sqrt().unwrap(), f3); +} + +#[test] +fn test_fq_from_str() { + let f1_from_repr = Fq::from(BigInteger([ + 0xab8a2535947d1a77, + 0x9ba74cbfda0bbcda, + 0xe928b59724d60baf, + 0x1cccaaeb9bb1680a, + ])); + let f1 = Fq::from_str( + "13026376210409056429264774981357153555336288129100724591327877625017068755575", + ) + .unwrap(); + let f2_from_repr = Fq::from(BigInteger([ + 0x97e9103775d2f35c, + 0xbe6756b6c587544b, + 0x6ee38c3afd88ef4b, + 0x2bacd150f540c677, + ])); + let f2 = Fq::from_str( + "19754794831832707859764530223239420866832328728734160755396495950822165902172", + ) + .unwrap(); + assert_eq!(f1_from_repr, f1); + assert_eq!(f2_from_repr, f2); +} + +#[test] +fn test_fq_legendre() { + assert_eq!(QuadraticResidue, Fq::one().legendre()); + assert_eq!(Zero, Fq::zero().legendre()); + + let e = BigInteger([ + 0x2e8de1a676c03be8, + 0x73350d34fe25a560, + 0x7ea085919029688e, + 0x1d0868cb993cf28, + ]); + assert_eq!(QuadraticResidue, Fq::from(e).legendre()); + let e = BigInteger([ + 0x891d8cc23c8d0706, + 0xe91800e007db2698, + 0xfff380321e9ac7a7, + 0x2659e28bd17eab6, + ]); + assert_eq!(QuadraticNonResidue, Fq::from(e).legendre()); +} + +#[test] +fn test_fq_bytes() { + let f1_from_repr = Fq::from(BigInteger([ + 0xab8a2535947d1a77, + 0x9ba74cbfda0bbcda, + 0xe928b59724d60baf, + 0x1cccaaeb9bb1680a, + ])); + + let mut f1_bytes = [0u8; 32]; + f1_from_repr.write(f1_bytes.as_mut()).unwrap(); + + let f1 = 
Fq::read(f1_bytes.as_ref()).unwrap(); + assert_eq!(f1_from_repr, f1); +} + +#[test] +fn test_fr_add() { + let f1 = Fr::from(BigInteger([ + 0xccfc9a195e0f5c46, + 0xaed4874d13fb1285, + 0x27368f86ca2848eb, + 0x4f8adcfeb44fccc, + ])); + let f2 = Fr::from(BigInteger([ + 0x661ff05bf8570851, + 0x1b171f4c59be97ef, + 0x5d2ce7f9b4d701f3, + 0x1e0e794623e0f68, + ])); + let f3 = Fr::from(BigInteger([ + 0xcba9f2991d453da6, + 0x1eacb8e13498bc6a, + 0x4d596ec9aecf1fd3, + 0xcd0b95f15cd82f, + ])); + assert_eq!(f1 + &f2, f3); +} + +#[test] +fn test_fr_mul() { + let f1 = Fr::from(BigInteger([ + 0xc2964d2dd5fb980f, + 0xbab64d599c57e496, + 0x39cae13e7d1d4f78, + 0x1aa995aa4de205c, + ])); + let f2 = Fr::from(BigInteger([ + 0xc256e720cd43533b, + 0x3bfbadf6247e13bb, + 0x94c3d63a53714f63, + 0x10f8a7bf74efd57, + ])); + let f3 = Fr::from(BigInteger([ + 0x5eac88be41e0e1fd, + 0x57aab36675b11e24, + 0x835582d896b4d13f, + 0x4808736e213036e, + ])); + assert_eq!(f1 * &f2, f3); +} +#[test] +fn test_fr_bytes() { + let f1_from_repr = Fr::from(BigInteger([ + 0xc81265fb4130fe0c, + 0xb308836c14e22279, + 0x699e887f96bff372, + 0x84ecc7e76c11ad, + ])); + + let mut f1_bytes = [0u8; 32]; + f1_from_repr.write(f1_bytes.as_mut()).unwrap(); + + let f1 = Fr::read(f1_bytes.as_ref()).unwrap(); + assert_eq!(f1_from_repr, f1); +} + +#[test] +fn test_fr_from_str() { + let f100_from_repr = Fr::from(BigInteger([0x64, 0, 0, 0])); + let f100 = Fr::from_str("100").unwrap(); + assert_eq!(f100_from_repr, f100); +} diff --git a/arkworks/curves/ed_on_bn254/src/lib.rs b/arkworks/curves/ed_on_bn254/src/lib.rs new file mode 100644 index 00000000..37f5a3fb --- /dev/null +++ b/arkworks/curves/ed_on_bn254/src/lib.rs @@ -0,0 +1,31 @@ +#![cfg_attr(not(feature = "std"), no_std)] +#![deny( + warnings, + unused, + future_incompatible, + nonstandard_style, + rust_2018_idioms +)] +#![forbid(unsafe_code)] + +//! This library implements a twisted Edwards curve whose base field is the scalar field of the +//! curve BN254. This allows defining cryptographic primitives that use elliptic curves over +//! the scalar field of the latter curve. This curve is also known as [Baby-Jubjub](https://github.com/barryWhiteHat/baby_jubjub). +//! +//! Curve information: +//! * Base field: q = 21888242871839275222246405745257275088548364400416034343698204186575808495617 +//! * Scalar field: r = 2736030358979909402780800718157159386076813972158567259200215660948447373041 +//! * Valuation(q - 1, 2) = 28 +//! * Valuation(r - 1, 2) = 4 +//! * Curve equation: ax^2 + y^2 =1 + dx^2y^2, where +//! * a = 1 +//! * d = 168696/168700 mod q +//! 
= 9706598848417545097372247223557719406784115219466060233080913168975159366771
+
+#[cfg(feature = "r1cs")]
+pub mod constraints;
+mod curves;
+mod fields;
+
+pub use curves::*;
+pub use fields::*;
diff --git a/arkworks/curves/ed_on_bw6_761/Cargo.toml b/arkworks/curves/ed_on_bw6_761/Cargo.toml
new file mode 100644
index 00000000..b641a121
--- /dev/null
+++ b/arkworks/curves/ed_on_bw6_761/Cargo.toml
@@ -0,0 +1,27 @@
+[package]
+name = "ark-ed-on-bw6-761"
+version = "0.3.0"
+authors = [ "arkworks contributors" ]
+description = "A Twisted Edwards curve defined over the scalar field of the BW6-761 curve"
+homepage = "https://arkworks.rs"
+repository = "https://github.com/arkworks-rs/algebra"
+documentation = "https://docs.rs/ark-ed-on-bw6-761/"
+keywords = ["cryptography", "finite-fields", "elliptic-curves" ]
+categories = ["cryptography"]
+include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"]
+license = "MIT/Apache-2.0"
+edition = "2018"
+
+[dependencies]
+ark-ed-on-cp6-782 = { version = "^0.3.0", path = "../ed_on_cp6_782", default-features = false }
+
+[dev-dependencies]
+ark-relations = { version = "^0.3.0", default-features = false }
+ark-r1cs-std = { version = "^0.3.0", default-features = false }
+ark-ff = { version = "^0.3.0", default-features = false }
+ark-std = { version = "^0.3.0", default-features = false }
+
+[features]
+default = []
+std = [ "ark-ed-on-cp6-782/std" ]
+r1cs = [ "ark-ed-on-cp6-782/r1cs" ]
\ No newline at end of file
diff --git a/arkworks/curves/ed_on_bw6_761/src/constraints/mod.rs b/arkworks/curves/ed_on_bw6_761/src/constraints/mod.rs
new file mode 100644
index 00000000..bc3d0ec6
--- /dev/null
+++ b/arkworks/curves/ed_on_bw6_761/src/constraints/mod.rs
@@ -0,0 +1,103 @@
+//! This module implements the R1CS equivalent of `ark_ed_on_bw6_761`.
+//!
+//! It implements field variables for `crate::Fq`,
+//! and group variables for `crate::GroupProjective`.
+//!
+//! The field underlying these constraints is `crate::Fq`.
+//!
+//! # Examples
+//!
+//! One can perform standard algebraic operations on `FqVar`:
+//!
+//! ```
+//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
+//! use ark_std::UniformRand;
+//! use ark_relations::r1cs::*;
+//! use ark_r1cs_std::prelude::*;
+//! use ark_ed_on_bw6_761::{*, constraints::*};
+//!
+//! let cs = ConstraintSystem::<Fq>::new_ref();
+//! // This rng is just for test purposes; do not use it
+//! // in real applications.
+//! let mut rng = ark_std::test_rng();
+//!
+//! // Generate some random `Fq` elements.
+//! let a_native = Fq::rand(&mut rng);
+//! let b_native = Fq::rand(&mut rng);
+//!
+//! // Allocate `a_native` and `b_native` as witness variables in `cs`.
+//! let a = FqVar::new_witness(ark_relations::ns!(cs, "generate_a"), || Ok(a_native))?;
+//! let b = FqVar::new_witness(ark_relations::ns!(cs, "generate_b"), || Ok(b_native))?;
+//!
+//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any
+//! // constraints or variables.
+//! let a_const = FqVar::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?;
+//! let b_const = FqVar::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?;
+//!
+//! let one = FqVar::one();
+//! let zero = FqVar::zero();
+//!
+//! // Sanity check one + one = two
+//! let two = &one + &one + &zero;
+//! two.enforce_equal(&one.double()?)?;
+//!
+//! assert!(cs.is_satisfied()?);
+//!
+//! // Check that the value of &a + &b is correct.
+//! assert_eq!((&a + &b).value()?, a_native + &b_native);
+//!
+//! // Check that the value of &a * &b is correct.
+//! assert_eq!((&a * &b).value()?, a_native * &b_native);
+//!
+//! // Check that operations on variables and constants are equivalent.
+//! (&a + &b).enforce_equal(&(&a_const + &b_const))?;
+//! assert!(cs.is_satisfied()?);
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! One can also perform standard algebraic operations on `EdwardsVar`:
+//!
+//! ```
+//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
+//! # use ark_std::UniformRand;
+//! # use ark_relations::r1cs::*;
+//! # use ark_r1cs_std::prelude::*;
+//! # use ark_ed_on_bw6_761::{*, constraints::*};
+//!
+//! # let cs = ConstraintSystem::<Fq>::new_ref();
+//! # let mut rng = ark_std::test_rng();
+//!
+//! // Generate some random `Edwards` elements.
+//! let a_native = EdwardsProjective::rand(&mut rng);
+//! let b_native = EdwardsProjective::rand(&mut rng);
+//!
+//! // Allocate `a_native` and `b_native` as witness variables in `cs`.
+//! let a = EdwardsVar::new_witness(ark_relations::ns!(cs, "a"), || Ok(a_native))?;
+//! let b = EdwardsVar::new_witness(ark_relations::ns!(cs, "b"), || Ok(b_native))?;
+//!
+//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any
+//! // constraints or variables.
+//! let a_const = EdwardsVar::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?;
+//! let b_const = EdwardsVar::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?;
+//!
+//! // This returns the identity of `Edwards`.
+//! let zero = EdwardsVar::zero();
+//!
+//! // Sanity check a + a = 2a
+//! let two_a = &a + &a + &zero;
+//! two_a.enforce_equal(&a.double()?)?;
+//!
+//! assert!(cs.is_satisfied()?);
+//!
+//! // Check that the value of &a + &b is correct.
+//! assert_eq!((&a + &b).value()?, a_native + &b_native);
+//!
+//! // Check that operations on variables and constants are equivalent.
+//! (&a + &b).enforce_equal(&(&a_const + &b_const))?;
+//! assert!(cs.is_satisfied()?);
+//! # Ok(())
+//! # }
+//! ```
+
+pub use ark_ed_on_cp6_782::constraints::*;
diff --git a/arkworks/curves/ed_on_bw6_761/src/lib.rs b/arkworks/curves/ed_on_bw6_761/src/lib.rs
new file mode 100644
index 00000000..1b9c6046
--- /dev/null
+++ b/arkworks/curves/ed_on_bw6_761/src/lib.rs
@@ -0,0 +1,21 @@
+#![cfg_attr(not(feature = "std"), no_std)]
+#![forbid(unsafe_code)]
+//! This library implements a twisted Edwards curve whose base field is the scalar field of the
+//! curve BW6-761. *It is the same curve as that in `ark-ed-on-cp6-782`.*
+//! This allows defining cryptographic primitives that use elliptic curves over the scalar field of
+//! the latter curve. This curve was generated as part of the paper
+//! [\[BCGMMW20, “Zexe”\]](https://eprint.iacr.org/2018/962).
+//!
+//! Curve information:
+//! * Base field: q = 258664426012969094010652733694893533536393512754914660539884262666720468348340822774968888139573360124440321458177
+//! * Scalar field: r = 32333053251621136751331591711861691692049189094364332567435817881934511297123972799646723302813083835942624121493
+//! * Valuation(q - 1, 2) = 46
+//! * Valuation(r - 1, 2) = 2
+//! * Curve equation: ax^2 + y^2 = 1 + dx^2y^2, where
+//!    * a = -1
+//!    * d = 79743
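+//!
+//! Since this crate re-exports `ark-ed-on-cp6-782` wholesale, the two crates name
+//! literally the same types (a sketch illustrating the aliasing):
+//!
+//! ```
+//! use ark_std::UniformRand;
+//!
+//! let mut rng = ark_std::test_rng();
+//! // Both paths resolve to the same projective group type.
+//! let g: ark_ed_on_bw6_761::EdwardsProjective =
+//!     ark_ed_on_cp6_782::EdwardsProjective::rand(&mut rng);
+//! # let _ = g;
+//! ```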
+
+pub use ark_ed_on_cp6_782::*;
+
+#[cfg(feature = "r1cs")]
+pub mod constraints;
diff --git a/arkworks/curves/ed_on_cp6_782/Cargo.toml b/arkworks/curves/ed_on_cp6_782/Cargo.toml
new file mode 100644
index 00000000..75a129c2
--- /dev/null
+++ b/arkworks/curves/ed_on_cp6_782/Cargo.toml
@@ -0,0 +1,31 @@
+[package]
+name = "ark-ed-on-cp6-782"
+version = "0.3.0"
+authors = [ "arkworks contributors" ]
+description = "A Twisted Edwards curve defined over the scalar field of the CP6-782 curve"
+homepage = "https://arkworks.rs"
+repository = "https://github.com/arkworks-rs/algebra"
+documentation = "https://docs.rs/ark-ed-on-cp6-782/"
+keywords = ["cryptography", "finite-fields", "elliptic-curves" ]
+categories = ["cryptography"]
+include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"]
+license = "MIT/Apache-2.0"
+edition = "2018"
+
+[dependencies]
+ark-ff = { version = "^0.3.0", default-features = false }
+ark-ec = { version = "^0.3.0", default-features = false }
+ark-std = { version = "^0.3.0", default-features = false }
+ark-r1cs-std = { version = "^0.3.0", default-features = false, optional = true }
+ark-bls12-377 = { version = "^0.3.0", path = "../bls12_377", default-features = false, features = [ "base_field" ] }
+
+[dev-dependencies]
+ark-relations = { version = "^0.3.0", default-features = false }
+ark-serialize = { version = "^0.3.0", default-features = false }
+ark-algebra-test-templates = { version = "^0.3.0", default-features = false }
+ark-curve-constraint-tests = { path = "../curve-constraint-tests", default-features = false }
+
+[features]
+default = []
+std = [ "ark-std/std", "ark-ff/std", "ark-ec/std", "ark-bls12-377/std" ]
+r1cs = ["ark-r1cs-std"]
\ No newline at end of file
diff --git a/arkworks/curves/ed_on_cp6_782/src/constraints/curves.rs b/arkworks/curves/ed_on_cp6_782/src/constraints/curves.rs
new file mode 100644
index 00000000..4b15106d
--- /dev/null
+++ b/arkworks/curves/ed_on_cp6_782/src/constraints/curves.rs
@@ -0,0 +1,12 @@
+use crate::*;
+use ark_r1cs_std::groups::curves::twisted_edwards::AffineVar;
+
+use crate::constraints::FqVar;
+
+/// A variable that is the R1CS equivalent of `crate::EdwardsAffine`.
+pub type EdwardsVar = AffineVar<EdwardsParameters, FqVar>;
+
+#[test]
+fn test() {
+    ark_curve_constraint_tests::curves::te_test::<_, EdwardsVar>().unwrap();
+}
diff --git a/arkworks/curves/ed_on_cp6_782/src/constraints/fields.rs b/arkworks/curves/ed_on_cp6_782/src/constraints/fields.rs
new file mode 100644
index 00000000..f25fa336
--- /dev/null
+++ b/arkworks/curves/ed_on_cp6_782/src/constraints/fields.rs
@@ -0,0 +1,10 @@
+use crate::fq::Fq;
+use ark_r1cs_std::fields::fp::FpVar;
+
+/// A variable that is the R1CS equivalent of `crate::Fq`.
+pub type FqVar = FpVar<Fq>;
+
+#[test]
+fn test() {
+    ark_curve_constraint_tests::fields::field_test::<_, _, FqVar>().unwrap();
+}
diff --git a/arkworks/curves/ed_on_cp6_782/src/constraints/mod.rs b/arkworks/curves/ed_on_cp6_782/src/constraints/mod.rs
new file mode 100644
index 00000000..e97a180b
--- /dev/null
+++ b/arkworks/curves/ed_on_cp6_782/src/constraints/mod.rs
@@ -0,0 +1,108 @@
+#![allow(unreachable_pub)]
+//! This module implements the R1CS equivalent of `ark_ed_on_cp6_782`.
+//!
+//! It implements field variables for `crate::Fq`,
+//! and group variables for `crate::GroupProjective`.
+//!
+//! The field underlying these constraints is `crate::Fq`.
+//!
+//! # Examples
+//!
+//! One can perform standard algebraic operations on `FqVar`:
+//!
+//! ```
# fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { +//! use ark_std::UniformRand; +//! use ark_relations::r1cs::*; +//! use ark_r1cs_std::prelude::*; +//! use ark_ed_on_cp6_782::{*, constraints::*}; +//! +//! let cs = ConstraintSystem::::new_ref(); +//! // This rng is just for test purposes; do not use it +//! // in real applications. +//! let mut rng = ark_std::test_rng(); +//! +//! // Generate some random `Fq` elements. +//! let a_native = Fq::rand(&mut rng); +//! let b_native = Fq::rand(&mut rng); +//! +//! // Allocate `a_native` and `b_native` as witness variables in `cs`. +//! let a = FqVar::new_witness(ark_relations::ns!(cs, "generate_a"), || Ok(a_native))?; +//! let b = FqVar::new_witness(ark_relations::ns!(cs, "generate_b"), || Ok(b_native))?; +//! +//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any +//! // constraints or variables. +//! let a_const = FqVar::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?; +//! let b_const = FqVar::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?; +//! +//! let one = FqVar::one(); +//! let zero = FqVar::zero(); +//! +//! // Sanity check one + one = two +//! let two = &one + &one + &zero; +//! two.enforce_equal(&one.double()?)?; +//! +//! assert!(cs.is_satisfied()?); +//! +//! // Check that the value of &a + &b is correct. +//! assert_eq!((&a + &b).value()?, a_native + &b_native); +//! +//! // Check that the value of &a * &b is correct. +//! assert_eq!((&a * &b).value()?, a_native * &b_native); +//! +//! // Check that operations on variables and constants are equivalent. +//! (&a + &b).enforce_equal(&(&a_const + &b_const))?; +//! assert!(cs.is_satisfied()?); +//! # Ok(()) +//! # } +//! ``` +//! +//! One can also perform standard algebraic operations on `EdwardsVar`: +//! +//! ``` +//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { +//! # use ark_std::UniformRand; +//! # use ark_relations::r1cs::*; +//! # use ark_r1cs_std::prelude::*; +//! # use ark_ed_on_cp6_782::{*, constraints::*}; +//! +//! # let cs = ConstraintSystem::::new_ref(); +//! # let mut rng = ark_std::test_rng(); +//! +//! // Generate some random `Edwards` elements. +//! let a_native = EdwardsProjective::rand(&mut rng); +//! let b_native = EdwardsProjective::rand(&mut rng); +//! +//! // Allocate `a_native` and `b_native` as witness variables in `cs`. +//! let a = EdwardsVar::new_witness(ark_relations::ns!(cs, "a"), || Ok(a_native))?; +//! let b = EdwardsVar::new_witness(ark_relations::ns!(cs, "b"), || Ok(b_native))?; +//! +//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any +//! // constraints or variables. +//! let a_const = EdwardsVar::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?; +//! let b_const = EdwardsVar::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?; +//! +//! // This returns the identity of `Edwards`. +//! let zero = EdwardsVar::zero(); +//! +//! // Sanity check one + one = two +//! let two_a = &a + &a + &zero; +//! two_a.enforce_equal(&a.double()?)?; +//! +//! assert!(cs.is_satisfied()?); +//! +//! // Check that the value of &a + &b is correct. +//! assert_eq!((&a + &b).value()?, a_native + &b_native); +//! +//! // Check that operations on variables and constants are equivalent. +//! (&a + &b).enforce_equal(&(&a_const + &b_const))?; +//! assert!(cs.is_satisfied()?); +//! # Ok(()) +//! # } +//! 
```
+
+mod curves;
+mod fields;
+
+pub use curves::*;
+pub use fields::*;
diff --git a/arkworks/curves/ed_on_cp6_782/src/curves/mod.rs b/arkworks/curves/ed_on_cp6_782/src/curves/mod.rs
new file mode 100644
index 00000000..4ae769b2
--- /dev/null
+++ b/arkworks/curves/ed_on_cp6_782/src/curves/mod.rs
@@ -0,0 +1,71 @@
+use ark_ec::{
+    models::{ModelParameters, MontgomeryModelParameters, TEModelParameters},
+    twisted_edwards_extended::{GroupAffine, GroupProjective},
+};
+use ark_ff::field_new;
+
+use crate::{fq::Fq, fr::Fr};
+
+#[cfg(test)]
+mod tests;
+
+pub type EdwardsAffine = GroupAffine<EdwardsParameters>;
+pub type EdwardsProjective = GroupProjective<EdwardsParameters>;
+
+#[derive(Clone, Default, PartialEq, Eq)]
+pub struct EdwardsParameters;
+
+impl ModelParameters for EdwardsParameters {
+    type BaseField = Fq;
+    type ScalarField = Fr;
+}
+
+impl TEModelParameters for EdwardsParameters {
+    /// COEFF_A = -1
+    #[rustfmt::skip]
+    const COEFF_A: Fq = field_new!(Fq, "-1");
+
+    /// COEFF_D = 79743
+    #[rustfmt::skip]
+    const COEFF_D: Fq = field_new!(Fq, "79743");
+
+    /// COFACTOR = 8
+    const COFACTOR: &'static [u64] = &[8];
+
+    /// COFACTOR^(-1) mod r =
+    /// 12124894969357926281749346891948134384518445910386624712788431705725441736421489799867521238554906438478484045560
+    #[rustfmt::skip]
+    const COFACTOR_INV: Fr = field_new!(Fr, "12124894969357926281749346891948134384518445910386624712788431705725441736421489799867521238554906438478484045560");
+
+    /// AFFINE_GENERATOR_COEFFS = (GENERATOR_X, GENERATOR_Y)
+    const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = (GENERATOR_X, GENERATOR_Y);
+
+    type MontgomeryModelParameters = EdwardsParameters;
+
+    /// Multiplication by `a` is just negation.
+    #[inline(always)]
+    fn mul_by_a(elem: &Self::BaseField) -> Self::BaseField {
+        -*elem
+    }
+}
+
+impl MontgomeryModelParameters for EdwardsParameters {
+    /// COEFF_A = 0x95D53EB3F6AC3F7A53C26020144439DC6073BCAE513E03FD06B6B3BAA390F25E51534B26719E33F4CD906D4DA9B535
+    #[rustfmt::skip]
+    const COEFF_A: Fq = field_new!(Fq, "90083623084271891037116870487743067984710080209539149685414147055329063590616489392386084989619674926965747987765");
+    /// COEFF_B = 0x118650763CE64AB4BE743604C8D05013DC2663652A3D58B21ECAB7BFF65B70DB8BA09F9098E61CC903B2F92B2564ACA
+    #[rustfmt::skip]
+    const COEFF_B: Fq = field_new!(Fq, "168580802928697202973535863207150465551683432545375510854470115611391404757724333382582803149953685197474573470410");
+
+    type TEModelParameters = EdwardsParameters;
+}
+
+/// GENERATOR_X =
+/// 174701772324485506941690903512423551998294352968833659960042362742684869862495746426366187462669992073196420267127
+#[rustfmt::skip]
+const GENERATOR_X: Fq = field_new!(Fq, "174701772324485506941690903512423551998294352968833659960042362742684869862495746426366187462669992073196420267127");
+
+/// GENERATOR_Y =
+/// 208487200052258845495340374451540775445408439654930191324011635560142523886549663106522691296420655144190624954833
+#[rustfmt::skip]
+const GENERATOR_Y: Fq = field_new!(Fq, "208487200052258845495340374451540775445408439654930191324011635560142523886549663106522691296420655144190624954833");
diff --git a/arkworks/curves/ed_on_cp6_782/src/curves/tests.rs b/arkworks/curves/ed_on_cp6_782/src/curves/tests.rs
new file mode 100644
index 00000000..58db359a
--- /dev/null
+++ b/arkworks/curves/ed_on_cp6_782/src/curves/tests.rs
@@ -0,0 +1,62 @@
+use ark_ec::{AffineCurve, ProjectiveCurve};
+use ark_std::rand::Rng;
+use ark_std::test_rng;
+
+use crate::*;
+
+use ark_algebra_test_templates::{curves::*, groups::*};
+
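+// (Illustrative addition, not part of the upstream test-suite: a quick check
+// that `COFACTOR_INV` really is the multiplicative inverse of `COFACTOR = 8`
+// in the scalar field `Fr`. `From<u64>` and `One` come from ark-ff 0.3.)
+#[test]
+fn test_cofactor_inv_consistency() {
+    use ark_ec::models::TEModelParameters;
+    use ark_ff::One;
+    let cofactor = Fr::from(8u64);
+    assert_eq!(cofactor * EdwardsParameters::COFACTOR_INV, Fr::one());
+}
+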
+#[test]
+fn test_projective_curve() {
+    curve_tests::<EdwardsProjective>();
+
+    edwards_tests::<EdwardsParameters>();
+}
+
+#[test]
+fn test_projective_group() {
+    let mut rng = test_rng();
+    let a = rng.gen();
+    let b = rng.gen();
+    for _i in 0..100 {
+        group_test::<EdwardsProjective>(a, b);
+    }
+}
+
+#[test]
+fn test_affine_group() {
+    let mut rng = test_rng();
+    let a: EdwardsAffine = rng.gen();
+    let b: EdwardsAffine = rng.gen();
+    for _i in 0..100 {
+        group_test::<EdwardsAffine>(a, b);
+    }
+}
+
+#[test]
+fn test_generator() {
+    let generator = EdwardsAffine::prime_subgroup_generator();
+    assert!(generator.is_on_curve());
+    assert!(generator.is_in_correct_subgroup_assuming_on_curve());
+}
+
+#[test]
+fn test_conversion() {
+    let mut rng = test_rng();
+    let a: EdwardsAffine = rng.gen();
+    let b: EdwardsAffine = rng.gen();
+    let a_b = {
+        use ark_ec::group::Group;
+        (a + &b).double().double()
+    };
+    let a_b2 = (a.into_projective() + &b.into_projective())
+        .double()
+        .double();
+    assert_eq!(a_b, a_b2.into_affine());
+    assert_eq!(a_b.into_projective(), a_b2);
+}
+
+#[test]
+fn test_montgomery_conversion() {
+    montgomery_conversion_test::<EdwardsParameters>();
+}
diff --git a/arkworks/curves/ed_on_cp6_782/src/fields/fq.rs b/arkworks/curves/ed_on_cp6_782/src/fields/fq.rs
new file mode 100644
index 00000000..4361a413
--- /dev/null
+++ b/arkworks/curves/ed_on_cp6_782/src/fields/fq.rs
@@ -0,0 +1 @@
+pub use ark_bls12_377::{Fq, FqParameters};
diff --git a/arkworks/curves/ed_on_cp6_782/src/fields/fr.rs b/arkworks/curves/ed_on_cp6_782/src/fields/fr.rs
new file mode 100644
index 00000000..b10bbecd
--- /dev/null
+++ b/arkworks/curves/ed_on_cp6_782/src/fields/fr.rs
@@ -0,0 +1,106 @@
+use ark_ff::{
+    biginteger::BigInteger384 as BigInteger,
+    fields::{FftParameters, Fp384, Fp384Parameters, FpParameters},
+};
+
+pub type Fr = Fp384<FrParameters>;
+
+pub struct FrParameters;
+
+impl Fp384Parameters for FrParameters {}
+impl FftParameters for FrParameters {
+    type BigInt = BigInteger;
+
+    const TWO_ADICITY: u32 = 2u32;
+
+    #[rustfmt::skip]
+    const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([
+        12119792640622387781u64,
+        8318439284650634613u64,
+        6931324077796168275u64,
+        12851391603681523141u64,
+        6881015057611215092u64,
+        1893962574900431u64,
+    ]);
+}
+impl FpParameters for FrParameters {
+    /// MODULUS = 32333053251621136751331591711861691692049189094364332567435817881934511297123972799646723302813083835942624121493
+    #[rustfmt::skip]
+    const MODULUS: BigInteger = BigInteger([
+        4684667634276979349u64,
+        3748803659444032385u64,
+        16273581227874629698u64,
+        7152942431629910641u64,
+        6397188139321141543u64,
+        15137289088311837u64,
+    ]);
+
+    const MODULUS_BITS: u32 = 374;
+
+    const CAPACITY: u32 = Self::MODULUS_BITS - 1;
+
+    const REPR_SHAVE_BITS: u32 = 10;
+
+    #[rustfmt::skip]
+    const R: BigInteger = BigInteger([
+        12565484300600153878u64,
+        8749673077137355528u64,
+        9027943686469014788u64,
+        13026065139386752555u64,
+        11197589485989933721u64,
+        9525964145733727u64,
+    ]);
+
+    #[rustfmt::skip]
+    const R2: BigInteger = BigInteger([
+        17257035094703902127u64,
+        16096159112880350050u64,
+        3498553494623421763u64,
+        333405339929360058u64,
+        1125865524035793947u64,
+        1586246138566285u64,
+    ]);
+
+    const INV: u64 = 16242011933465909059u64;
+
+    // 2
+    #[rustfmt::skip]
+    const GENERATOR: BigInteger = BigInteger([
+        1999556893213776791u64,
+        13750542494830678672u64,
+        1782306145063399878u64,
+        452443773434042853u64,
+        15997990832658725900u64,
+        3914639203155617u64,
+    ]);
+
+    #[rustfmt::skip]
+    const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        11565705853993265482u64,
+        1874401829722016192u64,
+
17360162650792090657u64, + 12799843252669731128u64, + 12421966106515346579u64, + 7568644544155918u64, + ]); + + /// 8083263312905284187832897927965422923012297273591083141858954470483627824280993199911680825703270958985656030373 + const T: BigInteger = BigInteger([ + 5782852926996632741, + 10160572951715783904, + 8680081325396045328, + 15623293663189641372, + 6210983053257673289, + 3784322272077959, + ]); + + /// 4041631656452642093916448963982711461506148636795541570929477235241813912140496599955840412851635479492828015186 + const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 2891426463498316370, + 5080286475857891952, + 4340040662698022664, + 17035018868449596494, + 12328863563483612452, + 1892161136038979, + ]); +} diff --git a/arkworks/curves/ed_on_cp6_782/src/fields/mod.rs b/arkworks/curves/ed_on_cp6_782/src/fields/mod.rs new file mode 100644 index 00000000..208333fb --- /dev/null +++ b/arkworks/curves/ed_on_cp6_782/src/fields/mod.rs @@ -0,0 +1,8 @@ +pub mod fq; +pub mod fr; + +pub use fq::*; +pub use fr::*; + +#[cfg(all(feature = "ed_on_cp6_782", test))] +mod tests; diff --git a/arkworks/curves/ed_on_cp6_782/src/fields/tests.rs b/arkworks/curves/ed_on_cp6_782/src/fields/tests.rs new file mode 100644 index 00000000..173b5ee2 --- /dev/null +++ b/arkworks/curves/ed_on_cp6_782/src/fields/tests.rs @@ -0,0 +1,24 @@ +use ark_std::rand::Rng; +use ark_std::test_rng; + +use crate::{Fq, Fr}; + +use ark_algebra_test_templates::fields::*; + +#[test] +fn test_fr() { + let mut rng = test_rng(); + let a: Fr = rng.gen(); + let b: Fr = rng.gen(); + field_test(a, b); + primefield_test::(); +} + +#[test] +fn test_fq() { + let mut rng = test_rng(); + let a: Fq = rng.gen(); + let b: Fq = rng.gen(); + field_test(a, b); + primefield_test::(); +} diff --git a/arkworks/curves/ed_on_cp6_782/src/lib.rs b/arkworks/curves/ed_on_cp6_782/src/lib.rs new file mode 100644 index 00000000..2879a940 --- /dev/null +++ b/arkworks/curves/ed_on_cp6_782/src/lib.rs @@ -0,0 +1,30 @@ +#![cfg_attr(not(feature = "std"), no_std)] +#![deny( + warnings, + unused, + future_incompatible, + nonstandard_style, + rust_2018_idioms +)] +#![forbid(unsafe_code)] + +//! This library implements a twisted Edwards curve whose base field is the scalar field of the curve CP6. +//! This allows defining cryptographic primitives that use elliptic curves over the scalar field of the latter curve. +//! This curve was generated as part of the paper [\[BCGMMW20, “Zexe”\]](https://eprint.iacr.org/2018/962). +//! +//! Curve information: +//! * Base field: q = 258664426012969094010652733694893533536393512754914660539884262666720468348340822774968888139573360124440321458177 +//! * Scalar field: r = 32333053251621136751331591711861691692049189094364332567435817881934511297123972799646723302813083835942624121493 +//! * Valuation(q - 1, 2) = 46 +//! * Valuation(r - 1, 2) = 2 +//! * Curve equation: ax^2 + y^2 =1 + dx^2y^2, where +//! * a = -1 +//! 
* d = 79743
+
+#[cfg(feature = "r1cs")]
+pub mod constraints;
+mod curves;
+mod fields;
+
+pub use curves::*;
+pub use fields::*;
diff --git a/arkworks/curves/ed_on_mnt4_298/Cargo.toml b/arkworks/curves/ed_on_mnt4_298/Cargo.toml
new file mode 100644
index 00000000..b923bbb3
--- /dev/null
+++ b/arkworks/curves/ed_on_mnt4_298/Cargo.toml
@@ -0,0 +1,31 @@
+[package]
+name = "ark-ed-on-mnt4-298"
+version = "0.3.0"
+authors = [ "arkworks contributors" ]
+description = "A Twisted Edwards curve defined over the scalar field of the MNT4-298 curve"
+homepage = "https://arkworks.rs"
+repository = "https://github.com/arkworks-rs/algebra"
+documentation = "https://docs.rs/ark-ed-on-mnt4-298/"
+keywords = ["cryptography", "finite-fields", "elliptic-curves" ]
+categories = ["cryptography"]
+include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"]
+license = "MIT/Apache-2.0"
+edition = "2018"
+
+[dependencies]
+ark-ff = { version = "^0.3.0", default-features = false }
+ark-ec = { version = "^0.3.0", default-features = false }
+ark-std = { version = "^0.3.0", default-features = false }
+ark-r1cs-std = { version = "^0.3.0", default-features = false, optional = true }
+ark-mnt4-298 = { version = "^0.3.0", path = "../mnt4_298", default-features = false, features = [ "scalar_field" ] }
+
+[dev-dependencies]
+ark-relations = { version = "^0.3.0", default-features = false }
+ark-serialize = { version = "^0.3.0", default-features = false }
+ark-algebra-test-templates = { version = "^0.3.0", default-features = false }
+ark-curve-constraint-tests = { path = "../curve-constraint-tests", default-features = false }
+
+[features]
+default = []
+std = [ "ark-std/std", "ark-ff/std", "ark-ec/std", "ark-mnt4-298/std" ]
+r1cs = ["ark-r1cs-std"]
\ No newline at end of file
diff --git a/arkworks/curves/ed_on_mnt4_298/src/constraints/curves.rs b/arkworks/curves/ed_on_mnt4_298/src/constraints/curves.rs
new file mode 100644
index 00000000..3ceded42
--- /dev/null
+++ b/arkworks/curves/ed_on_mnt4_298/src/constraints/curves.rs
@@ -0,0 +1,12 @@
+use crate::*;
+use ark_r1cs_std::groups::curves::twisted_edwards::AffineVar;
+
+use crate::constraints::fields::FqVar;
+
+/// A variable that is the R1CS equivalent of `crate::EdwardsAffine`.
+pub type EdwardsVar = AffineVar<EdwardsParameters, FqVar>;
+
+#[test]
+fn test() {
+    ark_curve_constraint_tests::curves::te_test::<EdwardsParameters, EdwardsVar>().unwrap();
+}
diff --git a/arkworks/curves/ed_on_mnt4_298/src/constraints/fields.rs b/arkworks/curves/ed_on_mnt4_298/src/constraints/fields.rs
new file mode 100644
index 00000000..f25fa336
--- /dev/null
+++ b/arkworks/curves/ed_on_mnt4_298/src/constraints/fields.rs
@@ -0,0 +1,10 @@
+use crate::fq::Fq;
+use ark_r1cs_std::fields::fp::FpVar;
+
+/// A variable that is the R1CS equivalent of `crate::Fq`.
+pub type FqVar = FpVar<Fq>;
+
+#[test]
+fn test() {
+    ark_curve_constraint_tests::fields::field_test::<_, _, FqVar>().unwrap();
+}
diff --git a/arkworks/curves/ed_on_mnt4_298/src/constraints/mod.rs b/arkworks/curves/ed_on_mnt4_298/src/constraints/mod.rs
new file mode 100644
index 00000000..ed8b2b7c
--- /dev/null
+++ b/arkworks/curves/ed_on_mnt4_298/src/constraints/mod.rs
@@ -0,0 +1,107 @@
+//! This module implements the R1CS equivalent of `ark_ed_on_mnt4_298`.
+//!
+//! It implements field variables for `crate::Fq`,
+//! and group variables for `crate::GroupProjective`.
+//!
+//! The field underlying these constraints is `crate::Fq`.
+//!
+//! # Examples
+//!
+//! One can perform standard algebraic operations on `FqVar`:
+//!
+//! ```
+//! 
# fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { +//! use ark_std::UniformRand; +//! use ark_relations::r1cs::*; +//! use ark_r1cs_std::prelude::*; +//! use ark_ed_on_mnt4_298::{*, constraints::*}; +//! +//! let cs = ConstraintSystem::::new_ref(); +//! // This rng is just for test purposes; do not use it +//! // in real applications. +//! let mut rng = ark_std::test_rng(); +//! +//! // Generate some random `Fq` elements. +//! let a_native = Fq::rand(&mut rng); +//! let b_native = Fq::rand(&mut rng); +//! +//! // Allocate `a_native` and `b_native` as witness variables in `cs`. +//! let a = FqVar::new_witness(ark_relations::ns!(cs, "generate_a"), || Ok(a_native))?; +//! let b = FqVar::new_witness(ark_relations::ns!(cs, "generate_b"), || Ok(b_native))?; +//! +//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any +//! // constraints or variables. +//! let a_const = FqVar::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?; +//! let b_const = FqVar::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?; +//! +//! let one = FqVar::one(); +//! let zero = FqVar::zero(); +//! +//! // Sanity check one + one = two +//! let two = &one + &one + &zero; +//! two.enforce_equal(&one.double()?)?; +//! +//! assert!(cs.is_satisfied()?); +//! +//! // Check that the value of &a + &b is correct. +//! assert_eq!((&a + &b).value()?, a_native + &b_native); +//! +//! // Check that the value of &a * &b is correct. +//! assert_eq!((&a * &b).value()?, a_native * &b_native); +//! +//! // Check that operations on variables and constants are equivalent. +//! (&a + &b).enforce_equal(&(&a_const + &b_const))?; +//! assert!(cs.is_satisfied()?); +//! # Ok(()) +//! # } +//! ``` +//! +//! One can also perform standard algebraic operations on `EdwardsVar`: +//! +//! ``` +//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { +//! # use ark_std::UniformRand; +//! # use ark_relations::r1cs::*; +//! # use ark_r1cs_std::prelude::*; +//! # use ark_ed_on_mnt4_298::{*, constraints::*}; +//! +//! # let cs = ConstraintSystem::::new_ref(); +//! # let mut rng = ark_std::test_rng(); +//! +//! // Generate some random `Edwards` elements. +//! let a_native = EdwardsProjective::rand(&mut rng); +//! let b_native = EdwardsProjective::rand(&mut rng); +//! +//! // Allocate `a_native` and `b_native` as witness variables in `cs`. +//! let a = EdwardsVar::new_witness(ark_relations::ns!(cs, "a"), || Ok(a_native))?; +//! let b = EdwardsVar::new_witness(ark_relations::ns!(cs, "b"), || Ok(b_native))?; +//! +//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any +//! // constraints or variables. +//! let a_const = EdwardsVar::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?; +//! let b_const = EdwardsVar::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?; +//! +//! // This returns the identity of `Edwards`. +//! let zero = EdwardsVar::zero(); +//! +//! // Sanity check one + one = two +//! let two_a = &a + &a + &zero; +//! two_a.enforce_equal(&a.double()?)?; +//! +//! assert!(cs.is_satisfied()?); +//! +//! // Check that the value of &a + &b is correct. +//! assert_eq!((&a + &b).value()?, a_native + &b_native); +//! +//! // Check that operations on variables and constants are equivalent. +//! (&a + &b).enforce_equal(&(&a_const + &b_const))?; +//! assert!(cs.is_satisfied()?); +//! # Ok(()) +//! # } +//! 
```
+
+mod curves;
+mod fields;
+
+pub use curves::*;
+pub use fields::*;
diff --git a/arkworks/curves/ed_on_mnt4_298/src/curves/mod.rs b/arkworks/curves/ed_on_mnt4_298/src/curves/mod.rs
new file mode 100644
index 00000000..ab12d935
--- /dev/null
+++ b/arkworks/curves/ed_on_mnt4_298/src/curves/mod.rs
@@ -0,0 +1,82 @@
+use ark_ec::{
+    models::{ModelParameters, MontgomeryModelParameters, TEModelParameters},
+    twisted_edwards_extended::{GroupAffine, GroupProjective},
+};
+use ark_ff::field_new;
+
+use crate::{fq::Fq, fr::Fr};
+
+#[cfg(test)]
+mod tests;
+
+pub type EdwardsAffine = GroupAffine<EdwardsParameters>;
+pub type EdwardsProjective = GroupProjective<EdwardsParameters>;
+
+#[derive(Clone, Default, PartialEq, Eq)]
+pub struct EdwardsParameters;
+
+impl ModelParameters for EdwardsParameters {
+    type BaseField = Fq;
+    type ScalarField = Fr;
+}
+
+// Many parameters need to be written down in the Montgomery residue form,
+// discussed below. Some useful numbers:
+// R for Fq: 223364648326281414938801705359223029554923725549792420683051274872200260503540791531766876
+// R for Fr: 104384076783966083500464392945960916666734135485183910065100558776489954102951241798239545
+
+impl TEModelParameters for EdwardsParameters {
+    /// COEFF_A = -1
+    /// Needs to be in the Montgomery residue form in Fq
+    /// I.e., -1 * R for Fq
+    /// = 252557637842979910814547544293825421990201153003031094870216460866964386803867699028196261
+    #[rustfmt::skip]
+    const COEFF_A: Fq = field_new!(Fq, "-1");
+
+    /// COEFF_D = 4212
+    /// Needs to be in the Montgomery residue form in Fq
+    /// I.e., 4212 * R for Fq
+    /// = 389461279836940033614665658623660232171971995346409183754923941118154161474636585314923000
+    #[rustfmt::skip]
+    const COEFF_D: Fq = field_new!(Fq, "4212");
+
+    /// COFACTOR = 4
+    const COFACTOR: &'static [u64] = &[4];
+
+    /// COFACTOR_INV (mod r) =
+    /// 29745142885578832859584328103315528221570304936126890280067991221921526670592508030983158
+    #[rustfmt::skip]
+    const COFACTOR_INV: Fr = field_new!(Fr, "29745142885578832859584328103315528221570304936126890280067991221921526670592508030983158");
+
+    /// Generated randomly
+    const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = (GENERATOR_X, GENERATOR_Y);
+
+    type MontgomeryModelParameters = EdwardsParameters;
+
+    /// Multiplication by `a` is just negation.
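+    ///
+    /// A minimal sanity sketch of this fact (illustrative only, not from the
+    /// upstream docs; it assumes ark-ff 0.3's `field_new!` macro and that the
+    /// `TEModelParameters` trait is in scope):
+    ///
+    /// ```ignore
+    /// use ark_ec::models::TEModelParameters;
+    /// use ark_ff::field_new;
+    /// use ark_ed_on_mnt4_298::{EdwardsParameters, Fq};
+    ///
+    /// let x = field_new!(Fq, "2");
+    /// // COEFF_A = -1, so multiplying by `a` is the same as negating.
+    /// assert_eq!(EdwardsParameters::mul_by_a(&x), -x);
+    /// ```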
+    #[inline(always)]
+    fn mul_by_a(elem: &Self::BaseField) -> Self::BaseField {
+        -*elem
+    }
+}
+
+impl MontgomeryModelParameters for EdwardsParameters {
+    /// COEFF_A = 203563247015667910991582090642011229452721346107806307863040223071914240315202967004285204
+    #[rustfmt::skip]
+    const COEFF_A: Fq = field_new!(Fq, "203563247015667910991582090642011229452721346107806307863040223071914240315202967004285204");
+    /// COEFF_B = 272359039153593414761767159011037222092403532445017207690227512667250406992205523555677931
+    #[rustfmt::skip]
+    const COEFF_B: Fq = field_new!(Fq, "272359039153593414761767159011037222092403532445017207690227512667250406992205523555677931");
+
+    type TEModelParameters = EdwardsParameters;
+}
+
+/// GENERATOR_X =
+/// 282406820114868156776872298252698015906762052916420164316497572033519876761239463633892227
+#[rustfmt::skip]
+const GENERATOR_X: Fq = field_new!(Fq, "282406820114868156776872298252698015906762052916420164316497572033519876761239463633892227");
+
+/// GENERATOR_Y =
+/// 452667754940241021433619311795265643711152068500301853535337412655162600774122192283142703
+#[rustfmt::skip]
+const GENERATOR_Y: Fq = field_new!(Fq, "452667754940241021433619311795265643711152068500301853535337412655162600774122192283142703");
diff --git a/arkworks/curves/ed_on_mnt4_298/src/curves/tests.rs b/arkworks/curves/ed_on_mnt4_298/src/curves/tests.rs
new file mode 100644
index 00000000..dae0ba25
--- /dev/null
+++ b/arkworks/curves/ed_on_mnt4_298/src/curves/tests.rs
@@ -0,0 +1,63 @@
+use ark_ec::{AffineCurve, ProjectiveCurve};
+use ark_std::rand::Rng;
+use ark_std::test_rng;
+
+use crate::*;
+
+use ark_algebra_test_templates::{curves::*, groups::*};
+
+#[test]
+fn test_projective_curve() {
+    curve_tests::<EdwardsProjective>();
+
+    edwards_tests::<EdwardsParameters>();
+}
+
+#[test]
+fn test_projective_group() {
+    let mut rng = test_rng();
+    let a = rng.gen();
+    let b = rng.gen();
+
+    for _i in 0..100 {
+        group_test::<EdwardsProjective>(a, b);
+    }
+}
+
+#[test]
+fn test_affine_group() {
+    let mut rng = test_rng();
+    let a: EdwardsAffine = rng.gen();
+    let b: EdwardsAffine = rng.gen();
+    for _i in 0..100 {
+        group_test::<EdwardsAffine>(a, b);
+    }
+}
+
+#[test]
+fn test_generator() {
+    let generator = EdwardsAffine::prime_subgroup_generator();
+    assert!(generator.is_on_curve());
+    assert!(generator.is_in_correct_subgroup_assuming_on_curve());
+}
+
+#[test]
+fn test_conversion() {
+    let mut rng = test_rng();
+    let a: EdwardsAffine = rng.gen();
+    let b: EdwardsAffine = rng.gen();
+    let a_b = {
+        use ark_ec::group::Group;
+        (a + &b).double().double()
+    };
+    let a_b2 = (a.into_projective() + &b.into_projective())
+        .double()
+        .double();
+    assert_eq!(a_b, a_b2.into_affine());
+    assert_eq!(a_b.into_projective(), a_b2);
+}
+
+#[test]
+fn test_montgomery_conversion() {
+    montgomery_conversion_test::<EdwardsParameters>();
+}
diff --git a/arkworks/curves/ed_on_mnt4_298/src/fields/fq.rs b/arkworks/curves/ed_on_mnt4_298/src/fields/fq.rs
new file mode 100644
index 00000000..f587b1ab
--- /dev/null
+++ b/arkworks/curves/ed_on_mnt4_298/src/fields/fq.rs
@@ -0,0 +1 @@
+pub use ark_mnt4_298::{Fr as Fq, FrParameters as FqParameters};
diff --git a/arkworks/curves/ed_on_mnt4_298/src/fields/fr.rs b/arkworks/curves/ed_on_mnt4_298/src/fields/fr.rs
new file mode 100644
index 00000000..6d3c5eac
--- /dev/null
+++ b/arkworks/curves/ed_on_mnt4_298/src/fields/fr.rs
@@ -0,0 +1,127 @@
+use ark_ff::{
+    biginteger::BigInteger320 as BigInteger,
+    fields::{FftParameters, Fp320, Fp320Parameters, FpParameters},
+};
+
+pub type Fr = Fp320<FrParameters>;
+
+pub struct FrParameters;
+
+impl Fp320Parameters for FrParameters {}
+impl 
FftParameters for FrParameters { + type BigInt = BigInteger; + + const TWO_ADICITY: u32 = 1u32; + + // ROOT_OF_UNITY = GENERATOR ^ t = + // 118980571542315331438337312413262112886281219744507561120271964887686106682370032123932630 + // t is defined below + // This number needs to be in the Montgomery residue form. + // I.e., write + // 118980571542315331438337312413262112886281219744507561120271964887686106682370032123932630 + // * R + // = 14596494758349247937872919467301196219547084259323651055171406111196152579418790325693086 + #[rustfmt::skip] + const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([ + 4913018085921565342u64, + 18164325898792356216u64, + 11499902056485864693u64, + 12113224729248979119u64, + 126057789046u64, + ]); +} +impl FpParameters for FrParameters { + // MODULUS = 118980571542315331438337312413262112886281219744507561120271964887686106682370032123932631 + // Factors of MODULUS - 1: + // 2 + // 5 + // 17 + // 47 + // 3645289 + // 42373926857 + // 96404785755712297250936212793128201320333033128042968811755970858369 + #[rustfmt::skip] + const MODULUS: BigInteger = BigInteger([ + 15535567651727634391u64, + 14992835038329117496u64, + 12879083654034347181u64, + 16760578290609820963u64, + 1027536270620u64, + ]); + + const MODULUS_BITS: u32 = 296; + + const CAPACITY: u32 = Self::MODULUS_BITS - 1; + + const REPR_SHAVE_BITS: u32 = 24; + + // see ark-ff/src/fields/mod.rs for more information + // R = pow(2,320) % + // 118980571542315331438337312413262112886281219744507561120271964887686106682370032123932631 + // R = 104384076783966083500464392945960916666734135485183910065100558776489954102951241798239545 + #[rustfmt::skip] + const R: BigInteger = BigInteger([ + 10622549565806069049u64, + 15275253213246312896u64, + 1379181597548482487u64, + 4647353561360841844u64, + 901478481574u64 + ]); + + // R2 = R * R % + // 118980571542315331438337312413262112886281219744507561120271964887686106682370032123932631 + // R2 = 64940318866745953005690402896764745514897573584912026577721076893188083397226247459368768 + #[rustfmt::skip] + const R2: BigInteger = BigInteger([ + 16858329796171722560u64, + 12060416575249219689u64, + 17034911964548502611u64, + 14718631438675169669u64, + 560835539754u64 + ]); + + // INV = -(118980571542315331438337312413262112886281219744507561120271964887686106682370032123932631)^(-1) % 2^64 + const INV: u64 = 9223688842165816345u64; + + // GENERATOR = 7 + // This number needs to be in the Montgomery residue form. 
+ // I.e., write 7 * R = + // 16805108233870595873226876142153739349451629929242003734072122109313038626438499844081029 + #[rustfmt::skip] + const GENERATOR: BigInteger = BigInteger([ + 18037929197695780229u64, + 16969762262749485294u64, + 6166745553471500787u64, + 5754981480705173590u64, + 145131747294u64, + ]); + + // (n-1)/2 = 59490285771157665719168656206631056443140609872253780560135982443843053341185016061966315 + #[rustfmt::skip] + const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 7767783825863817195u64, + 16719789556019334556u64, + 15662913863871949398u64, + 8380289145304910481u64, + 513768135310u64, + ]); + + // t = (n - 1) / 2^{TWO_ADICITY} = + // 59490285771157665719168656206631056443140609872253780560135982443843053341185016061966315 + const T: BigInteger = BigInteger([ + 7767783825863817195u64, + 16719789556019334556u64, + 15662913863871949398u64, + 8380289145304910481u64, + 513768135310u64, + ]); + + // (t-1)/2 = 29745142885578832859584328103315528221570304936126890280067991221921526670592508030983157 + const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 3883891912931908597u64, + 8359894778009667278u64, + 17054828968790750507u64, + 4190144572652455240u64, + 256884067655u64, + ]); +} diff --git a/arkworks/curves/ed_on_mnt4_298/src/fields/mod.rs b/arkworks/curves/ed_on_mnt4_298/src/fields/mod.rs new file mode 100644 index 00000000..2b2e27c2 --- /dev/null +++ b/arkworks/curves/ed_on_mnt4_298/src/fields/mod.rs @@ -0,0 +1,8 @@ +pub mod fq; +pub mod fr; + +pub use fq::*; +pub use fr::*; + +#[cfg(all(feature = "ed_on_mnt4_298", test))] +mod tests; diff --git a/arkworks/curves/ed_on_mnt4_298/src/fields/tests.rs b/arkworks/curves/ed_on_mnt4_298/src/fields/tests.rs new file mode 100644 index 00000000..cc0fbb1d --- /dev/null +++ b/arkworks/curves/ed_on_mnt4_298/src/fields/tests.rs @@ -0,0 +1,24 @@ +use ark_std::rand::Rng; +use ark_std::test_rng; + +use crate::fields::*; + +use ark_algebra_test_templates::fields::*; + +#[test] +fn test_fr() { + let mut rng = test_rng(); + let a: Fr = rng.gen(); + let b: Fr = rng.gen(); + field_test(a, b); + primefield_test::(); +} + +#[test] +fn test_fq() { + let mut rng = test_rng(); + let a: Fq = rng.gen(); + let b: Fq = rng.gen(); + field_test(a, b); + primefield_test::(); +} diff --git a/arkworks/curves/ed_on_mnt4_298/src/lib.rs b/arkworks/curves/ed_on_mnt4_298/src/lib.rs new file mode 100644 index 00000000..8fabdf0e --- /dev/null +++ b/arkworks/curves/ed_on_mnt4_298/src/lib.rs @@ -0,0 +1,30 @@ +#![cfg_attr(not(feature = "std"), no_std)] +#![deny( + warnings, + unused, + future_incompatible, + nonstandard_style, + rust_2018_idioms +)] +#![forbid(unsafe_code)] + +//! This library implements a twisted Edwards curve whose base field is the scalar field of the +//! curve MNT4-298. This allows defining cryptographic primitives that use elliptic curves over +//! the scalar field of the latter curve. +//! +//! Curve information: +//! * Base field: q = 475922286169261325753349249653048451545124878552823515553267735739164647307408490559963137 +//! * Scalar field: r = 118980571542315331438337312413262112886281219744507561120271964887686106682370032123932631 +//! * Valuation(q - 1, 2) = 30 +//! * Valuation(r - 1, 2) = 1 +//! * Curve equation: ax^2 + y^2 =1 + dx^2y^2, where +//! * a = -1 +//! 
* d = 4212 mod q
+
+#[cfg(feature = "r1cs")]
+pub mod constraints;
+mod curves;
+mod fields;
+
+pub use curves::*;
+pub use fields::*;
diff --git a/arkworks/curves/ed_on_mnt4_298/src/mod.rs b/arkworks/curves/ed_on_mnt4_298/src/mod.rs
new file mode 100644
index 00000000..cd5e7581
--- /dev/null
+++ b/arkworks/curves/ed_on_mnt4_298/src/mod.rs
@@ -0,0 +1,5 @@
+mod curves;
+mod fields;
+
+pub use curves::*;
+pub use fields::*;
diff --git a/arkworks/curves/ed_on_mnt4_753/Cargo.toml b/arkworks/curves/ed_on_mnt4_753/Cargo.toml
new file mode 100644
index 00000000..25b063c7
--- /dev/null
+++ b/arkworks/curves/ed_on_mnt4_753/Cargo.toml
@@ -0,0 +1,31 @@
+[package]
+name = "ark-ed-on-mnt4-753"
+version = "0.3.0"
+authors = [ "arkworks contributors" ]
+description = "A Twisted Edwards curve defined over the scalar field of the MNT4-753 curve"
+homepage = "https://arkworks.rs"
+repository = "https://github.com/arkworks-rs/algebra"
+documentation = "https://docs.rs/ark-ed-on-mnt4-753/"
+keywords = ["cryptography", "finite-fields", "elliptic-curves" ]
+categories = ["cryptography"]
+include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"]
+license = "MIT/Apache-2.0"
+edition = "2018"
+
+[dependencies]
+ark-ff = { version = "^0.3.0", default-features = false }
+ark-ec = { version = "^0.3.0", default-features = false }
+ark-std = { version = "^0.3.0", default-features = false }
+ark-r1cs-std = { version = "^0.3.0", default-features = false, optional = true }
+ark-mnt4-753 = { version = "^0.3.0", path = "../mnt4_753", default-features = false, features = [ "scalar_field" ] }
+
+[dev-dependencies]
+ark-relations = { version = "^0.3.0", default-features = false }
+ark-serialize = { version = "^0.3.0", default-features = false }
+ark-algebra-test-templates = { version = "^0.3.0", default-features = false }
+ark-curve-constraint-tests = { path = "../curve-constraint-tests", default-features = false }
+
+[features]
+default = []
+std = [ "ark-std/std", "ark-ff/std", "ark-ec/std", "ark-mnt4-753/std" ]
+r1cs = ["ark-r1cs-std"]
diff --git a/arkworks/curves/ed_on_mnt4_753/src/constraints/curves.rs b/arkworks/curves/ed_on_mnt4_753/src/constraints/curves.rs
new file mode 100644
index 00000000..3ceded42
--- /dev/null
+++ b/arkworks/curves/ed_on_mnt4_753/src/constraints/curves.rs
@@ -0,0 +1,12 @@
+use crate::*;
+use ark_r1cs_std::groups::curves::twisted_edwards::AffineVar;
+
+use crate::constraints::fields::FqVar;
+
+/// A variable that is the R1CS equivalent of `crate::EdwardsAffine`.
+pub type EdwardsVar = AffineVar<EdwardsParameters, FqVar>;
+
+#[test]
+fn test() {
+    ark_curve_constraint_tests::curves::te_test::<EdwardsParameters, EdwardsVar>().unwrap();
+}
diff --git a/arkworks/curves/ed_on_mnt4_753/src/constraints/fields.rs b/arkworks/curves/ed_on_mnt4_753/src/constraints/fields.rs
new file mode 100644
index 00000000..f25fa336
--- /dev/null
+++ b/arkworks/curves/ed_on_mnt4_753/src/constraints/fields.rs
@@ -0,0 +1,10 @@
+use crate::fq::Fq;
+use ark_r1cs_std::fields::fp::FpVar;
+
+/// A variable that is the R1CS equivalent of `crate::Fq`.
+pub type FqVar = FpVar<Fq>;
+
+#[test]
+fn test() {
+    ark_curve_constraint_tests::fields::field_test::<_, _, FqVar>().unwrap();
+}
diff --git a/arkworks/curves/ed_on_mnt4_753/src/constraints/mod.rs b/arkworks/curves/ed_on_mnt4_753/src/constraints/mod.rs
new file mode 100644
index 00000000..f38d437a
--- /dev/null
+++ b/arkworks/curves/ed_on_mnt4_753/src/constraints/mod.rs
@@ -0,0 +1,107 @@
+//! This module implements the R1CS equivalent of `ark_ed_on_mnt4_753`.
+//!
+//! 
It implements field variables for `crate::Fq`, +//! and group variables for `crate::GroupProjective`. +//! +//! The field underlying these constraints is `crate::Fq`. +//! +//! # Examples +//! +//! One can perform standard algebraic operations on `FqVar`: +//! +//! ``` +//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { +//! use ark_std::UniformRand; +//! use ark_relations::r1cs::*; +//! use ark_r1cs_std::prelude::*; +//! use ark_ed_on_mnt4_753::{*, constraints::*}; +//! +//! let cs = ConstraintSystem::::new_ref(); +//! // This rng is just for test purposes; do not use it +//! // in real applications. +//! let mut rng = ark_std::test_rng(); +//! +//! // Generate some random `Fq` elements. +//! let a_native = Fq::rand(&mut rng); +//! let b_native = Fq::rand(&mut rng); +//! +//! // Allocate `a_native` and `b_native` as witness variables in `cs`. +//! let a = FqVar::new_witness(ark_relations::ns!(cs, "generate_a"), || Ok(a_native))?; +//! let b = FqVar::new_witness(ark_relations::ns!(cs, "generate_b"), || Ok(b_native))?; +//! +//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any +//! // constraints or variables. +//! let a_const = FqVar::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?; +//! let b_const = FqVar::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?; +//! +//! let one = FqVar::one(); +//! let zero = FqVar::zero(); +//! +//! // Sanity check one + one = two +//! let two = &one + &one + &zero; +//! two.enforce_equal(&one.double()?)?; +//! +//! assert!(cs.is_satisfied()?); +//! +//! // Check that the value of &a + &b is correct. +//! assert_eq!((&a + &b).value()?, a_native + &b_native); +//! +//! // Check that the value of &a * &b is correct. +//! assert_eq!((&a * &b).value()?, a_native * &b_native); +//! +//! // Check that operations on variables and constants are equivalent. +//! (&a + &b).enforce_equal(&(&a_const + &b_const))?; +//! assert!(cs.is_satisfied()?); +//! # Ok(()) +//! # } +//! ``` +//! +//! One can also perform standard algebraic operations on `EdwardsVar`: +//! +//! ``` +//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { +//! # use ark_std::UniformRand; +//! # use ark_relations::r1cs::*; +//! # use ark_r1cs_std::prelude::*; +//! # use ark_ed_on_mnt4_753::{*, constraints::*}; +//! +//! # let cs = ConstraintSystem::::new_ref(); +//! # let mut rng = ark_std::test_rng(); +//! +//! // Generate some random `Edwards` elements. +//! let a_native = EdwardsProjective::rand(&mut rng); +//! let b_native = EdwardsProjective::rand(&mut rng); +//! +//! // Allocate `a_native` and `b_native` as witness variables in `cs`. +//! let a = EdwardsVar::new_witness(ark_relations::ns!(cs, "a"), || Ok(a_native))?; +//! let b = EdwardsVar::new_witness(ark_relations::ns!(cs, "b"), || Ok(b_native))?; +//! +//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any +//! // constraints or variables. +//! let a_const = EdwardsVar::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?; +//! let b_const = EdwardsVar::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?; +//! +//! // This returns the identity of `Edwards`. +//! let zero = EdwardsVar::zero(); +//! +//! // Sanity check one + one = two +//! let two_a = &a + &a + &zero; +//! two_a.enforce_equal(&a.double()?)?; +//! +//! assert!(cs.is_satisfied()?); +//! +//! // Check that the value of &a + &b is correct. +//! assert_eq!((&a + &b).value()?, a_native + &b_native); +//! +//! 
// Check that operations on variables and constants are equivalent.
+//! (&a + &b).enforce_equal(&(&a_const + &b_const))?;
+//! assert!(cs.is_satisfied()?);
+//! # Ok(())
+//! # }
+//! ```
+
+mod curves;
+mod fields;
+
+pub use curves::*;
+pub use fields::*;
diff --git a/arkworks/curves/ed_on_mnt4_753/src/curves/mod.rs b/arkworks/curves/ed_on_mnt4_753/src/curves/mod.rs
new file mode 100644
index 00000000..bdcf8fe6
--- /dev/null
+++ b/arkworks/curves/ed_on_mnt4_753/src/curves/mod.rs
@@ -0,0 +1,72 @@
+use ark_ec::{
+    models::{ModelParameters, MontgomeryModelParameters, TEModelParameters},
+    twisted_edwards_extended::{GroupAffine, GroupProjective},
+};
+use ark_ff::field_new;
+
+use crate::{fq::Fq, fr::Fr};
+
+#[cfg(test)]
+mod tests;
+
+pub type EdwardsAffine = GroupAffine<EdwardsParameters>;
+pub type EdwardsProjective = GroupProjective<EdwardsParameters>;
+
+#[derive(Clone, Default, PartialEq, Eq)]
+pub struct EdwardsParameters;
+
+impl ModelParameters for EdwardsParameters {
+    type BaseField = Fq;
+    type ScalarField = Fr;
+}
+
+impl TEModelParameters for EdwardsParameters {
+    /// COEFF_A = -1
+    #[rustfmt::skip]
+    const COEFF_A: Fq = field_new!(Fq, "-1");
+
+    /// COEFF_D = 317690
+    #[rustfmt::skip]
+    const COEFF_D: Fq = field_new!(Fq, "317690");
+
+    /// COFACTOR = 8
+    const COFACTOR: &'static [u64] = &[8];
+
+    /// COFACTOR_INV (mod r) =
+    /// 4582647449616135528381398492791944685893671397494963179726320631987147963874964803303316505414568319530101512550297775574042810022553679071007001162683923594233560231270043634777390699589793776691858866199511300853468155295505
+    #[rustfmt::skip]
+    const COFACTOR_INV: Fr = field_new!(Fr, "4582647449616135528381398492791944685893671397494963179726320631987147963874964803303316505414568319530101512550297775574042810022553679071007001162683923594233560231270043634777390699589793776691858866199511300853468155295505");
+
+    /// Generated randomly
+    const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = (GENERATOR_X, GENERATOR_Y);
+
+    type MontgomeryModelParameters = EdwardsParameters;
+
+    /// Multiplication by `a` is just negation.
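+    ///
+    /// As a quick illustration (not from the upstream docs), the twisted
+    /// Edwards equation a*x^2 + y^2 = 1 + d*x^2*y^2 can be checked directly
+    /// on the generator; `Field` and `One` are assumed from ark-ff 0.3:
+    ///
+    /// ```ignore
+    /// use ark_ec::models::TEModelParameters;
+    /// use ark_ff::{Field, One};
+    /// use ark_ed_on_mnt4_753::{EdwardsParameters, Fq};
+    ///
+    /// let (x, y) = EdwardsParameters::AFFINE_GENERATOR_COEFFS;
+    /// let lhs = EdwardsParameters::COEFF_A * x.square() + y.square();
+    /// let rhs = Fq::one() + EdwardsParameters::COEFF_D * x.square() * y.square();
+    /// assert_eq!(lhs, rhs);
+    /// ```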
+ #[inline(always)] + fn mul_by_a(elem: &Self::BaseField) -> Self::BaseField { + -*elem + } +} + +impl MontgomeryModelParameters for EdwardsParameters { + /// COEFF_A = 40212480635445336270302172549278415015971955924352275480357619589919378421241453024646804979794897776496091377551124233752850182852486874251193367187677349266115879541798515219680194853352256809837126277708211496794264654247419 + #[rustfmt::skip] + const COEFF_A: Fq = field_new!(Fq, "40212480635445336270302172549278415015971955924352275480357619589919378421241453024646804979794897776496091377551124233752850182852486874251193367187677349266115879541798515219680194853352256809837126277708211496794264654247419"); + + /// COEFF_B = 1686010332473617132042042241962222112198753995601673591425883331105974391329653748412088783995441144921979594337334243570322874639106980818502874667119046899605536783551549221790223284494141659774809441351696667426519821912580 + #[rustfmt::skip] + const COEFF_B: Fq = field_new!(Fq, "1686010332473617132042042241962222112198753995601673591425883331105974391329653748412088783995441144921979594337334243570322874639106980818502874667119046899605536783551549221790223284494141659774809441351696667426519821912580"); + + type TEModelParameters = EdwardsParameters; +} + +/// GENERATOR_X = +/// 41126137307536311801428235632419266329480236393691483739251051053325519918069469184425962602019877935619960143044210127218431046103600632347238890180171944971817510488009355627861577881883236134824745174469522277738875418206826 +#[rustfmt::skip] +const GENERATOR_X: Fq = field_new!(Fq, "41126137307536311801428235632419266329480236393691483739251051053325519918069469184425962602019877935619960143044210127218431046103600632347238890180171944971817510488009355627861577881883236134824745174469522277738875418206826"); + +/// GENERATOR_Y = +/// 18249602579663240810999977712212098844157230095713722119136881953011435881503578209163288529034825612841855863913294174196656077002578342108932925693640046298989762289691399012056048139253937882385653600831389370198228562812681 +#[rustfmt::skip] +const GENERATOR_Y: Fq = field_new!(Fq, "18249602579663240810999977712212098844157230095713722119136881953011435881503578209163288529034825612841855863913294174196656077002578342108932925693640046298989762289691399012056048139253937882385653600831389370198228562812681"); diff --git a/arkworks/curves/ed_on_mnt4_753/src/curves/tests.rs b/arkworks/curves/ed_on_mnt4_753/src/curves/tests.rs new file mode 100644 index 00000000..dae0ba25 --- /dev/null +++ b/arkworks/curves/ed_on_mnt4_753/src/curves/tests.rs @@ -0,0 +1,63 @@ +use ark_ec::{AffineCurve, ProjectiveCurve}; +use ark_std::rand::Rng; +use ark_std::test_rng; + +use crate::*; + +use ark_algebra_test_templates::{curves::*, groups::*}; + +#[test] +fn test_projective_curve() { + curve_tests::(); + + edwards_tests::(); +} + +#[test] +fn test_projective_group() { + let mut rng = test_rng(); + let a = rng.gen(); + let b = rng.gen(); + + for _i in 0..100 { + group_test::(a, b); + } +} + +#[test] +fn test_affine_group() { + let mut rng = test_rng(); + let a: EdwardsAffine = rng.gen(); + let b: EdwardsAffine = rng.gen(); + for _i in 0..100 { + group_test::(a, b); + } +} + +#[test] +fn test_generator() { + let generator = EdwardsAffine::prime_subgroup_generator(); + assert!(generator.is_on_curve()); + assert!(generator.is_in_correct_subgroup_assuming_on_curve()); +} + +#[test] +fn test_conversion() { + let mut rng = test_rng(); + let a: EdwardsAffine = rng.gen(); + let b: EdwardsAffine = rng.gen(); + let 
a_b = { + use ark_ec::group::Group; + (a + &b).double().double() + }; + let a_b2 = (a.into_projective() + &b.into_projective()) + .double() + .double(); + assert_eq!(a_b, a_b2.into_affine()); + assert_eq!(a_b.into_projective(), a_b2); +} + +#[test] +fn test_montgomery_conversion() { + montgomery_conversion_test::(); +} diff --git a/arkworks/curves/ed_on_mnt4_753/src/fields/fq.rs b/arkworks/curves/ed_on_mnt4_753/src/fields/fq.rs new file mode 100644 index 00000000..33f3df1e --- /dev/null +++ b/arkworks/curves/ed_on_mnt4_753/src/fields/fq.rs @@ -0,0 +1 @@ +pub use ark_mnt4_753::{Fr as Fq, FrParameters as FqParameters}; diff --git a/arkworks/curves/ed_on_mnt4_753/src/fields/fr.rs b/arkworks/curves/ed_on_mnt4_753/src/fields/fr.rs new file mode 100644 index 00000000..98e7871a --- /dev/null +++ b/arkworks/curves/ed_on_mnt4_753/src/fields/fr.rs @@ -0,0 +1,182 @@ +use ark_ff::{ + biginteger::BigInteger768 as BigInteger, + fields::{FftParameters, Fp768, Fp768Parameters, FpParameters}, +}; + +pub type Fr = Fp768; + +pub struct FrParameters; + +impl Fp768Parameters for FrParameters {} +impl FftParameters for FrParameters { + type BigInt = BigInteger; + + const TWO_ADICITY: u32 = 7u32; + + // ROOT_OF_UNITY = GENERATOR ^ t = + // 5051348772165646558710889803432238607797392809516000790038615454406641638798360636639094562941749878118669420392611632754442159525900729019616828636182878045303562497793780656635901271279409699078868658041674335385318499053954 + // t is defined below + // This number needs to be in the Montgomery residue form. + // I.e., write + // 5051348772165646558710889803432238607797392809516000790038615454406641638798360636639094562941749878118669420392611632754442159525900729019616828636182878045303562497793780656635901271279409699078868658041674335385318499053954 + // * R + // = 3163945077843586747114473523156080008349200300253316071422414259389979351386670787753361998953450578171951209600907861296956453653582402723399808696724060539858637307706671971132333536614595846054039300191656599533885935499352 + #[rustfmt::skip] + const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([ + 17630237153019476056u64, + 13843632041501582123u64, + 8277579215948731020u64, + 16543319700733887487u64, + 17904443422768964556u64, + 4398189354108552378u64, + 15178824470536352826u64, + 5393472405610595666u64, + 9815530206026813666u64, + 9111703519527971535u64, + 9880873531994141485u64, + 37593433148184u64, + ]); +} +impl FpParameters for FrParameters { + // MODULUS = 5237311370989869175293026848905079641021338739994243633972937865128169101571388346632361720473792365177258871486054600656048925740061347509722287043067341250552640264308621296888446513816907173362124418513727200975392177480577 + // Factors of MODULUS - 1: + // 2^7 + // 3 + // 67 + // 193189 + // 5324381 + // 20502324317011 + // 12991385268608969143 + // 743005941432538001939136029613828619428586060274612824031793373798492678674419102414979927623550862639644071557313558044209469997283394306590808303316688123808776073253386140931 + #[rustfmt::skip] + const MODULUS: BigInteger = BigInteger([ + 1918157353135465345u64, + 963476667289301255u64, + 6719983938249379016u64, + 3655897403342429413u64, + 14998997414201165002u64, + 13135040821375901270u64, + 12914675130452106995u64, + 6989506515121216945u64, + 12382362535852178190u64, + 13186314214759855613u64, + 2451174275904461237u64, + 62228802984066u64, + ]); + + const MODULUS_BITS: u32 = 750; + + const CAPACITY: u32 = Self::MODULUS_BITS - 1; + + const REPR_SHAVE_BITS: u32 = 18; + + // see 
ark_ff/src/fields/mod.rs for more information + // R = pow(2,768) % MODULUS + // R = 933352698056040166367534174176950366489065242993745918174914647273231163953185260894581718311971532174387033963715296372791285468903747270837716556902938133611910788060028435531754797383796835009316018259656953442114538695438 + #[rustfmt::skip] + const R: BigInteger = BigInteger([ + 13829252738394483470u64, + 3696102008259415033u64, + 13727024804350215797u64, + 13923468026436718769u64, + 5924604905079742862u64, + 10708002647109138222u64, + 14670460945619011269u64, + 10920055614013427454u64, + 16773322069409968132u64, + 11648025004657998992u64, + 5853759956175613481u64, + 11089930891582u64, + ]); + + // R2 = R * R % MODULUS + // R2 = 2468731867191023344597744941938926307216338526282824416880609839804154918771848044056240157551420210981962520047623686977567450338290776997282473798413876535168711321018336215486289519735826959884564283681071791441993286279295 + #[rustfmt::skip] + const R2: BigInteger = BigInteger([ + 10440129917231554687u64, + 8797934528693354276u64, + 14378434434829994158u64, + 7755707164286885667u64, + 16206546501540671680u64, + 8674228973811871262u64, + 12794601382709871071u64, + 17194287857269754157u64, + 2120600029739364160u64, + 15454005187782655500u64, + 18107041519543174727u64, + 29333033326005u64, + ]); + + // INV = -(MODULUS)^(-1) % 2^64 + const INV: u64 = 3079018560679650175u64; + + // GENERATOR = 5 + // This number needs to be in the Montgomery residue form. + // I.e., write 5 * R = + // 4666763490280200831837670870884751832445326214968729590874573236366155819765926304472908591559857660871935169818576481863956427344518736354188582784514690668059553940300142177658773986918984175046580091298284767210572693477190 + #[rustfmt::skip] + const GENERATOR: BigInteger = BigInteger([ + 13806031470843762502u64, + 33765967587523552u64, + 13294891800622424138u64, + 14277107911054939000u64, + 11176280451689162697u64, + 16646525088126587879u64, + 18012072506966401499u64, + 17706789922648034041u64, + 10079634052211634198u64, + 2899892802161340116u64, + 10822055707168515792u64, + 55449654457911u64, + ]); + + // (n-1)/2 = 2618655685494934587646513424452539820510669369997121816986468932564084550785694173316180860236896182588629435743027300328024462870030673754861143521533670625276320132154310648444223256908453586681062209256863600487696088740288 + #[rustfmt::skip] + const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 10182450713422508480u64, + 481738333644650627u64, + 12583364005979465316u64, + 1827948701671214706u64, + 7499498707100582501u64, + 15790892447542726443u64, + 15680709602080829305u64, + 3494753257560608472u64, + 15414553304780864903u64, + 15816529144234703614u64, + 1225587137952230618u64, + 31114401492033u64, + ]); + + // t = (n - 1) / 2^{TWO_ADICITY} = + // 40916495085858352931976772257070934695479208906205028390413577071313821106026471458065325941201502852947334933484801567625382232344229277419705367523963603519942502064911103881940988389194587291891597019638493757620251386567 + const T: BigInteger = BigInteger([ + 1023791920852361927u64, + 10383820702924820450u64, + 14608133870179016345u64, + 10693085616076947257u64, + 12511085841822051593u64, + 16675864135140424508u64, + 7162540115173594813u64, + 2072218152711366715u64, + 18111135716793329142u64, + 7741123047823172587u64, + 307380175182215347u64, + 486162523313u64, + ]); + + // (t-1)/2 = 
20458247542929176465988386128535467347739604453102514195206788535656910553013235729032662970600751426473667466742400783812691116172114638709852683761981801759971251032455551940970494194597293645945798509819246878810125693283 + const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 511895960426180963u64, + 14415282388317186033u64, + 16527438971944283980u64, + 14569914844893249436u64, + 6255542920911025796u64, + 17561304104424988062u64, + 12804642094441573214u64, + 1036109076355683357u64, + 18278939895251440379u64, + 13093933560766362101u64, + 9377062124445883481u64, + 243081261656u64, + ]); +} diff --git a/arkworks/curves/ed_on_mnt4_753/src/fields/mod.rs b/arkworks/curves/ed_on_mnt4_753/src/fields/mod.rs new file mode 100644 index 00000000..e20037a4 --- /dev/null +++ b/arkworks/curves/ed_on_mnt4_753/src/fields/mod.rs @@ -0,0 +1,8 @@ +pub mod fq; +pub mod fr; + +pub use fq::*; +pub use fr::*; + +#[cfg(all(feature = "ed_on_mnt4_753", test))] +mod tests; diff --git a/arkworks/curves/ed_on_mnt4_753/src/fields/tests.rs b/arkworks/curves/ed_on_mnt4_753/src/fields/tests.rs new file mode 100644 index 00000000..b976d39c --- /dev/null +++ b/arkworks/curves/ed_on_mnt4_753/src/fields/tests.rs @@ -0,0 +1,23 @@ +use ark_std::rand::Rng; +use ark_std::test_rng; + +use crate::{Fq, Fr}; +use ark_algebra_test_templates::fields::*; + +#[test] +fn test_fr() { + let mut rng = test_rng(); + let a: Fr = rng.gen(); + let b: Fr = rng.gen(); + field_test(a, b); + primefield_test::(); +} + +#[test] +fn test_fq() { + let mut rng = test_rng(); + let a: Fq = rng.gen(); + let b: Fq = rng.gen(); + field_test(a, b); + primefield_test::(); +} diff --git a/arkworks/curves/ed_on_mnt4_753/src/lib.rs b/arkworks/curves/ed_on_mnt4_753/src/lib.rs new file mode 100644 index 00000000..b42f5631 --- /dev/null +++ b/arkworks/curves/ed_on_mnt4_753/src/lib.rs @@ -0,0 +1,30 @@ +#![cfg_attr(not(feature = "std"), no_std)] +#![deny( + warnings, + unused, + future_incompatible, + nonstandard_style, + rust_2018_idioms +)] +#![forbid(unsafe_code)] + +//! This library implements a twisted Edwards curve whose base field is the scalar field of the +//! curve MNT4-753. This allows defining cryptographic primitives that use elliptic curves over +//! the scalar field of the latter curve. +//! +//! Curve information: +//! * Base field: q = 41898490967918953402344214791240637128170709919953949071783502921025352812571106773058893763790338921418070971888458477323173057491593855069696241854796396165721416325350064441470418137846398469611935719059908164220784476160001 +//! * Scalar field: r = 5237311370989869175293026848905079641021338739994243633972937865128169101571388346632361720473792365177258871486054600656048925740061347509722287043067341250552640264308621296888446513816907173362124418513727200975392177480577 +//! * Valuation(q - 1, 2) = 30 +//! * Valuation(r - 1, 2) = 7 +//! * Curve equation: ax^2 + y^2 =1 + dx^2y^2, where +//! * a = -1 +//! 
* d = 317690 mod q
+
+#[cfg(feature = "r1cs")]
+pub mod constraints;
+mod curves;
+mod fields;
+
+pub use curves::*;
+pub use fields::*;
diff --git a/arkworks/curves/mnt4_298/Cargo.toml b/arkworks/curves/mnt4_298/Cargo.toml
new file mode 100644
index 00000000..e016986d
--- /dev/null
+++ b/arkworks/curves/mnt4_298/Cargo.toml
@@ -0,0 +1,34 @@
+[package]
+name = "ark-mnt4-298"
+version = "0.3.0"
+authors = [ "arkworks contributors" ]
+description = "The MNT4-298 pairing-friendly elliptic curve"
+homepage = "https://arkworks.rs"
+repository = "https://github.com/arkworks-rs/algebra"
+documentation = "https://docs.rs/ark-mnt4-298/"
+keywords = ["cryptography", "finite-fields" ]
+categories = ["cryptography"]
+include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"]
+license = "MIT/Apache-2.0"
+edition = "2018"
+
+[dependencies]
+ark-ff = { version = "^0.3.0", default-features = false }
+ark-ec = { version = "^0.3.0", default-features = false }
+ark-std = { version = "^0.3.0", default-features = false }
+ark-r1cs-std = { version = "^0.3.0", default-features = false, optional = true }
+
+[dev-dependencies]
+ark-relations = { version = "^0.3.0", default-features = false }
+ark-serialize = { version = "^0.3.0", default-features = false }
+ark-algebra-test-templates = { version = "^0.3.0", default-features = false }
+ark-curve-constraint-tests = { path = "../curve-constraint-tests", default-features = false }
+
+[features]
+default = [ "curve" ]
+std = [ "ark-std/std", "ark-ff/std", "ark-ec/std" ]
+
+curve = [ "scalar_field", "base_field" ]
+scalar_field = []
+base_field = []
+r1cs = [ "base_field", "ark-r1cs-std" ]
\ No newline at end of file
diff --git a/arkworks/curves/mnt4_298/LICENSE-APACHE b/arkworks/curves/mnt4_298/LICENSE-APACHE
new file mode 120000
index 00000000..965b606f
--- /dev/null
+++ b/arkworks/curves/mnt4_298/LICENSE-APACHE
@@ -0,0 +1 @@
+../LICENSE-APACHE
\ No newline at end of file
diff --git a/arkworks/curves/mnt4_298/LICENSE-MIT b/arkworks/curves/mnt4_298/LICENSE-MIT
new file mode 120000
index 00000000..76219eb7
--- /dev/null
+++ b/arkworks/curves/mnt4_298/LICENSE-MIT
@@ -0,0 +1 @@
+../LICENSE-MIT
\ No newline at end of file
diff --git a/arkworks/curves/mnt4_298/src/constraints/curves.rs b/arkworks/curves/mnt4_298/src/constraints/curves.rs
new file mode 100644
index 00000000..46401164
--- /dev/null
+++ b/arkworks/curves/mnt4_298/src/constraints/curves.rs
@@ -0,0 +1,29 @@
+use crate::Parameters;
+use ark_r1cs_std::groups::mnt4;
+
+/// An element of G1 in the MNT4-298 bilinear group.
+pub type G1Var = mnt4::G1Var<Parameters>;
+/// An element of G2 in the MNT4-298 bilinear group.
+pub type G2Var = mnt4::G2Var<Parameters>;
+
+/// Represents the cached precomputation that can be performed on a G1 element
+/// which enables speeding up pairing computation.
+pub type G1PreparedVar = mnt4::G1PreparedVar<Parameters>;
+/// Represents the cached precomputation that can be performed on a G2 element
+/// which enables speeding up pairing computation.
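+///
+/// For example (illustrative only; `PairingVar` is this crate's pairing
+/// gadget, and `prepare_g2` comes from the `ark_r1cs_std::pairing::PairingVar`
+/// trait, as used in the module-level docs below):
+///
+/// ```ignore
+/// use ark_mnt4_298::constraints::{G2Var, G2PreparedVar, PairingVar};
+/// use ark_r1cs_std::pairing::PairingVar as PairingGadget;
+/// // Given an allocated `b: G2Var`, cache its Miller-loop precomputation:
+/// // let b_prep: G2PreparedVar = PairingVar::prepare_g2(&b)?;
+/// ```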
+pub type G2PreparedVar = mnt4::G2PreparedVar<Parameters>;
+
+#[test]
+fn test() {
+    use ark_ec::models::mnt4::MNT4Parameters;
+    ark_curve_constraint_tests::curves::sw_test::<
+        <Parameters as MNT4Parameters>::G1Parameters,
+        G1Var,
+    >()
+    .unwrap();
+    ark_curve_constraint_tests::curves::sw_test::<
+        <Parameters as MNT4Parameters>::G2Parameters,
+        G2Var,
+    >()
+    .unwrap();
+}
diff --git a/arkworks/curves/mnt4_298/src/constraints/fields.rs b/arkworks/curves/mnt4_298/src/constraints/fields.rs
new file mode 100644
index 00000000..32f2946e
--- /dev/null
+++ b/arkworks/curves/mnt4_298/src/constraints/fields.rs
@@ -0,0 +1,26 @@
+use crate::{Fq, Fq2Parameters, Fq4Parameters};
+
+use ark_r1cs_std::fields::{fp::FpVar, fp2::Fp2Var, fp4::Fp4Var};
+
+/// A variable that is the R1CS equivalent of `crate::Fq`.
+pub type FqVar = FpVar<Fq>;
+/// A variable that is the R1CS equivalent of `crate::Fq2`.
+pub type Fq2Var = Fp2Var<Fq2Parameters>;
+/// A variable that is the R1CS equivalent of `crate::Fq4`.
+pub type Fq4Var = Fp4Var<Fq4Parameters>;
+
+#[test]
+fn mnt4_298_field_gadgets_test() {
+    use super::*;
+    use crate::{Fq, Fq2, Fq4};
+    use ark_curve_constraint_tests::fields::*;
+
+    field_test::<_, _, FqVar>().unwrap();
+    frobenius_tests::<Fq, _, FqVar>(13).unwrap();
+
+    field_test::<_, _, Fq2Var>().unwrap();
+    frobenius_tests::<Fq2, _, Fq2Var>(13).unwrap();
+
+    field_test::<_, _, Fq4Var>().unwrap();
+    frobenius_tests::<Fq4, _, Fq4Var>(13).unwrap();
+}
diff --git a/arkworks/curves/mnt4_298/src/constraints/mod.rs b/arkworks/curves/mnt4_298/src/constraints/mod.rs
new file mode 100644
index 00000000..48919084
--- /dev/null
+++ b/arkworks/curves/mnt4_298/src/constraints/mod.rs
@@ -0,0 +1,162 @@
+//! This module implements the R1CS equivalent of `ark_mnt4_298`.
+//!
+//! It implements field variables for `crate::{Fq, Fq2, Fq4}`,
+//! group variables for `crate::{G1, G2}`, and implements constraint
+//! generation for computing `MNT4_298::pairing`.
+//!
+//! The field underlying these constraints is `crate::Fq`.
+//!
+//! # Examples
+//!
+//! One can perform standard algebraic operations on `FqVar`:
+//!
+//! ```
+//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
+//! use ark_std::UniformRand;
+//! use ark_relations::r1cs::*;
+//! use ark_r1cs_std::prelude::*;
+//! use ark_mnt4_298::{*, constraints::*};
+//!
+//! let cs = ConstraintSystem::<Fq>::new_ref();
+//! // This rng is just for test purposes; do not use it
+//! // in real applications.
+//! let mut rng = ark_std::test_rng();
+//!
+//! // Generate some random `Fq` elements.
+//! let a_native = Fq::rand(&mut rng);
+//! let b_native = Fq::rand(&mut rng);
+//!
+//! // Allocate `a_native` and `b_native` as witness variables in `cs`.
+//! let a = FqVar::new_witness(ark_relations::ns!(cs, "generate_a"), || Ok(a_native))?;
+//! let b = FqVar::new_witness(ark_relations::ns!(cs, "generate_b"), || Ok(b_native))?;
+//!
+//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any
+//! // constraints or variables.
+//! let a_const = FqVar::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?;
+//! let b_const = FqVar::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?;
+//!
+//! let one = FqVar::one();
+//! let zero = FqVar::zero();
+//!
+//! // Sanity check one + one = two
+//! let two = &one + &one + &zero;
+//! two.enforce_equal(&one.double()?)?;
+//!
+//! assert!(cs.is_satisfied()?);
+//!
+//! // Check that the value of &a + &b is correct.
+//! assert_eq!((&a + &b).value()?, a_native + &b_native);
+//!
+//! // Check that the value of &a * &b is correct.
+//! assert_eq!((&a * &b).value()?, a_native * &b_native);
+//!
+//! // Check that operations on variables and constants are equivalent.
+//! (&a + &b).enforce_equal(&(&a_const + &b_const))?;
+//! assert!(cs.is_satisfied()?);
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! One can also perform standard algebraic operations on `G1Var` and `G2Var`:
+//!
+//! ```
+//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
+//! # use ark_std::UniformRand;
+//! # use ark_relations::r1cs::*;
+//! # use ark_r1cs_std::prelude::*;
+//! # use ark_mnt4_298::{*, constraints::*};
+//!
+//! # let cs = ConstraintSystem::<Fq>::new_ref();
+//! # let mut rng = ark_std::test_rng();
+//!
+//! // Generate some random `G1` elements.
+//! let a_native = G1Projective::rand(&mut rng);
+//! let b_native = G1Projective::rand(&mut rng);
+//!
+//! // Allocate `a_native` and `b_native` as witness variables in `cs`.
+//! let a = G1Var::new_witness(ark_relations::ns!(cs, "a"), || Ok(a_native))?;
+//! let b = G1Var::new_witness(ark_relations::ns!(cs, "b"), || Ok(b_native))?;
+//!
+//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any
+//! // constraints or variables.
+//! let a_const = G1Var::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?;
+//! let b_const = G1Var::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?;
+//!
+//! // This returns the identity of `G1`.
+//! let zero = G1Var::zero();
+//!
+//! // Sanity check one + one = two
+//! let two_a = &a + &a + &zero;
+//! two_a.enforce_equal(&a.double()?)?;
+//!
+//! assert!(cs.is_satisfied()?);
+//!
+//! // Check that the value of &a + &b is correct.
+//! assert_eq!((&a + &b).value()?, a_native + &b_native);
+//!
+//! // Check that operations on variables and constants are equivalent.
+//! (&a + &b).enforce_equal(&(&a_const + &b_const))?;
+//! assert!(cs.is_satisfied()?);
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! Finally, one can check pairing computations as well:
+//!
+//! ```
+//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
+//! # use ark_std::UniformRand;
+//! # use ark_ec::PairingEngine;
+//! # use ark_relations::r1cs::*;
+//! # use ark_r1cs_std::prelude::*;
+//! # use ark_mnt4_298::{*, constraints::*};
+//!
+//! # let cs = ConstraintSystem::<Fq>::new_ref();
+//! # let mut rng = ark_std::test_rng();
+//!
+//! // Generate random `G1` and `G2` elements.
+//! let a_native = G1Projective::rand(&mut rng);
+//! let b_native = G2Projective::rand(&mut rng);
+//!
+//! // Allocate `a_native` and `b_native` as witness variables in `cs`.
+//! let a = G1Var::new_witness(ark_relations::ns!(cs, "a"), || Ok(a_native))?;
+//! let b = G2Var::new_witness(ark_relations::ns!(cs, "b"), || Ok(b_native))?;
+//!
+//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any
+//! // constraints or variables.
+//! let a_const = G1Var::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?;
+//! let b_const = G2Var::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?;
+//!
+//! let pairing_result_native = MNT4_298::pairing(a_native, b_native);
+//!
+//! // Prepare `a` and `b` for pairing.
+//! let a_prep = constraints::PairingVar::prepare_g1(&a)?;
+//! let b_prep = constraints::PairingVar::prepare_g2(&b)?;
+//! let pairing_result = constraints::PairingVar::pairing(a_prep, b_prep)?;
+//!
+//! // Check that the value of the pairing is correct.
+//! assert_eq!(pairing_result.value()?, pairing_result_native);
+//!
+//! // Check that operations on variables and constants are equivalent.
+//! let a_prep_const = constraints::PairingVar::prepare_g1(&a_const)?;
+//! let b_prep_const = constraints::PairingVar::prepare_g2(&b_const)?;
+//! let pairing_result_const = constraints::PairingVar::pairing(a_prep_const, b_prep_const)?;
+//!
+//! pairing_result.enforce_equal(&pairing_result_const)?;
+//! assert!(cs.is_satisfied()?);
+//! # Ok(())
+//! # }
+//! ```
+
+mod fields;
+pub use fields::*;
+
+#[cfg(feature = "curve")]
+mod curves;
+#[cfg(feature = "curve")]
+mod pairing;
+
+#[cfg(feature = "curve")]
+pub use curves::*;
+#[cfg(feature = "curve")]
+pub use pairing::*;
diff --git a/arkworks/curves/mnt4_298/src/constraints/pairing.rs b/arkworks/curves/mnt4_298/src/constraints/pairing.rs
new file mode 100644
index 00000000..7941ad05
--- /dev/null
+++ b/arkworks/curves/mnt4_298/src/constraints/pairing.rs
@@ -0,0 +1,10 @@
+use crate::Parameters;
+
+/// Specifies the constraints for computing a pairing in the MNT4-298 bilinear group.
+pub type PairingVar = ark_r1cs_std::pairing::mnt4::PairingVar<Parameters>;
+
+#[test]
+fn test() {
+    use crate::MNT4_298;
+    ark_curve_constraint_tests::pairing::bilinearity_test::<MNT4_298, PairingVar>().unwrap()
+}
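The doc examples above show how a pairing is synthesized with `PairingVar`; a natural follow-up question is how expensive that is. The editorial sketch below (not part of the vendored diff) mirrors those examples and reports the resulting constraint count, assuming the arkworks 0.3 APIs used throughout this tree:

```
// Editorial sketch: synthesize one MNT4-298 pairing and count R1CS constraints.
use ark_mnt4_298::{constraints::*, *};
use ark_r1cs_std::prelude::*;
use ark_relations::r1cs::*;
use ark_std::UniformRand;

fn main() -> Result<(), SynthesisError> {
    let cs = ConstraintSystem::<Fq>::new_ref();
    let mut rng = ark_std::test_rng();
    let a = G1Var::new_witness(ark_relations::ns!(cs, "a"), || Ok(G1Projective::rand(&mut rng)))?;
    let b = G2Var::new_witness(ark_relations::ns!(cs, "b"), || Ok(G2Projective::rand(&mut rng)))?;
    // Prepare both inputs and synthesize the pairing itself.
    let a_prep = constraints::PairingVar::prepare_g1(&a)?;
    let b_prep = constraints::PairingVar::prepare_g2(&b)?;
    let _result = constraints::PairingVar::pairing(a_prep, b_prep)?;
    println!("one MNT4-298 pairing: {} constraints", cs.num_constraints());
    Ok(())
}
```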
diff --git a/arkworks/curves/mnt4_298/src/curves/g1.rs b/arkworks/curves/mnt4_298/src/curves/g1.rs
new file mode 100644
index 00000000..1cfa47ca
--- /dev/null
+++ b/arkworks/curves/mnt4_298/src/curves/g1.rs
@@ -0,0 +1,55 @@
+use crate::{Fq, Fr, FR_ONE};
+use ark_ec::{
+    mnt4,
+    models::{ModelParameters, SWModelParameters},
+};
+use ark_ff::field_new;
+
+pub type G1Affine = mnt4::G1Affine<Parameters>;
+pub type G1Projective = mnt4::G1Projective<Parameters>;
+pub type G1Prepared = mnt4::G1Prepared<Parameters>;
+
+#[derive(Clone, Default, PartialEq, Eq)]
+pub struct Parameters;
+
+impl ModelParameters for Parameters {
+    type BaseField = Fq;
+    type ScalarField = Fr;
+}
+
+impl SWModelParameters for Parameters {
+    /// COEFF_A = 2
+    /// Reference: https://github.com/scipr-lab/libff/blob/c927821ebe02e0a24b5e0f9170cec5e211a35f08/libff/algebra/curves/mnt/mnt4/mnt4_init.cpp#L116
+    #[rustfmt::skip]
+    const COEFF_A: Fq = field_new!(Fq, "2");
+
+    /// COEFF_B = 423894536526684178289416011533888240029318103673896002803341544124054745019340795360841685
+    /// Reference: https://github.com/scipr-lab/libff/blob/c927821ebe02e0a24b5e0f9170cec5e211a35f08/libff/algebra/curves/mnt/mnt4/mnt4_init.cpp#L117
+    #[rustfmt::skip]
+    const COEFF_B: Fq = field_new!(Fq, "423894536526684178289416011533888240029318103673896002803341544124054745019340795360841685");
+
+    /// COFACTOR = 1
+    const COFACTOR: &'static [u64] = &[1];
+
+    /// COFACTOR^(-1) mod r =
+    /// 1
+    #[rustfmt::skip]
+    const COFACTOR_INV: Fr = FR_ONE;
+
+    /// AFFINE_GENERATOR_COEFFS = (G1_GENERATOR_X, G1_GENERATOR_Y)
+    const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) =
+        (G1_GENERATOR_X, G1_GENERATOR_Y);
+}
+
+// Generator of G1
+// X = 60760244141852568949126569781626075788424196370144486719385562369396875346601926534016838,
+// Y = 363732850702582978263902770815145784459747722357071843971107674179038674942891694705904306,
+/// G1_GENERATOR_X
+/// Reference: https://github.com/scipr-lab/libff/blob/c927821ebe02e0a24b5e0f9170cec5e211a35f08/libff/algebra/curves/mnt/mnt4/mnt4_init.cpp#L137
+#[rustfmt::skip]
+pub const G1_GENERATOR_X: Fq = field_new!(Fq, "60760244141852568949126569781626075788424196370144486719385562369396875346601926534016838");
+
+/// G1_GENERATOR_Y
+/// Reference: https://github.com/scipr-lab/libff/blob/c927821ebe02e0a24b5e0f9170cec5e211a35f08/libff/algebra/curves/mnt/mnt4/mnt4_init.cpp#L138
+#[rustfmt::skip]
+pub const G1_GENERATOR_Y: Fq = field_new!(Fq, "363732850702582978263902770815145784459747722357071843971107674179038674942891694705904306");
diff --git a/arkworks/curves/mnt4_298/src/curves/g2.rs b/arkworks/curves/mnt4_298/src/curves/g2.rs
new file mode 100644
index 00000000..30903fd5
--- /dev/null
+++ b/arkworks/curves/mnt4_298/src/curves/g2.rs
@@ -0,0 +1,91 @@
+use crate::{Fq, Fq2, Fr, FQ_ZERO, G1_COEFF_A_NON_RESIDUE};
+use ark_ec::{
+    mnt4,
+    mnt4::MNT4Parameters,
+    models::{ModelParameters, SWModelParameters},
+};
+use ark_ff::field_new;
+
+pub type G2Affine = mnt4::G2Affine<Parameters>;
+pub type G2Projective = mnt4::G2Projective<Parameters>;
+pub type G2Prepared = mnt4::G2Prepared<Parameters>;
+
+#[derive(Clone, Default, PartialEq, Eq)]
+pub struct Parameters;
+
+impl ModelParameters for Parameters {
+    type BaseField = Fq2;
+    type ScalarField = Fr;
+}
+
+/// MUL_BY_A_C0 = NONRESIDUE * COEFF_A
+#[rustfmt::skip]
+pub const MUL_BY_A_C0: Fq = G1_COEFF_A_NON_RESIDUE;
+
+/// MUL_BY_A_C1 = NONRESIDUE * COEFF_A
+#[rustfmt::skip]
+pub const MUL_BY_A_C1: Fq = G1_COEFF_A_NON_RESIDUE;
+
+impl SWModelParameters for Parameters {
+    const COEFF_A: Fq2 = crate::Parameters::TWIST_COEFF_A;
+    // B coefficient of MNT4-298 G2 =
+    // ```
+    // mnt4298_twist_coeff_b = mnt4298_Fq2(mnt4298_Fq::zero(),
+    //                                     mnt4298_G1::coeff_b * mnt4298_Fq2::non_residue);
+    // non_residue = mnt4298_Fq2::non_residue = mnt4298_Fq("13");
+    //  = (ZERO, G1_B_COEFF * NON_RESIDUE);
+    //  =
+    //  (0, 67372828414711144619833451280373307321534573815811166723479321465776723059456513877937430)
+    // ```
+    #[rustfmt::skip]
+    const COEFF_B: Fq2 = field_new!(Fq2,
+        FQ_ZERO,
+        field_new!(Fq, "67372828414711144619833451280373307321534573815811166723479321465776723059456513877937430"),
+    );
+
+    /// COFACTOR =
+    /// 475922286169261325753349249653048451545124879932565935237842521413255878328503110407553025
+    #[rustfmt::skip]
+    const COFACTOR: &'static [u64] = &[
+        15480692783052488705,
+        9802782456999489873,
+        14622846468721090623,
+        11702080941310629006,
+        4110145082483,
+    ];
+
+    /// COFACTOR^(-1) mod r =
+    /// 475922286169261325753349249653048451545124878207887910632124039320641839552134835598065665
+    #[rustfmt::skip]
+    const COFACTOR_INV: Fr = field_new!(Fr, "475922286169261325753349249653048451545124878207887910632124039320641839552134835598065665");
+
+    /// AFFINE_GENERATOR_COEFFS = (G2_GENERATOR_X, G2_GENERATOR_Y)
+    const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) =
+        (G2_GENERATOR_X, G2_GENERATOR_Y);
+
+    #[inline(always)]
+    fn mul_by_a(elt: &Fq2) -> Fq2 {
+        field_new!(Fq2, MUL_BY_A_C0 * &elt.c0, MUL_BY_A_C1 * &elt.c1,)
+    }
+}
+
+const G2_GENERATOR_X: Fq2 = field_new!(Fq2, G2_GENERATOR_X_C0, G2_GENERATOR_X_C1);
+const G2_GENERATOR_Y: Fq2 = field_new!(Fq2, G2_GENERATOR_Y_C0, G2_GENERATOR_Y_C1);
+
+// Generator of G2
+// These are two Fq elements each because X and Y (and Z) are elements of Fq^2
+// X = 438374926219350099854919100077809681842783509163790991847867546339851681564223481322252708,
+//     37620953615500480110935514360923278605464476459712393277679280819942849043649216370485641,
+// Y = 37437409008528968268352521034936931842973546441370663118543015118291998305624025037512482,
+//     424621479598893882672393190337420680597584695892317197646113820787463109735345923009077489,
+#[rustfmt::skip]
+pub const G2_GENERATOR_X_C0: Fq = field_new!(Fq, "438374926219350099854919100077809681842783509163790991847867546339851681564223481322252708");
+
+#[rustfmt::skip]
+pub const G2_GENERATOR_X_C1: Fq = field_new!(Fq, "37620953615500480110935514360923278605464476459712393277679280819942849043649216370485641");
"37620953615500480110935514360923278605464476459712393277679280819942849043649216370485641"); + +#[rustfmt::skip] +pub const G2_GENERATOR_Y_C0: Fq = field_new!(Fq, "37437409008528968268352521034936931842973546441370663118543015118291998305624025037512482"); + +#[rustfmt::skip] +pub const G2_GENERATOR_Y_C1: Fq = field_new!(Fq, "424621479598893882672393190337420680597584695892317197646113820787463109735345923009077489"); diff --git a/arkworks/curves/mnt4_298/src/curves/mod.rs b/arkworks/curves/mnt4_298/src/curves/mod.rs new file mode 100644 index 00000000..a2655c43 --- /dev/null +++ b/arkworks/curves/mnt4_298/src/curves/mod.rs @@ -0,0 +1,54 @@ +use ark_ec::models::mnt4::{MNT4Parameters, MNT4}; +use ark_ff::{biginteger::BigInteger320, field_new, Fp2}; + +use crate::{Fq, Fq2, Fq2Parameters, Fq4Parameters, Fr}; + +pub mod g1; +pub mod g2; + +#[cfg(test)] +mod tests; + +pub use self::{ + g1::{G1Affine, G1Prepared, G1Projective}, + g2::{G2Affine, G2Prepared, G2Projective}, +}; + +pub type MNT4_298 = MNT4; + +pub struct Parameters; + +impl MNT4Parameters for Parameters { + const TWIST: Fp2 = field_new!(Fq2, FQ_ZERO, FQ_ONE); + // A coefficient of MNT4-298 G2 = + // ``` + // mnt4298_twist_coeff_a = mnt4298_Fq2(mnt4298_G1::coeff_a * non_residue, mnt6298_Fq::zero()); + // = (A_COEFF * NONRESIDUE, ZERO) + // = (34, ZERO) + // ``` + #[rustfmt::skip] + const TWIST_COEFF_A: Fp2 = field_new!(Fq2, + G1_COEFF_A_NON_RESIDUE, + FQ_ZERO, + ); + + const ATE_LOOP_COUNT: &'static [u64] = &[993502997770534912, 5071219579242586943, 2027349]; + const ATE_IS_LOOP_COUNT_NEG: bool = false; + const FINAL_EXPONENT_LAST_CHUNK_1: BigInteger320 = BigInteger320([0x1, 0x0, 0x0, 0x0, 0x0]); + const FINAL_EXPONENT_LAST_CHUNK_W0_IS_NEG: bool = false; + const FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0: BigInteger320 = + BigInteger320([993502997770534913, 5071219579242586943, 2027349, 0, 0]); + type Fp = Fq; + type Fr = Fr; + type Fp2Params = Fq2Parameters; + type Fp4Params = Fq4Parameters; + type G1Parameters = self::g1::Parameters; + type G2Parameters = self::g2::Parameters; +} + +// 34 +pub const G1_COEFF_A_NON_RESIDUE: Fq = field_new!(Fq, "34"); +pub const FQ_ZERO: Fq = field_new!(Fq, "0"); +pub const FQ_ONE: Fq = field_new!(Fq, "1"); +pub const FR_ZERO: Fr = field_new!(Fr, "0"); +pub const FR_ONE: Fr = field_new!(Fr, "1"); diff --git a/arkworks/curves/mnt4_298/src/curves/tests.rs b/arkworks/curves/mnt4_298/src/curves/tests.rs new file mode 100644 index 00000000..1997da44 --- /dev/null +++ b/arkworks/curves/mnt4_298/src/curves/tests.rs @@ -0,0 +1,91 @@ +use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve}; +use ark_ff::{Field, One, PrimeField, UniformRand}; +use ark_std::rand::Rng; +use ark_std::test_rng; + +use crate::*; + +use ark_algebra_test_templates::{curves::*, groups::*}; + +#[test] +fn test_g1_projective_curve() { + curve_tests::(); + + sw_tests::(); +} + +#[test] +fn test_g1_projective_group() { + let mut rng = test_rng(); + let a: G1Projective = rng.gen(); + let b: G1Projective = rng.gen(); + group_test(a, b); +} + +#[test] +fn test_g1_generator() { + let generator = G1Affine::prime_subgroup_generator(); + assert!(generator.is_on_curve()); + assert!(generator.is_in_correct_subgroup_assuming_on_curve()); +} + +#[test] +fn test_g2_projective_curve() { + curve_tests::(); + + sw_tests::(); +} + +#[test] +fn test_g2_projective_group() { + let mut rng = test_rng(); + let a: G2Projective = rng.gen(); + let b: G2Projective = rng.gen(); + group_test(a, b); +} + +#[test] +fn test_g2_generator() { + let generator = 
+    assert!(generator.is_on_curve());
+    assert!(generator.is_in_correct_subgroup_assuming_on_curve());
+}
+
+#[test]
+fn test_bilinearity() {
+    let mut rng = test_rng();
+    let a: G1Projective = rng.gen();
+    let b: G2Projective = rng.gen();
+    let s: Fr = rng.gen();
+
+    let sa = a.mul(s.into_repr());
+    let sb = b.mul(s.into_repr());
+
+    let ans1 = MNT4_298::pairing(sa, b);
+    let ans2 = MNT4_298::pairing(a, sb);
+    let ans3 = MNT4_298::pairing(a, b).pow(s.into_repr());
+
+    assert_eq!(ans1, ans2);
+    assert_eq!(ans2, ans3);
+
+    assert_ne!(ans1, Fq4::one());
+    assert_ne!(ans2, Fq4::one());
+    assert_ne!(ans3, Fq4::one());
+
+    assert_eq!(ans1.pow(Fr::characteristic()), Fq4::one());
+    assert_eq!(ans2.pow(Fr::characteristic()), Fq4::one());
+    assert_eq!(ans3.pow(Fr::characteristic()), Fq4::one());
+}
+
+#[test]
+fn test_product_of_pairings() {
+    let rng = &mut test_rng();
+
+    let a = G1Projective::rand(rng).into_affine();
+    let b = G2Projective::rand(rng).into_affine();
+    let c = G1Projective::rand(rng).into_affine();
+    let d = G2Projective::rand(rng).into_affine();
+    let ans1 = MNT4_298::pairing(a, b) * &MNT4_298::pairing(c, d);
+    let ans2 = MNT4_298::product_of_pairings(&[(a.into(), b.into()), (c.into(), d.into())]);
+    assert_eq!(ans1, ans2);
+}
diff --git a/arkworks/curves/mnt4_298/src/fields/fq.rs b/arkworks/curves/mnt4_298/src/fields/fq.rs
new file mode 100644
index 00000000..028c75fd
--- /dev/null
+++ b/arkworks/curves/mnt4_298/src/fields/fq.rs
@@ -0,0 +1,115 @@
+use ark_ff::{
+    biginteger::BigInteger320 as BigInteger,
+    fields::{FftParameters, Fp320, Fp320Parameters, FpParameters},
+};
+
+pub type Fq = Fp320<FqParameters>;
+
+pub struct FqParameters;
+
+impl Fp320Parameters for FqParameters {}
+impl FftParameters for FqParameters {
+    type BigInt = BigInteger;
+
+    const TWO_ADICITY: u32 = 17;
+
+    #[rustfmt::skip]
+    const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([
+        9821480371597472441u64,
+        9468346035609379175u64,
+        9963748368231707135u64,
+        14865337659602750405u64,
+        3984815592673u64,
+    ]);
+
+    const SMALL_SUBGROUP_BASE: Option<u32> = Some(7);
+    const SMALL_SUBGROUP_BASE_ADICITY: Option<u32> = Some(2);
+
+    /// LARGE_SUBGROUP_ROOT_OF_UNITY = g^x, where g is the generator and
+    /// x = (n - 1) / 2^17 / 7^2,
+    /// and we represent this value in the Montgomery residue form.
+    /// I.e., we write
+    /// 381811485921190977554243339163030148371175054922689353173385941180422489253833691237722982
+    /// * R
+    /// = 260534023778902228073198316993669317435810479439368306496187170459125001342456918103569322
+    const LARGE_SUBGROUP_ROOT_OF_UNITY: Option<Self::BigInt> = Some(BigInteger([
+        7711798843682337706u64,
+        16456007754393011187u64,
+        7470854640069402569u64,
+        10767969225751706229u64,
+        2250015743691u64,
+    ]));
+}
+impl FpParameters for FqParameters {
+    /// MODULUS = 475922286169261325753349249653048451545124879242694725395555128576210262817955800483758081
+    #[rustfmt::skip]
+    const MODULUS: BigInteger = BigInteger([
+        14487189785281953793u64,
+        4731562877756902930u64,
+        14622846468719063274u64,
+        11702080941310629006u64,
+        4110145082483u64,
+    ]);
+
+    const MODULUS_BITS: u32 = 298;
+
+    const CAPACITY: u32 = Self::MODULUS_BITS - 1;
+
+    const REPR_SHAVE_BITS: u32 = 22;
+
+    #[rustfmt::skip]
+    const R: BigInteger = BigInteger([
+        1784298994435064924u64,
+        16852041090100268533u64,
+        14258261760832875328u64,
+        2961187778261111191u64,
+        1929014752195u64,
+    ]);
+
+    #[rustfmt::skip]
+    const R2: BigInteger = BigInteger([
+        28619103704175136u64,
+        11702218449377544339u64,
+        7403203599591297249u64,
+        2248105543421449339u64,
+        2357678148148u64,
+    ]);
+
+    const INV: u64 = 12714121028002250751u64;
+
+    #[rustfmt::skip]
+    const GENERATOR: BigInteger = BigInteger([
+        2709730703260633621u64,
+        13556085429182073539u64,
+        10903316137158576359u64,
+        5319113788683590444u64,
+        4022235209932u64,
+    ]);
+
+    #[rustfmt::skip]
+    const T: BigInteger = BigInteger([
+        0x70964866b2d38b3,
+        0x987520d4f1af2890,
+        0x2a47657764b1ae89,
+        0x6a39d133124ed3d8,
+        0x1de7bde,
+    ]);
+
+    #[rustfmt::skip]
+    const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        0x384b24335969c59,
+        0xcc3a906a78d79448,
+        0x1523b2bbb258d744,
+        0x351ce899892769ec,
+        0xef3def,
+    ]);
+
+    #[rustfmt::skip]
+    const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        0x64866b2d38b30000,
+        0x20d4f1af28900709,
+        0x657764b1ae899875,
+        0xd133124ed3d82a47,
+        0x1de7bde6a39,
+    ]);
+}
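The `FftParameters` impl above claims `TWO_ADICITY = 17` together with a small subgroup of order 7^2. As an editorial cross-check (not part of the vendored diff), the sketch below verifies q - 1 = 2^17 * 7^2 * t, with t odd and coprime to 7, using num-bigint, which the parent crate already depends on:

```
// Editorial sketch: verify TWO_ADICITY, SMALL_SUBGROUP_BASE, and
// SMALL_SUBGROUP_BASE_ADICITY for the MNT4-298 base-field modulus.
use num_bigint::BigUint;
use num_traits::Zero;

fn main() {
    let q: BigUint = "475922286169261325753349249653048451545124879242694725395555128576210262817955800483758081"
        .parse()
        .unwrap();
    let q_minus_1 = &q - 1u32;
    // The exact power of two dividing q - 1 is the two-adicity.
    assert_eq!(q_minus_1.trailing_zeros(), Some(17));
    let odd_part = &q_minus_1 >> 17usize;
    // 7^2 divides the odd part, but 7^3 does not.
    assert!((&odd_part % BigUint::from(49u32)).is_zero());
    assert!(!(&odd_part % BigUint::from(343u32)).is_zero());
    println!("q - 1 = 2^17 * 7^2 * t with t odd and coprime to 7");
}
```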
diff --git a/arkworks/curves/mnt4_298/src/fields/fq2.rs b/arkworks/curves/mnt4_298/src/fields/fq2.rs
new file mode 100644
index 00000000..46b9d427
--- /dev/null
+++ b/arkworks/curves/mnt4_298/src/fields/fq2.rs
@@ -0,0 +1,30 @@
+use crate::{Fq, FQ_ONE};
+use ark_ff::{
+    field_new,
+    fields::fp2::{Fp2, Fp2Parameters},
+};
+
+pub type Fq2 = Fp2<Fq2Parameters>;
+
+pub struct Fq2Parameters;
+
+impl Fp2Parameters for Fq2Parameters {
+    type Fp = Fq;
+
+    /// The quadratic non-residue (17) used to construct the extension is
+    /// the same as that used in [`libff`](https://github.com/scipr-lab/libff/blob/c927821ebe02e0a24b5e0f9170cec5e211a35f08/libff/algebra/curves/mnt/mnt4/mnt4_init.cpp#L102).
+    #[rustfmt::skip]
+    const NONRESIDUE: Fq = field_new!(Fq, "17");
+
+    /// The quadratic non-residue in Fp2 that is used
+    /// in the computation of square roots is (8, 1), the same as that in
+    /// [`libff`](https://github.com/scipr-lab/libff/blob/c927821ebe02e0a24b5e0f9170cec5e211a35f08/libff/algebra/curves/mnt/mnt4/mnt4_init.cpp#L103)
+    const QUADRATIC_NONRESIDUE: (Self::Fp, Self::Fp) = (field_new!(Fq, "8"), FQ_ONE);
+
+    /// Precomputed coefficients:
+    /// `[1, 475922286169261325753349249653048451545124879242694725395555128576210262817955800483758080]`
+    const FROBENIUS_COEFF_FP2_C1: &'static [Self::Fp] = &[
+        FQ_ONE,
+        field_new!(Fq, "475922286169261325753349249653048451545124879242694725395555128576210262817955800483758080"),
+    ];
+}
diff --git a/arkworks/curves/mnt4_298/src/fields/fq4.rs b/arkworks/curves/mnt4_298/src/fields/fq4.rs
new file mode 100644
index 00000000..7485336e
--- /dev/null
+++ b/arkworks/curves/mnt4_298/src/fields/fq4.rs
@@ -0,0 +1,30 @@
+use crate::{Fq, Fq2, Fq2Parameters, FQ_ONE, FQ_ZERO};
+use ark_ff::{
+    field_new,
+    fields::fp4::{Fp4, Fp4Parameters},
+};
+
+pub type Fq4 = Fp4<Fq4Parameters>;
+
+pub struct Fq4Parameters;
+
+impl Fp4Parameters for Fq4Parameters {
+    type Fp2Params = Fq2Parameters;
+
+    const NONRESIDUE: Fq2 = field_new!(Fq2, FQ_ZERO, FQ_ONE);
+
+    // Coefficients for the Frobenius automorphism.
+    // c1[0] = 1,
+    // c1[1] = 7684163245453501615621351552473337069301082060976805004625011694147890954040864167002308
+    // c1[2] = 475922286169261325753349249653048451545124879242694725395555128576210262817955800483758080
+    // c1[3] = 468238122923807824137727898100575114475823797181717920390930116882062371863914936316755773
+    //
+    // These are calculated as `FROBENIUS_COEFF_FP4_C1[i] = Fp2Params::NONRESIDUE^((q^i - 1) / 4)`.
+    #[rustfmt::skip]
+    const FROBENIUS_COEFF_FP4_C1: &'static [Fq] = &[
+        FQ_ONE,
+        field_new!(Fq, "7684163245453501615621351552473337069301082060976805004625011694147890954040864167002308"),
+        field_new!(Fq, "475922286169261325753349249653048451545124879242694725395555128576210262817955800483758080"),
+        field_new!(Fq, "468238122923807824137727898100575114475823797181717920390930116882062371863914936316755773"),
+    ];
+}
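The comment in `fq4.rs` above states the rule `FROBENIUS_COEFF_FP4_C1[i] = Fp2Params::NONRESIDUE^((q^i - 1) / 4)`. As an editorial cross-check (not part of the vendored diff), the sketch below verifies the i = 1 case with num-bigint's `modpow`, where the non-residue is 17 and q is the MNT4-298 base-field modulus:

```
// Editorial sketch: check c1[1] = 17^((q - 1) / 4) mod q for MNT4-298.
use num_bigint::BigUint;

fn main() {
    let q: BigUint = "475922286169261325753349249653048451545124879242694725395555128576210262817955800483758081"
        .parse()
        .unwrap();
    // q is 1 mod 4 (two-adicity 17), so this division is exact.
    let exp = (&q - 1u32) / 4u32;
    let got = BigUint::from(17u32).modpow(&exp, &q);
    let expected: BigUint = "7684163245453501615621351552473337069301082060976805004625011694147890954040864167002308"
        .parse()
        .unwrap();
    assert_eq!(got, expected);
    println!("FROBENIUS_COEFF_FP4_C1[1] matches 17^((q - 1) / 4) mod q");
}
```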
diff --git a/arkworks/curves/mnt4_298/src/fields/fr.rs b/arkworks/curves/mnt4_298/src/fields/fr.rs
new file mode 100644
index 00000000..3cfaf219
--- /dev/null
+++ b/arkworks/curves/mnt4_298/src/fields/fr.rs
@@ -0,0 +1,99 @@
+use ark_ff::{
+    biginteger::BigInteger320 as BigInteger,
+    fields::{FftParameters, Fp320, Fp320Parameters, FpParameters},
+};
+
+pub type Fr = Fp320<FrParameters>;
+
+pub struct FrParameters;
+
+impl Fp320Parameters for FrParameters {}
+impl FftParameters for FrParameters {
+    type BigInt = BigInteger;
+
+    const TWO_ADICITY: u32 = 34;
+
+    #[rustfmt::skip]
+    const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([
+        0x818b361df1af7be4,
+        0x2ae2750d46a53957,
+        0x5784a8fe792c5f8a,
+        0xf9bd39c0cdcf1bb6,
+        0x6a24a0f8a8,
+    ]);
+}
+impl FpParameters for FrParameters {
+    /// MODULUS = 475922286169261325753349249653048451545124878552823515553267735739164647307408490559963137
+    #[rustfmt::skip]
+    const MODULUS: BigInteger = BigInteger([
+        0xbb4334a400000001,
+        0xfb494c07925d6ad3,
+        0xcaeec9635cf44194,
+        0xa266249da7b0548e,
+        0x3bcf7bcd473,
+    ]);
+
+    const MODULUS_BITS: u32 = 298;
+
+    const CAPACITY: u32 = Self::MODULUS_BITS - 1;
+
+    const REPR_SHAVE_BITS: u32 = 22;
+
+    #[rustfmt::skip]
+    const R: BigInteger = BigInteger([
+        0xc3177aefffbb845c,
+        0x9b80c702f9961788,
+        0xc5df8dcdac70a85a,
+        0x29184098647b5197,
+        0x1c1223d33c3,
+    ]);
+
+    #[rustfmt::skip]
+    const R2: BigInteger = BigInteger([
+        0x465a743c68e0596b,
+        0x34f9102adb68371,
+        0x4bbd6dcf1e3a8386,
+        0x2ff00dced8e4b6d,
+        0x149bb44a342,
+    ]);
+
+    const INV: u64 = 0xbb4334a3ffffffff;
+
+    #[rustfmt::skip]
+    const GENERATOR: BigInteger = BigInteger([
+        0xb1ddfacffd532b94,
+        0x25e295ff76674008,
+        0x8f00647b48958d36,
+        0x1159f37d4e0fddb2,
+        0x2977770b3d1,
+    ]);
+
+    #[rustfmt::skip]
+    const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        0xdda19a5200000000,
+        0x7da4a603c92eb569,
+        0x657764b1ae7a20ca,
+        0xd133124ed3d82a47,
+        0x1de7bde6a39,
+    ]);
+
+    // T and T_MINUS_ONE_DIV_TWO, where MODULUS - 1 = 2^S * T
+
+    #[rustfmt::skip]
+    const T: BigInteger = BigInteger([
+        0xe4975ab4eed0cd29,
+        0xd73d10653ed25301,
+        0x69ec1523b2bbb258,
+        0x3def351ce8998927,
+        0xef,
+    ]);
+
+    #[rustfmt::skip]
+    const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        0xf24bad5a77686694,
+        0x6b9e88329f692980,
+        0xb4f60a91d95dd92c,
+        0x9ef79a8e744cc493,
+        0x77,
+    ]);
+}
diff --git a/arkworks/curves/mnt4_298/src/fields/mod.rs b/arkworks/curves/mnt4_298/src/fields/mod.rs
new file mode 100644
index 00000000..719e03be
--- /dev/null
+++ b/arkworks/curves/mnt4_298/src/fields/mod.rs
@@ -0,0 +1,22 @@
+#[cfg(feature = "scalar_field")]
+pub mod fr;
+#[cfg(feature = "scalar_field")]
+pub use self::fr::*;
+
+#[cfg(feature = "base_field")]
+pub mod fq;
+#[cfg(feature = "base_field")]
+pub use self::fq::*;
+
+#[cfg(feature = "curve")]
+pub mod fq2;
+#[cfg(feature = "curve")]
+pub use self::fq2::*;
+
+#[cfg(feature = "curve")]
+pub mod fq4;
+#[cfg(feature = "curve")]
+pub use self::fq4::*;
+
+#[cfg(all(feature = "curve", test))]
+mod tests;
diff --git a/arkworks/curves/mnt4_298/src/fields/tests.rs b/arkworks/curves/mnt4_298/src/fields/tests.rs
new file mode 100644
index 00000000..26efa29f
--- /dev/null
+++ b/arkworks/curves/mnt4_298/src/fields/tests.rs
@@ -0,0 +1,46 @@
+use ark_ff::Field;
+use ark_std::rand::Rng;
+use ark_std::test_rng;
+
+use crate::*;
+
+use ark_algebra_test_templates::fields::*;
+
+#[test]
+fn test_fr() {
+    let mut rng = test_rng();
+    let a: Fr = rng.gen();
+    let b: Fr = rng.gen();
+    field_test(a, b);
+    sqrt_field_test(a);
+    primefield_test::<Fr>();
+}
+
+#[test]
+fn test_fq() {
+    let mut rng = test_rng();
+    let a: Fq = rng.gen();
+    let b: Fq = rng.gen();
+    field_test(a, b);
+    sqrt_field_test(a);
+    primefield_test::<Fq>();
+}
+
+#[test]
+fn test_fq2() {
+    let mut rng = test_rng();
+    let a: Fq2 = rng.gen();
+    let b: Fq2 = rng.gen();
+    field_test(a, b);
+    sqrt_field_test(a);
+    frobenius_test::<Fq2, _>(Fq::characteristic(), 13);
+}
+
+#[test]
+fn test_fq4() {
+    let mut rng = test_rng();
+    let a: Fq4 = rng.gen();
+    let b: Fq4 = rng.gen();
+    field_test(a, b);
+    frobenius_test::<Fq4, _>(Fq::characteristic(), 13);
+}
diff --git a/arkworks/curves/mnt4_298/src/lib.rs b/arkworks/curves/mnt4_298/src/lib.rs
new file mode 100644
index 00000000..4797d30a
--- /dev/null
+++ b/arkworks/curves/mnt4_298/src/lib.rs
@@ -0,0 +1,42 @@
+#![cfg_attr(not(feature = "std"), no_std)]
+#![deny(
+    warnings,
+    unused,
+    future_incompatible,
+    nonstandard_style,
+    rust_2018_idioms
+)]
+#![forbid(unsafe_code)]
+
+//! This library implements the MNT4_298 curve generated by
+//! [\[BCTV14\]](https://eprint.iacr.org/2014/595). The name denotes that it is a
+//! Miyaji--Nakabayashi--Takano curve of embedding degree 4, defined over a 298-bit (prime) field.
+//! The main feature of this curve is that its scalar field and base field respectively equal the
+//! base field and scalar field of MNT6_298.
+//!
+//!
+//! Curve information:
+//! 
* Base field: q = 475922286169261325753349249653048451545124879242694725395555128576210262817955800483758081 +//! * Scalar field: r = 475922286169261325753349249653048451545124878552823515553267735739164647307408490559963137 +//! * valuation(q - 1, 2) = 17 +//! * valuation(r - 1, 2) = 34 +//! * G1 curve equation: y^2 = x^3 + ax + b, where +//! * a = 2 +//! * b = 423894536526684178289416011533888240029318103673896002803341544124054745019340795360841685 +//! * G2 curve equation: y^2 = x^3 + Ax + B, where +//! * A = Fq2 = (a * NON_RESIDUE, 0) +//! * B = Fq2(0, b * NON_RESIDUE) +//! * NON_RESIDUE = 17 is the quadratic non-residue used for constructing the extension field Fq2 + +#[cfg(feature = "curve")] +mod curves; +#[cfg(any(feature = "scalar_field", feature = "base_field"))] +mod fields; + +#[cfg(feature = "r1cs")] +pub mod constraints; + +#[cfg(feature = "curve")] +pub use curves::*; +#[cfg(any(feature = "scalar_field", feature = "base_field"))] +pub use fields::*; diff --git a/arkworks/curves/mnt4_753/Cargo.toml b/arkworks/curves/mnt4_753/Cargo.toml new file mode 100644 index 00000000..f1dbe862 --- /dev/null +++ b/arkworks/curves/mnt4_753/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "ark-mnt4-753" +version = "0.3.0" +authors = [ "arkworks contributors" ] +description = "The MNT4-753 pairing-friendly elliptic curve" +homepage = "https://arkworks.rs" +repository = "https://github.com/arkworks-rs/algebra" +documentation = "https://docs.rs/ark-mnt4-753/" +keywords = ["cryptography", "finite-fields" ] +categories = ["cryptography"] +include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +license = "MIT/Apache-2.0" +edition = "2018" + +[dependencies] +ark-ff = { path = "../../algebra/ff", version = "^0.3.0", default-features = false } +ark-ec = { path = "../../algebra/ec", version = "^0.3.0", default-features = false } +ark-std = {path = "../../std", version = "^0.3.0", default-features = false } +ark-r1cs-std = { path = "../../r1cs-std", version = "^0.3.0", default-features = false, optional = true } + +[dev-dependencies] +ark-relations = { path = "../../snark/relations", version = "^0.3.0", default-features = false } +ark-serialize = { path = "../../algebra/serialize", version = "^0.3.0", default-features = false } +ark-algebra-test-templates = { version = "^0.3.0", default-features = false } +ark-curve-constraint-tests = { path = "../curve-constraint-tests", default-features = false } + +[features] +default = [ "curve" ] +std = [ "ark-std/std", "ark-ff/std", "ark-ec/std" ] + +curve = [ "scalar_field", "base_field" ] +scalar_field = [] +base_field = [] +r1cs = [ "base_field", "ark-r1cs-std" ] \ No newline at end of file diff --git a/arkworks/curves/mnt4_753/LICENSE-APACHE b/arkworks/curves/mnt4_753/LICENSE-APACHE new file mode 120000 index 00000000..965b606f --- /dev/null +++ b/arkworks/curves/mnt4_753/LICENSE-APACHE @@ -0,0 +1 @@ +../LICENSE-APACHE \ No newline at end of file diff --git a/arkworks/curves/mnt4_753/LICENSE-MIT b/arkworks/curves/mnt4_753/LICENSE-MIT new file mode 120000 index 00000000..76219eb7 --- /dev/null +++ b/arkworks/curves/mnt4_753/LICENSE-MIT @@ -0,0 +1 @@ +../LICENSE-MIT \ No newline at end of file diff --git a/arkworks/curves/mnt4_753/src/constraints/curves.rs b/arkworks/curves/mnt4_753/src/constraints/curves.rs new file mode 100644 index 00000000..febfeeda --- /dev/null +++ b/arkworks/curves/mnt4_753/src/constraints/curves.rs @@ -0,0 +1,29 @@ +use crate::Parameters; +use ark_r1cs_std::groups::mnt4; + +/// An element of G1 in the 
MNT4-753 bilinear group.
+pub type G1Var = mnt4::G1Var<Parameters>;
+/// An element of G2 in the MNT4-753 bilinear group.
+pub type G2Var = mnt4::G2Var<Parameters>;
+
+/// Represents the cached precomputation that can be performed on a G1 element
+/// which enables speeding up pairing computation.
+pub type G1PreparedVar = mnt4::G1PreparedVar<Parameters>;
+/// Represents the cached precomputation that can be performed on a G2 element
+/// which enables speeding up pairing computation.
+pub type G2PreparedVar = mnt4::G2PreparedVar<Parameters>;
+
+#[test]
+fn test() {
+    use ark_ec::models::mnt4::MNT4Parameters;
+    ark_curve_constraint_tests::curves::sw_test::<
+        <Parameters as MNT4Parameters>::G1Parameters,
+        G1Var,
+    >()
+    .unwrap();
+    ark_curve_constraint_tests::curves::sw_test::<
+        <Parameters as MNT4Parameters>::G2Parameters,
+        G2Var,
+    >()
+    .unwrap();
+}
diff --git a/arkworks/curves/mnt4_753/src/constraints/fields.rs b/arkworks/curves/mnt4_753/src/constraints/fields.rs
new file mode 100644
index 00000000..d99ed86e
--- /dev/null
+++ b/arkworks/curves/mnt4_753/src/constraints/fields.rs
@@ -0,0 +1,26 @@
+use crate::{Fq, Fq2Parameters, Fq4Parameters};
+
+use ark_r1cs_std::fields::{fp::FpVar, fp2::Fp2Var, fp4::Fp4Var};
+
+/// A variable that is the R1CS equivalent of `crate::Fq`.
+pub type FqVar = FpVar<Fq>;
+/// A variable that is the R1CS equivalent of `crate::Fq2`.
+pub type Fq2Var = Fp2Var<Fq2Parameters>;
+/// A variable that is the R1CS equivalent of `crate::Fq4`.
+pub type Fq4Var = Fp4Var<Fq4Parameters>;
+
+#[test]
+fn mnt4_753_field_gadgets_test() {
+    use super::*;
+    use crate::{Fq, Fq2, Fq4};
+    use ark_curve_constraint_tests::fields::*;
+
+    field_test::<_, _, FqVar>().unwrap();
+    frobenius_tests::<Fq, _, FqVar>(13).unwrap();
+
+    field_test::<_, _, Fq2Var>().unwrap();
+    frobenius_tests::<Fq2, _, Fq2Var>(13).unwrap();
+
+    field_test::<_, _, Fq4Var>().unwrap();
+    frobenius_tests::<Fq4, _, Fq4Var>(13).unwrap();
+}
diff --git a/arkworks/curves/mnt4_753/src/constraints/mod.rs b/arkworks/curves/mnt4_753/src/constraints/mod.rs
new file mode 100644
index 00000000..4744e4c9
--- /dev/null
+++ b/arkworks/curves/mnt4_753/src/constraints/mod.rs
@@ -0,0 +1,162 @@
+//! This module implements the R1CS equivalent of `ark_mnt4_753`.
+//!
+//! It implements field variables for `crate::{Fq, Fq2, Fq4}`,
+//! group variables for `crate::{G1, G2}`, and implements constraint
+//! generation for computing `MNT4_753::pairing`.
+//!
+//! The field underlying these constraints is `crate::Fq`.
+//!
+//! # Examples
+//!
+//! One can perform standard algebraic operations on `FqVar`:
+//!
+//! ```
+//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
+//! use ark_std::UniformRand;
+//! use ark_relations::r1cs::*;
+//! use ark_r1cs_std::prelude::*;
+//! use ark_mnt4_753::{*, constraints::*};
+//!
+//! let cs = ConstraintSystem::<Fq>::new_ref();
+//! // This rng is just for test purposes; do not use it
+//! // in real applications.
+//! let mut rng = ark_std::test_rng();
+//!
+//! // Generate some random `Fq` elements.
+//! let a_native = Fq::rand(&mut rng);
+//! let b_native = Fq::rand(&mut rng);
+//!
+//! // Allocate `a_native` and `b_native` as witness variables in `cs`.
+//! let a = FqVar::new_witness(ark_relations::ns!(cs, "generate_a"), || Ok(a_native))?;
+//! let b = FqVar::new_witness(ark_relations::ns!(cs, "generate_b"), || Ok(b_native))?;
+//!
+//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any
+//! // constraints or variables.
+//! let a_const = FqVar::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?;
+//! let b_const = FqVar::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?;
+//!
+//! let one = FqVar::one();
+//! let zero = FqVar::zero();
+//!
+//! // Sanity check one + one = two
+//! let two = &one + &one + &zero;
+//! two.enforce_equal(&one.double()?)?;
+//!
+//! assert!(cs.is_satisfied()?);
+//!
+//! // Check that the value of &a + &b is correct.
+//! assert_eq!((&a + &b).value()?, a_native + &b_native);
+//!
+//! // Check that the value of &a * &b is correct.
+//! assert_eq!((&a * &b).value()?, a_native * &b_native);
+//!
+//! // Check that operations on variables and constants are equivalent.
+//! (&a + &b).enforce_equal(&(&a_const + &b_const))?;
+//! assert!(cs.is_satisfied()?);
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! One can also perform standard algebraic operations on `G1Var` and `G2Var`:
+//!
+//! ```
+//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
+//! # use ark_std::UniformRand;
+//! # use ark_relations::r1cs::*;
+//! # use ark_r1cs_std::prelude::*;
+//! # use ark_mnt4_753::{*, constraints::*};
+//!
+//! # let cs = ConstraintSystem::<Fq>::new_ref();
+//! # let mut rng = ark_std::test_rng();
+//!
+//! // Generate some random `G1` elements.
+//! let a_native = G1Projective::rand(&mut rng);
+//! let b_native = G1Projective::rand(&mut rng);
+//!
+//! // Allocate `a_native` and `b_native` as witness variables in `cs`.
+//! let a = G1Var::new_witness(ark_relations::ns!(cs, "a"), || Ok(a_native))?;
+//! let b = G1Var::new_witness(ark_relations::ns!(cs, "b"), || Ok(b_native))?;
+//!
+//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any
+//! // constraints or variables.
+//! let a_const = G1Var::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?;
+//! let b_const = G1Var::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?;
+//!
+//! // This returns the identity of `G1`.
+//! let zero = G1Var::zero();
+//!
+//! // Sanity check one + one = two
+//! let two_a = &a + &a + &zero;
+//! two_a.enforce_equal(&a.double()?)?;
+//!
+//! assert!(cs.is_satisfied()?);
+//!
+//! // Check that the value of &a + &b is correct.
+//! assert_eq!((&a + &b).value()?, a_native + &b_native);
+//!
+//! // Check that operations on variables and constants are equivalent.
+//! (&a + &b).enforce_equal(&(&a_const + &b_const))?;
+//! assert!(cs.is_satisfied()?);
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! Finally, one can check pairing computations as well:
+//!
+//! ```
+//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
+//! # use ark_std::UniformRand;
+//! # use ark_ec::PairingEngine;
+//! # use ark_relations::r1cs::*;
+//! # use ark_r1cs_std::prelude::*;
+//! # use ark_mnt4_753::{*, constraints::*};
+//!
+//! # let cs = ConstraintSystem::<Fq>::new_ref();
+//! # let mut rng = ark_std::test_rng();
+//!
+//! // Generate random `G1` and `G2` elements.
+//! let a_native = G1Projective::rand(&mut rng);
+//! let b_native = G2Projective::rand(&mut rng);
+//!
+//! // Allocate `a_native` and `b_native` as witness variables in `cs`.
+//! let a = G1Var::new_witness(ark_relations::ns!(cs, "a"), || Ok(a_native))?;
+//! let b = G2Var::new_witness(ark_relations::ns!(cs, "b"), || Ok(b_native))?;
+//!
+//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any
+//! // constraints or variables.
+//! let a_const = G1Var::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?;
+//! let b_const = G2Var::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?;
+//!
+//! let pairing_result_native = MNT4_753::pairing(a_native, b_native);
+//!
+//! // Prepare `a` and `b` for pairing.
+//! let a_prep = constraints::PairingVar::prepare_g1(&a)?;
+//! let b_prep = constraints::PairingVar::prepare_g2(&b)?;
+//! let pairing_result = constraints::PairingVar::pairing(a_prep, b_prep)?;
+//!
+//! // Check that the value of the pairing is correct.
+//! assert_eq!(pairing_result.value()?, pairing_result_native);
+//!
+//! // Check that operations on variables and constants are equivalent.
+//! let a_prep_const = constraints::PairingVar::prepare_g1(&a_const)?;
+//! let b_prep_const = constraints::PairingVar::prepare_g2(&b_const)?;
+//! let pairing_result_const = constraints::PairingVar::pairing(a_prep_const, b_prep_const)?;
+//!
+//! pairing_result.enforce_equal(&pairing_result_const)?;
+//! assert!(cs.is_satisfied()?);
+//! # Ok(())
+//! # }
+//! ```
+
+mod fields;
+pub use fields::*;
+
+#[cfg(feature = "curve")]
+mod curves;
+#[cfg(feature = "curve")]
+mod pairing;
+
+#[cfg(feature = "curve")]
+pub use curves::*;
+#[cfg(feature = "curve")]
+pub use pairing::*;
diff --git a/arkworks/curves/mnt4_753/src/constraints/pairing.rs b/arkworks/curves/mnt4_753/src/constraints/pairing.rs
new file mode 100644
index 00000000..3e00df45
--- /dev/null
+++ b/arkworks/curves/mnt4_753/src/constraints/pairing.rs
@@ -0,0 +1,10 @@
+use crate::Parameters;
+
+/// Specifies the constraints for computing a pairing in the MNT4-753 bilinear group.
+pub type PairingVar = ark_r1cs_std::pairing::mnt4::PairingVar<Parameters>;
+
+#[test]
+fn test() {
+    use crate::MNT4_753;
+    ark_curve_constraint_tests::pairing::bilinearity_test::<MNT4_753, PairingVar>().unwrap()
+}
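These pairing constraints are what a SNARK over MNT4-753 ultimately relies on. As an editorial sketch (not part of the vendored diff), the following shows a minimal Groth16 round trip over this curve, proving knowledge of `a`, `b` with `a * b = c`; it assumes `ark-groth16` and `ark-relations` 0.3 are available, as elsewhere in this tree:

```
// Editorial sketch: Groth16 setup / prove / verify over MNT4-753.
use ark_groth16::{
    create_random_proof, generate_random_parameters, prepare_verifying_key, verify_proof,
};
use ark_mnt4_753::{Fr, MNT4_753};
use ark_relations::{
    lc,
    r1cs::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisError},
};
use ark_std::UniformRand;

struct MulCircuit {
    a: Option<Fr>,
    b: Option<Fr>,
}

impl ConstraintSynthesizer<Fr> for MulCircuit {
    fn generate_constraints(self, cs: ConstraintSystemRef<Fr>) -> Result<(), SynthesisError> {
        let a = cs.new_witness_variable(|| self.a.ok_or(SynthesisError::AssignmentMissing))?;
        let b = cs.new_witness_variable(|| self.b.ok_or(SynthesisError::AssignmentMissing))?;
        let c = cs.new_input_variable(|| {
            let a = self.a.ok_or(SynthesisError::AssignmentMissing)?;
            let b = self.b.ok_or(SynthesisError::AssignmentMissing)?;
            Ok(a * b)
        })?;
        // Enforce the single constraint a * b = c.
        cs.enforce_constraint(lc!() + a, lc!() + b, lc!() + c)
    }
}

fn main() -> Result<(), SynthesisError> {
    let mut rng = ark_std::test_rng();
    let params =
        generate_random_parameters::<MNT4_753, _, _>(MulCircuit { a: None, b: None }, &mut rng)?;
    let (a, b) = (Fr::rand(&mut rng), Fr::rand(&mut rng));
    let proof = create_random_proof(MulCircuit { a: Some(a), b: Some(b) }, &params, &mut rng)?;
    let pvk = prepare_verifying_key(&params.vk);
    assert!(verify_proof(&pvk, &proof, &[a * b])?);
    Ok(())
}
```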
diff --git a/arkworks/curves/mnt4_753/src/curves/g1.rs b/arkworks/curves/mnt4_753/src/curves/g1.rs
new file mode 100644
index 00000000..42cb219d
--- /dev/null
+++ b/arkworks/curves/mnt4_753/src/curves/g1.rs
@@ -0,0 +1,52 @@
+use ark_ec::{
+    mnt4,
+    models::{ModelParameters, SWModelParameters},
+};
+use ark_ff::field_new;
+
+use crate::{Fq, Fr, FR_ONE};
+
+pub type G1Affine = mnt4::G1Affine<Parameters>;
+pub type G1Projective = mnt4::G1Projective<Parameters>;
+pub type G1Prepared = mnt4::G1Prepared<Parameters>;
+
+#[derive(Clone, Default, PartialEq, Eq)]
+pub struct Parameters;
+
+impl ModelParameters for Parameters {
+    type BaseField = Fq;
+    type ScalarField = Fr;
+}
+
+impl SWModelParameters for Parameters {
+    /// COEFF_A = 2
+    #[rustfmt::skip]
+    const COEFF_A: Fq = field_new!(Fq, "2");
+
+    /// COEFF_B = 0x01373684A8C9DCAE7A016AC5D7748D3313CD8E39051C596560835DF0C9E50A5B59B882A92C78DC537E51A16703EC9855C77FC3D8BB21C8D68BB8CFB9DB4B8C8FBA773111C36C8B1B4E8F1ECE940EF9EAAD265458E06372009C9A0491678EF4
+    #[rustfmt::skip]
+    const COEFF_B: Fq = field_new!(Fq, "28798803903456388891410036793299405764940372360099938340752576406393880372126970068421383312482853541572780087363938442377933706865252053507077543420534380486492786626556269083255657125025963825610840222568694137138741554679540");
+
+    /// COFACTOR = 1
+    const COFACTOR: &'static [u64] = &[1];
+
+    /// COFACTOR^(-1) mod r =
+    /// 1
+    #[rustfmt::skip]
+    const COFACTOR_INV: Fr = FR_ONE;
+
+    /// AFFINE_GENERATOR_COEFFS = (G1_GENERATOR_X, G1_GENERATOR_Y)
+    const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) =
+        (G1_GENERATOR_X, G1_GENERATOR_Y);
+}
+
+// Generator of G1
+// X = 7790163481385331313124631546957228376128961350185262705123068027727518350362064426002432450801002268747950550964579198552865939244360469674540925037890082678099826733417900510086646711680891516503232107232083181010099241949569,
+// Y = 6913648190367314284606685101150155872986263667483624713540251048208073654617802840433842931301128643140890502238233930290161632176167186761333725658542781350626799660920481723757654531036893265359076440986158843531053720994648,
+/// G1_GENERATOR_X = 7790163481385331313124631546957228376128961350185262705123068027727518350362064426002432450801002268747950550964579198552865939244360469674540925037890082678099826733417900510086646711680891516503232107232083181010099241949569
+#[rustfmt::skip]
+pub const G1_GENERATOR_X: Fq = field_new!(Fq, "7790163481385331313124631546957228376128961350185262705123068027727518350362064426002432450801002268747950550964579198552865939244360469674540925037890082678099826733417900510086646711680891516503232107232083181010099241949569");
+
+/// G1_GENERATOR_Y = 6913648190367314284606685101150155872986263667483624713540251048208073654617802840433842931301128643140890502238233930290161632176167186761333725658542781350626799660920481723757654531036893265359076440986158843531053720994648
+#[rustfmt::skip]
+pub const G1_GENERATOR_Y: Fq = field_new!(Fq, "6913648190367314284606685101150155872986263667483624713540251048208073654617802840433842931301128643140890502238233930290161632176167186761333725658542781350626799660920481723757654531036893265359076440986158843531053720994648");
diff --git a/arkworks/curves/mnt4_753/src/curves/g2.rs b/arkworks/curves/mnt4_753/src/curves/g2.rs
new file mode 100644
index 00000000..5717dbcf
--- /dev/null
+++ b/arkworks/curves/mnt4_753/src/curves/g2.rs
@@ -0,0 +1,99 @@
+use ark_ec::{
+    mnt4,
+    mnt4::MNT4Parameters,
+    models::{ModelParameters, SWModelParameters},
+};
+use ark_ff::field_new;
+
+use crate::{Fq, Fq2, Fr, FQ_ZERO, G1_COEFF_A_NON_RESIDUE};
+
+pub type G2Affine = mnt4::G2Affine<Parameters>;
+pub type G2Projective = mnt4::G2Projective<Parameters>;
+pub type G2Prepared = mnt4::G2Prepared<Parameters>;
+
+#[derive(Clone, Default, PartialEq, Eq)]
+pub struct Parameters;
+
+impl ModelParameters for Parameters {
+    type BaseField = Fq2;
+    type ScalarField = Fr;
+}
+
+/// MUL_BY_A_C0 = NONRESIDUE * COEFF_A
+#[rustfmt::skip]
+pub const MUL_BY_A_C0: Fq = G1_COEFF_A_NON_RESIDUE;
+
+/// MUL_BY_A_C1 = NONRESIDUE * COEFF_A
+#[rustfmt::skip]
+pub const MUL_BY_A_C1: Fq = G1_COEFF_A_NON_RESIDUE;
+
+impl SWModelParameters for Parameters {
+    const COEFF_A: Fq2 = crate::Parameters::TWIST_COEFF_A;
+    // B coefficient of MNT4-753 G2 =
+    // ```
+    // mnt4753_twist_coeff_b = mnt4753_Fq2(mnt4753_Fq::zero(),
+    //                                     mnt4753_G1::coeff_b * mnt4753_Fq2::non_residue);
+    // non_residue = mnt4753_Fq2::non_residue = mnt4753_Fq("13");
+    //  = (ZERO, G1_B_COEFF * NON_RESIDUE);
+    //  =
+    //  (0, 39196523001581428369576759982967177918859161321667605855515469914917622337081756705006832951954384669101573360625169461998308377011601613979275218690841934572954991361632773738259652003389826903175898479855893660378722437317212)
+    // ```
+    #[rustfmt::skip]
+    const COEFF_B: Fq2 = field_new!(Fq2,
+        FQ_ZERO,
+        field_new!(Fq, "39196523001581428369576759982967177918859161321667605855515469914917622337081756705006832951954384669101573360625169461998308377011601613979275218690841934572954991361632773738259652003389826903175898479855893660378722437317212")
+    );
+
+    /// COFACTOR =
+    /// 41898490967918953402344214791240637128170709919953949071783502921025352812571106773058893763790338921418070971888049094905534395567574915333486969589229856772141392370549616644545554517640527237829320384324374366385444967219201
+    #[rustfmt::skip]
+    const COFACTOR: &'static [u64] = &[
+        16436257212445032449,
+        8690275530472745198,
+        
17315389657026393162, + 1645397558963170979, + 3544984605440726586, + 12665092767997125024, + 11083680675069097885, + 575819899841080717, + 6825179918269667443, + 13256793349531086829, + 1162650133526138285, + 497830423872529, + ]; + + /// COFACTOR^(-1) mod r = + /// 102345604409665481004734934052318066391634848395005988700111949231215905051467807945653833683883449458834877235200 + #[rustfmt::skip] + const COFACTOR_INV: Fr = field_new!(Fr, "102345604409665481004734934052318066391634848395005988700111949231215905051467807945653833683883449458834877235200"); + + /// AFFINE_GENERATOR_COEFFS = (G2_GENERATOR_X, G2_GENERATOR_Y) + const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = + (G2_GENERATOR_X, G2_GENERATOR_Y); + + #[inline(always)] + fn mul_by_a(elt: &Fq2) -> Fq2 { + field_new!(Fq2, MUL_BY_A_C0 * &elt.c0, MUL_BY_A_C1 * &elt.c1,) + } +} + +const G2_GENERATOR_X: Fq2 = field_new!(Fq2, G2_GENERATOR_X_C0, G2_GENERATOR_X_C1); +const G2_GENERATOR_Y: Fq2 = field_new!(Fq2, G2_GENERATOR_Y_C0, G2_GENERATOR_Y_C1); + +// Generator of G2 +// These are two Fq elements each because X and Y (and Z) are elements of Fq^2 +// X = 29483965110843144675703364744708836524643960105538608078862508397502447349913068434941060515343254862580437318493682762113105361632548148204806052114008731372757389645383891982211245013965175213456066452587869519098351487925167, +// 19706011319630172391076079624799753948158506771222147486237995321925443331396169656568431378974558350664383559981183980668976846806019030432389169137953988990802000581078994008283967768348275973921598166274857631001635633631000, +// Y = 39940152670760519653940320314827327941993141403708338666925204282084477074754642625849927569427860786384998614863651207257467076192649385174108085803168743803491780568503369317093191101779534035377266300185099318717465441820654, +// 17608637424964395737041291373756657139607306440193731804102457011726690702169238966996114255971643893157857311132388792357391583164125870757541009035041469463366528798593952884745987697403056488744603829437448927398468360797245, +#[rustfmt::skip] +pub const G2_GENERATOR_X_C0: Fq = field_new!(Fq, "29483965110843144675703364744708836524643960105538608078862508397502447349913068434941060515343254862580437318493682762113105361632548148204806052114008731372757389645383891982211245013965175213456066452587869519098351487925167"); + +#[rustfmt::skip] +pub const G2_GENERATOR_X_C1: Fq = field_new!(Fq, "19706011319630172391076079624799753948158506771222147486237995321925443331396169656568431378974558350664383559981183980668976846806019030432389169137953988990802000581078994008283967768348275973921598166274857631001635633631000"); + +#[rustfmt::skip] +pub const G2_GENERATOR_Y_C0: Fq = field_new!(Fq, "39940152670760519653940320314827327941993141403708338666925204282084477074754642625849927569427860786384998614863651207257467076192649385174108085803168743803491780568503369317093191101779534035377266300185099318717465441820654"); + +#[rustfmt::skip] +pub const G2_GENERATOR_Y_C1: Fq = field_new!(Fq, "17608637424964395737041291373756657139607306440193731804102457011726690702169238966996114255971643893157857311132388792357391583164125870757541009035041469463366528798593952884745987697403056488744603829437448927398468360797245"); diff --git a/arkworks/curves/mnt4_753/src/curves/mod.rs b/arkworks/curves/mnt4_753/src/curves/mod.rs new file mode 100644 index 00000000..e51d53b8 --- /dev/null +++ b/arkworks/curves/mnt4_753/src/curves/mod.rs @@ -0,0 +1,75 @@ +use ark_ec::models::mnt4::{MNT4Parameters, 
MNT4};
+use ark_ff::{biginteger::BigInteger768, field_new, Fp2};
+
+use crate::{Fq, Fq2, Fq2Parameters, Fq4Parameters, Fr};
+
+pub mod g1;
+pub mod g2;
+
+#[cfg(test)]
+mod tests;
+
+pub use self::{
+    g1::{G1Affine, G1Prepared, G1Projective},
+    g2::{G2Affine, G2Prepared, G2Projective},
+};
+
+pub type MNT4_753 = MNT4<Parameters>;
+
+pub struct Parameters;
+
+impl MNT4Parameters for Parameters {
+    const TWIST: Fp2<Self::Fp2Params> = field_new!(Fq2, FQ_ZERO, FQ_ONE);
+    // A coefficient of MNT4-753 G2 =
+    // ```
+    // mnt4753_twist_coeff_a = mnt4753_Fq2(mnt4753_G1::coeff_a * non_residue, mnt4753_Fq::zero());
+    //  = (A_COEFF * NONRESIDUE, ZERO)
+    //  = (26, ZERO)
+    // ```
+    #[rustfmt::skip]
+    const TWIST_COEFF_A: Fp2<Self::Fp2Params> = field_new!(Fq2,
+        G1_COEFF_A_NON_RESIDUE,
+        FQ_ZERO,
+    );
+    // https://github.com/o1-labs/snarky/blob/9c21ab2bb23874604640740d646a932e813432c3/snarkette/mnt4753.ml
+    const ATE_LOOP_COUNT: &'static [u64] = &[
+        8824542903220142080,
+        7711082599397206192,
+        8303354903384568230,
+        5874150271971943936,
+        9717849827920685054,
+        95829799234282493,
+    ];
+    const ATE_IS_LOOP_COUNT_NEG: bool = true;
+    const FINAL_EXPONENT_LAST_CHUNK_1: BigInteger768 =
+        BigInteger768([0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0]);
+    const FINAL_EXPONENT_LAST_CHUNK_W0_IS_NEG: bool = true;
+    const FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0: BigInteger768 = BigInteger768([
+        8824542903220142079,
+        7711082599397206192,
+        8303354903384568230,
+        5874150271971943936,
+        9717849827920685054,
+        95829799234282493,
+        0,
+        0,
+        0,
+        0,
+        0,
+        0,
+    ]);
+    type Fp = Fq;
+    type Fr = Fr;
+    type Fp2Params = Fq2Parameters;
+    type Fp4Params = Fq4Parameters;
+    type G1Parameters = self::g1::Parameters;
+    type G2Parameters = self::g2::Parameters;
+}
+
+// 26
+pub const G1_COEFF_A_NON_RESIDUE: Fq = field_new!(Fq, "26");
+
+pub const FQ_ZERO: Fq = field_new!(Fq, "0");
+pub const FQ_ONE: Fq = field_new!(Fq, "1");
+pub const FR_ZERO: Fr = field_new!(Fr, "0");
+pub const FR_ONE: Fr = field_new!(Fr, "1");
diff --git a/arkworks/curves/mnt4_753/src/curves/tests.rs b/arkworks/curves/mnt4_753/src/curves/tests.rs
new file mode 100644
index 00000000..8c3c9af6
--- /dev/null
+++ b/arkworks/curves/mnt4_753/src/curves/tests.rs
@@ -0,0 +1,91 @@
+use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve};
+use ark_ff::{Field, One, PrimeField, UniformRand};
+use ark_std::rand::Rng;
+use ark_std::test_rng;
+
+use crate::*;
+
+use ark_algebra_test_templates::{curves::*, groups::*};
+
+#[test]
+fn test_g1_projective_curve() {
+    curve_tests::<G1Projective>();
+
+    sw_tests::<g1::Parameters>();
+}
+
+#[test]
+fn test_g1_projective_group() {
+    let mut rng = test_rng();
+    let a: G1Projective = rng.gen();
+    let b: G1Projective = rng.gen();
+    group_test(a, b);
+}
+
+#[test]
+fn test_g1_generator() {
+    let generator = G1Affine::prime_subgroup_generator();
+    assert!(generator.is_on_curve());
+    assert!(generator.is_in_correct_subgroup_assuming_on_curve());
+}
+
+#[test]
+fn test_g2_projective_curve() {
+    curve_tests::<G2Projective>();
+
+    sw_tests::<g2::Parameters>();
+}
+
+#[test]
+fn test_g2_projective_group() {
+    let mut rng = test_rng();
+    let a: G2Projective = rng.gen();
+    let b: G2Projective = rng.gen();
+    group_test(a, b);
+}
+
+#[test]
+fn test_g2_generator() {
+    let generator = G2Affine::prime_subgroup_generator();
+    assert!(generator.is_on_curve());
+    assert!(generator.is_in_correct_subgroup_assuming_on_curve());
+}
+
+#[test]
+fn test_bilinearity() {
+    let mut rng = test_rng();
+    let a: G1Projective = rng.gen();
+    let b: G2Projective = rng.gen();
+    let s: Fr = rng.gen();
+
+    let sa = a.mul(s.into_repr());
+    let sb = b.mul(s.into_repr());
+
+    let ans1 = MNT4_753::pairing(sa, b);
+    let ans2 = MNT4_753::pairing(a, sb);
+    let ans3 = MNT4_753::pairing(a, b).pow(s.into_repr());
+
+    assert_eq!(ans1, ans2);
+    assert_eq!(ans2, ans3);
+
+    assert_ne!(ans1, Fq4::one());
+    assert_ne!(ans2, Fq4::one());
+    assert_ne!(ans3, Fq4::one());
+
+    assert_eq!(ans1.pow(Fr::characteristic()), Fq4::one());
+    assert_eq!(ans2.pow(Fr::characteristic()), Fq4::one());
+    assert_eq!(ans3.pow(Fr::characteristic()), Fq4::one());
+}
+
+#[test]
+fn test_product_of_pairings() {
+    let rng = &mut test_rng();
+
+    let a = G1Projective::rand(rng).into_affine();
+    let b = G2Projective::rand(rng).into_affine();
+    let c = G1Projective::rand(rng).into_affine();
+    let d = G2Projective::rand(rng).into_affine();
+    let ans1 = MNT4_753::pairing(a, b) * &MNT4_753::pairing(c, d);
+    let ans2 = MNT4_753::product_of_pairings(&[(a.into(), b.into()), (c.into(), d.into())]);
+    assert_eq!(ans1, ans2);
+}
diff --git a/arkworks/curves/mnt4_753/src/fields/fq.rs b/arkworks/curves/mnt4_753/src/fields/fq.rs
new file mode 100644
index 00000000..8b7d3f54
--- /dev/null
+++ b/arkworks/curves/mnt4_753/src/fields/fq.rs
@@ -0,0 +1,170 @@
+use ark_ff::{
+    biginteger::BigInteger768 as BigInteger,
+    fields::{FftParameters, Fp768, Fp768Parameters, FpParameters},
+};
+
+pub type Fq = Fp768<FqParameters>;
+
+pub struct FqParameters;
+
+impl Fp768Parameters for FqParameters {}
+impl FftParameters for FqParameters {
+    type BigInt = BigInteger;
+
+    const TWO_ADICITY: u32 = 15;
+
+    const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([
+        0x3b079c7556ac378,
+        0x2c8c74d04a3f00d4,
+        0xd3b001061b90d4cf,
+        0x946e77514891b0e6,
+        0x79caec8ad6dc9ea1,
+        0xbefd780edc81435d,
+        0xe093d4dca630b154,
+        0x43a0f673199f1c12,
+        0x92276c78436253ff,
+        0xe249d1cf014fcd24,
+        0x96f36471fb7c3ec5,
+        0x1080b8906b7c4,
+    ]);
+
+    const SMALL_SUBGROUP_BASE: Option<u32> = Some(5);
+    const SMALL_SUBGROUP_BASE_ADICITY: Option<u32> = Some(2);
+    /// LARGE_SUBGROUP_ROOT_OF_UNITY =
+    /// 12249458902762217747626832919710926618510011455364963726393752854649914979954138109976331601455448780251166045203053508523342111624583986869301658366625356826888785691823710598470775453742133593634524619429629803955083254436531
+    const LARGE_SUBGROUP_ROOT_OF_UNITY: Option<Self::BigInt> = Some(BigInteger([
+        8926681816978929800,
+        10873079436792120119,
+        6519893728366769435,
+        7899277225737766970,
+        8416573500933450083,
+        12951641800297678468,
+        7093775028595490583,
+        14327009285082556021,
+        18228411097456927576,
+        2823658094446565457,
+        1708328092507553067,
+        109589007594791,
+    ]));
+}
+impl FpParameters for FqParameters {
+    /// MODULUS = 41898490967918953402344214791240637128170709919953949071783502921025352812571106773058893763790338921418070971888253786114353726529584385201591605722013126468931404347949840543007986327743462853720628051692141265303114721689601
+    const MODULUS: BigInteger = BigInteger([
+        0x5e9063de245e8001,
+        0xe39d54522cdd119f,
+        0x638810719ac425f0,
+        0x685acce9767254a4,
+        0xb80f0da5cb537e38,
+        0xb117e776f218059d,
+        0x99d124d9a15af79d,
+        0x7fdb925e8a0ed8d,
+        0x5eb7e8f96c97d873,
+        0xb7f997505b8fafed,
+        0x10229022eee2cdad,
+        0x1c4c62d92c411,
+    ]);
+
+    const MODULUS_BITS: u32 = 753;
+
+    const CAPACITY: u32 = Self::MODULUS_BITS - 1;
+
+    const REPR_SHAVE_BITS: u32 = 15;
+
+    const R: BigInteger = BigInteger([
+        0x98a8ecabd9dc6f42,
+        0x91cd31c65a034686,
+        0x97c3e4a0cd14572e,
+        0x79589819c788b601,
+        0xed269c942108976f,
+        0x1e0f4d8acf031d68,
+        0x320c3bb713338559,
+        0x598b4302d2f00a62,
+        0x4074c9cbfd8ca621,
+        0xfa47edb3865e88c,
+        0x95455fb31ff9a195,
0x7b479ec8e242, + ]); + + const R2: BigInteger = BigInteger([ + 0x84717088cfd190c8, + 0xc7d9ff8e7df03c0a, + 0xa24bea56242b3507, + 0xa896a656a0714c7d, + 0x80a46659ff6f3ddf, + 0x2f47839ef88d7ce8, + 0xa8c86d4604a3b597, + 0xe03c79cac4f7ef07, + 0x2505daf1f4a81245, + 0x8e4605754c381723, + 0xb081f15bcbfdacaf, + 0x2a33e89cb485, + ]); + + const INV: u64 = 0xf2044cfbe45e7fff; + + const GENERATOR: BigInteger = BigInteger([ + 0xa8f627f0e629635e, + 0x202afce346c36872, + 0x85e1ece733493254, + 0x6d76e610664ac389, + 0xdf542f3f04441585, + 0x3aa4885bf6d4dd80, + 0xeb8b63c1c0fffc74, + 0xd2488e985f6cfa4e, + 0xcce1c2a623f7a66a, + 0x2a060f4d5085b19a, + 0xa9111a596408842f, + 0x11ca8d50bf627, + ]); + + const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0xaf4831ef122f4000, + 0x71ceaa29166e88cf, + 0x31c40838cd6212f8, + 0x342d6674bb392a52, + 0xdc0786d2e5a9bf1c, + 0xd88bf3bb790c02ce, + 0xcce8926cd0ad7bce, + 0x83fedc92f45076c6, + 0xaf5bf47cb64bec39, + 0xdbfccba82dc7d7f6, + 0x88114811777166d6, + 0xe26316c96208, + ]); + + // T and T_MINUS_ONE_DIV_TWO, where MODULUS - 1 = 2^S * T + + /// T = (MODULUS - 1) / 2^S = + /// 1278640471433073529124274133033466709233725278318907137200424283478556909563327233064541435662546964154604216671394463687571830033251476599169665701965732619291119517454523942352538645255842982596454713491581459512424155325 + const T: BigInteger = BigInteger([ + 0x233ebd20c7bc48bd, + 0x4be1c73aa8a459ba, + 0xa948c71020e33588, + 0xfc70d0b599d2ece4, + 0xb3b701e1b4b96a6, + 0xef3b622fceede430, + 0xdb1b33a249b342b5, + 0xb0e60ffb724bd141, + 0x5fdabd6fd1f2d92f, + 0x9b5b6ff32ea0b71f, + 0x882220452045ddc5, + 0x3898c5b25, + ]); + + /// (T - 1) / 2 = + /// 639320235716536764562137066516733354616862639159453568600212141739278454781663616532270717831273482077302108335697231843785915016625738299584832850982866309645559758727261971176269322627921491298227356745790729756212077662 + const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0x119f5e9063de245e, + 0x25f0e39d54522cdd, + 0x54a4638810719ac4, + 0x7e38685acce97672, + 0x59db80f0da5cb53, + 0xf79db117e776f218, + 0xed8d99d124d9a15a, + 0xd87307fdb925e8a0, + 0xafed5eb7e8f96c97, + 0xcdadb7f997505b8f, + 0xc41110229022eee2, + 0x1c4c62d92, + ]); +} diff --git a/arkworks/curves/mnt4_753/src/fields/fq2.rs b/arkworks/curves/mnt4_753/src/fields/fq2.rs new file mode 100644 index 00000000..00a5664e --- /dev/null +++ b/arkworks/curves/mnt4_753/src/fields/fq2.rs @@ -0,0 +1,28 @@ +use crate::{Fq, FQ_ONE}; +use ark_ff::{ + field_new, + fields::fp2::{Fp2, Fp2Parameters}, +}; + +pub type Fq2 = Fp2; + +pub struct Fq2Parameters; + +impl Fp2Parameters for Fq2Parameters { + type Fp = Fq; + + // non_residue = 13 + #[rustfmt::skip] + const NONRESIDUE: Fq = field_new!(Fq, "13"); + + // qnr = (8, 1) + const QUADRATIC_NONRESIDUE: (Self::Fp, Self::Fp) = (field_new!(Fq, "8"), FQ_ONE); + + // Coefficients: + // [1, 41898490967918953402344214791240637128170709919953949071783502921025352812571106773058893763790338921418070971888253786114353726529584385201591605722013126468931404347949840543007986327743462853720628051692141265303114721689600] + // see https://github.com/o1-labs/snarky/blob/2cf5ef3a14989e57c17518832b3c52590068fc48/src/camlsnark_c/libsnark-caml/depends/libff/libff/algebra/curves/mnt753/mnt4753/mnt4753_init.cpp + const FROBENIUS_COEFF_FP2_C1: &'static [Self::Fp] = &[ + FQ_ONE, + field_new!(Fq, 
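+        /* Added note: this second Frobenius coefficient equals
+           NONRESIDUE^((q - 1) / 2) = -1 in Fq (i.e. MODULUS - 1),
+           written out in full below. */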
"41898490967918953402344214791240637128170709919953949071783502921025352812571106773058893763790338921418070971888253786114353726529584385201591605722013126468931404347949840543007986327743462853720628051692141265303114721689600"), + ]; +} diff --git a/arkworks/curves/mnt4_753/src/fields/fq4.rs b/arkworks/curves/mnt4_753/src/fields/fq4.rs new file mode 100644 index 00000000..0d141387 --- /dev/null +++ b/arkworks/curves/mnt4_753/src/fields/fq4.rs @@ -0,0 +1,30 @@ +use crate::{Fq, Fq2, Fq2Parameters, FQ_ONE, FQ_ZERO}; +use ark_ff::{ + field_new, + fields::fp4::{Fp4, Fp4Parameters}, +}; + +pub type Fq4 = Fp4; + +pub struct Fq4Parameters; + +impl Fp4Parameters for Fq4Parameters { + type Fp2Params = Fq2Parameters; + + const NONRESIDUE: Fq2 = field_new!(Fq2, FQ_ZERO, FQ_ONE); + + // Coefficients for the Frobenius automorphism. + // c1[0] = 1, + // c1[1] = 18691656569803771296244054523431852464958959799019013859007259692542121208304602539555350517075508287829753932558576476751900235650227380562700444433662761577027341858128610410779088384480737679672900770810745291515010467307990 + // c1[2] = 41898490967918953402344214791240637128170709919953949071783502921025352812571106773058893763790338921418070971888253786114353726529584385201591605722013126468931404347949840543007986327743462853720628051692141265303114721689600 + // c1[3] = 23206834398115182106100160267808784663211750120934935212776243228483231604266504233503543246714830633588317039329677309362453490879357004638891161288350364891904062489821230132228897943262725174047727280881395973788104254381611 + // + // These are calculated as `FROBENIUS_COEFF_FP4_C1[i] = Fp2Params::NONRESIDUE^((q^i - 1) / 4)`. + #[rustfmt::skip] + const FROBENIUS_COEFF_FP4_C1: &'static [Fq] = &[ + FQ_ONE, + field_new!(Fq, "18691656569803771296244054523431852464958959799019013859007259692542121208304602539555350517075508287829753932558576476751900235650227380562700444433662761577027341858128610410779088384480737679672900770810745291515010467307990"), + field_new!(Fq, "41898490967918953402344214791240637128170709919953949071783502921025352812571106773058893763790338921418070971888253786114353726529584385201591605722013126468931404347949840543007986327743462853720628051692141265303114721689600"), + field_new!(Fq, "23206834398115182106100160267808784663211750120934935212776243228483231604266504233503543246714830633588317039329677309362453490879357004638891161288350364891904062489821230132228897943262725174047727280881395973788104254381611"), + ]; +} diff --git a/arkworks/curves/mnt4_753/src/fields/fr.rs b/arkworks/curves/mnt4_753/src/fields/fr.rs new file mode 100644 index 00000000..5e4d7738 --- /dev/null +++ b/arkworks/curves/mnt4_753/src/fields/fr.rs @@ -0,0 +1,151 @@ +use ark_ff::{ + biginteger::BigInteger768 as BigInteger, + fields::{FftParameters, Fp768, Fp768Parameters, FpParameters}, +}; + +pub type Fr = Fp768; + +pub struct FrParameters; + +impl Fp768Parameters for FrParameters {} +impl FftParameters for FrParameters { + type BigInt = BigInteger; + + const TWO_ADICITY: u32 = 30; + + const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([ + 0x307f66b297671883, + 0xd72a7f2b1e645f4e, + 0x67079daa9a902283, + 0xf33f7620a86c668b, + 0x8878570d66464c12, + 0xa557af5b524f522b, + 0x5fafa3f6ef19319d, + 0x1eb9e04110a65629, + 0x3f96feb3c639a0b0, + 0x4d4fe37df3ffd732, + 0xadc831bd55bcf3e9, + 0x1b9f32a8bd6ab, + ]); +} +impl FpParameters for FrParameters { + /// MODULUS = 
41898490967918953402344214791240637128170709919953949071783502921025352812571106773058893763790338921418070971888458477323173057491593855069696241854796396165721416325350064441470418137846398469611935719059908164220784476160001 + const MODULUS: BigInteger = BigInteger([ + 0xd90776e240000001, + 0x4ea099170fa13a4f, + 0xd6c381bc3f005797, + 0xb9dff97634993aa4, + 0x3eebca9429212636, + 0xb26c5c28c859a99b, + 0x99d124d9a15af79d, + 0x7fdb925e8a0ed8d, + 0x5eb7e8f96c97d873, + 0xb7f997505b8fafed, + 0x10229022eee2cdad, + 0x1c4c62d92c411, + ]); + + const MODULUS_BITS: u32 = 753; + + const CAPACITY: u32 = Self::MODULUS_BITS - 1; + + const REPR_SHAVE_BITS: u32 = 15; + + const R: BigInteger = BigInteger([ + 0xb99680147fff6f42, + 0x4eb16817b589cea8, + 0xa1ebd2d90c79e179, + 0xf725caec549c0da, + 0xab0c4ee6d3e6dad4, + 0x9fbca908de0ccb62, + 0x320c3bb713338498, + 0x598b4302d2f00a62, + 0x4074c9cbfd8ca621, + 0xfa47edb3865e88c, + 0x95455fb31ff9a195, + 0x7b479ec8e242, + ]); + + const R2: BigInteger = BigInteger([ + 0x3f9c69c7b7f4c8d1, + 0x70a50fa9ee48d127, + 0xcdbe6702009569cb, + 0x6bd8c6c6c49edc38, + 0x7955876cc35ee94e, + 0xc7285529be54a3f4, + 0xded52121ecec77cf, + 0x99be80f2ee12ee8e, + 0xc8a0ff01493bdcef, + 0xacc27988f3d9a316, + 0xd9e817a8fb44b3c9, + 0x5b58037e0e4, + ]); + + const INV: u64 = 0xc90776e23fffffff; + + const GENERATOR: BigInteger = BigInteger([ + 0xeee0a5d37ff6635e, + 0xff458536cfa1cff4, + 0x659af978d8169ab0, + 0x1f1841c24780e3f1, + 0x602213036dcfef3a, + 0xd1d5c8f39d72db20, + 0xeb8b63c1c0ffefab, + 0xd2488e985f6cfa4e, + 0xcce1c2a623f7a66a, + 0x2a060f4d5085b19a, + 0xa9111a596408842f, + 0x11ca8d50bf627, + ]); + + const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0xec83bb7120000000, + 0xa7504c8b87d09d27, + 0x6b61c0de1f802bcb, + 0x5ceffcbb1a4c9d52, + 0x9f75e54a1490931b, + 0xd9362e14642cd4cd, + 0xcce8926cd0ad7bce, + 0x83fedc92f45076c6, + 0xaf5bf47cb64bec39, + 0xdbfccba82dc7d7f6, + 0x88114811777166d6, + 0xe26316c96208, + ]); + + // T and T_MINUS_ONE_DIV_TWO, where MODULUS - 1 = 2^S * T + + /// T = (MODULUS - 1) / 2^S = + /// 39021010480745652133919498688765463538626870065884617224134041854204007249857398469987226430131438115069708760723898631821547688442835449306011425196003537779414482717728302293895201885929702287178426719326440397855625 + const T: BigInteger = BigInteger([ + 0x3e84e93f641ddb89, + 0xfc015e5d3a82645c, + 0xd264ea935b0e06f0, + 0xa48498dae77fe5d8, + 0x2166a66cfbaf2a50, + 0x856bde76c9b170a3, + 0xa283b63667449366, + 0xb25f61cc1ff6e497, + 0x6e3ebfb57adfa3e5, + 0xbb8b36b6dfe65d41, + 0xb64b1044408a408b, + 0x71318, + ]); + + /// (T - 1) / 2 = + /// 19510505240372826066959749344382731769313435032942308612067020927102003624928699234993613215065719057534854380361949315910773844221417724653005712598001768889707241358864151146947600942964851143589213359663220198927812 + const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([ + 0x1f42749fb20eedc4, + 0x7e00af2e9d41322e, + 0x69327549ad870378, + 0x52424c6d73bff2ec, + 0x90b353367dd79528, + 0x42b5ef3b64d8b851, + 0xd141db1b33a249b3, + 0xd92fb0e60ffb724b, + 0xb71f5fdabd6fd1f2, + 0xddc59b5b6ff32ea0, + 0x5b25882220452045, + 0x3898c, + ]); +} diff --git a/arkworks/curves/mnt4_753/src/fields/mod.rs b/arkworks/curves/mnt4_753/src/fields/mod.rs new file mode 100644 index 00000000..719e03be --- /dev/null +++ b/arkworks/curves/mnt4_753/src/fields/mod.rs @@ -0,0 +1,22 @@ +#[cfg(feature = "scalar_field")] +pub mod fr; +#[cfg(feature = "scalar_field")] +pub use self::fr::*; + +#[cfg(feature = "base_field")] +pub mod fq; +#[cfg(feature = "base_field")] +pub use 
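+/* Added note: the field modules here are gated on Cargo features
+   ("scalar_field", "base_field", "curve") so dependents can compile only
+   the fields they need; ark-mnt6-753, for instance, enables just
+   "scalar_field" and "base_field" from this crate. */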
self::fq::*;
+
+#[cfg(feature = "curve")]
+pub mod fq2;
+#[cfg(feature = "curve")]
+pub use self::fq2::*;
+
+#[cfg(feature = "curve")]
+pub mod fq4;
+#[cfg(feature = "curve")]
+pub use self::fq4::*;
+
+#[cfg(all(feature = "curve", test))]
+mod tests;
diff --git a/arkworks/curves/mnt4_753/src/fields/tests.rs b/arkworks/curves/mnt4_753/src/fields/tests.rs
new file mode 100644
index 00000000..26efa29f
--- /dev/null
+++ b/arkworks/curves/mnt4_753/src/fields/tests.rs
@@ -0,0 +1,46 @@
+use ark_ff::Field;
+use ark_std::rand::Rng;
+use ark_std::test_rng;
+
+use crate::*;
+
+use ark_algebra_test_templates::fields::*;
+
+#[test]
+fn test_fr() {
+    let mut rng = test_rng();
+    let a: Fr = rng.gen();
+    let b: Fr = rng.gen();
+    field_test(a, b);
+    sqrt_field_test(a);
+    primefield_test::<Fr>();
+}
+
+#[test]
+fn test_fq() {
+    let mut rng = test_rng();
+    let a: Fq = rng.gen();
+    let b: Fq = rng.gen();
+    field_test(a, b);
+    sqrt_field_test(a);
+    primefield_test::<Fq>();
+}
+
+#[test]
+fn test_fq2() {
+    let mut rng = test_rng();
+    let a: Fq2 = rng.gen();
+    let b: Fq2 = rng.gen();
+    field_test(a, b);
+    sqrt_field_test(a);
+    frobenius_test::<Fq2, _>(Fq::characteristic(), 13);
+}
+
+#[test]
+fn test_fq4() {
+    let mut rng = test_rng();
+    let a: Fq4 = rng.gen();
+    let b: Fq4 = rng.gen();
+    field_test(a, b);
+    frobenius_test::<Fq4, _>(Fq::characteristic(), 13);
+}
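+
+// An illustrative check added as a sketch (not upstream; it uses only
+// `Field::frobenius_map`, which the templates above already exercise): the
+// Frobenius endomorphism x -> x^q has order 2 on the quadratic extension
+// Fq2, so applying it twice is the identity.
+#[test]
+fn test_fq2_frobenius_order() {
+    let mut rng = test_rng();
+    let a: Fq2 = rng.gen();
+    let mut b = a;
+    b.frobenius_map(2);
+    assert_eq!(a, b);
+}
diff --git a/arkworks/curves/mnt4_753/src/lib.rs b/arkworks/curves/mnt4_753/src/lib.rs
new file mode 100644
index 00000000..f7d0bb69
--- /dev/null
+++ b/arkworks/curves/mnt4_753/src/lib.rs
@@ -0,0 +1,40 @@
+#![cfg_attr(not(feature = "std"), no_std)]
+#![deny(
+    warnings,
+    unused,
+    future_incompatible,
+    nonstandard_style,
+    rust_2018_idioms
+)]
+#![forbid(unsafe_code)]
+
+//! This library implements the MNT4_753 curve generated in
+//! [\[BCTV14\]](https://eprint.iacr.org/2014/595). The name denotes that it is a
+//! Miyaji--Nakabayashi--Takano curve of embedding degree 4, defined over a 753-bit (prime) field.
+//! The main feature of this curve is that its scalar field and base field respectively equal the
+//! base field and scalar field of MNT6_753.
+//!
+//! Curve information:
+//! * Base field: q = 0x01C4C62D92C41110229022EEE2CDADB7F997505B8FAFED5EB7E8F96C97D87307FDB925E8A0ED8D99D124D9A15AF79DB117E776F218059DB80F0DA5CB537E38685ACCE9767254A4638810719AC425F0E39D54522CDD119F5E9063DE245E8001
+//! * Scalar field: r = 0x01C4C62D92C41110229022EEE2CDADB7F997505B8FAFED5EB7E8F96C97D87307FDB925E8A0ED8D99D124D9A15AF79DB26C5C28C859A99B3EEBCA9429212636B9DFF97634993AA4D6C381BC3F0057974EA099170FA13A4FD90776E240000001
+//! * valuation(q - 1, 2) = 15
+//! * valuation(r - 1, 2) = 30
+//! * G1 curve equation: y^2 = x^3 + ax + b, where
+//!   * a = 2
+//!   * b = 0x01373684A8C9DCAE7A016AC5D7748D3313CD8E39051C596560835DF0C9E50A5B59B882A92C78DC537E51A16703EC9855C77FC3D8BB21C8D68BB8CFB9DB4B8C8FBA773111C36C8B1B4E8F1ECE940EF9EAAD265458E06372009C9A0491678EF4
+//! * G2 curve equation: y^2 = x^3 + Ax + B, where
+//!   * A = Fq2 = (a * NON_RESIDUE, 0)
+//!   * B = Fq2(0, b * NON_RESIDUE)
+//! 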
* NON_RESIDUE = 13 is the quadratic non-residue used to construct the extension field Fq2
+
+#[cfg(feature = "r1cs")]
+pub mod constraints;
+#[cfg(feature = "curve")]
+mod curves;
+#[cfg(any(feature = "scalar_field", feature = "base_field"))]
+mod fields;
+
+#[cfg(feature = "curve")]
+pub use curves::*;
+#[cfg(any(feature = "scalar_field", feature = "base_field"))]
+pub use fields::*;
diff --git a/arkworks/curves/mnt6_298/Cargo.toml b/arkworks/curves/mnt6_298/Cargo.toml
new file mode 100644
index 00000000..68e5ec9e
--- /dev/null
+++ b/arkworks/curves/mnt6_298/Cargo.toml
@@ -0,0 +1,31 @@
+[package]
+name = "ark-mnt6-298"
+version = "0.3.0"
+authors = [ "arkworks contributors" ]
+description = "The MNT6-298 pairing-friendly elliptic curve"
+homepage = "https://arkworks.rs"
+repository = "https://github.com/arkworks-rs/algebra"
+documentation = "https://docs.rs/ark-mnt6-298/"
+keywords = ["cryptography", "finite-fields", "elliptic-curves"]
+categories = ["cryptography"]
+include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"]
+license = "MIT/Apache-2.0"
+edition = "2018"
+
+[dependencies]
+ark-ff = { version = "^0.3.0", default-features = false }
+ark-ec = { version = "^0.3.0", default-features = false }
+ark-std = { version = "^0.3.0", default-features = false }
+ark-r1cs-std = { version = "^0.3.0", default-features = false, optional = true }
+ark-mnt4-298 = { version = "^0.3.0", path = "../mnt4_298", default-features = false, features = [ "scalar_field", "base_field" ] }
+
+[dev-dependencies]
+ark-relations = { version = "^0.3.0", default-features = false }
+ark-serialize = { version = "^0.3.0", default-features = false }
+ark-algebra-test-templates = { version = "^0.3.0", default-features = false }
+ark-curve-constraint-tests = { path = "../curve-constraint-tests", default-features = false }
+
+[features]
+default = []
+std = [ "ark-std/std", "ark-ff/std", "ark-ec/std" ]
+r1cs = [ "ark-r1cs-std" ]
\ No newline at end of file
diff --git a/arkworks/curves/mnt6_298/src/constraints/curves.rs b/arkworks/curves/mnt6_298/src/constraints/curves.rs
new file mode 100644
index 00000000..069ee9e2
--- /dev/null
+++ b/arkworks/curves/mnt6_298/src/constraints/curves.rs
@@ -0,0 +1,29 @@
+use crate::Parameters;
+use ark_r1cs_std::groups::mnt6;
+
+/// An element of G1 in the MNT6-298 bilinear group.
+pub type G1Var = mnt6::G1Var<Parameters>;
+/// An element of G2 in the MNT6-298 bilinear group.
+pub type G2Var = mnt6::G2Var<Parameters>;
+
+/// Represents the cached precomputation that can be performed on a G1 element
+/// which enables speeding up pairing computation.
+pub type G1PreparedVar = mnt6::G1PreparedVar<Parameters>;
+/// Represents the cached precomputation that can be performed on a G2 element
+/// which enables speeding up pairing computation.
+pub type G2PreparedVar = mnt6::G2PreparedVar<Parameters>;
+
+#[test]
+fn test() {
+    use ark_ec::models::mnt6::MNT6Parameters;
+    ark_curve_constraint_tests::curves::sw_test::<
+        <Parameters as MNT6Parameters>::G1Parameters,
+        G1Var,
+    >()
+    .unwrap();
+    ark_curve_constraint_tests::curves::sw_test::<
+        <Parameters as MNT6Parameters>::G2Parameters,
+        G2Var,
+    >()
+    .unwrap();
+}
diff --git a/arkworks/curves/mnt6_298/src/constraints/fields.rs b/arkworks/curves/mnt6_298/src/constraints/fields.rs
new file mode 100644
index 00000000..045db64f
--- /dev/null
+++ b/arkworks/curves/mnt6_298/src/constraints/fields.rs
@@ -0,0 +1,26 @@
+use crate::{Fq, Fq3Parameters, Fq6Parameters};
+
+use ark_r1cs_std::fields::{fp::FpVar, fp3::Fp3Var, fp6_2over3::Fp6Var};
+
+/// A variable that is the R1CS equivalent of `crate::Fq`.
+pub type FqVar = FpVar<Fq>;
+/// A variable that is the R1CS equivalent of `crate::Fq3`.
+pub type Fq3Var = Fp3Var<Fq3Parameters>;
+/// A variable that is the R1CS equivalent of `crate::Fq6`.
+pub type Fq6Var = Fp6Var<Fq6Parameters>;
+
+#[test]
+fn mnt6_298_field_gadgets_test() {
+    use super::*;
+    use crate::{Fq, Fq3, Fq6};
+    use ark_curve_constraint_tests::fields::*;
+
+    field_test::<_, _, FqVar>().unwrap();
+    frobenius_tests::<Fq, _, FqVar>(13).unwrap();
+
+    field_test::<_, _, Fq3Var>().unwrap();
+    frobenius_tests::<Fq3, _, Fq3Var>(13).unwrap();
+
+    field_test::<_, _, Fq6Var>().unwrap();
+    frobenius_tests::<Fq6, _, Fq6Var>(13).unwrap();
+}
diff --git a/arkworks/curves/mnt6_298/src/constraints/mod.rs b/arkworks/curves/mnt6_298/src/constraints/mod.rs
new file mode 100644
index 00000000..00925676
--- /dev/null
+++ b/arkworks/curves/mnt6_298/src/constraints/mod.rs
@@ -0,0 +1,158 @@
+//! This module implements the R1CS equivalent of `ark_mnt6_298`.
+//!
+//! It implements field variables for `crate::{Fq, Fq3, Fq6}`,
+//! group variables for `crate::{G1, G2}`, and implements constraint
+//! generation for computing `MNT6_298::pairing`.
+//!
+//! The field underlying these constraints is `crate::Fq`.
+//!
+//! # Examples
+//!
+//! One can perform standard algebraic operations on `FqVar`:
+//!
+//! ```
+//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
+//! use ark_std::UniformRand;
+//! use ark_relations::r1cs::*;
+//! use ark_r1cs_std::prelude::*;
+//! use ark_mnt6_298::{*, constraints::*};
+//!
+//! let cs = ConstraintSystem::<Fq>::new_ref();
+//! // This rng is just for test purposes; do not use it
+//! // in real applications.
+//! let mut rng = ark_std::test_rng();
+//!
+//! // Generate some random `Fq` elements.
+//! let a_native = Fq::rand(&mut rng);
+//! let b_native = Fq::rand(&mut rng);
+//!
+//! // Allocate `a_native` and `b_native` as witness variables in `cs`.
+//! let a = FqVar::new_witness(ark_relations::ns!(cs, "generate_a"), || Ok(a_native))?;
+//! let b = FqVar::new_witness(ark_relations::ns!(cs, "generate_b"), || Ok(b_native))?;
+//!
+//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any
+//! // constraints or variables.
+//! let a_const = FqVar::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?;
+//! let b_const = FqVar::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?;
+//!
+//! let one = FqVar::one();
+//! let zero = FqVar::zero();
+//!
+//! // Sanity check one + one = two
+//! let two = &one + &one + &zero;
+//! two.enforce_equal(&one.double()?)?;
+//!
+//! assert!(cs.is_satisfied()?);
+//!
+//! // Check that the value of &a + &b is correct.
+//! assert_eq!((&a + &b).value()?, a_native + &b_native);
+//!
+//! // Check that the value of &a * &b is correct.
+//! assert_eq!((&a * &b).value()?, a_native * &b_native);
+//!
+//! // Check that operations on variables and constants are equivalent.
+//! (&a + &b).enforce_equal(&(&a_const + &b_const))?;
+//! assert!(cs.is_satisfied()?);
+//! # Ok(())
+//! # }
+//! ```
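+//!
+//! The same allocation interface also supports *public inputs* (a short
+//! sketch added for exposition; `new_input` is the `AllocVar` method for
+//! instance variables, alongside the `new_witness` and `new_constant`
+//! calls above):
+//!
+//! ```
+//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
+//! # use ark_std::UniformRand;
+//! # use ark_relations::r1cs::*;
+//! # use ark_r1cs_std::prelude::*;
+//! # use ark_mnt6_298::{*, constraints::*};
+//! # let cs = ConstraintSystem::<Fq>::new_ref();
+//! # let mut rng = ark_std::test_rng();
+//! let x_native = Fq::rand(&mut rng);
+//! // Public inputs are part of the statement being proven; witnesses are not.
+//! let x = FqVar::new_input(ark_relations::ns!(cs, "x"), || Ok(x_native))?;
+//! let x_const = FqVar::new_constant(ark_relations::ns!(cs, "x_const"), x_native)?;
+//! x.enforce_equal(&x_const)?;
+//! assert!(cs.is_satisfied()?);
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! One can also perform standard algebraic operations on `G1Var` and `G2Var`:
+//!
+//! ```
+//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
+//! # use ark_std::UniformRand;
+//! # use ark_relations::r1cs::*;
+//! # use ark_r1cs_std::prelude::*;
+//! # use ark_mnt6_298::{*, constraints::*};
+//!
+//! # let cs = ConstraintSystem::<Fq>::new_ref();
+//! # let mut rng = ark_std::test_rng();
+//!
+//! // Generate some random `G1` elements.
+//! let a_native = G1Projective::rand(&mut rng);
+//! let b_native = G1Projective::rand(&mut rng);
+//!
+//! 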
// Allocate `a_native` and `b_native` as witness variables in `cs`.
+//! let a = G1Var::new_witness(ark_relations::ns!(cs, "a"), || Ok(a_native))?;
+//! let b = G1Var::new_witness(ark_relations::ns!(cs, "b"), || Ok(b_native))?;
+//!
+//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any
+//! // constraints or variables.
+//! let a_const = G1Var::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?;
+//! let b_const = G1Var::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?;
+//!
+//! // This returns the identity of `G1`.
+//! let zero = G1Var::zero();
+//!
+//! // Sanity check one + one = two
+//! let two_a = &a + &a + &zero;
+//! two_a.enforce_equal(&a.double()?)?;
+//!
+//! assert!(cs.is_satisfied()?);
+//!
+//! // Check that the value of &a + &b is correct.
+//! assert_eq!((&a + &b).value()?, a_native + &b_native);
+//!
+//! // Check that operations on variables and constants are equivalent.
+//! (&a + &b).enforce_equal(&(&a_const + &b_const))?;
+//! assert!(cs.is_satisfied()?);
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! Finally, one can check pairing computations as well:
+//!
+//! ```
+//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
+//! # use ark_std::UniformRand;
+//! # use ark_ec::PairingEngine;
+//! # use ark_relations::r1cs::*;
+//! # use ark_r1cs_std::prelude::*;
+//! # use ark_mnt6_298::{*, constraints::*};
+//!
+//! # let cs = ConstraintSystem::<Fq>::new_ref();
+//! # let mut rng = ark_std::test_rng();
+//!
+//! // Generate random `G1` and `G2` elements.
+//! let a_native = G1Projective::rand(&mut rng);
+//! let b_native = G2Projective::rand(&mut rng);
+//!
+//! // Allocate `a_native` and `b_native` as witness variables in `cs`.
+//! let a = G1Var::new_witness(ark_relations::ns!(cs, "a"), || Ok(a_native))?;
+//! let b = G2Var::new_witness(ark_relations::ns!(cs, "b"), || Ok(b_native))?;
+//!
+//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any
+//! // constraints or variables.
+//! let a_const = G1Var::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?;
+//! let b_const = G2Var::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?;
+//!
+//! let pairing_result_native = MNT6_298::pairing(a_native, b_native);
+//!
+//! // Prepare `a` and `b` for pairing.
+//! let a_prep = constraints::PairingVar::prepare_g1(&a)?;
+//! let b_prep = constraints::PairingVar::prepare_g2(&b)?;
+//! let pairing_result = constraints::PairingVar::pairing(a_prep, b_prep)?;
+//!
+//! // Check that the value of the pairing matches the native computation.
+//! assert_eq!(pairing_result.value()?, pairing_result_native);
+//!
+//! // Check that operations on variables and constants are equivalent.
+//! let a_prep_const = constraints::PairingVar::prepare_g1(&a_const)?;
+//! let b_prep_const = constraints::PairingVar::prepare_g2(&b_const)?;
+//! let pairing_result_const = constraints::PairingVar::pairing(a_prep_const, b_prep_const)?;
+//!
+//! pairing_result.enforce_equal(&pairing_result_const)?;
+//! assert!(cs.is_satisfied()?);
+//! # Ok(())
+//! # }
+//! 
```
+
+mod curves;
+mod fields;
+mod pairing;
+
+pub use curves::*;
+pub use fields::*;
+pub use pairing::*;
diff --git a/arkworks/curves/mnt6_298/src/constraints/pairing.rs b/arkworks/curves/mnt6_298/src/constraints/pairing.rs
new file mode 100644
index 00000000..14641bea
--- /dev/null
+++ b/arkworks/curves/mnt6_298/src/constraints/pairing.rs
@@ -0,0 +1,10 @@
+use crate::Parameters;
+
+/// Specifies the constraints for computing a pairing in the MNT6-298 bilinear group.
+pub type PairingVar = ark_r1cs_std::pairing::mnt6::PairingVar<Parameters>;
+
+#[test]
+fn test() {
+    use crate::MNT6_298;
+    ark_curve_constraint_tests::pairing::bilinearity_test::<MNT6_298, PairingVar>().unwrap()
+}
diff --git a/arkworks/curves/mnt6_298/src/curves/g1.rs b/arkworks/curves/mnt6_298/src/curves/g1.rs
new file mode 100644
index 00000000..da37c1c5
--- /dev/null
+++ b/arkworks/curves/mnt6_298/src/curves/g1.rs
@@ -0,0 +1,49 @@
+use ark_ec::{
+    mnt6,
+    models::{ModelParameters, SWModelParameters},
+};
+use ark_ff::field_new;
+
+use crate::{Fq, Fr};
+
+pub type G1Affine = mnt6::G1Affine<Parameters>;
+pub type G1Projective = mnt6::G1Projective<Parameters>;
+pub type G1Prepared = mnt6::G1Prepared<Parameters>;
+
+#[derive(Clone, Default, PartialEq, Eq)]
+pub struct Parameters;
+
+impl ModelParameters for Parameters {
+    type BaseField = Fq;
+    type ScalarField = Fr;
+}
+
+impl SWModelParameters for Parameters {
+    /// COEFF_A = 11
+    #[rustfmt::skip]
+    const COEFF_A: Fq = field_new!(Fq, "11");
+
+    /// COEFF_B = 106700080510851735677967319632585352256454251201367587890185989362936000262606668469523074
+    #[rustfmt::skip]
+    const COEFF_B: Fq = field_new!(Fq, "106700080510851735677967319632585352256454251201367587890185989362936000262606668469523074");
+
+    /// COFACTOR = 1
+    const COFACTOR: &'static [u64] = &[1];
+
+    /// COFACTOR^(-1) mod r =
+    /// 1
+    #[rustfmt::skip]
+    const COFACTOR_INV: Fr = field_new!(Fr, "1");
+
+    /// AFFINE_GENERATOR_COEFFS = (G1_GENERATOR_X, G1_GENERATOR_Y)
+    const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) =
+        (G1_GENERATOR_X, G1_GENERATOR_Y);
+}
+
+/// G1_GENERATOR_X =
+#[rustfmt::skip]
+pub const G1_GENERATOR_X: Fq = field_new!(Fq, "336685752883082228109289846353937104185698209371404178342968838739115829740084426881123453");
+
+/// G1_GENERATOR_Y =
+#[rustfmt::skip]
+pub const G1_GENERATOR_Y: Fq = field_new!(Fq, "402596290139780989709332707716568920777622032073762749862342374583908837063963736098549800");
diff --git a/arkworks/curves/mnt6_298/src/curves/g2.rs b/arkworks/curves/mnt6_298/src/curves/g2.rs
new file mode 100644
index 00000000..b9ce22c3
--- /dev/null
+++ b/arkworks/curves/mnt6_298/src/curves/g2.rs
@@ -0,0 +1,108 @@
+use ark_ec::{
+    mnt6,
+    mnt6::MNT6Parameters,
+    models::{ModelParameters, SWModelParameters},
+};
+use ark_ff::field_new;
+
+use crate::{g1, Fq, Fq3, Fr, FQ_ZERO};
+
+pub type G2Affine = mnt6::G2Affine<Parameters>;
+pub type G2Projective = mnt6::G2Projective<Parameters>;
+pub type G2Prepared = mnt6::G2Prepared<Parameters>;
+
+#[derive(Clone, Default, PartialEq, Eq)]
+pub struct Parameters;
+
+impl ModelParameters for Parameters {
+    type BaseField = Fq3;
+    type ScalarField = Fr;
+}
+
+/// MUL_BY_A_C0 = NONRESIDUE * COEFF_A = 5 * 11
+#[rustfmt::skip]
+pub const MUL_BY_A_C0: Fq = field_new!(Fq, "55");
+
+/// MUL_BY_A_C1 = NONRESIDUE * COEFF_A
+#[rustfmt::skip]
+pub const MUL_BY_A_C1: Fq = field_new!(Fq, "55");
+
+/// MUL_BY_A_C2 = COEFF_A
+pub const MUL_BY_A_C2: Fq = g1::Parameters::COEFF_A;
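+
+// Added note: G2 is defined over the cubic extension Fq3. COEFF_A reuses the
+// shared TWIST_COEFF_A from `crate::Parameters`, and COEFF_B is G1's b
+// scaled by the cubic non-residue 5, as the inline comment below spells out.
+impl SWModelParameters for Parameters {
+    const COEFF_A: Fq3 = crate::Parameters::TWIST_COEFF_A;
+    #[rustfmt::skip]
+    const COEFF_B: Fq3 = field_new!(Fq3,
+        // 5 * 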
G1::COEFF_B + field_new!(Fq, "57578116384997352636487348509878309737146377454014423897662211075515354005624851787652233"), + FQ_ZERO, + FQ_ZERO, + ); + + /// COFACTOR = + /// 226502022472576270196498690498308461791828762732602586162207535351960270082712694977333372361549082214519252261735048131889018501404377856786623430385820659037970876666767495659520 + #[rustfmt::skip] + const COFACTOR: &'static [u64] = &[ + 15308190245346869248, + 10669098443577192943, + 4561413759929581409, + 3680089780298582849, + 17336300687782721465, + 10745756320947240891, + 17479264233688728128, + 16828697388537672097, + 4184034152442024798, + 915787, + ]; + + /// COFACTOR^(-1) mod r = + /// 79320381028210220958891541608841408590854146655427655872973753568875979721417185067925504 + #[rustfmt::skip] + const COFACTOR_INV: Fr = field_new!(Fr, "79320381028210220958891541608841408590854146655427655872973753568875979721417185067925504"); + + /// AFFINE_GENERATOR_COEFFS = (G2_GENERATOR_X, G2_GENERATOR_Y) + const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = + (G2_GENERATOR_X, G2_GENERATOR_Y); + + #[inline(always)] + fn mul_by_a(elt: &Fq3) -> Fq3 { + field_new!( + Fq3, + MUL_BY_A_C0 * &elt.c1, + MUL_BY_A_C1 * &elt.c2, + MUL_BY_A_C2 * &elt.c0, + ) + } +} + +const G2_GENERATOR_X: Fq3 = + field_new!(Fq3, G2_GENERATOR_X_C0, G2_GENERATOR_X_C1, G2_GENERATOR_X_C2); +const G2_GENERATOR_Y: Fq3 = + field_new!(Fq3, G2_GENERATOR_Y_C0, G2_GENERATOR_Y_C1, G2_GENERATOR_Y_C2); + +pub const G2_GENERATOR_X_C0: Fq = field_new!( + Fq, + "421456435772811846256826561593908322288509115489119907560382401870203318738334702321297427" +); +pub const G2_GENERATOR_X_C1: Fq = field_new!( + Fq, + "103072927438548502463527009961344915021167584706439945404959058962657261178393635706405114" +); +pub const G2_GENERATOR_X_C2: Fq = field_new!( + Fq, + "143029172143731852627002926324735183809768363301149009204849580478324784395590388826052558" +); + +pub const G2_GENERATOR_Y_C0: Fq = field_new!( + Fq, + "464673596668689463130099227575639512541218133445388869383893594087634649237515554342751377" +); +pub const G2_GENERATOR_Y_C1: Fq = field_new!( + Fq, + "100642907501977375184575075967118071807821117960152743335603284583254620685343989304941678" +); +pub const G2_GENERATOR_Y_C2: Fq = field_new!( + Fq, + "123019855502969896026940545715841181300275180157288044663051565390506010149881373807142903" +); diff --git a/arkworks/curves/mnt6_298/src/curves/mod.rs b/arkworks/curves/mnt6_298/src/curves/mod.rs new file mode 100644 index 00000000..94e180b1 --- /dev/null +++ b/arkworks/curves/mnt6_298/src/curves/mod.rs @@ -0,0 +1,48 @@ +use ark_ff::{biginteger::BigInteger320, field_new, Fp3}; + +use ark_ec::{ + models::mnt6::{MNT6Parameters, MNT6}, + SWModelParameters, +}; + +use crate::{Fq, Fq3, Fq3Parameters, Fq6Parameters, Fr}; + +pub mod g1; +pub mod g2; + +#[cfg(test)] +mod tests; + +pub use self::{ + g1::{G1Affine, G1Prepared, G1Projective}, + g2::{G2Affine, G2Prepared, G2Projective}, +}; + +pub type MNT6_298 = MNT6; + +pub struct Parameters; + +impl MNT6Parameters for Parameters { + const TWIST: Fp3 = field_new!(Fq3, FQ_ZERO, FQ_ONE, FQ_ZERO); + #[rustfmt::skip] + const TWIST_COEFF_A: Fp3 = field_new!(Fq3, + FQ_ZERO, + FQ_ZERO, + g1::Parameters::COEFF_A, + ); + const ATE_LOOP_COUNT: &'static [u64] = &[0xdc9a1b671660000, 0x46609756bec2a33f, 0x1eef55]; + const ATE_IS_LOOP_COUNT_NEG: bool = true; + const FINAL_EXPONENT_LAST_CHUNK_1: BigInteger320 = BigInteger320([0x1, 0x0, 0x0, 0x0, 0x0]); + const FINAL_EXPONENT_LAST_CHUNK_W0_IS_NEG: bool = true; + const 
FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0: BigInteger320 = + BigInteger320([0xdc9a1b671660000, 0x46609756bec2a33f, 0x1eef55, 0x0, 0x0]); + type Fp = Fq; + type Fr = Fr; + type Fp3Params = Fq3Parameters; + type Fp6Params = Fq6Parameters; + type G1Parameters = self::g1::Parameters; + type G2Parameters = self::g2::Parameters; +} + +pub const FQ_ZERO: Fq = field_new!(Fq, "0"); +pub const FQ_ONE: Fq = field_new!(Fq, "1"); diff --git a/arkworks/curves/mnt6_298/src/curves/tests.rs b/arkworks/curves/mnt6_298/src/curves/tests.rs new file mode 100644 index 00000000..ea2bd472 --- /dev/null +++ b/arkworks/curves/mnt6_298/src/curves/tests.rs @@ -0,0 +1,91 @@ +use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve}; +use ark_ff::{Field, One, PrimeField, UniformRand}; +use ark_std::rand::Rng; +use ark_std::test_rng; + +use crate::*; + +use ark_algebra_test_templates::{curves::*, groups::*}; + +#[test] +fn test_g1_projective_curve() { + curve_tests::(); + + sw_tests::(); +} + +#[test] +fn test_g1_projective_group() { + let mut rng = test_rng(); + let a: G1Projective = rng.gen(); + let b: G1Projective = rng.gen(); + group_test(a, b); +} + +#[test] +fn test_g1_generator() { + let generator = G1Affine::prime_subgroup_generator(); + assert!(generator.is_on_curve()); + assert!(generator.is_in_correct_subgroup_assuming_on_curve()); +} + +#[test] +fn test_g2_projective_curve() { + curve_tests::(); + + sw_tests::(); +} + +#[test] +fn test_g2_projective_group() { + let mut rng = test_rng(); + let a: G2Projective = rng.gen(); + let b: G2Projective = rng.gen(); + group_test(a, b); +} + +#[test] +fn test_g2_generator() { + let generator = G2Affine::prime_subgroup_generator(); + assert!(generator.is_on_curve()); + assert!(generator.is_in_correct_subgroup_assuming_on_curve()); +} + +#[test] +fn test_bilinearity() { + let mut rng = test_rng(); + let a: G1Projective = rng.gen(); + let b: G2Projective = rng.gen(); + let s: Fr = rng.gen(); + + let sa = a.mul(s.into_repr()); + let sb = b.mul(s.into_repr()); + + let ans1 = MNT6_298::pairing(sa, b); + let ans2 = MNT6_298::pairing(a, sb); + let ans3 = MNT6_298::pairing(a, b).pow(s.into_repr()); + + assert_eq!(ans1, ans2); + assert_eq!(ans2, ans3); + + assert_ne!(ans1, Fq6::one()); + assert_ne!(ans2, Fq6::one()); + assert_ne!(ans3, Fq6::one()); + + assert_eq!(ans1.pow(Fr::characteristic()), Fq6::one()); + assert_eq!(ans2.pow(Fr::characteristic()), Fq6::one()); + assert_eq!(ans3.pow(Fr::characteristic()), Fq6::one()); +} + +#[test] +fn test_product_of_pairings() { + let rng = &mut test_rng(); + + let a = G1Projective::rand(rng).into_affine(); + let b = G2Projective::rand(rng).into_affine(); + let c = G1Projective::rand(rng).into_affine(); + let d = G2Projective::rand(rng).into_affine(); + let ans1 = MNT6_298::pairing(a, b) * &MNT6_298::pairing(c, d); + let ans2 = MNT6_298::product_of_pairings(&[(a.into(), b.into()), (c.into(), d.into())]); + assert_eq!(ans1, ans2); +} diff --git a/arkworks/curves/mnt6_298/src/fields/fq.rs b/arkworks/curves/mnt6_298/src/fields/fq.rs new file mode 100644 index 00000000..f587b1ab --- /dev/null +++ b/arkworks/curves/mnt6_298/src/fields/fq.rs @@ -0,0 +1 @@ +pub use ark_mnt4_298::{Fr as Fq, FrParameters as FqParameters}; diff --git a/arkworks/curves/mnt6_298/src/fields/fq3.rs b/arkworks/curves/mnt6_298/src/fields/fq3.rs new file mode 100644 index 00000000..671688b7 --- /dev/null +++ b/arkworks/curves/mnt6_298/src/fields/fq3.rs @@ -0,0 +1,57 @@ +use crate::{fq::Fq, FQ_ZERO}; +use ark_ff::{ + field_new, + fields::fp3::{Fp3, Fp3Parameters}, +}; + +pub type 
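+/* Added note: the alias completed below defines the cubic extension
+   Fq3 = Fq[u]/(u^3 - 5); the non-residue 5 is recorded as
+   `Fp3Parameters::NONRESIDUE` in the impl that follows. */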
Fq3 = Fp3; + +pub struct Fq3Parameters; + +impl Fp3Parameters for Fq3Parameters { + type Fp = Fq; + + #[rustfmt::skip] + const NONRESIDUE: Fq = field_new!(Fq, "5"); + + const TWO_ADICITY: u32 = 34; + + #[rustfmt::skip] + const T_MINUS_ONE_DIV_TWO: &'static [u64] = &[ + 0x69232b75663933bd, + 0xca650efcfc00ee0, + 0x77ca3963fe36f720, + 0xe4cb46632f9bcf7e, + 0xef510453f08f9f30, + 0x9dd5b8fc72f02d83, + 0x7f8d017ed86608ab, + 0xeb2219b3697c97a4, + 0xc8663846ab96996f, + 0x833cd532053eac7d, + 0x1d5b73dfb20bd3cc, + 0x6f5f6da606b59873, + 0x62e990f43dfc42d6, + 0x6878f58, + ]; + + #[rustfmt::skip] + const QUADRATIC_NONRESIDUE_TO_T: (Fq, Fq, Fq) = ( + field_new!(Fq, "154361449678783505076984156275977937654331103361174469632346230549735979552469642799720052"), + FQ_ZERO, + FQ_ZERO, + ); + + #[rustfmt::skip] + const FROBENIUS_COEFF_FP3_C1: &'static [Fq] = &[ + field_new!(Fq, "1"), + field_new!(Fq, "471738898967521029133040851318449165997304108729558973770077319830005517129946578866686956"), + field_new!(Fq, "4183387201740296620308398334599285547820769823264541783190415909159130177461911693276180"), + ]; + + #[rustfmt::skip] + const FROBENIUS_COEFF_FP3_C2: &'static [Fq] = &[ + Self::FROBENIUS_COEFF_FP3_C1[0], + Self::FROBENIUS_COEFF_FP3_C1[2], + Self::FROBENIUS_COEFF_FP3_C1[1], + ]; +} diff --git a/arkworks/curves/mnt6_298/src/fields/fq6.rs b/arkworks/curves/mnt6_298/src/fields/fq6.rs new file mode 100644 index 00000000..0879380a --- /dev/null +++ b/arkworks/curves/mnt6_298/src/fields/fq6.rs @@ -0,0 +1,24 @@ +use crate::{Fq, Fq3, Fq3Parameters, FQ_ONE, FQ_ZERO}; +use ark_ff::{ + field_new, + fields::fp6_2over3::{Fp6, Fp6Parameters}, +}; + +pub type Fq6 = Fp6; + +pub struct Fq6Parameters; + +impl Fp6Parameters for Fq6Parameters { + type Fp3Params = Fq3Parameters; + + const NONRESIDUE: Fq3 = field_new!(Fq3, FQ_ZERO, FQ_ONE, FQ_ZERO); + + const FROBENIUS_COEFF_FP6_C1: &'static [Fq] = &[ + field_new!(Fq, "1"), + field_new!(Fq, "471738898967521029133040851318449165997304108729558973770077319830005517129946578866686957"), + field_new!(Fq, "471738898967521029133040851318449165997304108729558973770077319830005517129946578866686956"), + field_new!(Fq, "475922286169261325753349249653048451545124878552823515553267735739164647307408490559963136"), + field_new!(Fq, "4183387201740296620308398334599285547820769823264541783190415909159130177461911693276180"), + field_new!(Fq, "4183387201740296620308398334599285547820769823264541783190415909159130177461911693276181"), + ]; +} diff --git a/arkworks/curves/mnt6_298/src/fields/fr.rs b/arkworks/curves/mnt6_298/src/fields/fr.rs new file mode 100644 index 00000000..274bfe30 --- /dev/null +++ b/arkworks/curves/mnt6_298/src/fields/fr.rs @@ -0,0 +1 @@ +pub use ark_mnt4_298::{Fq as Fr, FqParameters as FrParameters}; diff --git a/arkworks/curves/mnt6_298/src/fields/mod.rs b/arkworks/curves/mnt6_298/src/fields/mod.rs new file mode 100644 index 00000000..bf9cff75 --- /dev/null +++ b/arkworks/curves/mnt6_298/src/fields/mod.rs @@ -0,0 +1,14 @@ +pub mod fr; +pub use self::fr::*; + +pub mod fq; +pub use self::fq::*; + +pub mod fq3; +pub use self::fq3::*; + +pub mod fq6; +pub use self::fq6::*; + +#[cfg(all(feature = "mnt6_298", test))] +mod tests; diff --git a/arkworks/curves/mnt6_298/src/fields/tests.rs b/arkworks/curves/mnt6_298/src/fields/tests.rs new file mode 100644 index 00000000..54d071b4 --- /dev/null +++ b/arkworks/curves/mnt6_298/src/fields/tests.rs @@ -0,0 +1,53 @@ +use ark_ff::{ + fields::{models::fp6_2over3::*, quadratic_extension::QuadExtParameters}, + Field, +}; +use 
ark_std::rand::Rng;
+use ark_std::test_rng;
+
+use crate::*;
+
+use ark_algebra_test_templates::fields::*;
+
+#[test]
+fn test_fr() {
+    let mut rng = test_rng();
+    let a: Fr = rng.gen();
+    let b: Fr = rng.gen();
+    field_test(a, b);
+    sqrt_field_test(a);
+    primefield_test::<Fr>();
+}
+
+#[test]
+fn test_fq() {
+    let mut rng = test_rng();
+    let a: Fq = rng.gen();
+    let b: Fq = rng.gen();
+    field_test(a, b);
+    sqrt_field_test(a);
+    primefield_test::<Fq>();
+}
+
+#[test]
+fn test_fq3() {
+    let mut rng = test_rng();
+    let a: Fq3 = rng.gen();
+    let b: Fq3 = rng.gen();
+    field_test(a, b);
+    sqrt_field_test(a);
+    frobenius_test::<Fq3, _>(Fq::characteristic(), 13);
+    assert_eq!(
+        a * Fq6Parameters::NONRESIDUE,
+        <Fp6ParamsWrapper<Fq6Parameters>>::mul_base_field_by_nonresidue(&a)
+    );
+}
+
+#[test]
+fn test_fq6() {
+    let mut rng = test_rng();
+    let a: Fq6 = rng.gen();
+    let b: Fq6 = rng.gen();
+    field_test(a, b);
+    frobenius_test::<Fq6, _>(Fq::characteristic(), 13);
+}
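+
+// A sanity-check sketch added for exposition (not upstream; it assumes the
+// `ark_ff::FftField::two_adic_root_of_unity` API): the stored 2-adic root of
+// unity should have multiplicative order exactly 2^34, matching the
+// valuation(q - 1, 2) = 34 quoted in the crate docs.
+#[test]
+fn test_fq_two_adic_root_of_unity() {
+    use ark_ff::{FftField, One};
+    let w = Fq::two_adic_root_of_unity();
+    assert_eq!(w.pow([1u64 << 34]), Fq::one());
+    assert_ne!(w.pow([1u64 << 33]), Fq::one());
+}
diff --git a/arkworks/curves/mnt6_298/src/lib.rs b/arkworks/curves/mnt6_298/src/lib.rs
new file mode 100644
index 00000000..1effbd98
--- /dev/null
+++ b/arkworks/curves/mnt6_298/src/lib.rs
@@ -0,0 +1,37 @@
+#![cfg_attr(not(feature = "std"), no_std)]
+#![deny(
+    warnings,
+    unused,
+    future_incompatible,
+    nonstandard_style,
+    rust_2018_idioms
+)]
+#![forbid(unsafe_code)]
+
+//! This library implements the MNT6_298 curve generated in
+//! [\[BCTV14\]](https://eprint.iacr.org/2014/595). The name denotes that it is a
+//! Miyaji--Nakabayashi--Takano curve of embedding degree 6, defined over a 298-bit (prime) field.
+//! The main feature of this curve is that its scalar field and base field respectively equal the
+//! base field and scalar field of MNT4_298.
+//!
+//! Curve information:
+//! * Base field: q = 475922286169261325753349249653048451545124878552823515553267735739164647307408490559963137
+//! * Scalar field: r = 475922286169261325753349249653048451545124879242694725395555128576210262817955800483758081
+//! * valuation(q - 1, 2) = 34
+//! * valuation(r - 1, 2) = 17
+//! * G1 curve equation: y^2 = x^3 + ax + b, where
+//!   * a = 11
+//!   * b = 106700080510851735677967319632585352256454251201367587890185989362936000262606668469523074
+//! * G2 curve equation: y^2 = x^3 + Ax + B, where
+//!   * A = Fq3 = (0, 0, a)
+//!   * B = Fq3(b * NON_RESIDUE, 0, 0)
+//! 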
* NON_RESIDUE = 5 is the cubic non-residue used to construct the field extension Fq3 + +#[cfg(feature = "r1cs")] +pub mod constraints; +mod curves; +mod fields; + +pub use curves::*; +pub use fields::*; diff --git a/arkworks/curves/mnt6_753/Cargo.toml b/arkworks/curves/mnt6_753/Cargo.toml new file mode 100644 index 00000000..43a46bd5 --- /dev/null +++ b/arkworks/curves/mnt6_753/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "ark-mnt6-753" +version = "0.3.0" +authors = [ "arkworks contributors" ] +description = "The MNT6-753 pairing-friendly elliptic curve" +homepage = "https://arkworks.rs" +repository = "https://github.com/arkworks-rs/algebra" +documentation = "https://docs.rs/ark-mnt6-753/" +keywords = ["cryptography", "finite-fields", "elliptic-curves" ] +categories = ["cryptography"] +include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +license = "MIT/Apache-2.0" +edition = "2018" + +[dependencies] +ark-ff = { version = "^0.3.0", default-features = false } +ark-ec = { version = "^0.3.0", default-features = false } +ark-std = { version = "^0.3.0", default-features = false } +ark-r1cs-std = { version = "^0.3.0", default-features = false, optional = true } +ark-mnt4-753 = { version = "^0.3.0", path = "../mnt4_753", default-features = false, features = [ "scalar_field", "base_field" ] } + +[dev-dependencies] +ark-relations = { version = "^0.3.0", default-features = false } +ark-serialize = { version = "^0.3.0", default-features = false } +ark-algebra-test-templates = { version = "^0.3.0", default-features = false } +ark-curve-constraint-tests = { path = "../curve-constraint-tests", default-features = false } + +[features] +default = [] +std = [ "ark-std/std", "ark-ff/std", "ark-ec/std" ] +r1cs = ["ark-r1cs-std"] diff --git a/arkworks/curves/mnt6_753/src/constraints/curves.rs b/arkworks/curves/mnt6_753/src/constraints/curves.rs new file mode 100644 index 00000000..e747fd23 --- /dev/null +++ b/arkworks/curves/mnt6_753/src/constraints/curves.rs @@ -0,0 +1,29 @@ +use crate::Parameters; +use ark_r1cs_std::groups::mnt6; + +/// An element of G1 in the MNT6-753 bilinear group. +pub type G1Var = mnt6::G1Var; +/// An element of G2 in the MNT6-753 bilinear group. +pub type G2Var = mnt6::G2Var; + +/// Represents the cached precomputation that can be performed on a G1 element +/// which enables speeding up pairing computation. +pub type G1PreparedVar = mnt6::G1PreparedVar; +/// Represents the cached precomputation that can be performed on a G2 element +/// which enables speeding up pairing computation. +pub type G2PreparedVar = mnt6::G2PreparedVar; + +#[test] +fn test() { + use ark_ec::models::mnt6::MNT6Parameters; + ark_curve_constraint_tests::curves::sw_test::< + ::G1Parameters, + G1Var, + >() + .unwrap(); + ark_curve_constraint_tests::curves::sw_test::< + ::G2Parameters, + G2Var, + >() + .unwrap(); +} diff --git a/arkworks/curves/mnt6_753/src/constraints/fields.rs b/arkworks/curves/mnt6_753/src/constraints/fields.rs new file mode 100644 index 00000000..de156ee8 --- /dev/null +++ b/arkworks/curves/mnt6_753/src/constraints/fields.rs @@ -0,0 +1,26 @@ +use crate::{Fq, Fq3Parameters, Fq6Parameters}; + +use ark_r1cs_std::fields::{fp::FpVar, fp3::Fp3Var, fp6_2over3::Fp6Var}; + +/// A variable that is the R1CS equivalent of `crate::Fq`. +pub type FqVar = FpVar; +/// A variable that is the R1CS equivalent of `crate::Fq3`. +pub type Fq3Var = Fp3Var; +/// A variable that is the R1CS equivalent of `crate::Fq6`. 
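+/// (Added note: `Fp6Var` realizes the quadratic-over-cubic tower, so Fq6
+/// gadget arithmetic decomposes into Fq3 and ultimately Fq constraints.)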
+pub type Fq6Var = Fp6Var; + +#[test] +fn mnt6_753_field_gadgets_test() { + use super::*; + use crate::{Fq, Fq3, Fq6}; + use ark_curve_constraint_tests::fields::*; + + field_test::<_, _, FqVar>().unwrap(); + frobenius_tests::(13).unwrap(); + + field_test::<_, _, Fq3Var>().unwrap(); + frobenius_tests::(13).unwrap(); + + field_test::<_, _, Fq6Var>().unwrap(); + frobenius_tests::(13).unwrap(); +} diff --git a/arkworks/curves/mnt6_753/src/constraints/mod.rs b/arkworks/curves/mnt6_753/src/constraints/mod.rs new file mode 100644 index 00000000..c540ac7f --- /dev/null +++ b/arkworks/curves/mnt6_753/src/constraints/mod.rs @@ -0,0 +1,158 @@ +//! This module implements the R1CS equivalent of `ark_mnt6_753`. +//! +//! It implements field variables for `crate::{Fq, Fq3, Fq6}`, +//! group variables for `crate::{G1, G2}`, and implements constraint +//! generation for computing `MNT6_753::pairing`. +//! +//! The field underlying these constraints is `crate::Fq`. +//! +//! # Examples +//! +//! One can perform standard algebraic operations on `FqVar`: +//! +//! ``` +//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { +//! use ark_std::UniformRand; +//! use ark_relations::r1cs::*; +//! use ark_r1cs_std::prelude::*; +//! use ark_mnt6_753::{*, constraints::*}; +//! +//! let cs = ConstraintSystem::::new_ref(); +//! // This rng is just for test purposes; do not use it +//! // in real applications. +//! let mut rng = ark_std::test_rng(); +//! +//! // Generate some random `Fq` elements. +//! let a_native = Fq::rand(&mut rng); +//! let b_native = Fq::rand(&mut rng); +//! +//! // Allocate `a_native` and `b_native` as witness variables in `cs`. +//! let a = FqVar::new_witness(ark_relations::ns!(cs, "generate_a"), || Ok(a_native))?; +//! let b = FqVar::new_witness(ark_relations::ns!(cs, "generate_b"), || Ok(b_native))?; +//! +//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any +//! // constraints or variables. +//! let a_const = FqVar::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?; +//! let b_const = FqVar::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?; +//! +//! let one = FqVar::one(); +//! let zero = FqVar::zero(); +//! +//! // Sanity check one + one = two +//! let two = &one + &one + &zero; +//! two.enforce_equal(&one.double()?)?; +//! +//! assert!(cs.is_satisfied()?); +//! +//! // Check that the value of &a + &b is correct. +//! assert_eq!((&a + &b).value()?, a_native + &b_native); +//! +//! // Check that the value of &a * &b is correct. +//! assert_eq!((&a * &b).value()?, a_native * &b_native); +//! +//! // Check that operations on variables and constants are equivalent. +//! (&a + &b).enforce_equal(&(&a_const + &b_const))?; +//! assert!(cs.is_satisfied()?); +//! # Ok(()) +//! # } +//! ``` +//! +//! One can also perform standard algebraic operations on `G1Var` and `G2Var`: +//! +//! ``` +//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { +//! # use ark_std::UniformRand; +//! # use ark_relations::r1cs::*; +//! # use ark_r1cs_std::prelude::*; +//! # use ark_mnt6_753::{*, constraints::*}; +//! +//! # let cs = ConstraintSystem::::new_ref(); +//! # let mut rng = ark_std::test_rng(); +//! +//! // Generate some random `G1` elements. +//! let a_native = G1Projective::rand(&mut rng); +//! let b_native = G1Projective::rand(&mut rng); +//! +//! // Allocate `a_native` and `b_native` as witness variables in `cs`. +//! let a = G1Var::new_witness(ark_relations::ns!(cs, "a"), || Ok(a_native))?; +//! 
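+//! // (Added note: `ark_relations::ns!` only attaches a named scope to the
+//! // constraints generated here; named scopes make it much easier to
+//! // localize an unsatisfied constraint when debugging.)
+//! 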
let b = G1Var::new_witness(ark_relations::ns!(cs, "b"), || Ok(b_native))?; +//! +//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any +//! // constraints or variables. +//! let a_const = G1Var::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?; +//! let b_const = G1Var::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?; +//! +//! // This returns the identity of `G1`. +//! let zero = G1Var::zero(); +//! +//! // Sanity check one + one = two +//! let two_a = &a + &a + &zero; +//! two_a.enforce_equal(&a.double()?)?; +//! +//! assert!(cs.is_satisfied()?); +//! +//! // Check that the value of &a + &b is correct. +//! assert_eq!((&a + &b).value()?, a_native + &b_native); +//! +//! // Check that operations on variables and constants are equivalent. +//! (&a + &b).enforce_equal(&(&a_const + &b_const))?; +//! assert!(cs.is_satisfied()?); +//! # Ok(()) +//! # } +//! ``` +//! +//! Finally, one can check pairing computations as well: +//! +//! ``` +//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { +//! # use ark_std::UniformRand; +//! # use ark_ec::PairingEngine; +//! # use ark_relations::r1cs::*; +//! # use ark_r1cs_std::prelude::*; +//! # use ark_mnt6_753::{*, constraints::*}; +//! +//! # let cs = ConstraintSystem::::new_ref(); +//! # let mut rng = ark_std::test_rng(); +//! +//! // Generate random `G1` and `G2` elements. +//! let a_native = G1Projective::rand(&mut rng); +//! let b_native = G2Projective::rand(&mut rng); +//! +//! // Allocate `a_native` and `b_native` as witness variables in `cs`. +//! let a = G1Var::new_witness(ark_relations::ns!(cs, "a"), || Ok(a_native))?; +//! let b = G2Var::new_witness(ark_relations::ns!(cs, "b"), || Ok(b_native))?; +//! +//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any +//! // constraints or variables. +//! let a_const = G1Var::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?; +//! let b_const = G2Var::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?; +//! +//! let pairing_result_native = MNT6_753::pairing(a_native, b_native); +//! +//! // Prepare `a` and `b` for pairing. +//! let a_prep = constraints::PairingVar::prepare_g1(&a)?; +//! let b_prep = constraints::PairingVar::prepare_g2(&b)?; +//! let pairing_result = constraints::PairingVar::pairing(a_prep, b_prep)?; +//! +//! // Check that the value of &a + &b is correct. +//! assert_eq!(pairing_result.value()?, pairing_result_native); +//! +//! // Check that operations on variables and constants are equivalent. +//! let a_prep_const = constraints::PairingVar::prepare_g1(&a_const)?; +//! let b_prep_const = constraints::PairingVar::prepare_g2(&b_const)?; +//! let pairing_result_const = constraints::PairingVar::pairing(a_prep_const, b_prep_const)?; +//! println!("Done here 3"); +//! +//! pairing_result.enforce_equal(&pairing_result_const)?; +//! assert!(cs.is_satisfied()?); +//! # Ok(()) +//! # } +//! ``` + +mod curves; +mod fields; +mod pairing; + +pub use curves::*; +pub use fields::*; +pub use pairing::*; diff --git a/arkworks/curves/mnt6_753/src/constraints/pairing.rs b/arkworks/curves/mnt6_753/src/constraints/pairing.rs new file mode 100644 index 00000000..8979aec8 --- /dev/null +++ b/arkworks/curves/mnt6_753/src/constraints/pairing.rs @@ -0,0 +1,10 @@ +use crate::Parameters; + +/// Specifies the constraints for computing a pairing in the MNT6-753 bilinear group. 
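+///
+/// (Added usage note, mirroring the MNT6-298 module: call
+/// `PairingVar::prepare_g1`/`prepare_g2` on group variables first, after
+/// which `PairingVar::pairing` enforces the Miller loop and final
+/// exponentiation inside the constraint system.)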
+pub type PairingVar = ark_r1cs_std::pairing::mnt6::PairingVar<Parameters>;
+
+#[test]
+fn test() {
+    use crate::MNT6_753;
+    ark_curve_constraint_tests::pairing::bilinearity_test::<MNT6_753, PairingVar>().unwrap()
+}
diff --git a/arkworks/curves/mnt6_753/src/curves/g1.rs b/arkworks/curves/mnt6_753/src/curves/g1.rs
new file mode 100644
index 00000000..0df91b28
--- /dev/null
+++ b/arkworks/curves/mnt6_753/src/curves/g1.rs
@@ -0,0 +1,54 @@
+use ark_ec::{
+    mnt6,
+    models::{ModelParameters, SWModelParameters},
+};
+use ark_ff::field_new;
+
+use crate::{Fq, Fr, FR_ONE};
+
+pub type G1Affine = mnt6::G1Affine<Parameters>;
+pub type G1Projective = mnt6::G1Projective<Parameters>;
+pub type G1Prepared = mnt6::G1Prepared<Parameters>;
+
+#[derive(Clone, Default, PartialEq, Eq)]
+pub struct Parameters;
+
+impl ModelParameters for Parameters {
+    type BaseField = Fq;
+    type ScalarField = Fr;
+}
+
+impl SWModelParameters for Parameters {
+    /// COEFF_A = 11
+    #[rustfmt::skip]
+    const COEFF_A: Fq = field_new!(Fq, "11");
+
+    /// COEFF_B = 0x7DA285E70863C79D56446237CE2E1468D14AE9BB64B2BB01B10E60A5D5DFE0A25714B7985993F62F03B22A9A3C737A1A1E0FCF2C43D7BF847957C34CCA1E3585F9A80A95F401867C4E80F4747FDE5ABA7505BA6FCF2485540B13DFC8468A
+    #[rustfmt::skip]
+    const COEFF_B: Fq = field_new!(Fq, "11625908999541321152027340224010374716841167701783584648338908235410859267060079819722747939267925389062611062156601938166010098747920378738927832658133625454260115409075816187555055859490253375704728027944315501122723426879114");
+
+    /// COFACTOR = 1
+    const COFACTOR: &'static [u64] = &[1];
+
+    /// COFACTOR^(-1) mod r =
+    /// 1
+    #[rustfmt::skip]
+    const COFACTOR_INV: Fr = FR_ONE;
+
+    /// AFFINE_GENERATOR_COEFFS = (G1_GENERATOR_X, G1_GENERATOR_Y)
+    const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) =
+        (G1_GENERATOR_X, G1_GENERATOR_Y);
+}
+
+// Generator of G1
+// X = 3458420969484235708806261200128850544017070333833944116801482064540723268149235477762870414664917360605949659630933184751526227993647030875167687492714052872195770088225183259051403087906158701786758441889742618916006546636728,
+// Y = 27460508402331965149626600224382137254502975979168371111640924721589127725376473514838234361114855175488242007431439074223827742813911899817930728112297763448010814764117701403540298764970469500339646563344680868495474127850569,
+/// G1_GENERATOR_X =
+/// 3458420969484235708806261200128850544017070333833944116801482064540723268149235477762870414664917360605949659630933184751526227993647030875167687492714052872195770088225183259051403087906158701786758441889742618916006546636728,
+#[rustfmt::skip]
+pub const G1_GENERATOR_X: Fq = field_new!(Fq, "3458420969484235708806261200128850544017070333833944116801482064540723268149235477762870414664917360605949659630933184751526227993647030875167687492714052872195770088225183259051403087906158701786758441889742618916006546636728");
+
+/// G1_GENERATOR_Y =
+/// 27460508402331965149626600224382137254502975979168371111640924721589127725376473514838234361114855175488242007431439074223827742813911899817930728112297763448010814764117701403540298764970469500339646563344680868495474127850569,
+#[rustfmt::skip]
+pub const G1_GENERATOR_Y: Fq = field_new!(Fq, "27460508402331965149626600224382137254502975979168371111640924721589127725376473514838234361114855175488242007431439074223827742813911899817930728112297763448010814764117701403540298764970469500339646563344680868495474127850569");
diff --git a/arkworks/curves/mnt6_753/src/curves/g2.rs b/arkworks/curves/mnt6_753/src/curves/g2.rs
new file mode 100644
index 00000000..f875d836
--- /dev/null
+++ b/arkworks/curves/mnt6_753/src/curves/g2.rs
@@ -0,0 +1,126 @@
+use ark_ec::{
+    mnt6,
+    mnt6::MNT6Parameters,
+    models::{ModelParameters, SWModelParameters},
+};
+use ark_ff::field_new;
+
+use crate::{g1, Fq, Fq3, Fr, FQ_ZERO};
+
+pub type G2Affine = mnt6::G2Affine<Parameters>;
+pub type G2Projective = mnt6::G2Projective<Parameters>;
+pub type G2Prepared = mnt6::G2Prepared<Parameters>;
+
+#[derive(Clone, Default, PartialEq, Eq)]
+pub struct Parameters;
+
+impl ModelParameters for Parameters {
+    type BaseField = Fq3;
+    type ScalarField = Fr;
+}
+
+/// MUL_BY_A_C0 = NONRESIDUE * COEFF_A
+/// = 11 * 11
+/// = 121
+#[rustfmt::skip]
+pub const MUL_BY_A_C0: Fq = field_new!(Fq, "121");
+
+/// MUL_BY_A_C1 = NONRESIDUE * COEFF_A
+/// = 11 * 11
+/// = 121
+#[rustfmt::skip]
+pub const MUL_BY_A_C1: Fq = field_new!(Fq, "121");
+
+/// MUL_BY_A_C2 = COEFF_A
+pub const MUL_BY_A_C2: Fq = g1::Parameters::COEFF_A;
+
+impl SWModelParameters for Parameters {
+    const COEFF_A: Fq3 = crate::Parameters::TWIST_COEFF_A;
+    // B coefficient of MNT6-753 G2 =
+    // ```
+    // mnt6753_twist_coeff_b = mnt6753_Fq3(mnt6753_G1::coeff_b * mnt6753_Fq3::non_residue,
+    //                                     mnt6753_Fq::zero(), mnt6753_Fq::zero());
+    // non_residue = mnt6753_Fq3::non_residue = mnt6753_Fq("11");
+    // = (G1_B_COEFF * NON_RESIDUE, ZERO, ZERO);
+    // =
+    // (2189526091197672465268098090392210500740714959757583916377481826443393499947557697773546040576162515434508768057245887856591913752342600919117433675080691499697020523783784738694360040853591723916201150207746019687604267190251,
+    //  0, 0)
+    // ```
+    #[rustfmt::skip]
+    const COEFF_B: Fq3 = field_new!(
+        Fq3,
+        field_new!(Fq, "2189526091197672465268098090392210500740714959757583916377481826443393499947557697773546040576162515434508768057245887856591913752342600919117433675080691499697020523783784738694360040853591723916201150207746019687604267190251"),
+        FQ_ZERO,
+        FQ_ZERO,
+    );
+
+    /// COFACTOR =
+    /// 1755483545388786116744270475466687259186947712032004459714210070280389500116987496124098574823389466285978151140155508638765729019174599527183600372094760023144398285325863550664578643924584541949466179502227232245309952839189635010671372908411609248348904807785904229403747495114436660255866932060472369629692502198423138429922875792635236729929780298333055698257230963645509826963717287902205842627121011526048163097042046361575549171961352924692480000
+    #[rustfmt::skip]
+    const COFACTOR: &'static [u64] = &[
+        17839255819456086016,
+        500623104730997740,
+        2110252009236161768,
+        1500878543414750896,
+        12839751506594314239,
+        8978537329634833065,
+        13830010955957826199,
+        7626514311663165506,
+        14876243211944528805,
+        2316601947950921451,
+        2601177562497904269,
+        18300670698693155036,
+        17321427554953155530,
+        12586270719596716948,
+        807965545138267130,
+        13086323046094411844,
+        16597411233431396880,
+        5578519820383338987,
+        16478065054289650824,
+        12110148809888520863,
+        5901144846689643164,
+        3407195776166256068,
+        14663852814447346059,
+        13435169368,
+    ];
+
+    /// COFACTOR^(-1) mod r =
+    /// 6983081827986492233724035798540106188028451653325658178630583820170892135428517795509815627298389820236345161981341515817589065927929152555581161598204976128690232061758269440757592419606754539638220064054062394397574161203200
+    #[rustfmt::skip]
+    const COFACTOR_INV: Fr = field_new!(Fr, "6983081827986492233724035798540106188028451653325658178630583820170892135428517795509815627298389820236345161981341515817589065927929152555581161598204976128690232061758269440757592419606754539638220064054062394397574161203200");
+
+    /// AFFINE_GENERATOR_COEFFS = (G2_GENERATOR_X, G2_GENERATOR_Y)
+    const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) =
+        (G2_GENERATOR_X, G2_GENERATOR_Y);
+
+    #[inline(always)]
+    fn mul_by_a(elt: &Fq3) -> Fq3 {
+        // Multiplication by A = (0, 0, a): since X^3 = NONRESIDUE in Fq3,
+        // (0, 0, a) * (c0, c1, c2) = (a*NONRESIDUE*c1, a*NONRESIDUE*c2, a*c0),
+        // i.e. exactly (MUL_BY_A_C0 * c1, MUL_BY_A_C1 * c2, MUL_BY_A_C2 * c0).
+        field_new!(
+            Fq3,
+            MUL_BY_A_C0 * &elt.c1,
+            MUL_BY_A_C1 * &elt.c2,
+            MUL_BY_A_C2 * &elt.c0,
+        )
+    }
+}
+
+const G2_GENERATOR_X: Fq3 =
+    field_new!(Fq3, G2_GENERATOR_X_C0, G2_GENERATOR_X_C1, G2_GENERATOR_X_C2);
+const G2_GENERATOR_Y: Fq3 =
+    field_new!(Fq3, G2_GENERATOR_Y_C0, G2_GENERATOR_Y_C1, G2_GENERATOR_Y_C2);
+
+// Generator of G2
+// These are three Fq elements each because X and Y (and Z) are elements of Fq^3
+// X = 27250797394340459586637772414334383652934225310678303542554641987990991970766156209996739240400887081904395745019996048910447071686918567661896491214767494514394154061111870331668445455228882471000120574964265209669155206168252,
+// 35762481056967998715733586393399457882827322353696313323665483142561285210083843314423554450886956650265947502285422529615273790981238406393402603210224104850580302463396274854098657541573494421834514772635884262388058080180368,
+// 36955296703808958167583270646821654948157955258947892285629161090141878438357164213613114995903637211606408001037026832604054121847388692538440756596264746452765613740820430501353237866984394057660379098674983614861254438847846,
+// Y = 2540920530670785421282147216459500299597350984927286541981768941513322907384197363939300669100157141915897390694710534916701460991329498878429407641200901974650893207493883271892985923686300670742888673128384350189165542294615,
+// 7768974215205248225654340523113146529854477025417883273460270519532499370133542215655437897583245920162220909271982265882784840026754554720358946490360213245668334549692889019612343620295335698052097726325099648573158597797497,
+// 21014872727619291834131369222699267167761185012487859171850226473555446863681002782100371394603357586906967186931035615146288030444598977758226767063525819170917389755555854704165900869058188909090444447822088242504281789869689,
+pub const G2_GENERATOR_X_C0: Fq = field_new!(Fq, "27250797394340459586637772414334383652934225310678303542554641987990991970766156209996739240400887081904395745019996048910447071686918567661896491214767494514394154061111870331668445455228882471000120574964265209669155206168252");
+pub const G2_GENERATOR_X_C1: Fq = field_new!(Fq, "35762481056967998715733586393399457882827322353696313323665483142561285210083843314423554450886956650265947502285422529615273790981238406393402603210224104850580302463396274854098657541573494421834514772635884262388058080180368");
+pub const G2_GENERATOR_X_C2: Fq = field_new!(Fq, "36955296703808958167583270646821654948157955258947892285629161090141878438357164213613114995903637211606408001037026832604054121847388692538440756596264746452765613740820430501353237866984394057660379098674983614861254438847846");
+
+pub const G2_GENERATOR_Y_C0: Fq = field_new!(Fq, "2540920530670785421282147216459500299597350984927286541981768941513322907384197363939300669100157141915897390694710534916701460991329498878429407641200901974650893207493883271892985923686300670742888673128384350189165542294615");
+pub const G2_GENERATOR_Y_C1: Fq = field_new!(Fq, "7768974215205248225654340523113146529854477025417883273460270519532499370133542215655437897583245920162220909271982265882784840026754554720358946490360213245668334549692889019612343620295335698052097726325099648573158597797497");
+pub const G2_GENERATOR_Y_C2: Fq = field_new!(Fq, "21014872727619291834131369222699267167761185012487859171850226473555446863681002782100371394603357586906967186931035615146288030444598977758226767063525819170917389755555854704165900869058188909090444447822088242504281789869689");
"21014872727619291834131369222699267167761185012487859171850226473555446863681002782100371394603357586906967186931035615146288030444598977758226767063525819170917389755555854704165900869058188909090444447822088242504281789869689"); diff --git a/arkworks/curves/mnt6_753/src/curves/mod.rs b/arkworks/curves/mnt6_753/src/curves/mod.rs new file mode 100644 index 00000000..e7fe9ba5 --- /dev/null +++ b/arkworks/curves/mnt6_753/src/curves/mod.rs @@ -0,0 +1,76 @@ +use ark_ec::models::{ + mnt6::{MNT6Parameters, MNT6}, + SWModelParameters, +}; +use ark_ff::{biginteger::BigInteger768, field_new, Fp3}; + +use crate::{Fq, Fq3, Fq3Parameters, Fq6Parameters, Fr}; + +pub mod g1; +pub mod g2; + +#[cfg(test)] +mod tests; + +pub use self::{ + g1::{G1Affine, G1Prepared, G1Projective}, + g2::{G2Affine, G2Prepared, G2Projective}, +}; + +pub type MNT6_753 = MNT6; + +pub struct Parameters; + +impl MNT6Parameters for Parameters { + const TWIST: Fp3 = field_new!(Fq3, FQ_ZERO, FQ_ONE, FQ_ZERO); + // A coefficient of MNT6-753 G2 = + // ``` + // mnt6753_twist_coeff_a = mnt6753_Fq3(mnt6753_Fq::zero(), mnt6753_Fq::zero(), + // mnt6753_G1::coeff_a); + // = (ZERO, ZERO, A_COEFF); + // ``` + #[rustfmt::skip] + const TWIST_COEFF_A: Fp3 = field_new!(Fq3, + FQ_ZERO, + FQ_ZERO, + g1::Parameters::COEFF_A, + ); + // https://github.com/o1-labs/snarky/blob/9c21ab2bb23874604640740d646a932e813432c3/snarkette/mnt6753.ml + const ATE_LOOP_COUNT: &'static [u64] = &[ + 8824542903220142080, + 7711082599397206192, + 8303354903384568230, + 5874150271971943936, + 9717849827920685054, + 95829799234282493, + ]; + const ATE_IS_LOOP_COUNT_NEG: bool = false; + const FINAL_EXPONENT_LAST_CHUNK_1: BigInteger768 = + BigInteger768([0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0]); + const FINAL_EXPONENT_LAST_CHUNK_W0_IS_NEG: bool = false; + const FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0: BigInteger768 = BigInteger768([ + 8824542903220142080, + 7711082599397206192, + 8303354903384568230, + 5874150271971943936, + 9717849827920685054, + 95829799234282493, + 0, + 0, + 0, + 0, + 0, + 0, + ]); + type Fp = Fq; + type Fr = Fr; + type Fp3Params = Fq3Parameters; + type Fp6Params = Fq6Parameters; + type G1Parameters = self::g1::Parameters; + type G2Parameters = self::g2::Parameters; +} + +pub const FQ_ZERO: Fq = field_new!(Fq, "0"); +pub const FQ_ONE: Fq = field_new!(Fq, "1"); +pub const FR_ZERO: Fr = field_new!(Fr, "0"); +pub const FR_ONE: Fr = field_new!(Fr, "1"); diff --git a/arkworks/curves/mnt6_753/src/curves/tests.rs b/arkworks/curves/mnt6_753/src/curves/tests.rs new file mode 100644 index 00000000..c3ab1bdd --- /dev/null +++ b/arkworks/curves/mnt6_753/src/curves/tests.rs @@ -0,0 +1,91 @@ +use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve}; +use ark_ff::{Field, One, PrimeField, UniformRand}; +use ark_std::rand::Rng; +use ark_std::test_rng; + +use crate::*; + +use ark_algebra_test_templates::{curves::*, groups::*}; + +#[test] +fn test_g1_projective_curve() { + curve_tests::(); + + sw_tests::(); +} + +#[test] +fn test_g1_projective_group() { + let mut rng = test_rng(); + let a: G1Projective = rng.gen(); + let b: G1Projective = rng.gen(); + group_test(a, b); +} + +#[test] +fn test_g1_generator() { + let generator = G1Affine::prime_subgroup_generator(); + assert!(generator.is_on_curve()); + assert!(generator.is_in_correct_subgroup_assuming_on_curve()); +} + +#[test] +fn test_g2_projective_curve() { + curve_tests::(); + + sw_tests::(); +} + +#[test] +fn test_g2_projective_group() { + let mut rng = test_rng(); + let a: G2Projective = 
rng.gen(); + let b: G2Projective = rng.gen(); + group_test(a, b); +} + +#[test] +fn test_g2_generator() { + let generator = G2Affine::prime_subgroup_generator(); + assert!(generator.is_on_curve()); + assert!(generator.is_in_correct_subgroup_assuming_on_curve()); +} + +#[test] +fn test_bilinearity() { + let mut rng = test_rng(); + let a: G1Projective = rng.gen(); + let b: G2Projective = rng.gen(); + let s: Fr = rng.gen(); + + let sa = a.mul(s.into_repr()); + let sb = b.mul(s.into_repr()); + + let ans1 = MNT6_753::pairing(sa, b); + let ans2 = MNT6_753::pairing(a, sb); + let ans3 = MNT6_753::pairing(a, b).pow(s.into_repr()); + + assert_eq!(ans1, ans2); + assert_eq!(ans2, ans3); + + assert_ne!(ans1, Fq6::one()); + assert_ne!(ans2, Fq6::one()); + assert_ne!(ans3, Fq6::one()); + + assert_eq!(ans1.pow(Fr::characteristic()), Fq6::one()); + assert_eq!(ans2.pow(Fr::characteristic()), Fq6::one()); + assert_eq!(ans3.pow(Fr::characteristic()), Fq6::one()); +} + +#[test] +fn test_product_of_pairings() { + let rng = &mut test_rng(); + + let a = G1Projective::rand(rng).into_affine(); + let b = G2Projective::rand(rng).into_affine(); + let c = G1Projective::rand(rng).into_affine(); + let d = G2Projective::rand(rng).into_affine(); + let ans1 = MNT6_753::pairing(a, b) * &MNT6_753::pairing(c, d); + let ans2 = MNT6_753::product_of_pairings(&[(a.into(), b.into()), (c.into(), d.into())]); + assert_eq!(ans1, ans2); +} diff --git a/arkworks/curves/mnt6_753/src/fields/fq.rs b/arkworks/curves/mnt6_753/src/fields/fq.rs new file mode 100644 index 00000000..33f3df1e --- /dev/null +++ b/arkworks/curves/mnt6_753/src/fields/fq.rs @@ -0,0 +1 @@ +pub use ark_mnt4_753::{Fr as Fq, FrParameters as FqParameters}; diff --git a/arkworks/curves/mnt6_753/src/fields/fq3.rs b/arkworks/curves/mnt6_753/src/fields/fq3.rs new file mode 100644 index 00000000..7235b3b1 --- /dev/null +++ b/arkworks/curves/mnt6_753/src/fields/fq3.rs @@ -0,0 +1,84 @@ +use crate::{fq::Fq, FQ_ONE, FQ_ZERO}; +use ark_ff::{ + field_new, + fields::fp3::{Fp3, Fp3Parameters}, +}; + +pub type Fq3 = Fp3; + +pub struct Fq3Parameters; + +impl Fp3Parameters for Fq3Parameters { + type Fp = Fq; + + #[rustfmt::skip] + const NONRESIDUE: Fq = field_new!(Fq, "11"); + + const TWO_ADICITY: u32 = 30; + + #[rustfmt::skip] + const T_MINUS_ONE_DIV_TWO: &'static [u64] = &[ + 15439605736802142541, + 18190868848461853149, + 6220121510046940818, + 10310485528612680366, + 5032137869959796540, + 3943048799800510054, + 1971151279016362045, + 6096644900171872841, + 12908407994230849218, + 4163225373804228290, + 10382959950522770522, + 9008828410264446883, + 18411821899404157689, + 12386199240837247984, + 13370099281150720481, + 11909278545073807560, + 5964354403900302648, + 15347506722065009035, + 7045354120681109597, + 14294096902719509929, + 6180325033003959541, + 14381489272445870003, + 18159920240207503954, + 17487026929061632528, + 12314108197538755669, + 12116872703077811769, + 3401400733784294722, + 13905351619889935522, + 10972472942574358218, + 6104159581753028261, + 4690139121547787552, + 4880965491878697414, + 1926648890365125214, + 13532564555356297305, + 3114545746551080, + ]; + + /// (11^T, 0, 0) + #[rustfmt::skip] + const QUADRATIC_NONRESIDUE_TO_T: (Fq, Fq, Fq) = ( + field_new!(Fq, "22168644070733283197994897338612733221095941481265408161807376791727499343083607817089033595478370212662133368413166734396127674284827734481031659015434501966360165723728649019457855887066657739809176476252080335185730833468062"), + FQ_ZERO, + FQ_ZERO, + ); + + // Coefficients for the Frobenius 
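+    // How the tables below are used (an illustrative note, not from upstream):
+    // for x = c0 + c1*X + c2*X^2 in Fq3 = Fq[X]/(X^3 - 11), the q^k-power
+    // Frobenius fixes c0, c1, c2 (they lie in Fq) and sends X^i to
+    // 11^(i*(q^k - 1)/3) * X^i, so
+    //     x^(q^k) = c0 + C1[k]*c1*X + C2[k]*c2*X^2,
+    // where C1 = FROBENIUS_COEFF_FP3_C1 and C2 = FROBENIUS_COEFF_FP3_C2.
+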
+    // Coefficients for the Frobenius automorphism.
+    // c1[0] = 1,
+    // c1[1] = 24129022407817241407134263419936114379815707076943508280977368156625538709102831814843582780138963119807143081677569721953561801075623741378629346409604471234573396989178424163772589090105392407118197799904755622897541183052132
+    // c1[2] = 17769468560101711995209951371304522748355002843010440790806134764399814103468274958215310983651375801610927890210888755369611256415970113691066895445191924931148019336171640277697829047741006062493737919155152541323243293107868,
+    #[rustfmt::skip]
+    const FROBENIUS_COEFF_FP3_C1: &'static [Fq] = &[
+        FQ_ONE,
+        field_new!(Fq, "24129022407817241407134263419936114379815707076943508280977368156625538709102831814843582780138963119807143081677569721953561801075623741378629346409604471234573396989178424163772589090105392407118197799904755622897541183052132"),
+        field_new!(Fq, "17769468560101711995209951371304522748355002843010440790806134764399814103468274958215310983651375801610927890210888755369611256415970113691066895445191924931148019336171640277697829047741006062493737919155152541323243293107868"),
+    ];
+
+    // c2 = {c1[0], c1[2], c1[1]}
+    #[rustfmt::skip]
+    const FROBENIUS_COEFF_FP3_C2: &'static [Fq] = &[
+        FQ_ONE,
+        Self::FROBENIUS_COEFF_FP3_C1[2],
+        Self::FROBENIUS_COEFF_FP3_C1[1],
+    ];
+}
diff --git a/arkworks/curves/mnt6_753/src/fields/fq6.rs b/arkworks/curves/mnt6_753/src/fields/fq6.rs
new file mode 100644
index 00000000..d5c872b7
--- /dev/null
+++ b/arkworks/curves/mnt6_753/src/fields/fq6.rs
@@ -0,0 +1,33 @@
+use crate::{Fq, Fq3, Fq3Parameters, FQ_ONE, FQ_ZERO};
+use ark_ff::{
+    field_new,
+    fields::fp6_2over3::{Fp6, Fp6Parameters},
+};
+
+pub type Fq6 = Fp6<Fq6Parameters>;
+
+pub struct Fq6Parameters;
+
+impl Fp6Parameters for Fq6Parameters {
+    type Fp3Params = Fq3Parameters;
+
+    #[rustfmt::skip]
+    const NONRESIDUE: Fq3 = field_new!(Fq3, FQ_ZERO, FQ_ONE, FQ_ZERO);
+
+    // Coefficients for the Frobenius automorphism.
+    // c1[0] = 1,
+    // c1[1] = 24129022407817241407134263419936114379815707076943508280977368156625538709102831814843582780138963119807143081677569721953561801075623741378629346409604471234573396989178424163772589090105392407118197799904755622897541183052133
+    // c1[2] = 24129022407817241407134263419936114379815707076943508280977368156625538709102831814843582780138963119807143081677569721953561801075623741378629346409604471234573396989178424163772589090105392407118197799904755622897541183052132
+    // c1[3] = 41898490967918953402344214791240637128170709919953949071783502921025352812571106773058893763790338921418070971888458477323173057491593855069696241854796396165721416325350064441470418137846398469611935719059908164220784476160000
+    // c1[4] = 17769468560101711995209951371304522748355002843010440790806134764399814103468274958215310983651375801610927890210888755369611256415970113691066895445191924931148019336171640277697829047741006062493737919155152541323243293107868
+    // c1[5] = 17769468560101711995209951371304522748355002843010440790806134764399814103468274958215310983651375801610927890210888755369611256415970113691066895445191924931148019336171640277697829047741006062493737919155152541323243293107869
+    #[rustfmt::skip]
+    const FROBENIUS_COEFF_FP6_C1: &'static [Fq] = &[
+        FQ_ONE,
+        field_new!(Fq, "24129022407817241407134263419936114379815707076943508280977368156625538709102831814843582780138963119807143081677569721953561801075623741378629346409604471234573396989178424163772589090105392407118197799904755622897541183052133"),
+        field_new!(Fq, "24129022407817241407134263419936114379815707076943508280977368156625538709102831814843582780138963119807143081677569721953561801075623741378629346409604471234573396989178424163772589090105392407118197799904755622897541183052132"),
+        field_new!(Fq, "41898490967918953402344214791240637128170709919953949071783502921025352812571106773058893763790338921418070971888458477323173057491593855069696241854796396165721416325350064441470418137846398469611935719059908164220784476160000"),
+        field_new!(Fq, "17769468560101711995209951371304522748355002843010440790806134764399814103468274958215310983651375801610927890210888755369611256415970113691066895445191924931148019336171640277697829047741006062493737919155152541323243293107868"),
+        field_new!(Fq, "17769468560101711995209951371304522748355002843010440790806134764399814103468274958215310983651375801610927890210888755369611256415970113691066895445191924931148019336171640277697829047741006062493737919155152541323243293107869"),
+    ];
+}
diff --git a/arkworks/curves/mnt6_753/src/fields/fr.rs b/arkworks/curves/mnt6_753/src/fields/fr.rs
new file mode 100644
index 00000000..5f91ca20
--- /dev/null
+++ b/arkworks/curves/mnt6_753/src/fields/fr.rs
@@ -0,0 +1 @@
+pub use ark_mnt4_753::{Fq as Fr, FqParameters as FrParameters};
diff --git a/arkworks/curves/mnt6_753/src/fields/mod.rs b/arkworks/curves/mnt6_753/src/fields/mod.rs
new file mode 100644
index 00000000..f394204e
--- /dev/null
+++ b/arkworks/curves/mnt6_753/src/fields/mod.rs
@@ -0,0 +1,14 @@
+pub mod fr;
+pub use self::fr::*;
+
+pub mod fq;
+pub use self::fq::*;
+
+pub mod fq3;
+pub use self::fq3::*;
+
+pub mod fq6;
+pub use self::fq6::*;
+
+#[cfg(all(feature = "mnt6_753", test))]
+mod tests;
diff --git a/arkworks/curves/mnt6_753/src/fields/tests.rs b/arkworks/curves/mnt6_753/src/fields/tests.rs
new file mode 100644
index 00000000..54d071b4
--- /dev/null
+++ b/arkworks/curves/mnt6_753/src/fields/tests.rs
@@ -0,0 +1,53 @@
+use ark_ff::{
+    fields::{models::fp6_2over3::*, quadratic_extension::QuadExtParameters},
+    Field,
+};
+use ark_std::rand::Rng;
+use ark_std::test_rng;
+
+use crate::*;
+
+use ark_algebra_test_templates::fields::*;
+
+#[test]
+fn test_fr() {
+    let mut rng = test_rng();
+    let a: Fr = rng.gen();
+    let b: Fr = rng.gen();
+    field_test(a, b);
+    sqrt_field_test(a);
+    primefield_test::<Fr>();
+}
+
+#[test]
+fn test_fq() {
+    let mut rng = test_rng();
+    let a: Fq = rng.gen();
+    let b: Fq = rng.gen();
+    field_test(a, b);
+    sqrt_field_test(a);
+    primefield_test::<Fq>();
+}
+
+#[test]
+fn test_fq3() {
+    let mut rng = test_rng();
+    let a: Fq3 = rng.gen();
+    let b: Fq3 = rng.gen();
+    field_test(a, b);
+    sqrt_field_test(a);
+    frobenius_test::<Fq3, _>(Fq::characteristic(), 13);
+    assert_eq!(
+        a * Fq6Parameters::NONRESIDUE,
+        <Fp6ParamsWrapper<Fq6Parameters>>::mul_base_field_by_nonresidue(&a)
+    );
+}
+
+#[test]
+fn test_fq6() {
+    let mut rng = test_rng();
+    let a: Fq6 = rng.gen();
+    let b: Fq6 = rng.gen();
+    field_test(a, b);
+    frobenius_test::<Fq6, _>(Fq::characteristic(), 13);
+}
diff --git a/arkworks/curves/mnt6_753/src/lib.rs b/arkworks/curves/mnt6_753/src/lib.rs
new file mode 100644
index 00000000..0555999e
--- /dev/null
+++ b/arkworks/curves/mnt6_753/src/lib.rs
@@ -0,0 +1,36 @@
+#![cfg_attr(not(feature = "std"), no_std)]
+#![deny(
+    warnings,
+    unused,
+    future_incompatible,
+    nonstandard_style,
+    rust_2018_idioms
+)]
+#![forbid(unsafe_code)]
+
+//! This library implements the MNT6_753 curve generated in
+//! [\[BCTV14\]](https://eprint.iacr.org/2014/595). The name denotes that it is a
+//! Miyaji--Nakabayashi--Takano curve of embedding degree 6, defined over a 753-bit (prime) field.
+//! The main feature of this curve is that its scalar field and base field respectively equal the
+//! base field and scalar field of MNT4_753.
+//!
+//! Curve information:
+//! * Base field: q = 0x01C4C62D92C41110229022EEE2CDADB7F997505B8FAFED5EB7E8F96C97D87307FDB925E8A0ED8D99D124D9A15AF79DB26C5C28C859A99B3EEBCA9429212636B9DFF97634993AA4D6C381BC3F0057974EA099170FA13A4FD90776E240000001
+//! * Scalar field: r = 0x01C4C62D92C41110229022EEE2CDADB7F997505B8FAFED5EB7E8F96C97D87307FDB925E8A0ED8D99D124D9A15AF79DB117E776F218059DB80F0DA5CB537E38685ACCE9767254A4638810719AC425F0E39D54522CDD119F5E9063DE245E8001
+//! * valuation(q - 1, 2) = 30
+//! * valuation(r - 1, 2) = 15
+//! * G1 curve equation: y^2 = x^3 + ax + b, where
+//!   * a = 11
+//!   * b = 0x7DA285E70863C79D56446237CE2E1468D14AE9BB64B2BB01B10E60A5D5DFE0A25714B7985993F62F03B22A9A3C737A1A1E0FCF2C43D7BF847957C34CCA1E3585F9A80A95F401867C4E80F4747FDE5ABA7505BA6FCF2485540B13DFC8468A
+//! * G2 curve equation: y^2 = x^3 + Ax + B, where
+//!   * A = Fq3(0, 0, a)
+//!   * B = Fq3(b * NON_RESIDUE, 0, 0)
+//! * NON_RESIDUE = 11 is the cubic non-residue used to construct the extension field Fq3
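+//!
+//! Since `Fq` and `Fr` are re-exports of `ark_mnt4_753::Fr` and `ark_mnt4_753::Fq`
+//! (see `fields/fq.rs` and `fields/fr.rs`), the field swap can be checked at the
+//! type level; a small illustrative doctest:
+//!
+//! ```
+//! use ark_mnt6_753::{Fq, Fr};
+//!
+//! // These assignments only compile because the types coincide.
+//! let x: ark_mnt4_753::Fr = Fq::from(11u64);
+//! let y: ark_mnt4_753::Fq = Fr::from(11u64);
+//! assert_eq!(x, ark_mnt4_753::Fr::from(11u64));
+//! assert_eq!(y, ark_mnt4_753::Fq::from(11u64));
+//! ```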
+
+#[cfg(feature = "r1cs")]
+pub mod constraints;
+mod curves;
+mod fields;
+
+pub use curves::*;
+pub use fields::*;
diff --git a/arkworks/curves/pallas/Cargo.toml b/arkworks/curves/pallas/Cargo.toml
new file mode 100644
index 00000000..d581ef40
--- /dev/null
+++ b/arkworks/curves/pallas/Cargo.toml
@@ -0,0 +1,34 @@
+[package]
+name = "ark-pallas"
+version = "0.3.0"
+authors = [ "Ying Tong Lai", "Daira Hopwood", "O(1) Labs", "arkworks contributors" ]
+description = "The Pallas prime-order elliptic curve"
+homepage = "https://arkworks.rs"
+repository = "https://github.com/arkworks-rs/curves"
+documentation = "https://docs.rs/ark-pallas/"
+keywords = ["cryptography", "finite-fields", "elliptic-curves" ]
+categories = ["cryptography"]
+include = ["Cargo.toml", "src"]
+license = "MIT/Apache-2.0"
+edition = "2018"
+
+[dependencies]
+ark-ff = { version = "^0.3.0", default-features = false }
+ark-ec = { version = "^0.3.0", default-features = false }
+ark-r1cs-std = { version = "^0.3.0", default-features = false, optional = true }
+ark-std = { version = "^0.3.0", default-features = false }
+
+[dev-dependencies]
+ark-relations = { version = "^0.3.0", default-features = false }
+ark-serialize = { version = "^0.3.0", default-features = false }
+ark-algebra-test-templates = { version = "^0.3.0", default-features = false }
+ark-curve-constraint-tests = { path = "../curve-constraint-tests", default-features = false }
+
+[features]
+default = [ "curve" ]
+std = [ "ark-std/std", "ark-ff/std", "ark-ec/std" ]
+
+curve = [ "scalar_field", "base_field" ]
+scalar_field = []
+base_field = []
+r1cs = [ "base_field", "ark-r1cs-std" ]
diff --git a/arkworks/curves/pallas/src/constraints/curves.rs b/arkworks/curves/pallas/src/constraints/curves.rs
new file mode 100644
index 00000000..4f839b16
--- /dev/null
+++ b/arkworks/curves/pallas/src/constraints/curves.rs
@@ -0,0 +1,12 @@
+use crate::*;
+use ark_r1cs_std::groups::curves::short_weierstrass::ProjectiveVar;
+
+use crate::constraints::FBaseVar;
+
+/// A group element in the Pallas prime-order group.
+pub type GVar = ProjectiveVar<PallasParameters, FBaseVar>;
+
+#[test]
+fn test() {
+    ark_curve_constraint_tests::curves::sw_test::<PallasParameters, GVar>().unwrap();
+}
diff --git a/arkworks/curves/pallas/src/constraints/fields.rs b/arkworks/curves/pallas/src/constraints/fields.rs
new file mode 100644
index 00000000..e2f08b05
--- /dev/null
+++ b/arkworks/curves/pallas/src/constraints/fields.rs
@@ -0,0 +1,10 @@
+use crate::fq::Fq;
+use ark_r1cs_std::fields::fp::FpVar;
+
+/// A variable that is the R1CS equivalent of `crate::Fq`.
+pub type FBaseVar = FpVar<Fq>;
+
+#[test]
+fn test() {
+    ark_curve_constraint_tests::fields::field_test::<_, _, FBaseVar>().unwrap();
+}
diff --git a/arkworks/curves/pallas/src/constraints/mod.rs b/arkworks/curves/pallas/src/constraints/mod.rs
new file mode 100644
index 00000000..e363da88
--- /dev/null
+++ b/arkworks/curves/pallas/src/constraints/mod.rs
@@ -0,0 +1,107 @@
+//! This module implements the R1CS equivalent of `ark_pallas`.
+//!
+//! It implements field variables for `crate::Fq`,
+//! and group variables for `crate::GroupProjective`.
+//!
+//! The field underlying these constraints is `crate::Fq`.
+//!
+//! # Examples
+//!
+//! One can perform standard algebraic operations on `FBaseVar`:
+//!
+//! ```
+//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
+//! use ark_std::UniformRand;
+//! use ark_relations::r1cs::*;
+//! use ark_r1cs_std::prelude::*;
+//! use ark_pallas::{*, constraints::*};
+//!
+//! let cs = ConstraintSystem::<Fq>::new_ref();
+//! // This rng is just for test purposes; do not use it
+//! // in real applications.
+//! let mut rng = ark_std::test_rng();
+//!
+//! // Generate some random `Fq` elements.
+//! let a_native = Fq::rand(&mut rng);
+//! let b_native = Fq::rand(&mut rng);
+//!
+//! // Allocate `a_native` and `b_native` as witness variables in `cs`.
+//! let a = FBaseVar::new_witness(ark_relations::ns!(cs, "generate_a"), || Ok(a_native))?;
+//! let b = FBaseVar::new_witness(ark_relations::ns!(cs, "generate_b"), || Ok(b_native))?;
+//!
+//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any
+//! // constraints or variables.
+//! let a_const = FBaseVar::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?;
+//! let b_const = FBaseVar::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?;
+//!
+//! let one = FBaseVar::one();
+//! let zero = FBaseVar::zero();
+//!
+//! // Sanity check one + one = two
+//! let two = &one + &one + &zero;
+//! two.enforce_equal(&one.double()?)?;
+//!
+//! assert!(cs.is_satisfied()?);
+//!
+//! // Check that the value of &a + &b is correct.
+//! assert_eq!((&a + &b).value()?, a_native + &b_native);
+//!
+//! // Check that the value of &a * &b is correct.
+//! assert_eq!((&a * &b).value()?, a_native * &b_native);
+//!
+//! // Check that operations on variables and constants are equivalent.
+//! (&a + &b).enforce_equal(&(&a_const + &b_const))?;
+//! assert!(cs.is_satisfied()?);
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! One can also perform standard algebraic operations on `GVar`:
+//!
+//! ```
+//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
+//! # use ark_std::UniformRand;
+//! # use ark_relations::r1cs::*;
+//! # use ark_r1cs_std::prelude::*;
+//! # use ark_pallas::{*, constraints::*};
+//!
+//! # let cs = ConstraintSystem::<Fq>::new_ref();
+//! # let mut rng = ark_std::test_rng();
+//!
+//! // Generate some random `Projective` elements.
+//! let a_native = Projective::rand(&mut rng);
+//! let b_native = Projective::rand(&mut rng);
+//!
+//! // Allocate `a_native` and `b_native` as witness variables in `cs`.
+//! let a = GVar::new_witness(ark_relations::ns!(cs, "a"), || Ok(a_native))?;
+//! let b = GVar::new_witness(ark_relations::ns!(cs, "b"), || Ok(b_native))?;
+//!
+//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any
+//! // constraints or variables.
+//! let a_const = GVar::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?;
+//! let b_const = GVar::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?;
+//!
+//! // This returns the identity.
+//! let zero = GVar::zero();
+//!
+//! // Sanity check one + one = two
+//! let two_a = &a + &a + &zero;
+//! two_a.enforce_equal(&a.double()?)?;
+//!
+//! assert!(cs.is_satisfied()?);
+//!
+//! // Check that the value of &a + &b is correct.
+//! assert_eq!((&a + &b).value()?, a_native + &b_native);
+//!
+//! // Check that operations on variables and constants are equivalent.
+//! (&a + &b).enforce_equal(&(&a_const + &b_const))?;
+//! assert!(cs.is_satisfied()?);
+//! # Ok(())
+//! # }
+//! ```
+
+mod curves;
+mod fields;
+
+pub use curves::*;
+pub use fields::*;
diff --git a/arkworks/curves/pallas/src/curves/mod.rs b/arkworks/curves/pallas/src/curves/mod.rs
new file mode 100644
index 00000000..f95edc5e
--- /dev/null
+++ b/arkworks/curves/pallas/src/curves/mod.rs
@@ -0,0 +1,49 @@
+use crate::{fq::Fq, fr::Fr};
+use ark_ec::{
+    models::{ModelParameters, SWModelParameters},
+    short_weierstrass_jacobian::{GroupAffine, GroupProjective},
+};
+use ark_ff::{field_new, Zero};
+
+#[cfg(test)]
+mod tests;
+
+#[derive(Copy, Clone, Default, PartialEq, Eq)]
+pub struct PallasParameters;
+
+impl ModelParameters for PallasParameters {
+    type BaseField = Fq;
+    type ScalarField = Fr;
+}
+
+pub type Affine = GroupAffine<PallasParameters>;
+pub type Projective = GroupProjective<PallasParameters>;
+
+impl SWModelParameters for PallasParameters {
+    /// COEFF_A = 0
+    const COEFF_A: Fq = field_new!(Fq, "0");
+
+    /// COEFF_B = 5
+    const COEFF_B: Fq = field_new!(Fq, "5");
+
+    /// COFACTOR = 1
+    const COFACTOR: &'static [u64] = &[0x1];
+
+    /// COFACTOR_INV = 1
+    const COFACTOR_INV: Fr = field_new!(Fr, "1");
+
+    /// AFFINE_GENERATOR_COEFFS = (G1_GENERATOR_X, G1_GENERATOR_Y)
+    const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) =
+        (G_GENERATOR_X, G_GENERATOR_Y);
+
+    #[inline(always)]
+    fn mul_by_a(_: &Self::BaseField) -> Self::BaseField {
+        Self::BaseField::zero()
+    }
+}
+
+/// G_GENERATOR_X = -1
+pub const G_GENERATOR_X: Fq = field_new!(Fq, "-1");
+
+/// G_GENERATOR_Y = 2
+pub const G_GENERATOR_Y: Fq = field_new!(Fq, "2");
diff --git a/arkworks/curves/pallas/src/curves/tests.rs b/arkworks/curves/pallas/src/curves/tests.rs
new file mode 100644
index 00000000..ec59e770
--- /dev/null
+++ b/arkworks/curves/pallas/src/curves/tests.rs
@@ -0,0 +1,39 @@
+#![allow(unused_imports)]
+use ark_ff::{
+    fields::{Field, FpParameters, PrimeField, SquareRootField},
+    One, Zero,
+};
+use ark_serialize::CanonicalSerialize;
+use ark_std::test_rng;
+
+use ark_ec::{models::SWModelParameters, AffineCurve, PairingEngine, ProjectiveCurve};
+use ark_std::ops::{AddAssign, MulAssign};
+use ark_std::rand::Rng;
+
+use crate::{Affine, PallasParameters, Projective};
+
+use ark_algebra_test_templates::{
+    curves::{curve_tests, sw_tests},
+    groups::group_test,
+};
+
+#[test]
+fn test_projective_curve() {
+    curve_tests::<Projective>();
+    sw_tests::<PallasParameters>();
+}
+
+#[test]
+fn test_projective_group() {
+    let mut rng = test_rng();
+    let a: Projective = rng.gen();
+    let b: Projective = rng.gen();
+    group_test(a, b);
+}
+
+#[test]
+fn test_generator() {
+    let generator = Affine::prime_subgroup_generator();
+    assert!(generator.is_on_curve());
+    assert!(generator.is_in_correct_subgroup_assuming_on_curve());
+}
diff --git a/arkworks/curves/pallas/src/fields/fq.rs b/arkworks/curves/pallas/src/fields/fq.rs
new file mode 100644
index 00000000..a94f8ab1
--- /dev/null
+++ b/arkworks/curves/pallas/src/fields/fq.rs
@@ -0,0 +1,90 @@
+use ark_ff::{
+    biginteger::BigInteger256 as BigInteger,
+    fields::{FftParameters, Fp256, Fp256Parameters},
+};
+
+pub type Fq = Fp256<FqParameters>;
+
+pub struct FqParameters;
+
+impl Fp256Parameters for FqParameters {}
+impl FftParameters for FqParameters {
+    type BigInt = BigInteger;
+
+    const TWO_ADICITY: u32 = 32;
+
+    // TWO_ADIC_ROOT_OF_UNITY = GENERATOR^T
+    // Encoded in Montgomery form, so the value here is (5^T)R mod p.
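+    // (Illustrative note) "Montgomery form" stores an element x as x*R mod p,
+    // with R = 2^256 here, so every constant in this file is pre-multiplied
+    // by R; see the R and R2 constants further down.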
+    const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([
+        0xa28db849bad6dbf0,
+        0x9083cd03d3b539df,
+        0xfba6b9ca9dc8448e,
+        0x3ec928747b89c6da,
+    ]);
+}
+
+impl ark_ff::fields::FpParameters for FqParameters {
+    // 28948022309329048855892746252171976963363056481941560715954676764349967630337
+    const MODULUS: BigInteger = BigInteger([
+        0x992d30ed00000001,
+        0x224698fc094cf91b,
+        0x0000000000000000,
+        0x4000000000000000,
+    ]);
+
+    // R = 2^256 mod p
+    const R: BigInteger = BigInteger([
+        0x34786d38fffffffd,
+        0x992c350be41914ad,
+        0xffffffffffffffff,
+        0x3fffffffffffffff,
+    ]);
+
+    // R2 = (2^256)^2 mod p
+    const R2: BigInteger = BigInteger([
+        0x8c78ecb30000000f,
+        0xd7d30dbd8b0de0e7,
+        0x7797a99bc3c95d18,
+        0x096d41af7b9cb714,
+    ]);
+
+    const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        0xcc96987680000000,
+        0x11234c7e04a67c8d,
+        0x0000000000000000,
+        0x2000000000000000,
+    ]);
+
+    // T and T_MINUS_ONE_DIV_TWO, where MODULUS - 1 = 2^S * T
+    const T: BigInteger = BigInteger([
+        0x094cf91b992d30ed,
+        0x00000000224698fc,
+        0x0000000000000000,
+        0x0000000040000000,
+    ]);
+
+    const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        0x04a67c8dcc969876,
+        0x0000000011234c7e,
+        0x0000000000000000,
+        0x0000000020000000,
+    ]);
+
+    // GENERATOR = 5
+    // Encoded in Montgomery form, so the value here is 5R mod p.
+    const GENERATOR: BigInteger = BigInteger([
+        0xa1a55e68ffffffed,
+        0x74c2a54b4f4982f3,
+        0xfffffffffffffffd,
+        0x3fffffffffffffff,
+    ]);
+
+    const MODULUS_BITS: u32 = 255;
+
+    const CAPACITY: u32 = Self::MODULUS_BITS - 1;
+
+    const REPR_SHAVE_BITS: u32 = 1;
+
+    // INV = -p^{-1} (mod 2^64)
+    const INV: u64 = 11037532056220336127;
+}
diff --git a/arkworks/curves/pallas/src/fields/fr.rs b/arkworks/curves/pallas/src/fields/fr.rs
new file mode 100644
index 00000000..f13a78be
--- /dev/null
+++ b/arkworks/curves/pallas/src/fields/fr.rs
@@ -0,0 +1,91 @@
+use ark_ff::{
+    biginteger::BigInteger256 as BigInteger,
+    fields::{FftParameters, Fp256, Fp256Parameters, FpParameters},
+};
+
+pub struct FrParameters;
+
+pub type Fr = Fp256<FrParameters>;
+
+impl Fp256Parameters for FrParameters {}
+impl FftParameters for FrParameters {
+    type BigInt = BigInteger;
+
+    const TWO_ADICITY: u32 = 32;
+
+    // TWO_ADIC_ROOT_OF_UNITY = GENERATOR^T
+    // Encoded in Montgomery form, so the value here is (5^T)R mod q.
+    const TWO_ADIC_ROOT_OF_UNITY: BigInteger = BigInteger([
+        0x218077428c9942de,
+        0xcc49578921b60494,
+        0xac2e5d27b2efbee2,
+        0x0b79fa897f2db056,
+    ]);
+}
+
+impl FpParameters for FrParameters {
+    // 28948022309329048855892746252171976963363056481941647379679742748393362948097
+    const MODULUS: BigInteger = BigInteger([
+        0x8c46eb2100000001,
+        0x224698fc0994a8dd,
+        0x0000000000000000,
+        0x4000000000000000,
+    ]);
+
+    // R = 2^256 mod q
+    const R: BigInteger = BigInteger([
+        0x5b2b3e9cfffffffd,
+        0x992c350be3420567,
+        0xffffffffffffffff,
+        0x3fffffffffffffff,
+    ]);
+
+    // R2 = (2^256)^2 mod q
+    const R2: BigInteger = BigInteger([
+        0xfc9678ff0000000f,
+        0x67bb433d891a16e3,
+        0x7fae231004ccf590,
+        0x096d41af7ccfdaa9,
+    ]);
+
+    const MODULUS_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        0xc623759080000000,
+        0x11234c7e04ca546e,
+        0x0000000000000000,
+        0x2000000000000000,
+    ]);
+
+    // T and T_MINUS_ONE_DIV_TWO, where MODULUS - 1 = 2^S * T
+    const T: BigInteger = BigInteger([
+        0x0994a8dd8c46eb21,
+        0x00000000224698fc,
+        0x0000000000000000,
+        0x0000000040000000,
+    ]);
+
+    const T_MINUS_ONE_DIV_TWO: BigInteger = BigInteger([
+        0x04ca546ec6237590,
+        0x0000000011234c7e,
+        0x0000000000000000,
+        0x0000000020000000,
+    ]);
+
+    // GENERATOR = 5
+    // Encoded in Montgomery form, so the value here is 5R mod q.
+    const GENERATOR: BigInteger = BigInteger([
+        0x96bc8c8cffffffed,
+        0x74c2a54b49f7778e,
+        0xfffffffffffffffd,
+        0x3fffffffffffffff,
+    ]);
+
+    const MODULUS_BITS: u32 = 255;
+
+    const CAPACITY: u32 = Self::MODULUS_BITS - 1;
+
+    const REPR_SHAVE_BITS: u32 = 1;
+
+    // INV = -q^{-1} (mod 2^64)
+    const INV: u64 = 10108024940646105087;
+}
diff --git a/arkworks/curves/pallas/src/fields/mod.rs b/arkworks/curves/pallas/src/fields/mod.rs
new file mode 100644
index 00000000..00906f27
--- /dev/null
+++ b/arkworks/curves/pallas/src/fields/mod.rs
@@ -0,0 +1,12 @@
+#[cfg(feature = "base_field")]
+pub mod fq;
+#[cfg(feature = "base_field")]
+pub use self::fq::*;
+
+#[cfg(feature = "scalar_field")]
+pub mod fr;
+#[cfg(feature = "scalar_field")]
+pub use self::fr::*;
+
+#[cfg(all(feature = "curve", test))]
+mod tests;
diff --git a/arkworks/curves/pallas/src/fields/tests.rs b/arkworks/curves/pallas/src/fields/tests.rs
new file mode 100644
index 00000000..26807e23
--- /dev/null
+++ b/arkworks/curves/pallas/src/fields/tests.rs
@@ -0,0 +1,26 @@
+use ark_std::rand::Rng;
+use ark_std::test_rng;
+
+use crate::*;
+
+use ark_algebra_test_templates::fields::*;
+
+#[test]
+fn test_fr() {
+    let mut rng = test_rng();
+    let a: Fr = rng.gen();
+    let b: Fr = rng.gen();
+    field_test(a, b);
+    sqrt_field_test(a);
+    primefield_test::<Fr>();
+}
+
+#[test]
+fn test_fq() {
+    let mut rng = test_rng();
+    let a: Fq = rng.gen();
+    let b: Fq = rng.gen();
+    field_test(a, b);
+    sqrt_field_test(a);
+    primefield_test::<Fq>();
+}
diff --git a/arkworks/curves/pallas/src/lib.rs b/arkworks/curves/pallas/src/lib.rs
new file mode 100644
index 00000000..ccbfc34d
--- /dev/null
+++ b/arkworks/curves/pallas/src/lib.rs
@@ -0,0 +1,36 @@
+#![cfg_attr(not(feature = "std"), no_std)]
+#![deny(
+    warnings,
+    unused,
+    future_incompatible,
+    nonstandard_style,
+    rust_2018_idioms
+)]
+#![forbid(unsafe_code)]
+
+//! This library implements the prime-order curve Pallas, generated by
+//! [Daira Hopwood](https://github.com/zcash/pasta). The main feature of this
+//! curve is that it forms a cycle with Vesta, i.e. its scalar field and base
+//! field respectively are the base field and scalar field of Vesta.
+//!
+//!
+//! Curve information:
+//! * Base field: q =
+//!   28948022309329048855892746252171976963363056481941560715954676764349967630337
+//! * Scalar field: r =
+//!   28948022309329048855892746252171976963363056481941647379679742748393362948097
+//! * Curve equation: y^2 = x^3 + 5
+//! * Valuation(q - 1, 2) = 32
+//! * Valuation(r - 1, 2) = 32
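+//!
+//! Both two-adicity claims above match the `TWO_ADICITY` constants defined in
+//! this crate's field parameters; an illustrative check:
+//!
+//! ```
+//! use ark_ff::FftParameters;
+//! use ark_pallas::{FqParameters, FrParameters};
+//!
+//! // Large 2-adic subgroups in both fields are what make the Pallas/Vesta
+//! // cycle FFT-friendly.
+//! assert_eq!(<FqParameters as FftParameters>::TWO_ADICITY, 32);
+//! assert_eq!(<FrParameters as FftParameters>::TWO_ADICITY, 32);
+//! ```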
+
+#[cfg(feature = "r1cs")]
+pub mod constraints;
+#[cfg(feature = "curve")]
+mod curves;
+#[cfg(any(feature = "scalar_field", feature = "base_field"))]
+mod fields;
+
+#[cfg(feature = "curve")]
+pub use curves::*;
+#[cfg(any(feature = "scalar_field", feature = "base_field"))]
+pub use fields::*;
diff --git a/arkworks/curves/rustfmt.toml b/arkworks/curves/rustfmt.toml
new file mode 100644
index 00000000..71712138
--- /dev/null
+++ b/arkworks/curves/rustfmt.toml
@@ -0,0 +1,9 @@
+reorder_imports = true
+wrap_comments = true
+normalize_comments = true
+use_try_shorthand = true
+match_block_trailing_comma = true
+use_field_init_shorthand = true
+edition = "2018"
+condense_wildcard_suffixes = true
+merge_imports = true
diff --git a/arkworks/curves/scripts/install-hook.sh b/arkworks/curves/scripts/install-hook.sh
new file mode 100755
index 00000000..eafcf818
--- /dev/null
+++ b/arkworks/curves/scripts/install-hook.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+# This script will install the provided directory ../.hooks as the hook
+# directory for the present repo. See there for hooks, including a pre-commit
+# hook that runs rustfmt on files before a commit.
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+HOOKS_DIR="${DIR}/../.hooks"
+
+git config core.hooksPath "$HOOKS_DIR"
diff --git a/arkworks/curves/scripts/linkify_changelog.py b/arkworks/curves/scripts/linkify_changelog.py
new file mode 100644
index 00000000..867ae14d
--- /dev/null
+++ b/arkworks/curves/scripts/linkify_changelog.py
@@ -0,0 +1,31 @@
+import re
+import sys
+import fileinput
+import os
+
+# Set this to the name of the repo, if you don't want it to be read from the filesystem.
+# It assumes the changelog file is in the root of the repo.
+repo_name = ""
+
+# This script goes through the provided file, and replaces any " \#",
+# with the valid markdown-formatted link to it, e.g.
+# " [\#number](https://github.com/arkworks-rs/template/pull/)".
+# Note that if the number is for an issue, github will auto-redirect you when you click the link.
+# It is safe to run the script multiple times in succession.
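+#
+# For illustration, the substitution performed below rewrites, e.g.,
+#   "- #123 Some change"
+# into
+#   "- [\#123](https://github.com/arkworks-rs/<repo_name>/pull/123) Some change"
+# where <repo_name> is inferred from the changelog's parent directory.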
+# +# Example usage $ python3 linkify_changelog.py ../CHANGELOG.md +if len(sys.argv) < 2: + print("Must include path to changelog as the first argument to the script") + print("Example Usage: python3 linkify_changelog.py ../CHANGELOG.md") + exit() + +changelog_path = sys.argv[1] +if repo_name == "": + path = os.path.abspath(changelog_path) + components = path.split(os.path.sep) + repo_name = components[-2] + +for line in fileinput.input(inplace=True): + line = re.sub(r"\- #([0-9]*)", r"- [\\#\1](https://github.com/arkworks-rs/" + repo_name + r"/pull/\1)", line.rstrip()) + # edits the current file + print(line) \ No newline at end of file diff --git a/arkworks/curves/vesta/Cargo.toml b/arkworks/curves/vesta/Cargo.toml new file mode 100644 index 00000000..a255948a --- /dev/null +++ b/arkworks/curves/vesta/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "ark-vesta" +version = "0.3.0" +authors = [ "Ying Tong Lai", "Daira Hopwood", "O(1) Labs", "arkworks contributors" ] +description = "The Vesta prime-order elliptic curve" +homepage = "https://arkworks.rs" +repository = "https://github.com/arkworks-rs/curves" +documentation = "https://docs.rs/ark-vesta/" +keywords = ["cryptography", "finite-fields", "elliptic-curves" ] +categories = ["cryptography"] +include = ["Cargo.toml", "src"] +license = "MIT/Apache-2.0" +edition = "2018" + +[dependencies] +ark-ff = { version = "^0.3.0", default-features = false } +ark-ec = { version = "^0.3.0", default-features = false } +ark-r1cs-std = { version = "^0.3.0", default-features = false, optional = true } +ark-std = { version = "^0.3.0", default-features = false } +ark-pallas = { version = "^0.3.0", path = "../pallas", default-features = false, features = [ "scalar_field", "base_field" ] } + +[dev-dependencies] +ark-relations = { version = "^0.3.0", default-features = false } +ark-serialize = { version = "^0.3.0", default-features = false } +ark-algebra-test-templates = { version = "^0.3.0", default-features = false } +ark-curve-constraint-tests = { path = "../curve-constraint-tests", default-features = false } + +[features] +default = [] +std = [ "ark-std/std", "ark-ff/std", "ark-ec/std" ] +r1cs = [ "ark-r1cs-std" ] diff --git a/arkworks/curves/vesta/src/constraints/curves.rs b/arkworks/curves/vesta/src/constraints/curves.rs new file mode 100644 index 00000000..fac522a5 --- /dev/null +++ b/arkworks/curves/vesta/src/constraints/curves.rs @@ -0,0 +1,12 @@ +use crate::*; +use ark_r1cs_std::groups::curves::short_weierstrass::ProjectiveVar; + +use crate::constraints::FBaseVar; + +/// A group element in the Vesta prime-order group. +pub type GVar = ProjectiveVar; + +#[test] +fn test() { + ark_curve_constraint_tests::curves::sw_test::().unwrap(); +} diff --git a/arkworks/curves/vesta/src/constraints/fields.rs b/arkworks/curves/vesta/src/constraints/fields.rs new file mode 100644 index 00000000..e2f08b05 --- /dev/null +++ b/arkworks/curves/vesta/src/constraints/fields.rs @@ -0,0 +1,10 @@ +use crate::fq::Fq; +use ark_r1cs_std::fields::fp::FpVar; + +/// A variable that is the R1CS equivalent of `crate::Fq`. +pub type FBaseVar = FpVar; + +#[test] +fn test() { + ark_curve_constraint_tests::fields::field_test::<_, _, FBaseVar>().unwrap(); +} diff --git a/arkworks/curves/vesta/src/constraints/mod.rs b/arkworks/curves/vesta/src/constraints/mod.rs new file mode 100644 index 00000000..a2ac129d --- /dev/null +++ b/arkworks/curves/vesta/src/constraints/mod.rs @@ -0,0 +1,107 @@ +//! This module implements the R1CS equivalent of `ark_vesta`. +//! +//! 
It implements field variables for `crate::Fq`, +//! and group variables for `crate::GroupProjective`. +//! +//! The field underlying these constraints is `crate::Fq`. +//! +//! # Examples +//! +//! One can perform standard algebraic operations on `FBaseVar`: +//! +//! ``` +//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { +//! use ark_std::UniformRand; +//! use ark_relations::r1cs::*; +//! use ark_r1cs_std::prelude::*; +//! use ark_vesta::{*, constraints::*}; +//! +//! let cs = ConstraintSystem::::new_ref(); +//! // This rng is just for test purposes; do not use it +//! // in real applications. +//! let mut rng = ark_std::test_rng(); +//! +//! // Generate some random `Fq` elements. +//! let a_native = Fq::rand(&mut rng); +//! let b_native = Fq::rand(&mut rng); +//! +//! // Allocate `a_native` and `b_native` as witness variables in `cs`. +//! let a = FBaseVar::new_witness(ark_relations::ns!(cs, "generate_a"), || Ok(a_native))?; +//! let b = FBaseVar::new_witness(ark_relations::ns!(cs, "generate_b"), || Ok(b_native))?; +//! +//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any +//! // constraints or variables. +//! let a_const = FBaseVar::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?; +//! let b_const = FBaseVar::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?; +//! +//! let one = FBaseVar::one(); +//! let zero = FBaseVar::zero(); +//! +//! // Sanity check one + one = two +//! let two = &one + &one + &zero; +//! two.enforce_equal(&one.double()?)?; +//! +//! assert!(cs.is_satisfied()?); +//! +//! // Check that the value of &a + &b is correct. +//! assert_eq!((&a + &b).value()?, a_native + &b_native); +//! +//! // Check that the value of &a * &b is correct. +//! assert_eq!((&a * &b).value()?, a_native * &b_native); +//! +//! // Check that operations on variables and constants are equivalent. +//! (&a + &b).enforce_equal(&(&a_const + &b_const))?; +//! assert!(cs.is_satisfied()?); +//! # Ok(()) +//! # } +//! ``` +//! +//! One can also perform standard algebraic operations on `GVar`: +//! +//! ``` +//! # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { +//! # use ark_std::UniformRand; +//! # use ark_relations::r1cs::*; +//! # use ark_r1cs_std::prelude::*; +//! # use ark_vesta::{*, constraints::*}; +//! +//! # let cs = ConstraintSystem::::new_ref(); +//! # let mut rng = ark_std::test_rng(); +//! +//! // Generate some random `Projective` elements. +//! let a_native = Projective::rand(&mut rng); +//! let b_native = Projective::rand(&mut rng); +//! +//! // Allocate `a_native` and `b_native` as witness variables in `cs`. +//! let a = GVar::new_witness(ark_relations::ns!(cs, "a"), || Ok(a_native))?; +//! let b = GVar::new_witness(ark_relations::ns!(cs, "b"), || Ok(b_native))?; +//! +//! // Allocate `a_native` and `b_native` as constants in `cs`. This does not add any +//! // constraints or variables. +//! let a_const = GVar::new_constant(ark_relations::ns!(cs, "a_as_constant"), a_native)?; +//! let b_const = GVar::new_constant(ark_relations::ns!(cs, "b_as_constant"), b_native)?; +//! +//! // This returns the identity. +//! let zero = GVar::zero(); +//! +//! // Sanity check one + one = two +//! let two_a = &a + &a + &zero; +//! two_a.enforce_equal(&a.double()?)?; +//! +//! assert!(cs.is_satisfied()?); +//! +//! // Check that the value of &a + &b is correct. +//! assert_eq!((&a + &b).value()?, a_native + &b_native); +//! +//! // Check that operations on variables and constants are equivalent. +//! 
(&a + &b).enforce_equal(&(&a_const + &b_const))?; +//! assert!(cs.is_satisfied()?); +//! # Ok(()) +//! # } +//! ``` + +mod curves; +mod fields; + +pub use curves::*; +pub use fields::*; diff --git a/arkworks/curves/vesta/src/curves/mod.rs b/arkworks/curves/vesta/src/curves/mod.rs new file mode 100644 index 00000000..865e4594 --- /dev/null +++ b/arkworks/curves/vesta/src/curves/mod.rs @@ -0,0 +1,51 @@ +use crate::{fq::Fq, fr::Fr}; +use ark_ec::{ + models::{ModelParameters, SWModelParameters}, + short_weierstrass_jacobian::{GroupAffine, GroupProjective}, +}; +use ark_ff::{field_new, Zero}; + +#[cfg(test)] +mod tests; + +#[derive(Copy, Clone, Default, PartialEq, Eq)] +pub struct VestaParameters; + +impl ModelParameters for VestaParameters { + type BaseField = Fq; + type ScalarField = Fr; +} + +pub type Affine = GroupAffine; +pub type Projective = GroupProjective; + +impl SWModelParameters for VestaParameters { + /// COEFF_A = 0 + const COEFF_A: Fq = field_new!(Fq, "0"); + + /// COEFF_B = 5 + const COEFF_B: Fq = field_new!(Fq, "5"); + + /// COFACTOR = 1 + const COFACTOR: &'static [u64] = &[0x1]; + + /// COFACTOR_INV = 1 + const COFACTOR_INV: Fr = field_new!(Fr, "1"); + + /// AFFINE_GENERATOR_COEFFS = (G1_GENERATOR_X, G1_GENERATOR_Y) + const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) = + (G_GENERATOR_X, G_GENERATOR_Y); + + #[inline(always)] + fn mul_by_a(_: &Self::BaseField) -> Self::BaseField { + Self::BaseField::zero() + } +} + +/// G_GENERATOR_X = -1 +/// Encoded in Montgomery form, so the value here is -R mod p. +pub const G_GENERATOR_X: Fq = field_new!(Fq, "-1"); + +/// G_GENERATOR_Y = 2 +/// Encoded in Montgomery form, so the value here is 2R mod p. +pub const G_GENERATOR_Y: Fq = field_new!(Fq, "2"); diff --git a/arkworks/curves/vesta/src/curves/tests.rs b/arkworks/curves/vesta/src/curves/tests.rs new file mode 100644 index 00000000..7522a799 --- /dev/null +++ b/arkworks/curves/vesta/src/curves/tests.rs @@ -0,0 +1,39 @@ +#![allow(unused_imports)] +use ark_ff::{ + fields::{Field, FpParameters, PrimeField, SquareRootField}, + One, Zero, +}; +use ark_serialize::CanonicalSerialize; +use ark_std::test_rng; + +use ark_ec::{models::SWModelParameters, AffineCurve, PairingEngine, ProjectiveCurve}; +use ark_std::ops::{AddAssign, MulAssign}; +use ark_std::rand::Rng; + +use crate::{Affine, Projective, VestaParameters}; + +use ark_algebra_test_templates::{ + curves::{curve_tests, sw_tests}, + groups::group_test, +}; + +#[test] +fn test_projective_curve() { + curve_tests::(); + sw_tests::(); +} + +#[test] +fn test_projective_group() { + let mut rng = test_rng(); + let a: Projective = rng.gen(); + let b: Projective = rng.gen(); + group_test(a, b); +} + +#[test] +fn test_generator() { + let generator = Affine::prime_subgroup_generator(); + assert!(generator.is_on_curve()); + assert!(generator.is_in_correct_subgroup_assuming_on_curve()); +} diff --git a/arkworks/curves/vesta/src/fields/fq.rs b/arkworks/curves/vesta/src/fields/fq.rs new file mode 100644 index 00000000..1346b1a6 --- /dev/null +++ b/arkworks/curves/vesta/src/fields/fq.rs @@ -0,0 +1 @@ +pub use ark_pallas::{Fr as Fq, FrParameters as FqParameters}; diff --git a/arkworks/curves/vesta/src/fields/fr.rs b/arkworks/curves/vesta/src/fields/fr.rs new file mode 100644 index 00000000..b8207ec6 --- /dev/null +++ b/arkworks/curves/vesta/src/fields/fr.rs @@ -0,0 +1 @@ +pub use ark_pallas::{Fq as Fr, FqParameters as FrParameters}; diff --git a/arkworks/curves/vesta/src/fields/mod.rs b/arkworks/curves/vesta/src/fields/mod.rs new file mode 
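+
+// (Illustrative note) Together with `fq.rs` above, this re-export realizes the
+// Pallas/Vesta cycle at the type level: `ark_vesta::Fr` is literally
+// `ark_pallas::Fq`, so `let x: ark_vesta::Fr = ark_pallas::Fq::from(7u64);`
+// compiles as-is.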
diff --git a/arkworks/curves/vesta/src/fields/mod.rs b/arkworks/curves/vesta/src/fields/mod.rs
new file mode 100644
index 00000000..5156179a
--- /dev/null
+++ b/arkworks/curves/vesta/src/fields/mod.rs
@@ -0,0 +1,8 @@
+pub mod fq;
+pub use self::fq::*;
+
+pub mod fr;
+pub use self::fr::*;
+
+#[cfg(test)]
+mod tests;
diff --git a/arkworks/curves/vesta/src/fields/tests.rs b/arkworks/curves/vesta/src/fields/tests.rs
new file mode 100644
index 00000000..26807e23
--- /dev/null
+++ b/arkworks/curves/vesta/src/fields/tests.rs
@@ -0,0 +1,26 @@
+use ark_std::rand::Rng;
+use ark_std::test_rng;
+
+use crate::*;
+
+use ark_algebra_test_templates::fields::*;
+
+#[test]
+fn test_fr() {
+    let mut rng = test_rng();
+    let a: Fr = rng.gen();
+    let b: Fr = rng.gen();
+    field_test(a, b);
+    sqrt_field_test(a);
+    primefield_test::<Fr>();
+}
+
+#[test]
+fn test_fq() {
+    let mut rng = test_rng();
+    let a: Fq = rng.gen();
+    let b: Fq = rng.gen();
+    field_test(a, b);
+    sqrt_field_test(a);
+    primefield_test::<Fq>();
+}
diff --git a/arkworks/curves/vesta/src/lib.rs b/arkworks/curves/vesta/src/lib.rs
new file mode 100644
index 00000000..6b1d64bb
--- /dev/null
+++ b/arkworks/curves/vesta/src/lib.rs
@@ -0,0 +1,33 @@
+#![cfg_attr(not(feature = "std"), no_std)]
+#![deny(
+    warnings,
+    unused,
+    future_incompatible,
+    nonstandard_style,
+    rust_2018_idioms
+)]
+#![forbid(unsafe_code)]
+
+//! This library implements the prime-order curve Vesta, generated by
+//! [Daira Hopwood](https://github.com/zcash/pasta). The main feature of this
+//! curve is that it forms a cycle with Pallas, i.e. its scalar field and base
+//! field respectively are the base field and scalar field of Pallas.
+//!
+//!
+//! Curve information:
+//! Vesta:
+//! * Base field: q =
+//!   28948022309329048855892746252171976963363056481941647379679742748393362948097
+//! * Scalar field: r =
+//!   28948022309329048855892746252171976963363056481941560715954676764349967630337
+//! * Curve equation: y^2 = x^3 + 5
+//! * Valuation(q - 1, 2) = 32
+//! * Valuation(r - 1, 2) = 32
+
+#[cfg(feature = "r1cs")]
+pub mod constraints;
+mod curves;
+mod fields;
+
+pub use curves::*;
+pub use fields::*;
diff --git a/arkworks/groth16/.github/ISSUE_TEMPLATE/bug_report.md b/arkworks/groth16/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 00000000..e01ca941
--- /dev/null
+++ b/arkworks/groth16/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,25 @@
+---
+name: Bug Report
+about: Create a report to help us squash bugs!
+
+---
+
+## Summary of Bug
+
+
+
+## Version
+
+
+
+## Steps to Reproduce
+
+
+
diff --git a/arkworks/groth16/.github/ISSUE_TEMPLATE/feature_request.md b/arkworks/groth16/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 00000000..7d5ed5df
--- /dev/null
+++ b/arkworks/groth16/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,35 @@
+---
+name: Feature Request
+about: Create a proposal to request a feature
+
+---
+
+
+
+## Summary
+
+
+
+## Problem Definition
+
+
+
+## Proposal
+
+
+
+____
+
+#### For Admin Use
+
+- [ ] Not duplicate issue
+- [ ] Appropriate labels applied
+- [ ] Appropriate contributors tagged
+- [ ] Contributor assigned/self-assigned
diff --git a/arkworks/groth16/.github/PULL_REQUEST_TEMPLATE.md b/arkworks/groth16/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 00000000..37f2f6c1
--- /dev/null
+++ b/arkworks/groth16/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,26 @@
+
+
+## Description
+
+
+
+closes: #XXXX
+
+---
+
+Before we can merge this PR, please make sure that all the following items have been
+checked off.
If any of the checklist items are not applicable, please leave them but +write a little note why. + +- [ ] Targeted PR against correct branch (master) +- [ ] Linked to Github issue with discussion and accepted design OR have an explanation in the PR that describes this work. +- [ ] Wrote unit tests +- [ ] Updated relevant documentation in the code +- [ ] Added a relevant changelog entry to the `Pending` section in `CHANGELOG.md` +- [ ] Re-reviewed `Files changed` in the Github PR explorer diff --git a/arkworks/groth16/.github/dependabot.yml b/arkworks/groth16/.github/dependabot.yml new file mode 100644 index 00000000..5b2a1dc4 --- /dev/null +++ b/arkworks/groth16/.github/dependabot.yml @@ -0,0 +1,11 @@ +version: 2 +updates: +- package-ecosystem: cargo + directory: "/" + schedule: + interval: daily + open-pull-requests-limit: 10 + ignore: + - dependency-name: rand + versions: + - 0.8.0 diff --git a/arkworks/groth16/.github/workflows/ci.yml b/arkworks/groth16/.github/workflows/ci.yml new file mode 100644 index 00000000..fa3894de --- /dev/null +++ b/arkworks/groth16/.github/workflows/ci.yml @@ -0,0 +1,119 @@ +name: CI +on: + pull_request: + push: + branches: + - master +env: + RUST_BACKTRACE: 1 + +jobs: + style: + name: Check Style + runs-on: ubuntu-latest + steps: + + - name: Checkout + uses: actions/checkout@v1 + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + components: rustfmt + + - name: cargo fmt --check + uses: actions-rs/cargo@v1 + with: + command: fmt + args: -- --check + + test: + name: Test + runs-on: ubuntu-latest + env: + RUSTFLAGS: -Dwarnings + strategy: + matrix: + rust: + - stable + - nightly + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Install Rust (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: ${{ matrix.rust }} + override: true + + - uses: actions/cache@v2 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: Check examples + uses: actions-rs/cargo@v1 + with: + command: check + args: --examples + + - name: Check examples with all features on stable + uses: actions-rs/cargo@v1 + with: + command: check + args: --examples --all-features + if: matrix.rust == 'stable' + + - name: Check benchmarks on nightly + uses: actions-rs/cargo@v1 + with: + command: check + args: --all-features --examples --benches + if: matrix.rust == 'nightly' + + - name: Test + uses: actions-rs/cargo@v1 + with: + command: test + args: --all-features + + check_no_std: + name: Check no_std + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Install Rust (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + target: thumbv6m-none-eabi + override: true + + - name: Install Rust ARM64 (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + target: aarch64-unknown-none + override: true + + - uses: actions/cache@v2 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: groth16 + run: | + cargo build --no-default-features --target aarch64-unknown-none + cargo check --examples --no-default-features --target aarch64-unknown-none diff --git a/arkworks/groth16/.github/workflows/linkify_changelog.yml b/arkworks/groth16/.github/workflows/linkify_changelog.yml new file mode 100644 index 00000000..0cbe85f1 --- /dev/null +++ 
b/arkworks/groth16/.github/workflows/linkify_changelog.yml @@ -0,0 +1,20 @@ +name: Linkify Changelog + +on: + workflow_dispatch + +jobs: + linkify: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Add links + run: python3 scripts/linkify_changelog.py CHANGELOG.md + - name: Commit + run: | + git config user.name github-actions + git config user.email github-actions@github.com + git add . + git commit -m "Linkify Changelog" + git push \ No newline at end of file diff --git a/arkworks/groth16/.gitignore b/arkworks/groth16/.gitignore new file mode 100644 index 00000000..9b5e101e --- /dev/null +++ b/arkworks/groth16/.gitignore @@ -0,0 +1,11 @@ +target +Cargo.lock +.DS_Store +.idea +*.iml +*.ipynb_checkpoints +*.pyc +*.sage.py +params +*.swp +*.swo diff --git a/arkworks/groth16/.hooks/pre-commit b/arkworks/groth16/.hooks/pre-commit new file mode 100755 index 00000000..8d4d19fe --- /dev/null +++ b/arkworks/groth16/.hooks/pre-commit @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +rustfmt --version &>/dev/null +if [ $? != 0 ]; then + printf "[pre_commit] \033[0;31merror\033[0m: \"rustfmt\" not available. \n" + printf "[pre_commit] \033[0;31merror\033[0m: rustfmt can be installed via - \n" + printf "[pre_commit] $ rustup component add rustfmt \n" + exit 1 +fi + +problem_files=() + +# collect ill-formatted files +for file in $(git diff --name-only --cached); do + if [ ${file: -3} == ".rs" ]; then + rustfmt +stable --check $file &>/dev/null + if [ $? != 0 ]; then + problem_files+=($file) + fi + fi +done + +if [ ${#problem_files[@]} == 0 ]; then + # done + printf "[pre_commit] rustfmt \033[0;32mok\033[0m \n" +else + # reformat the files that need it and re-stage them. + printf "[pre_commit] the following files were rustfmt'd before commit: \n" + for file in ${problem_files[@]}; do + rustfmt +stable $file + git add $file + printf "\033[0;32m $file\033[0m \n" + done +fi + +exit 0 diff --git a/arkworks/groth16/CHANGELOG.md b/arkworks/groth16/CHANGELOG.md new file mode 100644 index 00000000..07928505 --- /dev/null +++ b/arkworks/groth16/CHANGELOG.md @@ -0,0 +1,48 @@ +# CHANGELOG + +## Pending + +### Breaking changes + +### Features + +### Improvements + +### Bug fixes + +## v0.3.0 + +### Breaking changes + +- [\#21](https://github.com/arkworks-rs/groth16/pull/21) Change the `generate_parameters` interface to take generators as input. + +### Features + +- [\#30](https://github.com/arkworks-rs/groth16/pull/30) Add proof input preprocessing. + +### Improvements + +### Bug fixes + +## v0.2.0 + +### Breaking changes +- [\#4](https://github.com/arkworks-rs/groth16/pull/4) Change groth16's logic to implement the `SNARK` trait. +- Minimum version on crates from `arkworks-rs/algebra` and `arkworks-rs/curves` is now `v0.2.0` +- [\#24](https://github.com/arkworks-rs/groth16/pull/24) Switch from `bench-utils` to `ark_std::perf_trace` + +### Features +- [\#5](https://github.com/arkworks-rs/groth16/pull/5) Add R1CS constraints for the groth16 verifier. 
+- [\#8](https://github.com/arkworks-rs/groth16/pull/8) Add benchmarks for the prover +- [\#16](https://github.com/arkworks-rs/groth16/pull/16) Add proof re-randomization + +### Improvements +- [\#9](https://github.com/arkworks-rs/groth16/pull/9) Improve memory consumption by manually dropping large vectors once they're no longer needed + +### Bug fixes +- [c9bc5519](https://github.com/arkworks-rs/groth16/commit/885b9b569522f59a7eb428d1095f442ec9bc5519) Fix parallel feature flag +- [\#22](https://github.com/arkworks-rs/groth16/pull/22) Compile with `panic='abort'` in release mode, for safety of the library across FFI boundaries. + +## v0.1.0 + +_Initial release_ diff --git a/arkworks/groth16/CONTRIBUTING.md b/arkworks/groth16/CONTRIBUTING.md new file mode 100644 index 00000000..b3152b35 --- /dev/null +++ b/arkworks/groth16/CONTRIBUTING.md @@ -0,0 +1,65 @@ +# Contributing + +Thank you for considering making contributions to `arkworks-rs/groth16`! + +Contributing to this repo can be done in several forms, such as participating in discussion or proposing code changes. +To ensure a smooth workflow for all contributors, the following general procedure for contributing has been established: + +1) Either open or find an issue you'd like to help with +2) Participate in thoughtful discussion on that issue +3) If you would like to contribute: + * If the issue is a feature proposal, ensure that the proposal has been accepted + * Ensure that nobody else has already begun working on this issue. If they have, please try to contact them to collaborate + * If nobody has been assigned to the issue and you would like to work on it, leave a comment on the issue to inform the community of your intention to begin work (so that we can avoid duplicating effort) + * We suggest using standard GitHub best practices for contributing: fork the repo, branch from the HEAD of master, make some commits on your branch, and submit a PR from the branch to master. More detail on this is below + * Be sure to include a relevant changelog entry in the Pending section of CHANGELOG.md (see that file for the log format) + * If the change is breaking, we may add migration instructions. + +Note that for very small or clear problems (such as typos), or for well-isolated improvements, it is not required to open an issue before submitting a PR. +But be aware that for more complex problems/features touching multiple parts of the codebase, if a PR is opened before an adequate design discussion has taken place in a GitHub issue, that PR runs a higher risk of being rejected. + +Looking for a good place to start contributing? How about checking out some good first issues? + +## Branch Structure + +`groth16` has `master` as its default branch, which is where PRs are merged. Releases will be made periodically, on no set schedule. +All other branches should be assumed to be miscellaneous feature-development branches. + +All downstream users of the library should use tagged versions of the library pulled from cargo. + +## How to work on a fork +Please skip this section if you're familiar with contributing to open-source GitHub projects. + +First, fork the repo from the GitHub UI and clone it locally. +Then, in the repo, add the repo you forked from as a new remote: +```bash +git remote add upstream git@github.com:arkworks-rs/groth16.git +``` + +To make a code contribution, first think of a branch name that describes your change.
+Then do the following: +```bash +git checkout master +git pull upstream master +git checkout -b $BRANCH_NAME +``` +and then work as normal on that branch, and open a pull request to upstream master when you're done =) + +## Updating documentation + +All PRs should aim to leave the code more documented than they found it. +Please don't assume that it's easy to infer what the code is doing, +as that is usually not the case for these complex protocols. +(Even when you understand the paper!) + +It's often very useful to describe the high-level view of what a code block is doing, +and to either refer to the relevant section of a paper or include a short proof/argument for why it makes sense, before the actual logic. + +## Performance improvements + +All performance improvements should be accompanied by improved benchmarks, or it should otherwise be clear that things have improved. +For some areas of the codebase, performance roughly follows the number of field multiplications, but there are also many areas where +hard-to-predict, low-level system effects such as cache locality and superscalar operations become important for performance. +Thus performance can often be non-intuitive and diverge from simply minimizing the number of arithmetic operations. \ No newline at end of file diff --git a/arkworks/groth16/Cargo.toml b/arkworks/groth16/Cargo.toml new file mode 100644 index 00000000..2557ebd2 --- /dev/null +++ b/arkworks/groth16/Cargo.toml @@ -0,0 +1,71 @@ +[package] +name = "ark-groth16" +version = "0.3.0" +authors = [ "arkworks contributors" ] +description = "An implementation of the Groth 2016 zkSNARK proof system" +homepage = "https://arkworks.rs" +repository = "https://github.com/arkworks-rs/groth16" +documentation = "https://docs.rs/ark-groth16/" +keywords = [ "zero-knowledge", "cryptography", "zkSNARK", "SNARK", "Groth16" ] +categories = [ "cryptography" ] +include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +license = "MIT/Apache-2.0" +edition = "2018" + +################################# Dependencies ################################ + +[dependencies] +ark-ff = { path = "../algebra/ff", version = "^0.3.0", default-features = false } +ark-ec = { path = "../algebra/ec", version = "^0.3.0", default-features = false } +ark-serialize = { path = "../algebra/serialize", version = "^0.3.0", default-features = false, features = [ "derive" ] } +ark-poly = { path = "../algebra/poly", version = "^0.3.0", default-features = false } +ark-std = { path = "../std", version = "^0.3.0", default-features = false } +ark-relations = { path = "../snark/relations", version = "^0.3.0", default-features = false } +ark-crypto-primitives = { path = "../crypto-primitives", version = "^0.3.0", default-features = false } +ark-r1cs-std = { path = "../r1cs-std", version = "^0.3.0", default-features = false, optional = true } + +tracing = { version = "0.1", default-features = false, features = [ "attributes" ], optional = true } +derivative = { version = "2.0", features = ["use_core"], optional = true} + +rayon = { version = "1", optional = true } + +[dev-dependencies] +csv = { version = "1" } +ark-bls12-381 = { version = "^0.3.0", default-features = false, features = ["curve"] } +ark-bls12-377 = { version = "^0.3.0", default-features = false, features = ["curve"] } +ark-cp6-782 = { version = "^0.3.0", default-features = false } +ark-mnt4-298 = { version = "^0.3.0", default-features = false, features = ["r1cs", "curve"] } +ark-mnt6-298 = { version = "^0.3.0", default-features =
false, features = ["r1cs"] } +ark-mnt4-753 = { version = "^0.3.0", default-features = false, features = ["r1cs", "curve"] } +ark-mnt6-753 = { version = "^0.3.0", default-features = false, features = ["r1cs"] } + +[profile.release] +opt-level = 3 +panic = 'abort' + +[profile.dev] +opt-level = 0 +panic = 'abort' + +[features] +default = ["parallel"] +std = ["ark-ff/std", "ark-ec/std", "ark-poly/std", "ark-relations/std", "ark-crypto-primitives/std", "ark-std/std" ] +parallel = ["std", "ark-ff/parallel", "ark-poly/parallel", "ark-ec/parallel", "ark-crypto-primitives/parallel", "ark-std/parallel", "rayon"] +r1cs = [ "ark-crypto-primitives/r1cs", "ark-r1cs-std", "tracing", "derivative" ] +print-trace = [ "ark-std/print-trace" ] + +[[example]] +name = "groth16" +path = "examples/snark-scalability/groth16.rs" +required-features = ["std"] + +[[bench]] +name = "groth16-benches" +path = "benches/bench.rs" +harness = false +required-features = ["std"] + +# [[example]] +# name = "groth16-recursive" +# path = "examples/recursive-snark/groth16.rs" +# required-features = ["std"] diff --git a/arkworks/groth16/LICENSE-APACHE b/arkworks/groth16/LICENSE-APACHE new file mode 100644 index 00000000..16fe87b0 --- /dev/null +++ b/arkworks/groth16/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/arkworks/groth16/LICENSE-MIT b/arkworks/groth16/LICENSE-MIT new file mode 100644 index 00000000..72dc60d8 --- /dev/null +++ b/arkworks/groth16/LICENSE-MIT @@ -0,0 +1,19 @@ +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/arkworks/groth16/README.md b/arkworks/groth16/README.md new file mode 100644 index 00000000..b27cb773 --- /dev/null +++ b/arkworks/groth16/README.md @@ -0,0 +1,53 @@ +

<h1 align="center">ark-groth16</h1>
+
+ +The arkworks ecosystem consists of Rust libraries for designing and working with __zero-knowledge succinct non-interactive arguments (zkSNARKs)__. This repository contains an efficient implementation of the zkSNARK of [[Groth16]](https://eprint.iacr.org/2016/260). + +This library is released under the MIT License and the Apache v2 License (see [License](#license)). + +**WARNING:** This is an academic proof-of-concept prototype, and in particular has not received careful code review. This implementation is NOT ready for production use. + +## Build guide + +The library compiles on the `stable` toolchain of the Rust compiler. To install the latest version of Rust, first install `rustup` by following the instructions [here](https://rustup.rs/), or via your platform's package manager. Once `rustup` is installed, install the Rust toolchain by invoking: +```bash +rustup install stable +``` + +After that, use `cargo`, the standard Rust build tool, to build the library: +```bash +git clone https://github.com/arkworks-rs/groth16.git +cargo build --release +``` + +This library comes with unit tests. Run them with: +```bash +cargo test +``` + +## License + +This library is licensed under either of the following licenses, at your discretion. + + * Apache License Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +Unless you explicitly state otherwise, any contribution submitted for inclusion in this library by you shall be dual licensed as above (as defined in the Apache v2 License), without any additional terms or conditions. + +## Acknowledgements + +This work was supported by: a Google Faculty Award; the National Science Foundation; the UC Berkeley Center for Long-Term Cybersecurity; and donations from the Ethereum Foundation, the Interchain Foundation, and Qtum. + +An earlier version of this library was developed as part of the paper *"[ZEXE: Enabling Decentralized Private Computation][zexe]"*. + +[zexe]: https://ia.cr/2018/962 diff --git a/arkworks/groth16/benches/bench.rs b/arkworks/groth16/benches/bench.rs new file mode 100644 index 00000000..144e7fea --- /dev/null +++ b/arkworks/groth16/benches/bench.rs @@ -0,0 +1,140 @@ +// For benchmark, run: +// RAYON_NUM_THREADS=N cargo bench --no-default-features --features "std parallel" -- --nocapture +// where N is the number of threads you want to use (N = 1 for single-thread).
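+//
+// For reference, a minimal sketch of the `SNARK`-trait flow these benches
+// exercise (the names `c` and `public_inputs` are illustrative, not part of
+// this file):
+//
+//     let rng = &mut ark_std::test_rng();
+//     let (pk, vk) = Groth16::<Bls12_381>::circuit_specific_setup(c, rng)?;
+//     let proof = Groth16::<Bls12_381>::prove(&pk, c, rng)?;
+//     assert!(Groth16::<Bls12_381>::verify(&vk, &public_inputs, &proof)?);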
+ +use ark_bls12_381::{Bls12_381, Fr as BlsFr}; +use ark_crypto_primitives::SNARK; +use ark_ff::{PrimeField, UniformRand}; +use ark_groth16::Groth16; +use ark_mnt4_298::{Fr as MNT4Fr, MNT4_298}; +use ark_mnt4_753::{Fr as MNT4BigFr, MNT4_753}; +use ark_mnt6_298::{Fr as MNT6Fr, MNT6_298}; +use ark_mnt6_753::{Fr as MNT6BigFr, MNT6_753}; +use ark_relations::{ + lc, + r1cs::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisError}, +}; +use ark_std::ops::Mul; + +const NUM_PROVE_REPEATITIONS: usize = 10; +const NUM_VERIFY_REPEATITIONS: usize = 50; + +#[derive(Copy)] +struct DummyCircuit { + pub a: Option, + pub b: Option, + pub num_variables: usize, + pub num_constraints: usize, +} + +impl Clone for DummyCircuit { + fn clone(&self) -> Self { + DummyCircuit { + a: self.a.clone(), + b: self.b.clone(), + num_variables: self.num_variables.clone(), + num_constraints: self.num_constraints.clone(), + } + } +} + +impl ConstraintSynthesizer for DummyCircuit { + fn generate_constraints(self, cs: ConstraintSystemRef) -> Result<(), SynthesisError> { + let a = cs.new_witness_variable(|| self.a.ok_or(SynthesisError::AssignmentMissing))?; + let b = cs.new_witness_variable(|| self.b.ok_or(SynthesisError::AssignmentMissing))?; + let c = cs.new_input_variable(|| { + let a = self.a.ok_or(SynthesisError::AssignmentMissing)?; + let b = self.b.ok_or(SynthesisError::AssignmentMissing)?; + + Ok(a * b) + })?; + + for _ in 0..(self.num_variables - 3) { + let _ = cs.new_witness_variable(|| self.a.ok_or(SynthesisError::AssignmentMissing))?; + } + + for _ in 0..self.num_constraints - 1 { + cs.enforce_constraint(lc!() + a, lc!() + b, lc!() + c)?; + } + + cs.enforce_constraint(lc!(), lc!(), lc!())?; + + Ok(()) + } +} + +macro_rules! groth16_prove_bench { + ($bench_name:ident, $bench_field:ty, $bench_pairing_engine:ty) => { + let rng = &mut ark_std::test_rng(); + let c = DummyCircuit::<$bench_field> { + a: Some(<$bench_field>::rand(rng)), + b: Some(<$bench_field>::rand(rng)), + num_variables: 10, + num_constraints: 65536, + }; + + let (pk, _) = Groth16::<$bench_pairing_engine>::circuit_specific_setup(c, rng).unwrap(); + + let start = ark_std::time::Instant::now(); + + for _ in 0..NUM_PROVE_REPEATITIONS { + let _ = Groth16::<$bench_pairing_engine>::prove(&pk, c.clone(), rng).unwrap(); + } + + println!( + "per-constraint proving time for {}: {} ns/constraint", + stringify!($bench_pairing_engine), + start.elapsed().as_nanos() / NUM_PROVE_REPEATITIONS as u128 / 65536u128 + ); + }; +} + +macro_rules! 
groth16_verify_bench { + ($bench_name:ident, $bench_field:ty, $bench_pairing_engine:ty) => { + let rng = &mut ark_std::test_rng(); + let c = DummyCircuit::<$bench_field> { + a: Some(<$bench_field>::rand(rng)), + b: Some(<$bench_field>::rand(rng)), + num_variables: 10, + num_constraints: 65536, + }; + + let (pk, vk) = Groth16::<$bench_pairing_engine>::circuit_specific_setup(c, rng).unwrap(); + let proof = Groth16::<$bench_pairing_engine>::prove(&pk, c.clone(), rng).unwrap(); + + let v = c.a.unwrap().mul(c.b.unwrap()); + + let start = ark_std::time::Instant::now(); + + for _ in 0..NUM_VERIFY_REPEATITIONS { + let _ = Groth16::<$bench_pairing_engine>::verify(&vk, &vec![v], &proof).unwrap(); + } + + println!( + "verifying time for {}: {} ns", + stringify!($bench_pairing_engine), + start.elapsed().as_nanos() / NUM_VERIFY_REPEATITIONS as u128 + ); + }; +} + +fn bench_prove() { + groth16_prove_bench!(bls, BlsFr, Bls12_381); + groth16_prove_bench!(mnt4, MNT4Fr, MNT4_298); + groth16_prove_bench!(mnt6, MNT6Fr, MNT6_298); + groth16_prove_bench!(mnt4big, MNT4BigFr, MNT4_753); + groth16_prove_bench!(mnt6big, MNT6BigFr, MNT6_753); +} + +fn bench_verify() { + groth16_verify_bench!(bls, BlsFr, Bls12_381); + groth16_verify_bench!(mnt4, MNT4Fr, MNT4_298); + groth16_verify_bench!(mnt6, MNT6Fr, MNT6_298); + groth16_verify_bench!(mnt4big, MNT4BigFr, MNT4_753); + groth16_verify_bench!(mnt6big, MNT6BigFr, MNT6_753); +} + +fn main() { + bench_prove(); + bench_verify(); +} diff --git a/arkworks/groth16/examples/recursive-snark/constraints.rs b/arkworks/groth16/examples/recursive-snark/constraints.rs new file mode 100644 index 00000000..4d90fa2a --- /dev/null +++ b/arkworks/groth16/examples/recursive-snark/constraints.rs @@ -0,0 +1,364 @@ +use algebra::{ + fields::{FftParameters, FpParameters}, + BigInteger, Field, PrimeField, +}; +use algebra_core::{PairingEngine, ToConstraintField}; +use core::ops::MulAssign; +use crypto_primitives::nizk::{ + constraints::NIZKVerifierGadget, + groth16::{ + constraints::{Groth16VerifierGadget, ProofVar, VerifyingKeyVar}, + Groth16, + }, +}; +use groth16::{Parameters, Proof}; +use r1cs_core::{lc, ConstraintSynthesizer, ConstraintSystemRef, SynthesisError}; +use r1cs_std::{fields::fp::FpVar, pairing::PairingVar as PG, prelude::*}; +use std::marker::PhantomData; + +pub trait CurvePair +where + ::G1Projective: + MulAssign<::Fq>, + ::G2Projective: + MulAssign<::Fq>, + ::G1Affine: + ToConstraintField<<::Fr as Field>::BasePrimeField>, + ::G2Affine: + ToConstraintField<<::Fr as Field>::BasePrimeField>, +{ + type TickGroup: PairingEngine< + Fq = ::Fr, + Fr = ::Fq, + >; + type TockGroup: PairingEngine; + + const TICK_CURVE: &'static str; + const TOCK_CURVE: &'static str; +} + +// Verifying InnerCircuit in MiddleCircuit +type InnerProofSystem = Groth16< + ::TickGroup, + InnerCircuit<<::TickGroup as PairingEngine>::Fr>, + <::TickGroup as PairingEngine>::Fr, +>; + +type InnerVerifierGadget = Groth16VerifierGadget<::TickGroup, PV>; +type InnerProofVar = ProofVar<::TickGroup, PV>; +type InnerVkVar = VerifyingKeyVar<::TickGroup, PV>; + +// Verifying MiddleCircuit in OuterCircuit +type MiddleProofSystem = Groth16< + ::TockGroup, + MiddleCircuit, + <::TockGroup as PairingEngine>::Fr, +>; +type MiddleVerifierGadget = Groth16VerifierGadget<::TockGroup, PV>; +type MiddleProofVar = ProofVar<::TockGroup, PV>; +type MiddleVkVar = VerifyingKeyVar<::TockGroup, PV>; + +pub struct InnerCircuit { + num_constraints: usize, + inputs: Vec, +} + +impl InnerCircuit { + pub fn new(num_constraints: usize, inputs: 
Vec) -> Self { + Self { + num_constraints, + inputs, + } + } +} + +impl ConstraintSynthesizer for InnerCircuit { + fn generate_constraints(self, cs: ConstraintSystemRef) -> Result<(), SynthesisError> { + assert!(self.inputs.len() >= 2); + assert!(self.num_constraints >= self.inputs.len()); + + let mut variables: Vec<_> = Vec::with_capacity(self.inputs.len()); + for input in self.inputs.into_iter() { + let input_var = cs.new_input_variable(|| Ok(input))?; + variables.push((input, input_var)); + } + + for i in 0..self.num_constraints { + let new_entry = { + let (input_1_val, input_1_var) = variables[i]; + let (input_2_val, input_2_var) = variables[i + 1]; + let result_val = input_1_val * input_2_val; + let result_var = cs.new_witness_variable(|| Ok(result_val))?; + cs.enforce_constraint( + lc!() + input_1_var, + lc!() + input_2_var, + lc!() + result_var, + )?; + (result_val, result_var) + }; + variables.push(new_entry); + } + Ok(()) + } +} + +pub struct MiddleCircuit> +where + ::G1Projective: MulAssign<::Fq>, + ::G2Projective: MulAssign<::Fq>, + ::G1Affine: + ToConstraintField<<::Fr as Field>::BasePrimeField>, + ::G2Affine: + ToConstraintField<<::Fr as Field>::BasePrimeField>, +{ + inputs: Vec<::Fr>, + params: Parameters, + proof: Proof, + _curve_pair: PhantomData, + _tick_pairing: PhantomData, +} + +impl> MiddleCircuit +where + ::G1Projective: MulAssign<::Fq>, + ::G2Projective: MulAssign<::Fq>, + ::G1Affine: + ToConstraintField<<::Fr as Field>::BasePrimeField>, + ::G2Affine: + ToConstraintField<<::Fr as Field>::BasePrimeField>, +{ + pub fn new( + inputs: Vec<::Fr>, + params: Parameters, + proof: Proof, + ) -> Self { + Self { + inputs, + params, + proof, + _curve_pair: PhantomData, + _tick_pairing: PhantomData, + } + } + + pub fn inputs( + inputs: &[::Fr], + ) -> Vec<::Fr> { + let input_bytes = inputs + .iter() + .flat_map(|input| { + input + .into_repr() + .as_ref() + .iter() + .flat_map(|l| l.to_le_bytes().to_vec()) + .collect::>() + }) + .collect::>(); + + input_bytes[..].to_field_elements().unwrap() + } +} + +impl ConstraintSynthesizer<::Fr> + for MiddleCircuit +where + C: CurvePair, + TickPairing: PG, + ::G1Projective: MulAssign<::Fq>, + ::G2Projective: MulAssign<::Fq>, + ::G1Affine: + ToConstraintField<<::Fr as Field>::BasePrimeField>, + ::G2Affine: + ToConstraintField<<::Fr as Field>::BasePrimeField>, +{ + fn generate_constraints( + self, + cs: ConstraintSystemRef<::Fr>, + ) -> Result<(), SynthesisError> { + let params = self.params; + let proof = self.proof; + let inputs = self.inputs; + let input_gadgets; + + { + let ns = r1cs_core::ns!(cs, "Allocate Input"); + let cs = ns.cs(); + // Chain all input values in one large byte array. + let input_bytes = inputs + .into_iter() + .flat_map(|input| { + input + .into_repr() + .as_ref() + .iter() + .flat_map(|l| l.to_le_bytes().to_vec()) + .collect::>() + }) + .collect::>(); + + // Allocate this byte array as input packed into field elements. 
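+            // Each `element_size`-byte chunk below corresponds to one
+            // serialized inner-circuit field element; its bytes are
+            // re-expanded into bits for the sub-SNARK verifier gadget.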
+ let input_bytes = UInt8::new_input_vec(r1cs_core::ns!(cs, "Input"), &input_bytes[..])?; + // 40 byte + let element_size = + <<::Fr as PrimeField>::Params as FftParameters>::BigInt::NUM_LIMBS * 8; + input_gadgets = input_bytes + .chunks(element_size) + .map(|chunk| { + chunk + .iter() + .flat_map(|byte| byte.to_bits_le().unwrap()) + .collect::>() + }) + .collect::>(); + } + println!("|---- Num inputs for sub-SNARK: {}", input_gadgets.len()); + let num_constraints = cs.num_constraints(); + println!( + "|---- Num constraints to prepare inputs: {}", + num_constraints + ); + + let vk_var = + InnerVkVar::::new_witness(r1cs_core::ns!(cs, "Vk"), || Ok(¶ms.vk))?; + let proof_var = + InnerProofVar::::new_witness(r1cs_core::ns!(cs, "Proof"), || { + Ok(proof.clone()) + })?; + as NIZKVerifierGadget< + InnerProofSystem, + ::Fr, + >>::verify(&vk_var, input_gadgets.iter(), &proof_var)? + .enforce_equal(&Boolean::TRUE)?; + println!( + "|---- Num constraints for sub-SNARK verification: {}", + cs.num_constraints() - num_constraints + ); + Ok(()) + } +} + +pub struct OuterCircuit, TickPairing: PG> +where + ::G1Projective: MulAssign<::Fq>, + ::G2Projective: MulAssign<::Fq>, + ::G1Affine: + ToConstraintField<<::Fr as Field>::BasePrimeField>, + ::G2Affine: + ToConstraintField<<::Fr as Field>::BasePrimeField>, +{ + inputs: Vec<::Fr>, + params: Parameters, + proof: Proof, + _curve_pair: PhantomData, + _tock_pairing: PhantomData, + _tick_pairing: PhantomData, +} + +impl, TickPairing: PG> + OuterCircuit +where + ::G1Projective: MulAssign<::Fq>, + ::G2Projective: MulAssign<::Fq>, + ::G1Affine: + ToConstraintField<<::Fr as Field>::BasePrimeField>, + ::G2Affine: + ToConstraintField<<::Fr as Field>::BasePrimeField>, +{ + pub fn new( + inputs: Vec<::Fr>, + params: Parameters, + proof: Proof, + ) -> Self { + Self { + inputs, + params, + proof, + _curve_pair: PhantomData, + _tock_pairing: PhantomData, + _tick_pairing: PhantomData, + } + } +} + +impl, TickPairing: PG> + ConstraintSynthesizer<::Fr> + for OuterCircuit +where + ::G1Projective: MulAssign<::Fq>, + ::G2Projective: MulAssign<::Fq>, + ::G1Affine: + ToConstraintField<<::Fr as Field>::BasePrimeField>, + ::G2Affine: + ToConstraintField<<::Fr as Field>::BasePrimeField>, +{ + fn generate_constraints( + self, + cs: ConstraintSystemRef<::Fr>, + ) -> Result<(), SynthesisError> { + let params = self.params; + let proof = self.proof; + let inputs = self.inputs; + let mut input_gadgets = Vec::new(); + + { + let bigint_size = + <::Fr as PrimeField>::BigInt::NUM_LIMBS * 64; + let mut input_bits = Vec::new(); + let ns = r1cs_core::ns!(cs, "Allocate Input"); + let cs = ns.cs(); + + for input in inputs.into_iter() { + let input_gadget = FpVar::new_input(r1cs_core::ns!(cs, "Input"), || Ok(input))?; + let mut fp_bits = input_gadget.to_bits_le()?; + + // Use 320 bits per element. + for _ in fp_bits.len()..bigint_size { + fp_bits.push(Boolean::constant(false)); + } + input_bits.extend_from_slice(&fp_bits); + } + + // Pack input bits into field elements of the underlying circuit. 
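+            // Each chunk is capped at CAPACITY bits (rounded down to a whole
+            // number of bytes) so that it fits into a single field element of
+            // this circuit, then zero-padded to the gadget's BigInt bit size.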
+ let max_size = 8 + * (<<::Fr as PrimeField>::Params as FpParameters>::CAPACITY / 8) + as usize; + let bigint_size = + <<::Fr as PrimeField>::Params as FftParameters>::BigInt::NUM_LIMBS * 64; + for chunk in input_bits.chunks(max_size) { + let mut chunk = chunk.to_vec(); + let len = chunk.len(); + for _ in len..bigint_size { + chunk.push(Boolean::constant(false)); + } + input_gadgets.push(chunk); + } + } + println!("|---- Num inputs for sub-SNARK: {}", input_gadgets.len()); + let num_constraints = cs.num_constraints(); + println!( + "|---- Num constraints to prepare inputs: {}", + num_constraints + ); + + let vk_var = + MiddleVkVar::::new_witness( + r1cs_core::ns!(cs, "Vk"), + || Ok(¶ms.vk), + )?; + let proof_var = + MiddleProofVar::::new_witness(r1cs_core::ns!(cs, "Proof"), || { + Ok(proof.clone()) + })?; + as NIZKVerifierGadget< + MiddleProofSystem, + ::Fr, + >>::verify(&vk_var, &input_gadgets, &proof_var)? + .enforce_equal(&Boolean::TRUE)?; + println!( + "|---- Num constraints for sub-SNARK verification: {}", + cs.num_constraints() - num_constraints + ); + Ok(()) + } +} diff --git a/arkworks/groth16/examples/recursive-snark/groth16.rs b/arkworks/groth16/examples/recursive-snark/groth16.rs new file mode 100644 index 00000000..d21b31ed --- /dev/null +++ b/arkworks/groth16/examples/recursive-snark/groth16.rs @@ -0,0 +1,325 @@ +#![warn(unused)] +#![deny( + trivial_casts, + trivial_numeric_casts, + variant_size_differences, + stable_features, + non_shorthand_field_patterns, + renamed_and_removed_lints, + private_in_public, + unsafe_code +)] + +use csv; +use std::ops::MulAssign; + +// For randomness (during paramgen and proof generation) +use algebra_core::{test_rng, PairingEngine}; + +// For benchmarking +use std::{ + env, + error::Error, + fs::OpenOptions, + path::PathBuf, + process, + time::{Duration, Instant}, +}; + +pub use algebra::{mnt4_298, mnt6_298, Field, ToConstraintField, UniformRand}; +use r1cs_std::pairing::PairingVar as PG; + +// We're going to use the Groth 16 proving system. 
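+// NOTE: this example still uses the pre-arkworks crate names (`algebra`,
+// `r1cs_core`, `groth16`) and the free-function API; the corresponding
+// `[[example]]` entry is commented out in Cargo.toml.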
+use groth16::{ + create_random_proof, generate_random_parameters, prepare_verifying_key, verify_proof, +}; + +mod constraints; +use crate::constraints::{CurvePair, InnerCircuit, MiddleCircuit, OuterCircuit}; + +struct MNT46; +impl CurvePair for MNT46 { + type TickGroup = mnt4_298::MNT4_298; + type TockGroup = mnt6_298::MNT6_298; + const TICK_CURVE: &'static str = "MNT4_298"; + const TOCK_CURVE: &'static str = "MNT6_298"; +} +struct MNT64; +impl CurvePair for MNT64 { + type TickGroup = mnt6_298::MNT6_298; + type TockGroup = mnt4_298::MNT4_298; + const TICK_CURVE: &'static str = "MNT6_298"; + const TOCK_CURVE: &'static str = "MNT4_298"; +} + +fn main() -> Result<(), Box> { + let args: Vec = env::args().collect(); + if args.len() < 3 || args[1] == "-h" || args[1] == "--help" { + println!( + "\nHelp: Invoke this as []\n" + ); + println!(" defines the order in which the MNT4/6 curves should be used:"); + println!("46 (default) uses the MNT4_298 curve for the inner and outer circuit;"); + println!("64 uses the MNT6_298 curve for the inner and outer circuit."); + return Ok(()); + } + let num_constraints: usize = args[1].parse().unwrap(); + let output_file_path = PathBuf::from(args[2].clone()); + + if args.len() < 4 || args[3] == "46" { + run::( + num_constraints, + output_file_path, + ) + } else { + run::( + num_constraints, + output_file_path, + ) + } +} + +fn run, TockPairing: PG>( + num_constraints: usize, + output_file_path: PathBuf, +) -> Result<(), Box> +where + ::G1Projective: MulAssign<::Fq>, + ::G2Projective: MulAssign<::Fq>, + ::G1Affine: + ToConstraintField<<::Fr as Field>::BasePrimeField>, + ::G2Affine: + ToConstraintField<<::Fr as Field>::BasePrimeField>, +{ + let mut wtr = if !output_file_path.exists() { + println!("Creating output file"); + let f = OpenOptions::new() + .create(true) + .append(true) + .open(output_file_path)?; + let mut wtr = csv::Writer::from_writer(f); + wtr.write_record(&[ + "num_constraints", + "setup_inner", + "prover_inner", + "setup_middle", + "prover_middle", + "setup_outer", + "prover_outer", + "verifier_outer", + ])?; + wtr + } else if output_file_path.is_file() { + let f = OpenOptions::new().append(true).open(output_file_path)?; + csv::Writer::from_writer(f) + } else { + println!("Path to output file does not point to a file."); + process::exit(1); + }; + // This may not be cryptographically safe, use + // `OsRng` (for example) in production software. + let rng = &mut test_rng(); + + // Let's benchmark stuff! + let samples = 1; + let mut total_setup_inner = Duration::new(0, 0); + let mut total_proving_inner = Duration::new(0, 0); + let mut total_setup_middle = Duration::new(0, 0); + let mut total_proving_middle = Duration::new(0, 0); + let mut total_setup_outer = Duration::new(0, 0); + let mut total_proving_outer = Duration::new(0, 0); + let mut total_verifying_outer = Duration::new(0, 0); + + // Just a place to put the proof data, so we can + // benchmark deserialization. + // let mut proof_vec = vec![]; + + for sample in 0..samples { + println!("Running sample {}/{}", sample + 1, samples); + let mut inputs: Vec<::Fr> = + Vec::with_capacity(num_constraints); + for _ in 0..num_constraints { + inputs.push(<::Fr as UniformRand>::rand( + rng, + )); + } + + // Create parameters for our inner circuit + println!("|-- Generating inner parameters ({})", C::TICK_CURVE); + let start = Instant::now(); + let params_inner = { + let c = InnerCircuit::<::Fr>::new( + num_constraints, + inputs.clone(), + ); + generate_random_parameters(c, rng)? 
+ }; + total_setup_inner += start.elapsed(); + + // proof_vec.truncate(0); + println!("|-- Generating inner proof ({})", C::TICK_CURVE); + let start = Instant::now(); + let proof_inner = { + // Create an instance of our inner circuit (with the witness) + let c = InnerCircuit::new(num_constraints, inputs.clone()); + // Create a proof with our parameters. + create_random_proof(c, ¶ms_inner, rng)? + }; + total_proving_inner += start.elapsed(); + + // Verify inner proof. + let pvk = prepare_verifying_key(¶ms_inner.vk); + assert!(verify_proof(&pvk, &proof_inner, &inputs).unwrap()); + + // Create parameters for our middle circuit + println!("|-- Generating middle parameters ({})", C::TOCK_CURVE); + let start = Instant::now(); + let params_middle = { + let c = MiddleCircuit::::new( + inputs.clone(), + params_inner.clone(), + proof_inner.clone(), + ); + generate_random_parameters(c, rng)? + }; + total_setup_middle += start.elapsed(); + + // proof_vec.truncate(0); + println!("|-- Generating middle proof ({})", C::TOCK_CURVE); + let start = Instant::now(); + let proof_middle = { + // Create an instance of our middle circuit (with the witness) + let c = MiddleCircuit::::new( + inputs.clone(), + params_inner.clone(), + proof_inner.clone(), + ); + // Create a proof with our parameters. + create_random_proof(c, ¶ms_middle, rng)? + }; + total_proving_middle += start.elapsed(); + + { + let pvk = prepare_verifying_key(¶ms_middle.vk); + assert!(verify_proof( + &pvk, + &proof_middle, + &MiddleCircuit::::inputs(&inputs) + ) + .unwrap()); + } + + // Create parameters for our outer circuit + println!("|-- Generating outer parameters ({})", C::TICK_CURVE); + let start = Instant::now(); + let params_outer = { + let c = OuterCircuit::::new( + inputs.clone(), + params_middle.clone(), + proof_middle.clone(), + ); + generate_random_parameters::(c, rng)? + }; + + // Prepare the verification key (for proof verification) + let pvk = prepare_verifying_key(¶ms_outer.vk); + total_setup_outer += start.elapsed(); + + // proof_vec.truncate(0); + println!("|-- Generating outer proof ({})", C::TICK_CURVE); + let start = Instant::now(); + let proof_outer = { + // Create an instance of our outer circuit (with the witness) + let c = OuterCircuit::::new( + inputs.clone(), + params_middle.clone(), + proof_middle.clone(), + ); + // Create a proof with our parameters. + create_random_proof(c, ¶ms_outer, rng)? 
+ }; + total_proving_outer += start.elapsed(); + + println!("|-- Verify outer proof ({})", C::TICK_CURVE); + let start = Instant::now(); + // let proof = Proof::read(&proof_vec[..]).unwrap(); + // Check the proof + let r = verify_proof(&pvk, &proof_outer, &inputs).unwrap(); + assert!(r); + total_verifying_outer += start.elapsed(); + } + + let setup_inner_avg = total_setup_inner / samples; + let setup_inner_avg = setup_inner_avg.subsec_nanos() as f64 / 1_000_000_000f64 + + (setup_inner_avg.as_secs() as f64); + + let proving_inner_avg = total_proving_inner / samples; + let proving_inner_avg = proving_inner_avg.subsec_nanos() as f64 / 1_000_000_000f64 + + (proving_inner_avg.as_secs() as f64); + + let setup_middle_avg = total_setup_middle / samples; + let setup_middle_avg = setup_middle_avg.subsec_nanos() as f64 / 1_000_000_000f64 + + (setup_middle_avg.as_secs() as f64); + + let proving_middle_avg = total_proving_middle / samples; + let proving_middle_avg = proving_middle_avg.subsec_nanos() as f64 / 1_000_000_000f64 + + (proving_middle_avg.as_secs() as f64); + + let setup_outer_avg = total_setup_outer / samples; + let setup_outer_avg = setup_outer_avg.subsec_nanos() as f64 / 1_000_000_000f64 + + (setup_outer_avg.as_secs() as f64); + + let proving_outer_avg = total_proving_outer / samples; + let proving_outer_avg = proving_outer_avg.subsec_nanos() as f64 / 1_000_000_000f64 + + (proving_outer_avg.as_secs() as f64); + + let verifying_outer_avg = total_verifying_outer / samples; + let verifying_outer_avg = verifying_outer_avg.subsec_nanos() as f64 / 1_000_000_000f64 + + (verifying_outer_avg.as_secs() as f64); + + println!( + "=== Benchmarking recursive Groth16 with {} constraints on inner circuit: ====", + num_constraints + ); + println!( + "Average setup time (inner circuit): {:?} seconds", + setup_inner_avg + ); + println!( + "Average proving time (inner circuit): {:?} seconds", + proving_inner_avg + ); + println!( + "Average setup time (middle circuit): {:?} seconds", + setup_middle_avg + ); + println!( + "Average proving time (middle circuit): {:?} seconds", + proving_middle_avg + ); + println!( + "Average setup time (outer circuit): {:?} seconds", + setup_outer_avg + ); + println!( + "Average proving time (outer circuit): {:?} seconds", + proving_outer_avg + ); + println!( + "Average verifying time (outer circuit): {:?} seconds", + verifying_outer_avg + ); + + wtr.write_record(&[ + format!("{}", num_constraints), + format!("{}", setup_inner_avg), + format!("{}", proving_inner_avg), + format!("{}", setup_middle_avg), + format!("{}", proving_middle_avg), + format!("{}", setup_outer_avg), + format!("{}", proving_outer_avg), + format!("{}", verifying_outer_avg), + ])?; + wtr.flush()?; + Ok(()) +} diff --git a/arkworks/groth16/examples/snark-scalability/constraints.rs b/arkworks/groth16/examples/snark-scalability/constraints.rs new file mode 100644 index 00000000..8a2ede0e --- /dev/null +++ b/arkworks/groth16/examples/snark-scalability/constraints.rs @@ -0,0 +1,78 @@ +use ark_ff::Field; +use ark_relations::{ + lc, + r1cs::{ + ConstraintSynthesizer, ConstraintSystemRef, LinearCombination, SynthesisError, Variable, + }, +}; +use std::marker::PhantomData; + +pub struct Benchmark { + num_constraints: usize, + _engine: PhantomData, +} + +impl Benchmark { + pub fn new(num_constraints: usize) -> Self { + Self { + num_constraints, + _engine: PhantomData, + } + } +} + +impl ConstraintSynthesizer for Benchmark { + fn generate_constraints(self, cs: ConstraintSystemRef) -> Result<(), SynthesisError> { + let 
mut assignments = Vec::new(); + let mut a_val = F::one(); + let mut a_var = cs.new_input_variable(|| Ok(a_val))?; + assignments.push((a_val, a_var)); + + let mut b_val = F::one(); + let mut b_var = cs.new_input_variable(|| Ok(b_val))?; + assignments.push((b_val, b_var)); + + for i in 0..self.num_constraints - 1 { + if i % 2 != 0 { + let c_val = a_val * &b_val; + let c_var = cs.new_witness_variable(|| Ok(c_val))?; + + cs.enforce_constraint(lc!() + a_var, lc!() + b_var, lc!() + c_var)?; + + assignments.push((c_val, c_var)); + a_val = b_val; + a_var = b_var; + b_val = c_val; + b_var = c_var; + } else { + let c_val = a_val + &b_val; + let c_var = cs.new_witness_variable(|| Ok(c_val))?; + + cs.enforce_constraint(lc!() + a_var + b_var, lc!() + Variable::One, lc!() + c_var)?; + + assignments.push((c_val, c_var)); + a_val = b_val; + a_var = b_var; + b_val = c_val; + b_var = c_var; + } + } + + let mut a_lc = LinearCombination::zero(); + let mut b_lc = LinearCombination::zero(); + let mut c_val = F::zero(); + + for (val, var) in assignments { + a_lc = a_lc + var; + b_lc = b_lc + var; + c_val = c_val + &val; + } + c_val = c_val.square(); + + let c_var = cs.new_witness_variable(|| Ok(c_val))?; + + cs.enforce_constraint(lc!() + a_lc, lc!() + b_lc, lc!() + c_var)?; + + Ok(()) + } +} diff --git a/arkworks/groth16/examples/snark-scalability/groth16.rs b/arkworks/groth16/examples/snark-scalability/groth16.rs new file mode 100644 index 00000000..194c5c34 --- /dev/null +++ b/arkworks/groth16/examples/snark-scalability/groth16.rs @@ -0,0 +1,143 @@ +#![warn(unused)] +#![deny( + trivial_casts, + trivial_numeric_casts, + variant_size_differences, + stable_features, + non_shorthand_field_patterns, + renamed_and_removed_lints, + private_in_public, + unsafe_code +)] + +use csv; + +// For randomness (during paramgen and proof generation) +use ark_ff::One; +use ark_std::test_rng; + +// For benchmarking +use std::{ + error::Error, + time::{Duration, Instant}, +}; + +// Bring in some tools for using pairing-friendly curves +// We're going to use the BLS12-377 pairing-friendly elliptic curve. +use ark_bls12_377::{Bls12_377, Fr}; + +// We're going to use the Groth 16 proving system. +use ark_groth16::{ + create_random_proof, generate_random_parameters, prepare_verifying_key, verify_proof, +}; + +use std::{env, fs::OpenOptions, path::PathBuf, process}; + +mod constraints; +use crate::constraints::Benchmark; + +fn main() -> Result<(), Box<dyn Error>> { + let args: Vec<String> = env::args().collect(); + if args.len() < 3 || args[1] == "-h" || args[1] == "--help" { + println!("\nHelp: Invoke this as <num_constraints> <output_file_path>\n"); + return Ok(()); + } + let num_constraints: usize = args[1].parse().unwrap(); + let output_file_path = PathBuf::from(args[2].clone()); + let mut wtr = if !output_file_path.exists() { + println!("Creating output file"); + let f = OpenOptions::new() + .create(true) + .append(true) + .open(output_file_path)?; + let mut wtr = csv::Writer::from_writer(f); + wtr.write_record(&["num_constraints", "setup", "prover", "verifier"])?; + wtr + } else if output_file_path.is_file() { + let f = OpenOptions::new().append(true).open(output_file_path)?; + csv::Writer::from_writer(f) + } else { + println!("Path to output file does not point to a file."); + process::exit(1); + }; + // This may not be cryptographically safe, use + // `OsRng` (for example) in production software. + let rng = &mut test_rng(); + + // Let's benchmark stuff!
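+    // Use fewer samples for larger circuits so the total benchmark time stays
+    // bounded; all timings below are averaged over `samples` runs.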
+ let samples = if num_constraints > 10000 { + 1 + } else if num_constraints > 4096 { + 2 + } else { + 4 + }; + let mut total_setup = Duration::new(0, 0); + let mut total_proving = Duration::new(0, 0); + let mut total_verifying = Duration::new(0, 0); + + // Just a place to put the proof data, so we can + // benchmark deserialization. + // let mut proof_vec = vec![]; + + for _ in 0..samples { + // Create parameters for our circuit + let start = Instant::now(); + let params = { + let c = Benchmark::::new(num_constraints); + generate_random_parameters::(c, rng)? + }; + + // Prepare the verification key (for proof verification) + let pvk = prepare_verifying_key(¶ms.vk); + total_setup += start.elapsed(); + + // proof_vec.truncate(0); + let start = Instant::now(); + let proof = { + // Create an instance of our circuit (with the witness) + let c = Benchmark::new(num_constraints); + // Create a proof with our parameters. + create_random_proof(c, ¶ms, rng)? + }; + + total_proving += start.elapsed(); + + let inputs: Vec<_> = [Fr::one(); 2].to_vec(); + + let start = Instant::now(); + // let proof = Proof::read(&proof_vec[..]).unwrap(); + // Check the proof + let r = verify_proof(&pvk, &proof, &inputs).unwrap(); + assert!(r); + total_verifying += start.elapsed(); + } + + let setup_avg = total_setup / samples; + let setup_avg = + setup_avg.subsec_nanos() as f64 / 1_000_000_000f64 + (setup_avg.as_secs() as f64); + + let proving_avg = total_proving / samples; + let proving_avg = + proving_avg.subsec_nanos() as f64 / 1_000_000_000f64 + (proving_avg.as_secs() as f64); + + let verifying_avg = total_verifying / samples; + let verifying_avg = + verifying_avg.subsec_nanos() as f64 / 1_000_000_000f64 + (verifying_avg.as_secs() as f64); + + println!( + "=== Benchmarking Groth16 with {} constraints: ====", + num_constraints + ); + println!("Average setup time: {:?} seconds", setup_avg); + println!("Average proving time: {:?} seconds", proving_avg); + println!("Average verifying time: {:?} seconds", verifying_avg); + + wtr.write_record(&[ + format!("{}", num_constraints), + format!("{}", setup_avg), + format!("{}", proving_avg), + format!("{}", verifying_avg), + ])?; + wtr.flush()?; + Ok(()) +} diff --git a/arkworks/groth16/rustfmt.toml b/arkworks/groth16/rustfmt.toml new file mode 100644 index 00000000..71712138 --- /dev/null +++ b/arkworks/groth16/rustfmt.toml @@ -0,0 +1,9 @@ +reorder_imports = true +wrap_comments = true +normalize_comments = true +use_try_shorthand = true +match_block_trailing_comma = true +use_field_init_shorthand = true +edition = "2018" +condense_wildcard_suffixes = true +merge_imports = true diff --git a/arkworks/groth16/scripts/install-hook.sh b/arkworks/groth16/scripts/install-hook.sh new file mode 100755 index 00000000..eafcf818 --- /dev/null +++ b/arkworks/groth16/scripts/install-hook.sh @@ -0,0 +1,9 @@ +#!/bin/env bash +# This script will install the provided directory ../.hooks as the hook +# directory for the present repo. See there for hooks, including a pre-commit +# hook that runs rustfmt on files before a commit. 
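+# Usage: bash scripts/install-hook.sh
+# (The script resolves its own location via BASH_SOURCE, so it can be invoked
+# from any working directory.)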
+ +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +HOOKS_DIR="${DIR}/../.hooks" + +git config core.hooksPath "$HOOKS_DIR" diff --git a/arkworks/groth16/scripts/linkify_changelog.py b/arkworks/groth16/scripts/linkify_changelog.py new file mode 100644 index 00000000..867ae14d --- /dev/null +++ b/arkworks/groth16/scripts/linkify_changelog.py @@ -0,0 +1,31 @@ +import re +import sys +import fileinput +import os + +# Set this to the name of the repo, if you don't want it to be read from the filesystem. +# It assumes the changelog file is in the root of the repo. +repo_name = "" + +# This script goes through the provided file, and replaces any " \#", +# with the valid mark down formatted link to it. e.g. +# " [\#number](https://github.com/arkworks-rs/template/pull/) +# Note that if the number is for a an issue, github will auto-redirect you when you click the link. +# It is safe to run the script multiple times in succession. +# +# Example usage $ python3 linkify_changelog.py ../CHANGELOG.md +if len(sys.argv) < 2: + print("Must include path to changelog as the first argument to the script") + print("Example Usage: python3 linkify_changelog.py ../CHANGELOG.md") + exit() + +changelog_path = sys.argv[1] +if repo_name == "": + path = os.path.abspath(changelog_path) + components = path.split(os.path.sep) + repo_name = components[-2] + +for line in fileinput.input(inplace=True): + line = re.sub(r"\- #([0-9]*)", r"- [\\#\1](https://github.com/arkworks-rs/" + repo_name + r"/pull/\1)", line.rstrip()) + # edits the current file + print(line) \ No newline at end of file diff --git a/arkworks/groth16/src/constraints.rs b/arkworks/groth16/src/constraints.rs new file mode 100644 index 00000000..5ff8af42 --- /dev/null +++ b/arkworks/groth16/src/constraints.rs @@ -0,0 +1,535 @@ +use crate::{Groth16, PreparedVerifyingKey, Proof, VerifyingKey}; +use ark_crypto_primitives::snark::constraints::{CircuitSpecificSetupSNARKGadget, SNARKGadget}; +use ark_crypto_primitives::snark::{BooleanInputVar, SNARK}; +use ark_ec::{AffineCurve, PairingEngine}; +use ark_r1cs_std::groups::CurveVar; +use ark_r1cs_std::{ + alloc::{AllocVar, AllocationMode}, + bits::boolean::Boolean, + bits::uint8::UInt8, + eq::EqGadget, + pairing::PairingVar, + ToBitsGadget, ToBytesGadget, +}; +use ark_relations::r1cs::{Namespace, SynthesisError}; +use ark_std::{borrow::Borrow, marker::PhantomData, vec::Vec}; + +/// The proof variable for the Groth16 construction +#[derive(Derivative)] +#[derivative(Clone(bound = "P::G1Var: Clone, P::G2Var: Clone"))] +pub struct ProofVar> { + /// The `A` element in `G1`. + pub a: P::G1Var, + /// The `B` element in `G2`. + pub b: P::G2Var, + /// The `C` element in `G1`. + pub c: P::G1Var, +} + +/// A variable representing the Groth16 verifying key in the constraint system. +#[derive(Derivative)] +#[derivative( + Clone(bound = "P::G1Var: Clone, P::GTVar: Clone, P::G1PreparedVar: Clone, \ + P::G2PreparedVar: Clone, ") +)] +pub struct VerifyingKeyVar> { + #[doc(hidden)] + pub alpha_g1: P::G1Var, + #[doc(hidden)] + pub beta_g2: P::G2Var, + #[doc(hidden)] + pub gamma_g2: P::G2Var, + #[doc(hidden)] + pub delta_g2: P::G2Var, + #[doc(hidden)] + pub gamma_abc_g1: Vec, +} + +impl> VerifyingKeyVar { + /// Prepare `self` for use in proof verification. 
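+    /// This precomputes `e(alpha_g1, beta_g2)` and the prepared negations of
+    /// `gamma_g2` and `delta_g2`, so repeated verifications with the same key
+    /// avoid recomputing them.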
+    pub fn prepare(&self) -> Result<PreparedVerifyingKeyVar<E, P>, SynthesisError> {
+        let alpha_g1_pc = P::prepare_g1(&self.alpha_g1)?;
+        let beta_g2_pc = P::prepare_g2(&self.beta_g2)?;
+
+        let alpha_g1_beta_g2 = P::pairing(alpha_g1_pc, beta_g2_pc)?;
+        let gamma_g2_neg_pc = P::prepare_g2(&self.gamma_g2.negate()?)?;
+        let delta_g2_neg_pc = P::prepare_g2(&self.delta_g2.negate()?)?;
+
+        Ok(PreparedVerifyingKeyVar {
+            alpha_g1_beta_g2,
+            gamma_g2_neg_pc,
+            delta_g2_neg_pc,
+            gamma_abc_g1: self.gamma_abc_g1.clone(),
+        })
+    }
+}
+
+/// Preprocessed verification key parameters variable for the Groth16 construction
+#[derive(Derivative)]
+#[derivative(
+    Clone(bound = "P::G1Var: Clone, P::GTVar: Clone, P::G1PreparedVar: Clone, \
+    P::G2PreparedVar: Clone, ")
+)]
+pub struct PreparedVerifyingKeyVar<E: PairingEngine, P: PairingVar<E>> {
+    #[doc(hidden)]
+    pub alpha_g1_beta_g2: P::GTVar,
+    #[doc(hidden)]
+    pub gamma_g2_neg_pc: P::G2PreparedVar,
+    #[doc(hidden)]
+    pub delta_g2_neg_pc: P::G2PreparedVar,
+    #[doc(hidden)]
+    pub gamma_abc_g1: Vec<P::G1Var>,
+}
+
+/// Constraints for the verifier of the SNARK of [[Groth16]](https://eprint.iacr.org/2016/260.pdf).
+pub struct Groth16VerifierGadget<E, P>
+where
+    E: PairingEngine,
+    P: PairingVar<E>,
+{
+    _pairing_engine: PhantomData<E>,
+    _pairing_gadget: PhantomData<P>,
+}
+
+impl<E: PairingEngine, P: PairingVar<E>> SNARKGadget<E::Fr, E::Fq, Groth16<E>>
+    for Groth16VerifierGadget<E, P>
+{
+    type ProcessedVerifyingKeyVar = PreparedVerifyingKeyVar<E, P>;
+    type VerifyingKeyVar = VerifyingKeyVar<E, P>;
+    type InputVar = BooleanInputVar<E::Fr, E::Fq>;
+    type ProofVar = ProofVar<E, P>;
+
+    type VerifierSize = usize;
+
+    fn verifier_size(
+        circuit_vk: &<Groth16<E> as SNARK<E::Fr>>::VerifyingKey,
+    ) -> Self::VerifierSize {
+        circuit_vk.gamma_abc_g1.len()
+    }
+
+    /// Allocates `N::Proof` in `cs` without performing
+    /// subgroup checks.
+    #[tracing::instrument(target = "r1cs", skip(cs, f))]
+    fn new_proof_unchecked<T: Borrow<Proof<E>>>(
+        cs: impl Into<Namespace<E::Fq>>,
+        f: impl FnOnce() -> Result<T, SynthesisError>,
+        mode: AllocationMode,
+    ) -> Result<Self::ProofVar, SynthesisError> {
+        let ns = cs.into();
+        let cs = ns.cs();
+        f().and_then(|proof| {
+            let proof = proof.borrow();
+            let a = CurveVar::new_variable_omit_prime_order_check(
+                ark_relations::ns!(cs, "Proof.a"),
+                || Ok(proof.a.into_projective()),
+                mode,
+            )?;
+            let b = CurveVar::new_variable_omit_prime_order_check(
+                ark_relations::ns!(cs, "Proof.b"),
+                || Ok(proof.b.into_projective()),
+                mode,
+            )?;
+            let c = CurveVar::new_variable_omit_prime_order_check(
+                ark_relations::ns!(cs, "Proof.c"),
+                || Ok(proof.c.into_projective()),
+                mode,
+            )?;
+            Ok(ProofVar { a, b, c })
+        })
+    }
+
+    /// Allocates `N::VerifyingKey` in `cs` without performing
+    /// subgroup checks.
+    #[tracing::instrument(target = "r1cs", skip(cs, f))]
+    fn new_verification_key_unchecked<T: Borrow<VerifyingKey<E>>>(
+        cs: impl Into<Namespace<E::Fq>>,
+        f: impl FnOnce() -> Result<T, SynthesisError>,
+        mode: AllocationMode,
+    ) -> Result<Self::VerifyingKeyVar, SynthesisError> {
+        let ns = cs.into();
+        let cs = ns.cs();
+        f().and_then(|vk| {
+            let vk = vk.borrow();
+            let alpha_g1 = P::G1Var::new_variable_omit_prime_order_check(
+                ark_relations::ns!(cs, "alpha_g1"),
+                || Ok(vk.alpha_g1.into_projective()),
+                mode,
+            )?;
+            let beta_g2 = P::G2Var::new_variable_omit_prime_order_check(
+                ark_relations::ns!(cs, "beta_g2"),
+                || Ok(vk.beta_g2.into_projective()),
+                mode,
+            )?;
+            let gamma_g2 = P::G2Var::new_variable_omit_prime_order_check(
+                ark_relations::ns!(cs, "gamma_g2"),
+                || Ok(vk.gamma_g2.into_projective()),
+                mode,
+            )?;
+            let delta_g2 = P::G2Var::new_variable_omit_prime_order_check(
+                ark_relations::ns!(cs, "delta_g2"),
+                || Ok(vk.delta_g2.into_projective()),
+                mode,
+            )?;
+            let gamma_abc_g1 = vk
+                .gamma_abc_g1
+                .iter()
+                .map(|g| {
+                    P::G1Var::new_variable_omit_prime_order_check(
+                        ark_relations::ns!(cs, "gamma_abc_g1"),
+                        || Ok(g.into_projective()),
+                        mode,
+                    )
+                })
+                .collect::<Result<Vec<_>, _>>()?;
+
+            Ok(VerifyingKeyVar {
+                alpha_g1,
+                beta_g2,
+                gamma_g2,
+                delta_g2,
+                gamma_abc_g1,
+            })
+        })
+    }
+
+    #[tracing::instrument(target = "r1cs", skip(circuit_pvk, x, proof))]
+    fn verify_with_processed_vk(
+        circuit_pvk: &Self::ProcessedVerifyingKeyVar,
+        x: &Self::InputVar,
+        proof: &Self::ProofVar,
+    ) -> Result<Boolean<E::Fq>, SynthesisError> {
+        let circuit_pvk = circuit_pvk.clone();
+
+        let g_ic = {
+            let mut g_ic: P::G1Var = circuit_pvk.gamma_abc_g1[0].clone();
+            let mut input_len = 1;
+            let mut public_inputs = x.clone().into_iter();
+            for (input, b) in public_inputs
+                .by_ref()
+                .zip(circuit_pvk.gamma_abc_g1.iter().skip(1))
+            {
+                let encoded_input_i: P::G1Var = b.scalar_mul_le(input.to_bits_le()?.iter())?;
+                g_ic += encoded_input_i;
+                input_len += 1;
+            }
+            // Check that the input and the query in the verification are of the
+            // same length.
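+            // (`gamma_abc_g1` always has one more element than the public input,
+            // accounting for the constant-`1` input at index 0.)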
+ assert!(input_len == circuit_pvk.gamma_abc_g1.len() && public_inputs.next().is_none()); + g_ic + }; + + let test_exp = { + let proof_a_prep = P::prepare_g1(&proof.a)?; + let proof_b_prep = P::prepare_g2(&proof.b)?; + let proof_c_prep = P::prepare_g1(&proof.c)?; + + let g_ic_prep = P::prepare_g1(&g_ic)?; + + P::miller_loop( + &[proof_a_prep, g_ic_prep, proof_c_prep], + &[ + proof_b_prep, + circuit_pvk.gamma_g2_neg_pc.clone(), + circuit_pvk.delta_g2_neg_pc.clone(), + ], + )? + }; + + let test = P::final_exponentiation(&test_exp)?; + test.is_eq(&circuit_pvk.alpha_g1_beta_g2) + } + + #[tracing::instrument(target = "r1cs", skip(circuit_vk, x, proof))] + fn verify( + circuit_vk: &Self::VerifyingKeyVar, + x: &Self::InputVar, + proof: &Self::ProofVar, + ) -> Result, SynthesisError> { + let pvk = circuit_vk.prepare()?; + Self::verify_with_processed_vk(&pvk, x, proof) + } +} + +impl CircuitSpecificSetupSNARKGadget> for Groth16VerifierGadget +where + E: PairingEngine, + P: PairingVar, +{ +} + +impl AllocVar, E::Fq> for PreparedVerifyingKeyVar +where + E: PairingEngine, + P: PairingVar, +{ + #[tracing::instrument(target = "r1cs", skip(cs, f))] + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + + f().and_then(|pvk| { + let pvk = pvk.borrow(); + let alpha_g1_beta_g2 = P::GTVar::new_variable( + ark_relations::ns!(cs, "alpha_g1_beta_g2"), + || Ok(pvk.alpha_g1_beta_g2.clone()), + mode, + )?; + + let gamma_g2_neg_pc = P::G2PreparedVar::new_variable( + ark_relations::ns!(cs, "gamma_g2_neg_pc"), + || Ok(pvk.gamma_g2_neg_pc.clone()), + mode, + )?; + + let delta_g2_neg_pc = P::G2PreparedVar::new_variable( + ark_relations::ns!(cs, "delta_g2_neg_pc"), + || Ok(pvk.delta_g2_neg_pc.clone()), + mode, + )?; + + let gamma_abc_g1 = Vec::new_variable( + ark_relations::ns!(cs, "gamma_abc_g1"), + || Ok(pvk.vk.gamma_abc_g1.clone()), + mode, + )?; + + Ok(Self { + alpha_g1_beta_g2, + gamma_g2_neg_pc, + delta_g2_neg_pc, + gamma_abc_g1, + }) + }) + } +} + +impl AllocVar, E::Fq> for VerifyingKeyVar +where + E: PairingEngine, + P: PairingVar, +{ + #[tracing::instrument(target = "r1cs", skip(cs, f))] + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + + f().and_then(|vk| { + let VerifyingKey { + alpha_g1, + beta_g2, + gamma_g2, + delta_g2, + gamma_abc_g1, + } = vk.borrow().clone(); + let alpha_g1 = + P::G1Var::new_variable(ark_relations::ns!(cs, "alpha_g1"), || Ok(alpha_g1), mode)?; + let beta_g2 = + P::G2Var::new_variable(ark_relations::ns!(cs, "beta_g2"), || Ok(beta_g2), mode)?; + let gamma_g2 = + P::G2Var::new_variable(ark_relations::ns!(cs, "gamma_g2"), || Ok(gamma_g2), mode)?; + let delta_g2 = + P::G2Var::new_variable(ark_relations::ns!(cs, "delta_g2"), || Ok(delta_g2), mode)?; + + let gamma_abc_g1 = Vec::new_variable(cs.clone(), || Ok(gamma_abc_g1), mode)?; + Ok(Self { + alpha_g1, + beta_g2, + gamma_g2, + delta_g2, + gamma_abc_g1, + }) + }) + } +} + +impl AllocVar, E::Fq> for ProofVar +where + E: PairingEngine, + P: PairingVar, +{ + #[tracing::instrument(target = "r1cs", skip(cs, f))] + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + + f().and_then(|proof| { + let Proof { a, b, c } = proof.borrow().clone(); + let a = P::G1Var::new_variable(ark_relations::ns!(cs, "a"), || Ok(a), mode)?; + let b = 
P::G2Var::new_variable(ark_relations::ns!(cs, "b"), || Ok(b), mode)?; + let c = P::G1Var::new_variable(ark_relations::ns!(cs, "c"), || Ok(c), mode)?; + Ok(Self { a, b, c }) + }) + } +} + +impl ToBytesGadget for VerifyingKeyVar +where + E: PairingEngine, + P: PairingVar, +{ + #[inline] + #[tracing::instrument(target = "r1cs", skip(self))] + fn to_bytes(&self) -> Result>, SynthesisError> { + let mut bytes = Vec::new(); + bytes.extend_from_slice(&self.alpha_g1.to_bytes()?); + bytes.extend_from_slice(&self.beta_g2.to_bytes()?); + bytes.extend_from_slice(&self.gamma_g2.to_bytes()?); + bytes.extend_from_slice(&self.delta_g2.to_bytes()?); + for g in &self.gamma_abc_g1 { + bytes.extend_from_slice(&g.to_bytes()?); + } + Ok(bytes) + } +} + +#[cfg(test)] +mod test { + use crate::{constraints::Groth16VerifierGadget, Groth16}; + use ark_crypto_primitives::snark::constraints::SNARKGadget; + use ark_crypto_primitives::snark::{CircuitSpecificSetupSNARK, SNARK}; + use ark_ec::PairingEngine; + use ark_ff::{Field, UniformRand}; + use ark_mnt4_298::{ + constraints::PairingVar as MNT4PairingVar, Fr as MNT4Fr, MNT4_298 as MNT4PairingEngine, + }; + use ark_mnt6_298::Fr as MNT6Fr; + use ark_r1cs_std::bits::boolean::Boolean; + use ark_r1cs_std::{alloc::AllocVar, eq::EqGadget}; + use ark_relations::{ + lc, ns, + r1cs::{ConstraintSynthesizer, ConstraintSystem, ConstraintSystemRef, SynthesisError}, + }; + use ark_std::ops::MulAssign; + use ark_std::test_rng; + + #[derive(Copy, Clone)] + struct Circuit { + a: Option, + b: Option, + num_constraints: usize, + num_variables: usize, + } + + impl ConstraintSynthesizer for Circuit { + fn generate_constraints( + self, + cs: ConstraintSystemRef, + ) -> Result<(), SynthesisError> { + let a = cs.new_witness_variable(|| self.a.ok_or(SynthesisError::AssignmentMissing))?; + let b = cs.new_witness_variable(|| self.b.ok_or(SynthesisError::AssignmentMissing))?; + let c = cs.new_input_variable(|| { + let mut a = self.a.ok_or(SynthesisError::AssignmentMissing)?; + let b = self.b.ok_or(SynthesisError::AssignmentMissing)?; + + a.mul_assign(&b); + Ok(a) + })?; + + for _ in 0..(self.num_variables - 3) { + cs.new_witness_variable(|| self.a.ok_or(SynthesisError::AssignmentMissing))?; + } + + for _ in 0..self.num_constraints { + cs.enforce_constraint(lc!() + a, lc!() + b, lc!() + c) + .unwrap(); + } + Ok(()) + } + } + + type TestSNARK = Groth16; + type TestSNARKGadget = Groth16VerifierGadget; + + #[test] + fn groth16_snark_test() { + let mut rng = test_rng(); + let a = MNT4Fr::rand(&mut rng); + let b = MNT4Fr::rand(&mut rng); + let mut c = a; + c.mul_assign(&b); + + let circ = Circuit { + a: Some(a.clone()), + b: Some(b.clone()), + num_constraints: 100, + num_variables: 25, + }; + + let (pk, vk) = TestSNARK::setup(circ, &mut rng).unwrap(); + + let proof = TestSNARK::prove(&pk, circ.clone(), &mut rng).unwrap(); + + assert!( + TestSNARK::verify(&vk, &vec![c], &proof).unwrap(), + "The native verification check fails." 
+        );
+
+        let cs_sys = ConstraintSystem::<MNT6Fr>::new();
+        let cs = ConstraintSystemRef::new(cs_sys);
+
+        let input_gadget = <TestSNARKGadget as SNARKGadget<
+            <MNT4PairingEngine as PairingEngine>::Fr,
+            <MNT4PairingEngine as PairingEngine>::Fq,
+            TestSNARK,
+        >>::InputVar::new_input(ns!(cs, "new_input"), || Ok(vec![c]))
+        .unwrap();
+        let proof_gadget = <TestSNARKGadget as SNARKGadget<
+            <MNT4PairingEngine as PairingEngine>::Fr,
+            <MNT4PairingEngine as PairingEngine>::Fq,
+            TestSNARK,
+        >>::ProofVar::new_witness(ns!(cs, "alloc_proof"), || Ok(proof))
+        .unwrap();
+        let vk_gadget = <TestSNARKGadget as SNARKGadget<
+            <MNT4PairingEngine as PairingEngine>::Fr,
+            <MNT4PairingEngine as PairingEngine>::Fq,
+            TestSNARK,
+        >>::VerifyingKeyVar::new_constant(ns!(cs, "alloc_vk"), vk.clone())
+        .unwrap();
+        <TestSNARKGadget as SNARKGadget<
+            <MNT4PairingEngine as PairingEngine>::Fr,
+            <MNT4PairingEngine as PairingEngine>::Fq,
+            TestSNARK,
+        >>::verify(&vk_gadget, &input_gadget, &proof_gadget)
+        .unwrap()
+        .enforce_equal(&Boolean::constant(true))
+        .unwrap();
+
+        assert!(
+            cs.is_satisfied().unwrap(),
+            "Constraints not satisfied: {}",
+            cs.which_is_unsatisfied().unwrap().unwrap_or_default()
+        );
+
+        let pvk = TestSNARK::process_vk(&vk).unwrap();
+        let pvk_gadget = <TestSNARKGadget as SNARKGadget<
+            <MNT4PairingEngine as PairingEngine>::Fr,
+            <MNT4PairingEngine as PairingEngine>::Fq,
+            TestSNARK,
+        >>::ProcessedVerifyingKeyVar::new_constant(
+            ns!(cs, "alloc_pvk"), pvk.clone()
+        )
+        .unwrap();
+        TestSNARKGadget::verify_with_processed_vk(&pvk_gadget, &input_gadget, &proof_gadget)
+            .unwrap()
+            .enforce_equal(&Boolean::constant(true))
+            .unwrap();
+
+        assert!(
+            cs.is_satisfied().unwrap(),
+            "Constraints not satisfied: {}",
+            cs.which_is_unsatisfied().unwrap().unwrap_or_default()
+        );
+    }
+}
diff --git a/arkworks/groth16/src/data_structures.rs b/arkworks/groth16/src/data_structures.rs
new file mode 100644
index 00000000..d9a24c4a
--- /dev/null
+++ b/arkworks/groth16/src/data_structures.rs
@@ -0,0 +1,151 @@
+use ark_ec::PairingEngine;
+use ark_ff::bytes::ToBytes;
+use ark_serialize::*;
+use ark_std::{
+    io::{self, Result as IoResult},
+    vec::Vec,
+};
+
+/// A proof in the Groth16 SNARK.
+#[derive(Clone, Debug, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
+pub struct Proof<E: PairingEngine> {
+    /// The `A` element in `G1`.
+    pub a: E::G1Affine,
+    /// The `B` element in `G2`.
+    pub b: E::G2Affine,
+    /// The `C` element in `G1`.
+    pub c: E::G1Affine,
+}
+
+impl<E: PairingEngine> ToBytes for Proof<E> {
+    #[inline]
+    fn write<W: Write>(&self, mut writer: W) -> io::Result<()> {
+        self.a.write(&mut writer)?;
+        self.b.write(&mut writer)?;
+        self.c.write(&mut writer)
+    }
+}
+
+impl<E: PairingEngine> Default for Proof<E> {
+    fn default() -> Self {
+        Self {
+            a: E::G1Affine::default(),
+            b: E::G2Affine::default(),
+            c: E::G1Affine::default(),
+        }
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+
+/// A verification key in the Groth16 SNARK.
+#[derive(Clone, Debug, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
+pub struct VerifyingKey<E: PairingEngine> {
+    /// The `alpha * G`, where `G` is the generator of `E::G1`.
+    pub alpha_g1: E::G1Affine,
+    /// The `beta * H`, where `H` is the generator of `E::G2`.
+    pub beta_g2: E::G2Affine,
+    /// The `gamma * H`, where `H` is the generator of `E::G2`.
+    pub gamma_g2: E::G2Affine,
+    /// The `delta * H`, where `H` is the generator of `E::G2`.
+    pub delta_g2: E::G2Affine,
+    /// The `gamma^{-1} * (beta * a_i + alpha * b_i + c_i) * G`, where `G` is the generator of `E::G1`.
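+    /// The verifier uses these elements to fold the public input into the pairing check.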
+    pub gamma_abc_g1: Vec<E::G1Affine>,
+}
+
+impl<E: PairingEngine> ToBytes for VerifyingKey<E> {
+    fn write<W: Write>(&self, mut writer: W) -> IoResult<()> {
+        self.alpha_g1.write(&mut writer)?;
+        self.beta_g2.write(&mut writer)?;
+        self.gamma_g2.write(&mut writer)?;
+        self.delta_g2.write(&mut writer)?;
+        for q in &self.gamma_abc_g1 {
+            q.write(&mut writer)?;
+        }
+        Ok(())
+    }
+}
+
+impl<E: PairingEngine> Default for VerifyingKey<E> {
+    fn default() -> Self {
+        Self {
+            alpha_g1: E::G1Affine::default(),
+            beta_g2: E::G2Affine::default(),
+            gamma_g2: E::G2Affine::default(),
+            delta_g2: E::G2Affine::default(),
+            gamma_abc_g1: Vec::new(),
+        }
+    }
+}
+
+/// Preprocessed verification key parameters that enable faster verification
+/// at the expense of larger size in memory.
+#[derive(Clone, Debug, PartialEq)]
+pub struct PreparedVerifyingKey<E: PairingEngine> {
+    /// The unprepared verification key.
+    pub vk: VerifyingKey<E>,
+    /// The element `e(alpha * G, beta * H)` in `E::GT`.
+    pub alpha_g1_beta_g2: E::Fqk,
+    /// The element `- gamma * H` in `E::G2`, prepared for use in pairings.
+    pub gamma_g2_neg_pc: E::G2Prepared,
+    /// The element `- delta * H` in `E::G2`, prepared for use in pairings.
+    pub delta_g2_neg_pc: E::G2Prepared,
+}
+
+impl<E: PairingEngine> From<PreparedVerifyingKey<E>> for VerifyingKey<E> {
+    fn from(other: PreparedVerifyingKey<E>) -> Self {
+        other.vk
+    }
+}
+
+impl<E: PairingEngine> From<VerifyingKey<E>> for PreparedVerifyingKey<E> {
+    fn from(other: VerifyingKey<E>) -> Self {
+        crate::prepare_verifying_key(&other)
+    }
+}
+
+impl<E: PairingEngine> Default for PreparedVerifyingKey<E> {
+    fn default() -> Self {
+        Self {
+            vk: VerifyingKey::default(),
+            alpha_g1_beta_g2: E::Fqk::default(),
+            gamma_g2_neg_pc: E::G2Prepared::default(),
+            delta_g2_neg_pc: E::G2Prepared::default(),
+        }
+    }
+}
+
+impl<E: PairingEngine> ToBytes for PreparedVerifyingKey<E> {
+    fn write<W: Write>(&self, mut writer: W) -> IoResult<()> {
+        self.vk.write(&mut writer)?;
+        self.alpha_g1_beta_g2.write(&mut writer)?;
+        self.gamma_g2_neg_pc.write(&mut writer)?;
+        self.delta_g2_neg_pc.write(&mut writer)?;
+        Ok(())
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+
+/// The prover key for the Groth16 zkSNARK.
+#[derive(Clone, Debug, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
+pub struct ProvingKey<E: PairingEngine> {
+    /// The underlying verification key.
+    pub vk: VerifyingKey<E>,
+    /// The element `beta * G` in `E::G1`.
+    pub beta_g1: E::G1Affine,
+    /// The element `delta * G` in `E::G1`.
+    pub delta_g1: E::G1Affine,
+    /// The elements `a_i * G` in `E::G1`.
+    pub a_query: Vec<E::G1Affine>,
+    /// The elements `b_i * G` in `E::G1`.
+    pub b_g1_query: Vec<E::G1Affine>,
+    /// The elements `b_i * H` in `E::G2`.
+    pub b_g2_query: Vec<E::G2Affine>,
+    /// The elements `h_i * G` in `E::G1`.
+    pub h_query: Vec<E::G1Affine>,
+    /// The elements `l_i * G` in `E::G1`.
+    pub l_query: Vec<E::G1Affine>,
+}
diff --git a/arkworks/groth16/src/generator.rs b/arkworks/groth16/src/generator.rs
new file mode 100644
index 00000000..aaabb754
--- /dev/null
+++ b/arkworks/groth16/src/generator.rs
@@ -0,0 +1,232 @@
+use crate::{r1cs_to_qap::R1CStoQAP, ProvingKey, Vec, VerifyingKey};
+use ark_ec::{msm::FixedBaseMSM, PairingEngine, ProjectiveCurve};
+use ark_ff::{Field, PrimeField, UniformRand, Zero};
+use ark_poly::{EvaluationDomain, GeneralEvaluationDomain};
+use ark_relations::r1cs::{
+    ConstraintSynthesizer, ConstraintSystem, OptimizationGoal, Result as R1CSResult,
+    SynthesisError, SynthesisMode,
+};
+use ark_std::rand::Rng;
+use ark_std::{cfg_into_iter, cfg_iter};
+
+#[cfg(feature = "parallel")]
+use rayon::prelude::*;
+
+/// Generates a random common reference string for
+/// a circuit.
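+/// The sampled scalars (`alpha`, `beta`, `gamma`, `delta`) are the "toxic waste":
+/// anyone who learns them can forge proofs, so they must be discarded after setup.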
+#[inline] +pub fn generate_random_parameters(circuit: C, rng: &mut R) -> R1CSResult> +where + E: PairingEngine, + C: ConstraintSynthesizer, + R: Rng, +{ + let alpha = E::Fr::rand(rng); + let beta = E::Fr::rand(rng); + let gamma = E::Fr::rand(rng); + let delta = E::Fr::rand(rng); + + let g1_generator = E::G1Projective::rand(rng); + let g2_generator = E::G2Projective::rand(rng); + + generate_parameters::( + circuit, + alpha, + beta, + gamma, + delta, + g1_generator, + g2_generator, + rng, + ) +} + +/// Create parameters for a circuit, given some toxic waste and group generators +pub fn generate_parameters( + circuit: C, + alpha: E::Fr, + beta: E::Fr, + gamma: E::Fr, + delta: E::Fr, + g1_generator: E::G1Projective, + g2_generator: E::G2Projective, + rng: &mut R, +) -> R1CSResult> +where + E: PairingEngine, + C: ConstraintSynthesizer, + R: Rng, +{ + type D = GeneralEvaluationDomain; + + let setup_time = start_timer!(|| "Groth16::Generator"); + let cs = ConstraintSystem::new_ref(); + cs.set_optimization_goal(OptimizationGoal::Constraints); + cs.set_mode(SynthesisMode::Setup); + + // Synthesize the circuit. + let synthesis_time = start_timer!(|| "Constraint synthesis"); + circuit.generate_constraints(cs.clone())?; + end_timer!(synthesis_time); + + let lc_time = start_timer!(|| "Inlining LCs"); + cs.finalize(); + end_timer!(lc_time); + + /////////////////////////////////////////////////////////////////////////// + let domain_time = start_timer!(|| "Constructing evaluation domain"); + + let domain_size = cs.num_constraints() + cs.num_instance_variables(); + let domain = D::new(domain_size).ok_or(SynthesisError::PolynomialDegreeTooLarge)?; + let t = domain.sample_element_outside_domain(rng); + + end_timer!(domain_time); + /////////////////////////////////////////////////////////////////////////// + + let reduction_time = start_timer!(|| "R1CS to QAP Instance Map with Evaluation"); + let num_instance_variables = cs.num_instance_variables(); + let (a, b, c, zt, qap_num_variables, m_raw) = + R1CStoQAP::instance_map_with_evaluation::>(cs, &t)?; + end_timer!(reduction_time); + + // Compute query densities + let non_zero_a: usize = cfg_into_iter!(0..qap_num_variables) + .map(|i| usize::from(!a[i].is_zero())) + .sum(); + + let non_zero_b: usize = cfg_into_iter!(0..qap_num_variables) + .map(|i| usize::from(!b[i].is_zero())) + .sum(); + + let scalar_bits = E::Fr::size_in_bits(); + + let gamma_inverse = gamma.inverse().ok_or(SynthesisError::UnexpectedIdentity)?; + let delta_inverse = delta.inverse().ok_or(SynthesisError::UnexpectedIdentity)?; + + let gamma_abc = cfg_iter!(a[..num_instance_variables]) + .zip(&b[..num_instance_variables]) + .zip(&c[..num_instance_variables]) + .map(|((a, b), c)| (beta * a + &(alpha * b) + c) * &gamma_inverse) + .collect::>(); + + let l = cfg_iter!(a) + .zip(&b) + .zip(&c) + .map(|((a, b), c)| (beta * a + &(alpha * b) + c) * &delta_inverse) + .collect::>(); + + drop(c); + + // Compute B window table + let g2_time = start_timer!(|| "Compute G2 table"); + let g2_window = FixedBaseMSM::get_mul_window_size(non_zero_b); + let g2_table = + FixedBaseMSM::get_window_table::(scalar_bits, g2_window, g2_generator); + end_timer!(g2_time); + + // Compute the B-query in G2 + let b_g2_time = start_timer!(|| "Calculate B G2"); + let b_g2_query = + FixedBaseMSM::multi_scalar_mul::(scalar_bits, g2_window, &g2_table, &b); + drop(g2_table); + end_timer!(b_g2_time); + + // Compute G window table + let g1_window_time = start_timer!(|| "Compute G1 window table"); + let g1_window = + 
FixedBaseMSM::get_mul_window_size(non_zero_a + non_zero_b + qap_num_variables + m_raw + 1); + let g1_table = + FixedBaseMSM::get_window_table::(scalar_bits, g1_window, g1_generator); + end_timer!(g1_window_time); + + // Generate the R1CS proving key + let proving_key_time = start_timer!(|| "Generate the R1CS proving key"); + + let alpha_g1 = g1_generator.mul(&alpha.into_repr()); + let beta_g1 = g1_generator.mul(&beta.into_repr()); + let beta_g2 = g2_generator.mul(&beta.into_repr()); + let delta_g1 = g1_generator.mul(&delta.into_repr()); + let delta_g2 = g2_generator.mul(&delta.into_repr()); + + // Compute the A-query + let a_time = start_timer!(|| "Calculate A"); + let a_query = + FixedBaseMSM::multi_scalar_mul::(scalar_bits, g1_window, &g1_table, &a); + drop(a); + end_timer!(a_time); + + // Compute the B-query in G1 + let b_g1_time = start_timer!(|| "Calculate B G1"); + let b_g1_query = + FixedBaseMSM::multi_scalar_mul::(scalar_bits, g1_window, &g1_table, &b); + drop(b); + end_timer!(b_g1_time); + + // Compute the H-query + let h_time = start_timer!(|| "Calculate H"); + let h_query = FixedBaseMSM::multi_scalar_mul::( + scalar_bits, + g1_window, + &g1_table, + &cfg_into_iter!(0..m_raw - 1) + .map(|i| zt * &delta_inverse * &t.pow([i as u64])) + .collect::>(), + ); + + end_timer!(h_time); + + // Compute the L-query + let l_time = start_timer!(|| "Calculate L"); + let l_query = FixedBaseMSM::multi_scalar_mul::( + scalar_bits, + g1_window, + &g1_table, + &l[num_instance_variables..], + ); + drop(l); + end_timer!(l_time); + + end_timer!(proving_key_time); + + // Generate R1CS verification key + let verifying_key_time = start_timer!(|| "Generate the R1CS verification key"); + let gamma_g2 = g2_generator.mul(&gamma.into_repr()); + let gamma_abc_g1 = FixedBaseMSM::multi_scalar_mul::( + scalar_bits, + g1_window, + &g1_table, + &gamma_abc, + ); + + drop(g1_table); + + end_timer!(verifying_key_time); + + let vk = VerifyingKey:: { + alpha_g1: alpha_g1.into_affine(), + beta_g2: beta_g2.into_affine(), + gamma_g2: gamma_g2.into_affine(), + delta_g2: delta_g2.into_affine(), + gamma_abc_g1: E::G1Projective::batch_normalization_into_affine(&gamma_abc_g1), + }; + + let batch_normalization_time = start_timer!(|| "Convert proving key elements to affine"); + let a_query = E::G1Projective::batch_normalization_into_affine(&a_query); + let b_g1_query = E::G1Projective::batch_normalization_into_affine(&b_g1_query); + let b_g2_query = E::G2Projective::batch_normalization_into_affine(&b_g2_query); + let h_query = E::G1Projective::batch_normalization_into_affine(&h_query); + let l_query = E::G1Projective::batch_normalization_into_affine(&l_query); + end_timer!(batch_normalization_time); + end_timer!(setup_time); + + Ok(ProvingKey { + vk, + beta_g1: beta_g1.into_affine(), + delta_g1: delta_g1.into_affine(), + a_query, + b_g1_query, + b_g2_query, + h_query, + l_query, + }) +} diff --git a/arkworks/groth16/src/lib.rs b/arkworks/groth16/src/lib.rs new file mode 100644 index 00000000..def723a5 --- /dev/null +++ b/arkworks/groth16/src/lib.rs @@ -0,0 +1,98 @@ +//! An implementation of the [`Groth16`] zkSNARK. +//! +//! 
[`Groth16`]: https://eprint.iacr.org/2016/260.pdf
+#![cfg_attr(not(feature = "std"), no_std)]
+#![warn(
+    unused,
+    future_incompatible,
+    nonstandard_style,
+    rust_2018_idioms,
+    missing_docs
+)]
+#![allow(clippy::many_single_char_names, clippy::op_ref)]
+#![forbid(unsafe_code)]
+
+#[macro_use]
+extern crate ark_std;
+
+#[cfg(feature = "r1cs")]
+#[macro_use]
+extern crate derivative;
+
+/// Reduce an R1CS instance to a *Quadratic Arithmetic Program* instance.
+pub(crate) mod r1cs_to_qap;
+
+/// Data structures used by the prover, verifier, and generator.
+pub mod data_structures;
+
+/// Generate public parameters for the Groth16 zkSNARK construction.
+pub mod generator;
+
+/// Create proofs for the Groth16 zkSNARK construction.
+pub mod prover;
+
+/// Verify proofs for the Groth16 zkSNARK construction.
+pub mod verifier;
+
+/// Constraints for the Groth16 verifier.
+#[cfg(feature = "r1cs")]
+pub mod constraints;
+
+#[cfg(test)]
+mod test;
+
+pub use self::data_structures::*;
+pub use self::{generator::*, prover::*, verifier::*};
+
+use ark_crypto_primitives::snark::*;
+use ark_ec::PairingEngine;
+use ark_relations::r1cs::{ConstraintSynthesizer, SynthesisError};
+use ark_std::rand::RngCore;
+use ark_std::{marker::PhantomData, vec::Vec};
+
+/// The SNARK of [[Groth16]](https://eprint.iacr.org/2016/260.pdf).
+pub struct Groth16<E: PairingEngine> {
+    e_phantom: PhantomData<E>,
+}
+
+impl<E: PairingEngine> SNARK<E::Fr> for Groth16<E> {
+    type ProvingKey = ProvingKey<E>;
+    type VerifyingKey = VerifyingKey<E>;
+    type Proof = Proof<E>;
+    type ProcessedVerifyingKey = PreparedVerifyingKey<E>;
+    type Error = SynthesisError;
+
+    fn circuit_specific_setup<C: ConstraintSynthesizer<E::Fr>, R: RngCore>(
+        circuit: C,
+        rng: &mut R,
+    ) -> Result<(Self::ProvingKey, Self::VerifyingKey), Self::Error> {
+        let pk = generate_random_parameters::<E, C, R>(circuit, rng)?;
+        let vk = pk.vk.clone();
+
+        Ok((pk, vk))
+    }
+
+    fn prove<C: ConstraintSynthesizer<E::Fr>, R: RngCore>(
+        pk: &Self::ProvingKey,
+        circuit: C,
+        rng: &mut R,
+    ) -> Result<Self::Proof, Self::Error> {
+        create_random_proof::<E, C, R>(circuit, pk, rng)
+    }
+
+    fn process_vk(
+        circuit_vk: &Self::VerifyingKey,
+    ) -> Result<Self::ProcessedVerifyingKey, Self::Error> {
+        Ok(prepare_verifying_key(circuit_vk))
+    }
+
+    fn verify_with_processed_vk(
+        circuit_pvk: &Self::ProcessedVerifyingKey,
+        x: &[E::Fr],
+        proof: &Self::Proof,
+    ) -> Result<bool, Self::Error> {
+        Ok(verify_proof(&circuit_pvk, proof, &x)?)
+    }
+}
+
+impl<E: PairingEngine> CircuitSpecificSetupSNARK<E::Fr> for Groth16<E> {}
diff --git a/arkworks/groth16/src/prover.rs b/arkworks/groth16/src/prover.rs
new file mode 100644
index 00000000..513a42f2
--- /dev/null
+++ b/arkworks/groth16/src/prover.rs
@@ -0,0 +1,204 @@
+use crate::{r1cs_to_qap::R1CStoQAP, Proof, ProvingKey, VerifyingKey};
+use ark_ec::{msm::VariableBaseMSM, AffineCurve, PairingEngine, ProjectiveCurve};
+use ark_ff::{Field, PrimeField, UniformRand, Zero};
+use ark_poly::GeneralEvaluationDomain;
+use ark_relations::r1cs::{
+    ConstraintSynthesizer, ConstraintSystem, OptimizationGoal, Result as R1CSResult,
+};
+use ark_std::{cfg_into_iter, cfg_iter, rand::Rng, vec::Vec};
+
+#[cfg(feature = "parallel")]
+use rayon::prelude::*;
+
+/// Create a Groth16 proof that is zero-knowledge.
+/// This method samples the randomness for zero-knowledge via `rng`.
+#[inline]
+pub fn create_random_proof<E, C, R>(
+    circuit: C,
+    pk: &ProvingKey<E>,
+    rng: &mut R,
+) -> R1CSResult<Proof<E>>
+where
+    E: PairingEngine,
+    C: ConstraintSynthesizer<E::Fr>,
+    R: Rng,
+{
+    let r = E::Fr::rand(rng);
+    let s = E::Fr::rand(rng);
+
+    create_proof::<E, C>(circuit, pk, r, s)
+}
+
+/// Create a Groth16 proof that is *not* zero-knowledge.
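+/// It fixes `r = s = 0`, so the resulting proof is deterministic and may leak
+/// information about the witness; prefer [`create_random_proof`] unless
+/// determinism is specifically required.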
+#[inline] +pub fn create_proof_no_zk(circuit: C, pk: &ProvingKey) -> R1CSResult> +where + E: PairingEngine, + C: ConstraintSynthesizer, +{ + create_proof::(circuit, pk, E::Fr::zero(), E::Fr::zero()) +} + +/// Create a Groth16 proof using randomness `r` and `s`. +#[inline] +pub fn create_proof( + circuit: C, + pk: &ProvingKey, + r: E::Fr, + s: E::Fr, +) -> R1CSResult> +where + E: PairingEngine, + C: ConstraintSynthesizer, +{ + type D = GeneralEvaluationDomain; + + let prover_time = start_timer!(|| "Groth16::Prover"); + let cs = ConstraintSystem::new_ref(); + + // Set the optimization goal + cs.set_optimization_goal(OptimizationGoal::Constraints); + + // Synthesize the circuit. + let synthesis_time = start_timer!(|| "Constraint synthesis"); + circuit.generate_constraints(cs.clone())?; + debug_assert!(cs.is_satisfied().unwrap()); + end_timer!(synthesis_time); + + let lc_time = start_timer!(|| "Inlining LCs"); + cs.finalize(); + end_timer!(lc_time); + + let witness_map_time = start_timer!(|| "R1CS to QAP witness map"); + let h = R1CStoQAP::witness_map::>(cs.clone())?; + end_timer!(witness_map_time); + let h_assignment = cfg_into_iter!(h).map(|s| s.into_repr()).collect::>(); + let c_acc_time = start_timer!(|| "Compute C"); + + let h_acc = VariableBaseMSM::multi_scalar_mul(&pk.h_query, &h_assignment); + drop(h_assignment); + // Compute C + let prover = cs.borrow().unwrap(); + let aux_assignment = cfg_iter!(prover.witness_assignment) + .map(|s| s.into_repr()) + .collect::>(); + + let l_aux_acc = VariableBaseMSM::multi_scalar_mul(&pk.l_query, &aux_assignment); + + let r_s_delta_g1 = pk + .delta_g1 + .into_projective() + .mul(&r.into_repr()) + .mul(&s.into_repr()); + + end_timer!(c_acc_time); + + let input_assignment = prover.instance_assignment[1..] + .iter() + .map(|s| s.into_repr()) + .collect::>(); + + drop(prover); + drop(cs); + + let assignment = [&input_assignment[..], &aux_assignment[..]].concat(); + drop(aux_assignment); + + // Compute A + let a_acc_time = start_timer!(|| "Compute A"); + let r_g1 = pk.delta_g1.scalar_mul(r); + + let g_a = calculate_coeff(r_g1, &pk.a_query, pk.vk.alpha_g1, &assignment); + + let s_g_a = g_a.mul(&s.into_repr()); + end_timer!(a_acc_time); + + // Compute B in G1 if needed + let g1_b = if !r.is_zero() { + let b_g1_acc_time = start_timer!(|| "Compute B in G1"); + let s_g1 = pk.delta_g1.scalar_mul(s); + let g1_b = calculate_coeff(s_g1, &pk.b_g1_query, pk.beta_g1, &assignment); + + end_timer!(b_g1_acc_time); + + g1_b + } else { + E::G1Projective::zero() + }; + + // Compute B in G2 + let b_g2_acc_time = start_timer!(|| "Compute B in G2"); + let s_g2 = pk.vk.delta_g2.scalar_mul(s); + let g2_b = calculate_coeff(s_g2, &pk.b_g2_query, pk.vk.beta_g2, &assignment); + let r_g1_b = g1_b.mul(&r.into_repr()); + drop(assignment); + + end_timer!(b_g2_acc_time); + + let c_time = start_timer!(|| "Finish C"); + let mut g_c = s_g_a; + g_c += &r_g1_b; + g_c -= &r_s_delta_g1; + g_c += &l_aux_acc; + g_c += &h_acc; + end_timer!(c_time); + + end_timer!(prover_time); + + Ok(Proof { + a: g_a.into_affine(), + b: g2_b.into_affine(), + c: g_c.into_affine(), + }) +} + +/// Given a Groth16 proof, returns a fresh proof of the same statement. For a +/// proof π of a statement S, the output of the non-deterministic procedure +/// `rerandomize_proof(π)` is statistically indistinguishable from a fresh +/// honest proof of S. 
For more info, see theorem 3 of [\[BKSV20\]](https://eprint.iacr.org/2020/811) +pub fn rerandomize_proof(rng: &mut R, vk: &VerifyingKey, proof: &Proof) -> Proof +where + E: PairingEngine, + R: Rng, +{ + // These are our rerandomization factors. They must be nonzero and uniformly + // sampled. + let (mut r1, mut r2) = (E::Fr::zero(), E::Fr::zero()); + while r1.is_zero() || r2.is_zero() { + r1 = E::Fr::rand(rng); + r2 = E::Fr::rand(rng); + } + + // See figure 1 in the paper referenced above: + // A' = (1/r₁)A + // B' = r₁B + r₁r₂(δG₂) + // C' = C + r₂A + + // We can unwrap() this because r₁ is guaranteed to be nonzero + let new_a = proof.a.scalar_mul(r1.inverse().unwrap()); + let new_b = proof.b.scalar_mul(r1) + &vk.delta_g2.scalar_mul(r1 * &r2); + let new_c = proof.c + proof.a.scalar_mul(r2).into_affine(); + + Proof { + a: new_a.into_affine(), + b: new_b.into_affine(), + c: new_c, + } +} + +fn calculate_coeff( + initial: G::Projective, + query: &[G], + vk_param: G, + assignment: &[::BigInt], +) -> G::Projective { + let el = query[0]; + let acc = VariableBaseMSM::multi_scalar_mul(&query[1..], assignment); + + let mut res = initial; + res.add_assign_mixed(&el); + res += &acc; + res.add_assign_mixed(&vk_param); + + res +} diff --git a/arkworks/groth16/src/r1cs_to_qap.rs b/arkworks/groth16/src/r1cs_to_qap.rs new file mode 100644 index 00000000..9ff4e82f --- /dev/null +++ b/arkworks/groth16/src/r1cs_to_qap.rs @@ -0,0 +1,161 @@ +use ark_ff::{One, PrimeField, Zero}; +use ark_poly::EvaluationDomain; +use ark_std::{cfg_iter, cfg_iter_mut, vec}; + +use crate::Vec; +use ark_relations::r1cs::{ConstraintSystemRef, Result as R1CSResult, SynthesisError}; +use core::ops::{AddAssign, Deref}; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +#[inline] +fn evaluate_constraint<'a, LHS, RHS, R>(terms: &'a [(LHS, usize)], assignment: &'a [RHS]) -> R +where + LHS: One + Send + Sync + PartialEq, + RHS: Send + Sync + core::ops::Mul<&'a LHS, Output = RHS> + Copy, + R: Zero + Send + Sync + AddAssign + core::iter::Sum, +{ + // Need to wrap in a closure when using Rayon + #[cfg(feature = "parallel")] + let zero = || R::zero(); + #[cfg(not(feature = "parallel"))] + let zero = R::zero(); + + let res = cfg_iter!(terms).fold(zero, |mut sum, (coeff, index)| { + let val = &assignment[*index]; + + if coeff.is_one() { + sum += *val; + } else { + sum += val.mul(coeff); + } + + sum + }); + + // Need to explicitly call `.sum()` when using Rayon + #[cfg(feature = "parallel")] + return res.sum(); + #[cfg(not(feature = "parallel"))] + return res; +} + +pub(crate) struct R1CStoQAP; + +impl R1CStoQAP { + #[inline] + #[allow(clippy::type_complexity)] + pub(crate) fn instance_map_with_evaluation>( + cs: ConstraintSystemRef, + t: &F, + ) -> R1CSResult<(Vec, Vec, Vec, F, usize, usize)> { + let matrices = cs.to_matrices().unwrap(); + let domain_size = cs.num_constraints() + cs.num_instance_variables(); + let domain = D::new(domain_size).ok_or(SynthesisError::PolynomialDegreeTooLarge)?; + let domain_size = domain.size(); + + let zt = domain.evaluate_vanishing_polynomial(*t); + + // Evaluate all Lagrange polynomials + let coefficients_time = start_timer!(|| "Evaluate Lagrange coefficients"); + let u = domain.evaluate_all_lagrange_coefficients(*t); + end_timer!(coefficients_time); + + let qap_num_variables = (cs.num_instance_variables() - 1) + cs.num_witness_variables(); + + let mut a = vec![F::zero(); qap_num_variables + 1]; + let mut b = vec![F::zero(); qap_num_variables + 1]; + let mut c = vec![F::zero(); qap_num_variables + 1]; + 
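+        // `a[i]`, `b[i]`, and `c[i]` will hold the evaluations at `t` of the QAP
+        // polynomials attached to variable `i`.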
+ { + let start = 0; + let end = cs.num_instance_variables(); + let num_constraints = cs.num_constraints(); + a[start..end].copy_from_slice(&u[(start + num_constraints)..(end + num_constraints)]); + } + + for (i, u_i) in u.iter().enumerate().take(cs.num_constraints()) { + for &(ref coeff, index) in &matrices.a[i] { + a[index] += &(*u_i * coeff); + } + for &(ref coeff, index) in &matrices.b[i] { + b[index] += &(*u_i * coeff); + } + for &(ref coeff, index) in &matrices.c[i] { + c[index] += &(*u_i * coeff); + } + } + + Ok((a, b, c, zt, qap_num_variables, domain_size)) + } + + #[inline] + pub(crate) fn witness_map>( + prover: ConstraintSystemRef, + ) -> R1CSResult> { + let matrices = prover.to_matrices().unwrap(); + let zero = F::zero(); + let num_inputs = prover.num_instance_variables(); + let num_constraints = prover.num_constraints(); + let cs = prover.borrow().unwrap(); + let prover = cs.deref(); + + let full_assignment = [ + prover.instance_assignment.as_slice(), + prover.witness_assignment.as_slice(), + ] + .concat(); + + let domain = + D::new(num_constraints + num_inputs).ok_or(SynthesisError::PolynomialDegreeTooLarge)?; + let domain_size = domain.size(); + + let mut a = vec![zero; domain_size]; + let mut b = vec![zero; domain_size]; + + cfg_iter_mut!(a[..num_constraints]) + .zip(cfg_iter_mut!(b[..num_constraints])) + .zip(cfg_iter!(&matrices.a)) + .zip(cfg_iter!(&matrices.b)) + .for_each(|(((a, b), at_i), bt_i)| { + *a = evaluate_constraint(&at_i, &full_assignment); + *b = evaluate_constraint(&bt_i, &full_assignment); + }); + + { + let start = num_constraints; + let end = start + num_inputs; + a[start..end].clone_from_slice(&full_assignment[..num_inputs]); + } + + domain.ifft_in_place(&mut a); + domain.ifft_in_place(&mut b); + + domain.coset_fft_in_place(&mut a); + domain.coset_fft_in_place(&mut b); + + let mut ab = domain.mul_polynomials_in_evaluation_domain(&a, &b); + drop(a); + drop(b); + + let mut c = vec![zero; domain_size]; + cfg_iter_mut!(c[..prover.num_constraints]) + .enumerate() + .for_each(|(i, c)| { + *c = evaluate_constraint(&matrices.c[i], &full_assignment); + }); + + domain.ifft_in_place(&mut c); + domain.coset_fft_in_place(&mut c); + + cfg_iter_mut!(ab) + .zip(c) + .for_each(|(ab_i, c_i)| *ab_i -= &c_i); + + domain.divide_by_vanishing_poly_on_coset_in_place(&mut ab); + domain.coset_ifft_in_place(&mut ab); + + Ok(ab) + } +} diff --git a/arkworks/groth16/src/test.rs b/arkworks/groth16/src/test.rs new file mode 100644 index 00000000..649d6022 --- /dev/null +++ b/arkworks/groth16/src/test.rs @@ -0,0 +1,157 @@ +use crate::{ + create_random_proof, generate_random_parameters, prepare_verifying_key, rerandomize_proof, + verify_proof, +}; +use ark_ec::PairingEngine; +use ark_ff::UniformRand; +use ark_std::test_rng; + +use core::ops::MulAssign; + +use ark_ff::{Field, Zero}; +use ark_relations::{ + lc, + r1cs::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisError}, +}; + +struct MySillyCircuit { + a: Option, + b: Option, +} + +impl ConstraintSynthesizer for MySillyCircuit { + fn generate_constraints( + self, + cs: ConstraintSystemRef, + ) -> Result<(), SynthesisError> { + let a = cs.new_witness_variable(|| self.a.ok_or(SynthesisError::AssignmentMissing))?; + let b = cs.new_witness_variable(|| self.b.ok_or(SynthesisError::AssignmentMissing))?; + let c = cs.new_input_variable(|| { + let mut a = self.a.ok_or(SynthesisError::AssignmentMissing)?; + let b = self.b.ok_or(SynthesisError::AssignmentMissing)?; + + a.mul_assign(&b); + Ok(a) + })?; + + cs.enforce_constraint(lc!() + a, 
lc!() + b, lc!() + c)?; + cs.enforce_constraint(lc!() + a, lc!() + b, lc!() + c)?; + cs.enforce_constraint(lc!() + a, lc!() + b, lc!() + c)?; + cs.enforce_constraint(lc!() + a, lc!() + b, lc!() + c)?; + cs.enforce_constraint(lc!() + a, lc!() + b, lc!() + c)?; + cs.enforce_constraint(lc!() + a, lc!() + b, lc!() + c)?; + + Ok(()) + } +} + +fn test_prove_and_verify(n_iters: usize) +where + E: PairingEngine, +{ + let rng = &mut test_rng(); + + let params = + generate_random_parameters::(MySillyCircuit { a: None, b: None }, rng).unwrap(); + + let pvk = prepare_verifying_key::(¶ms.vk); + + for _ in 0..n_iters { + let a = E::Fr::rand(rng); + let b = E::Fr::rand(rng); + let mut c = a; + c.mul_assign(&b); + + let proof = create_random_proof( + MySillyCircuit { + a: Some(a), + b: Some(b), + }, + ¶ms, + rng, + ) + .unwrap(); + + assert!(verify_proof(&pvk, &proof, &[c]).unwrap()); + assert!(!verify_proof(&pvk, &proof, &[a]).unwrap()); + } +} + +fn test_rerandomize() +where + E: PairingEngine, +{ + // First create an arbitrary Groth16 in the normal way + + let rng = &mut test_rng(); + + let params = + generate_random_parameters::(MySillyCircuit { a: None, b: None }, rng).unwrap(); + + let pvk = prepare_verifying_key::(¶ms.vk); + + let a = E::Fr::rand(rng); + let b = E::Fr::rand(rng); + let c = a * &b; + + // Create the initial proof + let proof1 = create_random_proof( + MySillyCircuit { + a: Some(a), + b: Some(b), + }, + ¶ms, + rng, + ) + .unwrap(); + + // Rerandomize the proof, then rerandomize that + let proof2 = rerandomize_proof(rng, ¶ms.vk, &proof1); + let proof3 = rerandomize_proof(rng, ¶ms.vk, &proof2); + + // Check correctness: a rerandomized proof validates when the original validates + assert!(verify_proof(&pvk, &proof1, &[c]).unwrap()); + assert!(verify_proof(&pvk, &proof2, &[c]).unwrap()); + assert!(verify_proof(&pvk, &proof3, &[c]).unwrap()); + + // Check soundness: a rerandomized proof fails to validate when the original fails to validate + assert!(!verify_proof(&pvk, &proof1, &[E::Fr::zero()]).unwrap()); + assert!(!verify_proof(&pvk, &proof2, &[E::Fr::zero()]).unwrap()); + assert!(!verify_proof(&pvk, &proof3, &[E::Fr::zero()]).unwrap()); + + // Check that the proofs are not equal as group elements + assert!(proof1 != proof2); + assert!(proof1 != proof3); + assert!(proof2 != proof3); +} + +mod bls12_377 { + use super::{test_prove_and_verify, test_rerandomize}; + use ark_bls12_377::Bls12_377; + + #[test] + fn prove_and_verify() { + test_prove_and_verify::(100); + } + + #[test] + fn rerandomize() { + test_rerandomize::(); + } +} + +mod cp6_782 { + use super::{test_prove_and_verify, test_rerandomize}; + + use ark_cp6_782::CP6_782; + + #[test] + fn prove_and_verify() { + test_prove_and_verify::(1); + } + + #[test] + fn rerandomize() { + test_rerandomize::(); + } +} diff --git a/arkworks/groth16/src/verifier.rs b/arkworks/groth16/src/verifier.rs new file mode 100644 index 00000000..3c74b225 --- /dev/null +++ b/arkworks/groth16/src/verifier.rs @@ -0,0 +1,72 @@ +use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve}; +use ark_ff::PrimeField; + +use super::{PreparedVerifyingKey, Proof, VerifyingKey}; + +use ark_relations::r1cs::{Result as R1CSResult, SynthesisError}; + +use core::ops::{AddAssign, Neg}; + +/// Prepare the verifying key `vk` for use in proof verification. 
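+/// This precomputes `e(alpha_g1, beta_g2)` and the negated, pairing-prepared `gamma_g2`
+/// and `delta_g2`, so each verification needs only a product of three Miller loops and a
+/// single final exponentiation.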
+pub fn prepare_verifying_key<E: PairingEngine>(vk: &VerifyingKey<E>) -> PreparedVerifyingKey<E> {
+    PreparedVerifyingKey {
+        vk: vk.clone(),
+        alpha_g1_beta_g2: E::pairing(vk.alpha_g1, vk.beta_g2),
+        gamma_g2_neg_pc: vk.gamma_g2.neg().into(),
+        delta_g2_neg_pc: vk.delta_g2.neg().into(),
+    }
+}
+
+/// Prepare proof inputs for use with [`verify_proof_with_prepared_inputs`], wrt the prepared
+/// verification key `pvk` and instance public inputs.
+pub fn prepare_inputs<E: PairingEngine>(
+    pvk: &PreparedVerifyingKey<E>,
+    public_inputs: &[E::Fr],
+) -> R1CSResult<E::G1Projective> {
+    if (public_inputs.len() + 1) != pvk.vk.gamma_abc_g1.len() {
+        return Err(SynthesisError::MalformedVerifyingKey);
+    }
+
+    let mut g_ic = pvk.vk.gamma_abc_g1[0].into_projective();
+    for (i, b) in public_inputs.iter().zip(pvk.vk.gamma_abc_g1.iter().skip(1)) {
+        g_ic.add_assign(&b.mul(i.into_repr()));
+    }
+
+    Ok(g_ic)
+}
+
+/// Verify a Groth16 proof `proof` against the prepared verification key `pvk` and prepared public
+/// inputs. This should be preferred over [`verify_proof`] if the instance's public inputs are
+/// known in advance.
+pub fn verify_proof_with_prepared_inputs<E: PairingEngine>(
+    pvk: &PreparedVerifyingKey<E>,
+    proof: &Proof<E>,
+    prepared_inputs: &E::G1Projective,
+) -> R1CSResult<bool> {
+    let qap = E::miller_loop(
+        [
+            (proof.a.into(), proof.b.into()),
+            (
+                prepared_inputs.into_affine().into(),
+                pvk.gamma_g2_neg_pc.clone(),
+            ),
+            (proof.c.into(), pvk.delta_g2_neg_pc.clone()),
+        ]
+        .iter(),
+    );
+
+    let test = E::final_exponentiation(&qap).ok_or(SynthesisError::UnexpectedIdentity)?;
+
+    Ok(test == pvk.alpha_g1_beta_g2)
+}
+
+/// Verify a Groth16 proof `proof` against the prepared verification key `pvk`,
+/// with respect to the instance `public_inputs`.
+pub fn verify_proof<E: PairingEngine>(
+    pvk: &PreparedVerifyingKey<E>,
+    proof: &Proof<E>,
+    public_inputs: &[E::Fr],
+) -> R1CSResult<bool> {
+    let prepared_inputs = prepare_inputs(pvk, public_inputs)?;
+    verify_proof_with_prepared_inputs(pvk, proof, &prepared_inputs)
+}
diff --git a/arkworks/groth16/tests/mimc.rs b/arkworks/groth16/tests/mimc.rs
new file mode 100644
index 00000000..7cfa5b2a
--- /dev/null
+++ b/arkworks/groth16/tests/mimc.rs
@@ -0,0 +1,229 @@
+#![warn(unused)]
+#![deny(
+    trivial_casts,
+    trivial_numeric_casts,
+    variant_size_differences,
+    stable_features,
+    non_shorthand_field_patterns,
+    renamed_and_removed_lints,
+    private_in_public,
+    unsafe_code
+)]
+
+// For randomness (during paramgen and proof generation)
+use ark_std::rand::Rng;
+
+// For benchmarking
+use std::time::{Duration, Instant};
+
+// Bring in some tools for using pairing-friendly curves
+// We're going to use the BLS12-377 pairing-friendly elliptic curve.
+use ark_bls12_377::{Bls12_377, Fr};
+use ark_ff::Field;
+use ark_std::test_rng;
+
+// We'll use these interfaces to construct our circuit.
+use ark_relations::{
+    lc, ns,
+    r1cs::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisError, Variable},
+};
+
+const MIMC_ROUNDS: usize = 322;
+
+/// This is an implementation of MiMC, specifically a
+/// variant named `LongsightF322p3` for BLS12-377.
+/// See http://eprint.iacr.org/2016/492 for more
+/// information about this construction.
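+/// Each round computes `xL' = xR + (xL + C_i)^3` and then swaps the two halves.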
+///
+/// ```
+/// function LongsightF322p3(xL ⦂ Fp, xR ⦂ Fp) {
+///     for i from 0 up to 321 {
+///         xL, xR := xR + (xL + Ci)^3, xL
+///     }
+///     return xL
+/// }
+/// ```
+fn mimc<F: Field>(mut xl: F, mut xr: F, constants: &[F]) -> F {
+    assert_eq!(constants.len(), MIMC_ROUNDS);
+
+    for i in 0..MIMC_ROUNDS {
+        let mut tmp1 = xl;
+        tmp1.add_assign(&constants[i]);
+        let mut tmp2 = tmp1;
+        tmp2.square_in_place();
+        tmp2.mul_assign(&tmp1);
+        tmp2.add_assign(&xr);
+        xr = xl;
+        xl = tmp2;
+    }
+
+    xl
+}
+
+/// This is our demo circuit for proving knowledge of the
+/// preimage of a MiMC hash invocation.
+struct MiMCDemo<'a, F: Field> {
+    xl: Option<F>,
+    xr: Option<F>,
+    constants: &'a [F],
+}
+
+/// Our demo circuit implements this `Circuit` trait which
+/// is used during paramgen and proving in order to
+/// synthesize the constraint system.
+impl<'a, F: Field> ConstraintSynthesizer<F> for MiMCDemo<'a, F> {
+    fn generate_constraints(self, cs: ConstraintSystemRef<F>) -> Result<(), SynthesisError> {
+        assert_eq!(self.constants.len(), MIMC_ROUNDS);
+
+        // Allocate the first component of the preimage.
+        let mut xl_value = self.xl;
+        let mut xl =
+            cs.new_witness_variable(|| xl_value.ok_or(SynthesisError::AssignmentMissing))?;
+
+        // Allocate the second component of the preimage.
+        let mut xr_value = self.xr;
+        let mut xr =
+            cs.new_witness_variable(|| xr_value.ok_or(SynthesisError::AssignmentMissing))?;
+
+        for i in 0..MIMC_ROUNDS {
+            // xL, xR := xR + (xL + Ci)^3, xL
+            let ns = ns!(cs, "round");
+            let cs = ns.cs();
+
+            // tmp = (xL + Ci)^2
+            let tmp_value = xl_value.map(|mut e| {
+                e.add_assign(&self.constants[i]);
+                e.square_in_place();
+                e
+            });
+            let tmp =
+                cs.new_witness_variable(|| tmp_value.ok_or(SynthesisError::AssignmentMissing))?;
+
+            cs.enforce_constraint(
+                lc!() + xl + (self.constants[i], Variable::One),
+                lc!() + xl + (self.constants[i], Variable::One),
+                lc!() + tmp,
+            )?;
+
+            // new_xL = xR + (xL + Ci)^3
+            // new_xL = xR + tmp * (xL + Ci)
+            // new_xL - xR = tmp * (xL + Ci)
+            let new_xl_value = xl_value.map(|mut e| {
+                e.add_assign(&self.constants[i]);
+                e.mul_assign(&tmp_value.unwrap());
+                e.add_assign(&xr_value.unwrap());
+                e
+            });
+
+            let new_xl = if i == (MIMC_ROUNDS - 1) {
+                // This is the last round, xL is our image and so
+                // we allocate a public input.
+                cs.new_input_variable(|| new_xl_value.ok_or(SynthesisError::AssignmentMissing))?
+            } else {
+                cs.new_witness_variable(|| new_xl_value.ok_or(SynthesisError::AssignmentMissing))?
+            };
+
+            cs.enforce_constraint(
+                lc!() + tmp,
+                lc!() + xl + (self.constants[i], Variable::One),
+                lc!() + new_xl - xr,
+            )?;
+
+            // xR = xL
+            xr = xl;
+            xr_value = xl_value;
+
+            // xL = new_xL
+            xl = new_xl;
+            xl_value = new_xl_value;
+        }
+
+        Ok(())
+    }
+}
+
+#[test]
+fn test_mimc_groth16() {
+    // We're going to use the Groth16 proving system.
+    use ark_groth16::{
+        create_random_proof, generate_random_parameters, prepare_verifying_key, verify_proof,
+    };
+
+    // This may not be cryptographically safe, use
+    // `OsRng` (for example) in production software.
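+    // (`test_rng()` is deterministically seeded, which also keeps this benchmark reproducible.)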
+    let rng = &mut test_rng();
+
+    // Generate the MiMC round constants
+    let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::<Vec<_>>();
+
+    println!("Creating parameters...");
+
+    // Create parameters for our circuit
+    let params = {
+        let c = MiMCDemo::<Fr> {
+            xl: None,
+            xr: None,
+            constants: &constants,
+        };
+
+        generate_random_parameters::<Bls12_377, _, _>(c, rng).unwrap()
+    };
+
+    // Prepare the verification key (for proof verification)
+    let pvk = prepare_verifying_key(&params.vk);
+
+    println!("Creating proofs...");
+
+    // Let's benchmark stuff!
+    const SAMPLES: u32 = 50;
+    let mut total_proving = Duration::new(0, 0);
+    let mut total_verifying = Duration::new(0, 0);
+
+    // Just a place to put the proof data, so we can
+    // benchmark deserialization.
+    // let mut proof_vec = vec![];
+
+    for _ in 0..SAMPLES {
+        // Generate a random preimage and compute the image
+        let xl = rng.gen();
+        let xr = rng.gen();
+        let image = mimc(xl, xr, &constants);
+
+        // proof_vec.truncate(0);
+
+        let start = Instant::now();
+        {
+            // Create an instance of our circuit (with the
+            // witness)
+            let c = MiMCDemo {
+                xl: Some(xl),
+                xr: Some(xr),
+                constants: &constants,
+            };
+
+            // Create a groth16 proof with our parameters.
+            let proof = create_random_proof(c, &params, rng).unwrap();
+            assert!(verify_proof(&pvk, &proof, &[image]).unwrap());
+
+            // proof.write(&mut proof_vec).unwrap();
+        }
+
+        total_proving += start.elapsed();
+
+        let start = Instant::now();
+        // let proof = Proof::read(&proof_vec[..]).unwrap();
+        // Check the proof
+
+        total_verifying += start.elapsed();
+    }
+    let proving_avg = total_proving / SAMPLES;
+    let proving_avg =
+        proving_avg.subsec_nanos() as f64 / 1_000_000_000f64 + (proving_avg.as_secs() as f64);
+
+    let verifying_avg = total_verifying / SAMPLES;
+    let verifying_avg =
+        verifying_avg.subsec_nanos() as f64 / 1_000_000_000f64 + (verifying_avg.as_secs() as f64);
+
+    println!("Average proving time: {:?} seconds", proving_avg);
+    println!("Average verifying time: {:?} seconds", verifying_avg);
+}
diff --git a/arkworks/marlin/.github/ISSUE_TEMPLATE/bug_report.md b/arkworks/marlin/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 00000000..e01ca941
--- /dev/null
+++ b/arkworks/marlin/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,25 @@
+---
+name: Bug Report
+about: Create a report to help us squash bugs!
+
+---
+
+## Summary of Bug
+
+
+
+## Version
+
+
+
+## Steps to Reproduce
+
+
+
+
diff --git a/arkworks/marlin/.github/ISSUE_TEMPLATE/feature_request.md b/arkworks/marlin/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 00000000..7d5ed5df
--- /dev/null
+++ b/arkworks/marlin/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,35 @@
+---
+name: Feature Request
+about: Create a proposal to request a feature
+
+---
+
+
+
+## Summary
+
+
+
+## Problem Definition
+
+
+
+## Proposal
+
+
+
+____
+
+#### For Admin Use
+
+- [ ] Not duplicate issue
+- [ ] Appropriate labels applied
+- [ ] Appropriate contributors tagged
+- [ ] Contributor assigned/self-assigned
diff --git a/arkworks/marlin/.github/PULL_REQUEST_TEMPLATE.md b/arkworks/marlin/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 00000000..37f2f6c1
--- /dev/null
+++ b/arkworks/marlin/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,26 @@
+
+
+## Description
+
+
+
+closes: #XXXX
+
+---
+
+Before we can merge this PR, please make sure that all the following items have been
+checked off. If any of the checklist items are not applicable, please leave them but
+write a little note why.
+ +- [ ] Targeted PR against correct branch (master) +- [ ] Linked to Github issue with discussion and accepted design OR have an explanation in the PR that describes this work. +- [ ] Wrote unit tests +- [ ] Updated relevant documentation in the code +- [ ] Added a relevant changelog entry to the `Pending` section in `CHANGELOG.md` +- [ ] Re-reviewed `Files changed` in the Github PR explorer diff --git a/arkworks/marlin/.github/workflows/ci.yml b/arkworks/marlin/.github/workflows/ci.yml new file mode 100644 index 00000000..0f4be64a --- /dev/null +++ b/arkworks/marlin/.github/workflows/ci.yml @@ -0,0 +1,105 @@ +name: CI +on: + pull_request: + push: + branches: + - master +env: + RUST_BACKTRACE: 1 + +jobs: + style: + name: Check Style + runs-on: ubuntu-latest + steps: + + - name: Checkout + uses: actions/checkout@v1 + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + components: rustfmt + + - name: cargo fmt --check + uses: actions-rs/cargo@v1 + with: + command: fmt + args: --all -- --check + + test: + name: Test + runs-on: ubuntu-latest + env: + RUSTFLAGS: -Dwarnings + strategy: + matrix: + rust: + - stable + - nightly + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Install Rust (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: ${{ matrix.rust }} + override: true + + - name: Check examples + uses: actions-rs/cargo@v1 + with: + command: check + args: --examples --all + + - name: Check examples with all features on stable + uses: actions-rs/cargo@v1 + with: + command: check + args: --examples --all-features --all + if: matrix.rust == 'stable' + + - name: Check benchmarks on nightly + uses: actions-rs/cargo@v1 + with: + command: check + args: --all-features --examples --all --benches + if: matrix.rust == 'nightly' + + - name: Test + uses: actions-rs/cargo@v1 + with: + command: test + args: --release + + check_no_std: + name: Check no_std + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Install Rust (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + target: aarch64-unknown-none + override: true + + - name: Build + uses: actions-rs/cargo@v1 + with: + use-cross: true + command: build + args: --no-default-features --target aarch64-unknown-none + + - name: Check + uses: actions-rs/cargo@v1 + with: + use-cross: true + command: check + args: --examples --no-default-features --target aarch64-unknown-none diff --git a/arkworks/marlin/.github/workflows/linkify_changelog.yml b/arkworks/marlin/.github/workflows/linkify_changelog.yml new file mode 100644 index 00000000..0cbe85f1 --- /dev/null +++ b/arkworks/marlin/.github/workflows/linkify_changelog.yml @@ -0,0 +1,20 @@ +name: Linkify Changelog + +on: + workflow_dispatch + +jobs: + linkify: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Add links + run: python3 scripts/linkify_changelog.py CHANGELOG.md + - name: Commit + run: | + git config user.name github-actions + git config user.email github-actions@github.com + git add . 
+ git commit -m "Linkify Changelog" + git push \ No newline at end of file diff --git a/arkworks/marlin/.gitignore b/arkworks/marlin/.gitignore new file mode 100644 index 00000000..be1aec0a --- /dev/null +++ b/arkworks/marlin/.gitignore @@ -0,0 +1,9 @@ +target +Cargo.lock +.DS_Store +.idea +*.iml +*.ipynb_checkpoints +*.pyc +*.sage.py +params diff --git a/arkworks/marlin/AUTHORS b/arkworks/marlin/AUTHORS new file mode 100644 index 00000000..19afa37e --- /dev/null +++ b/arkworks/marlin/AUTHORS @@ -0,0 +1,6 @@ +Alessandro Chiesa +Yuncong Hu +Mary Maller +Pratyush Mishra +Psi Vesely +Nicholas Ward diff --git a/arkworks/marlin/CHANGELOG.md b/arkworks/marlin/CHANGELOG.md new file mode 100644 index 00000000..05c79ae2 --- /dev/null +++ b/arkworks/marlin/CHANGELOG.md @@ -0,0 +1,23 @@ +# CHANGELOG + +## Pending + +### Breaking changes + +### Features + +### Improvements + +### Bug fixes + +## v0.3.0 + +- Change dependency to version `0.3.0` of other arkworks-rs crates. + +## v0.2.0 + +### Features + +- [\#47](https://github.com/arkworks-rs/marlin/pull/47) Automatically pad input to be of length 2^k, so constraint writers can have a public input of any size +- [\#51](https://github.com/arkworks-rs/marlin/pull/51) Implement CanonicalSerialize for Marlin's proofs. +- [\#54](https://github.com/arkworks-rs/marlin/pull/54) Implement CanonicalSerialize for Marlin's Index and Index Verification Key. diff --git a/arkworks/marlin/Cargo.toml b/arkworks/marlin/Cargo.toml new file mode 100644 index 00000000..5ed14bed --- /dev/null +++ b/arkworks/marlin/Cargo.toml @@ -0,0 +1,70 @@ +[package] +name = "ark-marlin" +version = "0.3.0" +authors = [ + "Alessandro Chiesa ", + "Mary Maller ", + "Yuncong Hu ", + "Pratyush Mishra ", + "Psi Vesely ", + "Nicholas Ward ", + "arkworks contributors" +] +description = "A library for the Marlin preprocessing zkSNARK" +repository = "https://github.com/arkworks-rs/marlin" +documentation = "https://docs.rs/ark-marlin/" +keywords = ["cryptography", "commitments", "zkSNARK"] +categories = ["cryptography"] +include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +license = "MIT/Apache-2.0" +edition = "2018" + +[dependencies] +ark-serialize = { path = "../algebra/serialize", version = "^0.3.0", default-features = false, features = [ "derive" ] } +ark-ff = { path = "../algebra/ff", version = "^0.3.0", default-features = false } +ark-std = { path = "../std", version = "^0.3.0", default-features = false } +ark-poly = { path = "../algebra/poly", version = "^0.3.0", default-features = false } +ark-relations = { path = "../snark/relations", version = "^0.3.0", default-features = false } +ark-poly-commit = { path = "../poly-commit", version = "^0.3.0", default-features = false } + +rand_chacha = { version = "0.3.0", default-features = false } +rayon = { version = "1", optional = true } +digest = { version = "0.9" } +derivative = { version = "2", features = ["use_core"] } + +[dev-dependencies] +blake2 = { version = "0.9", default-features = false } +ark-bls12-381 = { version = "^0.3.0", default-features = false, features = [ "curve" ] } +ark-mnt4-298 = { version = "^0.3.0", default-features = false, features = ["r1cs", "curve"] } +ark-mnt6-298 = { version = "^0.3.0", default-features = false, features = ["r1cs"] } +ark-mnt4-753 = { version = "^0.3.0", default-features = false, features = ["r1cs", "curve"] } +ark-mnt6-753 = { version = "^0.3.0", default-features = false, features = ["r1cs"] } + +[profile.release] +opt-level = 3 +lto = "thin" 
+incremental = true +debug = true +panic = 'abort' + +[profile.test] +opt-level = 3 +debug-assertions = true +incremental = true +debug = true + +[profile.dev] +opt-level = 0 +panic = 'abort' + +[features] +default = ["std", "parallel"] +std = [ "ark-ff/std", "ark-poly/std", "ark-relations/std", "ark-std/std", "ark-serialize/std", "ark-poly-commit/std" ] +print-trace = [ "ark-std/print-trace" ] +parallel = [ "std", "ark-ff/parallel", "ark-poly/parallel", "ark-std/parallel", "ark-poly-commit/parallel", "rayon" ] + +[[bench]] +name = "marlin-benches" +path = "benches/bench.rs" +harness = false +required-features = ["std"] diff --git a/arkworks/marlin/LICENSE-APACHE b/arkworks/marlin/LICENSE-APACHE new file mode 100644 index 00000000..16fe87b0 --- /dev/null +++ b/arkworks/marlin/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/arkworks/marlin/LICENSE-MIT b/arkworks/marlin/LICENSE-MIT new file mode 100644 index 00000000..72dc60d8 --- /dev/null +++ b/arkworks/marlin/LICENSE-MIT @@ -0,0 +1,19 @@ +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/arkworks/marlin/README.md b/arkworks/marlin/README.md new file mode 100644 index 00000000..588e419c --- /dev/null +++ b/arkworks/marlin/README.md @@ -0,0 +1,113 @@ +

+# Marlin
+
+`marlin` is a Rust library that implements a preprocessing zkSNARK for R1CS with universal and updatable SRS.
+ +This library was initially developed as part of the [Marlin paper][marlin], and is released under the MIT License and the Apache v2 License (see [License](#license)). + +**WARNING:** This is an academic prototype, and in particular has not received careful code review. This implementation is NOT ready for production use. + +## Overview + +A zkSNARK with **preprocessing** achieves succinct verification for arbitrary computations, as opposed to only for structured computations. Informally, in an offline phase, one can preprocess the desired computation to produce a short summary of it; subsequently, in an online phase, this summary can be used to check any number of arguments relative to this computation. + +The preprocessing zkSNARKs in this library rely on a structured reference string (SRS), which contains system parameters required by the argument system to produce/validate arguments. The SRS in this library is **universal**, which means that it supports (deterministically) preprocessing any computation up to a given size bound. The SRS is also **updatable**, which means that anyone can contribute a fresh share of randomness to it, which facilitates deployments in the real world. + +The construction in this library follows the methodology introduced in the [Marlin paper][marlin], which obtains preprocessing zkSNARKs with universal and updatable SRS by combining two ingredients: + +* an **algebraic holographic proof** +* a **polynomial commitment scheme** + +The first ingredient is provided as part of this library, and is an efficient algebraic holographic proof for R1CS (a generalization of arithmetic circuit satisfiability supported by many argument systems). The second ingredient is imported from [`poly-commit`](https://github.com/arkworks-rs/poly-commit). See below for evaluation details. + +## Build guide + +The library compiles on the `stable` toolchain of the Rust compiler. To install the latest version of Rust, first install `rustup` by following the instructions [here](https://rustup.rs/), or via your platform's package manager. Once `rustup` is installed, install the Rust toolchain by invoking: +```bash +rustup install stable +``` + +After that, use `cargo` (the standard Rust build tool) to build the library: +```bash +git clone https://github.com/arkworks-rs/marlin.git +cd marlin +cargo build --release +``` + +This library comes with some unit and integration tests. Run these tests with: +```bash +cargo test +``` + +Lastly, this library is instrumented with profiling infrastructure that prints detailed traces of execution time. To enable this, compile with `cargo build --features print-trace`. + + +## Benchmarks + +All benchmarks below are performed over the BLS12-381 curve implemented in the [`ark-bls12-381`](https://github.com/arkworks-rs/curves/) library, with the `asm` feature activated. Benchmarks were run on a machine with an Intel Xeon 6136 CPU running at 3.0 GHz. + + +### Running time compared to Groth16 + +The graphs below compare the running time, in single-thread execution, of Marlin's indexer, prover, and verifier algorithms with the corresponding algorithms of [Groth16][groth16] (the state of the art in preprocessing zkSNARKs for R1CS with circuit-specific SRS) as implemented in [`groth16`](https://github.com/arkworks-rs/groth16). 
We evaluate Marlin's algorithms when instantiated with the PC scheme from [[CHMMVW20]][marlin] (denoted "M-AHP w/ PC of [[CHMMVW20]][marlin]"), and the PC scheme from [[MBKM19]][sonic] (denoted "M-AHP w/ PC of [[MBKM19]][sonic]"). + +

+*(Figures: Indexer and Prover running times; Verifier running time.)*
+
+### Multi-threaded performance
+
+The following graphs compare the running time of Marlin's prover when instantiated with the PC scheme from [[CHMMVW20]][marlin] (left) and with the PC scheme from [[MBKM19]][sonic] (right), executed with varying numbers of threads.
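+
+The thread count is controlled by the `RAYON_NUM_THREADS` environment variable when the default `parallel` feature is active. For example, to run the benchmarks with four threads (an arbitrary illustrative count):
+
+```bash
+RAYON_NUM_THREADS=4 cargo bench --no-default-features --features "std parallel" -- --nocapture
+```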

+*(Figures: multi-threaded scaling of Marlin AHP with the PC scheme from [CHMMVW20]; multi-threaded scaling of Marlin AHP with the PC scheme from [MBKM19].)*
+ +### Proof size + +We compare the proof size of Marlin with that of [Groth16][groth16]. We instantiate the Marlin SNARK with the PC scheme from [[CHMMVW20]][marlin], and the PC scheme from [[MBKM19]][sonic]. + +| Scheme | Proof size in bytes | +|:------------------------------------------:|:---------------------:| +| Marlin AHP with PC of [[CHMMVW20]][marlin] | 880 | +| Marlin AHP with PC of [[MBKM19]][sonic] | 784 | +| [\[Groth16\]][groth16] | 192 | + + +## License + +This library is licensed under either of the following licenses, at your discretion. + + * [Apache License Version 2.0](LICENSE-APACHE) + * [MIT License](LICENSE-MIT) + +Unless you explicitly state otherwise, any contribution that you submit to this library shall be dual licensed as above (as defined in the Apache v2 License), without any additional terms or conditions. + +[marlin]: https://ia.cr/2019/1047 +[sonic]: https://ia.cr/2019/099 +[groth16]: https://ia.cr/2016/260 + +## Reference paper + +[Marlin: Preprocessing zkSNARKs with Universal and Updatable SRS][marlin] +Alessandro Chiesa, Yuncong Hu, Mary Maller, [Pratyush Mishra](https://www.github.com/pratyush), [Psi Vesely](https://github.com/psivesely), [Nicholas Ward](https://www.github.com/npwardberkeley) +EUROCRYPT 2020 + +## Acknowledgements + +This work was supported by: an Engineering and Physical Sciences Research Council grant; a Google Faculty Award; the RISELab at UC Berkeley; and donations from the Ethereum Foundation and the Interchain Foundation. diff --git a/arkworks/marlin/benches/bench.rs b/arkworks/marlin/benches/bench.rs new file mode 100644 index 00000000..f0da95c0 --- /dev/null +++ b/arkworks/marlin/benches/bench.rs @@ -0,0 +1,179 @@ +// For benchmark, run: +// RAYON_NUM_THREADS=N cargo bench --no-default-features --features "std parallel" -- --nocapture +// where N is the number of threads you want to use (N = 1 for single-thread). 
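+//
+// Overview of the benchmark below: `universal_setup` generates the universal
+// SRS, `index` preprocesses a dummy `a * b = c` circuit (padded with extra
+// variables and constraints) into a proving/verifying key pair, and
+// `prove` / `verify` are then timed over NUM_PROVE_REPEATITIONS and
+// NUM_VERIFY_REPEATITIONS iterations respectively.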
+ +use ark_bls12_381::{Bls12_381, Fr as BlsFr}; +use ark_ff::PrimeField; +use ark_marlin::Marlin; +use ark_mnt4_298::{Fr as MNT4Fr, MNT4_298}; +use ark_mnt4_753::{Fr as MNT4BigFr, MNT4_753}; +use ark_mnt6_298::{Fr as MNT6Fr, MNT6_298}; +use ark_mnt6_753::{Fr as MNT6BigFr, MNT6_753}; +use ark_poly::univariate::DensePolynomial; +use ark_poly_commit::marlin_pc::MarlinKZG10; +use ark_relations::{ + lc, + r1cs::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisError}, +}; +use ark_std::{ops::Mul, UniformRand}; +use blake2::Blake2s; + +const NUM_PROVE_REPEATITIONS: usize = 10; +const NUM_VERIFY_REPEATITIONS: usize = 50; + +#[derive(Copy)] +struct DummyCircuit { + pub a: Option, + pub b: Option, + pub num_variables: usize, + pub num_constraints: usize, +} + +impl Clone for DummyCircuit { + fn clone(&self) -> Self { + DummyCircuit { + a: self.a.clone(), + b: self.b.clone(), + num_variables: self.num_variables.clone(), + num_constraints: self.num_constraints.clone(), + } + } +} + +impl ConstraintSynthesizer for DummyCircuit { + fn generate_constraints(self, cs: ConstraintSystemRef) -> Result<(), SynthesisError> { + let a = cs.new_witness_variable(|| self.a.ok_or(SynthesisError::AssignmentMissing))?; + let b = cs.new_witness_variable(|| self.b.ok_or(SynthesisError::AssignmentMissing))?; + let c = cs.new_input_variable(|| { + let a = self.a.ok_or(SynthesisError::AssignmentMissing)?; + let b = self.b.ok_or(SynthesisError::AssignmentMissing)?; + + Ok(a * b) + })?; + + for _ in 0..(self.num_variables - 3) { + let _ = cs.new_witness_variable(|| self.a.ok_or(SynthesisError::AssignmentMissing))?; + } + + for _ in 0..self.num_constraints - 1 { + cs.enforce_constraint(lc!() + a, lc!() + b, lc!() + c)?; + } + + cs.enforce_constraint(lc!(), lc!(), lc!())?; + + Ok(()) + } +} + +macro_rules! marlin_prove_bench { + ($bench_name:ident, $bench_field:ty, $bench_pairing_engine:ty) => { + let rng = &mut ark_std::test_rng(); + let c = DummyCircuit::<$bench_field> { + a: Some(<$bench_field>::rand(rng)), + b: Some(<$bench_field>::rand(rng)), + num_variables: 10, + num_constraints: 65536, + }; + + let srs = Marlin::< + $bench_field, + MarlinKZG10<$bench_pairing_engine, DensePolynomial<$bench_field>>, + Blake2s, + >::universal_setup(65536, 65536, 65536, rng) + .unwrap(); + let (pk, _) = Marlin::< + $bench_field, + MarlinKZG10<$bench_pairing_engine, DensePolynomial<$bench_field>>, + Blake2s, + >::index(&srs, c) + .unwrap(); + + let start = ark_std::time::Instant::now(); + + for _ in 0..NUM_PROVE_REPEATITIONS { + let _ = Marlin::< + $bench_field, + MarlinKZG10<$bench_pairing_engine, DensePolynomial<$bench_field>>, + Blake2s, + >::prove(&pk, c.clone(), rng) + .unwrap(); + } + + println!( + "per-constraint proving time for {}: {} ns/constraint", + stringify!($bench_pairing_engine), + start.elapsed().as_nanos() / NUM_PROVE_REPEATITIONS as u128 / 65536u128 + ); + }; +} + +macro_rules! 
marlin_verify_bench { + ($bench_name:ident, $bench_field:ty, $bench_pairing_engine:ty) => { + let rng = &mut ark_std::test_rng(); + let c = DummyCircuit::<$bench_field> { + a: Some(<$bench_field>::rand(rng)), + b: Some(<$bench_field>::rand(rng)), + num_variables: 10, + num_constraints: 65536, + }; + + let srs = Marlin::< + $bench_field, + MarlinKZG10<$bench_pairing_engine, DensePolynomial<$bench_field>>, + Blake2s, + >::universal_setup(65536, 65536, 65536, rng) + .unwrap(); + let (pk, vk) = Marlin::< + $bench_field, + MarlinKZG10<$bench_pairing_engine, DensePolynomial<$bench_field>>, + Blake2s, + >::index(&srs, c) + .unwrap(); + let proof = Marlin::< + $bench_field, + MarlinKZG10<$bench_pairing_engine, DensePolynomial<$bench_field>>, + Blake2s, + >::prove(&pk, c.clone(), rng) + .unwrap(); + + let v = c.a.unwrap().mul(c.b.unwrap()); + + let start = ark_std::time::Instant::now(); + + for _ in 0..NUM_VERIFY_REPEATITIONS { + let _ = Marlin::< + $bench_field, + MarlinKZG10<$bench_pairing_engine, DensePolynomial<$bench_field>>, + Blake2s, + >::verify(&vk, &vec![v], &proof, rng) + .unwrap(); + } + + println!( + "verifying time for {}: {} ns", + stringify!($bench_pairing_engine), + start.elapsed().as_nanos() / NUM_VERIFY_REPEATITIONS as u128 + ); + }; +} + +fn bench_prove() { + marlin_prove_bench!(bls, BlsFr, Bls12_381); + marlin_prove_bench!(mnt4, MNT4Fr, MNT4_298); + marlin_prove_bench!(mnt6, MNT6Fr, MNT6_298); + marlin_prove_bench!(mnt4big, MNT4BigFr, MNT4_753); + marlin_prove_bench!(mnt6big, MNT6BigFr, MNT6_753); +} + +fn bench_verify() { + marlin_verify_bench!(bls, BlsFr, Bls12_381); + marlin_verify_bench!(mnt4, MNT4Fr, MNT4_298); + marlin_verify_bench!(mnt6, MNT6Fr, MNT6_298); + marlin_verify_bench!(mnt4big, MNT4BigFr, MNT4_753); + marlin_verify_bench!(mnt6big, MNT6BigFr, MNT6_753); +} + +fn main() { + bench_prove(); + bench_verify(); +} diff --git a/arkworks/marlin/diagram/.gitignore b/arkworks/marlin/diagram/.gitignore new file mode 100644 index 00000000..834c4a22 --- /dev/null +++ b/arkworks/marlin/diagram/.gitignore @@ -0,0 +1 @@ +latex.out diff --git a/arkworks/marlin/diagram/diagram.pdf b/arkworks/marlin/diagram/diagram.pdf new file mode 100644 index 00000000..ffee934c Binary files /dev/null and b/arkworks/marlin/diagram/diagram.pdf differ diff --git a/arkworks/marlin/diagram/diagram.tex b/arkworks/marlin/diagram/diagram.tex new file mode 100644 index 00000000..30599367 --- /dev/null +++ b/arkworks/marlin/diagram/diagram.tex @@ -0,0 +1,269 @@ +\documentclass{article} +\usepackage[utf8]{inputenc} +\usepackage{amsmath} +\usepackage{amsfonts} + +\title{Marlin Diagram} +\date{July 2020} + +\usepackage[x11names]{xcolor} +\usepackage[b4paper,margin=1.2in]{geometry} +\usepackage{tikz} +\usepackage{afterpage} + +\newenvironment{rcases} + {\left.\begin{aligned}} + {\end{aligned}\right\rbrace} + +\begin{document} + +\newcommand{\cm}[1]{\ensuremath{\mathsf{cm}_{#1}}} +\newcommand{\vcm}[1]{\ensuremath{\mathsf{vcm}_{#1}}} +\newcommand{\s}{\ensuremath{\hat{s}}} +\newcommand{\w}{\ensuremath{\hat{w}}} +\newcommand{\x}{\ensuremath{\hat{x}}} +\newcommand{\z}{\ensuremath{\hat{z}}} +\newcommand{\za}{\ensuremath{\hat{z}_A}} +\newcommand{\zb}{\ensuremath{\hat{z}_B}} +\newcommand{\zc}{\ensuremath{\hat{z}_C +}} +\newcommand{\zm}{\ensuremath{\hat{z}_M}} + +\newcommand{\val}{\ensuremath{\mathsf{val}}} +\newcommand{\row}{\ensuremath{\mathsf{row}}} +\newcommand{\col}{\ensuremath{\mathsf{col}}} +\newcommand{\rowcol}{\ensuremath{\mathsf{rowcol}}} + +\newcommand{\hval}{\ensuremath{\widehat{\val}}} 
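+% Note: \hval above and \hrow, \hcol, \hrowcol below are the low-degree
+% extensions of the val/row/col/row*col tables of the shifted matrices M^*
+% (see the glossary table).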
+\newcommand{\hrow}{\ensuremath{\widehat{\row}}} +\newcommand{\hcol}{\ensuremath{\widehat{\col}}} +\newcommand{\hrowcol}{\ensuremath{\widehat{\rowcol}}} + +\newcommand{\bb}{\ensuremath{\mathsf{b}}} +\newcommand{\denom}{\ensuremath{\mathsf{denom}}} + +\newcommand{\sumcheckinner}{\mathsf{sumcheck} +_{\mathsf{inner}}} +\newcommand{\sumcheckouter}{\mathsf{sumcheck}_{\mathsf{outer}}} + +\newcommand{\Prover}{\mathcal{P}} +\newcommand{\Verifier}{\mathcal{V}} + +\newcommand{\F}{\mathbb{F}} + +\newcommand{\DomainA}{H} +\newcommand{\DomainB}{K} + +\newcommand{\vPoly}[1]{\ensuremath{v_{#1}}} + + +This diagram (on the following page) shows the interaction of the Marlin prover and verifier. It is similar to the diagrams in the paper (Figure 5 in Section 5 and Figure 7 in Appendix E, in the latest ePrint version), but with two changes: it shows not just the AHP but also the use of the polynomial commitments (the cryptography layer); and it aims to be fully up-to-date with the recent optimizations to the codebase. This diagram, together with the diagrams in the paper, can act as a ``bridge" between the codebase and the theory that the paper describes. + +\section{Glossary of notation} +\begin{table*}[htbp] + \centering + \begin{tabular}{c|c} + $\F$ & the finite field over which the R1CS instance is defined \\ + \hline + $x$ & public input \\ + \hline + $w$ & secret witness \\ + \hline + $\DomainA$ & variable domain \\ + \hline + $\DomainB$ & matrix domain \\ + \hline + $X$ & domain sized for input (not including witness) \\ + \hline + $v_D(X)$ & vanishing polynomial over domain $D$ \\ + \hline + $u_D(X, Y)$ & bivariate derivative of vanishing polynomials over domain $D$\\ + \hline + $A, B, C$ & R1CS instance matrices \\ + \hline + $A^*, B^*, C^*$ & + \begin{tabular}{@{}c@{}}shifted transpose of $A,B,C$ matries given by $M^*_{a,b} := M_{b,a} \cdot u_\DomainA(b,b) \; \forall a,b \in \DomainA$ \\ (optimization from Fractal, explained in Claim 6.7 of that paper) \end{tabular} \\ + \hline + $\{\hval, \hrow, \hcol\}_{\{A^*,B^*,C^*\}}$ & + \begin{tabular}{@{}c@{}} preprocessed polynomials from $A^*, B^*, C^*$ matrices containing LDEs of (respectively) \\ row positions, column positions, and values of non-zero matrix elements \end{tabular} \\ + \hline + $\hrowcol_{\{A^*, B^*, C^*\}}$ & + \begin{tabular}{@{}c@{}} the product polynomial of $\hrow$ and $\hcol$, given separately for efficiency (namely \\ to allow this product to be part of a \textit{linear} combination) \end{tabular} \\ + \hline + $\Prover$ & prover \\ + \hline + $\Verifier$ & verifier \\ + \hline + $\Verifier^{p}$ & + \begin{tabular}{@{}c@{}} $\Verifier$ with ``oracle" access to polynomial $p$ (via commitments provided \\ by the indexer, later opened as necessary by $\Prover$) \end{tabular}\\ + \hline + $\bb$ & bound on the number of queries \\ + \hline + $r_M(X, Y)$ & an intermediate polynomial defined by $r_M(X, Y) = M^*(Y,X)$\\ + \hline + \end{tabular} +\end{table*} + +\afterpage{% +\newgeometry{margin=0.5in} + +\section{Diagram} + +\centering +\begin{tikzpicture}[scale=0.9, every node/.style={scale=0.9}] + +\tikzstyle{lalign} = [minimum width=3cm,align=left,anchor=west] +\tikzstyle{ralign} = [minimum width=3cm,align=right,anchor=east] + +\node[lalign] (prover) at (-3,27.3) {% +$\Prover(\F, \DomainA, \DomainB, A, B, C, x, w)$ +}; + +\node[ralign] (verifier) at (16.2,27.3) {% +$\Verifier^{\{\hval, \hrow, \hcol, \hrowcol\}_{\{A^*, B^*, C^*\}}}(\F, \DomainA, \DomainB, x)$ +}; + +\draw [line width=1.0pt] (-3,27.0) -- (16,27.0); + +\node[lalign] 
(prover1) at (-3,26.1) {% +$z := (x, w), z_A := Az, z_B := Bz$ \\ +sample $\w(X) \in \F^{<|w|+\bb}[X]$ and $\za(X), \zb(X) \in \F^{<|\DomainA|+\bb}[X]$ \\ +sample mask poly $\s(X) \in \F^{<3|\DomainA|+2\bb-2}[X]$ such that $\sum_{\kappa \in \DomainA}\s(\kappa) = 0$ +}; + +\draw [->] (-2,24.8) -- node[midway,fill=white] {commitments $\cm{\w}, \cm{\za}, \cm{\zb}, \cm{\s}$} (15,24.8); + +\node[ralign] (verifier1) at (16,24.0) {% +$\eta_A, \eta_B, \eta_C \gets \F$ \\ +$\alpha \gets \F \setminus \DomainA$ +}; + +\draw [->] (15,23.3) -- node[midway,fill=white] {$\eta_A, \eta_B, \eta_C, \alpha \in \F$} (-2,23.3); + +\node[lalign] (prover2) at (-3,22.5) {% +compute $t(X) := \sum_M \eta_M r_M(\alpha, X)$ +}; + +\draw (-2.9,22.0) rectangle (15.9,4.8); + +\node (sc1label) at (6.5,21.7) {% +\textbf{sumcheck for} $\s(X) + u_H(\alpha, X) \left(\sum_M \eta_M \zm(X)\right) - t(X)\z(X)$ \textbf{ over } $\DomainA$ +}; + +\node[lalign] (prover3) at (-2,20.7) {% +let $\zc(X) := \za(X) \cdot \zb(X)$ \\ +find $g_1(X) \in \F^{|\DomainA|-1}[X]$ and $h_1(X)$ such that \\ +$s(X)+u_H(\alpha, X)(\sum_M \eta_M \zm(X)) - t(X)\z(X) = h_1(X)\vPoly{\DomainA}(X) + Xg_1(X)$ \hspace{0.3cm} $(*)$ +}; + +\draw [->] (-1,19.5) -- node[midway,fill=white] {commitments $\cm{t}, \cm{g_1}, \cm{h_1}$} (14,19.5); + +\node[ralign] (verifier2) at (15.4,19.1) {% +$\beta \gets \F \setminus \DomainA$ +}; + +\draw [->] (14,18.7) -- node[midway,fill=white] {$\beta \in \F$} (-1,18.7); + +\draw (-0.85,18.2) rectangle (13.85,8.4); + +\node (sc2label) at (6.5,17.6) {% +\textbf{sumcheck for } $\sum\limits_{M \in \{A, B, C\}} \eta_M \frac{\vPoly{\DomainA}(\beta) \vPoly{\DomainA}(\alpha)\hval_{M^*}(X)}{\color{purple}(\beta-\hrow_{M^*}(X))(\alpha-\hcol_{M^*}(X))} $ \textbf{ over } $\DomainB$ +}; + +\node[align=center] (mid1) at (6.5, 16.3) {% +$\begin{aligned} +\text{for } M \in \{A, B, C\} \text{, let } {\color{purple} M_\denom(X)} &:= (\beta - \hrow_{M^*}(X)) (\alpha - \hcol_{M^*}(X)) \\ +&= {\color{gray}\alpha\beta} - {\color{gray}\alpha}\hrow_{M^*}(X) - {\color{gray}\beta}\hcol_{M^*}(X) + \hrowcol_{M^*}(X) +\end{aligned}$ +}; + +\node[align=center] (mid2) at (6.5, 15.0) {% +let ${\color{orange} a(X)} := \sum\limits_{M \in \{A, B, C\}} {\color{gray} \eta_M \vPoly{\DomainA}(\beta) \vPoly{\DomainA}(\alpha)} \hval_{M^*}(X) \prod_{N \neq M} {\color{purple} N_\denom(X)}$ +}; + +\node[align=center] (mid3) at (6.5, 14.1) {% +let ${\color{Green4} b(X)} := \prod\limits_{M \in \{A, B, C\}} {\color{purple} M_\denom(X)}$ +}; + +\node[lalign] (prover4) at (-0.75,13.2) {% +find $g_2(X) \in \F^{|\DomainB|-1}[X]$ and $h_2(X)$ s.t. 
\\ +$h_2(X)\vPoly{\DomainB}(X) = {\color{orange} a(X)} - {\color{Green4} b(X)} (Xg_2(X)+t(\beta)/|\DomainB|)$ \hspace{0.3cm} $(**)$ +}; + +\draw [->] (0,12.2) -- node[midway,fill=white] {commitments $\cm{g_2}, \cm{h_2}$} (13,12.2); + +\draw [->] (13,11.5) -- node[midway,fill=white] {$\gamma \in \F$} (0,11.5); + +\node[ralign] (verifier3) at (14.5, 11.9) {% +$\gamma \gets \F$ +}; + +\draw[dashed] (1.5,11.0) rectangle (11.5,8.8); + +\node[align=center] (mid4) at (6.5, 9.9) {% +To verify $(**)$, $\Verifier$ will need to check the following: \\[10pt] +$ \underbrace{{\color{orange} a({\color{black} \gamma})} - {\color{Green4} b({\color{black} \gamma})} {\color{gray} (\gamma g_2(\gamma) + t(\beta) / |\DomainB|) - \vPoly{\DomainB}(\gamma)} h_2(\gamma)}_{\sumcheckinner(\gamma)} \stackrel{?}{=} 0 $ +}; + +\node[ralign] (verifier3) at (15.4, 7.9) {% +Compute $\x(X) \in \F^{<|x|}[X]$ from input $x$ +}; + +\draw[dashed] (-2.7,7.4) rectangle (15.7,5.2); + +\node[align=center] (mid5) at (6.5, 6.3) {% +To verify $(*)$, $\Verifier$ will need to check the following: \\[10pt] +$ \underbrace{s(\beta) + {\color{gray} v_H(\alpha, \beta)} ({\color{gray} \eta_A} \za(\beta) + {\color{gray} \eta_C\zb(\beta)} \za(\beta) + {\color{gray} \eta_B\zb(\beta)}) - {\color{gray} t(\beta) \vPoly{X}(\beta)} \w(\beta) - {\color{gray} t(\beta) \x(\beta)} - {\color{gray} \vPoly{\DomainA}(\beta)} h_1(\beta) - {\color{gray} \beta g_1(\beta)}}_{\sumcheckouter(\beta)} \stackrel{?}{=} 0 $ +}; + +\node[lalign] (prover5) at (-3,3.9) {% +$v_{g_2} := g_2(\gamma), v_{A_\denom} := A_\denom(\gamma), v_{B_\denom} := B_\denom(\gamma), v_{C_\denom} := C_\denom(\gamma)$ \\[3pt] +$v_{g_1} := g_1(\beta), v_{\zb} := \zb(\beta), v_{t} := t(\beta)$ +}; + +\draw [->] (-2,2.9) -- node[midway,fill=white] {$v_{g_2}, v_{A_\denom}, v_{B_\denom}, v_{C_\denom}, v_{g_1}, v_{\zb}, v_{t}$} (15,2.9); + +\node[align=center] (mid6) at (6.5,1.9) {% +use index commitments $\hrow, \hcol, \hrowcol$ to construct virtual commitments $\vcm{\{A_\denom, B_\denom, C_\denom\}}$ +}; + +\node[align=center] (mid7) at (6.5,0.8) {% +use index commitments $\hval$, commitments $\vcm{A_\denom}$, $\vcm{B_\denom}, \vcm{C_\denom}, \cm{h_2}$, {\color{gray} and evaluations $g_2(\gamma),t(\beta)$} \\ +to construct virtual commitment $\vcm{\sumcheckinner}$ +}; + +\node[align=center] (mid8) at (6.5,-0.5) {% +use commitments $\cm{\s}, \cm{\za}, \cm{\w}, \cm{h_1}$ {\color{gray} and evaluations $\zb(\beta), t(\beta), g_1(\beta)$} \\ +to construct virtual commitment $\vcm{\sumcheckouter}$ +}; + +\node[ralign] (verifier4) at (16,-1.5) {% +$\xi_1, \dots, \xi_5 \gets \F$ +}; + +\draw [->] (15,-2.1) -- node[midway,fill=white] {$\xi_1, \dots, \xi_5$} (-2,-2.1); + +\node[lalign] (prover6) at (-3,-3.6) {% +use $\mathsf{PC}.\mathsf{Prove}$ with randomness $\xi_1, \dots, \xi_5$ to \\ +construct a batch opening proof $\pi$ of the following: \\ +$(\cm{g_2}, \cm{A_\denom}, \cm{B_\denom}, \cm{C_\denom}, {\color{red} \vcm{\sumcheckinner}})$ at $\gamma$ evaluate to $(v_{g_2}, v_{A_\denom}, v_{B_\denom}, v_{C_\denom}, {\color{red} 0})$ \hspace{0.3cm} ${\color{red} (**)}$ \\ +$(\cm{g_1}, \cm{\zb}, \cm{t}, {\color{red} \vcm{\sumcheckouter}})$ at $\beta$ evaluate to $(v_{g_1}, v_{\zb}, v_{t}, {\color{red} 0})$ \hspace{0.3cm} ${\color{red} (*)}$ \\ +}; + +\draw [->] (-2,-4.7) -- node[midway,fill=white] {$\pi$} (15,-4.7); + +\node[ralign] (verifier5) at (16,-6.0) {% +verify $\pi$ with $\mathsf{PC}.\mathsf{Verify}$, using randomness $\xi_1, \dots, \xi_5$, \\ +evaluations $v_{g_2}, v_{A_\denom}, v_{B_\denom}, 
v_{C_\denom}, v_{g_1}, v_{\zb}, v_{t}$, and \\ +commitments $\cm{g_2}, \cm{A_\denom}, \cm{B_\denom}, \cm{C_\denom},$ \\ +$\vcm{\sumcheckinner}, \cm{g_1}, \cm{\zb}, \cm{t}, \vcm{\sumcheckinner}$ +}; + +\end{tikzpicture} + +\clearpage +\restoregeometry +} + + +\end{document} diff --git a/arkworks/marlin/scripts/linkify_changelog.py b/arkworks/marlin/scripts/linkify_changelog.py new file mode 100644 index 00000000..867ae14d --- /dev/null +++ b/arkworks/marlin/scripts/linkify_changelog.py @@ -0,0 +1,31 @@ +import re +import sys +import fileinput +import os + +# Set this to the name of the repo, if you don't want it to be read from the filesystem. +# It assumes the changelog file is in the root of the repo. +repo_name = "" + +# This script goes through the provided file, and replaces any " \#", +# with the valid mark down formatted link to it. e.g. +# " [\#number](https://github.com/arkworks-rs/template/pull/) +# Note that if the number is for a an issue, github will auto-redirect you when you click the link. +# It is safe to run the script multiple times in succession. +# +# Example usage $ python3 linkify_changelog.py ../CHANGELOG.md +if len(sys.argv) < 2: + print("Must include path to changelog as the first argument to the script") + print("Example Usage: python3 linkify_changelog.py ../CHANGELOG.md") + exit() + +changelog_path = sys.argv[1] +if repo_name == "": + path = os.path.abspath(changelog_path) + components = path.split(os.path.sep) + repo_name = components[-2] + +for line in fileinput.input(inplace=True): + line = re.sub(r"\- #([0-9]*)", r"- [\\#\1](https://github.com/arkworks-rs/" + repo_name + r"/pull/\1)", line.rstrip()) + # edits the current file + print(line) \ No newline at end of file diff --git a/arkworks/marlin/src/ahp/constraint_systems.rs b/arkworks/marlin/src/ahp/constraint_systems.rs new file mode 100644 index 00000000..d4c85f89 --- /dev/null +++ b/arkworks/marlin/src/ahp/constraint_systems.rs @@ -0,0 +1,302 @@ +#![allow(non_snake_case)] + +use crate::ahp::indexer::Matrix; +use crate::ahp::*; +use crate::{BTreeMap, ToString}; +use ark_ff::{Field, PrimeField}; +use ark_poly::{EvaluationDomain, Evaluations as EvaluationsOnDomain, GeneralEvaluationDomain}; +use ark_relations::{ + lc, + r1cs::{ConstraintMatrices, ConstraintSystemRef}, +}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError}; +use ark_std::{ + cfg_iter_mut, + io::{Read, Write}, +}; +use derivative::Derivative; + +/* ************************************************************************* */ +/* ************************************************************************* */ +/* ************************************************************************* */ + +pub(crate) fn balance_matrices(a_matrix: &mut Matrix, b_matrix: &mut Matrix) { + let mut a_density: usize = a_matrix.iter().map(|row| row.len()).sum(); + let mut b_density: usize = b_matrix.iter().map(|row| row.len()).sum(); + let mut max_density = core::cmp::max(a_density, b_density); + let mut a_is_denser = a_density == max_density; + for (a_row, b_row) in a_matrix.iter_mut().zip(b_matrix) { + if a_is_denser { + let a_row_size = a_row.len(); + let b_row_size = b_row.len(); + core::mem::swap(a_row, b_row); + a_density = a_density - a_row_size + b_row_size; + b_density = b_density - b_row_size + a_row_size; + max_density = core::cmp::max(a_density, b_density); + a_is_denser = a_density == max_density; + } + } +} + +pub(crate) fn num_non_zero(matrices: &ConstraintMatrices) -> usize { + *[ + matrices.a_num_non_zero, 
+ matrices.b_num_non_zero, + matrices.c_num_non_zero, + ] + .iter() + .max() + .unwrap() +} + +pub(crate) fn make_matrices_square_for_indexer(cs: ConstraintSystemRef) { + let num_variables = cs.num_instance_variables() + cs.num_witness_variables(); + let matrix_dim = padded_matrix_dim(num_variables, cs.num_constraints()); + make_matrices_square(cs.clone(), num_variables); + assert_eq!( + cs.num_instance_variables() + cs.num_witness_variables(), + cs.num_constraints(), + "padding failed!" + ); + assert_eq!( + cs.num_instance_variables() + cs.num_witness_variables(), + matrix_dim, + "padding does not result in expected matrix size!" + ); +} + +/// This must *always* be in sync with `make_matrices_square`. +pub(crate) fn padded_matrix_dim(num_formatted_variables: usize, num_constraints: usize) -> usize { + core::cmp::max(num_formatted_variables, num_constraints) +} + +pub(crate) fn pad_input_for_indexer_and_prover(cs: ConstraintSystemRef) { + let formatted_input_size = cs.num_instance_variables(); + + let domain_x = GeneralEvaluationDomain::::new(formatted_input_size); + assert!(domain_x.is_some()); + + let padded_size = domain_x.unwrap().size(); + + if padded_size > formatted_input_size { + for _ in 0..(padded_size - formatted_input_size) { + cs.new_input_variable(|| Ok(F::zero())).unwrap(); + } + } +} + +pub(crate) fn make_matrices_square( + cs: ConstraintSystemRef, + num_formatted_variables: usize, +) { + let num_constraints = cs.num_constraints(); + let matrix_padding = ((num_formatted_variables as isize) - (num_constraints as isize)).abs(); + + if num_formatted_variables > num_constraints { + // Add dummy constraints of the form 0 * 0 == 0 + for _ in 0..matrix_padding { + cs.enforce_constraint(lc!(), lc!(), lc!()) + .expect("enforce 0 * 0 == 0 failed"); + } + } else { + // Add dummy unconstrained variables + for _ in 0..matrix_padding { + let _ = cs + .new_witness_variable(|| Ok(F::one())) + .expect("alloc failed"); + } + } +} + +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative(Clone(bound = "F: PrimeField"))] +pub struct MatrixEvals { + /// Evaluations of the LDE of row. + pub row: EvaluationsOnDomain, + /// Evaluations of the LDE of col. + pub col: EvaluationsOnDomain, + /// Evaluations of the LDE of val. + pub val: EvaluationsOnDomain, +} + +/// Contains information about the arithmetization of the matrix M^*. +/// Here `M^*(i, j) := M(j, i) * u_H(j, j)`. For more details, see [COS19]. +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative(Clone(bound = "F: PrimeField"))] +pub struct MatrixArithmetization { + /// LDE of the row indices of M^*. + pub row: LabeledPolynomial, + /// LDE of the column indices of M^*. + pub col: LabeledPolynomial, + /// LDE of the non-zero entries of M^*. + pub val: LabeledPolynomial, + /// LDE of the vector containing entry-wise products of `row` and `col`, + /// where `row` and `col` are as above. + pub row_col: LabeledPolynomial, + + /// Evaluation of `self.row`, `self.col`, and `self.val` on the domain `K`. + pub evals_on_K: MatrixEvals, + + /// Evaluation of `self.row`, `self.col`, and, `self.val` on + /// an extended domain B (of size > `3K`). + // TODO: rename B everywhere. + pub evals_on_B: MatrixEvals, + + /// Evaluation of `self.row_col` on an extended domain B (of size > `3K`). + pub row_col_evals_on_B: EvaluationsOnDomain, +} + +// TODO for debugging: add test that checks result of arithmetize_matrix(M). 
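+// Overview of `arithmetize_matrix` (below): for each non-zero entry of M,
+// iterated in transposed order to obtain M^*, it records the row/col elements
+// over the output domain, rescales each value by the inverse of the
+// unnormalized bivariate Lagrange evaluation at its column element (computed
+// with a single batch inversion), pads the row/col/val vectors to |K|,
+// interpolates row, col, val, and row_col over the interpolation domain K,
+// and finally evaluates them over the extended domain B via FFT.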
+pub(crate) fn arithmetize_matrix( + matrix_name: &str, + matrix: &mut Matrix, + interpolation_domain: GeneralEvaluationDomain, + output_domain: GeneralEvaluationDomain, + input_domain: GeneralEvaluationDomain, + expanded_domain: GeneralEvaluationDomain, +) -> MatrixArithmetization { + let matrix_time = start_timer!(|| "Computing row, col, and val LDEs"); + + let elems: Vec<_> = output_domain.elements().collect(); + + let mut row_vec = Vec::new(); + let mut col_vec = Vec::new(); + let mut val_vec = Vec::new(); + + let eq_poly_vals_time = start_timer!(|| "Precomputing eq_poly_vals"); + let eq_poly_vals: BTreeMap = output_domain + .elements() + .zip(output_domain.batch_eval_unnormalized_bivariate_lagrange_poly_with_same_inputs()) + .collect(); + end_timer!(eq_poly_vals_time); + + let lde_evals_time = start_timer!(|| "Computing row, col and val evals"); + let mut inverses = Vec::new(); + + let mut count = 0; + + // Recall that we are computing the arithmetization of M^*, + // where `M^*(i, j) := M(j, i) * u_H(j, j)`. + for (r, row) in matrix.into_iter().enumerate() { + if !is_in_ascending_order(&row, |(_, a), (_, b)| a < b) { + row.sort_by(|(_, a), (_, b)| a.cmp(b)); + }; + + for &mut (val, i) in row { + let row_val = elems[r]; + let col_val = elems[output_domain.reindex_by_subdomain(input_domain, i)]; + + // We are dealing with the transpose of M + row_vec.push(col_val); + col_vec.push(row_val); + val_vec.push(val); + inverses.push(eq_poly_vals[&col_val]); + + count += 1; + } + } + ark_ff::batch_inversion::(&mut inverses); + + cfg_iter_mut!(val_vec) + .zip(inverses) + .for_each(|(v, inv)| *v *= &inv); + end_timer!(lde_evals_time); + + for _ in 0..(interpolation_domain.size() - count) { + col_vec.push(elems[0]); + row_vec.push(elems[0]); + val_vec.push(F::zero()); + } + let row_col_vec: Vec<_> = row_vec + .iter() + .zip(&col_vec) + .map(|(row, col)| *row * col) + .collect(); + + let interpolate_time = start_timer!(|| "Interpolating on K and B"); + let row_evals_on_K = EvaluationsOnDomain::from_vec_and_domain(row_vec, interpolation_domain); + let col_evals_on_K = EvaluationsOnDomain::from_vec_and_domain(col_vec, interpolation_domain); + let val_evals_on_K = EvaluationsOnDomain::from_vec_and_domain(val_vec, interpolation_domain); + let row_col_evals_on_K = + EvaluationsOnDomain::from_vec_and_domain(row_col_vec, interpolation_domain); + + let row = row_evals_on_K.clone().interpolate(); + let col = col_evals_on_K.clone().interpolate(); + let val = val_evals_on_K.clone().interpolate(); + let row_col = row_col_evals_on_K.interpolate(); + + let row_evals_on_B = + EvaluationsOnDomain::from_vec_and_domain(expanded_domain.fft(&row), expanded_domain); + let col_evals_on_B = + EvaluationsOnDomain::from_vec_and_domain(expanded_domain.fft(&col), expanded_domain); + let val_evals_on_B = + EvaluationsOnDomain::from_vec_and_domain(expanded_domain.fft(&val), expanded_domain); + let row_col_evals_on_B = + EvaluationsOnDomain::from_vec_and_domain(expanded_domain.fft(&row_col), expanded_domain); + end_timer!(interpolate_time); + + end_timer!(matrix_time); + let evals_on_K = MatrixEvals { + row: row_evals_on_K, + col: col_evals_on_K, + val: val_evals_on_K, + }; + let evals_on_B = MatrixEvals { + row: row_evals_on_B, + col: col_evals_on_B, + val: val_evals_on_B, + }; + + let m_name = matrix_name.to_string(); + MatrixArithmetization { + row: LabeledPolynomial::new(m_name.clone() + "_row", row, None, None), + col: LabeledPolynomial::new(m_name.clone() + "_col", col, None, None), + val: 
LabeledPolynomial::new(m_name.clone() + "_val", val, None, None), + row_col: LabeledPolynomial::new(m_name + "_row_col", row_col, None, None), + evals_on_K, + evals_on_B, + row_col_evals_on_B: row_col_evals_on_B, + } +} + +fn is_in_ascending_order(x_s: &[T], is_less_than: impl Fn(&T, &T) -> bool) -> bool { + if x_s.is_empty() { + true + } else { + let mut i = 0; + let mut is_sorted = true; + while i < (x_s.len() - 1) { + is_sorted &= is_less_than(&x_s[i], &x_s[i + 1]); + i += 1; + } + is_sorted + } +} + +/* ************************************************************************* */ +/* ************************************************************************* */ +/* ************************************************************************* */ + +/// Formats the public input according to the requirements of the constraint +/// system +pub(crate) fn format_public_input(public_input: &[F]) -> Vec { + let mut input = vec![F::one()]; + input.extend_from_slice(public_input); + input +} + +/// Takes in a previously formatted public input and removes the formatting +/// imposed by the constraint system. +pub(crate) fn unformat_public_input(input: &[F]) -> Vec { + input[1..].to_vec() +} + +pub(crate) fn make_matrices_square_for_prover(cs: ConstraintSystemRef) { + let num_variables = cs.num_instance_variables() + cs.num_witness_variables(); + make_matrices_square(cs.clone(), num_variables); + assert_eq!( + cs.num_instance_variables() + cs.num_witness_variables(), + cs.num_constraints(), + "padding failed!" + ); +} diff --git a/arkworks/marlin/src/ahp/indexer.rs b/arkworks/marlin/src/ahp/indexer.rs new file mode 100644 index 00000000..442db91f --- /dev/null +++ b/arkworks/marlin/src/ahp/indexer.rs @@ -0,0 +1,209 @@ +#![allow(non_snake_case)] + +use crate::ahp::{ + constraint_systems::{arithmetize_matrix, MatrixArithmetization}, + AHPForR1CS, Error, LabeledPolynomial, +}; +use crate::Vec; +use ark_ff::PrimeField; +use ark_poly::{EvaluationDomain, GeneralEvaluationDomain}; +use ark_relations::r1cs::{ + ConstraintSynthesizer, ConstraintSystem, OptimizationGoal, SynthesisError, SynthesisMode, +}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError}; +use ark_std::{ + io::{Read, Write}, + marker::PhantomData, +}; +use derivative::Derivative; + +use crate::ahp::constraint_systems::{ + balance_matrices, make_matrices_square_for_indexer, num_non_zero, + pad_input_for_indexer_and_prover, +}; + +/// Information about the index, including the field of definition, the number of +/// variables, the number of constraints, and the maximum number of non-zero +/// entries in any of the constraint matrices. +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative(Clone(bound = ""), Copy(bound = ""))] +pub struct IndexInfo { + /// The total number of variables in the constraint system. + pub num_variables: usize, + /// The number of constraints. + pub num_constraints: usize, + /// The maximum number of non-zero entries in any constraint matrix. + pub num_non_zero: usize, + /// The number of input elements. + pub num_instance_variables: usize, + + #[doc(hidden)] + f: PhantomData, +} + +impl ark_ff::ToBytes for IndexInfo { + fn write(&self, mut w: W) -> ark_std::io::Result<()> { + (self.num_variables as u64).write(&mut w)?; + (self.num_constraints as u64).write(&mut w)?; + (self.num_non_zero as u64).write(&mut w) + } +} + +impl IndexInfo { + /// The maximum degree of polynomial required to represent this index in the + /// the AHP. 
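+ /// This delegates to `AHPForR1CS::max_degree`, using the number of
+ /// constraints, variables, and non-zero entries recorded in this struct.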
+ pub fn max_degree(&self) -> usize { + AHPForR1CS::::max_degree(self.num_constraints, self.num_variables, self.num_non_zero) + .unwrap() + } +} + +/// Represents a matrix. +pub type Matrix = Vec>; + +#[derive(Derivative)] +#[derivative(Clone(bound = "F: PrimeField"))] +/// The indexed version of the constraint system. +/// This struct contains three kinds of objects: +/// 1) `index_info` is information about the index, such as the size of the +/// public input +/// 2) `{a,b,c}` are the matrices defining the R1CS instance +/// 3) `{a,b,c}_star_arith` are structs containing information about A^*, B^*, and C^*, +/// which are matrices defined as `M^*(i, j) = M(j, i) * u_H(j, j)`. +#[derive(CanonicalSerialize, CanonicalDeserialize)] +pub struct Index { + /// Information about the index. + pub index_info: IndexInfo, + + /// The A matrix for the R1CS instance + pub a: Matrix, + /// The B matrix for the R1CS instance + pub b: Matrix, + /// The C matrix for the R1CS instance + pub c: Matrix, + + /// Arithmetization of the A* matrix. + pub a_star_arith: MatrixArithmetization, + /// Arithmetization of the B* matrix. + pub b_star_arith: MatrixArithmetization, + /// Arithmetization of the C* matrix. + pub c_star_arith: MatrixArithmetization, +} + +impl Index { + /// The maximum degree required to represent polynomials of this index. + pub fn max_degree(&self) -> usize { + self.index_info.max_degree() + } + + /// Iterate over the indexed polynomials. + pub fn iter(&self) -> impl Iterator> { + ark_std::vec![ + &self.a_star_arith.row, + &self.a_star_arith.col, + &self.a_star_arith.val, + &self.a_star_arith.row_col, + &self.b_star_arith.row, + &self.b_star_arith.col, + &self.b_star_arith.val, + &self.b_star_arith.row_col, + &self.c_star_arith.row, + &self.c_star_arith.col, + &self.c_star_arith.val, + &self.c_star_arith.row_col, + ] + .into_iter() + } +} + +impl AHPForR1CS { + /// Generate the index for this constraint system. 
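+ /// The indexer synthesizes the constraints in setup mode, pads the public
+ /// input and the matrices until they are square, balances the densities of
+ /// the A and B matrices, and arithmetizes A*, B*, and C* over the domains
+ /// K, H, X, and B.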
+ pub fn index>(c: C) -> Result, Error> { + let index_time = start_timer!(|| "AHP::Index"); + + let constraint_time = start_timer!(|| "Generating constraints"); + let ics = ConstraintSystem::new_ref(); + ics.set_optimization_goal(OptimizationGoal::Weight); + ics.set_mode(SynthesisMode::Setup); + c.generate_constraints(ics.clone())?; + end_timer!(constraint_time); + + let padding_time = start_timer!(|| "Padding matrices to make them square"); + pad_input_for_indexer_and_prover(ics.clone()); + end_timer!(padding_time); + let matrix_processing_time = start_timer!(|| "Processing matrices"); + ics.finalize(); + make_matrices_square_for_indexer(ics.clone()); + let matrices = ics.to_matrices().expect("should not be `None`"); + let num_non_zero_val = num_non_zero::(&matrices); + let (mut a, mut b, mut c) = (matrices.a, matrices.b, matrices.c); + balance_matrices(&mut a, &mut b); + end_timer!(matrix_processing_time); + + let (num_formatted_input_variables, num_witness_variables, num_constraints, num_non_zero) = ( + ics.num_instance_variables(), + ics.num_witness_variables(), + ics.num_constraints(), + num_non_zero_val, + ); + let num_variables = num_formatted_input_variables + num_witness_variables; + + if num_constraints != num_formatted_input_variables + num_witness_variables { + eprintln!( + "number of (formatted) input_variables: {}", + num_formatted_input_variables + ); + eprintln!("number of witness_variables: {}", num_witness_variables); + eprintln!("number of num_constraints: {}", num_constraints); + eprintln!("number of num_non_zero: {}", num_non_zero); + return Err(Error::NonSquareMatrix); + } + + if !Self::num_formatted_public_inputs_is_admissible(num_formatted_input_variables) { + return Err(Error::InvalidPublicInputLength); + } + + let index_info = IndexInfo { + num_variables, + num_constraints, + num_non_zero, + num_instance_variables: num_formatted_input_variables, + + f: PhantomData, + }; + + let domain_h = GeneralEvaluationDomain::new(num_constraints) + .ok_or(SynthesisError::PolynomialDegreeTooLarge)?; + let domain_k = GeneralEvaluationDomain::new(num_non_zero) + .ok_or(SynthesisError::PolynomialDegreeTooLarge)?; + let x_domain = GeneralEvaluationDomain::::new(num_formatted_input_variables) + .ok_or(SynthesisError::PolynomialDegreeTooLarge)?; + let b_domain = GeneralEvaluationDomain::::new(3 * domain_k.size() - 3) + .ok_or(SynthesisError::PolynomialDegreeTooLarge)?; + + let a_arithmetization_time = start_timer!(|| "Arithmetizing A"); + let a_star_arith = arithmetize_matrix("a", &mut a, domain_k, domain_h, x_domain, b_domain); + end_timer!(a_arithmetization_time); + + let b_arithmetization_time = start_timer!(|| "Arithmetizing B"); + let b_star_arith = arithmetize_matrix("b", &mut b, domain_k, domain_h, x_domain, b_domain); + end_timer!(b_arithmetization_time); + + let c_arithmetization_time = start_timer!(|| "Arithmetizing C"); + let c_star_arith = arithmetize_matrix("c", &mut c, domain_k, domain_h, x_domain, b_domain); + end_timer!(c_arithmetization_time); + + end_timer!(index_time); + Ok(Index { + index_info, + + a, + b, + c, + + a_star_arith, + b_star_arith, + c_star_arith, + }) + } +} diff --git a/arkworks/marlin/src/ahp/mod.rs b/arkworks/marlin/src/ahp/mod.rs new file mode 100644 index 00000000..a2f03a3c --- /dev/null +++ b/arkworks/marlin/src/ahp/mod.rs @@ -0,0 +1,495 @@ +use crate::{String, ToString, Vec}; +use ark_ff::{Field, PrimeField}; +use ark_poly::univariate::DensePolynomial; +use ark_poly::{EvaluationDomain, GeneralEvaluationDomain}; +use ark_poly_commit::{LCTerm, 
LinearCombination}; +use ark_relations::r1cs::SynthesisError; +use ark_std::{borrow::Borrow, cfg_iter_mut, format, marker::PhantomData, vec}; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +pub(crate) mod constraint_systems; +/// Describes data structures and the algorithms used by the AHP indexer. +pub mod indexer; +/// Describes data structures and the algorithms used by the AHP prover. +pub mod prover; +/// Describes data structures and the algorithms used by the AHP verifier. +pub mod verifier; + +/// A labeled DensePolynomial with coefficients over `F` +pub type LabeledPolynomial = ark_poly_commit::LabeledPolynomial>; + +/// The algebraic holographic proof defined in [CHMMVW19](https://eprint.iacr.org/2019/1047). +/// Currently, this AHP only supports inputs of size one +/// less than a power of 2 (i.e., of the form 2^n - 1). +pub struct AHPForR1CS { + field: PhantomData, +} + +impl AHPForR1CS { + /// The labels for the polynomials output by the AHP indexer. + #[rustfmt::skip] + pub const INDEXER_POLYNOMIALS: [&'static str; 12] = [ + // Polynomials for A + "a_row", "a_col", "a_val", "a_row_col", + // Polynomials for B + "b_row", "b_col", "b_val", "b_row_col", + // Polynomials for C + "c_row", "c_col", "c_val", "c_row_col", + ]; + + /// The labels for the polynomials output by the AHP prover. + #[rustfmt::skip] + pub const PROVER_POLYNOMIALS: [&'static str; 9] = [ + // First sumcheck + "w", "z_a", "z_b", "mask_poly", "t", "g_1", "h_1", + // Second sumcheck + "g_2", "h_2", + ]; + + /// THe linear combinations that are statically known to evaluate to zero. + pub const LC_WITH_ZERO_EVAL: [&'static str; 2] = ["inner_sumcheck", "outer_sumcheck"]; + + pub(crate) fn polynomial_labels() -> impl Iterator { + Self::INDEXER_POLYNOMIALS + .iter() + .chain(&Self::PROVER_POLYNOMIALS) + .map(|s| s.to_string()) + } + + /// Check that the (formatted) public input is of the form 2^n for some integer n. + pub fn num_formatted_public_inputs_is_admissible(num_inputs: usize) -> bool { + num_inputs.count_ones() == 1 + } + + /// Check that the (formatted) public input is of the form 2^n for some integer n. + pub fn formatted_public_input_is_admissible(input: &[F]) -> bool { + Self::num_formatted_public_inputs_is_admissible(input.len()) + } + + /// The maximum degree of polynomials produced by the indexer and prover + /// of this protocol. + /// The number of the variables must include the "one" variable. That is, it + /// must be with respect to the number of formatted public inputs. + pub fn max_degree( + num_constraints: usize, + num_variables: usize, + num_non_zero: usize, + ) -> Result { + let padded_matrix_dim = + constraint_systems::padded_matrix_dim(num_variables, num_constraints); + let zk_bound = 1; + let domain_h_size = GeneralEvaluationDomain::::compute_size_of_domain(padded_matrix_dim) + .ok_or(SynthesisError::PolynomialDegreeTooLarge)?; + let domain_k_size = GeneralEvaluationDomain::::compute_size_of_domain(num_non_zero) + .ok_or(SynthesisError::PolynomialDegreeTooLarge)?; + Ok(*[ + 2 * domain_h_size + zk_bound - 2, + 3 * domain_h_size + 2 * zk_bound - 3, // mask_poly + domain_h_size, + domain_h_size, + 3 * domain_k_size - 3, + ] + .iter() + .max() + .unwrap()) + } + + /// Get all the strict degree bounds enforced in the AHP. 
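+ /// Concretely, these are the bounds `|H| - 2` (on `g_1`) and `|K| - 2`
+ /// (on `g_2`), where `H` and `K` are sized by the number of constraints
+ /// and of non-zero entries, respectively.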
+ pub fn get_degree_bounds(info: &indexer::IndexInfo) -> [usize; 2] { + let mut degree_bounds = [0usize; 2]; + let num_constraints = info.num_constraints; + let num_non_zero = info.num_non_zero; + let h_size = GeneralEvaluationDomain::::compute_size_of_domain(num_constraints).unwrap(); + let k_size = GeneralEvaluationDomain::::compute_size_of_domain(num_non_zero).unwrap(); + + degree_bounds[0] = h_size - 2; + degree_bounds[1] = k_size - 2; + degree_bounds + } + + /// Construct the linear combinations that are checked by the AHP. + #[allow(non_snake_case)] + pub fn construct_linear_combinations( + public_input: &[F], + evals: &E, + state: &verifier::VerifierState, + ) -> Result>, Error> + where + E: EvaluationsProvider, + { + let domain_h = state.domain_h; + let domain_k = state.domain_k; + let k_size = domain_k.size_as_field_element(); + + let public_input = constraint_systems::format_public_input(public_input); + if !Self::formatted_public_input_is_admissible(&public_input) { + return Err(Error::InvalidPublicInputLength); + } + let x_domain = GeneralEvaluationDomain::new(public_input.len()) + .ok_or(SynthesisError::PolynomialDegreeTooLarge)?; + + let first_round_msg = state.first_round_msg.unwrap(); + let alpha = first_round_msg.alpha; + let eta_a = first_round_msg.eta_a; + let eta_b = first_round_msg.eta_b; + let eta_c = first_round_msg.eta_c; + + let beta = state.second_round_msg.unwrap().beta; + let gamma = state.gamma.unwrap(); + + let mut linear_combinations = Vec::new(); + + // Outer sumcheck: + let z_b = LinearCombination::new("z_b", vec![(F::one(), "z_b")]); + let g_1 = LinearCombination::new("g_1", vec![(F::one(), "g_1")]); + let t = LinearCombination::new("t", vec![(F::one(), "t")]); + + let r_alpha_at_beta = domain_h.eval_unnormalized_bivariate_lagrange_poly(alpha, beta); + let v_H_at_alpha = domain_h.evaluate_vanishing_polynomial(alpha); + let v_H_at_beta = domain_h.evaluate_vanishing_polynomial(beta); + let v_X_at_beta = x_domain.evaluate_vanishing_polynomial(beta); + + let z_b_at_beta = evals.get_lc_eval(&z_b, beta)?; + let t_at_beta = evals.get_lc_eval(&t, beta)?; + let g_1_at_beta = evals.get_lc_eval(&g_1, beta)?; + + let x_at_beta = x_domain + .evaluate_all_lagrange_coefficients(beta) + .into_iter() + .zip(public_input) + .map(|(l, x)| l * &x) + .fold(F::zero(), |x, y| x + &y); + + #[rustfmt::skip] + let outer_sumcheck = LinearCombination::new( + "outer_sumcheck", + vec![ + (F::one(), "mask_poly".into()), + + (r_alpha_at_beta * (eta_a + eta_c * z_b_at_beta), "z_a".into()), + (r_alpha_at_beta * eta_b * z_b_at_beta, LCTerm::One), + + (-t_at_beta * v_X_at_beta, "w".into()), + (-t_at_beta * x_at_beta, LCTerm::One), + + (-v_H_at_beta, "h_1".into()), + (-beta * g_1_at_beta, LCTerm::One), + ], + ); + debug_assert!(evals.get_lc_eval(&outer_sumcheck, beta)?.is_zero()); + + linear_combinations.push(z_b); + linear_combinations.push(g_1); + linear_combinations.push(t); + linear_combinations.push(outer_sumcheck); + + // Inner sumcheck: + let beta_alpha = beta * alpha; + let g_2 = LinearCombination::new("g_2", vec![(F::one(), "g_2")]); + + let a_denom = LinearCombination::new( + "a_denom", + vec![ + (beta_alpha, LCTerm::One), + (-alpha, "a_row".into()), + (-beta, "a_col".into()), + (F::one(), "a_row_col".into()), + ], + ); + + let b_denom = LinearCombination::new( + "b_denom", + vec![ + (beta_alpha, LCTerm::One), + (-alpha, "b_row".into()), + (-beta, "b_col".into()), + (F::one(), "b_row_col".into()), + ], + ); + + let c_denom = LinearCombination::new( + "c_denom", + vec![ + 
(beta_alpha, LCTerm::One), + (-alpha, "c_row".into()), + (-beta, "c_col".into()), + (F::one(), "c_row_col".into()), + ], + ); + + let a_denom_at_gamma = evals.get_lc_eval(&a_denom, gamma)?; + let b_denom_at_gamma = evals.get_lc_eval(&b_denom, gamma)?; + let c_denom_at_gamma = evals.get_lc_eval(&c_denom, gamma)?; + let g_2_at_gamma = evals.get_lc_eval(&g_2, gamma)?; + + let v_K_at_gamma = domain_k.evaluate_vanishing_polynomial(gamma); + + let mut a = LinearCombination::new( + "a_poly", + vec![ + (eta_a * b_denom_at_gamma * c_denom_at_gamma, "a_val"), + (eta_b * a_denom_at_gamma * c_denom_at_gamma, "b_val"), + (eta_c * b_denom_at_gamma * a_denom_at_gamma, "c_val"), + ], + ); + + a *= v_H_at_alpha * v_H_at_beta; + let b_at_gamma = a_denom_at_gamma * b_denom_at_gamma * c_denom_at_gamma; + let b_expr_at_gamma = b_at_gamma * (gamma * g_2_at_gamma + &(t_at_beta / &k_size)); + + a -= &LinearCombination::new("b_expr", vec![(b_expr_at_gamma, LCTerm::One)]); + a -= &LinearCombination::new("h_2", vec![(v_K_at_gamma, "h_2")]); + + a.label = "inner_sumcheck".into(); + let inner_sumcheck = a; + debug_assert!(evals.get_lc_eval(&inner_sumcheck, gamma)?.is_zero()); + + linear_combinations.push(g_2); + linear_combinations.push(a_denom); + linear_combinations.push(b_denom); + linear_combinations.push(c_denom); + linear_combinations.push(inner_sumcheck); + + linear_combinations.sort_by(|a, b| a.label.cmp(&b.label)); + Ok(linear_combinations) + } +} + +/// Abstraction that provides evaluations of (linear combinations of) polynomials +/// +/// Intended to provide a common interface for both the prover and the verifier +/// when constructing linear combinations via `AHPForR1CS::construct_linear_combinations`. +pub trait EvaluationsProvider { + /// Get the evaluation of linear combination `lc` at `point`. + fn get_lc_eval(&self, lc: &LinearCombination, point: F) -> Result; +} + +impl<'a, F: Field> EvaluationsProvider for ark_poly_commit::Evaluations { + fn get_lc_eval(&self, lc: &LinearCombination, point: F) -> Result { + let key = (lc.label.clone(), point); + self.get(&key) + .map(|v| *v) + .ok_or(Error::MissingEval(lc.label.clone())) + } +} + +impl>> EvaluationsProvider for Vec { + fn get_lc_eval(&self, lc: &LinearCombination, point: F) -> Result { + let mut eval = F::zero(); + for (coeff, term) in lc.iter() { + let value = if let LCTerm::PolyLabel(label) = term { + self.iter() + .find(|p| { + let p: &LabeledPolynomial = (*p).borrow(); + p.label() == label + }) + .ok_or(Error::MissingEval(format!( + "Missing {} for {}", + label, lc.label + )))? + .borrow() + .evaluate(&point) + } else { + assert!(term.is_one()); + F::one() + }; + eval += *coeff * value + } + Ok(eval) + } +} + +/// Describes the failure modes of the AHP scheme. +#[derive(Debug)] +pub enum Error { + /// During verification, a required evaluation is missing + MissingEval(String), + /// The number of public inputs is incorrect. + InvalidPublicInputLength, + /// The instance generated during proving does not match that in the index. + InstanceDoesNotMatchIndex, + /// Currently we only support square constraint matrices. + NonSquareMatrix, + /// An error occurred during constraint generation. 
+ ConstraintSystemError(SynthesisError), +} + +impl From for Error { + fn from(other: SynthesisError) -> Self { + Error::ConstraintSystemError(other) + } +} + +/// The derivative of the vanishing polynomial +pub trait UnnormalizedBivariateLagrangePoly { + /// Evaluate the polynomial + fn eval_unnormalized_bivariate_lagrange_poly(&self, x: F, y: F) -> F; + + /// Evaluate over a batch of inputs + fn batch_eval_unnormalized_bivariate_lagrange_poly_with_diff_inputs(&self, x: F) -> Vec; + + /// Evaluate the magic polynomial over `self` + fn batch_eval_unnormalized_bivariate_lagrange_poly_with_same_inputs(&self) -> Vec; +} + +impl UnnormalizedBivariateLagrangePoly for GeneralEvaluationDomain { + fn eval_unnormalized_bivariate_lagrange_poly(&self, x: F, y: F) -> F { + if x != y { + (self.evaluate_vanishing_polynomial(x) - self.evaluate_vanishing_polynomial(y)) + / (x - y) + } else { + self.size_as_field_element() * x.pow(&[(self.size() - 1) as u64]) + } + } + + fn batch_eval_unnormalized_bivariate_lagrange_poly_with_diff_inputs(&self, x: F) -> Vec { + let vanish_x = self.evaluate_vanishing_polynomial(x); + let mut inverses: Vec = self.elements().map(|y| x - y).collect(); + ark_ff::batch_inversion(&mut inverses); + + cfg_iter_mut!(inverses).for_each(|denominator| *denominator *= vanish_x); + inverses + } + + fn batch_eval_unnormalized_bivariate_lagrange_poly_with_same_inputs(&self) -> Vec { + let mut elems: Vec = self + .elements() + .map(|e| e * self.size_as_field_element()) + .collect(); + elems[1..].reverse(); + elems + } +} + +#[cfg(test)] +mod tests { + use super::*; + use ark_bls12_381::Fr; + use ark_ff::{One, UniformRand, Zero}; + use ark_poly::{ + univariate::{DenseOrSparsePolynomial, DensePolynomial}, + Polynomial, UVPolynomial, + }; + + #[test] + fn domain_unnormalized_bivariate_lagrange_poly() { + for domain_size in 1..10 { + let domain = GeneralEvaluationDomain::::new(1 << domain_size).unwrap(); + let manual: Vec<_> = domain + .elements() + .map(|elem| domain.eval_unnormalized_bivariate_lagrange_poly(elem, elem)) + .collect(); + let fast = domain.batch_eval_unnormalized_bivariate_lagrange_poly_with_same_inputs(); + assert_eq!(fast, manual); + } + } + + #[test] + fn domain_unnormalized_bivariate_lagrange_poly_diff_inputs() { + let rng = &mut ark_std::test_rng(); + for domain_size in 1..10 { + let domain = GeneralEvaluationDomain::::new(1 << domain_size).unwrap(); + let x = Fr::rand(rng); + let manual: Vec<_> = domain + .elements() + .map(|y| domain.eval_unnormalized_bivariate_lagrange_poly(x, y)) + .collect(); + let fast = domain.batch_eval_unnormalized_bivariate_lagrange_poly_with_diff_inputs(x); + assert_eq!(fast, manual); + } + } + + #[test] + fn test_summation() { + let rng = &mut ark_std::test_rng(); + let size = 1 << 4; + let domain = GeneralEvaluationDomain::::new(1 << 4).unwrap(); + let size_as_fe = domain.size_as_field_element(); + let poly = DensePolynomial::rand(size, rng); + + let mut sum: Fr = Fr::zero(); + for eval in domain.elements().map(|e| poly.evaluate(&e)) { + sum += eval; + } + let first = poly.coeffs[0] * size_as_fe; + let last = *poly.coeffs.last().unwrap() * size_as_fe; + println!("sum: {:?}", sum); + println!("a_0: {:?}", first); + println!("a_n: {:?}", last); + println!("first + last: {:?}\n", first + last); + assert_eq!(sum, first + last); + } + + #[test] + fn test_alternator_polynomial() { + use ark_poly::Evaluations; + let domain_k = GeneralEvaluationDomain::::new(1 << 4).unwrap(); + let domain_h = GeneralEvaluationDomain::::new(1 << 3).unwrap(); + let 
domain_h_elems = domain_h + .elements() + .collect::>(); + let alternator_poly_evals = domain_k + .elements() + .map(|e| { + if domain_h_elems.contains(&e) { + Fr::one() + } else { + Fr::zero() + } + }) + .collect(); + let v_k: DenseOrSparsePolynomial<_> = domain_k.vanishing_polynomial().into(); + let v_h: DenseOrSparsePolynomial<_> = domain_h.vanishing_polynomial().into(); + let (divisor, remainder) = v_k.divide_with_q_and_r(&v_h).unwrap(); + assert!(remainder.is_zero()); + println!("Divisor: {:?}", divisor); + println!( + "{:#?}", + divisor + .coeffs + .iter() + .filter_map(|f| if !f.is_zero() { + Some(f.into_repr()) + } else { + None + }) + .collect::>() + ); + + for e in domain_h.elements() { + println!("{:?}", divisor.evaluate(&e)); + } + // Let p = v_K / v_H; + // The alternator polynomial is p * t, where t is defined as + // the LDE of p(h)^{-1} for all h in H. + // + // Because for each h in H, p(h) equals a constant c, we have that t + // is the constant polynomial c^{-1}. + // + // Q: what is the constant c? Why is p(h) constant? What is the easiest + // way to calculate c? + let alternator_poly = + Evaluations::from_vec_and_domain(alternator_poly_evals, domain_k).interpolate(); + let (quotient, remainder) = DenseOrSparsePolynomial::from(alternator_poly.clone()) + .divide_with_q_and_r(&DenseOrSparsePolynomial::from(divisor)) + .unwrap(); + assert!(remainder.is_zero()); + println!("quotient: {:?}", quotient); + println!( + "{:#?}", + quotient + .coeffs + .iter() + .filter_map(|f| if !f.is_zero() { + Some(f.into_repr()) + } else { + None + }) + .collect::>() + ); + + println!("{:?}", alternator_poly); + } +} diff --git a/arkworks/marlin/src/ahp/prover.rs b/arkworks/marlin/src/ahp/prover.rs new file mode 100644 index 00000000..469ccfe6 --- /dev/null +++ b/arkworks/marlin/src/ahp/prover.rs @@ -0,0 +1,730 @@ +#![allow(non_snake_case)] + +use crate::ahp::indexer::*; +use crate::ahp::verifier::*; +use crate::ahp::*; + +use crate::ahp::constraint_systems::{ + make_matrices_square_for_prover, pad_input_for_indexer_and_prover, unformat_public_input, +}; +use crate::{ToString, Vec}; +use ark_ff::{Field, PrimeField, Zero}; +use ark_poly::{ + univariate::DensePolynomial, EvaluationDomain, Evaluations as EvaluationsOnDomain, + GeneralEvaluationDomain, Polynomial, UVPolynomial, +}; +use ark_relations::r1cs::{ + ConstraintSynthesizer, ConstraintSystem, OptimizationGoal, SynthesisError, +}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError}; +use ark_std::rand::RngCore; +use ark_std::{ + cfg_into_iter, cfg_iter, cfg_iter_mut, + io::{Read, Write}, +}; + +/// State for the AHP prover. +pub struct ProverState<'a, F: PrimeField> { + formatted_input_assignment: Vec, + witness_assignment: Vec, + /// Az + z_a: Option>, + /// Bz + z_b: Option>, + /// query bound b + zk_bound: usize, + + w_poly: Option>, + mz_polys: Option<(LabeledPolynomial, LabeledPolynomial)>, + + index: &'a Index, + + /// the random values sent by the verifier in the first round + verifier_first_msg: Option>, + + /// the blinding polynomial for the first round + mask_poly: Option>, + + /// domain X, sized for the public input + domain_x: GeneralEvaluationDomain, + + /// domain H, sized for constraints + domain_h: GeneralEvaluationDomain, + + /// domain K, sized for matrix nonzero elements + domain_k: GeneralEvaluationDomain, +} + +impl<'a, F: PrimeField> ProverState<'a, F> { + /// Get the public input. 
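+    // The "formatted" input assignment held by this state is the instance
+    // vector as the arithmetization sees it: a leading `one` variable followed
+    // by the public inputs, padded to a power-of-two length. A sketch of the
+    // round trip, assuming `unformat_public_input` simply drops the leading
+    // `one` entry:
+    //
+    //   let formatted = vec![F::one(), x_1, x_2, F::zero()]; // length 2^k
+    //   let public = unformat_public_input(&formatted);      // [x_1, x_2, 0]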
+ pub fn public_input(&self) -> Vec { + unformat_public_input(&self.formatted_input_assignment) + } +} + +/// Each prover message that is not a list of oracles is a list of field elements. +#[derive(Clone)] +pub enum ProverMsg { + /// Some rounds, the prover sends only oracles. (This is actually the case for all + /// rounds in Marlin.) + EmptyMessage, + /// Otherwise, it's one or more field elements. + FieldElements(Vec), +} + +impl ark_ff::ToBytes for ProverMsg { + fn write(&self, w: W) -> ark_std::io::Result<()> { + match self { + ProverMsg::EmptyMessage => Ok(()), + ProverMsg::FieldElements(field_elems) => field_elems.write(w), + } + } +} + +impl CanonicalSerialize for ProverMsg { + fn serialize(&self, mut writer: W) -> Result<(), SerializationError> { + let res: Option> = match self { + ProverMsg::EmptyMessage => None, + ProverMsg::FieldElements(v) => Some(v.clone()), + }; + res.serialize(&mut writer) + } + + fn serialized_size(&self) -> usize { + let res: Option> = match self { + ProverMsg::EmptyMessage => None, + ProverMsg::FieldElements(v) => Some(v.clone()), + }; + res.serialized_size() + } + + fn serialize_unchecked(&self, mut writer: W) -> Result<(), SerializationError> { + let res: Option> = match self { + ProverMsg::EmptyMessage => None, + ProverMsg::FieldElements(v) => Some(v.clone()), + }; + res.serialize_unchecked(&mut writer) + } + + fn serialize_uncompressed(&self, mut writer: W) -> Result<(), SerializationError> { + let res: Option> = match self { + ProverMsg::EmptyMessage => None, + ProverMsg::FieldElements(v) => Some(v.clone()), + }; + res.serialize_uncompressed(&mut writer) + } + + fn uncompressed_size(&self) -> usize { + let res: Option> = match self { + ProverMsg::EmptyMessage => None, + ProverMsg::FieldElements(v) => Some(v.clone()), + }; + res.uncompressed_size() + } +} + +impl CanonicalDeserialize for ProverMsg { + fn deserialize(mut reader: R) -> Result { + let res = Option::>::deserialize(&mut reader)?; + + if let Some(res) = res { + Ok(ProverMsg::FieldElements(res)) + } else { + Ok(ProverMsg::EmptyMessage) + } + } + + fn deserialize_unchecked(mut reader: R) -> Result { + let res = Option::>::deserialize_unchecked(&mut reader)?; + + if let Some(res) = res { + Ok(ProverMsg::FieldElements(res)) + } else { + Ok(ProverMsg::EmptyMessage) + } + } + + fn deserialize_uncompressed(mut reader: R) -> Result { + let res = Option::>::deserialize_uncompressed(&mut reader)?; + + if let Some(res) = res { + Ok(ProverMsg::FieldElements(res)) + } else { + Ok(ProverMsg::EmptyMessage) + } + } +} + +/// The first set of prover oracles. +pub struct ProverFirstOracles { + /// The LDE of `w`. + pub w: LabeledPolynomial, + /// The LDE of `Az`. + pub z_a: LabeledPolynomial, + /// The LDE of `Bz`. + pub z_b: LabeledPolynomial, + /// The sum-check hiding polynomial. + pub mask_poly: LabeledPolynomial, +} + +impl ProverFirstOracles { + /// Iterate over the polynomials output by the prover in the first round. + pub fn iter(&self) -> impl Iterator> { + vec![&self.w, &self.z_a, &self.z_b, &self.mask_poly].into_iter() + } +} + +/// The second set of prover oracles. +pub struct ProverSecondOracles { + /// The polynomial `t` that is produced in the first round. + pub t: LabeledPolynomial, + /// The polynomial `g` resulting from the first sumcheck. + pub g_1: LabeledPolynomial, + /// The polynomial `h` resulting from the first sumcheck. + pub h_1: LabeledPolynomial, +} + +impl ProverSecondOracles { + /// Iterate over the polynomials output by the prover in the second round. 
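+    // The iteration order below (t, g_1, h_1) matches the second-round slice
+    // of `AHPForR1CS::PROVER_POLYNOMIALS`; the verifier later zips commitments
+    // with `polynomial_labels()` positionally, so this ordering is load-bearing.
+    // A sketch of the invariant (the 12 indexer labels come first):
+    //
+    //   let labels: Vec<String> = AHPForR1CS::<F>::polynomial_labels().collect();
+    //   assert_eq!(&labels[16..19], &["t", "g_1", "h_1"]);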
+ pub fn iter(&self) -> impl Iterator> { + vec![&self.t, &self.g_1, &self.h_1].into_iter() + } +} + +/// The third set of prover oracles. +pub struct ProverThirdOracles { + /// The polynomial `g` resulting from the second sumcheck. + pub g_2: LabeledPolynomial, + /// The polynomial `h` resulting from the second sumcheck. + pub h_2: LabeledPolynomial, +} + +impl ProverThirdOracles { + /// Iterate over the polynomials output by the prover in the third round. + pub fn iter(&self) -> impl Iterator> { + vec![&self.g_2, &self.h_2].into_iter() + } +} + +impl AHPForR1CS { + /// Initialize the AHP prover. + pub fn prover_init<'a, C: ConstraintSynthesizer>( + index: &'a Index, + c: C, + ) -> Result, Error> { + let init_time = start_timer!(|| "AHP::Prover::Init"); + + let constraint_time = start_timer!(|| "Generating constraints and witnesses"); + let pcs = ConstraintSystem::new_ref(); + pcs.set_optimization_goal(OptimizationGoal::Weight); + pcs.set_mode(ark_relations::r1cs::SynthesisMode::Prove { + construct_matrices: true, + }); + c.generate_constraints(pcs.clone())?; + end_timer!(constraint_time); + + let padding_time = start_timer!(|| "Padding matrices to make them square"); + pad_input_for_indexer_and_prover(pcs.clone()); + pcs.finalize(); + make_matrices_square_for_prover(pcs.clone()); + end_timer!(padding_time); + + let num_non_zero = index.index_info.num_non_zero; + + let (formatted_input_assignment, witness_assignment, num_constraints) = { + let pcs = pcs.borrow().unwrap(); + ( + pcs.instance_assignment.as_slice().to_vec(), + pcs.witness_assignment.as_slice().to_vec(), + pcs.num_constraints, + ) + }; + + let num_input_variables = formatted_input_assignment.len(); + let num_witness_variables = witness_assignment.len(); + if index.index_info.num_constraints != num_constraints + || num_input_variables + num_witness_variables != index.index_info.num_variables + { + return Err(Error::InstanceDoesNotMatchIndex); + } + + if !Self::formatted_public_input_is_admissible(&formatted_input_assignment) { + return Err(Error::InvalidPublicInputLength); + } + + // Perform matrix multiplications + let inner_prod_fn = |row: &[(F, usize)]| { + let mut acc = F::zero(); + for &(ref coeff, i) in row { + let tmp = if i < num_input_variables { + formatted_input_assignment[i] + } else { + witness_assignment[i - num_input_variables] + }; + + acc += &(if coeff.is_one() { tmp } else { tmp * coeff }); + } + acc + }; + + let eval_z_a_time = start_timer!(|| "Evaluating z_A"); + let z_a = index.a.iter().map(|row| inner_prod_fn(row)).collect(); + end_timer!(eval_z_a_time); + + let eval_z_b_time = start_timer!(|| "Evaluating z_B"); + let z_b = index.b.iter().map(|row| inner_prod_fn(row)).collect(); + end_timer!(eval_z_b_time); + + let zk_bound = 1; // One query is sufficient for our desired soundness + + let domain_h = GeneralEvaluationDomain::new(num_constraints) + .ok_or(SynthesisError::PolynomialDegreeTooLarge)?; + + let domain_k = GeneralEvaluationDomain::new(num_non_zero) + .ok_or(SynthesisError::PolynomialDegreeTooLarge)?; + + let domain_x = GeneralEvaluationDomain::new(num_input_variables) + .ok_or(SynthesisError::PolynomialDegreeTooLarge)?; + + end_timer!(init_time); + + Ok(ProverState { + formatted_input_assignment, + witness_assignment, + z_a: Some(z_a), + z_b: Some(z_b), + w_poly: None, + mz_polys: None, + zk_bound, + index, + verifier_first_msg: None, + mask_poly: None, + domain_h, + domain_k, + domain_x, + }) + } + + /// Output the first round message and the next state. 
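+    // In the first round, the witness LDE is hidden by adding a random
+    // multiple of the vanishing polynomial of H before dividing out v_X,
+    // i.e. (sketch):
+    //
+    //   w_poly(X) = (interp_H(w_evals)(X) + r * v_H(X)) / v_X(X)
+    //
+    // so a single evaluation of `w` outside H reveals nothing about the
+    // witness (hence zk_bound = 1). The same v_H masking is applied to the
+    // z_a and z_b polynomials below.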
+ pub fn prover_first_round<'a, R: RngCore>( + mut state: ProverState<'a, F>, + rng: &mut R, + ) -> Result<(ProverMsg, ProverFirstOracles, ProverState<'a, F>), Error> { + let round_time = start_timer!(|| "AHP::Prover::FirstRound"); + let domain_h = state.domain_h; + let zk_bound = state.zk_bound; + + let v_H = domain_h.vanishing_polynomial().into(); + + let x_time = start_timer!(|| "Computing x polynomial and evals"); + let domain_x = state.domain_x; + let x_poly = EvaluationsOnDomain::from_vec_and_domain( + state.formatted_input_assignment.clone(), + domain_x, + ) + .interpolate(); + let x_evals = domain_h.fft(&x_poly); + end_timer!(x_time); + + let ratio = domain_h.size() / domain_x.size(); + + let mut w_extended = state.witness_assignment.clone(); + w_extended.extend(vec![ + F::zero(); + domain_h.size() + - domain_x.size() + - state.witness_assignment.len() + ]); + + let w_poly_time = start_timer!(|| "Computing w polynomial"); + let w_poly_evals = cfg_into_iter!(0..domain_h.size()) + .map(|k| { + if k % ratio == 0 { + F::zero() + } else { + w_extended[k - (k / ratio) - 1] - &x_evals[k] + } + }) + .collect(); + + let w_poly = &EvaluationsOnDomain::from_vec_and_domain(w_poly_evals, domain_h) + .interpolate() + + &(&DensePolynomial::from_coefficients_slice(&[F::rand(rng)]) * &v_H); + let (w_poly, remainder) = w_poly.divide_by_vanishing_poly(domain_x).unwrap(); + assert!(remainder.is_zero()); + end_timer!(w_poly_time); + + let z_a_poly_time = start_timer!(|| "Computing z_A polynomial"); + let z_a = state.z_a.clone().unwrap(); + let z_a_poly = &EvaluationsOnDomain::from_vec_and_domain(z_a, domain_h).interpolate() + + &(&DensePolynomial::from_coefficients_slice(&[F::rand(rng)]) * &v_H); + end_timer!(z_a_poly_time); + + let z_b_poly_time = start_timer!(|| "Computing z_B polynomial"); + let z_b = state.z_b.clone().unwrap(); + let z_b_poly = &EvaluationsOnDomain::from_vec_and_domain(z_b, domain_h).interpolate() + + &(&DensePolynomial::from_coefficients_slice(&[F::rand(rng)]) * &v_H); + end_timer!(z_b_poly_time); + + let mask_poly_time = start_timer!(|| "Computing mask polynomial"); + let mask_poly_degree = 3 * domain_h.size() + 2 * zk_bound - 3; + let mut mask_poly = DensePolynomial::rand(mask_poly_degree, rng); + let scaled_sigma_1 = (mask_poly.divide_by_vanishing_poly(domain_h).unwrap().1)[0]; + mask_poly[0] -= &scaled_sigma_1; + end_timer!(mask_poly_time); + + let msg = ProverMsg::EmptyMessage; + + assert!(w_poly.degree() < domain_h.size() - domain_x.size() + zk_bound); + assert!(z_a_poly.degree() < domain_h.size() + zk_bound); + assert!(z_b_poly.degree() < domain_h.size() + zk_bound); + assert!(mask_poly.degree() <= 3 * domain_h.size() + 2 * zk_bound - 3); + + let w = LabeledPolynomial::new("w".to_string(), w_poly, None, Some(1)); + let z_a = LabeledPolynomial::new("z_a".to_string(), z_a_poly, None, Some(1)); + let z_b = LabeledPolynomial::new("z_b".to_string(), z_b_poly, None, Some(1)); + let mask_poly = + LabeledPolynomial::new("mask_poly".to_string(), mask_poly.clone(), None, None); + + let oracles = ProverFirstOracles { + w: w.clone(), + z_a: z_a.clone(), + z_b: z_b.clone(), + mask_poly: mask_poly.clone(), + }; + + state.w_poly = Some(w); + state.mz_polys = Some((z_a, z_b)); + state.mask_poly = Some(mask_poly); + end_timer!(round_time); + + Ok((msg, oracles, state)) + } + + fn calculate_t<'a>( + matrices: impl Iterator>, + matrix_randomizers: &[F], + input_domain: GeneralEvaluationDomain, + domain_h: GeneralEvaluationDomain, + r_alpha_x_on_h: Vec, + ) -> DensePolynomial { + let mut 
t_evals_on_h = vec![F::zero(); domain_h.size()]; + for (matrix, eta) in matrices.zip(matrix_randomizers) { + for (r, row) in matrix.iter().enumerate() { + for (coeff, c) in row.iter() { + let index = domain_h.reindex_by_subdomain(input_domain, *c); + t_evals_on_h[index] += *eta * coeff * r_alpha_x_on_h[r]; + } + } + } + EvaluationsOnDomain::from_vec_and_domain(t_evals_on_h, domain_h).interpolate() + } + + /// Output the number of oracles sent by the prover in the first round. + pub fn prover_num_first_round_oracles() -> usize { + 4 + } + + /// Output the degree bounds of oracles in the first round. + pub fn prover_first_round_degree_bounds( + _info: &IndexInfo, + ) -> impl Iterator> { + vec![None; 4].into_iter() + } + + /// Output the second round message and the next state. + pub fn prover_second_round<'a, R: RngCore>( + ver_message: &VerifierFirstMsg, + mut state: ProverState<'a, F>, + _r: &mut R, + ) -> (ProverMsg, ProverSecondOracles, ProverState<'a, F>) { + let round_time = start_timer!(|| "AHP::Prover::SecondRound"); + + let domain_h = state.domain_h; + let zk_bound = state.zk_bound; + + let mask_poly = state + .mask_poly + .as_ref() + .expect("ProverState should include mask_poly when prover_second_round is called"); + + let VerifierFirstMsg { + alpha, + eta_a, + eta_b, + eta_c, + } = *ver_message; + + let summed_z_m_poly_time = start_timer!(|| "Compute z_m poly"); + let (z_a_poly, z_b_poly) = state.mz_polys.as_ref().unwrap(); + let z_c_poly = z_a_poly.polynomial() * z_b_poly.polynomial(); + + let mut summed_z_m_coeffs = z_c_poly.coeffs; + // Note: Can't combine these two loops, because z_c_poly has 2x the degree + // of z_a_poly and z_b_poly, so the second loop gets truncated due to + // the `zip`s. + cfg_iter_mut!(summed_z_m_coeffs).for_each(|c| *c *= &eta_c); + cfg_iter_mut!(summed_z_m_coeffs) + .zip(&z_a_poly.polynomial().coeffs) + .zip(&z_b_poly.polynomial().coeffs) + .for_each(|((c, a), b)| *c += &(eta_a * a + &(eta_b * b))); + + let summed_z_m = DensePolynomial::from_coefficients_vec(summed_z_m_coeffs); + end_timer!(summed_z_m_poly_time); + + let r_alpha_x_evals_time = start_timer!(|| "Compute r_alpha_x evals"); + let r_alpha_x_evals = + domain_h.batch_eval_unnormalized_bivariate_lagrange_poly_with_diff_inputs(alpha); + end_timer!(r_alpha_x_evals_time); + + let r_alpha_poly_time = start_timer!(|| "Compute r_alpha_x poly"); + let r_alpha_poly = DensePolynomial::from_coefficients_vec(domain_h.ifft(&r_alpha_x_evals)); + end_timer!(r_alpha_poly_time); + + let t_poly_time = start_timer!(|| "Compute t poly"); + let t_poly = Self::calculate_t( + vec![&state.index.a, &state.index.b, &state.index.c].into_iter(), + &[eta_a, eta_b, eta_c], + state.domain_x, + state.domain_h, + r_alpha_x_evals.to_vec(), + ); + end_timer!(t_poly_time); + + let z_poly_time = start_timer!(|| "Compute z poly"); + + let domain_x = GeneralEvaluationDomain::new(state.formatted_input_assignment.len()) + .ok_or(SynthesisError::PolynomialDegreeTooLarge) + .unwrap(); + let x_poly = EvaluationsOnDomain::from_vec_and_domain( + state.formatted_input_assignment.clone(), + domain_x, + ) + .interpolate(); + let w_poly = state.w_poly.as_ref().unwrap(); + let mut z_poly = w_poly.polynomial().mul_by_vanishing_poly(domain_x); + cfg_iter_mut!(z_poly.coeffs) + .zip(&x_poly.coeffs) + .for_each(|(z, x)| *z += x); + assert!(z_poly.degree() < domain_h.size() + zk_bound); + + end_timer!(z_poly_time); + + let q_1_time = start_timer!(|| "Compute q_1 poly"); + + let mul_domain_size = *[ + mask_poly.len(), + r_alpha_poly.coeffs.len() + 
summed_z_m.coeffs.len(), + t_poly.coeffs.len() + z_poly.len(), + ] + .iter() + .max() + .unwrap(); + let mul_domain = GeneralEvaluationDomain::new(mul_domain_size) + .expect("field is not smooth enough to construct domain"); + let mut r_alpha_evals = r_alpha_poly.evaluate_over_domain_by_ref(mul_domain); + let summed_z_m_evals = summed_z_m.evaluate_over_domain_by_ref(mul_domain); + let z_poly_evals = z_poly.evaluate_over_domain_by_ref(mul_domain); + let t_poly_m_evals = t_poly.evaluate_over_domain_by_ref(mul_domain); + + cfg_iter_mut!(r_alpha_evals.evals) + .zip(&summed_z_m_evals.evals) + .zip(&z_poly_evals.evals) + .zip(&t_poly_m_evals.evals) + .for_each(|(((a, b), &c), d)| { + *a *= b; + *a -= c * d; + }); + let rhs = r_alpha_evals.interpolate(); + let q_1 = mask_poly.polynomial() + &rhs; + end_timer!(q_1_time); + + let sumcheck_time = start_timer!(|| "Compute sumcheck h and g polys"); + let (h_1, x_g_1) = q_1.divide_by_vanishing_poly(domain_h).unwrap(); + let g_1 = DensePolynomial::from_coefficients_slice(&x_g_1.coeffs[1..]); + end_timer!(sumcheck_time); + + let msg = ProverMsg::EmptyMessage; + + assert!(g_1.degree() <= domain_h.size() - 2); + assert!(h_1.degree() <= 2 * domain_h.size() + 2 * zk_bound - 2); + + let oracles = ProverSecondOracles { + t: LabeledPolynomial::new("t".into(), t_poly, None, None), + g_1: LabeledPolynomial::new("g_1".into(), g_1, Some(domain_h.size() - 2), Some(1)), + h_1: LabeledPolynomial::new("h_1".into(), h_1, None, None), + }; + + state.w_poly = None; + state.verifier_first_msg = Some(*ver_message); + end_timer!(round_time); + + (msg, oracles, state) + } + + /// Output the number of oracles sent by the prover in the second round. + pub fn prover_num_second_round_oracles() -> usize { + 3 + } + + /// Output the degree bounds of oracles in the second round. + pub fn prover_second_round_degree_bounds( + info: &IndexInfo, + ) -> impl Iterator> { + let h_domain_size = + GeneralEvaluationDomain::::compute_size_of_domain(info.num_constraints).unwrap(); + + vec![None, Some(h_domain_size - 2), None].into_iter() + } + + /// Output the third round message and the next state. + pub fn prover_third_round<'a, R: RngCore>( + ver_message: &VerifierSecondMsg, + prover_state: ProverState<'a, F>, + _r: &mut R, + ) -> Result<(ProverMsg, ProverThirdOracles), Error> { + let round_time = start_timer!(|| "AHP::Prover::ThirdRound"); + + let ProverState { + index, + verifier_first_msg, + domain_h, + domain_k, + .. 
+ } = prover_state; + + let VerifierFirstMsg { + eta_a, + eta_b, + eta_c, + alpha, + } = verifier_first_msg.expect( + "ProverState should include verifier_first_msg when prover_third_round is called", + ); + + let beta = ver_message.beta; + + let v_H_at_alpha = domain_h.evaluate_vanishing_polynomial(alpha); + let v_H_at_beta = domain_h.evaluate_vanishing_polynomial(beta); + + let (a_star, b_star, c_star) = ( + &index.a_star_arith, + &index.b_star_arith, + &index.c_star_arith, + ); + + let f_evals_time = start_timer!(|| "Computing f evals on K"); + let mut f_vals_on_K = Vec::with_capacity(domain_k.size()); + let mut inverses_a = Vec::with_capacity(domain_k.size()); + let mut inverses_b = Vec::with_capacity(domain_k.size()); + let mut inverses_c = Vec::with_capacity(domain_k.size()); + + for i in 0..domain_k.size() { + inverses_a.push((beta - a_star.evals_on_K.row[i]) * (alpha - a_star.evals_on_K.col[i])); + inverses_b.push((beta - b_star.evals_on_K.row[i]) * (alpha - b_star.evals_on_K.col[i])); + inverses_c.push((beta - c_star.evals_on_K.row[i]) * (alpha - c_star.evals_on_K.col[i])); + } + ark_ff::batch_inversion(&mut inverses_a); + ark_ff::batch_inversion(&mut inverses_b); + ark_ff::batch_inversion(&mut inverses_c); + + for i in 0..domain_k.size() { + let t = eta_a * a_star.evals_on_K.val[i] * inverses_a[i] + + eta_b * b_star.evals_on_K.val[i] * inverses_b[i] + + eta_c * c_star.evals_on_K.val[i] * inverses_c[i]; + let f_at_kappa = v_H_at_beta * v_H_at_alpha * t; + f_vals_on_K.push(f_at_kappa); + } + end_timer!(f_evals_time); + + let f_poly_time = start_timer!(|| "Computing f poly"); + let f = EvaluationsOnDomain::from_vec_and_domain(f_vals_on_K, domain_k).interpolate(); + end_timer!(f_poly_time); + + let g_2 = DensePolynomial::from_coefficients_slice(&f.coeffs[1..]); + + let domain_b = GeneralEvaluationDomain::::new(3 * domain_k.size() - 3) + .ok_or(SynthesisError::PolynomialDegreeTooLarge)?; + + let denom_eval_time = start_timer!(|| "Computing denominator evals on B"); + let a_denom: Vec<_> = cfg_iter!(a_star.evals_on_B.row.evals) + .zip(&a_star.evals_on_B.col.evals) + .zip(&a_star.row_col_evals_on_B.evals) + .map(|((&r, c), r_c)| beta * alpha - (r * alpha) - (beta * c) + r_c) + .collect(); + + let b_denom: Vec<_> = cfg_iter!(b_star.evals_on_B.row.evals) + .zip(&b_star.evals_on_B.col.evals) + .zip(&b_star.row_col_evals_on_B.evals) + .map(|((&r, c), r_c)| beta * alpha - (r * alpha) - (beta * c) + r_c) + .collect(); + + let c_denom: Vec<_> = cfg_iter!(c_star.evals_on_B.row.evals) + .zip(&c_star.evals_on_B.col.evals) + .zip(&c_star.row_col_evals_on_B.evals) + .map(|((&r, c), r_c)| beta * alpha - (r * alpha) - (beta * c) + r_c) + .collect(); + end_timer!(denom_eval_time); + + let a_evals_time = start_timer!(|| "Computing a evals on B"); + let a_star_evals_on_B = &a_star.evals_on_B; + let b_star_evals_on_B = &b_star.evals_on_B; + let c_star_evals_on_B = &c_star.evals_on_B; + let a_poly_on_B = cfg_into_iter!(0..domain_b.size()) + .map(|i| { + let t = eta_a * a_star_evals_on_B.val.evals[i] * b_denom[i] * c_denom[i] + + eta_b * b_star_evals_on_B.val.evals[i] * a_denom[i] * c_denom[i] + + eta_c * c_star_evals_on_B.val.evals[i] * a_denom[i] * b_denom[i]; + v_H_at_beta * v_H_at_alpha * t + }) + .collect(); + end_timer!(a_evals_time); + + let a_poly_time = start_timer!(|| "Computing a poly"); + let a_poly = EvaluationsOnDomain::from_vec_and_domain(a_poly_on_B, domain_b).interpolate(); + end_timer!(a_poly_time); + + let b_evals_time = start_timer!(|| "Computing b evals on B"); + let b_poly_on_B = 
cfg_into_iter!(0..domain_b.size()) + .map(|i| a_denom[i] * b_denom[i] * c_denom[i]) + .collect(); + end_timer!(b_evals_time); + + let b_poly_time = start_timer!(|| "Computing b poly"); + let b_poly = EvaluationsOnDomain::from_vec_and_domain(b_poly_on_B, domain_b).interpolate(); + end_timer!(b_poly_time); + + let h_2_poly_time = start_timer!(|| "Computing sumcheck h poly"); + let h_2 = (&a_poly - &(&b_poly * &f)) + .divide_by_vanishing_poly(domain_k) + .unwrap() + .0; + end_timer!(h_2_poly_time); + + let msg = ProverMsg::EmptyMessage; + + assert!(g_2.degree() <= domain_k.size() - 2); + let oracles = ProverThirdOracles { + g_2: LabeledPolynomial::new("g_2".to_string(), g_2, Some(domain_k.size() - 2), None), + h_2: LabeledPolynomial::new("h_2".to_string(), h_2, None, None), + }; + end_timer!(round_time); + + Ok((msg, oracles)) + } + + /// Output the number of oracles sent by the prover in the third round. + pub fn prover_num_third_round_oracles() -> usize { + 3 + } + + /// Output the degree bounds of oracles in the third round. + pub fn prover_third_round_degree_bounds( + info: &IndexInfo, + ) -> impl Iterator> { + let num_non_zero = info.num_non_zero; + let k_size = GeneralEvaluationDomain::::compute_size_of_domain(num_non_zero).unwrap(); + + vec![Some(k_size - 2), None].into_iter() + } +} diff --git a/arkworks/marlin/src/ahp/verifier.rs b/arkworks/marlin/src/ahp/verifier.rs new file mode 100644 index 00000000..50fbe9b2 --- /dev/null +++ b/arkworks/marlin/src/ahp/verifier.rs @@ -0,0 +1,210 @@ +#![allow(non_snake_case)] + +use crate::ahp::indexer::IndexInfo; +use crate::ahp::*; +use ark_std::rand::RngCore; + +use ark_ff::PrimeField; +use ark_poly::{EvaluationDomain, GeneralEvaluationDomain}; +use ark_poly_commit::QuerySet; + +/// State of the AHP verifier +pub struct VerifierState { + pub(crate) domain_h: GeneralEvaluationDomain, + pub(crate) domain_k: GeneralEvaluationDomain, + + pub(crate) first_round_msg: Option>, + pub(crate) second_round_msg: Option>, + + pub(crate) gamma: Option, +} + +/// First message of the verifier. +#[derive(Copy, Clone)] +pub struct VerifierFirstMsg { + /// Query for the random polynomial. + pub alpha: F, + /// Randomizer for the lincheck for `A`. + pub eta_a: F, + /// Randomizer for the lincheck for `B`. + pub eta_b: F, + /// Randomizer for the lincheck for `C`. + pub eta_c: F, +} + +/// Second verifier message. +#[derive(Copy, Clone)] +pub struct VerifierSecondMsg { + /// Query for the second round of polynomials. + pub beta: F, +} + +impl AHPForR1CS { + /// Output the first message and next round state. + pub fn verifier_first_round( + index_info: IndexInfo, + rng: &mut R, + ) -> Result<(VerifierFirstMsg, VerifierState), Error> { + if index_info.num_constraints != index_info.num_variables { + return Err(Error::NonSquareMatrix); + } + + let domain_h = GeneralEvaluationDomain::new(index_info.num_constraints) + .ok_or(SynthesisError::PolynomialDegreeTooLarge)?; + + let domain_k = GeneralEvaluationDomain::new(index_info.num_non_zero) + .ok_or(SynthesisError::PolynomialDegreeTooLarge)?; + + let alpha = domain_h.sample_element_outside_domain(rng); + let eta_a = F::rand(rng); + let eta_b = F::rand(rng); + let eta_c = F::rand(rng); + + let msg = VerifierFirstMsg { + alpha, + eta_a, + eta_b, + eta_c, + }; + + let new_state = VerifierState { + domain_h, + domain_k, + first_round_msg: Some(msg), + second_round_msg: None, + gamma: None, + }; + + Ok((msg, new_state)) + } + + /// Output the second message and next round state. 
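+    // None of the verifier "messages" here involve private randomness: in the
+    // compiled SNARK they are all derived from a Fiat-Shamir RNG, e.g. (sketch):
+    //
+    //   let (first_msg, state) = AHPForR1CS::verifier_first_round(info, &mut fs_rng)?;
+    //   let (second_msg, state) = AHPForR1CS::verifier_second_round(state, &mut fs_rng);
+    //   let state = AHPForR1CS::verifier_third_round(state, &mut fs_rng);
+    //
+    // `sample_element_outside_domain` is used for alpha and beta so that
+    // v_H(alpha) and v_H(beta) are guaranteed to be nonzero.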
+ pub fn verifier_second_round( + mut state: VerifierState, + rng: &mut R, + ) -> (VerifierSecondMsg, VerifierState) { + let beta = state.domain_h.sample_element_outside_domain(rng); + let msg = VerifierSecondMsg { beta }; + state.second_round_msg = Some(msg); + + (msg, state) + } + + /// Output the third message and next round state. + pub fn verifier_third_round( + mut state: VerifierState, + rng: &mut R, + ) -> VerifierState { + state.gamma = Some(F::rand(rng)); + state + } + + /// Output the query state and next round state. + pub fn verifier_query_set<'a, R: RngCore>( + state: VerifierState, + _: &'a mut R, + ) -> (QuerySet, VerifierState) { + let beta = state.second_round_msg.unwrap().beta; + + let gamma = state.gamma.unwrap(); + + let mut query_set = QuerySet::new(); + // For the first linear combination + // Outer sumcheck test: + // s(beta) + r(alpha, beta) * (sum_M eta_M z_M(beta)) - t(beta) * z(beta) + // = h_1(beta) * v_H(beta) + beta * g_1(beta) + // + // Note that z is the interpolation of x || w, so it equals x + v_X * w + // We also use an optimization: instead of explicitly calculating z_c, we + // use the "virtual oracle" z_b * z_c + // + // LinearCombination::new( + // outer_sumcheck + // vec![ + // (F::one(), "mask_poly".into()), + // + // (r_alpha_at_beta * (eta_a + eta_c * z_b_at_beta), "z_a".into()), + // (r_alpha_at_beta * eta_b * z_b_at_beta, LCTerm::One), + // + // (-t_at_beta * v_X_at_beta, "w".into()), + // (-t_at_beta * x_at_beta, LCTerm::One), + // + // (-v_H_at_beta, "h_1".into()), + // (-beta * g_1_at_beta, LCTerm::One), + // ], + // ) + // LinearCombination::new("z_b", vec![(F::one(), z_b)]) + // LinearCombination::new("g_1", vec![(F::one(), g_1)], rhs::new(g_1_at_beta)) + // LinearCombination::new("t", vec![(F::one(), t)]) + query_set.insert(("g_1".into(), ("beta".into(), beta))); + query_set.insert(("z_b".into(), ("beta".into(), beta))); + query_set.insert(("t".into(), ("beta".into(), beta))); + query_set.insert(("outer_sumcheck".into(), ("beta".into(), beta))); + + // For the second linear combination + // Inner sumcheck test: + // h_2(gamma) * v_K(gamma) + // = a(gamma) - b(gamma) * (gamma g_2(gamma) + t(beta) / |K|) + // + // where + // a(X) := sum_M (eta_M v_H(beta) v_H(alpha) val_M(X) prod_N (beta - row_N(X)) (alpha - col_N(X))) + // b(X) := prod_M (beta - row_M(X)) (alpha - col_M(X)) + // + // We define "n_denom" := prod_N (beta - row_N(X)) (alpha - col_N(X))) + // + // LinearCombination::new("g_2", vec![(F::one(), g_2)]); + // + // LinearCombination::new( + // "a_denom".into(), + // vec![ + // (alpha * beta, LCTerm::One), + // (-alpha, "a_row"), + // (-beta, "a_col"), + // (F::one(), "a_row_col"), + // ]); + // LinearCombination::new( + // "b_denom".into(), + // vec![ + // (alpha * beta, LCTerm::One), + // (-alpha, "b_row"), + // (-beta, "b_col"), + // (F::one(), "b_row_col"), + // ]); + // LinearCombination::new( + // "c_denom".into(), + // vec![ + // (alpha * beta, LCTerm::one()), + // (-alpha, "c_row"), + // (-beta, "c_col"), + // (F::one(), "c_row_col"), + // ]); + // + // LinearCombination::new( + // "a_poly".into(), + // vec![ + // (eta_a * b_denom_at_gamma * c_denom_at_gamma, "a_val".into()), + // (eta_b * a_denom_at_gamma * c_denom_at_gamma, "b_val".into()), + // (eta_c * b_denom_at_gamma * a_denom_at_gamma, "c_val".into()), + // ], + // ) + // + // let v_H_at_alpha = domain_h.evaluate_vanishing_polynomial(alpha); + // let v_H_at_beta = domain_h.evaluate_vanishing_polynomial(beta); + // let v_K_at_gamma = 
domain_k.evaluate_vanishing_polynomial(gamma); + // + // let a_poly_lc *= v_H_at_alpha * v_H_at_beta; + // let b_lc = LinearCombination::new("b_poly", vec![(a_denom_at_gamma * b_denom_at_gamma * c_denom_at_gamma, "one")]); + // let h_lc = LinearCombination::new("b_poly", vec![(v_K_at_gamma, "h_2")]); + // + // // This LC is the only one that is evaluated: + // let inner_sumcheck = a_poly_lc - (b_lc * (gamma * &g_2_at_gamma + &(t_at_beta / &k_size))) - h_lc + // main_lc.set_label("inner_sumcheck"); + query_set.insert(("g_2".into(), ("gamma".into(), gamma))); + query_set.insert(("a_denom".into(), ("gamma".into(), gamma))); + query_set.insert(("b_denom".into(), ("gamma".into(), gamma))); + query_set.insert(("c_denom".into(), ("gamma".into(), gamma))); + query_set.insert(("inner_sumcheck".into(), ("gamma".into(), gamma))); + + (query_set, state) + } +} diff --git a/arkworks/marlin/src/data_structures.rs b/arkworks/marlin/src/data_structures.rs new file mode 100644 index 00000000..553527f6 --- /dev/null +++ b/arkworks/marlin/src/data_structures.rs @@ -0,0 +1,196 @@ +use crate::ahp::indexer::*; +use crate::ahp::prover::ProverMsg; +use crate::Vec; +use ark_ff::PrimeField; +use ark_poly::univariate::DensePolynomial; +use ark_poly_commit::{BatchLCProof, PolynomialCommitment}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError}; +use ark_std::{ + format, + io::{Read, Write}, +}; + +/* ************************************************************************* */ +/* ************************************************************************* */ +/* ************************************************************************* */ + +/// The universal public parameters for the argument system. +pub type UniversalSRS = >>::UniversalParams; + +/* ************************************************************************* */ +/* ************************************************************************* */ +/* ************************************************************************* */ + +/// Verification key for a specific index (i.e., R1CS matrices). +#[derive(CanonicalSerialize, CanonicalDeserialize)] +pub struct IndexVerifierKey>> { + /// Stores information about the size of the index, as well as its field of + /// definition. + pub index_info: IndexInfo, + /// Commitments to the indexed polynomials. + pub index_comms: Vec, + /// The verifier key for this index, trimmed from the universal SRS. + pub verifier_key: PC::VerifierKey, +} + +impl>> ark_ff::ToBytes + for IndexVerifierKey +{ + fn write(&self, mut w: W) -> ark_std::io::Result<()> { + self.index_info.write(&mut w)?; + self.index_comms.write(&mut w) + } +} + +impl>> Clone + for IndexVerifierKey +{ + fn clone(&self) -> Self { + Self { + index_comms: self.index_comms.clone(), + index_info: self.index_info.clone(), + verifier_key: self.verifier_key.clone(), + } + } +} + +impl>> IndexVerifierKey { + /// Iterate over the commitments to indexed polynomials in `self`. + pub fn iter(&self) -> impl Iterator { + self.index_comms.iter() + } +} + +/* ************************************************************************* */ +/* ************************************************************************* */ +/* ************************************************************************* */ + +/// Proving key for a specific index (i.e., R1CS matrices). +#[derive(CanonicalSerialize, CanonicalDeserialize)] +pub struct IndexProverKey>> { + /// The index verifier key. 
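+    // An `IndexProverKey` bundles everything `prove` needs: the arithmetized
+    // index (the R1CS matrices and their indexed polynomials), the commitment
+    // randomness used when committing to those polynomials, the trimmed
+    // committer key, and a copy of the verifier key, which is re-absorbed
+    // into the Fiat-Shamir transcript at proving time.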
+ pub index_vk: IndexVerifierKey, + /// The randomness for the index polynomial commitments. + pub index_comm_rands: Vec, + /// The index itself. + pub index: Index, + /// The committer key for this index, trimmed from the universal SRS. + pub committer_key: PC::CommitterKey, +} + +impl>> Clone for IndexProverKey +where + PC::Commitment: Clone, +{ + fn clone(&self) -> Self { + Self { + index_vk: self.index_vk.clone(), + index_comm_rands: self.index_comm_rands.clone(), + index: self.index.clone(), + committer_key: self.committer_key.clone(), + } + } +} + +/* ************************************************************************* */ +/* ************************************************************************* */ +/* ************************************************************************* */ + +/// A zkSNARK proof. +#[derive(CanonicalSerialize, CanonicalDeserialize)] +pub struct Proof>> { + /// Commitments to the polynomials produced by the AHP prover. + pub commitments: Vec>, + /// Evaluations of these polynomials. + pub evaluations: Vec, + /// The field elements sent by the prover. + pub prover_messages: Vec>, + /// An evaluation proof from the polynomial commitment. + pub pc_proof: BatchLCProof, PC>, +} + +impl>> Proof { + /// Construct a new proof. + pub fn new( + commitments: Vec>, + evaluations: Vec, + prover_messages: Vec>, + pc_proof: BatchLCProof, PC>, + ) -> Self { + Self { + commitments, + evaluations, + prover_messages, + pc_proof, + } + } + + /// Prints information about the size of the proof. + pub fn print_size_info(&self) { + use ark_poly_commit::{PCCommitment, PCProof}; + + let size_of_fe_in_bytes = F::zero().into_repr().as_ref().len() * 8; + let mut num_comms_without_degree_bounds = 0; + let mut num_comms_with_degree_bounds = 0; + let mut size_bytes_comms_without_degree_bounds = 0; + let mut size_bytes_comms_with_degree_bounds = 0; + let mut size_bytes_proofs = 0; + for c in self.commitments.iter().flat_map(|c| c) { + if !c.has_degree_bound() { + num_comms_without_degree_bounds += 1; + size_bytes_comms_without_degree_bounds += c.size_in_bytes(); + } else { + num_comms_with_degree_bounds += 1; + size_bytes_comms_with_degree_bounds += c.size_in_bytes(); + } + } + + let proofs: Vec = self.pc_proof.proof.clone().into(); + let num_proofs = proofs.len(); + for proof in &proofs { + size_bytes_proofs += proof.size_in_bytes(); + } + + let num_evals = self.evaluations.len(); + let evals_size_in_bytes = num_evals * size_of_fe_in_bytes; + let num_prover_messages: usize = self + .prover_messages + .iter() + .map(|v| match v { + ProverMsg::EmptyMessage => 0, + ProverMsg::FieldElements(elems) => elems.len(), + }) + .sum(); + let prover_msg_size_in_bytes = num_prover_messages * size_of_fe_in_bytes; + let arg_size = size_bytes_comms_with_degree_bounds + + size_bytes_comms_without_degree_bounds + + size_bytes_proofs + + prover_msg_size_in_bytes + + evals_size_in_bytes; + let stats = format!( + "Argument size in bytes: {}\n\n\ + Number of commitments without degree bounds: {}\n\ + Size (in bytes) of commitments without degree bounds: {}\n\ + Number of commitments with degree bounds: {}\n\ + Size (in bytes) of commitments with degree bounds: {}\n\n\ + Number of evaluation proofs: {}\n\ + Size (in bytes) of evaluation proofs: {}\n\n\ + Number of evaluations: {}\n\ + Size (in bytes) of evaluations: {}\n\n\ + Number of field elements in prover messages: {}\n\ + Size (in bytes) of prover message: {}\n", + arg_size, + num_comms_without_degree_bounds, + size_bytes_comms_without_degree_bounds, + 
num_comms_with_degree_bounds, + size_bytes_comms_with_degree_bounds, + num_proofs, + size_bytes_proofs, + num_evals, + evals_size_in_bytes, + num_prover_messages, + prover_msg_size_in_bytes, + ); + add_to_trace!(|| "Statistics about proof", || stats); + } +} diff --git a/arkworks/marlin/src/error.rs b/arkworks/marlin/src/error.rs new file mode 100644 index 00000000..07f1b344 --- /dev/null +++ b/arkworks/marlin/src/error.rs @@ -0,0 +1,26 @@ +use crate::ahp::Error as AHPError; + +/// A `enum` specifying the possible failure modes of the `SNARK`. +#[derive(Debug)] +pub enum Error { + /// The index is too large for the universal public parameters. + IndexTooLarge, + /// There was an error in the underlying holographic IOP. + AHPError(AHPError), + /// There was an error in the underlying polynomial commitment. + PolynomialCommitmentError(E), +} + +impl From for Error { + fn from(err: AHPError) -> Self { + Error::AHPError(err) + } +} + +impl Error { + /// Convert an error in the underlying polynomial commitment scheme + /// to a `Error`. + pub fn from_pc_err(err: E) -> Self { + Error::PolynomialCommitmentError(err) + } +} diff --git a/arkworks/marlin/src/lib.rs b/arkworks/marlin/src/lib.rs new file mode 100644 index 00000000..20fdf0e3 --- /dev/null +++ b/arkworks/marlin/src/lib.rs @@ -0,0 +1,433 @@ +#![cfg_attr(not(feature = "std"), no_std)] +//! A crate for the Marlin preprocessing zkSNARK for R1CS. +//! +//! # Note +//! +//! Currently, Marlin only supports R1CS instances where the number of inputs +//! is the same as the number of constraints (i.e., where the constraint +//! matrices are square). Furthermore, Marlin only supports instances where the +//! public inputs are of size one less than a power of 2 (i.e., 2^n - 1). +#![deny(unused_import_braces, unused_qualifications, trivial_casts)] +#![deny(trivial_numeric_casts, private_in_public)] +#![deny(stable_features, unreachable_pub, non_shorthand_field_patterns)] +#![deny(unused_attributes, unused_imports, unused_mut, missing_docs)] +#![deny(renamed_and_removed_lints, stable_features, unused_allocation)] +#![deny(unused_comparisons, bare_trait_objects, unused_must_use)] +#![forbid(unsafe_code)] + +#[macro_use] +extern crate ark_std; + +use ark_ff::{to_bytes, PrimeField, UniformRand}; +use ark_poly::{univariate::DensePolynomial, EvaluationDomain, GeneralEvaluationDomain}; +use ark_poly_commit::Evaluations; +use ark_poly_commit::{LabeledCommitment, PCUniversalParams, PolynomialCommitment}; +use ark_relations::r1cs::ConstraintSynthesizer; +use ark_std::rand::RngCore; +use digest::Digest; + +use ark_std::{ + collections::BTreeMap, + format, + marker::PhantomData, + string::{String, ToString}, + vec, + vec::Vec, +}; + +#[cfg(not(feature = "std"))] +macro_rules! eprintln { + () => {}; + ($($arg: tt)*) => {}; +} + +/// Implements a Fiat-Shamir based Rng that allows one to incrementally update +/// the seed based on new messages in the proof transcript. +pub mod rng; +use rng::FiatShamirRng; + +mod error; +pub use error::*; + +mod data_structures; +pub use data_structures::*; + +/// Implements an Algebraic Holographic Proof (AHP) for the R1CS indexed relation. +pub mod ahp; +pub use ahp::AHPForR1CS; +use ahp::EvaluationsProvider; + +#[cfg(test)] +mod test; + +/// The compiled argument system. +pub struct Marlin>, D: Digest>( + #[doc(hidden)] PhantomData, + #[doc(hidden)] PhantomData, + #[doc(hidden)] PhantomData, +); + +impl>, D: Digest> Marlin { + /// The personalization string for this protocol. Used to personalize the + /// Fiat-Shamir rng. 
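+    // End-to-end usage of this struct, as a sketch; the concrete instantiation
+    // (`Bls12_381`, `MarlinKZG10`, `Blake2s`) is an illustrative choice, not a
+    // requirement of the API:
+    //
+    //   use ark_bls12_381::{Bls12_381, Fr};
+    //   use ark_poly::univariate::DensePolynomial;
+    //   use ark_poly_commit::marlin_pc::MarlinKZG10;
+    //   use blake2::Blake2s;
+    //
+    //   type MarlinInst = Marlin<Fr, MarlinKZG10<Bls12_381, DensePolynomial<Fr>>, Blake2s>;
+    //
+    //   let srs = MarlinInst::universal_setup(num_constraints, num_vars, num_non_zero, rng)?;
+    //   let (pk, vk) = MarlinInst::index(&srs, circuit.clone())?;
+    //   let proof = MarlinInst::prove(&pk, circuit, rng)?;
+    //   assert!(MarlinInst::verify(&vk, &public_inputs, &proof, rng)?);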
+ pub const PROTOCOL_NAME: &'static [u8] = b"MARLIN-2019"; + + /// Generate the universal prover and verifier keys for the + /// argument system. + pub fn universal_setup( + num_constraints: usize, + num_variables: usize, + num_non_zero: usize, + rng: &mut R, + ) -> Result, Error> { + let max_degree = AHPForR1CS::::max_degree(num_constraints, num_variables, num_non_zero)?; + let setup_time = start_timer!(|| { + format!( + "Marlin::UniversalSetup with max_degree {}, computed for a maximum of {} constraints, {} vars, {} non_zero", + max_degree, num_constraints, num_variables, num_non_zero, + ) + }); + + let srs = PC::setup(max_degree, None, rng).map_err(Error::from_pc_err); + end_timer!(setup_time); + srs + } + + /// Generate the index-specific (i.e., circuit-specific) prover and verifier + /// keys. This is a deterministic algorithm that anyone can rerun. + pub fn index>( + srs: &UniversalSRS, + c: C, + ) -> Result<(IndexProverKey, IndexVerifierKey), Error> { + let index_time = start_timer!(|| "Marlin::Index"); + + // TODO: Add check that c is in the correct mode. + let index = AHPForR1CS::index(c)?; + if srs.max_degree() < index.max_degree() { + Err(Error::IndexTooLarge)?; + } + + let coeff_support = AHPForR1CS::get_degree_bounds(&index.index_info); + // Marlin only needs degree 2 random polynomials + let supported_hiding_bound = 1; + let (committer_key, verifier_key) = PC::trim( + &srs, + index.max_degree(), + supported_hiding_bound, + Some(&coeff_support), + ) + .map_err(Error::from_pc_err)?; + + let commit_time = start_timer!(|| "Commit to index polynomials"); + let (index_comms, index_comm_rands): (_, _) = + PC::commit(&committer_key, index.iter(), None).map_err(Error::from_pc_err)?; + end_timer!(commit_time); + + let index_comms = index_comms + .into_iter() + .map(|c| c.commitment().clone()) + .collect(); + let index_vk = IndexVerifierKey { + index_info: index.index_info, + index_comms, + verifier_key, + }; + + let index_pk = IndexProverKey { + index, + index_comm_rands, + index_vk: index_vk.clone(), + committer_key, + }; + + end_timer!(index_time); + + Ok((index_pk, index_vk)) + } + + /// Create a zkSNARK asserting that the constraint system is satisfied. + pub fn prove, R: RngCore>( + index_pk: &IndexProverKey, + c: C, + zk_rng: &mut R, + ) -> Result, Error> { + let prover_time = start_timer!(|| "Marlin::Prover"); + // Add check that c is in the correct mode. 
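+        // The Fiat-Shamir RNG below is seeded with the protocol name, the
+        // index verifier key, and the formatted public input, which binds the
+        // entire transcript to the statement being proven; `verify` recomputes
+        // the identical seed before replaying the verifier's rounds.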
+ + let prover_init_state = AHPForR1CS::prover_init(&index_pk.index, c)?; + let public_input = prover_init_state.public_input(); + let mut fs_rng = FiatShamirRng::::from_seed( + &to_bytes![&Self::PROTOCOL_NAME, &index_pk.index_vk, &public_input].unwrap(), + ); + + // -------------------------------------------------------------------- + // First round + + let (prover_first_msg, prover_first_oracles, prover_state) = + AHPForR1CS::prover_first_round(prover_init_state, zk_rng)?; + + let first_round_comm_time = start_timer!(|| "Committing to first round polys"); + let (first_comms, first_comm_rands) = PC::commit( + &index_pk.committer_key, + prover_first_oracles.iter(), + Some(zk_rng), + ) + .map_err(Error::from_pc_err)?; + end_timer!(first_round_comm_time); + + fs_rng.absorb(&to_bytes![first_comms, prover_first_msg].unwrap()); + + let (verifier_first_msg, verifier_state) = + AHPForR1CS::verifier_first_round(index_pk.index_vk.index_info, &mut fs_rng)?; + // -------------------------------------------------------------------- + + // -------------------------------------------------------------------- + // Second round + + let (prover_second_msg, prover_second_oracles, prover_state) = + AHPForR1CS::prover_second_round(&verifier_first_msg, prover_state, zk_rng); + + let second_round_comm_time = start_timer!(|| "Committing to second round polys"); + let (second_comms, second_comm_rands) = PC::commit( + &index_pk.committer_key, + prover_second_oracles.iter(), + Some(zk_rng), + ) + .map_err(Error::from_pc_err)?; + end_timer!(second_round_comm_time); + + fs_rng.absorb(&to_bytes![second_comms, prover_second_msg].unwrap()); + + let (verifier_second_msg, verifier_state) = + AHPForR1CS::verifier_second_round(verifier_state, &mut fs_rng); + // -------------------------------------------------------------------- + + // -------------------------------------------------------------------- + // Third round + let (prover_third_msg, prover_third_oracles) = + AHPForR1CS::prover_third_round(&verifier_second_msg, prover_state, zk_rng)?; + + let third_round_comm_time = start_timer!(|| "Committing to third round polys"); + let (third_comms, third_comm_rands) = PC::commit( + &index_pk.committer_key, + prover_third_oracles.iter(), + Some(zk_rng), + ) + .map_err(Error::from_pc_err)?; + end_timer!(third_round_comm_time); + + fs_rng.absorb(&to_bytes![third_comms, prover_third_msg].unwrap()); + + let verifier_state = AHPForR1CS::verifier_third_round(verifier_state, &mut fs_rng); + // -------------------------------------------------------------------- + + // Gather prover polynomials in one vector. + let polynomials: Vec<_> = index_pk + .index + .iter() + .chain(prover_first_oracles.iter()) + .chain(prover_second_oracles.iter()) + .chain(prover_third_oracles.iter()) + .collect(); + + // Gather commitments in one vector. + #[rustfmt::skip] + let commitments = vec![ + first_comms.iter().map(|p| p.commitment().clone()).collect(), + second_comms.iter().map(|p| p.commitment().clone()).collect(), + third_comms.iter().map(|p| p.commitment().clone()).collect(), + ]; + let labeled_comms: Vec<_> = index_pk + .index_vk + .iter() + .cloned() + .zip(&AHPForR1CS::::INDEXER_POLYNOMIALS) + .map(|(c, l)| LabeledCommitment::new(l.to_string(), c, None)) + .chain(first_comms.iter().cloned()) + .chain(second_comms.iter().cloned()) + .chain(third_comms.iter().cloned()) + .collect(); + + // Gather commitment randomness together. 
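+        // The randomness gathered below must line up positionally with the
+        // `polynomials` and `labeled_comms` vectors above (index polynomials
+        // first, then rounds one through three), since `PC::open_combinations`
+        // pairs each polynomial with its blinding randomness by position.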
+ let comm_rands: Vec = index_pk + .index_comm_rands + .clone() + .into_iter() + .chain(first_comm_rands) + .chain(second_comm_rands) + .chain(third_comm_rands) + .collect(); + + // Compute the AHP verifier's query set. + let (query_set, verifier_state) = + AHPForR1CS::verifier_query_set(verifier_state, &mut fs_rng); + let lc_s = AHPForR1CS::construct_linear_combinations( + &public_input, + &polynomials, + &verifier_state, + )?; + + let eval_time = start_timer!(|| "Evaluating linear combinations over query set"); + let mut evaluations = Vec::new(); + for (label, (_, point)) in &query_set { + let lc = lc_s + .iter() + .find(|lc| &lc.label == label) + .ok_or(ahp::Error::MissingEval(label.to_string()))?; + let eval = polynomials.get_lc_eval(&lc, *point)?; + if !AHPForR1CS::::LC_WITH_ZERO_EVAL.contains(&lc.label.as_ref()) { + evaluations.push((label.to_string(), eval)); + } + } + + evaluations.sort_by(|a, b| a.0.cmp(&b.0)); + let evaluations = evaluations.into_iter().map(|x| x.1).collect::>(); + end_timer!(eval_time); + + fs_rng.absorb(&evaluations); + let opening_challenge: F = u128::rand(&mut fs_rng).into(); + + let pc_proof = PC::open_combinations( + &index_pk.committer_key, + &lc_s, + polynomials, + &labeled_comms, + &query_set, + opening_challenge, + &comm_rands, + Some(zk_rng), + ) + .map_err(Error::from_pc_err)?; + + // Gather prover messages together. + let prover_messages = vec![prover_first_msg, prover_second_msg, prover_third_msg]; + + let proof = Proof::new(commitments, evaluations, prover_messages, pc_proof); + proof.print_size_info(); + end_timer!(prover_time); + Ok(proof) + } + + /// Verify that a proof for the constrain system defined by `C` asserts that + /// all constraints are satisfied. + pub fn verify( + index_vk: &IndexVerifierKey, + public_input: &[F], + proof: &Proof, + rng: &mut R, + ) -> Result> { + let verifier_time = start_timer!(|| "Marlin::Verify"); + + let public_input = { + let domain_x = GeneralEvaluationDomain::::new(public_input.len() + 1).unwrap(); + + let mut unpadded_input = public_input.to_vec(); + unpadded_input.resize( + core::cmp::max(public_input.len(), domain_x.size() - 1), + F::zero(), + ); + + unpadded_input + }; + + let mut fs_rng = FiatShamirRng::::from_seed( + &to_bytes![&Self::PROTOCOL_NAME, &index_vk, &public_input].unwrap(), + ); + + // -------------------------------------------------------------------- + // First round + + let first_comms = &proof.commitments[0]; + fs_rng.absorb(&to_bytes![first_comms, proof.prover_messages[0]].unwrap()); + + let (_, verifier_state) = + AHPForR1CS::verifier_first_round(index_vk.index_info, &mut fs_rng)?; + // -------------------------------------------------------------------- + + // -------------------------------------------------------------------- + // Second round + let second_comms = &proof.commitments[1]; + fs_rng.absorb(&to_bytes![second_comms, proof.prover_messages[1]].unwrap()); + + let (_, verifier_state) = AHPForR1CS::verifier_second_round(verifier_state, &mut fs_rng); + // -------------------------------------------------------------------- + + // -------------------------------------------------------------------- + // Third round + let third_comms = &proof.commitments[2]; + fs_rng.absorb(&to_bytes![third_comms, proof.prover_messages[2]].unwrap()); + + let verifier_state = AHPForR1CS::verifier_third_round(verifier_state, &mut fs_rng); + // -------------------------------------------------------------------- + + // Collect degree bounds for commitments. 
Indexed polynomials have *no* + // degree bounds because we know the committed index polynomial has the + // correct degree. + let index_info = index_vk.index_info; + let degree_bounds = vec![None; index_vk.index_comms.len()] + .into_iter() + .chain(AHPForR1CS::prover_first_round_degree_bounds(&index_info)) + .chain(AHPForR1CS::prover_second_round_degree_bounds(&index_info)) + .chain(AHPForR1CS::prover_third_round_degree_bounds(&index_info)) + .collect::>(); + + // Gather commitments in one vector. + let commitments: Vec<_> = index_vk + .iter() + .chain(first_comms) + .chain(second_comms) + .chain(third_comms) + .cloned() + .zip(AHPForR1CS::::polynomial_labels()) + .zip(degree_bounds) + .map(|((c, l), d)| LabeledCommitment::new(l, c, d)) + .collect(); + + let (query_set, verifier_state) = + AHPForR1CS::verifier_query_set(verifier_state, &mut fs_rng); + + fs_rng.absorb(&proof.evaluations); + let opening_challenge: F = u128::rand(&mut fs_rng).into(); + + let mut evaluations = Evaluations::new(); + let mut evaluation_labels = Vec::new(); + for (poly_label, (_, point)) in query_set.iter().cloned() { + if AHPForR1CS::::LC_WITH_ZERO_EVAL.contains(&poly_label.as_ref()) { + evaluations.insert((poly_label, point), F::zero()); + } else { + evaluation_labels.push((poly_label, point)); + } + } + evaluation_labels.sort_by(|a, b| a.0.cmp(&b.0)); + for (q, eval) in evaluation_labels.into_iter().zip(&proof.evaluations) { + evaluations.insert(q, *eval); + } + + let lc_s = AHPForR1CS::construct_linear_combinations( + &public_input, + &evaluations, + &verifier_state, + )?; + + let evaluations_are_correct = PC::check_combinations( + &index_vk.verifier_key, + &lc_s, + &commitments, + &query_set, + &evaluations, + &proof.pc_proof, + opening_challenge, + rng, + ) + .map_err(Error::from_pc_err)?; + + if !evaluations_are_correct { + eprintln!("PC::Check failed"); + } + end_timer!(verifier_time, || format!( + " PC::Check for AHP Verifier linear equations: {}", + evaluations_are_correct + )); + Ok(evaluations_are_correct) + } +} diff --git a/arkworks/marlin/src/rng.rs b/arkworks/marlin/src/rng.rs new file mode 100644 index 00000000..ecded54f --- /dev/null +++ b/arkworks/marlin/src/rng.rs @@ -0,0 +1,68 @@ +use crate::Vec; +use ark_ff::{FromBytes, ToBytes}; +use ark_std::marker::PhantomData; +use ark_std::rand::{RngCore, SeedableRng}; +use digest::{generic_array::GenericArray, Digest}; +use rand_chacha::ChaChaRng; + +/// A `SeedableRng` that refreshes its seed by hashing together the previous seed +/// and the new seed material. +// TODO: later: re-evaluate decision about ChaChaRng +pub struct FiatShamirRng { + r: ChaChaRng, + seed: GenericArray, + #[doc(hidden)] + digest: PhantomData, +} + +impl RngCore for FiatShamirRng { + #[inline] + fn next_u32(&mut self) -> u32 { + self.r.next_u32() + } + + #[inline] + fn next_u64(&mut self) -> u64 { + self.r.next_u64() + } + + #[inline] + fn fill_bytes(&mut self, dest: &mut [u8]) { + self.r.fill_bytes(dest); + } + + #[inline] + fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), ark_std::rand::Error> { + Ok(self.r.fill_bytes(dest)) + } +} + +impl FiatShamirRng { + /// Create a new `Self` by initializing with a fresh seed. + /// `self.seed = H(self.seed || new_seed)`. 
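+    /// (For this constructor there is no previous seed, so this amounts to
+    /// `self.seed = H(new_seed)`, which is what the body below computes.)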
+ #[inline] + pub fn from_seed<'a, T: 'a + ToBytes>(seed: &'a T) -> Self { + let mut bytes = Vec::new(); + seed.write(&mut bytes).expect("failed to convert to bytes"); + let seed = D::digest(&bytes); + let r_seed: [u8; 32] = FromBytes::read(seed.as_ref()).expect("failed to get [u32; 8]"); + let r = ChaChaRng::from_seed(r_seed); + Self { + r, + seed, + digest: PhantomData, + } + } + + /// Refresh `self.seed` with new material. Achieved by setting + /// `self.seed = H(self.seed || new_seed)`. + #[inline] + pub fn absorb<'a, T: 'a + ToBytes>(&mut self, seed: &'a T) { + let mut bytes = Vec::new(); + seed.write(&mut bytes).expect("failed to convert to bytes"); + bytes.extend_from_slice(&self.seed); + self.seed = D::digest(&bytes); + let seed: [u8; 32] = FromBytes::read(self.seed.as_ref()).expect("failed to get [u32; 8]"); + self.r = ChaChaRng::from_seed(seed); + } +} diff --git a/arkworks/marlin/src/test.rs b/arkworks/marlin/src/test.rs new file mode 100644 index 00000000..6e4a1905 --- /dev/null +++ b/arkworks/marlin/src/test.rs @@ -0,0 +1,228 @@ +use ark_ff::Field; +use ark_relations::{ + lc, + r1cs::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisError}, +}; +use ark_std::marker::PhantomData; + +#[derive(Copy, Clone)] +struct Circuit { + a: Option, + b: Option, + num_constraints: usize, + num_variables: usize, +} + +impl ConstraintSynthesizer for Circuit { + fn generate_constraints( + self, + cs: ConstraintSystemRef, + ) -> Result<(), SynthesisError> { + let a = cs.new_witness_variable(|| self.a.ok_or(SynthesisError::AssignmentMissing))?; + let b = cs.new_witness_variable(|| self.b.ok_or(SynthesisError::AssignmentMissing))?; + let c = cs.new_input_variable(|| { + let mut a = self.a.ok_or(SynthesisError::AssignmentMissing)?; + let b = self.b.ok_or(SynthesisError::AssignmentMissing)?; + + a.mul_assign(&b); + Ok(a) + })?; + let d = cs.new_input_variable(|| { + let mut a = self.a.ok_or(SynthesisError::AssignmentMissing)?; + let b = self.b.ok_or(SynthesisError::AssignmentMissing)?; + + a.mul_assign(&b); + a.mul_assign(&b); + Ok(a) + })?; + + for _ in 0..(self.num_variables - 3) { + let _ = cs.new_witness_variable(|| self.a.ok_or(SynthesisError::AssignmentMissing))?; + } + + for _ in 0..(self.num_constraints - 1) { + cs.enforce_constraint(lc!() + a, lc!() + b, lc!() + c)?; + } + cs.enforce_constraint(lc!() + c, lc!() + b, lc!() + d)?; + + Ok(()) + } +} + +#[derive(Clone)] +/// Define a constraint system that would trigger outlining. +struct OutlineTestCircuit { + field_phantom: PhantomData, +} + +impl ConstraintSynthesizer for OutlineTestCircuit { + fn generate_constraints(self, cs: ConstraintSystemRef) -> Result<(), SynthesisError> { + // This program checks if the input elements are between 0 and 9. + // + // Note that this constraint system is neither the most intuitive way nor + // the most efficient way for such a task. It is for testing purposes, + // as we want to trigger the outlining. + // + let mut inputs = Vec::new(); + for i in 0..5 { + inputs.push(cs.new_input_variable(|| Ok(F::from(i as u128)))?); + } + + for i in 0..5 { + let mut total_count_for_this_input = cs.new_lc(lc!()).unwrap(); + + for bucket in 0..10 { + let count_increment_for_this_bucket = + cs.new_witness_variable(|| Ok(F::from(i == bucket)))?; + + total_count_for_this_input = cs + .new_lc( + lc!() + + (F::one(), total_count_for_this_input) + + (F::one(), count_increment_for_this_bucket.clone()), + ) + .unwrap(); + + // Only when `input[i]` equals `bucket` can `count_increment_for_this_bucket` be nonzero. 
+ // + // A malicious prover can make `count_increment_for_this_bucket` neither 0 nor 1. + // But the constraint on `total_count_for_this_input` will reject such case. + // + // At a high level, only one of the `count_increment_for_this_bucket` among all the buckets + // could be nonzero, which equals `total_count_for_this_input`. Thus, by checking whether + // `total_count_for_this_input` is 1, we know this input number is in the range. + // + cs.enforce_constraint( + lc!() + (F::one(), inputs[i].clone()) + - (F::from(bucket as u128), ark_relations::r1cs::Variable::One), + lc!() + (F::one(), count_increment_for_this_bucket), + lc!(), + )?; + } + + // Enforce `total_count_for_this_input` to be one. + cs.enforce_constraint( + lc!(), + lc!(), + lc!() + (F::one(), total_count_for_this_input.clone()) + - (F::one(), ark_relations::r1cs::Variable::One), + )?; + } + + Ok(()) + } +} + +mod marlin { + use super::*; + use crate::Marlin; + + use ark_bls12_381::{Bls12_381, Fr}; + use ark_ff::UniformRand; + use ark_poly::univariate::DensePolynomial; + use ark_poly_commit::marlin_pc::MarlinKZG10; + use ark_std::ops::MulAssign; + use blake2::Blake2s; + + type MultiPC = MarlinKZG10>; + type MarlinInst = Marlin; + + fn test_circuit(num_constraints: usize, num_variables: usize) { + let rng = &mut ark_std::test_rng(); + + let universal_srs = MarlinInst::universal_setup(100, 25, 100, rng).unwrap(); + + for _ in 0..100 { + let a = Fr::rand(rng); + let b = Fr::rand(rng); + let mut c = a; + c.mul_assign(&b); + let mut d = c; + d.mul_assign(&b); + + let circ = Circuit { + a: Some(a), + b: Some(b), + num_constraints, + num_variables, + }; + + let (index_pk, index_vk) = MarlinInst::index(&universal_srs, circ.clone()).unwrap(); + println!("Called index"); + + let proof = MarlinInst::prove(&index_pk, circ, rng).unwrap(); + println!("Called prover"); + + assert!(MarlinInst::verify(&index_vk, &[c, d], &proof, rng).unwrap()); + println!("Called verifier"); + println!("\nShould not verify (i.e. verifier messages should print below):"); + assert!(!MarlinInst::verify(&index_vk, &[a, a], &proof, rng).unwrap()); + } + } + + #[test] + fn prove_and_verify_with_tall_matrix_big() { + let num_constraints = 100; + let num_variables = 25; + + test_circuit(num_constraints, num_variables); + } + + #[test] + fn prove_and_verify_with_tall_matrix_small() { + let num_constraints = 26; + let num_variables = 25; + + test_circuit(num_constraints, num_variables); + } + + #[test] + fn prove_and_verify_with_squat_matrix_big() { + let num_constraints = 25; + let num_variables = 100; + + test_circuit(num_constraints, num_variables); + } + + #[test] + fn prove_and_verify_with_squat_matrix_small() { + let num_constraints = 25; + let num_variables = 26; + + test_circuit(num_constraints, num_variables); + } + + #[test] + fn prove_and_verify_with_square_matrix() { + let num_constraints = 25; + let num_variables = 25; + + test_circuit(num_constraints, num_variables); + } + + #[test] + /// Test on a constraint system that will trigger outlining. 
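+    /// (Outlining refers to `ark-relations` replacing long chains of linear
+    /// combinations with fresh witness variables to lower matrix density; the
+    /// nested `new_lc` chain in `OutlineTestCircuit` is built to exercise it.)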
+ fn prove_and_test_outlining() { + let rng = &mut ark_std::test_rng(); + + let universal_srs = MarlinInst::universal_setup(150, 150, 150, rng).unwrap(); + + let circ = OutlineTestCircuit { + field_phantom: PhantomData, + }; + + let (index_pk, index_vk) = MarlinInst::index(&universal_srs, circ.clone()).unwrap(); + println!("Called index"); + + let proof = MarlinInst::prove(&index_pk, circ, rng).unwrap(); + println!("Called prover"); + + let mut inputs = Vec::new(); + for i in 0..5 { + inputs.push(Fr::from(i as u128)); + } + + assert!(MarlinInst::verify(&index_vk, &inputs, &proof, rng).unwrap()); + println!("Called verifier"); + } +} diff --git a/arkworks/nonnative/.github/PULL_REQUEST_TEMPLATE.md b/arkworks/nonnative/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..fe00a144 --- /dev/null +++ b/arkworks/nonnative/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,26 @@ + + +## Description + + + +closes: #XXXX + +--- + +Before we can merge this PR, please make sure that all the following items have been +checked off. If any of the checklist items are not applicable, please leave them but +write a little note why. + +- [ ] Targeted PR against correct branch (master) +- [ ] Linked to Github issue with discussion and accepted design OR have an explanation in the PR that describes this work. +- [ ] Wrote unit tests +- [ ] Updated relevant documentation in the code +- [ ] Added a relevant changelog entry to the `Pending` section in `CHANGELOG.md` +- [ ] Re-reviewed `Files changed` in the Github PR explorer \ No newline at end of file diff --git a/arkworks/nonnative/.github/dependabot.yml b/arkworks/nonnative/.github/dependabot.yml new file mode 100644 index 00000000..5b2a1dc4 --- /dev/null +++ b/arkworks/nonnative/.github/dependabot.yml @@ -0,0 +1,11 @@ +version: 2 +updates: +- package-ecosystem: cargo + directory: "/" + schedule: + interval: daily + open-pull-requests-limit: 10 + ignore: + - dependency-name: rand + versions: + - 0.8.0 diff --git a/arkworks/nonnative/.github/workflows/ci.yml b/arkworks/nonnative/.github/workflows/ci.yml new file mode 100644 index 00000000..55bc3f7d --- /dev/null +++ b/arkworks/nonnative/.github/workflows/ci.yml @@ -0,0 +1,120 @@ +name: CI +on: + pull_request: + push: + branches: + - master +env: + RUST_BACKTRACE: 1 + +jobs: + style: + name: Check Style + runs-on: ubuntu-latest + steps: + + - name: Checkout + uses: actions/checkout@v1 + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + components: rustfmt + + - name: cargo fmt --check + uses: actions-rs/cargo@v1 + with: + command: fmt + args: --all -- --check + + test: + name: Test + runs-on: ubuntu-latest + env: + RUSTFLAGS: -Dwarnings --cfg ci + strategy: + matrix: + rust: + - stable + - nightly + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Install Rust (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: ${{ matrix.rust }} + override: true + + - uses: actions/cache@v2 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: Check examples + uses: actions-rs/cargo@v1 + with: + command: check + args: --examples --all + + - name: Check examples with all features on stable + uses: actions-rs/cargo@v1 + with: + command: check + args: --examples --all-features --all + if: matrix.rust == 'stable' + + - name: Check benchmarks on nightly + uses: actions-rs/cargo@v1 + with: + command: check + args: 
--all-features --examples --all --benches + if: matrix.rust == 'nightly' + + - name: Test + uses: actions-rs/cargo@v1 + with: + command: test + args: "--all \ + --all-features" + + check_no_std: + name: Check no_std + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Install Rust (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + target: thumbv6m-none-eabi + override: true + + - name: Install Rust ARM64 (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + target: aarch64-unknown-none + override: true + + - uses: actions/cache@v2 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: ark-nonnative-field + run: | + cargo build --no-default-features --target aarch64-unknown-none + cargo check --examples --no-default-features --target aarch64-unknown-none diff --git a/arkworks/nonnative/.gitignore b/arkworks/nonnative/.gitignore new file mode 100644 index 00000000..448a8bb6 --- /dev/null +++ b/arkworks/nonnative/.gitignore @@ -0,0 +1,12 @@ +target +Cargo.lock +.DS_Store +.idea +*.iml +*.ipynb_checkpoints +*.pyc +*.sage.py +params +*.swp +*.swo + diff --git a/arkworks/nonnative/CHANGELOG.md b/arkworks/nonnative/CHANGELOG.md new file mode 100644 index 00000000..afdce49f --- /dev/null +++ b/arkworks/nonnative/CHANGELOG.md @@ -0,0 +1,25 @@ +# CHANGELOG + +## Pending + +### Breaking changes + +### Features + +### Improvements + +### Bug fixes + +## v0.3.0 + +### Breaking changes + +- [\#43](https://github.com/arkworks-rs/nonnative/pull/43) Add padding to allocated nonnative element's `to_bytes`. + +### Features + +### Improvements + +### Bug fixes + +## v0.2.0 (Initial release of ark-nonnative) \ No newline at end of file diff --git a/arkworks/nonnative/Cargo.toml b/arkworks/nonnative/Cargo.toml new file mode 100644 index 00000000..0aef9aa9 --- /dev/null +++ b/arkworks/nonnative/Cargo.toml @@ -0,0 +1,71 @@ +[package] +name = "ark-nonnative-field" +version = "0.3.0" +authors = [ + "Weikeng Chen", + "Alessandro Chiesa", + "Emma Dauterman", + "Nicholas Ward" +] +description = "Constraints for nonnative field gadgets" +homepage = "https://arkworks.rs" +repository = "https://github.com/arkworks-rs/nonnative" +documentation = "https://docs.rs/ark-nonnative-field/" +keywords = ["r1cs", "nonnative", "finite-fields"] +categories = ["cryptography"] +include = ["Cargo.toml", "src", "tests", "README.md"] +license = "MIT/Apache-2.0" +edition = "2018" + +[profile.release] +debug = true +panic = 'abort' + +[profile.test] +opt-level = 3 +lto = "thin" +incremental = true + +[profile.bench] +opt-level = 3 +lto = "fat" +incremental = true +debug = false +rpath = false +debug-assertions = false + +################################# Dependencies ################################ + +[dependencies] +derivative = { version = "2", features = [ "use_core" ] } + +tracing = { version = "0.1", default-features = false, features = [ "attributes" ] } + +ark-ff = { path = "../algebra/ff", version = "^0.3.0", default-features = false } +ark-ec = { path = "../algebra/ec", version = "^0.3.0", default-features = false } +ark-std = { path = "../std", version = "^0.3.0", default-features = false } +ark-relations = { path = "../snark/relations", version = "^0.3.0", default-features = false } +ark-r1cs-std = { path = "../r1cs-std", version = "^0.3.0", default-features = false } + +num-traits = { version = "0.2", default-features = false } 
+num-bigint = { version = "0.4.0", default-features = false } +num-integer = { version = "0.1.44", default-features = false } + +[dev-dependencies] +paste = "1.0" +ark-bls12-377 = { version = "^0.3.0", features = ["curve"], default-features = false } +ark-bls12-381 = { version = "^0.3.0", features = ["curve"], default-features = false } +ark-mnt4-298 = { version = "^0.3.0", features = ["curve"], default-features = false } +ark-mnt4-753 = { version = "^0.3.0", features = ["curve"], default-features = false } +ark-mnt6-298 = { version = "^0.3.0", default-features = false } +ark-mnt6-753 = { version = "^0.3.0", default-features = false } +ark-pallas = { version = "^0.3.0", features = ["curve"], default-features = false } + +[features] +default = [] +std = [ "ark-std/std", "ark-ff/std", "ark-ec/std", "ark-relations/std", "ark-r1cs-std/std", "num-traits/std", "num-bigint/std" ] + +[[bench]] +name = "nonnative-bench" +path = "benches/bench.rs" +harness = false diff --git a/arkworks/nonnative/README.md b/arkworks/nonnative/README.md new file mode 100644 index 00000000..30c8c402 --- /dev/null +++ b/arkworks/nonnative/README.md @@ -0,0 +1,102 @@ +
+<h1 align="center">Non-Native Field Gadgets</h1>
+
+The `nonnative` library provides R1CS constraints for checking computations over a non-native field in a proof system.
+
+The library is based on the constraint-writing framework [arkworks-rs](https://github.com/arkworks-rs) and is released under the MIT License and the Apache v2 License (see [License](#license)).
+
+**WARNING:** This is an academic proof-of-concept prototype; in particular, it has not received careful code review. This implementation is NOT ready for production use.
+
+## Overview
+
+This library implements a field gadget for a prime field `Fp` over another prime field `Fq` where `p != q`.
+
+When writing constraint systems for many cryptographic proofs, we are restricted to a native field (e.g., the scalar field of the pairing-friendly curve).
+This can be inconvenient; for example, the recursive composition of proofs via cycles of curves requires the verifier to compute over a non-native field.
+
+The library makes it possible to write computations over a non-native field in the same way one would write computations over the native field. This naturally introduces additional overhead, which we minimize using a variety of optimizations.
+
+## Usage
+
+Because the non-native field implements the `FieldVar` trait in arkworks, we can treat it like a native field variable (`FpVar`).
+
+We can do the standard field operations, such as `+`, `-`, and `*`. See the following example:
+
+```rust
+let a = NonNativeFieldVar::<TargetField, BaseField>::new_witness(ns!(cs, "a"), || Ok(a_value))?;
+let b = NonNativeFieldVar::<TargetField, BaseField>::new_witness(ns!(cs, "b"), || Ok(b_value))?;
+
+// add
+let a_plus_b = &a + &b;
+
+// sub
+let a_minus_b = &a - &b;
+
+// multiply
+let a_times_b = &a * &b;
+
+// enforce equality
+a.enforce_equal(&b)?;
+```
+
+## Advanced optimization
+
+After each multiplication, our library internally performs a *reduce* operation, which reduces an intermediate type `NonNativeFieldMulResultVar` to the normalized type `NonNativeFieldVar`.
+This enables a user to seamlessly perform a sequence of operations without worrying about the underlying details.
+
+However, this operation is expensive and is sometimes avoidable. We can reduce the number of constraints by using this intermediate type, which only supports additions. To multiply, it must be reduced back to `NonNativeFieldVar`. See below for a skeleton example.
+
+---
+
+To compute `a * b + c * d`, the straightforward (but more expensive) implementation is as follows:
+
+```
+let a_times_b = &a * &b;
+let c_times_d = &c * &d;
+let res = &a_times_b + &c_times_d;
+```
+
+This performs two *reduce* operations in total, one for each multiplication.
+
+---
+
+We can save one reduction by using `NonNativeFieldMulResultVar`, as follows:
+
+```
+let a_times_b = a.mul_without_reduce(&b)?;
+let c_times_d = c.mul_without_reduce(&d)?;
+let res = (&a_times_b + &c_times_d)?.reduce()?;
+```
+
+It performs only one *reduce* operation and is roughly 2x faster than the first implementation.
+
+## Inspiration and basic design
+
+The library employs the standard idea of using multiple **limbs** to represent an element of the target field. For example, an element of `TargetField` may be represented by three `BaseField` elements (i.e., the limbs).
+
+```
+TargetField -> limb 1, limb 2, and limb 3 (each is a BaseField element)
+```
+
+After some computation, the limbs grow too large and need to be **reduced** in order to engage in more computation.
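+
+As a rough, purely illustrative example (the actual limb counts and widths are
+chosen by the parameter-search script in the `scripts` subdirectory): to
+simulate a ~381-bit `TargetField` over a ~253-bit `BaseField` with 96 bits per
+limb, one needs `ceil(381 / 96) = 4` limbs:
+
+```
+x = x_3 * 2^(3*96) + x_2 * 2^(2*96) + x_1 * 2^96 + x_0   (highest limb first)
+```
+
+Each limb then occupies 96 of the ~253 usable bits, and the remaining headroom
+absorbs the carries from additions and multiplications until a reduce becomes
+necessary.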
+ +We heavily use the optimization techniques in [[KPS18]](https://akosba.github.io/papers/xjsnark.pdf) and [[OWWB20]](https://eprint.iacr.org/2019/1494). Both works have their own open-source libraries: [xJsnark](https://github.com/akosba/xjsnark) and [bellman-bignat](https://github.com/alex-ozdemir/bellman-bignat). Compared with them, this library works with the arkworks environment and is also optimized for density instead of number of constraints, which is useful for holographic zero-knowledge proofs like [Marlin](https://github.com/arkworks-rs/marlin). + +## License + +The library is licensed under either of the following licenses, at your discretion. + + * Apache License Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +Unless you explicitly state otherwise, any contribution submitted for inclusion in this library by you shall be dual licensed as above (as defined in the Apache v2 License), without any additional terms or conditions. + +## References + +[KPS18]: A. E. Kosba, C. Papamanthou, and E. Shi. "xJsnark: a framework for efficient verifiable computation," in *Proceedings of the 39th Symposium on Security and Privacy*, ser. S&P ’18, 2018, pp. 944–961. + +[OWWB20]: A. Ozdemir, R. S. Wahby, B. Whitehat, and D. Boneh. "Scaling verifiable computation using efficient set accumulators," in *Proceedings of the 29th USENIX Security Symposium*, ser. Security ’20, 2020. diff --git a/arkworks/nonnative/benches/bench.rs b/arkworks/nonnative/benches/bench.rs new file mode 100644 index 00000000..61f2d2ee --- /dev/null +++ b/arkworks/nonnative/benches/bench.rs @@ -0,0 +1,235 @@ +use ark_ff::PrimeField; +use ark_nonnative_field::NonNativeFieldVar; +use ark_r1cs_std::{alloc::AllocVar, eq::EqGadget, fields::FieldVar}; +use ark_relations::{ + ns, + r1cs::{ConstraintSystem, ConstraintSystemRef, OptimizationGoal}, +}; +use ark_std::rand::RngCore; + +const NUM_REPETITIONS: usize = 1; + +fn get_density(cs: &ConstraintSystemRef) -> usize { + match cs { + ConstraintSystemRef::None => panic!("Constraint system is none."), + ConstraintSystemRef::CS(r) => { + let mut cs_bak = r.borrow().clone(); + + cs_bak.finalize(); + let matrices = cs_bak.to_matrices().unwrap(); + + matrices.a_num_non_zero + matrices.b_num_non_zero + matrices.c_num_non_zero + } + } +} + +fn allocation( + cs: ConstraintSystemRef, + rng: &mut R, +) -> (usize, usize) { + let a_native = TargetField::rand(rng); + + let constraints_before = cs.num_constraints(); + let nonzeros_before = get_density(&cs); + + // There will be a check that ensures it has the reasonable number of bits + let _ = NonNativeFieldVar::::new_witness(ns!(cs, "alloc a"), || { + Ok(a_native) + }) + .unwrap(); + + let constraints_after = cs.num_constraints(); + let nonzeros_after = get_density(&cs); + + return ( + constraints_after - constraints_before, + nonzeros_after - nonzeros_before, + ); +} + +fn addition( + cs: ConstraintSystemRef, + rng: &mut R, +) -> (usize, usize) { + let a_native = TargetField::rand(rng); + let a = NonNativeFieldVar::::new_witness(ns!(cs, "alloc a"), || { + Ok(a_native) + }) + .unwrap(); + + let b_native = TargetField::rand(rng); + let b = NonNativeFieldVar::::new_witness(ns!(cs, "alloc b"), || { + Ok(b_native) + }) + .unwrap(); + + let constraints_before = cs.num_constraints(); + let nonzeros_before = get_density(&cs); + + let _ = &a + &b; + + let constraints_after = cs.num_constraints(); + 
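+    // (`num_constraints` counts R1CS rows, while `get_density` counts the
+    // non-zero entries across the A, B, and C matrices, the quantity that the
+    // weight-optimized mode tries to minimize.)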
let nonzeros_after = get_density(&cs); + + return ( + constraints_after - constraints_before, + nonzeros_after - nonzeros_before, + ); +} + +fn equality( + cs: ConstraintSystemRef, + rng: &mut R, +) -> (usize, usize) { + let a_native = TargetField::rand(rng); + let a1 = NonNativeFieldVar::::new_witness(ns!(cs, "alloc a1"), || { + Ok(a_native) + }) + .unwrap(); + let a2 = NonNativeFieldVar::::new_witness(ns!(cs, "alloc a2"), || { + Ok(a_native) + }) + .unwrap(); + + let constraints_before = cs.num_constraints(); + let nonzeros_before = get_density(&cs); + + a1.enforce_equal(&a2).unwrap(); + + let constraints_after = cs.num_constraints(); + let nonzeros_after = get_density(&cs); + + return ( + constraints_after - constraints_before, + nonzeros_after - nonzeros_before, + ); +} + +fn multiplication( + cs: ConstraintSystemRef, + rng: &mut R, +) -> (usize, usize) { + let a_native = TargetField::rand(rng); + let a = NonNativeFieldVar::::new_witness(ns!(cs, "initial a"), || { + Ok(a_native) + }) + .unwrap(); + + let b_native = TargetField::rand(rng); + let b = NonNativeFieldVar::::new_witness(ns!(cs, "initial b"), || { + Ok(b_native) + }) + .unwrap(); + + let constraints_before = cs.num_constraints(); + let nonzeros_before = get_density(&cs); + + let _ = &a * &b; + + let constraints_after = cs.num_constraints(); + let nonzeros_after = get_density(&cs); + + return ( + constraints_after - constraints_before, + nonzeros_after - nonzeros_before, + ); +} + +fn inverse( + cs: ConstraintSystemRef, + rng: &mut R, +) -> (usize, usize) { + let num_native = TargetField::rand(rng); + let num = NonNativeFieldVar::::new_witness(ns!(cs, "alloc"), || { + Ok(num_native) + }) + .unwrap(); + + let constraints_before = cs.num_constraints(); + let nonzeros_before = get_density(&cs); + + let _ = num.inverse().unwrap(); + + let constraints_after = cs.num_constraints(); + let nonzeros_after = get_density(&cs); + + return ( + constraints_after - constraints_before, + nonzeros_after - nonzeros_before, + ); +} + +macro_rules! nonnative_bench_individual { + ($bench_method:ident, $bench_name:ident, $bench_target_field:ty, $bench_base_field:ty) => { + let rng = &mut ark_std::test_rng(); + let mut num_constraints = 0; + let mut num_nonzeros = 0; + for _ in 0..NUM_REPETITIONS { + let cs_sys = ConstraintSystem::<$bench_base_field>::new(); + let cs = ConstraintSystemRef::new(cs_sys); + cs.set_optimization_goal(OptimizationGoal::Constraints); + + let (cur_constraints, cur_nonzeros) = + $bench_method::<$bench_target_field, $bench_base_field, _>(cs.clone(), rng); + + num_constraints += cur_constraints; + num_nonzeros += cur_nonzeros; + + assert!(cs.is_satisfied().unwrap()); + } + let average_constraints = num_constraints / NUM_REPETITIONS; + let average_nonzeros = num_nonzeros / NUM_REPETITIONS; + println!( + "{} takes: {} constraints, {} non-zeros", + stringify!($bench_method), + average_constraints, + average_nonzeros, + ); + }; +} + +macro_rules! 
nonnative_bench { + ($bench_name:ident, $bench_target_field:ty, $bench_base_field:ty) => { + println!( + "For {} to simulate {}", + stringify!($bench_base_field), + stringify!($bench_target_field), + ); + nonnative_bench_individual!( + allocation, + $bench_name, + $bench_target_field, + $bench_base_field + ); + nonnative_bench_individual!( + addition, + $bench_name, + $bench_target_field, + $bench_base_field + ); + nonnative_bench_individual!( + multiplication, + $bench_name, + $bench_target_field, + $bench_base_field + ); + nonnative_bench_individual!( + equality, + $bench_name, + $bench_target_field, + $bench_base_field + ); + nonnative_bench_individual!(inverse, $bench_name, $bench_target_field, $bench_base_field); + println!("----------------------") + }; +} + +fn main() { + nonnative_bench!(MNT46Small, ark_mnt4_298::Fr, ark_mnt6_298::Fr); + nonnative_bench!(MNT64Small, ark_mnt6_298::Fr, ark_mnt4_298::Fr); + nonnative_bench!(MNT46Big, ark_mnt4_753::Fr, ark_mnt6_753::Fr); + nonnative_bench!(MNT64Big, ark_mnt6_753::Fr, ark_mnt4_753::Fr); + nonnative_bench!(BLS12MNT4Small, ark_bls12_381::Fr, ark_mnt4_298::Fr); + nonnative_bench!(BLS12, ark_bls12_381::Fq, ark_bls12_381::Fr); + nonnative_bench!(MNT6BigMNT4Small, ark_mnt6_753::Fr, ark_mnt4_298::Fr); +} diff --git a/arkworks/nonnative/src/allocated_nonnative_field_mul_result_var.rs b/arkworks/nonnative/src/allocated_nonnative_field_mul_result_var.rs new file mode 100644 index 00000000..736ba590 --- /dev/null +++ b/arkworks/nonnative/src/allocated_nonnative_field_mul_result_var.rs @@ -0,0 +1,289 @@ +use crate::params::{get_params, OptimizationType}; +use crate::reduce::{bigint_to_basefield, limbs_to_bigint, Reducer}; +use crate::AllocatedNonNativeFieldVar; +use ark_ff::{FpParameters, PrimeField}; +use ark_r1cs_std::fields::fp::FpVar; +use ark_r1cs_std::prelude::*; +use ark_relations::r1cs::{OptimizationGoal, Result as R1CSResult}; +use ark_relations::{ns, r1cs::ConstraintSystemRef}; +use ark_std::marker::PhantomData; +use ark_std::vec::Vec; +use num_bigint::BigUint; + +/// The allocated form of `NonNativeFieldMulResultVar` (introduced below) +#[derive(Debug)] +#[must_use] +pub struct AllocatedNonNativeFieldMulResultVar { + /// Constraint system reference + pub cs: ConstraintSystemRef, + /// Limbs of the intermediate representations + pub limbs: Vec>, + /// The cumulative num of additions + pub prod_of_num_of_additions: BaseField, + #[doc(hidden)] + pub target_phantom: PhantomData, +} + +impl + From<&AllocatedNonNativeFieldVar> + for AllocatedNonNativeFieldMulResultVar +{ + fn from(src: &AllocatedNonNativeFieldVar) -> Self { + let params = get_params( + TargetField::size_in_bits(), + BaseField::size_in_bits(), + src.get_optimization_type(), + ); + + let mut limbs = src.limbs.clone(); + limbs.reverse(); + limbs.resize(2 * params.num_limbs - 1, FpVar::::zero()); + limbs.reverse(); + + let prod_of_num_of_additions = src.num_of_additions_over_normal_form + &BaseField::one(); + + Self { + cs: src.cs(), + limbs, + prod_of_num_of_additions, + target_phantom: PhantomData, + } + } +} + +impl + AllocatedNonNativeFieldMulResultVar +{ + /// Get the CS + pub fn cs(&self) -> ConstraintSystemRef { + self.cs.clone() + } + + /// Get the value of the multiplication result + pub fn value(&self) -> R1CSResult { + let params = get_params( + TargetField::size_in_bits(), + BaseField::size_in_bits(), + self.get_optimization_type(), + ); + + let p_representations = + AllocatedNonNativeFieldVar::::get_limbs_representations_from_big_integer( + &::Params::MODULUS, + 
self.get_optimization_type() + )?; + let p_bigint = limbs_to_bigint(params.bits_per_limb, &p_representations); + + let mut limbs_values = Vec::::new(); + for limb in self.limbs.iter() { + limbs_values.push(limb.value().unwrap_or_default()); + } + let value_bigint = limbs_to_bigint(params.bits_per_limb, &limbs_values); + + let res = bigint_to_basefield::(&(value_bigint % p_bigint)); + Ok(res) + } + + /// Constraints for reducing the result of a multiplication mod p, to get an original representation. + pub fn reduce(&self) -> R1CSResult> { + let params = get_params( + TargetField::size_in_bits(), + BaseField::size_in_bits(), + self.get_optimization_type(), + ); + + // Step 1: get p + let p_representations = + AllocatedNonNativeFieldVar::::get_limbs_representations_from_big_integer( + &::Params::MODULUS, + self.get_optimization_type() + )?; + let p_bigint = limbs_to_bigint(params.bits_per_limb, &p_representations); + + let mut p_gadget_limbs = Vec::new(); + for limb in p_representations.iter() { + p_gadget_limbs.push(FpVar::::new_constant(self.cs(), limb)?); + } + let p_gadget = AllocatedNonNativeFieldVar:: { + cs: self.cs(), + limbs: p_gadget_limbs, + num_of_additions_over_normal_form: BaseField::one(), + is_in_the_normal_form: false, + target_phantom: PhantomData, + }; + + // Step 2: compute surfeit + let surfeit = overhead!(self.prod_of_num_of_additions + BaseField::one()) + 1 + 1; + + // Step 3: allocate k + let k_bits = { + let mut res = Vec::new(); + + let mut limbs_values = Vec::::new(); + for limb in self.limbs.iter() { + limbs_values.push(limb.value().unwrap_or_default()); + } + + let value_bigint = limbs_to_bigint(params.bits_per_limb, &limbs_values); + let mut k_cur = value_bigint / p_bigint; + + let total_len = TargetField::size_in_bits() + surfeit; + + for _ in 0..total_len { + res.push(Boolean::::new_witness(self.cs(), || { + Ok(&k_cur % 2u64 == BigUint::from(1u64)) + })?); + k_cur /= 2u64; + } + res + }; + + let k_limbs = { + let zero = FpVar::Constant(BaseField::zero()); + let mut limbs = Vec::new(); + + let mut k_bits_cur = k_bits.clone(); + + for i in 0..params.num_limbs { + let this_limb_size = if i != params.num_limbs - 1 { + params.bits_per_limb + } else { + k_bits.len() - (params.num_limbs - 1) * params.bits_per_limb + }; + + let this_limb_bits = k_bits_cur[0..this_limb_size].to_vec(); + k_bits_cur = k_bits_cur[this_limb_size..].to_vec(); + + let mut limb = zero.clone(); + let mut cur = BaseField::one(); + + for bit in this_limb_bits.iter() { + limb += &(FpVar::::from(bit.clone()) * cur); + cur.double_in_place(); + } + limbs.push(limb); + } + + limbs.reverse(); + limbs + }; + + let k_gadget = AllocatedNonNativeFieldVar:: { + cs: self.cs(), + limbs: k_limbs, + num_of_additions_over_normal_form: self.prod_of_num_of_additions, + is_in_the_normal_form: false, + target_phantom: PhantomData, + }; + + let cs = self.cs(); + + let r_gadget = AllocatedNonNativeFieldVar::::new_witness( + ns!(cs, "r"), + || Ok(self.value()?), + )?; + + let params = get_params( + TargetField::size_in_bits(), + BaseField::size_in_bits(), + self.get_optimization_type(), + ); + + // Step 1: reduce `self` and `other` if neceessary + let mut prod_limbs = Vec::new(); + let zero = FpVar::::zero(); + + for _ in 0..2 * params.num_limbs - 1 { + prod_limbs.push(zero.clone()); + } + + for i in 0..params.num_limbs { + for j in 0..params.num_limbs { + prod_limbs[i + j] = &prod_limbs[i + j] + (&p_gadget.limbs[i] * &k_gadget.limbs[j]); + } + } + + let mut kp_plus_r_gadget = Self { + cs: cs, + limbs: prod_limbs, + 
prod_of_num_of_additions: (p_gadget.num_of_additions_over_normal_form + + BaseField::one()) + * (k_gadget.num_of_additions_over_normal_form + BaseField::one()), + target_phantom: PhantomData, + }; + + let kp_plus_r_limbs_len = kp_plus_r_gadget.limbs.len(); + for (i, limb) in r_gadget.limbs.iter().rev().enumerate() { + kp_plus_r_gadget.limbs[kp_plus_r_limbs_len - 1 - i] += limb; + } + + Reducer::::group_and_check_equality( + surfeit, + 2 * params.bits_per_limb, + params.bits_per_limb, + &self.limbs, + &kp_plus_r_gadget.limbs, + )?; + + Ok(r_gadget) + } + + /// Add unreduced elements. + #[tracing::instrument(target = "r1cs")] + pub fn add(&self, other: &Self) -> R1CSResult { + assert_eq!(self.get_optimization_type(), other.get_optimization_type()); + + let mut new_limbs = Vec::new(); + + for (l1, l2) in self.limbs.iter().zip(other.limbs.iter()) { + let new_limb = l1 + l2; + new_limbs.push(new_limb); + } + + Ok(Self { + cs: self.cs(), + limbs: new_limbs, + prod_of_num_of_additions: self.prod_of_num_of_additions + + other.prod_of_num_of_additions, + target_phantom: PhantomData, + }) + } + + /// Add native constant elem + #[tracing::instrument(target = "r1cs")] + pub fn add_constant(&self, other: &TargetField) -> R1CSResult { + let mut other_limbs = + AllocatedNonNativeFieldVar::::get_limbs_representations( + other, + self.get_optimization_type(), + )?; + other_limbs.reverse(); + + let mut new_limbs = Vec::new(); + + for (i, limb) in self.limbs.iter().rev().enumerate() { + if i < other_limbs.len() { + new_limbs.push(limb + other_limbs[i]); + } else { + new_limbs.push((*limb).clone()); + } + } + + new_limbs.reverse(); + + Ok(Self { + cs: self.cs(), + limbs: new_limbs, + prod_of_num_of_additions: self.prod_of_num_of_additions + BaseField::one(), + target_phantom: PhantomData, + }) + } + + pub(crate) fn get_optimization_type(&self) -> OptimizationType { + match self.cs().optimization_goal() { + OptimizationGoal::None => OptimizationType::Constraints, + OptimizationGoal::Constraints => OptimizationType::Constraints, + OptimizationGoal::Weight => OptimizationType::Weight, + } + } +} diff --git a/arkworks/nonnative/src/allocated_nonnative_field_var.rs b/arkworks/nonnative/src/allocated_nonnative_field_var.rs new file mode 100644 index 00000000..78cb55d7 --- /dev/null +++ b/arkworks/nonnative/src/allocated_nonnative_field_var.rs @@ -0,0 +1,865 @@ +use crate::params::{get_params, OptimizationType}; +use crate::reduce::{bigint_to_basefield, limbs_to_bigint, Reducer}; +use crate::AllocatedNonNativeFieldMulResultVar; +use ark_ff::{BigInteger, FpParameters, PrimeField}; +use ark_r1cs_std::fields::fp::FpVar; +use ark_r1cs_std::prelude::*; +use ark_r1cs_std::ToConstraintFieldGadget; +use ark_relations::r1cs::{OptimizationGoal, Result as R1CSResult}; +use ark_relations::{ + ns, + r1cs::{ConstraintSystemRef, Namespace, SynthesisError}, +}; +use ark_std::cmp::{max, min}; +use ark_std::marker::PhantomData; +use ark_std::{borrow::Borrow, vec, vec::Vec}; + +/// The allocated version of `NonNativeFieldVar` (introduced below) +#[derive(Debug)] +#[must_use] +pub struct AllocatedNonNativeFieldVar { + /// Constraint system reference + pub cs: ConstraintSystemRef, + /// The limbs, each of which is a BaseField gadget. + pub limbs: Vec>, + /// Number of additions done over this gadget, using which the gadget decides when to reduce. 
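+    /// (Stored as a `BaseField` counter so that the reducer can bound worst-case
+    /// limb growth via the `overhead!` estimate without inspecting limb values.)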
+ pub num_of_additions_over_normal_form: BaseField, + /// Whether the limb representation is the normal form (using only the bits specified in the parameters, and the representation is strictly within the range of TargetField). + pub is_in_the_normal_form: bool, + #[doc(hidden)] + pub target_phantom: PhantomData, +} + +impl + AllocatedNonNativeFieldVar +{ + /// Return cs + pub fn cs(&self) -> ConstraintSystemRef { + self.cs.clone() + } + + /// Obtain the value of limbs + pub fn limbs_to_value( + limbs: Vec, + optimization_type: OptimizationType, + ) -> TargetField { + let params = get_params( + TargetField::size_in_bits(), + BaseField::size_in_bits(), + optimization_type, + ); + + let mut base_repr: ::BigInt = TargetField::one().into_repr(); + + // Convert 2^{(params.bits_per_limb - 1)} into the TargetField and then double the base + // This is because 2^{(params.bits_per_limb)} might indeed be larger than the target field's prime. + base_repr.muln((params.bits_per_limb - 1) as u32); + let mut base: TargetField = TargetField::from_repr(base_repr).unwrap(); + base = base + &base; + + let mut result = TargetField::zero(); + let mut power = TargetField::one(); + + for limb in limbs.iter().rev() { + let mut val = TargetField::zero(); + let mut cur = TargetField::one(); + + for bit in limb.into_repr().to_bits_be().iter().rev() { + if *bit { + val += &cur; + } + cur.double_in_place(); + } + + result += &(val * power); + power *= &base; + } + + result + } + + /// Obtain the value of a nonnative field element + pub fn value(&self) -> R1CSResult { + let mut limbs = Vec::new(); + for limb in self.limbs.iter() { + limbs.push(limb.value()?); + } + + Ok(Self::limbs_to_value(limbs, self.get_optimization_type())) + } + + /// Obtain the nonnative field element of a constant value + pub fn constant(cs: ConstraintSystemRef, value: TargetField) -> R1CSResult { + let optimization_type = match cs.optimization_goal() { + OptimizationGoal::None => OptimizationType::Constraints, + OptimizationGoal::Constraints => OptimizationType::Constraints, + OptimizationGoal::Weight => OptimizationType::Weight, + }; + + let limbs_value = Self::get_limbs_representations(&value, optimization_type)?; + + let mut limbs = Vec::new(); + + for limb_value in limbs_value.iter() { + limbs.push(FpVar::::new_constant( + ns!(cs, "limb"), + limb_value, + )?); + } + + Ok(Self { + cs, + limbs, + num_of_additions_over_normal_form: BaseField::zero(), + is_in_the_normal_form: true, + target_phantom: PhantomData, + }) + } + + /// Obtain the nonnative field element of one + pub fn one(cs: ConstraintSystemRef) -> R1CSResult { + Self::constant(cs, TargetField::one()) + } + + /// Obtain the nonnative field element of zero + pub fn zero(cs: ConstraintSystemRef) -> R1CSResult { + Self::constant(cs, TargetField::zero()) + } + + /// Add a nonnative field element + #[tracing::instrument(target = "r1cs")] + pub fn add(&self, other: &Self) -> R1CSResult { + assert_eq!(self.get_optimization_type(), other.get_optimization_type()); + + let mut limbs = Vec::new(); + for (this_limb, other_limb) in self.limbs.iter().zip(other.limbs.iter()) { + limbs.push(this_limb + other_limb); + } + + let mut res = Self { + cs: self.cs(), + limbs, + num_of_additions_over_normal_form: self + .num_of_additions_over_normal_form + .add(&other.num_of_additions_over_normal_form) + .add(&BaseField::one()), + is_in_the_normal_form: false, + target_phantom: PhantomData, + }; + + Reducer::::post_add_reduce(&mut res)?; + Ok(res) + } + + /// Add a constant + #[tracing::instrument(target = 
"r1cs")] + pub fn add_constant(&self, other: &TargetField) -> R1CSResult { + let other_limbs = Self::get_limbs_representations(other, self.get_optimization_type())?; + + let mut limbs = Vec::new(); + for (this_limb, other_limb) in self.limbs.iter().zip(other_limbs.iter()) { + limbs.push(this_limb + *other_limb); + } + + let mut res = Self { + cs: self.cs(), + limbs, + num_of_additions_over_normal_form: self + .num_of_additions_over_normal_form + .add(&BaseField::one()), + is_in_the_normal_form: false, + target_phantom: PhantomData, + }; + + Reducer::::post_add_reduce(&mut res)?; + + Ok(res) + } + + /// Subtract a nonnative field element, without the final reduction step + #[tracing::instrument(target = "r1cs")] + pub fn sub_without_reduce(&self, other: &Self) -> R1CSResult { + assert_eq!(self.get_optimization_type(), other.get_optimization_type()); + + let params = get_params( + TargetField::size_in_bits(), + BaseField::size_in_bits(), + self.get_optimization_type(), + ); + + // Step 1: reduce the `other` if needed + let mut surfeit = overhead!(other.num_of_additions_over_normal_form + BaseField::one()) + 1; + let mut other = other.clone(); + if (surfeit + params.bits_per_limb > BaseField::size_in_bits() - 1) + || (surfeit + + (TargetField::size_in_bits() - params.bits_per_limb * (params.num_limbs - 1)) + > BaseField::size_in_bits() - 1) + { + Reducer::reduce(&mut other)?; + surfeit = overhead!(other.num_of_additions_over_normal_form + BaseField::one()) + 1; + } + + // Step 2: construct the padding + let mut pad_non_top_limb_repr: ::BigInt = + BaseField::one().into_repr(); + let mut pad_top_limb_repr: ::BigInt = pad_non_top_limb_repr; + + pad_non_top_limb_repr.muln((surfeit + params.bits_per_limb) as u32); + let pad_non_top_limb = BaseField::from_repr(pad_non_top_limb_repr).unwrap(); + + pad_top_limb_repr.muln( + (surfeit + + (TargetField::size_in_bits() - params.bits_per_limb * (params.num_limbs - 1))) + as u32, + ); + let pad_top_limb = BaseField::from_repr(pad_top_limb_repr).unwrap(); + + let mut pad_limbs = Vec::new(); + pad_limbs.push(pad_top_limb); + for _ in 0..self.limbs.len() - 1 { + pad_limbs.push(pad_non_top_limb); + } + + // Step 3: prepare to pad the padding to k * p for some k + let pad_to_kp_gap = Self::limbs_to_value(pad_limbs, self.get_optimization_type()).neg(); + let pad_to_kp_limbs = + Self::get_limbs_representations(&pad_to_kp_gap, self.get_optimization_type())?; + + // Step 4: the result is self + pad + pad_to_kp - other + let mut limbs = Vec::new(); + for (i, ((this_limb, other_limb), pad_to_kp_limb)) in self + .limbs + .iter() + .zip(other.limbs.iter()) + .zip(pad_to_kp_limbs.iter()) + .enumerate() + { + if i != 0 { + limbs.push(this_limb + pad_non_top_limb + *pad_to_kp_limb - other_limb); + } else { + limbs.push(this_limb + pad_top_limb + *pad_to_kp_limb - other_limb); + } + } + + let result = AllocatedNonNativeFieldVar:: { + cs: self.cs(), + limbs, + num_of_additions_over_normal_form: self.num_of_additions_over_normal_form + + (other.num_of_additions_over_normal_form + BaseField::one()) + + (other.num_of_additions_over_normal_form + BaseField::one()), + is_in_the_normal_form: false, + target_phantom: PhantomData, + }; + + Ok(result) + } + + /// Subtract a nonnative field element + #[tracing::instrument(target = "r1cs")] + pub fn sub(&self, other: &Self) -> R1CSResult { + assert_eq!(self.get_optimization_type(), other.get_optimization_type()); + + let mut result = self.sub_without_reduce(other)?; + Reducer::::post_add_reduce(&mut result)?; + Ok(result) + } + + /// 
Subtract a constant + #[tracing::instrument(target = "r1cs")] + pub fn sub_constant(&self, other: &TargetField) -> R1CSResult { + self.sub(&Self::constant(self.cs(), *other)?) + } + + /// Multiply a nonnative field element + #[tracing::instrument(target = "r1cs")] + pub fn mul(&self, other: &Self) -> R1CSResult { + assert_eq!(self.get_optimization_type(), other.get_optimization_type()); + + self.mul_without_reduce(&other)?.reduce() + } + + /// Multiply a constant + pub fn mul_constant(&self, other: &TargetField) -> R1CSResult { + self.mul(&Self::constant(self.cs(), *other)?) + } + + /// Compute the negate of a nonnative field element + #[tracing::instrument(target = "r1cs")] + pub fn negate(&self) -> R1CSResult { + Self::zero(self.cs())?.sub(self) + } + + /// Compute the inverse of a nonnative field element + #[tracing::instrument(target = "r1cs")] + pub fn inverse(&self) -> R1CSResult { + let inverse = Self::new_witness(self.cs(), || { + Ok(self.value()?.inverse().unwrap_or_else(TargetField::zero)) + })?; + + let actual_result = self.clone().mul(&inverse)?; + actual_result.conditional_enforce_equal(&Self::one(self.cs())?, &Boolean::TRUE)?; + Ok(inverse) + } + + /// Convert a `TargetField` element into limbs (not constraints) + /// This is an internal function that would be reused by a number of other functions + pub fn get_limbs_representations( + elem: &TargetField, + optimization_type: OptimizationType, + ) -> R1CSResult> { + Self::get_limbs_representations_from_big_integer(&elem.into_repr(), optimization_type) + } + + /// Obtain the limbs directly from a big int + pub fn get_limbs_representations_from_big_integer( + elem: &::BigInt, + optimization_type: OptimizationType, + ) -> R1CSResult> { + let params = get_params( + TargetField::size_in_bits(), + BaseField::size_in_bits(), + optimization_type, + ); + + // push the lower limbs first + let mut limbs: Vec = Vec::new(); + let mut cur = *elem; + for _ in 0..params.num_limbs { + let cur_bits = cur.to_bits_be(); // `to_bits` is big endian + let cur_mod_r = ::BigInt::from_bits_be( + &cur_bits[cur_bits.len() - params.bits_per_limb..], + ); // therefore, the lowest `bits_per_non_top_limb` bits is what we want. + limbs.push(BaseField::from_repr(cur_mod_r).unwrap()); + cur.divn(params.bits_per_limb as u32); + } + + // then we reserve, so that the limbs are ``big limb first'' + limbs.reverse(); + + Ok(limbs) + } + + /// for advanced use, multiply and output the intermediate representations (without reduction) + /// This intermediate representations can be added with each other, and they can later be reduced back to the `NonNativeFieldVar`. 
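+    /// (This mirrors the README's `a * b + c * d` example: two calls to
+    /// `mul_without_reduce`, one addition of the unreduced results, and then a
+    /// single `reduce()`.)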
+ #[tracing::instrument(target = "r1cs")] + pub fn mul_without_reduce( + &self, + other: &Self, + ) -> R1CSResult> { + assert_eq!(self.get_optimization_type(), other.get_optimization_type()); + + let params = get_params( + TargetField::size_in_bits(), + BaseField::size_in_bits(), + self.get_optimization_type(), + ); + + // Step 1: reduce `self` and `other` if neceessary + let mut self_reduced = self.clone(); + let mut other_reduced = other.clone(); + Reducer::::pre_mul_reduce(&mut self_reduced, &mut other_reduced)?; + + let mut prod_limbs = Vec::new(); + if self.get_optimization_type() == OptimizationType::Weight { + let zero = FpVar::::zero(); + + for _ in 0..2 * params.num_limbs - 1 { + prod_limbs.push(zero.clone()); + } + + for i in 0..params.num_limbs { + for j in 0..params.num_limbs { + prod_limbs[i + j] = + &prod_limbs[i + j] + (&self_reduced.limbs[i] * &other_reduced.limbs[j]); + } + } + } else { + let cs = self.cs().or(other.cs()); + + for z_index in 0..2 * params.num_limbs - 1 { + prod_limbs.push(FpVar::new_witness(ns!(cs, "limb product"), || { + let mut z_i = BaseField::zero(); + for i in 0..=min(params.num_limbs - 1, z_index) { + let j = z_index - i; + if j < params.num_limbs { + z_i += &self_reduced.limbs[i] + .value()? + .mul(&other_reduced.limbs[j].value()?); + } + } + + Ok(z_i) + })?); + } + + for c in 0..(2 * params.num_limbs - 1) { + let c_pows: Vec<_> = (0..(2 * params.num_limbs - 1)) + .map(|i| BaseField::from((c + 1) as u128).pow(&vec![i as u64])) + .collect(); + + let x = self_reduced + .limbs + .iter() + .zip(c_pows.iter()) + .map(|(var, c_pow)| var * *c_pow) + .fold(FpVar::zero(), |sum, i| sum + i); + + let y = other_reduced + .limbs + .iter() + .zip(c_pows.iter()) + .map(|(var, c_pow)| var * *c_pow) + .fold(FpVar::zero(), |sum, i| sum + i); + + let z = prod_limbs + .iter() + .zip(c_pows.iter()) + .map(|(var, c_pow)| var * *c_pow) + .fold(FpVar::zero(), |sum, i| sum + i); + + z.enforce_equal(&(x * y))?; + } + } + + Ok(AllocatedNonNativeFieldMulResultVar { + cs: self.cs(), + limbs: prod_limbs, + prod_of_num_of_additions: (self_reduced.num_of_additions_over_normal_form + + BaseField::one()) + * (other_reduced.num_of_additions_over_normal_form + BaseField::one()), + target_phantom: PhantomData, + }) + } + + pub(crate) fn frobenius_map(&self, _power: usize) -> R1CSResult { + Ok(self.clone()) + } + + pub(crate) fn conditional_enforce_equal( + &self, + other: &Self, + should_enforce: &Boolean, + ) -> R1CSResult<()> { + assert_eq!(self.get_optimization_type(), other.get_optimization_type()); + + let params = get_params( + TargetField::size_in_bits(), + BaseField::size_in_bits(), + self.get_optimization_type(), + ); + + // Get p + let p_representations = + AllocatedNonNativeFieldVar::::get_limbs_representations_from_big_integer( + &::Params::MODULUS, + self.get_optimization_type() + )?; + let p_bigint = limbs_to_bigint(params.bits_per_limb, &p_representations); + + let mut p_gadget_limbs = Vec::new(); + for limb in p_representations.iter() { + p_gadget_limbs.push(FpVar::::Constant(*limb)); + } + let p_gadget = AllocatedNonNativeFieldVar:: { + cs: self.cs(), + limbs: p_gadget_limbs, + num_of_additions_over_normal_form: BaseField::one(), + is_in_the_normal_form: false, + target_phantom: PhantomData, + }; + + // Get delta = self - other + let cs = self.cs().or(other.cs()).or(should_enforce.cs()); + let mut delta = self.sub_without_reduce(other)?; + delta = should_enforce.select(&delta, &Self::zero(cs.clone())?)?; + + // Allocate k = delta / p + let k_gadget = 
FpVar::::new_witness(ns!(cs, "k"), || { + let mut delta_limbs_values = Vec::::new(); + for limb in delta.limbs.iter() { + delta_limbs_values.push(limb.value()?); + } + + let delta_bigint = limbs_to_bigint(params.bits_per_limb, &delta_limbs_values); + + Ok(bigint_to_basefield::(&(delta_bigint / p_bigint))) + })?; + + let surfeit = overhead!(delta.num_of_additions_over_normal_form + BaseField::one()) + 1; + Reducer::::limb_to_bits(&k_gadget, surfeit)?; + + // Compute k * p + let mut kp_gadget_limbs = Vec::new(); + for limb in p_gadget.limbs.iter() { + kp_gadget_limbs.push(limb * &k_gadget); + } + + // Enforce delta = kp + Reducer::::group_and_check_equality( + surfeit, + params.bits_per_limb, + params.bits_per_limb, + &delta.limbs, + &kp_gadget_limbs, + )?; + + Ok(()) + } + + #[tracing::instrument(target = "r1cs")] + pub(crate) fn conditional_enforce_not_equal( + &self, + other: &Self, + should_enforce: &Boolean, + ) -> R1CSResult<()> { + assert_eq!(self.get_optimization_type(), other.get_optimization_type()); + + let cs = self.cs().or(other.cs()).or(should_enforce.cs()); + + let _ = should_enforce + .select(&self.sub(other)?, &Self::one(cs)?)? + .inverse()?; + + Ok(()) + } + + pub(crate) fn get_optimization_type(&self) -> OptimizationType { + match self.cs().optimization_goal() { + OptimizationGoal::None => OptimizationType::Constraints, + OptimizationGoal::Constraints => OptimizationType::Constraints, + OptimizationGoal::Weight => OptimizationType::Weight, + } + } +} + +impl ToBitsGadget + for AllocatedNonNativeFieldVar +{ + #[tracing::instrument(target = "r1cs")] + fn to_bits_le(&self) -> R1CSResult>> { + let params = get_params( + TargetField::size_in_bits(), + BaseField::size_in_bits(), + self.get_optimization_type(), + ); + + // Reduce to the normal form + // Though, a malicious prover can make it slightly larger than p + let mut self_normal = self.clone(); + Reducer::::pre_eq_reduce(&mut self_normal)?; + + // Therefore, we convert it to bits and enforce that it is in the field + let mut bits = Vec::>::new(); + for limb in self_normal.limbs.iter() { + bits.extend_from_slice(&Reducer::::limb_to_bits( + &limb, + params.bits_per_limb, + )?); + } + bits.reverse(); + + let mut b = TargetField::characteristic().to_vec(); + assert_eq!(b[0] % 2, 1); + b[0] -= 1; // This works, because the LSB is one, so there's no borrows. + let run = Boolean::::enforce_smaller_or_equal_than_le(&bits, b)?; + + // We should always end in a "run" of zeros, because + // the characteristic is an odd prime. So, this should + // be empty. 
+ assert!(run.is_empty()); + + Ok(bits) + } +} + +impl ToBytesGadget + for AllocatedNonNativeFieldVar +{ + #[tracing::instrument(target = "r1cs")] + fn to_bytes(&self) -> R1CSResult>> { + let mut bits = self.to_bits_le()?; + + let num_bits = TargetField::BigInt::NUM_LIMBS * 64; + assert!(bits.len() <= num_bits); + bits.resize_with(num_bits, || Boolean::constant(false)); + + let bytes = bits.chunks(8).map(UInt8::from_bits_le).collect(); + Ok(bytes) + } +} + +impl CondSelectGadget + for AllocatedNonNativeFieldVar +{ + #[tracing::instrument(target = "r1cs")] + fn conditionally_select( + cond: &Boolean, + true_value: &Self, + false_value: &Self, + ) -> R1CSResult { + assert_eq!( + true_value.get_optimization_type(), + false_value.get_optimization_type() + ); + + let mut limbs_sel = Vec::with_capacity(true_value.limbs.len()); + + for (x, y) in true_value.limbs.iter().zip(&false_value.limbs) { + limbs_sel.push(FpVar::::conditionally_select(cond, x, y)?); + } + + Ok(Self { + cs: true_value.cs().or(false_value.cs()), + limbs: limbs_sel, + num_of_additions_over_normal_form: max( + true_value.num_of_additions_over_normal_form, + false_value.num_of_additions_over_normal_form, + ), + is_in_the_normal_form: true_value.is_in_the_normal_form + && false_value.is_in_the_normal_form, + target_phantom: PhantomData, + }) + } +} + +impl TwoBitLookupGadget + for AllocatedNonNativeFieldVar +{ + type TableConstant = TargetField; + + #[tracing::instrument(target = "r1cs")] + fn two_bit_lookup( + bits: &[Boolean], + constants: &[Self::TableConstant], + ) -> R1CSResult { + debug_assert!(bits.len() == 2); + debug_assert!(constants.len() == 4); + + let cs = bits.cs(); + + let optimization_type = match cs.optimization_goal() { + OptimizationGoal::None => OptimizationType::Constraints, + OptimizationGoal::Constraints => OptimizationType::Constraints, + OptimizationGoal::Weight => OptimizationType::Weight, + }; + + let params = get_params( + TargetField::size_in_bits(), + BaseField::size_in_bits(), + optimization_type, + ); + let mut limbs_constants = Vec::new(); + for _ in 0..params.num_limbs { + limbs_constants.push(Vec::new()); + } + + for constant in constants.iter() { + let representations = + AllocatedNonNativeFieldVar::::get_limbs_representations( + constant, + optimization_type, + )?; + + for (i, representation) in representations.iter().enumerate() { + limbs_constants[i].push(*representation); + } + } + + let mut limbs = Vec::new(); + for limbs_constant in limbs_constants.iter() { + limbs.push(FpVar::::two_bit_lookup(bits, limbs_constant)?); + } + + Ok(AllocatedNonNativeFieldVar:: { + cs, + limbs, + num_of_additions_over_normal_form: BaseField::zero(), + is_in_the_normal_form: true, + target_phantom: PhantomData, + }) + } +} + +impl ThreeBitCondNegLookupGadget + for AllocatedNonNativeFieldVar +{ + type TableConstant = TargetField; + + #[tracing::instrument(target = "r1cs")] + fn three_bit_cond_neg_lookup( + bits: &[Boolean], + b0b1: &Boolean, + constants: &[Self::TableConstant], + ) -> R1CSResult { + debug_assert!(bits.len() == 3); + debug_assert!(constants.len() == 4); + + let cs = bits.cs().or(b0b1.cs()); + + let optimization_type = match cs.optimization_goal() { + OptimizationGoal::None => OptimizationType::Constraints, + OptimizationGoal::Constraints => OptimizationType::Constraints, + OptimizationGoal::Weight => OptimizationType::Weight, + }; + + let params = get_params( + TargetField::size_in_bits(), + BaseField::size_in_bits(), + optimization_type, + ); + + let mut limbs_constants = Vec::new(); + for _ in 
0..params.num_limbs { + limbs_constants.push(Vec::new()); + } + + for constant in constants.iter() { + let representations = + AllocatedNonNativeFieldVar::::get_limbs_representations( + constant, + optimization_type, + )?; + + for (i, representation) in representations.iter().enumerate() { + limbs_constants[i].push(*representation); + } + } + + let mut limbs = Vec::new(); + for limbs_constant in limbs_constants.iter() { + limbs.push(FpVar::::three_bit_cond_neg_lookup( + bits, + b0b1, + limbs_constant, + )?); + } + + Ok(AllocatedNonNativeFieldVar:: { + cs, + limbs, + num_of_additions_over_normal_form: BaseField::zero(), + is_in_the_normal_form: true, + target_phantom: PhantomData, + }) + } +} + +impl AllocVar + for AllocatedNonNativeFieldVar +{ + fn new_variable>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> R1CSResult { + let ns = cs.into(); + let cs = ns.cs(); + + let optimization_type = match cs.optimization_goal() { + OptimizationGoal::None => OptimizationType::Constraints, + OptimizationGoal::Constraints => OptimizationType::Constraints, + OptimizationGoal::Weight => OptimizationType::Weight, + }; + + let params = get_params( + TargetField::size_in_bits(), + BaseField::size_in_bits(), + optimization_type, + ); + let zero = TargetField::zero(); + + let elem = match f() { + Ok(t) => *(t.borrow()), + Err(_) => zero, + }; + let elem_representations = Self::get_limbs_representations(&elem, optimization_type)?; + let mut limbs = Vec::new(); + + for limb in elem_representations.iter() { + limbs.push(FpVar::::new_variable( + ark_relations::ns!(cs, "alloc"), + || Ok(limb), + mode, + )?); + } + + let num_of_additions_over_normal_form = if mode != AllocationMode::Witness { + BaseField::zero() + } else { + BaseField::one() + }; + + if mode == AllocationMode::Witness { + for limb in limbs.iter().rev().take(params.num_limbs - 1) { + Reducer::::limb_to_bits(limb, params.bits_per_limb)?; + } + + Reducer::::limb_to_bits( + &limbs[0], + TargetField::size_in_bits() - (params.num_limbs - 1) * params.bits_per_limb, + )?; + } + + Ok(Self { + cs, + limbs, + num_of_additions_over_normal_form, + is_in_the_normal_form: mode != AllocationMode::Witness, + target_phantom: PhantomData, + }) + } +} + +impl ToConstraintFieldGadget + for AllocatedNonNativeFieldVar +{ + fn to_constraint_field(&self) -> R1CSResult>> { + // provide a unique representation of the nonnative variable + // step 1: convert it into a bit sequence + let bits = self.to_bits_le()?; + + // step 2: obtain the parameters for weight-optimized (often, fewer limbs) + let params = get_params( + TargetField::size_in_bits(), + BaseField::size_in_bits(), + OptimizationType::Weight, + ); + + // step 3: assemble the limbs + let mut limbs = bits + .chunks(params.bits_per_limb) + .map(|chunk| { + let mut limb = FpVar::::zero(); + let mut w = BaseField::one(); + for b in chunk.iter() { + limb += FpVar::from(b.clone()) * w; + w.double_in_place(); + } + limb + }) + .collect::>>(); + + limbs.reverse(); + + // step 4: output the limbs + Ok(limbs) + } +} + +/* + * Implementation of a few traits + */ + +impl Clone + for AllocatedNonNativeFieldVar +{ + fn clone(&self) -> Self { + AllocatedNonNativeFieldVar { + cs: self.cs(), + limbs: self.limbs.clone(), + num_of_additions_over_normal_form: self.num_of_additions_over_normal_form, + is_in_the_normal_form: self.is_in_the_normal_form, + target_phantom: PhantomData, + } + } +} diff --git a/arkworks/nonnative/src/lib.rs b/arkworks/nonnative/src/lib.rs new file mode 100644 index 
00000000..e34c87b2 --- /dev/null +++ b/arkworks/nonnative/src/lib.rs @@ -0,0 +1,106 @@ +//! This library provides the non-native field gadget for the `arkworks` constraint-writing platform. +//! The non-native field gadget can be used as a standard `FieldVar`, given +//! reasonable non-native gadget parameters. +//! +//! This file contains the implementation of three structs: +//! +//! - `NonNativeFieldParams` specifies the constraint prime field (called `BaseField`), +//! the simulated prime field (called `TargetField`), and internal parameters +//! searched by the Python script (see `README.md`). +//! - `NonNativeFieldVar` implements the `FieldVar` for simulating `TargetField` +//! arithmetic within `BaseField`. +//! - `NonNativeFieldMulResultVar` is an intermediate representation of the +//! result of a multiplication, which is hidden from the `FieldVar` interface +//! and is left for advanced users who want better performance. +//! +//! The Python script mentioned above can be found in the subdirectory `scripts`. + +#![cfg_attr(not(feature = "std"), no_std)] +#![deny( + warnings, + unused, + future_incompatible, + nonstandard_style, + rust_2018_idioms, + missing_docs +)] +#![allow( + clippy::redundant_closure_call, + clippy::enum_glob_use, + clippy::missing_errors_doc, + clippy::cast_possible_truncation, + clippy::unseparated_literal_suffix +)] +#![forbid(unsafe_code)] + +#[macro_use] +extern crate ark_r1cs_std; + +use ark_std::fmt::Debug; + +/// Example parameters for non-native field gadgets: +/// - `BaseField`: the constraint field +/// - `TargetField`: the field being simulated +/// - `num_limbs`: how many limbs are used +/// - `bits_per_limb`: the size of the limbs +pub mod params; +/// a submodule for reducing the representations +#[doc(hidden)] +pub mod reduce; + +/// a macro for computing ceil(log2(x)) for a field element x +#[doc(hidden)] +#[macro_export] +macro_rules! overhead { + ($x:expr) => {{ + use ark_ff::BigInteger; + let num = $x; + let num_bits = num.into_repr().to_bits_be(); + let mut skipped_bits = 0; + for b in num_bits.iter() { + if !*b { + skipped_bits += 1; + } else { + break; + } + } + + let mut is_power_of_2 = true; + for b in num_bits.iter().skip(skipped_bits + 1) { + if *b { + is_power_of_2 = false; + } + } + + if is_power_of_2 { + num_bits.len() - skipped_bits + } else { + num_bits.len() - skipped_bits + 1 + } + }}; +} + +/// Parameters for a specific `NonNativeFieldVar` instantiation +#[derive(Clone, Debug)] +pub struct NonNativeFieldParams { + /// The number of limbs (`BaseField` elements) used to represent a `TargetField` element. Highest limb first. 
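+ /// As an illustrative example (values assumed for intuition, not produced by the parameter search): `num_limbs = 6` with `bits_per_limb = 43` can hold a 253-bit `TargetField` element, since 6 * 43 = 258 >= 253. In practice the values should come from `params::get_params`, which searches for the cheapest combination. 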
+ pub num_limbs: usize, + + /// The number of bits in each limb + pub bits_per_limb: usize, +} + +mod allocated_nonnative_field_var; +pub use allocated_nonnative_field_var::*; + +mod nonnative_field_var; +pub use nonnative_field_var::*; + +mod allocated_nonnative_field_mul_result_var; +pub use allocated_nonnative_field_mul_result_var::*; + +mod nonnative_field_mul_result_var; +pub use nonnative_field_mul_result_var::*; diff --git a/arkworks/nonnative/src/nonnative_field_mul_result_var.rs b/arkworks/nonnative/src/nonnative_field_mul_result_var.rs new file mode 100644 index 00000000..35cf65a0 --- /dev/null +++ b/arkworks/nonnative/src/nonnative_field_mul_result_var.rs @@ -0,0 +1,78 @@ +use crate::{AllocatedNonNativeFieldMulResultVar, NonNativeFieldVar}; +use ark_ff::PrimeField; +use ark_relations::r1cs::Result as R1CSResult; + +/// An intermediate representation of the result of a multiplication, containing more limbs. +/// It is intended for advanced usage, to improve efficiency. +/// +/// That is, instead of calling `mul`, one can call `mul_without_reduce` to +/// obtain this intermediate representation, which can still be added. +/// Then, one can call `reduce` to reduce it back to `NonNativeFieldVar`. +/// This may help cut the number of reduce operations. +#[derive(Debug)] +#[must_use] +pub enum NonNativeFieldMulResultVar<TargetField: PrimeField, BaseField: PrimeField> { + /// as a constant + Constant(TargetField), + /// as an allocated gadget + Var(AllocatedNonNativeFieldMulResultVar<TargetField, BaseField>), +} + +impl<TargetField: PrimeField, BaseField: PrimeField> + NonNativeFieldMulResultVar<TargetField, BaseField> +{ + /// Create a zero `NonNativeFieldMulResultVar` (used for additions) + pub fn zero() -> Self { + Self::Constant(TargetField::zero()) + } + + /// Create a `NonNativeFieldMulResultVar` from a constant + pub fn constant(v: TargetField) -> Self { + Self::Constant(v) + } + + /// Reduce the `NonNativeFieldMulResultVar` back to a `NonNativeFieldVar` + #[tracing::instrument(target = "r1cs")] + pub fn reduce(&self) -> R1CSResult<NonNativeFieldVar<TargetField, BaseField>> { + match self { + Self::Constant(c) => Ok(NonNativeFieldVar::Constant(*c)), + Self::Var(v) => Ok(NonNativeFieldVar::Var(v.reduce()?)), + } + } +} + +impl<TargetField: PrimeField, BaseField: PrimeField> + From<&NonNativeFieldVar<TargetField, BaseField>> + for NonNativeFieldMulResultVar<TargetField, BaseField> +{ + fn from(src: &NonNativeFieldVar<TargetField, BaseField>) -> Self { + match src { + NonNativeFieldVar::Constant(c) => NonNativeFieldMulResultVar::Constant(*c), + NonNativeFieldVar::Var(v) => { + NonNativeFieldMulResultVar::Var(AllocatedNonNativeFieldMulResultVar::< + TargetField, + BaseField, + >::from(v)) + } + } + } +} + +impl_bounded_ops!( + NonNativeFieldMulResultVar<TargetField, BaseField>, + TargetField, + Add, + add, + AddAssign, + add_assign, + |this: &'a NonNativeFieldMulResultVar<TargetField, BaseField>, other: &'a NonNativeFieldMulResultVar<TargetField, BaseField>| { + use NonNativeFieldMulResultVar::*; + match (this, other) { + (Constant(c1), Constant(c2)) => Constant(*c1 + c2), + (Constant(c), Var(v)) | (Var(v), Constant(c)) => Var(v.add_constant(c).unwrap()), + (Var(v1), Var(v2)) => Var(v1.add(v2).unwrap()), + } + }, + |this: &'a NonNativeFieldMulResultVar<TargetField, BaseField>, other: TargetField| { this + &NonNativeFieldMulResultVar::Constant(other) }, + (TargetField: PrimeField, BaseField: PrimeField), +); diff --git a/arkworks/nonnative/src/nonnative_field_var.rs b/arkworks/nonnative/src/nonnative_field_var.rs new file mode 100644 index 00000000..fb144334 --- /dev/null +++ b/arkworks/nonnative/src/nonnative_field_var.rs @@ -0,0 +1,494 @@ +use crate::params::OptimizationType; +use crate::{AllocatedNonNativeFieldVar, NonNativeFieldMulResultVar}; +use ark_ff::PrimeField; +use ark_ff::{to_bytes, FpParameters}; +use ark_r1cs_std::boolean::Boolean; +use 
ark_r1cs_std::fields::fp::FpVar; +use ark_r1cs_std::fields::FieldVar; +use ark_r1cs_std::prelude::*; +use ark_r1cs_std::{R1CSVar, ToConstraintFieldGadget}; +use ark_relations::r1cs::Result as R1CSResult; +use ark_relations::r1cs::{ConstraintSystemRef, Namespace, SynthesisError}; +use ark_std::hash::{Hash, Hasher}; +use ark_std::{borrow::Borrow, vec::Vec}; + +/// A gadget for representing non-native (`TargetField`) field elements over the constraint field (`BaseField`). +#[derive(Clone, Debug)] +#[must_use] +pub enum NonNativeFieldVar { + /// Constant + Constant(TargetField), + /// Allocated gadget + Var(AllocatedNonNativeFieldVar), +} + +impl PartialEq + for NonNativeFieldVar +{ + fn eq(&self, other: &Self) -> bool { + self.value() + .unwrap_or_default() + .eq(&other.value().unwrap_or_default()) + } +} + +impl Eq + for NonNativeFieldVar +{ +} + +impl Hash + for NonNativeFieldVar +{ + fn hash(&self, state: &mut H) { + self.value().unwrap_or_default().hash(state); + } +} + +impl R1CSVar + for NonNativeFieldVar +{ + type Value = TargetField; + + fn cs(&self) -> ConstraintSystemRef { + match self { + Self::Constant(_) => ConstraintSystemRef::None, + Self::Var(a) => a.cs(), + } + } + + fn value(&self) -> R1CSResult { + match self { + Self::Constant(v) => Ok(*v), + Self::Var(v) => v.value(), + } + } +} + +impl From> + for NonNativeFieldVar +{ + fn from(other: Boolean) -> Self { + if let Boolean::Constant(b) = other { + Self::Constant(>::from(b as u128)) + } else { + // `other` is a variable + let one = Self::Constant(TargetField::one()); + let zero = Self::Constant(TargetField::zero()); + Self::conditionally_select(&other, &one, &zero).unwrap() + } + } +} + +impl + From> + for NonNativeFieldVar +{ + fn from(other: AllocatedNonNativeFieldVar) -> Self { + Self::Var(other) + } +} + +impl<'a, TargetField: PrimeField, BaseField: PrimeField> FieldOpsBounds<'a, TargetField, Self> + for NonNativeFieldVar +{ +} + +impl<'a, TargetField: PrimeField, BaseField: PrimeField> + FieldOpsBounds<'a, TargetField, NonNativeFieldVar> + for &'a NonNativeFieldVar +{ +} + +impl FieldVar + for NonNativeFieldVar +{ + fn zero() -> Self { + Self::Constant(TargetField::zero()) + } + + fn one() -> Self { + Self::Constant(TargetField::one()) + } + + fn constant(v: TargetField) -> Self { + Self::Constant(v) + } + + #[tracing::instrument(target = "r1cs")] + fn negate(&self) -> R1CSResult { + match self { + Self::Constant(c) => Ok(Self::Constant(-*c)), + Self::Var(v) => Ok(Self::Var(v.negate()?)), + } + } + + #[tracing::instrument(target = "r1cs")] + fn inverse(&self) -> R1CSResult { + match self { + Self::Constant(c) => Ok(Self::Constant(c.inverse().unwrap_or_default())), + Self::Var(v) => Ok(Self::Var(v.inverse()?)), + } + } + + #[tracing::instrument(target = "r1cs")] + fn frobenius_map(&self, power: usize) -> R1CSResult { + match self { + Self::Constant(c) => Ok(Self::Constant({ + let mut tmp = *c; + tmp.frobenius_map(power); + tmp + })), + Self::Var(v) => Ok(Self::Var(v.frobenius_map(power)?)), + } + } +} + +/****************************************************************************/ +/****************************************************************************/ + +impl_bounded_ops!( + NonNativeFieldVar, + TargetField, + Add, + add, + AddAssign, + add_assign, + |this: &'a NonNativeFieldVar, other: &'a NonNativeFieldVar| { + use NonNativeFieldVar::*; + match (this, other) { + (Constant(c1), Constant(c2)) => Constant(*c1 + c2), + (Constant(c), Var(v)) | (Var(v), Constant(c)) => Var(v.add_constant(c).unwrap()), + (Var(v1), 
Var(v2)) => Var(v1.add(v2).unwrap()), + } + }, + |this: &'a NonNativeFieldVar, other: TargetField| { this + &NonNativeFieldVar::Constant(other) }, + (TargetField: PrimeField, BaseField: PrimeField), +); + +impl_bounded_ops!( + NonNativeFieldVar, + TargetField, + Sub, + sub, + SubAssign, + sub_assign, + |this: &'a NonNativeFieldVar, other: &'a NonNativeFieldVar| { + use NonNativeFieldVar::*; + match (this, other) { + (Constant(c1), Constant(c2)) => Constant(*c1 - c2), + (Var(v), Constant(c)) => Var(v.sub_constant(c).unwrap()), + (Constant(c), Var(v)) => Var(v.sub_constant(c).unwrap().negate().unwrap()), + (Var(v1), Var(v2)) => Var(v1.sub(v2).unwrap()), + } + }, + |this: &'a NonNativeFieldVar, other: TargetField| { + this - &NonNativeFieldVar::Constant(other) + }, + (TargetField: PrimeField, BaseField: PrimeField), +); + +impl_bounded_ops!( + NonNativeFieldVar, + TargetField, + Mul, + mul, + MulAssign, + mul_assign, + |this: &'a NonNativeFieldVar, other: &'a NonNativeFieldVar| { + use NonNativeFieldVar::*; + match (this, other) { + (Constant(c1), Constant(c2)) => Constant(*c1 * c2), + (Constant(c), Var(v)) | (Var(v), Constant(c)) => Var(v.mul_constant(c).unwrap()), + (Var(v1), Var(v2)) => Var(v1.mul(v2).unwrap()), + } + }, + |this: &'a NonNativeFieldVar, other: TargetField| { + if other.is_zero() { + NonNativeFieldVar::zero() + } else { + this * &NonNativeFieldVar::Constant(other) + } + }, + (TargetField: PrimeField, BaseField: PrimeField), +); + +/****************************************************************************/ +/****************************************************************************/ + +impl EqGadget + for NonNativeFieldVar +{ + #[tracing::instrument(target = "r1cs")] + fn is_eq(&self, other: &Self) -> R1CSResult> { + let cs = self.cs().or(other.cs()); + + if cs == ConstraintSystemRef::None { + Ok(Boolean::Constant(self.value()? == other.value()?)) + } else { + let should_enforce_equal = + Boolean::new_witness(cs, || Ok(self.value()? 
== other.value()?))?; + + self.conditional_enforce_equal(other, &should_enforce_equal)?; + self.conditional_enforce_not_equal(other, &should_enforce_equal.not())?; + + Ok(should_enforce_equal) + } + } + + #[tracing::instrument(target = "r1cs")] + fn conditional_enforce_equal( + &self, + other: &Self, + should_enforce: &Boolean, + ) -> R1CSResult<()> { + match (self, other) { + (Self::Constant(c1), Self::Constant(c2)) => { + if c1 != c2 { + should_enforce.enforce_equal(&Boolean::FALSE)?; + } + Ok(()) + } + (Self::Constant(c), Self::Var(v)) | (Self::Var(v), Self::Constant(c)) => { + let cs = v.cs(); + let c = AllocatedNonNativeFieldVar::new_constant(cs, c)?; + c.conditional_enforce_equal(v, should_enforce) + } + (Self::Var(v1), Self::Var(v2)) => v1.conditional_enforce_equal(v2, should_enforce), + } + } + + #[tracing::instrument(target = "r1cs")] + fn conditional_enforce_not_equal( + &self, + other: &Self, + should_enforce: &Boolean, + ) -> R1CSResult<()> { + match (self, other) { + (Self::Constant(c1), Self::Constant(c2)) => { + if c1 == c2 { + should_enforce.enforce_equal(&Boolean::FALSE)?; + } + Ok(()) + } + (Self::Constant(c), Self::Var(v)) | (Self::Var(v), Self::Constant(c)) => { + let cs = v.cs(); + let c = AllocatedNonNativeFieldVar::new_constant(cs, c)?; + c.conditional_enforce_not_equal(v, should_enforce) + } + (Self::Var(v1), Self::Var(v2)) => v1.conditional_enforce_not_equal(v2, should_enforce), + } + } +} + +impl ToBitsGadget + for NonNativeFieldVar +{ + #[tracing::instrument(target = "r1cs")] + fn to_bits_le(&self) -> R1CSResult>> { + match self { + Self::Constant(_) => self.to_non_unique_bits_le(), + Self::Var(v) => v.to_bits_le(), + } + } + + #[tracing::instrument(target = "r1cs")] + fn to_non_unique_bits_le(&self) -> R1CSResult>> { + use ark_ff::BitIteratorLE; + match self { + Self::Constant(c) => Ok(BitIteratorLE::new(&c.into_repr()) + .take((TargetField::Params::MODULUS_BITS) as usize) + .map(Boolean::constant) + .collect::>()), + Self::Var(v) => v.to_non_unique_bits_le(), + } + } +} + +impl ToBytesGadget + for NonNativeFieldVar +{ + /// Outputs the unique byte decomposition of `self` in *little-endian* + /// form. + #[tracing::instrument(target = "r1cs")] + fn to_bytes(&self) -> R1CSResult>> { + match self { + Self::Constant(c) => Ok(UInt8::constant_vec(&to_bytes![c].unwrap())), + Self::Var(v) => v.to_bytes(), + } + } + + #[tracing::instrument(target = "r1cs")] + fn to_non_unique_bytes(&self) -> R1CSResult>> { + match self { + Self::Constant(c) => Ok(UInt8::constant_vec(&to_bytes![c].unwrap())), + Self::Var(v) => v.to_non_unique_bytes(), + } + } +} + +impl CondSelectGadget + for NonNativeFieldVar +{ + #[tracing::instrument(target = "r1cs")] + fn conditionally_select( + cond: &Boolean, + true_value: &Self, + false_value: &Self, + ) -> R1CSResult { + match cond { + Boolean::Constant(true) => Ok(true_value.clone()), + Boolean::Constant(false) => Ok(false_value.clone()), + _ => { + let cs = cond.cs(); + let true_value = match true_value { + Self::Constant(f) => AllocatedNonNativeFieldVar::new_constant(cs.clone(), f)?, + Self::Var(v) => v.clone(), + }; + let false_value = match false_value { + Self::Constant(f) => AllocatedNonNativeFieldVar::new_constant(cs, f)?, + Self::Var(v) => v.clone(), + }; + cond.select(&true_value, &false_value).map(Self::Var) + } + } + } +} + +/// Uses two bits to perform a lookup into a table +/// `b` is little-endian: `b[0]` is LSB. 
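+/// For example (illustrative values): `b = [TRUE, FALSE]` selects index 1 + (0 << 1) = 1, i.e. `c[1]`, while `b = [TRUE, TRUE]` selects index 1 + (1 << 1) = 3, i.e. `c[3]`, matching the constant-case computation below. 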
+impl TwoBitLookupGadget + for NonNativeFieldVar +{ + type TableConstant = TargetField; + + #[tracing::instrument(target = "r1cs")] + fn two_bit_lookup(b: &[Boolean], c: &[Self::TableConstant]) -> R1CSResult { + debug_assert_eq!(b.len(), 2); + debug_assert_eq!(c.len(), 4); + if b.cs().is_none() { + // We're in the constant case + + let lsb = b[0].value()? as usize; + let msb = b[1].value()? as usize; + let index = lsb + (msb << 1); + Ok(Self::Constant(c[index])) + } else { + AllocatedNonNativeFieldVar::two_bit_lookup(b, c).map(Self::Var) + } + } +} + +impl ThreeBitCondNegLookupGadget + for NonNativeFieldVar +{ + type TableConstant = TargetField; + + #[tracing::instrument(target = "r1cs")] + fn three_bit_cond_neg_lookup( + b: &[Boolean], + b0b1: &Boolean, + c: &[Self::TableConstant], + ) -> R1CSResult { + debug_assert_eq!(b.len(), 3); + debug_assert_eq!(c.len(), 4); + + if b.cs().or(b0b1.cs()).is_none() { + // We're in the constant case + + let lsb = b[0].value()? as usize; + let msb = b[1].value()? as usize; + let index = lsb + (msb << 1); + let intermediate = c[index]; + + let is_negative = b[2].value()?; + let y = if is_negative { + -intermediate + } else { + intermediate + }; + Ok(Self::Constant(y)) + } else { + AllocatedNonNativeFieldVar::three_bit_cond_neg_lookup(b, b0b1, c).map(Self::Var) + } + } +} + +impl AllocVar + for NonNativeFieldVar +{ + fn new_variable>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> R1CSResult { + let ns = cs.into(); + let cs = ns.cs(); + + if cs == ConstraintSystemRef::None || mode == AllocationMode::Constant { + Ok(Self::Constant(*f()?.borrow())) + } else { + AllocatedNonNativeFieldVar::new_variable(cs, f, mode).map(Self::Var) + } + } +} + +impl ToConstraintFieldGadget + for NonNativeFieldVar +{ + #[tracing::instrument(target = "r1cs")] + fn to_constraint_field(&self) -> R1CSResult>> { + // Use one group element to represent the optimization type. + // + // By default, the constant is converted in the weight-optimized type, because it results in fewer elements. + match self { + Self::Constant(c) => Ok(AllocatedNonNativeFieldVar::get_limbs_representations( + c, + OptimizationType::Weight, + )? + .into_iter() + .map(FpVar::constant) + .collect()), + Self::Var(v) => v.to_constraint_field(), + } + } +} + +impl NonNativeFieldVar { + /// The `mul_without_reduce` for `NonNativeFieldVar` + #[tracing::instrument(target = "r1cs")] + pub fn mul_without_reduce( + &self, + other: &Self, + ) -> R1CSResult> { + match self { + Self::Constant(c) => match other { + Self::Constant(other_c) => Ok(NonNativeFieldMulResultVar::Constant(*c * other_c)), + Self::Var(other_v) => { + let self_v = + AllocatedNonNativeFieldVar::::new_constant( + self.cs(), + c, + )?; + Ok(NonNativeFieldMulResultVar::Var( + other_v.mul_without_reduce(&self_v)?, + )) + } + }, + Self::Var(v) => { + let other_v = match other { + Self::Constant(other_c) => { + AllocatedNonNativeFieldVar::::new_constant( + self.cs(), + other_c, + )? 
+ } + Self::Var(other_v) => other_v.clone(), + }; + Ok(NonNativeFieldMulResultVar::Var( + v.mul_without_reduce(&other_v)?, + )) + } + } + } +} diff --git a/arkworks/nonnative/src/params.rs b/arkworks/nonnative/src/params.rs new file mode 100644 index 00000000..fd019c26 --- /dev/null +++ b/arkworks/nonnative/src/params.rs @@ -0,0 +1,96 @@ +use crate::NonNativeFieldParams; + +/// Obtain the parameters from a `ConstraintSystem`'s cache or generate a new one +#[must_use] +pub const fn get_params( + target_field_size: usize, + base_field_size: usize, + optimization_type: OptimizationType, +) -> NonNativeFieldParams { + let (num_of_limbs, limb_size) = + find_parameters(base_field_size, target_field_size, optimization_type); + NonNativeFieldParams { + num_limbs: num_of_limbs, + bits_per_limb: limb_size, + } +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +/// The type of optimization target for the parameters searching +pub enum OptimizationType { + /// Optimized for constraints + Constraints, + /// Optimized for weight + Weight, +} + +/// A function to search for parameters for nonnative field gadgets +pub const fn find_parameters( + base_field_prime_length: usize, + target_field_prime_bit_length: usize, + optimization_type: OptimizationType, +) -> (usize, usize) { + let mut found = false; + let mut min_cost = 0usize; + let mut min_cost_limb_size = 0usize; + let mut min_cost_num_of_limbs = 0usize; + + let surfeit = 10; + let mut max_limb_size = (base_field_prime_length - 1 - surfeit - 1) / 2 - 1; + if max_limb_size > target_field_prime_bit_length { + max_limb_size = target_field_prime_bit_length; + } + let mut limb_size = 1; + + while limb_size <= max_limb_size { + let num_of_limbs = (target_field_prime_bit_length + limb_size - 1) / limb_size; + + let group_size = + (base_field_prime_length - 1 - surfeit - 1 - 1 - limb_size + limb_size - 1) / limb_size; + let num_of_groups = (2 * num_of_limbs - 1 + group_size - 1) / group_size; + + let mut this_cost = 0; + + match optimization_type { + OptimizationType::Constraints => { + this_cost += 2 * num_of_limbs - 1; + } + OptimizationType::Weight => { + this_cost += 6 * num_of_limbs * num_of_limbs; + } + }; + + match optimization_type { + OptimizationType::Constraints => { + this_cost += target_field_prime_bit_length; // allocation of k + this_cost += target_field_prime_bit_length + num_of_limbs; // allocation of r + //this_cost += 2 * num_of_limbs - 1; // compute kp + this_cost += num_of_groups + (num_of_groups - 1) * (limb_size * 2 + surfeit) + 1; + // equality check + } + OptimizationType::Weight => { + this_cost += target_field_prime_bit_length * 3 + target_field_prime_bit_length; // allocation of k + this_cost += target_field_prime_bit_length * 3 + + target_field_prime_bit_length + + num_of_limbs; // allocation of r + this_cost += num_of_limbs * num_of_limbs + 2 * (2 * num_of_limbs - 1); // compute kp + this_cost += num_of_limbs + + num_of_groups + + 6 * num_of_groups + + (num_of_groups - 1) * (2 * limb_size + surfeit) * 4 + + 2; // equality check + } + }; + + if !found || this_cost < min_cost { + found = true; + min_cost = this_cost; + min_cost_limb_size = limb_size; + min_cost_num_of_limbs = num_of_limbs; + } + + limb_size += 1; + } + + (min_cost_num_of_limbs, min_cost_limb_size) +} diff --git a/arkworks/nonnative/src/reduce.rs b/arkworks/nonnative/src/reduce.rs new file mode 100644 index 00000000..a6e1c20b --- /dev/null +++ b/arkworks/nonnative/src/reduce.rs @@ -0,0 +1,333 @@ +use crate::params::get_params; +use crate::{overhead, 
AllocatedNonNativeFieldVar}; +use ark_ff::{biginteger::BigInteger, fields::FpParameters, BitIteratorBE, One, PrimeField, Zero}; +use ark_r1cs_std::eq::EqGadget; +use ark_r1cs_std::fields::fp::FpVar; +use ark_r1cs_std::fields::FieldVar; +use ark_r1cs_std::{alloc::AllocVar, boolean::Boolean, R1CSVar}; +use ark_relations::{ + ns, + r1cs::{ConstraintSystemRef, Result as R1CSResult}, +}; +use ark_std::{cmp::min, marker::PhantomData, vec, vec::Vec}; +use num_bigint::BigUint; +use num_integer::Integer; + +pub fn limbs_to_bigint<BaseField: PrimeField>( + bits_per_limb: usize, + limbs: &[BaseField], +) -> BigUint { + let mut val = BigUint::zero(); + let mut big_cur = BigUint::one(); + let two = BigUint::from(2u32); + for limb in limbs.iter().rev() { + let limb_repr = limb.into_repr().to_bits_le(); + let mut small_cur = big_cur.clone(); + for limb_bit in limb_repr.iter() { + if *limb_bit { + val += &small_cur; + } + small_cur *= 2u32; + } + big_cur *= two.pow(bits_per_limb as u32); + } + + val +} + +pub fn bigint_to_basefield<BaseField: PrimeField>(bigint: &BigUint) -> BaseField { + let mut val = BaseField::zero(); + let mut cur = BaseField::one(); + let bytes = bigint.to_bytes_be(); + + let basefield_256 = BaseField::from_repr(<BaseField as PrimeField>::BigInt::from(256)).unwrap(); + + for byte in bytes.iter().rev() { + let bytes_basefield = BaseField::from(*byte as u128); + val += cur * bytes_basefield; + + cur *= &basefield_256; + } + + val +} + +/// the collection of methods for reducing the representations +pub struct Reducer<TargetField: PrimeField, BaseField: PrimeField> { + pub target_phantom: PhantomData<TargetField>, + pub base_phantom: PhantomData<BaseField>, +} + +impl<TargetField: PrimeField, BaseField: PrimeField> Reducer<TargetField, BaseField> { + /// Convert limbs to bits (take at most `BaseField::size_in_bits() - 1` bits). + /// This implementation is more efficient than the generic `to_bits` + /// or `to_non_unique_bits`, since we enforce that some bits are always zero. 
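+ /// Concretely, the enforced relation is `limb = sum_i bits[i] * 2^(num_bits - 1 - i)` over freshly allocated witness bits (big-endian order, as collected below), checked with a single linear combination; this is what guarantees the limb fits in `num_bits` bits. 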
+ #[tracing::instrument(target = "r1cs")] + pub fn limb_to_bits( + limb: &FpVar, + num_bits: usize, + ) -> R1CSResult>> { + let cs = limb.cs(); + + let num_bits = min(BaseField::size_in_bits() - 1, num_bits); + let mut bits_considered = Vec::with_capacity(num_bits); + let limb_value = limb.value().unwrap_or_default(); + + for b in BitIteratorBE::new(limb_value.into_repr()).skip( + <::Params as FpParameters>::REPR_SHAVE_BITS as usize + + (BaseField::size_in_bits() - num_bits), + ) { + bits_considered.push(b); + } + + if cs == ConstraintSystemRef::None { + let mut bits = vec![]; + for b in bits_considered { + bits.push(Boolean::::Constant(b)); + } + + Ok(bits) + } else { + let mut bits = vec![]; + for b in bits_considered { + bits.push(Boolean::::new_witness( + ark_relations::ns!(cs, "bit"), + || Ok(b), + )?); + } + + let mut bit_sum = FpVar::::zero(); + let mut coeff = BaseField::one(); + + for bit in bits.iter().rev() { + bit_sum += + as From>>::from((*bit).clone()) * coeff; + coeff.double_in_place(); + } + + bit_sum.enforce_equal(limb)?; + + Ok(bits) + } + } + + /// Reduction to the normal form + #[tracing::instrument(target = "r1cs")] + pub fn reduce(elem: &mut AllocatedNonNativeFieldVar) -> R1CSResult<()> { + let new_elem = + AllocatedNonNativeFieldVar::new_witness(ns!(elem.cs(), "normal_form"), || { + Ok(elem.value().unwrap_or_default()) + })?; + elem.conditional_enforce_equal(&new_elem, &Boolean::TRUE)?; + *elem = new_elem; + + Ok(()) + } + + /// Reduction to be enforced after additions + #[tracing::instrument(target = "r1cs")] + pub fn post_add_reduce( + elem: &mut AllocatedNonNativeFieldVar, + ) -> R1CSResult<()> { + let params = get_params( + TargetField::size_in_bits(), + BaseField::size_in_bits(), + elem.get_optimization_type(), + ); + let surfeit = overhead!(elem.num_of_additions_over_normal_form + BaseField::one()) + 1; + + if BaseField::size_in_bits() > 2 * params.bits_per_limb + surfeit + 1 { + Ok(()) + } else { + Self::reduce(elem) + } + } + + /// Reduction used before multiplication to reduce the representations in a way that allows efficient multiplication + #[tracing::instrument(target = "r1cs")] + pub fn pre_mul_reduce( + elem: &mut AllocatedNonNativeFieldVar, + elem_other: &mut AllocatedNonNativeFieldVar, + ) -> R1CSResult<()> { + assert_eq!( + elem.get_optimization_type(), + elem_other.get_optimization_type() + ); + + let params = get_params( + TargetField::size_in_bits(), + BaseField::size_in_bits(), + elem.get_optimization_type(), + ); + + if 2 * params.bits_per_limb + ark_std::log2(params.num_limbs) as usize + > BaseField::size_in_bits() - 1 + { + panic!("The current limb parameters do not support multiplication."); + } + + loop { + let prod_of_num_of_additions = (elem.num_of_additions_over_normal_form + + BaseField::one()) + * (elem_other.num_of_additions_over_normal_form + BaseField::one()); + let overhead_limb = overhead!(prod_of_num_of_additions.mul( + &BaseField::from_repr(::BigInt::from( + (params.num_limbs) as u64 + )) + .unwrap() + )); + let bits_per_mulresult_limb = 2 * (params.bits_per_limb + 1) + overhead_limb; + + if bits_per_mulresult_limb < BaseField::size_in_bits() { + break; + } + + if elem.num_of_additions_over_normal_form + >= elem_other.num_of_additions_over_normal_form + { + Self::reduce(elem)?; + } else { + Self::reduce(elem_other)?; + } + } + + Ok(()) + } + + /// Reduction to the normal form + #[tracing::instrument(target = "r1cs")] + pub fn pre_eq_reduce( + elem: &mut AllocatedNonNativeFieldVar, + ) -> R1CSResult<()> { + if 
elem.is_in_the_normal_form { + return Ok(()); + } + + Self::reduce(elem) + } + + /// Group and check equality + #[tracing::instrument(target = "r1cs")] + pub fn group_and_check_equality( + surfeit: usize, + bits_per_limb: usize, + shift_per_limb: usize, + left: &[FpVar<BaseField>], + right: &[FpVar<BaseField>], + ) -> R1CSResult<()> { + let cs = left.cs().or(right.cs()); + let zero = FpVar::<BaseField>::zero(); + + let mut limb_pairs = Vec::<(FpVar<BaseField>, FpVar<BaseField>)>::new(); + let num_limb_in_a_group = (BaseField::size_in_bits() + - 1 + - surfeit + - 1 + - 1 + - 1 + - (bits_per_limb - shift_per_limb)) + / shift_per_limb; + + let shift_array = { + let mut array = Vec::new(); + let mut cur = BaseField::one().into_repr(); + for _ in 0..num_limb_in_a_group { + array.push(BaseField::from_repr(cur).unwrap()); + cur.muln(shift_per_limb as u32); + } + + array + }; + + for (left_limb, right_limb) in left.iter().zip(right.iter()).rev() { + // note: the `rev` operation is here, so that the first limb (and the first grouped limb) will be the least significant limb. + limb_pairs.push((left_limb.clone(), right_limb.clone())); + } + + let mut groupped_limb_pairs = Vec::<(FpVar<BaseField>, FpVar<BaseField>, usize)>::new(); + + for limb_pairs_in_a_group in limb_pairs.chunks(num_limb_in_a_group) { + let mut left_total_limb = zero.clone(); + let mut right_total_limb = zero.clone(); + + for ((left_limb, right_limb), shift) in + limb_pairs_in_a_group.iter().zip(shift_array.iter()) + { + left_total_limb += &(left_limb * *shift); + right_total_limb += &(right_limb * *shift); + } + + groupped_limb_pairs.push(( + left_total_limb, + right_total_limb, + limb_pairs_in_a_group.len(), + )); + } + + // In this part, we mostly use the techniques from bellman-bignat + // The following code is adapted from https://github.com/alex-ozdemir/bellman-bignat/blob/master/src/mp/bignat.rs#L567 + let mut carry_in = zero; + let mut carry_in_value = BaseField::zero(); + let mut accumulated_extra = BigUint::zero(); + for (group_id, (left_total_limb, right_total_limb, num_limb_in_this_group)) in + groupped_limb_pairs.iter().enumerate() + { + let mut pad_limb_repr: <BaseField as PrimeField>::BigInt = BaseField::one().into_repr(); + + pad_limb_repr.muln( + (surfeit + + (bits_per_limb - shift_per_limb) + + shift_per_limb * num_limb_in_this_group + + 1 + + 1) as u32, + ); + let pad_limb = BaseField::from_repr(pad_limb_repr).unwrap(); + + let left_total_limb_value = left_total_limb.value().unwrap_or_default(); + let right_total_limb_value = right_total_limb.value().unwrap_or_default(); + + let mut carry_value = + left_total_limb_value + carry_in_value + pad_limb - right_total_limb_value; + + let mut carry_repr = carry_value.into_repr(); + carry_repr.divn((shift_per_limb * num_limb_in_this_group) as u32); + + carry_value = BaseField::from_repr(carry_repr).unwrap(); + + let carry = FpVar::<BaseField>::new_witness(cs.clone(), || Ok(carry_value))?; + + accumulated_extra += limbs_to_bigint(bits_per_limb, &[pad_limb]); + + let (new_accumulated_extra, remainder) = accumulated_extra.div_rem( + &BigUint::from(2u64).pow((shift_per_limb * num_limb_in_this_group) as u32), + ); + let remainder_limb = bigint_to_basefield::<BaseField>(&remainder); + + // Now check + // left_total_limb + pad_limb + carry_in - right_total_limb + // = carry shift by (shift_per_limb * num_limb_in_this_group) + remainder + + let eqn_left = left_total_limb + pad_limb + &carry_in - right_total_limb; + + let eqn_right = &carry + * BaseField::from(2u64).pow(&[(shift_per_limb * num_limb_in_this_group) as u64]) + + remainder_limb; + + eqn_left.conditional_enforce_equal(&eqn_right, 
&Boolean::::TRUE)?; + + accumulated_extra = new_accumulated_extra; + carry_in = carry.clone(); + carry_in_value = carry_value; + + if group_id == groupped_limb_pairs.len() - 1 { + carry.enforce_equal(&FpVar::::Constant(bigint_to_basefield( + &accumulated_extra, + )))?; + } else { + Reducer::::limb_to_bits(&carry, surfeit + bits_per_limb)?; + } + } + + Ok(()) + } +} diff --git a/arkworks/nonnative/tests/arithmetic_tests.rs b/arkworks/nonnative/tests/arithmetic_tests.rs new file mode 100644 index 00000000..4cabd26a --- /dev/null +++ b/arkworks/nonnative/tests/arithmetic_tests.rs @@ -0,0 +1,698 @@ +use ark_bls12_381::Bls12_381; +use ark_ec::PairingEngine; +use ark_ff::PrimeField; +use ark_mnt4_298::MNT4_298; +use ark_mnt4_753::MNT4_753; +use ark_mnt6_298::MNT6_298; +use ark_mnt6_753::MNT6_753; + +use ark_nonnative_field::NonNativeFieldVar; +use ark_r1cs_std::{alloc::AllocVar, eq::EqGadget, fields::FieldVar, R1CSVar}; +use ark_relations::r1cs::{ConstraintSystem, ConstraintSystemRef}; +use ark_std::rand::RngCore; + +#[cfg(not(ci))] +const NUM_REPETITIONS: usize = 100; +#[cfg(ci)] +const NUM_REPETITIONS: usize = 1; + +#[cfg(not(ci))] +const TEST_COUNT: usize = 100; +#[cfg(ci)] +const TEST_COUNT: usize = 1; + +fn allocation_test( + cs: ConstraintSystemRef, + rng: &mut R, +) { + let a_native = TargetField::rand(rng); + let a = NonNativeFieldVar::::new_witness( + ark_relations::ns!(cs, "alloc a"), + || Ok(a_native), + ) + .unwrap(); + + let a_actual = a.value().unwrap(); + let a_expected = a_native; + assert!( + a_actual.eq(&a_expected), + "allocated value does not equal the expected value" + ); +} + +fn addition_test( + cs: ConstraintSystemRef, + rng: &mut R, +) { + let a_native = TargetField::rand(rng); + let a = NonNativeFieldVar::::new_witness( + ark_relations::ns!(cs, "alloc a"), + || Ok(a_native), + ) + .unwrap(); + + let b_native = TargetField::rand(rng); + let b = NonNativeFieldVar::::new_witness( + ark_relations::ns!(cs, "alloc b"), + || Ok(b_native), + ) + .unwrap(); + + let a_plus_b = a + &b; + + let a_plus_b_actual = a_plus_b.value().unwrap(); + let a_plus_b_expected = a_native + &b_native; + assert!(a_plus_b_actual.eq(&a_plus_b_expected), "a + b failed"); +} + +fn multiplication_test( + cs: ConstraintSystemRef, + rng: &mut R, +) { + let a_native = TargetField::rand(rng); + let a = NonNativeFieldVar::::new_witness( + ark_relations::ns!(cs, "alloc a"), + || Ok(a_native), + ) + .unwrap(); + + let b_native = TargetField::rand(rng); + let b = NonNativeFieldVar::::new_witness( + ark_relations::ns!(cs, "alloc b"), + || Ok(b_native), + ) + .unwrap(); + + let a_times_b = a * &b; + + let a_times_b_actual = a_times_b.value().unwrap(); + let a_times_b_expected = a_native * &b_native; + + assert!( + a_times_b_actual.eq(&a_times_b_expected), + "a_times_b = {:?}, a_times_b_actual = {:?}, a_times_b_expected = {:?}", + a_times_b, + a_times_b_actual.into_repr().as_ref(), + a_times_b_expected.into_repr().as_ref() + ); +} + +fn equality_test( + cs: ConstraintSystemRef, + rng: &mut R, +) { + let a_native = TargetField::rand(rng); + let a = NonNativeFieldVar::::new_witness( + ark_relations::ns!(cs, "alloc a"), + || Ok(a_native), + ) + .unwrap(); + + let b_native = TargetField::rand(rng); + let b = NonNativeFieldVar::::new_witness( + ark_relations::ns!(cs, "alloc b"), + || Ok(b_native), + ) + .unwrap(); + + let a_times_b = a * &b; + + let a_times_b_expected = a_native * &b_native; + let a_times_b_expected_gadget = NonNativeFieldVar::::new_witness( + ark_relations::ns!(cs, "alloc a * b"), + || 
Ok(a_times_b_expected), + ) + .unwrap(); + + a_times_b.enforce_equal(&a_times_b_expected_gadget).unwrap(); +} + +fn edge_cases_test( + cs: ConstraintSystemRef, + rng: &mut R, +) { + let zero_native = TargetField::zero(); + let zero = NonNativeFieldVar::::zero(); + let one = NonNativeFieldVar::::one(); + + let a_native = TargetField::rand(rng); + let minus_a_native = TargetField::zero() - &a_native; + let a = NonNativeFieldVar::::new_witness( + ark_relations::ns!(cs, "alloc a"), + || Ok(a_native), + ) + .unwrap(); + + let a_plus_zero = &a + &zero; + let a_minus_zero = &a - &zero; + let zero_minus_a = &zero - &a; + let a_times_zero = &a * &zero; + + let zero_plus_a = &zero + &a; + let zero_times_a = &zero * &a; + + let a_times_one = &a * &one; + let one_times_a = &one * &a; + + let a_plus_zero_native = a_plus_zero.value().unwrap(); + let a_minus_zero_native = a_minus_zero.value().unwrap(); + let zero_minus_a_native = zero_minus_a.value().unwrap(); + let a_times_zero_native = a_times_zero.value().unwrap(); + let zero_plus_a_native = zero_plus_a.value().unwrap(); + let zero_times_a_native = zero_times_a.value().unwrap(); + let a_times_one_native = a_times_one.value().unwrap(); + let one_times_a_native = one_times_a.value().unwrap(); + + assert!( + a_plus_zero_native.eq(&a_native), + "a_plus_zero = {:?}, a = {:?}", + a_plus_zero_native.into_repr().as_ref(), + a_native.into_repr().as_ref() + ); + assert!( + a_minus_zero_native.eq(&a_native), + "a_minus_zero = {:?}, a = {:?}", + a_minus_zero_native.into_repr().as_ref(), + a_native.into_repr().as_ref() + ); + assert!( + zero_minus_a_native.eq(&minus_a_native), + "zero_minus_a = {:?}, minus_a = {:?}", + zero_minus_a_native.into_repr().as_ref(), + minus_a_native.into_repr().as_ref() + ); + assert!( + a_times_zero_native.eq(&zero_native), + "a_times_zero = {:?}, zero = {:?}", + a_times_zero_native.into_repr().as_ref(), + zero_native.into_repr().as_ref() + ); + assert!( + zero_plus_a_native.eq(&a_native), + "zero_plus_a = {:?}, a = {:?}", + zero_plus_a_native.into_repr().as_ref(), + a_native.into_repr().as_ref() + ); + assert!( + zero_times_a_native.eq(&zero_native), + "zero_times_a = {:?}, zero = {:?}", + zero_times_a_native.into_repr().as_ref(), + zero_native.into_repr().as_ref() + ); + assert!( + a_times_one_native.eq(&a_native), + "a_times_one = {:?}, a = {:?}", + a_times_one_native.into_repr().as_ref(), + a_native.into_repr().as_ref() + ); + assert!( + one_times_a_native.eq(&a_native), + "one_times_a = {:?}, a = {:?}", + one_times_a_native.into_repr().as_ref(), + a_native.into_repr().as_ref() + ); +} + +fn distribution_law_test( + cs: ConstraintSystemRef, + rng: &mut R, +) { + let a_native = TargetField::rand(rng); + let b_native = TargetField::rand(rng); + let c_native = TargetField::rand(rng); + + let a_plus_b_native = a_native.clone() + &b_native; + let a_times_c_native = a_native.clone() * &c_native; + let b_times_c_native = b_native.clone() * &c_native; + let a_plus_b_times_c_native = a_plus_b_native.clone() * &c_native; + let a_times_c_plus_b_times_c_native = a_times_c_native + &b_times_c_native; + + assert!( + a_plus_b_times_c_native.eq(&a_times_c_plus_b_times_c_native), + "(a + b) * c doesn't equal (a * c) + (b * c)" + ); + + let a = NonNativeFieldVar::::new_witness( + ark_relations::ns!(cs, "a"), + || Ok(a_native), + ) + .unwrap(); + let b = NonNativeFieldVar::::new_witness( + ark_relations::ns!(cs, "b"), + || Ok(b_native), + ) + .unwrap(); + let c = NonNativeFieldVar::::new_witness( + ark_relations::ns!(cs, "c"), + || Ok(c_native), + ) 
+ .unwrap(); + + let a_plus_b = &a + &b; + let a_times_c = &a * &c; + let b_times_c = &b * &c; + let a_plus_b_times_c = &a_plus_b * &c; + let a_times_c_plus_b_times_c = &a_times_c + &b_times_c; + + assert!( + a_plus_b.value().unwrap().eq(&a_plus_b_native), + "a + b doesn't match" + ); + assert!( + a_times_c.value().unwrap().eq(&a_times_c_native), + "a * c doesn't match" + ); + assert!( + b_times_c.value().unwrap().eq(&b_times_c_native), + "b * c doesn't match" + ); + assert!( + a_plus_b_times_c + .value() + .unwrap() + .eq(&a_plus_b_times_c_native), + "(a + b) * c doesn't match" + ); + assert!( + a_times_c_plus_b_times_c + .value() + .unwrap() + .eq(&a_times_c_plus_b_times_c_native), + "(a * c) + (b * c) doesn't match" + ); + assert!( + a_plus_b_times_c_native.eq(&a_times_c_plus_b_times_c_native), + "(a + b) * c != (a * c) + (b * c)" + ); +} + +fn randomized_arithmetic_test( + cs: ConstraintSystemRef, + rng: &mut R, +) { + let mut operations: Vec = Vec::new(); + for _ in 0..TEST_COUNT { + operations.push(rng.next_u32() % 3); + } + + let mut num_native = TargetField::rand(rng); + let mut num = NonNativeFieldVar::::new_witness( + ark_relations::ns!(cs, "initial num"), + || Ok(num_native), + ) + .unwrap(); + for op in operations.iter() { + let next_native = TargetField::rand(rng); + let next = NonNativeFieldVar::::new_witness( + ark_relations::ns!(cs, "next num for repetition"), + || Ok(next_native), + ) + .unwrap(); + match op { + 0 => { + num_native += &next_native; + num += &next; + } + 1 => { + num_native *= &next_native; + num *= &next; + } + 2 => { + num_native -= &next_native; + num -= &next; + } + _ => (), + }; + + assert!( + num.value().unwrap().eq(&num_native), + "randomized arithmetic failed:" + ); + } +} + +fn addition_stress_test( + cs: ConstraintSystemRef, + rng: &mut R, +) { + let mut num_native = TargetField::rand(rng); + let mut num = + NonNativeFieldVar::new_witness(ark_relations::ns!(cs, "initial num"), || Ok(num_native)) + .unwrap(); + for _ in 0..TEST_COUNT { + let next_native = TargetField::rand(rng); + let next = NonNativeFieldVar::::new_witness( + ark_relations::ns!(cs, "next num for repetition"), + || Ok(next_native), + ) + .unwrap(); + num_native += &next_native; + num += &next; + + assert!(num.value().unwrap().eq(&num_native)); + } +} + +fn multiplication_stress_test( + cs: ConstraintSystemRef, + rng: &mut R, +) { + let mut num_native = TargetField::rand(rng); + let mut num = NonNativeFieldVar::::new_witness( + ark_relations::ns!(cs, "initial num"), + || Ok(num_native), + ) + .unwrap(); + for _ in 0..TEST_COUNT { + let next_native = TargetField::rand(rng); + let next = NonNativeFieldVar::::new_witness( + ark_relations::ns!(cs, "next num for repetition"), + || Ok(next_native), + ) + .unwrap(); + num_native *= &next_native; + num *= &next; + + assert!(num.value().unwrap().eq(&num_native)); + } +} + +fn mul_and_add_stress_test( + cs: ConstraintSystemRef, + rng: &mut R, +) { + let mut num_native = TargetField::rand(rng); + let mut num = NonNativeFieldVar::::new_witness( + ark_relations::ns!(cs, "initial num"), + || Ok(num_native), + ) + .unwrap(); + for _ in 0..TEST_COUNT { + let next_add_native = TargetField::rand(rng); + let next_add = NonNativeFieldVar::::new_witness( + ark_relations::ns!(cs, "next to add num for repetition"), + || Ok(next_add_native), + ) + .unwrap(); + let next_mul_native = TargetField::rand(rng); + let next_mul = NonNativeFieldVar::::new_witness( + ark_relations::ns!(cs, "next to mul num for repetition"), + || Ok(next_mul_native), + ) + .unwrap(); 
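+ + // This multiply-then-add pattern stresses the interplay between `pre_mul_reduce` and `post_add_reduce` (see `reduce.rs` above): each product inflates `num_of_additions_over_normal_form`, and the following addition must still reduce correctly for the gadget value to keep matching the native computation. 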
+ + num_native = num_native * &next_mul_native + &next_add_native; + num = num * &next_mul + &next_add; + + assert!(num.value().unwrap().eq(&num_native)); + } +} + +fn square_mul_add_stress_test<TargetField: PrimeField, BaseField: PrimeField, R: RngCore>( + cs: ConstraintSystemRef<BaseField>, + rng: &mut R, +) { + let mut num_native = TargetField::rand(rng); + let mut num = NonNativeFieldVar::<TargetField, BaseField>::new_witness( + ark_relations::ns!(cs, "initial num"), + || Ok(num_native), + ) + .unwrap(); + for _ in 0..TEST_COUNT { + let next_add_native = TargetField::rand(rng); + let next_add = NonNativeFieldVar::<TargetField, BaseField>::new_witness( + ark_relations::ns!(cs, "next to add num for repetition"), + || Ok(next_add_native), + ) + .unwrap(); + let next_mul_native = TargetField::rand(rng); + let next_mul = NonNativeFieldVar::<TargetField, BaseField>::new_witness( + ark_relations::ns!(cs, "next to mul num for repetition"), + || Ok(next_mul_native), + ) + .unwrap(); + + num_native = num_native * &num_native * &next_mul_native + &next_add_native; + num = &num * &num * &next_mul + &next_add; + + assert!(num.value().unwrap().eq(&num_native)); + } +} + +fn double_stress_test_1<TargetField: PrimeField, BaseField: PrimeField, R: RngCore>( + cs: ConstraintSystemRef<BaseField>, + rng: &mut R, +) { + let mut num_native = TargetField::rand(rng); + let mut num = NonNativeFieldVar::<TargetField, BaseField>::new_witness( + ark_relations::ns!(cs, "initial num"), + || Ok(num_native), + ) + .unwrap(); + // Double at least BaseField::size_in_bits() times to ensure that we test the overflow handling + for _ in 0..TEST_COUNT + BaseField::size_in_bits() { + // double + num_native = num_native + &num_native; + num = &num + &num; + + assert!(num.value().unwrap().eq(&num_native), "result incorrect"); + } +} + +fn double_stress_test_2<TargetField: PrimeField, BaseField: PrimeField, R: RngCore>( + cs: ConstraintSystemRef<BaseField>, + rng: &mut R, +) { + let mut num_native = TargetField::rand(rng); + let mut num = NonNativeFieldVar::<TargetField, BaseField>::new_witness( + ark_relations::ns!(cs, "initial num"), + || Ok(num_native), + ) + .unwrap(); + for _ in 0..TEST_COUNT { + // double + num_native = num_native + &num_native; + num = &num + &num; + + assert!(num.value().unwrap().eq(&num_native)); + + // square + let num_square_native = num_native * &num_native; + let num_square = &num * &num; + assert!(num_square.value().unwrap().eq(&num_square_native)); + } +} + +fn double_stress_test_3<TargetField: PrimeField, BaseField: PrimeField, R: RngCore>( + cs: ConstraintSystemRef<BaseField>, + rng: &mut R, +) { + let mut num_native = TargetField::rand(rng); + let mut num = NonNativeFieldVar::<TargetField, BaseField>::new_witness( + ark_relations::ns!(cs, "initial num"), + || Ok(num_native), + ) + .unwrap(); + for _ in 0..TEST_COUNT { + // double + num_native = num_native + &num_native; + num = &num + &num; + + assert!(num.value().unwrap().eq(&num_native)); + + // square + let num_square_native = num_native * &num_native; + let num_square = &num * &num; + let num_square_native_gadget = NonNativeFieldVar::<TargetField, BaseField>::new_witness( + ark_relations::ns!(cs, "repetition: alloc_native num"), + || Ok(num_square_native), + ) + .unwrap(); + + num_square.enforce_equal(&num_square_native_gadget).unwrap(); + } +} + +fn inverse_stress_test<TargetField: PrimeField, BaseField: PrimeField, R: RngCore>( + cs: ConstraintSystemRef<BaseField>, + rng: &mut R, +) { + for _ in 0..TEST_COUNT { + let num_native = TargetField::rand(rng); + let num = NonNativeFieldVar::<TargetField, BaseField>::new_witness( + ark_relations::ns!(cs, "num"), + || Ok(num_native), + ) + .unwrap(); + + if num_native == TargetField::zero() { + continue; + } + + let num_native_inverse = num_native.inverse().unwrap(); + let num_inverse = num.inverse().unwrap(); + + assert!(num_inverse.value().unwrap().eq(&num_native_inverse)); + } +}
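+ +// For orientation, an illustrative sketch of what the macros below generate (an assumed expansion, shown as a comment rather than compiled code): +// `nonnative_test_individual!(allocation_test, MNT46Small, <MNT4_298 as PairingEngine>::Fr, <MNT6_298 as PairingEngine>::Fr)` +// expands to roughly: +// +// #[test] +// fn allocation_test_mnt46small() { +// let rng = &mut ark_std::test_rng(); +// for _ in 0..NUM_REPETITIONS { +// let cs = ConstraintSystem::<<MNT6_298 as PairingEngine>::Fr>::new_ref(); +// allocation_test::<<MNT4_298 as PairingEngine>::Fr, <MNT6_298 as PairingEngine>::Fr, _>(cs.clone(), rng); +// assert!(cs.is_satisfied().unwrap()); +// } +// }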
+ +macro_rules! nonnative_test_individual { + ($test_method:ident, $test_name:ident, $test_target_field:ty, $test_base_field:ty) => { + paste::item! { + #[test] + fn [<$test_method _ $test_name:lower>]() { + let rng = &mut ark_std::test_rng(); + + for _ in 0..NUM_REPETITIONS { + let cs = ConstraintSystem::<$test_base_field>::new_ref(); + $test_method::<$test_target_field, $test_base_field, _>(cs.clone(), rng); + assert!(cs.is_satisfied().unwrap()); + } + } + } + }; +} + +macro_rules! nonnative_test { + ($test_name:ident, $test_target_field:ty, $test_base_field:ty) => { + nonnative_test_individual!( + allocation_test, + $test_name, + $test_target_field, + $test_base_field + ); + nonnative_test_individual!( + addition_test, + $test_name, + $test_target_field, + $test_base_field + ); + nonnative_test_individual!( + multiplication_test, + $test_name, + $test_target_field, + $test_base_field + ); + nonnative_test_individual!( + equality_test, + $test_name, + $test_target_field, + $test_base_field + ); + nonnative_test_individual!( + edge_cases_test, + $test_name, + $test_target_field, + $test_base_field + ); + nonnative_test_individual!( + distribution_law_test, + $test_name, + $test_target_field, + $test_base_field + ); + nonnative_test_individual!( + addition_stress_test, + $test_name, + $test_target_field, + $test_base_field + ); + nonnative_test_individual!( + double_stress_test_1, + $test_name, + $test_target_field, + $test_base_field + ); + nonnative_test_individual!( + double_stress_test_2, + $test_name, + $test_target_field, + $test_base_field + ); + nonnative_test_individual!( + double_stress_test_3, + $test_name, + $test_target_field, + $test_base_field + ); + nonnative_test_individual!( + randomized_arithmetic_test, + $test_name, + $test_target_field, + $test_base_field + ); + nonnative_test_individual!( + multiplication_stress_test, + $test_name, + $test_target_field, + $test_base_field + ); + nonnative_test_individual!( + mul_and_add_stress_test, + $test_name, + $test_target_field, + $test_base_field + ); + nonnative_test_individual!( + square_mul_add_stress_test, + $test_name, + $test_target_field, + $test_base_field + ); + nonnative_test_individual!( + inverse_stress_test, + $test_name, + $test_target_field, + $test_base_field + ); + }; +} + +nonnative_test!( + MNT46Small, + <MNT4_298 as PairingEngine>::Fr, + <MNT6_298 as PairingEngine>::Fr +); +nonnative_test!( + MNT64Small, + <MNT6_298 as PairingEngine>::Fr, + <MNT4_298 as PairingEngine>::Fr +); +nonnative_test!( + MNT46Big, + <MNT4_753 as PairingEngine>::Fr, + <MNT6_753 as PairingEngine>::Fr +); +nonnative_test!( + MNT64Big, + <MNT6_753 as PairingEngine>::Fr, + <MNT4_753 as PairingEngine>::Fr +); +nonnative_test!( + BLS12MNT4Small, + <Bls12_381 as PairingEngine>::Fr, + <MNT4_298 as PairingEngine>::Fr +); +nonnative_test!( + BLS12, + <Bls12_381 as PairingEngine>::Fq, + <Bls12_381 as PairingEngine>::Fr +); +#[cfg(not(ci))] +nonnative_test!( + MNT6BigMNT4Small, + <MNT6_753 as PairingEngine>::Fr, + <MNT4_298 as PairingEngine>::Fr +); +nonnative_test!( + PallasFrMNT6Fr, + ark_pallas::Fr, + <MNT6_753 as PairingEngine>::Fr +); +nonnative_test!( + MNT6FrPallasFr, + <MNT6_753 as PairingEngine>::Fr, + ark_pallas::Fr +); +nonnative_test!(PallasFqFr, ark_pallas::Fq, ark_pallas::Fr); +nonnative_test!(PallasFrFq, ark_pallas::Fr, ark_pallas::Fq); diff --git a/arkworks/nonnative/tests/from_test.rs b/arkworks/nonnative/tests/from_test.rs new file mode 100644 index 00000000..cf501a0d --- /dev/null +++ b/arkworks/nonnative/tests/from_test.rs @@ -0,0 +1,24 @@ +use ark_nonnative_field::{NonNativeFieldMulResultVar, NonNativeFieldVar}; +use ark_r1cs_std::alloc::AllocVar; +use ark_r1cs_std::R1CSVar; +use ark_relations::r1cs::ConstraintSystem; +use ark_std::UniformRand; + +#[test] +fn from_test() { + type F = ark_bls12_377::Fr; + type CF = ark_bls12_377::Fq; + + let mut rng = ark_std::test_rng(); + let cs = ConstraintSystem::<CF>::new_ref(); + let f = F::rand(&mut rng); + + let f_var = NonNativeFieldVar::<F, CF>::new_input(cs.clone(), || Ok(f)).unwrap(); + let f_var_converted = NonNativeFieldMulResultVar::<F, CF>::from(&f_var); + let 
f_var_converted_reduced = f_var_converted.reduce().unwrap(); + + let f_var_value = f_var.value().unwrap(); + let f_var_converted_reduced_value = f_var_converted_reduced.value().unwrap(); + + assert_eq!(f_var_value, f_var_converted_reduced_value); +} diff --git a/arkworks/nonnative/tests/to_bytes_test.rs b/arkworks/nonnative/tests/to_bytes_test.rs new file mode 100644 index 00000000..44c6ddf2 --- /dev/null +++ b/arkworks/nonnative/tests/to_bytes_test.rs @@ -0,0 +1,50 @@ +use ark_ec::PairingEngine; +use ark_ff::{to_bytes, Zero}; +use ark_mnt4_298::MNT4_298; +use ark_mnt6_298::MNT6_298; +use ark_nonnative_field::NonNativeFieldVar; +use ark_r1cs_std::alloc::AllocVar; +use ark_r1cs_std::{R1CSVar, ToBitsGadget, ToBytesGadget}; +use ark_relations::r1cs::ConstraintSystem; + +#[test] +fn to_bytes_test() { + let cs = ConstraintSystem::<::Fr>::new_ref(); + + let target_test_elem = ::Fr::from(123456u128); + let target_test_gadget = NonNativeFieldVar::< + ::Fr, + ::Fr, + >::new_witness(cs, || Ok(target_test_elem)) + .unwrap(); + + let target_to_bytes: Vec = target_test_gadget + .to_bytes() + .unwrap() + .iter() + .map(|v| v.value().unwrap()) + .collect(); + + // 123456 = 65536 + 226 * 256 + 64 + assert_eq!(target_to_bytes[0], 64); + assert_eq!(target_to_bytes[1], 226); + assert_eq!(target_to_bytes[2], 1); + + for byte in target_to_bytes.iter().skip(3) { + assert_eq!(*byte, 0); + } + + assert_eq!(to_bytes!(target_test_elem).unwrap(), target_to_bytes); +} + +#[test] +fn to_bits_test() { + type F = ark_bls12_377::Fr; + type CF = ark_bls12_377::Fq; + + let cs = ConstraintSystem::::new_ref(); + let f = F::zero(); + + let f_var = NonNativeFieldVar::::new_input(cs.clone(), || Ok(f)).unwrap(); + f_var.to_bits_le().unwrap(); +} diff --git a/arkworks/nonnative/tests/to_constraint_field_test.rs b/arkworks/nonnative/tests/to_constraint_field_test.rs new file mode 100644 index 00000000..78d704a6 --- /dev/null +++ b/arkworks/nonnative/tests/to_constraint_field_test.rs @@ -0,0 +1,28 @@ +use ark_nonnative_field::NonNativeFieldVar; +use ark_r1cs_std::alloc::AllocVar; +use ark_r1cs_std::{R1CSVar, ToConstraintFieldGadget}; +use ark_relations::r1cs::ConstraintSystem; + +#[test] +fn to_constraint_field_test() { + type F = ark_bls12_377::Fr; + type CF = ark_bls12_377::Fq; + + let cs = ConstraintSystem::::new_ref(); + + let a = NonNativeFieldVar::Constant(F::from(12u8)); + let b = NonNativeFieldVar::new_input(cs.clone(), || Ok(F::from(6u8))).unwrap(); + + let b2 = &b + &b; + + let a_to_constraint_field = a.to_constraint_field().unwrap(); + let b2_to_constraint_field = b2.to_constraint_field().unwrap(); + + assert_eq!(a_to_constraint_field.len(), b2_to_constraint_field.len()); + for (left, right) in a_to_constraint_field + .iter() + .zip(b2_to_constraint_field.iter()) + { + assert_eq!(left.value(), right.value()); + } +} diff --git a/arkworks/poly-commit/.github/ISSUE_TEMPLATE/bug_report.md b/arkworks/poly-commit/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 00000000..e01ca941 --- /dev/null +++ b/arkworks/poly-commit/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,25 @@ +--- +name: Bug Report +about: Create a report to help us squash bugs! 
+ +--- + +## Summary of Bug + + + +## Version + + + +## Steps to Reproduce + + + + diff --git a/arkworks/poly-commit/.github/ISSUE_TEMPLATE/feature_request.md b/arkworks/poly-commit/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 00000000..7d5ed5df --- /dev/null +++ b/arkworks/poly-commit/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,35 @@ +--- +name: Feature Request +about: Create a proposal to request a feature + +--- + + + +## Summary + + + +## Problem Definition + + + +## Proposal + + + +____ + +#### For Admin Use + +- [ ] Not duplicate issue +- [ ] Appropriate labels applied +- [ ] Appropriate contributors tagged +- [ ] Contributor assigned/self-assigned diff --git a/arkworks/poly-commit/.github/PULL_REQUEST_TEMPLATE.md b/arkworks/poly-commit/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..37f2f6c1 --- /dev/null +++ b/arkworks/poly-commit/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,26 @@ + + +## Description + + + +closes: #XXXX + +--- + +Before we can merge this PR, please make sure that all the following items have been +checked off. If any of the checklist items are not applicable, please leave them but +write a little note why. + +- [ ] Targeted PR against correct branch (master) +- [ ] Linked to Github issue with discussion and accepted design OR have an explanation in the PR that describes this work. +- [ ] Wrote unit tests +- [ ] Updated relevant documentation in the code +- [ ] Added a relevant changelog entry to the `Pending` section in `CHANGELOG.md` +- [ ] Re-reviewed `Files changed` in the Github PR explorer diff --git a/arkworks/poly-commit/.github/workflows/ci.yml b/arkworks/poly-commit/.github/workflows/ci.yml new file mode 100644 index 00000000..0f4be64a --- /dev/null +++ b/arkworks/poly-commit/.github/workflows/ci.yml @@ -0,0 +1,105 @@ +name: CI +on: + pull_request: + push: + branches: + - master +env: + RUST_BACKTRACE: 1 + +jobs: + style: + name: Check Style + runs-on: ubuntu-latest + steps: + + - name: Checkout + uses: actions/checkout@v1 + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + components: rustfmt + + - name: cargo fmt --check + uses: actions-rs/cargo@v1 + with: + command: fmt + args: --all -- --check + + test: + name: Test + runs-on: ubuntu-latest + env: + RUSTFLAGS: -Dwarnings + strategy: + matrix: + rust: + - stable + - nightly + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Install Rust (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: ${{ matrix.rust }} + override: true + + - name: Check examples + uses: actions-rs/cargo@v1 + with: + command: check + args: --examples --all + + - name: Check examples with all features on stable + uses: actions-rs/cargo@v1 + with: + command: check + args: --examples --all-features --all + if: matrix.rust == 'stable' + + - name: Check benchmarks on nightly + uses: actions-rs/cargo@v1 + with: + command: check + args: --all-features --examples --all --benches + if: matrix.rust == 'nightly' + + - name: Test + uses: actions-rs/cargo@v1 + with: + command: test + args: --release + + check_no_std: + name: Check no_std + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Install Rust (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + target: aarch64-unknown-none + override: true + + - name: Build + uses: actions-rs/cargo@v1 + with: + use-cross: true + command: build + args: 
--no-default-features --target aarch64-unknown-none + + - name: Check + uses: actions-rs/cargo@v1 + with: + use-cross: true + command: check + args: --examples --no-default-features --target aarch64-unknown-none diff --git a/arkworks/poly-commit/.github/workflows/linkify_changelog.yml b/arkworks/poly-commit/.github/workflows/linkify_changelog.yml new file mode 100644 index 00000000..0cbe85f1 --- /dev/null +++ b/arkworks/poly-commit/.github/workflows/linkify_changelog.yml @@ -0,0 +1,20 @@ +name: Linkify Changelog + +on: + workflow_dispatch + +jobs: + linkify: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Add links + run: python3 scripts/linkify_changelog.py CHANGELOG.md + - name: Commit + run: | + git config user.name github-actions + git config user.email github-actions@github.com + git add . + git commit -m "Linkify Changelog" + git push \ No newline at end of file diff --git a/arkworks/poly-commit/.gitignore b/arkworks/poly-commit/.gitignore new file mode 100644 index 00000000..be1aec0a --- /dev/null +++ b/arkworks/poly-commit/.gitignore @@ -0,0 +1,9 @@ +target +Cargo.lock +.DS_Store +.idea +*.iml +*.ipynb_checkpoints +*.pyc +*.sage.py +params diff --git a/arkworks/poly-commit/AUTHORS b/arkworks/poly-commit/AUTHORS new file mode 100644 index 00000000..22273b90 --- /dev/null +++ b/arkworks/poly-commit/AUTHORS @@ -0,0 +1,7 @@ +Alessandro Chiesa +Yuncong Hu +William Lin +Mary Maller +Pratyush Mishra +Noah Vesely +Nicholas Ward diff --git a/arkworks/poly-commit/CHANGELOG.md b/arkworks/poly-commit/CHANGELOG.md new file mode 100644 index 00000000..b6bfc521 --- /dev/null +++ b/arkworks/poly-commit/CHANGELOG.md @@ -0,0 +1,27 @@ +# CHANGELOG + +## Pending + +### Breaking changes + +### Features + +### Improvements + +### Bug fixes + +## v0.3.0 + +### Breaking changes + +- [\#78](https://github.com/arkworks-rs/poly-commit/pull/78) Fix MarlinPC's CommitterKey to return the correct `supported_degree`. + +### Features + +### Improvements + +### Bug fixes + +## v0.2.0 + +- initial release of `ark-poly-commit`. 
\ No newline at end of file diff --git a/arkworks/poly-commit/Cargo.toml b/arkworks/poly-commit/Cargo.toml new file mode 100644 index 00000000..1c3650ba --- /dev/null +++ b/arkworks/poly-commit/Cargo.toml @@ -0,0 +1,64 @@ +[package] +name = "ark-poly-commit" +version = "0.3.0" +authors = [ + "Alessandro Chiesa ", + "Mary Maller ", + "Yuncong Hu ", + "William Lin", + "Pratyush Mishra ", + "Noah Vesely ", + "Nicholas Ward ", + "arkworks contributors" +] +description = "A library for constructing polynomial commitment schemes for use in zkSNARKs" +repository = "https://github.com/arkworks-rs/poly-commit" +documentation = "https://docs.rs/ark-poly-commit/" +keywords = ["cryptography", "commitments", "elliptic-curves", "pairing"] +categories = ["cryptography"] +include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +license = "MIT/Apache-2.0" +edition = "2018" + +[dependencies] +ark-serialize = { path = "../algebra/serialize", version = "^0.3.0", default-features = false, features = [ "derive" ] } +ark-ff = { path = "../algebra/ff", version = "^0.3.0", default-features = false } +ark-ec = { path = "../algebra/ec", version = "^0.3.0", default-features = false } +ark-poly = { path = "../algebra/poly", version = "^0.3.0", default-features = false } + +ark-std = { path = "../std", version = "^0.3.0", default-features = false } +ark-relations = { path = "../snark/relations", version = "^0.3.0", default-features = false, optional = true } +ark-r1cs-std = { path = "../r1cs-std", version = "^0.3.0", default-features = false, optional = true } +ark-nonnative-field = { path = "../nonnative", version = "^0.3.0", default-features = false, optional = true } +hashbrown = { version = "0.9", optional = true } + +digest = "0.9" +rayon = { version = "1", optional = true } +derivative = { version = "2", features = [ "use_core" ] } + +tracing = { version = "0.1", default-features = false, features = [ "attributes" ] } + +[dev-dependencies] +ark-ed-on-bls12-381 = { path = "../curves/ed_on_bls12_381", version = "^0.3.0", default-features = false } +ark-bls12-381 = { path = "../curves/bls12_381", version = "^0.3.0", default-features = false, features = [ "curve" ] } +ark-bls12-377 = { path = "../curves/bls12_377", version = "^0.3.0", default-features = false, features = [ "curve" ] } +blake2 = { version = "0.9", default-features = false } + +[profile.release] +opt-level = 3 +lto = "thin" +incremental = true +debug = true + +[profile.test] +opt-level = 3 +debug-assertions = true +incremental = true +debug = true + +[features] +default = [ "std", "parallel" ] +std = [ "ark-ff/std", "ark-ec/std", "ark-nonnative-field/std", "ark-poly/std", "ark-std/std", "ark-relations/std", "ark-serialize/std" ] +r1cs = [ "ark-relations", "ark-r1cs-std", "ark-nonnative-field", "hashbrown" ] +print-trace = [ "ark-std/print-trace" ] +parallel = [ "std", "ark-ff/parallel", "ark-ec/parallel", "ark-poly/parallel", "ark-std/parallel", "rayon" ] diff --git a/arkworks/poly-commit/LICENSE-APACHE b/arkworks/poly-commit/LICENSE-APACHE new file mode 100644 index 00000000..16fe87b0 --- /dev/null +++ b/arkworks/poly-commit/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/arkworks/poly-commit/LICENSE-MIT b/arkworks/poly-commit/LICENSE-MIT
new file mode 100644
index 00000000..72dc60d8
--- /dev/null
+++ b/arkworks/poly-commit/LICENSE-MIT
@@ -0,0 +1,19 @@
+The MIT License (MIT)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/arkworks/poly-commit/README.md b/arkworks/poly-commit/README.md
new file mode 100644
index 00000000..6f5a3b9c
--- /dev/null
+++ b/arkworks/poly-commit/README.md
@@ -0,0 +1,87 @@
+<h1 align="center">Polynomial Commitments</h1>
+
+`poly-commit` is a Rust library that implements *polynomial commitment schemes*. This library was initially developed as part of the [Marlin paper][marlin], and is released under the MIT License and the Apache v2 License (see [License](#license)).
+
+**WARNING:** This is an academic prototype, and in particular has not received careful code review. This implementation is NOT ready for production use.
+
+## Overview
+
+A polynomial commitment scheme is a cryptographic primitive that enables a party to commit to a polynomial over a given finite field, and then, later on, to reveal desired evaluations of the polynomial along with cryptographic proofs attesting to their correctness.
+
+This library provides various constructions of polynomial commitment schemes. These constructions support committing to multiple polynomials at a time with differing degree bounds, batching multiple evaluation proofs for the same evaluation point into a single one, and batch verification of proofs.
+
+The key properties satisfied by the polynomial commitment schemes are **succinctness**, **extractability**, and **hiding**. See [the Marlin paper][marlin] for definitions of these properties.
+
+[kzg10]: http://cacr.uwaterloo.ca/techreports/2010/cacr2010-10.pdf
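+The flow is: run a one-time `setup`, `trim` the resulting parameters down to the degrees you need, `commit` to labeled polynomials, and then `open`/`check` evaluation proofs. The snippet below is a minimal sketch of that flow (illustrative only: the `MarlinKZG10`-over-BLS12-381 instantiation, the type alias, and the error handling are our choices for exposition, not a documented example):
+
+```rust
+use ark_bls12_381::{Bls12_381, Fr};
+use ark_ff::UniformRand;
+use ark_poly::{univariate::DensePolynomial, UVPolynomial};
+use ark_poly_commit::{marlin_pc::MarlinKZG10, LabeledPolynomial, PolynomialCommitment};
+
+type PC = MarlinKZG10<Bls12_381, DensePolynomial<Fr>>;
+
+fn main() -> Result<(), ark_poly_commit::Error> {
+    let rng = &mut ark_std::test_rng();
+
+    // One-time universal setup, then trim to the degree actually needed.
+    let pp = PC::setup(32, None, rng)?;
+    let (ck, vk) = PC::trim(&pp, 32, 1, None)?;
+
+    // Commit to a random degree-32 polynomial (hiding bound 1 = opened once).
+    let p = DensePolynomial::rand(32, rng);
+    let p = LabeledPolynomial::new("p".to_string(), p, None, Some(1));
+    let (comms, rands) = PC::commit(&ck, vec![&p], Some(rng))?;
+
+    // Open at a random point and verify the claimed evaluation.
+    let point = Fr::rand(rng);
+    let value = p.evaluate(&point);
+    let challenge = Fr::rand(rng);
+    let proof = PC::open(&ck, vec![&p], &comms, &point, challenge, &rands, Some(rng))?;
+    assert!(PC::check(&vk, &comms, &point, vec![value], &proof, challenge, Some(rng))?);
+    Ok(())
+}
+```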
+## Build guide
+
+The library compiles on the `stable` toolchain of the Rust compiler. To install the latest version of Rust, first install `rustup` by following the instructions [here](https://rustup.rs/), or via your platform's package manager. Once `rustup` is installed, install the Rust toolchain by invoking:
+```bash
+rustup install stable
+```
+
+After that, use `cargo` (the standard Rust build tool) to build the library:
+```bash
+git clone https://github.com/scipr-lab/poly-commit.git
+cd poly-commit
+cargo build --release
+```
+
+This library comes with some unit and integration tests. Run these tests with:
+```bash
+cargo test
+```
+
+Lastly, this library is instrumented with profiling infrastructure that prints detailed traces of execution time. To enable this, compile with `cargo build --features print-trace`.
+
+## License
+
+This library is licensed under either of the following licenses, at your discretion.
+
+ * [Apache License Version 2.0](LICENSE-APACHE)
+ * [MIT License](LICENSE-MIT)
+
+Unless you explicitly state otherwise, any contribution that you submit to this library shall be dual licensed as above (as defined in the Apache v2 License), without any additional terms or conditions.
+
+[marlin]: https://ia.cr/2019/1047
+[sonic]: https://ia.cr/2019/099
+[aurora-light]: https://ia.cr/2019/601
+[pcd-acc]: https://ia.cr/2020/499
+[pst]: https://ia.cr/2011/587
+
+## Reference papers
+
+[Polynomial Commitments][kzg10]
+Aniket Kate, Gregory M. Zaverucha, Ian Goldberg
+ASIACRYPT 2010
+
+[Sonic: Zero-Knowledge SNARKs from Linear-Size Universal and Updateable Structured Reference Strings][sonic]
+Mary Maller, Sean Bowe, Markulf Kohlweiss, Sarah Meiklejohn
+CCS 2019
+
+[AuroraLight: Improved Prover Efficiency and SRS Size in a Sonic-Like System][aurora-light]
+Ariel Gabizon
+ePrint, 2019
+
+[Marlin: Preprocessing zkSNARKs with Universal and Updatable SRS][marlin]
+Alessandro Chiesa, Yuncong Hu, Mary Maller, [Pratyush Mishra](https://www.github.com/pratyush), Noah Vesely, [Nicholas Ward](https://www.github.com/npwardberkeley)
+EUROCRYPT 2020
+
+[Proof-Carrying Data from Accumulation Schemes][pcd-acc]
+Benedikt Bünz, Alessandro Chiesa, [Pratyush Mishra](https://www.github.com/pratyush), Nicholas Spooner
+TCC 2020
+
+[Signatures of Correct Computation][pst]
+Charalampos Papamanthou, Elaine Shi, Roberto Tamassia
+TCC 2013
+
+## Acknowledgements
+
+This work was supported by: an Engineering and Physical Sciences Research Council grant; a Google Faculty Award; the RISELab at UC Berkeley; and donations from the Ethereum Foundation and the Interchain Foundation.
diff --git a/arkworks/poly-commit/scripts/install-hook.sh b/arkworks/poly-commit/scripts/install-hook.sh
new file mode 100755
index 00000000..eafcf818
--- /dev/null
+++ b/arkworks/poly-commit/scripts/install-hook.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+# This script will install the provided directory ../.hooks as the hook
+# directory for the present repo. See there for hooks, including a pre-commit
+# hook that runs rustfmt on files before a commit.
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+HOOKS_DIR="${DIR}/../.hooks"
+
+git config core.hooksPath "$HOOKS_DIR"
diff --git a/arkworks/poly-commit/scripts/linkify_changelog.py b/arkworks/poly-commit/scripts/linkify_changelog.py
new file mode 100644
index 00000000..867ae14d
--- /dev/null
+++ b/arkworks/poly-commit/scripts/linkify_changelog.py
@@ -0,0 +1,31 @@
+import re
+import sys
+import fileinput
+import os
+
+# Set this to the name of the repo, if you don't want it to be read from the filesystem.
+# It assumes the changelog file is in the root of the repo.
+repo_name = ""
+
+# This script goes through the provided file, and replaces any " \#<number>",
+# with the valid markdown formatted link to it. e.g.
+# " [\#number](https://github.com/arkworks-rs/template/pull/<number>)
+# Note that if the number is for an issue, github will auto-redirect you when you click the link.
+# It is safe to run the script multiple times in succession.
+#
+# Example usage $ python3 linkify_changelog.py ../CHANGELOG.md
+if len(sys.argv) < 2:
+    print("Must include path to changelog as the first argument to the script")
+    print("Example Usage: python3 linkify_changelog.py ../CHANGELOG.md")
+    exit()
+
+changelog_path = sys.argv[1]
+if repo_name == "":
+    path = os.path.abspath(changelog_path)
+    components = path.split(os.path.sep)
+    repo_name = components[-2]
+
+for line in fileinput.input(inplace=True):
+    line = re.sub(r"\- #([0-9]*)", r"- [\\#\1](https://github.com/arkworks-rs/" + repo_name + r"/pull/\1)", line.rstrip())
+    # edits the current file
+    print(line)
\ No newline at end of file
diff --git a/arkworks/poly-commit/src/constraints.rs b/arkworks/poly-commit/src/constraints.rs
new file mode 100644
index 00000000..41fe3582
--- /dev/null
+++ b/arkworks/poly-commit/src/constraints.rs
@@ -0,0 +1,197 @@
+use crate::{
+    data_structures::LabeledCommitment, BatchLCProof, LCTerm, LinearCombination,
+    PolynomialCommitment, String, Vec,
+};
+use ark_ff::PrimeField;
+use ark_nonnative_field::NonNativeFieldVar;
+use ark_poly::Polynomial;
+use ark_r1cs_std::{fields::fp::FpVar, prelude::*};
+use ark_relations::r1cs::{ConstraintSystemRef, Namespace, Result as R1CSResult, SynthesisError};
+use ark_std::{borrow::Borrow, cmp::Eq, cmp::PartialEq, hash::Hash, marker::Sized};
+use hashbrown::{HashMap, HashSet};
+
+/// Define the minimal interface of prepared allocated structures.
+pub trait PrepareGadget<Unprepared, ConstraintF: PrimeField>: Sized {
+    /// Prepare from an unprepared element.
+    fn prepare(unprepared: &Unprepared) -> R1CSResult<Self>;
+}
+
+/// A coefficient of `LinearCombination`.
+#[derive(Clone)]
+pub enum LinearCombinationCoeffVar<TargetField: PrimeField, BaseField: PrimeField> {
+    /// Coefficient 1.
+    One,
+    /// Coefficient -1.
+    MinusOne,
+    /// Other coefficient, represented as a nonnative field element.
+    Var(NonNativeFieldVar<TargetField, BaseField>),
+}
+
+/// An allocated version of `LinearCombination`.
+#[derive(Clone)]
+pub struct LinearCombinationVar<TargetField: PrimeField, BaseField: PrimeField> {
+    /// The label.
+    pub label: String,
+    /// The linear combination of `(coeff, poly_label)` pairs.
+    pub terms: Vec<(LinearCombinationCoeffVar<TargetField, BaseField>, LCTerm)>,
+}
+
+impl<TargetField: PrimeField, BaseField: PrimeField>
+    AllocVar<LinearCombination<TargetField>, BaseField>
+    for LinearCombinationVar<TargetField, BaseField>
+{
+    fn new_variable<T>(
+        cs: impl Into<Namespace<BaseField>>,
+        val: impl FnOnce() -> Result<T, SynthesisError>,
+        mode: AllocationMode,
+    ) -> R1CSResult<Self>
+    where
+        T: Borrow<LinearCombination<TargetField>>,
+    {
+        let LinearCombination { label, terms } = val()?.borrow().clone();
+
+        let ns = cs.into();
+        let cs = ns.cs();
+
+        let new_terms: Vec<(LinearCombinationCoeffVar<TargetField, BaseField>, LCTerm)> = terms
+            .iter()
+            .map(|term| {
+                let (f, lc_term) = term;
+
+                let fg =
+                    NonNativeFieldVar::new_variable(ark_relations::ns!(cs, "term"), || Ok(f), mode)
+                        .unwrap();
+
+                (LinearCombinationCoeffVar::Var(fg), lc_term.clone())
+            })
+            .collect();
+
+        Ok(Self {
+            label,
+            terms: new_terms,
+        })
+    }
+}
+
+#[derive(Clone, Debug)]
+/// A collection of random data used in the polynomial commitment checking.
+pub struct PCCheckRandomDataVar<TargetField: PrimeField, BaseField: PrimeField> {
+    /// Opening challenges.
+    /// The prover and the verifier MUST use the same opening challenges.
+    pub opening_challenges: Vec<NonNativeFieldVar<TargetField, BaseField>>,
+    /// Bit representations of the opening challenges.
+    pub opening_challenges_bits: Vec<Vec<Boolean<BaseField>>>,
+    /// Batching random numbers.
+    /// The verifier can choose these numbers freely, as long as they are random.
+    pub batching_rands: Vec<NonNativeFieldVar<TargetField, BaseField>>,
+    /// Bit representations of the batching random numbers.
+    pub batching_rands_bits: Vec<Vec<Boolean<BaseField>>>,
+}
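+// The `_bits` fields duplicate each challenge as a bit decomposition because
+// the group-arithmetic gadgets consume scalars bit by bit when multiplying
+// commitment variables; keeping both forms avoids re-decomposing inside the
+// circuit (a sketch of the intended usage, inferred from the field names and
+// the nonnative-field gadgets this module relies on).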
+/// Describes the interface for a gadget for a `PolynomialCommitment`
+/// verifier.
+pub trait PCCheckVar<
+    PCF: PrimeField,
+    P: Polynomial<PCF>,
+    PC: PolynomialCommitment<PCF, P>,
+    ConstraintF: PrimeField,
+>: Clone
+{
+    /// An allocated version of `PC::VerifierKey`.
+    type VerifierKeyVar: AllocVar<PC::VerifierKey, ConstraintF> + Clone + ToBytesGadget<ConstraintF>;
+    /// An allocated version of `PC::PreparedVerifierKey`.
+    type PreparedVerifierKeyVar: AllocVar<PC::PreparedVerifierKey, ConstraintF>
+        + Clone
+        + PrepareGadget<Self::VerifierKeyVar, ConstraintF>;
+    /// An allocated version of `PC::Commitment`.
+    type CommitmentVar: AllocVar<PC::Commitment, ConstraintF> + Clone + ToBytesGadget<ConstraintF>;
+    /// An allocated version of `PC::PreparedCommitment`.
+    type PreparedCommitmentVar: AllocVar<PC::PreparedCommitment, ConstraintF>
+        + PrepareGadget<Self::CommitmentVar, ConstraintF>
+        + Clone;
+    /// An allocated version of `LabeledCommitment`.
+    type LabeledCommitmentVar: AllocVar<LabeledCommitment<PC::Commitment>, ConstraintF> + Clone;
+    /// A prepared, allocated version of `LabeledCommitment`.
+    type PreparedLabeledCommitmentVar: Clone;
+    /// An allocated version of `PC::Proof`.
+    type ProofVar: AllocVar<PC::Proof, ConstraintF> + Clone;
+
+    /// An allocated version of `PC::BatchLCProof`.
+    type BatchLCProofVar: AllocVar<BatchLCProof<PCF, P, PC>, ConstraintF> + Clone;
+
+    /// Add to `ConstraintSystemRef` new constraints that check that `proof_i` is a valid evaluation
+    /// proof at `point_i` for the polynomial in `commitment_i`.
+    fn batch_check_evaluations(
+        cs: ConstraintSystemRef<ConstraintF>,
+        verification_key: &Self::VerifierKeyVar,
+        commitments: &[Self::LabeledCommitmentVar],
+        query_set: &QuerySetVar<PCF, ConstraintF>,
+        evaluations: &EvaluationsVar<PCF, ConstraintF>,
+        proofs: &[Self::ProofVar],
+        rand_data: &PCCheckRandomDataVar<PCF, ConstraintF>,
+    ) -> R1CSResult<Boolean<ConstraintF>>;
+
+    /// Add to `ConstraintSystemRef` new constraints that conditionally check that `proof` is a valid evaluation
+    /// proof at the points in `query_set` for the combinations `linear_combinations`.
+    fn prepared_check_combinations(
+        cs: ConstraintSystemRef<ConstraintF>,
+        prepared_verification_key: &Self::PreparedVerifierKeyVar,
+        linear_combinations: &[LinearCombinationVar<PCF, ConstraintF>],
+        prepared_commitments: &[Self::PreparedLabeledCommitmentVar],
+        query_set: &QuerySetVar<PCF, ConstraintF>,
+        evaluations: &EvaluationsVar<PCF, ConstraintF>,
+        proof: &Self::BatchLCProofVar,
+        rand_data: &PCCheckRandomDataVar<PCF, ConstraintF>,
+    ) -> R1CSResult<Boolean<ConstraintF>>;
+
+    /// Create the labeled commitment gadget from the commitment gadget
+    fn create_labeled_commitment(
+        label: String,
+        commitment: Self::CommitmentVar,
+        degree_bound: Option<FpVar<ConstraintF>>,
+    ) -> Self::LabeledCommitmentVar;
+
+    /// Create the prepared labeled commitment gadget from the commitment gadget
+    fn create_prepared_labeled_commitment(
+        label: String,
+        commitment: Self::PreparedCommitmentVar,
+        degree_bound: Option<FpVar<ConstraintF>>,
+    ) -> Self::PreparedLabeledCommitmentVar;
+}
+
+#[derive(Clone, Hash, PartialEq, Eq)]
+/// A labeled point variable, for queries to a polynomial commitment.
+pub struct LabeledPointVar<TargetField: PrimeField, BaseField: PrimeField> {
+    /// The label of the point.
+    /// MUST be a unique identifier in a query set.
+    pub name: String,
+    /// The point value.
+    pub value: NonNativeFieldVar<TargetField, BaseField>,
+}
+
+/// An allocated version of `QuerySet`.
+#[derive(Clone)]
+pub struct QuerySetVar<TargetField: PrimeField, BaseField: PrimeField>(
+    pub HashSet<(String, LabeledPointVar<TargetField, BaseField>)>,
+);
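+// Mirroring the native `QuerySet`, an element ("w", point_var) of the set
+// with `point_var.name == "beta"` reads: "open the polynomial labeled `w`
+// at the point labeled `beta`" (illustrative labels, not from the codebase).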
+/// An allocated version of `Evaluations`.
+#[derive(Clone)]
+pub struct EvaluationsVar<TargetField: PrimeField, BaseField: PrimeField>(
+    pub HashMap<LabeledPointVar<TargetField, BaseField>, NonNativeFieldVar<TargetField, BaseField>>,
+);
+
+impl<TargetField: PrimeField, BaseField: PrimeField> EvaluationsVar<TargetField, BaseField> {
+    /// find the evaluation result
+    pub fn get_lc_eval(
+        &self,
+        lc_string: &str,
+        point: &NonNativeFieldVar<TargetField, BaseField>,
+    ) -> Result<NonNativeFieldVar<TargetField, BaseField>, SynthesisError> {
+        let key = LabeledPointVar::<TargetField, BaseField> {
+            name: String::from(lc_string),
+            value: point.clone(),
+        };
+        Ok(self.0.get(&key).map(|v| (*v).clone()).unwrap())
+    }
+}
diff --git a/arkworks/poly-commit/src/data_structures.rs b/arkworks/poly-commit/src/data_structures.rs
new file mode 100644
index 00000000..e07c2e6a
--- /dev/null
+++ b/arkworks/poly-commit/src/data_structures.rs
@@ -0,0 +1,395 @@
+use crate::{Polynomial, PolynomialCommitment, Rc, String, Vec};
+use ark_ff::{Field, ToConstraintField};
+use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError};
+use ark_std::rand::RngCore;
+use ark_std::{
+    borrow::Borrow,
+    io::{Read, Write},
+    marker::PhantomData,
+    ops::{AddAssign, MulAssign, SubAssign},
+};
+
+/// Labels a `LabeledPolynomial` or a `LabeledCommitment`.
+pub type PolynomialLabel = String;
+
+/// Defines the minimal interface for public params for any polynomial
+/// commitment scheme.
+pub trait PCUniversalParams:
+    Clone + core::fmt::Debug + CanonicalSerialize + CanonicalDeserialize
+{
+    /// Outputs the maximum degree supported by the committer key.
+    fn max_degree(&self) -> usize;
+}
+
+/// Defines the minimal interface of committer keys for any polynomial
+/// commitment scheme.
+pub trait PCCommitterKey:
+    Clone + core::fmt::Debug + CanonicalSerialize + CanonicalDeserialize
+{
+    /// Outputs the maximum degree supported by the universal parameters
+    /// `Self` was derived from.
+    fn max_degree(&self) -> usize;
+
+    /// Outputs the maximum degree supported by the committer key.
+    fn supported_degree(&self) -> usize;
+}
+
+/// Defines the minimal interface of verifier keys for any polynomial
+/// commitment scheme.
+pub trait PCVerifierKey:
+    Clone + core::fmt::Debug + CanonicalSerialize + CanonicalDeserialize
+{
+    /// Outputs the maximum degree supported by the universal parameters
+    /// `Self` was derived from.
+    fn max_degree(&self) -> usize;
+
+    /// Outputs the maximum degree supported by the verifier key.
+    fn supported_degree(&self) -> usize;
+}
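+// Illustrative numbers (not from the codebase): parameters generated for
+// `max_degree() == 1024` can be trimmed down to committer/verifier keys with
+// `supported_degree() == 32`; the trimmed keys still report
+// `max_degree() == 1024`, which is exactly the distinction the two methods
+// in the traits above draw.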
+/// Defines the minimal interface of prepared verifier keys for any polynomial
+/// commitment scheme.
+pub trait PCPreparedVerifierKey<Unprepared: PCVerifierKey> {
+    /// prepare
+    fn prepare(vk: &Unprepared) -> Self;
+}
+
+/// Defines the minimal interface of commitments for any polynomial
+/// commitment scheme.
+pub trait PCCommitment:
+    Clone + ark_ff::ToBytes + CanonicalSerialize + CanonicalDeserialize
+{
+    /// Outputs a non-hiding commitment to the zero polynomial.
+    fn empty() -> Self;
+
+    /// Does this commitment have a degree bound?
+    fn has_degree_bound(&self) -> bool;
+
+    /// Size in bytes
+    fn size_in_bytes(&self) -> usize;
+}
+
+/// Defines the minimal interface of prepared commitments for any polynomial
+/// commitment scheme.
+pub trait PCPreparedCommitment<UNPREPARED: PCCommitment>: Clone {
+    /// prepare
+    fn prepare(comm: &UNPREPARED) -> Self;
+}
+
+/// Defines the minimal interface of commitment randomness for any polynomial
+/// commitment scheme.
+pub trait PCRandomness: Clone + CanonicalSerialize + CanonicalDeserialize {
+    /// Outputs empty randomness that does not hide the commitment.
+    fn empty() -> Self;
+
+    /// Samples randomness for commitments;
+    /// `num_queries` specifies the number of queries that the commitment will be opened at.
+    /// `has_degree_bound` indicates that the corresponding commitment has an enforced
+    /// strict degree bound.
+    /// `num_vars` specifies the number of variables for multivariate commitment.
+    fn rand<R: RngCore>(
+        num_queries: usize,
+        has_degree_bound: bool,
+        num_vars: Option<usize>,
+        rng: &mut R,
+    ) -> Self;
+}
+
+/// Defines the minimal interface of evaluation proofs for any polynomial
+/// commitment scheme.
+pub trait PCProof: Clone + ark_ff::ToBytes + CanonicalSerialize + CanonicalDeserialize {
+    /// Size in bytes
+    fn size_in_bytes(&self) -> usize;
+}
+
+/// A proof of satisfaction of linear combinations.
+#[derive(Clone, CanonicalSerialize, CanonicalDeserialize)]
+pub struct BatchLCProof<F: Field, P: Polynomial<F>, PC: PolynomialCommitment<F, P>> {
+    /// Evaluation proof.
+    pub proof: PC::BatchProof,
+    /// Evaluations required to verify the proof.
+    pub evals: Option<Vec<F>>,
+}
+
+/// A polynomial along with information about its degree bound (if any), and the
+/// maximum number of queries that will be made to it. This latter number determines
+/// the amount of protection that will be provided to a commitment for this polynomial.
+#[derive(Debug, Clone, CanonicalSerialize, CanonicalDeserialize)]
+pub struct LabeledPolynomial<F: Field, P: Polynomial<F>> {
+    label: PolynomialLabel,
+    polynomial: Rc<P>
, + degree_bound: Option, + hiding_bound: Option, + _field: PhantomData, +} + +impl<'a, F: Field, P: Polynomial> core::ops::Deref for LabeledPolynomial { + type Target = P; + + fn deref(&self) -> &Self::Target { + &self.polynomial + } +} + +impl<'a, F: Field, P: Polynomial> LabeledPolynomial { + /// Construct a new labeled polynomial. + pub fn new( + label: PolynomialLabel, + polynomial: P, + degree_bound: Option, + hiding_bound: Option, + ) -> Self { + Self { + label, + polynomial: Rc::new(polynomial), + degree_bound, + hiding_bound, + _field: PhantomData, + } + } + + /// Return the label for `self`. + pub fn label(&self) -> &String { + &self.label + } + + /// Retrieve the polynomial from `self` + pub fn polynomial(&self) -> &P { + &self.polynomial + } + + /// Evaluate the polynomial in `self`. + pub fn evaluate(&self, point: &P::Point) -> F { + self.polynomial.evaluate(point) + } + + /// Retrieve the degree of the polynomial in `self`. + pub fn degree(&self) -> usize { + self.polynomial.degree() + } + + /// Retrieve the degree bound in `self`. + pub fn degree_bound(&self) -> Option { + self.degree_bound + } + + /// Retrieve whether the polynomial in `self` should be hidden. + pub fn is_hiding(&self) -> bool { + self.hiding_bound.is_some() + } + + /// Retrieve the hiding bound for the polynomial in `self`. + pub fn hiding_bound(&self) -> Option { + self.hiding_bound + } +} + +/// A commitment along with information about its degree bound (if any). +#[derive(Clone)] +pub struct LabeledCommitment { + label: PolynomialLabel, + commitment: C, + degree_bound: Option, +} + +impl> ToConstraintField + for LabeledCommitment +{ + fn to_field_elements(&self) -> Option> { + self.commitment.to_field_elements() + } +} + +impl LabeledCommitment { + /// Instantiate a new polynomial_context. + pub fn new(label: PolynomialLabel, commitment: C, degree_bound: Option) -> Self { + Self { + label, + commitment, + degree_bound, + } + } + + /// Return the label for `self`. + pub fn label(&self) -> &String { + &self.label + } + + /// Retrieve the commitment from `self`. + pub fn commitment(&self) -> &C { + &self.commitment + } + + /// Retrieve the degree bound in `self`. + pub fn degree_bound(&self) -> Option { + self.degree_bound + } +} + +impl ark_ff::ToBytes for LabeledCommitment { + #[inline] + fn write(&self, writer: W) -> ark_std::io::Result<()> { + self.commitment.write(writer) + } +} + +/// A term in a linear combination. +#[derive(Hash, Ord, PartialOrd, Clone, Eq, PartialEq, Debug)] +pub enum LCTerm { + /// The constant term representing `one`. + One, + /// Label for a polynomial. 
+ PolyLabel(String), +} + +impl LCTerm { + /// Returns `true` if `self == LCTerm::One` + #[inline] + pub fn is_one(&self) -> bool { + if let LCTerm::One = self { + true + } else { + false + } + } +} + +impl From for LCTerm { + fn from(other: PolynomialLabel) -> Self { + Self::PolyLabel(other) + } +} + +impl<'a> From<&'a str> for LCTerm { + fn from(other: &str) -> Self { + Self::PolyLabel(other.into()) + } +} + +impl core::convert::TryInto for LCTerm { + type Error = (); + fn try_into(self) -> Result { + match self { + Self::One => Err(()), + Self::PolyLabel(l) => Ok(l), + } + } +} + +impl<'a> core::convert::TryInto<&'a PolynomialLabel> for &'a LCTerm { + type Error = (); + + fn try_into(self) -> Result<&'a PolynomialLabel, ()> { + match self { + LCTerm::One => Err(()), + LCTerm::PolyLabel(l) => Ok(l), + } + } +} + +impl> PartialEq for LCTerm { + fn eq(&self, other: &B) -> bool { + match self { + Self::One => false, + Self::PolyLabel(l) => l == other.borrow(), + } + } +} + +/// A labeled linear combinations of polynomials. +#[derive(Clone, Debug)] +pub struct LinearCombination { + /// The label. + pub label: String, + /// The linear combination of `(coeff, poly_label)` pairs. + pub terms: Vec<(F, LCTerm)>, +} + +impl LinearCombination { + /// Construct an empty labeled linear combination. + pub fn empty(label: impl Into) -> Self { + Self { + label: label.into(), + terms: Vec::new(), + } + } + + /// Construct a new labeled linear combination. + /// with the terms specified in `term`. + pub fn new(label: impl Into, terms: Vec<(F, impl Into)>) -> Self { + let terms = terms.into_iter().map(|(c, t)| (c, t.into())).collect(); + Self { + label: label.into(), + terms: terms, + } + } + + /// Returns the label of the linear combination. + pub fn label(&self) -> &String { + &self.label + } + + /// Returns `true` if the linear combination has no terms. + pub fn is_empty(&self) -> bool { + self.terms.is_empty() + } + + /// Add a term to the linear combination. 
+ pub fn push(&mut self, term: (F, LCTerm)) -> &mut Self { + self.terms.push(term); + self + } +} + +impl<'a, F: Field> AddAssign<(F, &'a LinearCombination)> for LinearCombination { + fn add_assign(&mut self, (coeff, other): (F, &'a LinearCombination)) { + self.terms + .extend(other.terms.iter().map(|(c, t)| (coeff * c, t.clone()))); + } +} + +impl<'a, F: Field> SubAssign<(F, &'a LinearCombination)> for LinearCombination { + fn sub_assign(&mut self, (coeff, other): (F, &'a LinearCombination)) { + self.terms + .extend(other.terms.iter().map(|(c, t)| (-coeff * c, t.clone()))); + } +} + +impl<'a, F: Field> AddAssign<&'a LinearCombination> for LinearCombination { + fn add_assign(&mut self, other: &'a LinearCombination) { + self.terms.extend(other.terms.iter().cloned()); + } +} + +impl<'a, F: Field> SubAssign<&'a LinearCombination> for LinearCombination { + fn sub_assign(&mut self, other: &'a LinearCombination) { + self.terms + .extend(other.terms.iter().map(|(c, t)| (-*c, t.clone()))); + } +} + +impl AddAssign for LinearCombination { + fn add_assign(&mut self, coeff: F) { + self.terms.push((coeff, LCTerm::One)); + } +} + +impl SubAssign for LinearCombination { + fn sub_assign(&mut self, coeff: F) { + self.terms.push((-coeff, LCTerm::One)); + } +} + +impl MulAssign for LinearCombination { + fn mul_assign(&mut self, coeff: F) { + self.terms.iter_mut().for_each(|(c, _)| *c *= coeff); + } +} + +impl core::ops::Deref for LinearCombination { + type Target = [(F, LCTerm)]; + + fn deref(&self) -> &Self::Target { + &self.terms + } +} diff --git a/arkworks/poly-commit/src/error.rs b/arkworks/poly-commit/src/error.rs new file mode 100644 index 00000000..de7091eb --- /dev/null +++ b/arkworks/poly-commit/src/error.rs @@ -0,0 +1,186 @@ +use crate::String; + +/// The error type for `PolynomialCommitment`. +#[derive(Debug)] +pub enum Error { + /// The query set contains a label for a polynomial that was not provided as + /// input to the `PC::open`. + MissingPolynomial { + /// The label of the missing polynomial. + label: String, + }, + + /// `Evaluations` does not contain an evaluation for the polynomial labelled + /// `label` at a particular query. + MissingEvaluation { + /// The label of the missing polynomial. + label: String, + }, + + /// The LHS of the equation is empty. + MissingLHS { + /// The label of the equation. + label: String, + }, + + /// The provided polynomial was meant to be hiding, but `rng` was `None`. + MissingRng, + + /// The degree provided in setup was too small; degree 0 polynomials + /// are not supported. + DegreeIsZero, + + /// The degree of the polynomial passed to `commit` or `open` + /// was too large. + TooManyCoefficients { + /// The number of coefficients in the polynomial. + num_coefficients: usize, + /// The maximum number of powers provided in `Powers`. + num_powers: usize, + }, + + /// The hiding bound was not `None`, but the hiding bound was zero. + HidingBoundIsZero, + + /// The hiding bound was too large for the given `Powers`. + HidingBoundToolarge { + /// The hiding bound + hiding_poly_degree: usize, + /// The number of powers. + num_powers: usize, + }, + + /// The degree provided to `trim` was too large. + TrimmingDegreeTooLarge, + + /// The provided `enforced_degree_bounds` was `Some<&[]>`. + EmptyDegreeBounds, + + /// The provided equation contained multiple polynomials, of which least one + /// had a strict degree bound. 
+    EquationHasDegreeBounds(String),
+
+    /// The required degree bound is not supported by ck/vk
+    UnsupportedDegreeBound(usize),
+
+    /// The degree bound for the `index`-th polynomial passed to `commit`, `open`
+    /// or `check` was incorrect, that is, `degree_bound < poly_degree` or
+    /// `degree_bound > supported_degree`.
+    IncorrectDegreeBound {
+        /// Degree of the polynomial.
+        poly_degree: usize,
+        /// Degree bound.
+        degree_bound: usize,
+        /// Maximum supported degree.
+        supported_degree: usize,
+        /// Index of the offending polynomial.
+        label: String,
+    },
+
+    /// The inputs to `commit`, `open` or `verify` had incorrect lengths.
+    IncorrectInputLength(String),
+
+    /// An invalid number of variables was provided to `setup`
+    InvalidNumberOfVariables,
+
+    /// The degree of the `index`-th polynomial passed to `commit`, `open`
+    /// or `check` was incorrect, that is, `poly_degree > supported_degree`
+    PolynomialDegreeTooLarge {
+        /// Degree of the polynomial.
+        poly_degree: usize,
+        /// Maximum supported degree.
+        supported_degree: usize,
+        /// Index of the offending polynomial.
+        label: String,
+    },
+}
+
+impl core::fmt::Display for Error {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        match self {
+            Error::MissingPolynomial { label } => write!(
+                f,
+                "`QuerySet` refers to polynomial \"{}\", but it was not provided.",
+                label
+            ),
+            Error::MissingEvaluation { label } => write!(
+                f,
+                "`QuerySet` refers to polynomial \"{}\", but `Evaluations` does not contain an evaluation for it.",
+                label
+            ),
+            Error::MissingLHS { label } => {
+                write!(f, "Equation \"{}\" does not have a LHS.", label)
+            },
+            Error::MissingRng => write!(f, "hiding commitments require `Some(rng)`"),
+            Error::DegreeIsZero => write!(
+                f,
+                "this scheme does not support committing to degree 0 polynomials"
+            ),
+            Error::TooManyCoefficients {
+                num_coefficients,
+                num_powers,
+            } => write!(
+                f,
+                "the number of coefficients in the polynomial ({:?}) is greater than\
+                 the maximum number of powers in `Powers` ({:?})",
+                num_coefficients, num_powers
+            ),
+            Error::HidingBoundIsZero => write!(
+                f,
+                "this scheme does not support non-`None` hiding bounds that are 0"
+            ),
+            Error::HidingBoundToolarge {
+                hiding_poly_degree,
+                num_powers,
+            } => write!(
+                f,
+                "the degree of the hiding poly ({:?}) is not less than the maximum number of powers in `Powers` ({:?})",
+                hiding_poly_degree, num_powers
+            ),
+            Error::TrimmingDegreeTooLarge => {
+                write!(f, "the degree provided to `trim` was too large")
+            }
+            Error::EmptyDegreeBounds => {
+                write!(f, "provided `enforced_degree_bounds` was `Some<&[]>`")
+            }
+            Error::EquationHasDegreeBounds(e) => write!(
+                f,
+                "the equation \"{}\" contained degree-bounded polynomials",
+                e
+            ),
+            Error::UnsupportedDegreeBound(bound) => write!(
+                f,
+                "the degree bound ({:?}) is not supported by the parameters",
+                bound,
+            ),
+            Error::IncorrectDegreeBound {
+                poly_degree,
+                degree_bound,
+                supported_degree,
+                label,
+            } => write!(
+                f,
+                "the degree bound ({:?}) for the polynomial {} \
+                 (having degree {:?}) is greater than the maximum \
+                 supported degree ({:?})",
+                degree_bound, label, poly_degree, supported_degree
+            ),
+            Error::InvalidNumberOfVariables => write!(
+                f,
+                "An invalid number of variables was provided to `setup`"
+            ),
+            Error::PolynomialDegreeTooLarge {
+                poly_degree,
+                supported_degree,
+                label,
+            } => write!(
+                f,
+                "the polynomial {} has degree {:?}, but parameters only
+                support up to degree ({:?})", label, poly_degree, supported_degree
+            ),
+            Error::IncorrectInputLength(err) => write!(f, "{}", err),
+        }
+    }
+}
+
+impl ark_std::error::Error for Error {}
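+
+// A sketch of how these errors surface in practice (assumed setup, reusing
+// the `PC`, `ck`, and `rng` names from the README example; which variant is
+// returned is scheme-dependent):
+//
+//     // The key was trimmed to supported degree 32, so degree 40 is too big.
+//     let q = DensePolynomial::rand(40, rng);
+//     let q = LabeledPolynomial::new("q".to_string(), q, None, None);
+//     match PC::commit(&ck, vec![&q], None) {
+//         Err(e) => println!("commit failed: {}", e), // via the Display impl above
+//         Ok(_) => unreachable!("degree 40 exceeds the supported degree"),
+//     }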
diff --git a/arkworks/poly-commit/src/ipa_pc/data_structures.rs b/arkworks/poly-commit/src/ipa_pc/data_structures.rs
new file mode 100644
index 00000000..2925d90c
--- /dev/null
+++ b/arkworks/poly-commit/src/ipa_pc/data_structures.rs
@@ -0,0 +1,283 @@
+use crate::*;
+use crate::{PCCommitterKey, PCVerifierKey, Vec};
+use ark_ec::AffineCurve;
+use ark_ff::{Field, ToBytes, UniformRand, Zero};
+use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError};
+use ark_std::rand::RngCore;
+use ark_std::{
+    io::{Read, Write},
+    vec,
+};
+
+/// `UniversalParams` are the universal parameters for the inner product arg scheme.
+#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)]
+#[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))]
+pub struct UniversalParams<G: AffineCurve> {
+    /// The key used to commit to polynomials.
+    pub comm_key: Vec<G>,
+
+    /// Some group generator.
+    pub h: G,
+
+    /// Some group generator specifically used for hiding.
+    pub s: G,
+}
+
+impl<G: AffineCurve> PCUniversalParams for UniversalParams<G> {
+    fn max_degree(&self) -> usize {
+        self.comm_key.len() - 1
+    }
+}
+
+/// `CommitterKey` is used to commit to, and create evaluation proofs for, a given
+/// polynomial.
+#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)]
+#[derivative(
+    Default(bound = ""),
+    Hash(bound = ""),
+    Clone(bound = ""),
+    Debug(bound = "")
+)]
+pub struct CommitterKey<G: AffineCurve> {
+    /// The key used to commit to polynomials.
+    pub comm_key: Vec<G>,
+
+    /// A random group generator.
+    pub h: G,
+
+    /// A random group generator that is to be used to make
+    /// a commitment hiding.
+    pub s: G,
+
+    /// The maximum degree supported by the parameters
+    /// this key was derived from.
+    pub max_degree: usize,
+}
+
+impl<G: AffineCurve> PCCommitterKey for CommitterKey<G> {
+    fn max_degree(&self) -> usize {
+        self.max_degree
+    }
+    fn supported_degree(&self) -> usize {
+        self.comm_key.len() - 1
+    }
+}
+
+/// `VerifierKey` is used to check evaluation proofs for a given commitment.
+pub type VerifierKey<G> = CommitterKey<G>;
+
+impl<G: AffineCurve> PCVerifierKey for VerifierKey<G> {
+    fn max_degree(&self) -> usize {
+        self.max_degree
+    }
+
+    fn supported_degree(&self) -> usize {
+        self.comm_key.len() - 1
+    }
+}
+
+/// Nothing to do to prepare this verifier key (for now).
+pub type PreparedVerifierKey<G> = VerifierKey<G>;
+
+impl<G: AffineCurve> PCPreparedVerifierKey<VerifierKey<G>> for PreparedVerifierKey<G> {
+    /// prepare `PreparedVerifierKey` from `VerifierKey`
+    fn prepare(vk: &VerifierKey<G>) -> Self {
+        vk.clone()
+    }
+}
+
+/// Commitment to a polynomial that optionally enforces a degree bound.
+#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)]
+#[derivative(
+    Default(bound = ""),
+    Hash(bound = ""),
+    Clone(bound = ""),
+    Copy(bound = ""),
+    Debug(bound = ""),
+    PartialEq(bound = ""),
+    Eq(bound = "")
+)]
+pub struct Commitment<G: AffineCurve> {
+    /// A Pedersen commitment to the polynomial.
+    pub comm: G,
+
+    /// A Pedersen commitment to the shifted polynomial.
+    /// This is `None` if the committed polynomial does not
+    /// enforce a strict degree bound.
+    pub shifted_comm: Option<G>,
+}
+
+impl<G: AffineCurve> PCCommitment for Commitment<G> {
+    #[inline]
+    fn empty() -> Self {
+        Commitment {
+            comm: G::zero(),
+            shifted_comm: None,
+        }
+    }
+
+    fn has_degree_bound(&self) -> bool {
+        false
+    }
+
+    fn size_in_bytes(&self) -> usize {
+        ark_ff::to_bytes![G::zero()].unwrap().len() / 2
+    }
+}
+
+impl<G: AffineCurve> ToBytes for Commitment<G> {
+    #[inline]
+    fn write<W: Write>(&self, mut writer: W) -> ark_std::io::Result<()> {
+        self.comm.write(&mut writer)?;
+        let shifted_exists = self.shifted_comm.is_some();
+        shifted_exists.write(&mut writer)?;
+        self.shifted_comm
+            .as_ref()
+            .unwrap_or(&G::zero())
+            .write(&mut writer)
+    }
+}
+
+/// Nothing to do to prepare this commitment (for now).
+pub type PreparedCommitment<G> = Commitment<G>;
+
+impl<G: AffineCurve> PCPreparedCommitment<Commitment<G>> for PreparedCommitment<G> {
+    /// prepare `PreparedCommitment` from `Commitment`
+    fn prepare(vk: &Commitment<G>) -> Self {
+        vk.clone()
+    }
+}
+
+/// `Randomness` hides the polynomial inside a commitment and is outputted by `InnerProductArg::commit`.
+#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)]
+#[derivative(
+    Default(bound = ""),
+    Hash(bound = ""),
+    Clone(bound = ""),
+    Debug(bound = ""),
+    PartialEq(bound = ""),
+    Eq(bound = "")
+)]
+pub struct Randomness<G: AffineCurve> {
+    /// Randomness is some scalar field element.
+    pub rand: G::ScalarField,
+
+    /// Randomness applied to the shifted commitment is some scalar field element.
+    pub shifted_rand: Option<G::ScalarField>,
+}
+
+impl<G: AffineCurve> PCRandomness for Randomness<G> {
+    fn empty() -> Self {
+        Self {
+            rand: G::ScalarField::zero(),
+            shifted_rand: None,
+        }
+    }
+
+    fn rand<R: RngCore>(_: usize, has_degree_bound: bool, _: Option<usize>, rng: &mut R) -> Self {
+        let rand = G::ScalarField::rand(rng);
+        let shifted_rand = if has_degree_bound {
+            Some(G::ScalarField::rand(rng))
+        } else {
+            None
+        };
+
+        Self { rand, shifted_rand }
+    }
+}
+
+/// `Proof` is an evaluation proof that is output by `InnerProductArg::open`.
+#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)]
+#[derivative(
+    Default(bound = ""),
+    Hash(bound = ""),
+    Clone(bound = ""),
+    Debug(bound = "")
+)]
+pub struct Proof<G: AffineCurve> {
+    /// Vector of left elements for each of the log_d iterations in `open`
+    pub l_vec: Vec<G>,
+
+    /// Vector of right elements for each of the log_d iterations within `open`
+    pub r_vec: Vec<G>,
+
+    /// Committer key from the last iteration within `open`
+    pub final_comm_key: G,
+
+    /// Coefficient from the last iteration within `open`
+    pub c: G::ScalarField,
+
+    /// Commitment to the blinding polynomial.
+    pub hiding_comm: Option<G>,
+
+    /// Linear combination of all the randomness used for commitments
+    /// to the opened polynomials, along with the randomness used for the
+    /// commitment to the hiding polynomial.
+    pub rand: Option<G::ScalarField>,
+}
+
+impl<G: AffineCurve> PCProof for Proof<G> {
+    fn size_in_bytes(&self) -> usize {
+        ark_ff::to_bytes![self].unwrap().len()
+    }
+}
+
+impl<G: AffineCurve> ToBytes for Proof<G> {
+    #[inline]
+    fn write<W: Write>(&self, mut writer: W) -> ark_std::io::Result<()> {
+        self.l_vec.write(&mut writer)?;
+        self.r_vec.write(&mut writer)?;
+        self.final_comm_key.write(&mut writer)?;
+        self.c.write(&mut writer)?;
+        self.hiding_comm
+            .as_ref()
+            .unwrap_or(&G::zero())
+            .write(&mut writer)?;
+        self.rand
+            .as_ref()
+            .unwrap_or(&G::ScalarField::zero())
+            .write(&mut writer)
+    }
+}
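+// Concretely, with challenges (xi_1, ..., xi_{log_d}) the polynomial below is
+//
+//     h(X) = prod_{i=1}^{log_d} (1 + xi_i * X^(2^(log_d - i))),
+//
+// a product of log_d binomials: `evaluate` multiplies the factors directly,
+// while `compute_coeffs` expands the product into all 2^(log_d) coefficients.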
+/// `SuccinctCheckPolynomial` is a succinctly-represented polynomial
+/// generated from the `log_d` random oracle challenges generated in `open`.
+/// It has the special property that it can be evaluated in `O(log_d)` time.
+pub struct SuccinctCheckPolynomial<F: Field>(pub Vec<F>);
+
+impl<F: Field> SuccinctCheckPolynomial<F> {
+    /// Computes the coefficients of the underlying degree `d` polynomial.
+    pub fn compute_coeffs(&self) -> Vec<F> {
+        let challenges = &self.0;
+        let log_d = challenges.len();
+
+        let mut coeffs = vec![F::one(); 1 << log_d];
+        for (i, challenge) in challenges.iter().enumerate() {
+            let i = i + 1;
+            let elem_degree = 1 << (log_d - i);
+            for start in (elem_degree..coeffs.len()).step_by(elem_degree * 2) {
+                for offset in 0..elem_degree {
+                    coeffs[start + offset] *= challenge;
+                }
+            }
+        }
+
+        coeffs
+    }
+
+    /// Evaluate `self` at `point` in time `O(log_d)`.
+    pub fn evaluate(&self, point: F) -> F {
+        let challenges = &self.0;
+        let log_d = challenges.len();
+
+        let mut product = F::one();
+        for (i, challenge) in challenges.iter().enumerate() {
+            let i = i + 1;
+            let elem_degree: u64 = (1 << (log_d - i)) as u64;
+            let elem = point.pow([elem_degree]);
+            product *= &(F::one() + &(elem * challenge));
+        }
+
+        product
+    }
+}
diff --git a/arkworks/poly-commit/src/ipa_pc/mod.rs b/arkworks/poly-commit/src/ipa_pc/mod.rs
new file mode 100644
index 00000000..03ca3f8c
--- /dev/null
+++ b/arkworks/poly-commit/src/ipa_pc/mod.rs
@@ -0,0 +1,1163 @@
+use crate::{BTreeMap, BTreeSet, String, ToString, Vec};
+use crate::{BatchLCProof, Error, Evaluations, QuerySet, UVPolynomial};
+use crate::{LabeledCommitment, LabeledPolynomial, LinearCombination};
+use crate::{PCCommitterKey, PCRandomness, PCUniversalParams, PolynomialCommitment};
+
+use ark_ec::{msm::VariableBaseMSM, AffineCurve, ProjectiveCurve};
+use ark_ff::{to_bytes, Field, One, PrimeField, UniformRand, Zero};
+use ark_std::rand::RngCore;
+use ark_std::{convert::TryInto, format, marker::PhantomData, vec};
+
+mod data_structures;
+pub use data_structures::*;
+
+#[cfg(feature = "parallel")]
+use rayon::prelude::*;
+
+use digest::Digest;
+
+/// A polynomial commitment scheme based on the hardness of the
+/// discrete logarithm problem in prime-order groups.
+/// The construction is described in detail in [[BCMS20]][pcdas].
+///
+/// Degree bound enforcement requires that (at least one of) the points at
+/// which a committed polynomial is evaluated are from a distribution that is
+/// random conditioned on the polynomial. This is because degree bound
+/// enforcement relies on checking a polynomial identity at this point.
+/// More formally, the points must be sampled from an admissible query sampler,
+/// as detailed in [[CHMMVW20]][marlin].
+///
+/// [pcdas]: https://eprint.iacr.org/2020/499
+/// [marlin]: https://eprint.iacr.org/2019/1047
+pub struct InnerProductArgPC<G: AffineCurve, D: Digest, P: UVPolynomial<G::ScalarField>> {
+    _projective: PhantomData<G>,
+    _digest: PhantomData<D>,
+    _poly: PhantomData<P>
, +} + +impl> InnerProductArgPC { + /// `PROTOCOL_NAME` is used as a seed for the setup function. + pub const PROTOCOL_NAME: &'static [u8] = b"PC-DL-2020"; + + /// Create a Pedersen commitment to `scalars` using the commitment key `comm_key`. + /// Optionally, randomize the commitment using `hiding_generator` and `randomizer`. + fn cm_commit( + comm_key: &[G], + scalars: &[G::ScalarField], + hiding_generator: Option, + randomizer: Option, + ) -> G::Projective { + let scalars_bigint = ark_std::cfg_iter!(scalars) + .map(|s| s.into_repr()) + .collect::>(); + + let mut comm = VariableBaseMSM::multi_scalar_mul(comm_key, &scalars_bigint); + + if randomizer.is_some() { + assert!(hiding_generator.is_some()); + comm += &hiding_generator.unwrap().mul(randomizer.unwrap()); + } + + comm + } + + fn compute_random_oracle_challenge(bytes: &[u8]) -> G::ScalarField { + let mut i = 0u64; + let mut challenge = None; + while challenge.is_none() { + let hash_input = ark_ff::to_bytes![bytes, i].unwrap(); + let hash = D::digest(&hash_input); + challenge = ::from_random_bytes(&hash); + + i += 1; + } + + challenge.unwrap() + } + + #[inline] + fn inner_product(l: &[G::ScalarField], r: &[G::ScalarField]) -> G::ScalarField { + ark_std::cfg_iter!(l).zip(r).map(|(li, ri)| *li * ri).sum() + } + + /// The succinct portion of `PC::check`. This algorithm runs in time + /// O(log d), where d is the degree of the committed polynomials. + fn succinct_check<'a>( + vk: &VerifierKey, + commitments: impl IntoIterator>>, + point: G::ScalarField, + values: impl IntoIterator, + proof: &Proof, + opening_challenges: &dyn Fn(u64) -> G::ScalarField, + ) -> Option> { + let check_time = start_timer!(|| "Succinct checking"); + + let d = vk.supported_degree(); + + // `log_d` is ceil(log2 (d + 1)), which is the number of steps to compute all of the challenges + let log_d = ark_std::log2(d + 1) as usize; + + let mut combined_commitment_proj = G::Projective::zero(); + let mut combined_v = G::ScalarField::zero(); + + let mut opening_challenge_counter = 0; + let mut cur_challenge = opening_challenges(opening_challenge_counter); + opening_challenge_counter += 1; + + let labeled_commitments = commitments.into_iter(); + let values = values.into_iter(); + + for (labeled_commitment, value) in labeled_commitments.zip(values) { + let commitment = labeled_commitment.commitment(); + combined_v += &(cur_challenge * &value); + combined_commitment_proj += &labeled_commitment.commitment().comm.mul(cur_challenge); + cur_challenge = opening_challenges(opening_challenge_counter); + opening_challenge_counter += 1; + + let degree_bound = labeled_commitment.degree_bound(); + assert_eq!(degree_bound.is_some(), commitment.shifted_comm.is_some()); + + if let Some(degree_bound) = degree_bound { + let shift = point.pow([(vk.supported_degree() - degree_bound) as u64]); + combined_v += &(cur_challenge * &value * &shift); + combined_commitment_proj += &commitment.shifted_comm.unwrap().mul(cur_challenge); + } + + cur_challenge = opening_challenges(opening_challenge_counter); + opening_challenge_counter += 1; + } + + let mut combined_commitment = combined_commitment_proj.into_affine(); + + assert_eq!(proof.hiding_comm.is_some(), proof.rand.is_some()); + if proof.hiding_comm.is_some() { + let hiding_comm = proof.hiding_comm.unwrap(); + let rand = proof.rand.unwrap(); + + let hiding_challenge = Self::compute_random_oracle_challenge( + &ark_ff::to_bytes![combined_commitment, point, combined_v, hiding_comm].unwrap(), + ); + combined_commitment_proj += 
&(hiding_comm.mul(hiding_challenge) - &vk.s.mul(rand)); + combined_commitment = combined_commitment_proj.into_affine(); + } + + // Challenge for each round + let mut round_challenges = Vec::with_capacity(log_d); + let mut round_challenge = Self::compute_random_oracle_challenge( + &ark_ff::to_bytes![combined_commitment, point, combined_v].unwrap(), + ); + + let h_prime = vk.h.mul(round_challenge); + + let mut round_commitment_proj = + combined_commitment_proj + &h_prime.mul(&combined_v.into_repr()); + + let l_iter = proof.l_vec.iter(); + let r_iter = proof.r_vec.iter(); + + for (l, r) in l_iter.zip(r_iter) { + round_challenge = Self::compute_random_oracle_challenge( + &ark_ff::to_bytes![round_challenge, l, r].unwrap(), + ); + round_challenges.push(round_challenge); + round_commitment_proj += + &(l.mul(round_challenge.inverse().unwrap()) + &r.mul(round_challenge)); + } + + let check_poly = SuccinctCheckPolynomial::(round_challenges); + let v_prime = check_poly.evaluate(point) * &proof.c; + let h_prime = h_prime.into_affine(); + + let check_commitment_elem: G::Projective = Self::cm_commit( + &[proof.final_comm_key.clone(), h_prime], + &[proof.c.clone(), v_prime], + None, + None, + ); + + if !(round_commitment_proj - &check_commitment_elem).is_zero() { + end_timer!(check_time); + return None; + } + + end_timer!(check_time); + Some(check_poly) + } + + fn check_degrees_and_bounds( + supported_degree: usize, + p: &LabeledPolynomial, + ) -> Result<(), Error> { + if p.degree() > supported_degree { + return Err(Error::TooManyCoefficients { + num_coefficients: p.degree() + 1, + num_powers: supported_degree + 1, + }); + } + + if let Some(bound) = p.degree_bound() { + if bound < p.degree() || bound > supported_degree { + return Err(Error::IncorrectDegreeBound { + poly_degree: p.degree(), + degree_bound: bound, + supported_degree, + label: p.label().to_string(), + }); + } + } + + Ok(()) + } + + fn shift_polynomial(ck: &CommitterKey, p: &P, degree_bound: usize) -> P { + if p.is_zero() { + P::zero() + } else { + let mut shifted_polynomial_coeffs = + vec![G::ScalarField::zero(); ck.supported_degree() - degree_bound]; + shifted_polynomial_coeffs.extend_from_slice(&p.coeffs()); + P::from_coefficients_vec(shifted_polynomial_coeffs) + } + } + + fn combine_shifted_rand( + combined_rand: Option, + new_rand: Option, + coeff: G::ScalarField, + ) -> Option { + if let Some(new_rand) = new_rand { + let coeff_new_rand = new_rand * &coeff; + return Some(combined_rand.map_or(coeff_new_rand, |r| r + &coeff_new_rand)); + }; + + combined_rand + } + + fn combine_shifted_comm( + combined_comm: Option, + new_comm: Option, + coeff: G::ScalarField, + ) -> Option { + if let Some(new_comm) = new_comm { + let coeff_new_comm = new_comm.mul(coeff); + return Some(combined_comm.map_or(coeff_new_comm, |c| c + &coeff_new_comm)); + }; + + combined_comm + } + + fn construct_labeled_commitments( + lc_info: &[(String, Option)], + elements: &[G::Projective], + ) -> Vec>> { + let comms = G::Projective::batch_normalization_into_affine(elements); + let mut commitments = Vec::new(); + + let mut i = 0; + for info in lc_info.into_iter() { + let commitment; + let label = info.0.clone(); + let degree_bound = info.1; + + if degree_bound.is_some() { + commitment = Commitment { + comm: comms[i].clone(), + shifted_comm: Some(comms[i + 1].clone()), + }; + + i += 2; + } else { + commitment = Commitment { + comm: comms[i].clone(), + shifted_comm: None, + }; + + i += 1; + } + + commitments.push(LabeledCommitment::new(label, commitment, degree_bound)); + } + 
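+        // Degree-bounded entries consumed two of the normalized group
+        // elements above (the commitment and its shifted counterpart),
+        // which is why the cursor `i` advanced by 2 in that branch.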
+ return commitments; + } + + fn sample_generators(num_generators: usize) -> Vec { + let generators: Vec<_> = ark_std::cfg_into_iter!(0..num_generators) + .map(|i| { + let i = i as u64; + let mut hash = D::digest(&to_bytes![&Self::PROTOCOL_NAME, i].unwrap()); + let mut g = G::from_random_bytes(&hash); + let mut j = 0u64; + while g.is_none() { + hash = D::digest(&to_bytes![&Self::PROTOCOL_NAME, i, j].unwrap()); + g = G::from_random_bytes(&hash); + j += 1; + } + let generator = g.unwrap(); + generator.mul_by_cofactor_to_projective() + }) + .collect(); + + G::Projective::batch_normalization_into_affine(&generators) + } +} + +impl PolynomialCommitment for InnerProductArgPC +where + G: AffineCurve, + D: Digest, + P: UVPolynomial, +{ + type UniversalParams = UniversalParams; + type CommitterKey = CommitterKey; + type VerifierKey = VerifierKey; + type PreparedVerifierKey = PreparedVerifierKey; + type Commitment = Commitment; + type PreparedCommitment = PreparedCommitment; + type Randomness = Randomness; + type Proof = Proof; + type BatchProof = Vec; + type Error = Error; + + fn setup( + max_degree: usize, + _: Option, + _rng: &mut R, + ) -> Result { + // Ensure that max_degree + 1 is a power of 2 + let max_degree = (max_degree + 1).next_power_of_two() - 1; + + let setup_time = start_timer!(|| format!("Sampling {} generators", max_degree + 3)); + let mut generators = Self::sample_generators(max_degree + 3); + end_timer!(setup_time); + + let h = generators.pop().unwrap(); + let s = generators.pop().unwrap(); + + let pp = UniversalParams { + comm_key: generators, + h, + s, + }; + + Ok(pp) + } + + fn trim( + pp: &Self::UniversalParams, + supported_degree: usize, + _supported_hiding_bound: usize, + _enforced_degree_bounds: Option<&[usize]>, + ) -> Result<(Self::CommitterKey, Self::VerifierKey), Self::Error> { + // Ensure that supported_degree + 1 is a power of two + let supported_degree = (supported_degree + 1).next_power_of_two() - 1; + if supported_degree > pp.max_degree() { + return Err(Error::TrimmingDegreeTooLarge); + } + + let trim_time = + start_timer!(|| format!("Trimming to supported degree of {}", supported_degree)); + + let ck = CommitterKey { + comm_key: pp.comm_key[0..(supported_degree + 1)].to_vec(), + h: pp.h.clone(), + s: pp.s.clone(), + max_degree: pp.max_degree(), + }; + + let vk = VerifierKey { + comm_key: pp.comm_key[0..(supported_degree + 1)].to_vec(), + h: pp.h.clone(), + s: pp.s.clone(), + max_degree: pp.max_degree(), + }; + + end_timer!(trim_time); + + Ok((ck, vk)) + } + + /// Outputs a commitment to `polynomial`. 
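+    /// Concretely, for a polynomial with coefficients `c_0, ..., c_d`, this
+    /// computes the Pedersen commitment
+    /// `C = c_0 * G_0 + ... + c_d * G_d + r * S`,
+    /// where the `G_i` are the generators in `ck.comm_key`, `S` is the
+    /// hiding generator `ck.s`, and `r` is zero unless a hiding bound is
+    /// requested. A degree bound `b` additionally commits to the same
+    /// coefficients under the key slice `ck.comm_key[supported_degree - b..]`.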
+ fn commit<'a>( + ck: &Self::CommitterKey, + polynomials: impl IntoIterator>, + rng: Option<&mut dyn RngCore>, + ) -> Result< + ( + Vec>, + Vec, + ), + Self::Error, + > + where + P: 'a, + { + let rng = &mut crate::optional_rng::OptionalRng(rng); + let mut comms = Vec::new(); + let mut rands = Vec::new(); + + let commit_time = start_timer!(|| "Committing to polynomials"); + for labeled_polynomial in polynomials { + Self::check_degrees_and_bounds(ck.supported_degree(), labeled_polynomial)?; + + let polynomial: &P = labeled_polynomial.polynomial(); + let label = labeled_polynomial.label(); + let hiding_bound = labeled_polynomial.hiding_bound(); + let degree_bound = labeled_polynomial.degree_bound(); + + let commit_time = start_timer!(|| format!( + "Polynomial {} of degree {}, degree bound {:?}, and hiding bound {:?}", + label, + polynomial.degree(), + degree_bound, + hiding_bound, + )); + + let randomness = if let Some(h) = hiding_bound { + Randomness::rand(h, degree_bound.is_some(), None, rng) + } else { + Randomness::empty() + }; + + let comm = Self::cm_commit( + &ck.comm_key[..(polynomial.degree() + 1)], + &polynomial.coeffs(), + Some(ck.s), + Some(randomness.rand), + ) + .into(); + + let shifted_comm = degree_bound.map(|d| { + Self::cm_commit( + &ck.comm_key[(ck.supported_degree() - d)..], + &polynomial.coeffs(), + Some(ck.s), + randomness.shifted_rand, + ) + .into() + }); + + let commitment = Commitment { comm, shifted_comm }; + let labeled_comm = LabeledCommitment::new(label.to_string(), commitment, degree_bound); + + comms.push(labeled_comm); + rands.push(randomness); + + end_timer!(commit_time); + } + + end_timer!(commit_time); + Ok((comms, rands)) + } + + fn open_individual_opening_challenges<'a>( + ck: &Self::CommitterKey, + labeled_polynomials: impl IntoIterator>, + commitments: impl IntoIterator>, + point: &'a P::Point, + opening_challenges: &dyn Fn(u64) -> G::ScalarField, + rands: impl IntoIterator, + rng: Option<&mut dyn RngCore>, + ) -> Result + where + Self::Commitment: 'a, + Self::Randomness: 'a, + P: 'a, + { + let mut combined_polynomial = P::zero(); + let mut combined_rand = G::ScalarField::zero(); + let mut combined_commitment_proj = G::Projective::zero(); + + let mut has_hiding = false; + + let polys_iter = labeled_polynomials.into_iter(); + let rands_iter = rands.into_iter(); + let comms_iter = commitments.into_iter(); + + let combine_time = start_timer!(|| "Combining polynomials, randomness, and commitments."); + + let mut opening_challenge_counter = 0; + let mut cur_challenge = opening_challenges(opening_challenge_counter); + opening_challenge_counter += 1; + + for (labeled_polynomial, (labeled_commitment, randomness)) in + polys_iter.zip(comms_iter.zip(rands_iter)) + { + let label = labeled_polynomial.label(); + assert_eq!(labeled_polynomial.label(), labeled_commitment.label()); + Self::check_degrees_and_bounds(ck.supported_degree(), labeled_polynomial)?; + + let polynomial = labeled_polynomial.polynomial(); + let degree_bound = labeled_polynomial.degree_bound(); + let hiding_bound = labeled_polynomial.hiding_bound(); + let commitment = labeled_commitment.commitment(); + + combined_polynomial += (cur_challenge, polynomial); + combined_commitment_proj += &commitment.comm.mul(cur_challenge); + + if hiding_bound.is_some() { + has_hiding = true; + combined_rand += &(cur_challenge * &randomness.rand); + } + + cur_challenge = opening_challenges(opening_challenge_counter); + opening_challenge_counter += 1; + + let has_degree_bound = degree_bound.is_some(); + + assert_eq!( + 
has_degree_bound, + commitment.shifted_comm.is_some(), + "shifted_comm mismatch for {}", + label + ); + + assert_eq!( + degree_bound, + labeled_commitment.degree_bound(), + "labeled_comm degree bound mismatch for {}", + label + ); + if let Some(degree_bound) = degree_bound { + let shifted_polynomial = Self::shift_polynomial(ck, polynomial, degree_bound); + combined_polynomial += (cur_challenge, &shifted_polynomial); + combined_commitment_proj += &commitment.shifted_comm.unwrap().mul(cur_challenge); + + if hiding_bound.is_some() { + let shifted_rand = randomness.shifted_rand; + assert!( + shifted_rand.is_some(), + "shifted_rand.is_none() for {}", + label + ); + combined_rand += &(cur_challenge * &shifted_rand.unwrap()); + } + } + + cur_challenge = opening_challenges(opening_challenge_counter); + opening_challenge_counter += 1; + } + + end_timer!(combine_time); + + let combined_v = combined_polynomial.evaluate(point); + + // Pad the coefficients to the appropriate vector size + let d = ck.supported_degree(); + + // `log_d` is ceil(log2 (d + 1)), which is the number of steps to compute all of the challenges + let log_d = ark_std::log2(d + 1) as usize; + + let mut combined_commitment; + let mut hiding_commitment = None; + + if has_hiding { + let mut rng = rng.expect("hiding commitments require randomness"); + let hiding_time = start_timer!(|| "Applying hiding."); + let mut hiding_polynomial = P::rand(d, &mut rng); + hiding_polynomial -= &P::from_coefficients_slice(&[hiding_polynomial.evaluate(point)]); + + let hiding_rand = G::ScalarField::rand(rng); + let hiding_commitment_proj = Self::cm_commit( + ck.comm_key.as_slice(), + hiding_polynomial.coeffs(), + Some(ck.s), + Some(hiding_rand), + ); + + let mut batch = G::Projective::batch_normalization_into_affine(&[ + combined_commitment_proj, + hiding_commitment_proj, + ]); + hiding_commitment = Some(batch.pop().unwrap()); + combined_commitment = batch.pop().unwrap(); + + let hiding_challenge = Self::compute_random_oracle_challenge( + &ark_ff::to_bytes![ + combined_commitment, + point, + combined_v, + hiding_commitment.unwrap() + ] + .unwrap(), + ); + combined_polynomial += (hiding_challenge, &hiding_polynomial); + combined_rand += &(hiding_challenge * &hiding_rand); + combined_commitment_proj += + &(hiding_commitment.unwrap().mul(hiding_challenge) - &ck.s.mul(combined_rand)); + + end_timer!(hiding_time); + } + + let combined_rand = if has_hiding { + Some(combined_rand) + } else { + None + }; + + let proof_time = + start_timer!(|| format!("Generating proof for degree {} combined polynomial", d + 1)); + + combined_commitment = combined_commitment_proj.into_affine(); + + // ith challenge + let mut round_challenge = Self::compute_random_oracle_challenge( + &ark_ff::to_bytes![combined_commitment, point, combined_v].unwrap(), + ); + + let h_prime = ck.h.mul(round_challenge).into_affine(); + + // Pads the coefficients with zeroes to get the number of coeff to be d+1 + let mut coeffs = combined_polynomial.coeffs().to_vec(); + if coeffs.len() < d + 1 { + for _ in coeffs.len()..(d + 1) { + coeffs.push(G::ScalarField::zero()); + } + } + let mut coeffs = coeffs.as_mut_slice(); + + // Powers of z + let mut z: Vec = Vec::with_capacity(d + 1); + let mut cur_z: G::ScalarField = G::ScalarField::one(); + for _ in 0..(d + 1) { + z.push(cur_z); + cur_z *= point; + } + let mut z = z.as_mut_slice(); + + // This will be used for transforming the key in each step + let mut key_proj: Vec = ck.comm_key.iter().map(|x| (*x).into()).collect(); + let mut key_proj = 
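+        // The reduction loop below halves `n` every round: it commits to
+        // the cross terms `l` and `r`, derives the round challenge from
+        // them, and folds the coefficients, the powers of the evaluation
+        // point, and the key into their left halves.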
key_proj.as_mut_slice(); + + let mut temp; + + // Key for MSM + // We initialize this to capacity 0 initially because we want to use the key slice first + let mut comm_key = &ck.comm_key; + + let mut l_vec = Vec::with_capacity(log_d); + let mut r_vec = Vec::with_capacity(log_d); + + let mut n = d + 1; + while n > 1 { + let (coeffs_l, coeffs_r) = coeffs.split_at_mut(n / 2); + let (z_l, z_r) = z.split_at_mut(n / 2); + let (key_l, key_r) = comm_key.split_at(n / 2); + let (key_proj_l, _) = key_proj.split_at_mut(n / 2); + + let l = Self::cm_commit(key_l, coeffs_r, None, None) + + &h_prime.mul(Self::inner_product(coeffs_r, z_l)); + + let r = Self::cm_commit(key_r, coeffs_l, None, None) + + &h_prime.mul(Self::inner_product(coeffs_l, z_r)); + + let lr = G::Projective::batch_normalization_into_affine(&[l, r]); + l_vec.push(lr[0]); + r_vec.push(lr[1]); + + round_challenge = Self::compute_random_oracle_challenge( + &ark_ff::to_bytes![round_challenge, lr[0], lr[1]].unwrap(), + ); + let round_challenge_inv = round_challenge.inverse().unwrap(); + + ark_std::cfg_iter_mut!(coeffs_l) + .zip(coeffs_r) + .for_each(|(c_l, c_r)| *c_l += &(round_challenge_inv * &*c_r)); + + ark_std::cfg_iter_mut!(z_l) + .zip(z_r) + .for_each(|(z_l, z_r)| *z_l += &(round_challenge * &*z_r)); + + ark_std::cfg_iter_mut!(key_proj_l) + .zip(key_r) + .for_each(|(k_l, k_r)| *k_l += &(k_r.mul(round_challenge))); + + coeffs = coeffs_l; + z = z_l; + + key_proj = key_proj_l; + temp = G::Projective::batch_normalization_into_affine(key_proj); + comm_key = &temp; + + n /= 2; + } + + end_timer!(proof_time); + + Ok(Proof { + l_vec, + r_vec, + final_comm_key: comm_key[0], + c: coeffs[0], + hiding_comm: hiding_commitment, + rand: combined_rand, + }) + } + + fn check_individual_opening_challenges<'a>( + vk: &Self::VerifierKey, + commitments: impl IntoIterator>, + point: &'a P::Point, + values: impl IntoIterator, + proof: &Self::Proof, + opening_challenges: &dyn Fn(u64) -> G::ScalarField, + _rng: Option<&mut dyn RngCore>, + ) -> Result + where + Self::Commitment: 'a, + { + let check_time = start_timer!(|| "Checking evaluations"); + let d = vk.supported_degree(); + + // `log_d` is ceil(log2 (d + 1)), which is the number of steps to compute all of the challenges + let log_d = ark_std::log2(d + 1) as usize; + + if proof.l_vec.len() != proof.r_vec.len() || proof.l_vec.len() != log_d { + return Err(Error::IncorrectInputLength( + format!( + "Expected proof vectors to be {:}. 
Instead, l_vec size is {:} and r_vec size is {:}", + log_d, + proof.l_vec.len(), + proof.r_vec.len() + ) + )); + } + + let check_poly = + Self::succinct_check(vk, commitments, *point, values, proof, opening_challenges); + + if check_poly.is_none() { + return Ok(false); + } + + let check_poly_coeffs = check_poly.unwrap().compute_coeffs(); + let final_key = Self::cm_commit( + vk.comm_key.as_slice(), + check_poly_coeffs.as_slice(), + None, + None, + ); + if !(final_key - &proof.final_comm_key.into()).is_zero() { + return Ok(false); + } + + end_timer!(check_time); + Ok(true) + } + + fn batch_check_individual_opening_challenges<'a, R: RngCore>( + vk: &Self::VerifierKey, + commitments: impl IntoIterator>, + query_set: &QuerySet, + values: &Evaluations, + proof: &Self::BatchProof, + opening_challenges: &dyn Fn(u64) -> G::ScalarField, + rng: &mut R, + ) -> Result + where + Self::Commitment: 'a, + { + let commitments: BTreeMap<_, _> = commitments.into_iter().map(|c| (c.label(), c)).collect(); + let mut query_to_labels_map = BTreeMap::new(); + + for (label, (point_label, point)) in query_set.iter() { + let labels = query_to_labels_map + .entry(point_label) + .or_insert((point, BTreeSet::new())); + labels.1.insert(label); + } + + assert_eq!(proof.len(), query_to_labels_map.len()); + + let mut randomizer = G::ScalarField::one(); + + let mut combined_check_poly = P::zero(); + let mut combined_final_key = G::Projective::zero(); + + for ((_point_label, (point, labels)), p) in query_to_labels_map.into_iter().zip(proof) { + let lc_time = + start_timer!(|| format!("Randomly combining {} commitments", labels.len())); + let mut comms: Vec<&'_ LabeledCommitment<_>> = Vec::new(); + let mut vals = Vec::new(); + for label in labels.into_iter() { + let commitment = commitments.get(label).ok_or(Error::MissingPolynomial { + label: label.to_string(), + })?; + + let v_i = values + .get(&(label.clone(), *point)) + .ok_or(Error::MissingEvaluation { + label: label.to_string(), + })?; + + comms.push(commitment); + vals.push(*v_i); + } + + let check_poly = Self::succinct_check( + vk, + comms.into_iter(), + *point, + vals.into_iter(), + p, + opening_challenges, + ); + + if check_poly.is_none() { + return Ok(false); + } + + let check_poly = P::from_coefficients_vec(check_poly.unwrap().compute_coeffs()); + combined_check_poly += (randomizer, &check_poly); + combined_final_key += &p.final_comm_key.mul(randomizer); + + randomizer = u128::rand(rng).into(); + end_timer!(lc_time); + } + + let proof_time = start_timer!(|| "Checking batched proof"); + let final_key = Self::cm_commit( + vk.comm_key.as_slice(), + combined_check_poly.coeffs(), + None, + None, + ); + if !(final_key - &combined_final_key).is_zero() { + return Ok(false); + } + + end_timer!(proof_time); + + Ok(true) + } + + fn open_combinations_individual_opening_challenges<'a>( + ck: &Self::CommitterKey, + lc_s: impl IntoIterator>, + polynomials: impl IntoIterator>, + commitments: impl IntoIterator>, + query_set: &QuerySet, + opening_challenges: &dyn Fn(u64) -> G::ScalarField, + rands: impl IntoIterator, + rng: Option<&mut dyn RngCore>, + ) -> Result, Self::Error> + where + Self::Randomness: 'a, + Self::Commitment: 'a, + P: 'a, + { + let label_poly_map = polynomials + .into_iter() + .zip(rands) + .zip(commitments) + .map(|((p, r), c)| (p.label(), (p, r, c))) + .collect::>(); + + let mut lc_polynomials = Vec::new(); + let mut lc_randomness = Vec::new(); + let mut lc_commitments = Vec::new(); + let mut lc_info = Vec::new(); + + for lc in lc_s { + let lc_label = 
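+            // Accumulate every polynomial appearing in this linear
+            // combination, scaled by its coefficient, into one labeled
+            // polynomial, with matching combined randomness and commitment.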
lc.label().clone(); + let mut poly = P::zero(); + let mut degree_bound = None; + let mut hiding_bound = None; + + let mut combined_comm = G::Projective::zero(); + let mut combined_shifted_comm: Option = None; + + let mut combined_rand = G::ScalarField::zero(); + let mut combined_shifted_rand: Option = None; + + let num_polys = lc.len(); + for (coeff, label) in lc.iter().filter(|(_, l)| !l.is_one()) { + let label: &String = label.try_into().expect("cannot be one!"); + let &(cur_poly, cur_rand, cur_comm) = + label_poly_map.get(label).ok_or(Error::MissingPolynomial { + label: label.to_string(), + })?; + + if num_polys == 1 && cur_poly.degree_bound().is_some() { + assert!( + coeff.is_one(), + "Coefficient must be one for degree-bounded equations" + ); + degree_bound = cur_poly.degree_bound(); + } else if cur_poly.degree_bound().is_some() { + eprintln!("Degree bound when number of equations is non-zero"); + return Err(Self::Error::EquationHasDegreeBounds(lc_label)); + } + + // Some(_) > None, always. + hiding_bound = core::cmp::max(hiding_bound, cur_poly.hiding_bound()); + poly += (*coeff, cur_poly.polynomial()); + + combined_rand += &(cur_rand.rand * coeff); + combined_shifted_rand = Self::combine_shifted_rand( + combined_shifted_rand, + cur_rand.shifted_rand, + *coeff, + ); + + let commitment = cur_comm.commitment(); + combined_comm += &commitment.comm.mul(*coeff); + combined_shifted_comm = Self::combine_shifted_comm( + combined_shifted_comm, + commitment.shifted_comm, + *coeff, + ); + } + + let lc_poly = + LabeledPolynomial::new(lc_label.clone(), poly, degree_bound, hiding_bound); + lc_polynomials.push(lc_poly); + lc_randomness.push(Randomness { + rand: combined_rand, + shifted_rand: combined_shifted_rand, + }); + + lc_commitments.push(combined_comm); + if let Some(combined_shifted_comm) = combined_shifted_comm { + lc_commitments.push(combined_shifted_comm); + } + + lc_info.push((lc_label, degree_bound)); + } + + let lc_commitments = Self::construct_labeled_commitments(&lc_info, &lc_commitments); + + let proof = Self::batch_open_individual_opening_challenges( + ck, + lc_polynomials.iter(), + lc_commitments.iter(), + &query_set, + opening_challenges, + lc_randomness.iter(), + rng, + )?; + Ok(BatchLCProof { proof, evals: None }) + } + + /// Checks that `values` are the true evaluations at `query_set` of the polynomials + /// committed in `labeled_commitments`. + fn check_combinations_individual_opening_challenges<'a, R: RngCore>( + vk: &Self::VerifierKey, + lc_s: impl IntoIterator>, + commitments: impl IntoIterator>, + query_set: &QuerySet, + evaluations: &Evaluations, + proof: &BatchLCProof, + opening_challenges: &dyn Fn(u64) -> G::ScalarField, + rng: &mut R, + ) -> Result + where + Self::Commitment: 'a, + { + let BatchLCProof { proof, .. 
} = proof; + let label_comm_map = commitments + .into_iter() + .map(|c| (c.label(), c)) + .collect::>(); + + let mut lc_commitments = Vec::new(); + let mut lc_info = Vec::new(); + let mut evaluations = evaluations.clone(); + for lc in lc_s { + let lc_label = lc.label().clone(); + let num_polys = lc.len(); + + let mut degree_bound = None; + let mut combined_comm = G::Projective::zero(); + let mut combined_shifted_comm: Option = None; + + for (coeff, label) in lc.iter() { + if label.is_one() { + for (&(ref label, _), ref mut eval) in evaluations.iter_mut() { + if label == &lc_label { + **eval -= coeff; + } + } + } else { + let label: &String = label.try_into().unwrap(); + let &cur_comm = label_comm_map.get(label).ok_or(Error::MissingPolynomial { + label: label.to_string(), + })?; + + if num_polys == 1 && cur_comm.degree_bound().is_some() { + assert!( + coeff.is_one(), + "Coefficient must be one for degree-bounded equations" + ); + degree_bound = cur_comm.degree_bound(); + } else if cur_comm.degree_bound().is_some() { + return Err(Self::Error::EquationHasDegreeBounds(lc_label)); + } + + let commitment = cur_comm.commitment(); + combined_comm += &commitment.comm.mul(*coeff); + combined_shifted_comm = Self::combine_shifted_comm( + combined_shifted_comm, + commitment.shifted_comm, + *coeff, + ); + } + } + + lc_commitments.push(combined_comm); + + if let Some(combined_shifted_comm) = combined_shifted_comm { + lc_commitments.push(combined_shifted_comm); + } + + lc_info.push((lc_label, degree_bound)); + } + + let lc_commitments = Self::construct_labeled_commitments(&lc_info, &lc_commitments); + + Self::batch_check_individual_opening_challenges( + vk, + &lc_commitments, + &query_set, + &evaluations, + proof, + opening_challenges, + rng, + ) + } +} + +#[cfg(test)] +mod tests { + #![allow(non_camel_case_types)] + + use super::InnerProductArgPC; + use ark_ed_on_bls12_381::{EdwardsAffine, Fr}; + use ark_ff::PrimeField; + use ark_poly::{univariate::DensePolynomial as DensePoly, UVPolynomial}; + use ark_std::rand::rngs::StdRng; + use blake2::Blake2s; + + type UniPoly = DensePoly; + type PC = InnerProductArgPC; + type PC_JJB2S = PC; + + fn rand_poly(degree: usize, _: Option, rng: &mut StdRng) -> DensePoly { + DensePoly::rand(degree, rng) + } + + fn constant_poly(_: usize, _: Option, rng: &mut StdRng) -> DensePoly { + DensePoly::from_coefficients_slice(&[F::rand(rng)]) + } + + fn rand_point(_: Option, rng: &mut StdRng) -> F { + F::rand(rng) + } + + #[test] + fn single_poly_test() { + use crate::tests::*; + single_poly_test::<_, _, PC_JJB2S>(None, rand_poly::, rand_point::) + .expect("test failed for ed_on_bls12_381-blake2s"); + } + + #[test] + fn constant_poly_test() { + use crate::tests::*; + single_poly_test::<_, _, PC_JJB2S>(None, constant_poly::, rand_point::) + .expect("test failed for ed_on_bls12_381-blake2s"); + } + + #[test] + fn quadratic_poly_degree_bound_multiple_queries_test() { + use crate::tests::*; + quadratic_poly_degree_bound_multiple_queries_test::<_, _, PC_JJB2S>( + rand_poly::, + rand_point::, + ) + .expect("test failed for ed_on_bls12_381-blake2s"); + } + + #[test] + fn linear_poly_degree_bound_test() { + use crate::tests::*; + linear_poly_degree_bound_test::<_, _, PC_JJB2S>(rand_poly::, rand_point::) + .expect("test failed for ed_on_bls12_381-blake2s"); + } + + #[test] + fn single_poly_degree_bound_test() { + use crate::tests::*; + single_poly_degree_bound_test::<_, _, PC_JJB2S>(rand_poly::, rand_point::) + .expect("test failed for ed_on_bls12_381-blake2s"); + } + + #[test] + fn 
single_poly_degree_bound_multiple_queries_test() { + use crate::tests::*; + single_poly_degree_bound_multiple_queries_test::<_, _, PC_JJB2S>( + rand_poly::, + rand_point::, + ) + .expect("test failed for ed_on_bls12_381-blake2s"); + } + + #[test] + fn two_polys_degree_bound_single_query_test() { + use crate::tests::*; + two_polys_degree_bound_single_query_test::<_, _, PC_JJB2S>( + rand_poly::, + rand_point::, + ) + .expect("test failed for ed_on_bls12_381-blake2s"); + } + + #[test] + fn full_end_to_end_test() { + use crate::tests::*; + full_end_to_end_test::<_, _, PC_JJB2S>(None, rand_poly::, rand_point::) + .expect("test failed for ed_on_bls12_381-blake2s"); + println!("Finished ed_on_bls12_381-blake2s"); + } + + #[test] + fn single_equation_test() { + use crate::tests::*; + single_equation_test::<_, _, PC_JJB2S>(None, rand_poly::, rand_point::) + .expect("test failed for ed_on_bls12_381-blake2s"); + println!("Finished ed_on_bls12_381-blake2s"); + } + + #[test] + fn two_equation_test() { + use crate::tests::*; + two_equation_test::<_, _, PC_JJB2S>(None, rand_poly::, rand_point::) + .expect("test failed for ed_on_bls12_381-blake2s"); + println!("Finished ed_on_bls12_381-blake2s"); + } + + #[test] + fn two_equation_degree_bound_test() { + use crate::tests::*; + two_equation_degree_bound_test::<_, _, PC_JJB2S>(rand_poly::, rand_point::) + .expect("test failed for ed_on_bls12_381-blake2s"); + println!("Finished ed_on_bls12_381-blake2s"); + } + + #[test] + fn full_end_to_end_equation_test() { + use crate::tests::*; + full_end_to_end_equation_test::<_, _, PC_JJB2S>(None, rand_poly::, rand_point::) + .expect("test failed for ed_on_bls12_381-blake2s"); + println!("Finished ed_on_bls12_381-blake2s"); + } + + #[test] + #[should_panic] + fn bad_degree_bound_test() { + use crate::tests::*; + bad_degree_bound_test::<_, _, PC_JJB2S>(rand_poly::, rand_point::) + .expect("test failed for ed_on_bls12_381-blake2s"); + println!("Finished ed_on_bls12_381-blake2s"); + } +} diff --git a/arkworks/poly-commit/src/kzg10/data_structures.rs b/arkworks/poly-commit/src/kzg10/data_structures.rs new file mode 100644 index 00000000..4bb95430 --- /dev/null +++ b/arkworks/poly-commit/src/kzg10/data_structures.rs @@ -0,0 +1,559 @@ +use crate::*; +use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve}; +use ark_ff::{PrimeField, ToBytes, ToConstraintField, Zero}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError}; +use ark_std::{ + borrow::Cow, + io::{Read, Write}, + marker::PhantomData, + ops::{Add, AddAssign}, +}; + +/// `UniversalParams` are the universal parameters for the KZG10 scheme. +#[derive(Derivative)] +#[derivative(Clone(bound = ""), Debug(bound = ""))] +pub struct UniversalParams { + /// Group elements of the form `{ \beta^i G }`, where `i` ranges from 0 to `degree`. + pub powers_of_g: Vec, + /// Group elements of the form `{ \beta^i \gamma G }`, where `i` ranges from 0 to `degree`. + pub powers_of_gamma_g: BTreeMap, + /// The generator of G2. + pub h: E::G2Affine, + /// \beta times the above generator of G2. + pub beta_h: E::G2Affine, + /// Group elements of the form `{ \beta^i G2 }`, where `i` ranges from `0` to `-degree`. + pub neg_powers_of_h: BTreeMap, + /// The generator of G2, prepared for use in pairings. + #[derivative(Debug = "ignore")] + pub prepared_h: E::G2Prepared, + /// \beta times the above generator of G2, prepared for use in pairings. 
+ #[derivative(Debug = "ignore")] + pub prepared_beta_h: E::G2Prepared, +} + +impl PCUniversalParams for UniversalParams { + fn max_degree(&self) -> usize { + self.powers_of_g.len() - 1 + } +} + +impl CanonicalSerialize for UniversalParams { + fn serialize(&self, mut writer: W) -> Result<(), SerializationError> { + self.powers_of_g.serialize(&mut writer)?; + self.powers_of_gamma_g.serialize(&mut writer)?; + self.h.serialize(&mut writer)?; + self.beta_h.serialize(&mut writer)?; + self.neg_powers_of_h.serialize(&mut writer) + } + + fn serialized_size(&self) -> usize { + self.powers_of_g.serialized_size() + + self.powers_of_gamma_g.serialized_size() + + self.h.serialized_size() + + self.beta_h.serialized_size() + + self.neg_powers_of_h.serialized_size() + } + + fn serialize_unchecked(&self, mut writer: W) -> Result<(), SerializationError> { + self.powers_of_g.serialize_unchecked(&mut writer)?; + self.powers_of_gamma_g.serialize_unchecked(&mut writer)?; + self.h.serialize_unchecked(&mut writer)?; + self.beta_h.serialize_unchecked(&mut writer)?; + self.neg_powers_of_h.serialize_unchecked(&mut writer) + } + + fn serialize_uncompressed(&self, mut writer: W) -> Result<(), SerializationError> { + self.powers_of_g.serialize_uncompressed(&mut writer)?; + self.powers_of_gamma_g.serialize_uncompressed(&mut writer)?; + self.h.serialize_uncompressed(&mut writer)?; + self.beta_h.serialize_uncompressed(&mut writer)?; + self.neg_powers_of_h.serialize_uncompressed(&mut writer) + } + + fn uncompressed_size(&self) -> usize { + self.powers_of_g.uncompressed_size() + + self.powers_of_gamma_g.uncompressed_size() + + self.h.uncompressed_size() + + self.beta_h.uncompressed_size() + + self.neg_powers_of_h.uncompressed_size() + } +} + +impl CanonicalDeserialize for UniversalParams { + fn deserialize(mut reader: R) -> Result { + let powers_of_g = Vec::::deserialize(&mut reader)?; + let powers_of_gamma_g = BTreeMap::::deserialize(&mut reader)?; + let h = E::G2Affine::deserialize(&mut reader)?; + let beta_h = E::G2Affine::deserialize(&mut reader)?; + let neg_powers_of_h = BTreeMap::::deserialize(&mut reader)?; + + let prepared_h = E::G2Prepared::from(h.clone()); + let prepared_beta_h = E::G2Prepared::from(beta_h.clone()); + + Ok(Self { + powers_of_g, + powers_of_gamma_g, + h, + beta_h, + neg_powers_of_h, + prepared_h, + prepared_beta_h, + }) + } + + fn deserialize_uncompressed(mut reader: R) -> Result { + let powers_of_g = Vec::::deserialize_uncompressed(&mut reader)?; + let powers_of_gamma_g = + BTreeMap::::deserialize_uncompressed(&mut reader)?; + let h = E::G2Affine::deserialize_uncompressed(&mut reader)?; + let beta_h = E::G2Affine::deserialize_uncompressed(&mut reader)?; + let neg_powers_of_h = + BTreeMap::::deserialize_uncompressed(&mut reader)?; + + let prepared_h = E::G2Prepared::from(h.clone()); + let prepared_beta_h = E::G2Prepared::from(beta_h.clone()); + + Ok(Self { + powers_of_g, + powers_of_gamma_g, + h, + beta_h, + neg_powers_of_h, + prepared_h, + prepared_beta_h, + }) + } + + fn deserialize_unchecked(mut reader: R) -> Result { + let powers_of_g = Vec::::deserialize_unchecked(&mut reader)?; + let powers_of_gamma_g = BTreeMap::::deserialize_unchecked(&mut reader)?; + let h = E::G2Affine::deserialize_unchecked(&mut reader)?; + let beta_h = E::G2Affine::deserialize_unchecked(&mut reader)?; + let neg_powers_of_h = BTreeMap::::deserialize_unchecked(&mut reader)?; + + let prepared_h = E::G2Prepared::from(h.clone()); + let prepared_beta_h = E::G2Prepared::from(beta_h.clone()); + + Ok(Self { + powers_of_g, + 
powers_of_gamma_g, + h, + beta_h, + neg_powers_of_h, + prepared_h, + prepared_beta_h, + }) + } +} + +/// `Powers` is used to commit to and create evaluation proofs for a given +/// polynomial. +#[derive(Derivative)] +#[derivative( + Default(bound = ""), + Hash(bound = ""), + Clone(bound = ""), + Debug(bound = "") +)] +pub struct Powers<'a, E: PairingEngine> { + /// Group elements of the form `β^i G`, for different values of `i`. + pub powers_of_g: Cow<'a, [E::G1Affine]>, + /// Group elements of the form `β^i γG`, for different values of `i`. + pub powers_of_gamma_g: Cow<'a, [E::G1Affine]>, +} + +impl Powers<'_, E> { + /// The number of powers in `self`. + pub fn size(&self) -> usize { + self.powers_of_g.len() + } +} + +/// `VerifierKey` is used to check evaluation proofs for a given commitment. +#[derive(Derivative)] +#[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] +pub struct VerifierKey { + /// The generator of G1. + pub g: E::G1Affine, + /// The generator of G1 that is used for making a commitment hiding. + pub gamma_g: E::G1Affine, + /// The generator of G2. + pub h: E::G2Affine, + /// \beta times the above generator of G2. + pub beta_h: E::G2Affine, + /// The generator of G2, prepared for use in pairings. + #[derivative(Debug = "ignore")] + pub prepared_h: E::G2Prepared, + /// \beta times the above generator of G2, prepared for use in pairings. + #[derivative(Debug = "ignore")] + pub prepared_beta_h: E::G2Prepared, +} + +impl CanonicalSerialize for VerifierKey { + fn serialize(&self, mut writer: W) -> Result<(), SerializationError> { + self.g.serialize(&mut writer)?; + self.gamma_g.serialize(&mut writer)?; + self.h.serialize(&mut writer)?; + self.beta_h.serialize(&mut writer) + } + + fn serialized_size(&self) -> usize { + self.g.serialized_size() + + self.gamma_g.serialized_size() + + self.h.serialized_size() + + self.beta_h.serialized_size() + } + + fn serialize_uncompressed(&self, mut writer: W) -> Result<(), SerializationError> { + self.g.serialize_uncompressed(&mut writer)?; + self.gamma_g.serialize_uncompressed(&mut writer)?; + self.h.serialize_uncompressed(&mut writer)?; + self.beta_h.serialize_uncompressed(&mut writer) + } + + fn serialize_unchecked(&self, mut writer: W) -> Result<(), SerializationError> { + self.g.serialize_unchecked(&mut writer)?; + self.gamma_g.serialize_unchecked(&mut writer)?; + self.h.serialize_unchecked(&mut writer)?; + self.beta_h.serialize_unchecked(&mut writer) + } + + fn uncompressed_size(&self) -> usize { + self.g.uncompressed_size() + + self.gamma_g.uncompressed_size() + + self.h.uncompressed_size() + + self.beta_h.uncompressed_size() + } +} + +impl CanonicalDeserialize for VerifierKey { + fn deserialize(mut reader: R) -> Result { + let g = E::G1Affine::deserialize(&mut reader)?; + let gamma_g = E::G1Affine::deserialize(&mut reader)?; + let h = E::G2Affine::deserialize(&mut reader)?; + let beta_h = E::G2Affine::deserialize(&mut reader)?; + + let prepared_h = E::G2Prepared::from(h.clone()); + let prepared_beta_h = E::G2Prepared::from(beta_h.clone()); + + Ok(Self { + g, + gamma_g, + h, + beta_h, + prepared_h, + prepared_beta_h, + }) + } + + fn deserialize_uncompressed(mut reader: R) -> Result { + let g = E::G1Affine::deserialize_uncompressed(&mut reader)?; + let gamma_g = E::G1Affine::deserialize_uncompressed(&mut reader)?; + let h = E::G2Affine::deserialize_uncompressed(&mut reader)?; + let beta_h = E::G2Affine::deserialize_uncompressed(&mut reader)?; + + let prepared_h = E::G2Prepared::from(h.clone()); + let prepared_beta_h 
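+        // The pairing-ready `Prepared` forms are never serialized; they
+        // are recomputed from `h` and `beta_h` after reading.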
= E::G2Prepared::from(beta_h.clone()); + + Ok(Self { + g, + gamma_g, + h, + beta_h, + prepared_h, + prepared_beta_h, + }) + } + + fn deserialize_unchecked(mut reader: R) -> Result { + let g = E::G1Affine::deserialize_unchecked(&mut reader)?; + let gamma_g = E::G1Affine::deserialize_unchecked(&mut reader)?; + let h = E::G2Affine::deserialize_unchecked(&mut reader)?; + let beta_h = E::G2Affine::deserialize_unchecked(&mut reader)?; + + let prepared_h = E::G2Prepared::from(h.clone()); + let prepared_beta_h = E::G2Prepared::from(beta_h.clone()); + + Ok(Self { + g, + gamma_g, + h, + beta_h, + prepared_h, + prepared_beta_h, + }) + } +} + +impl ToBytes for VerifierKey { + #[inline] + fn write(&self, mut writer: W) -> ark_std::io::Result<()> { + self.g.write(&mut writer)?; + self.gamma_g.write(&mut writer)?; + self.h.write(&mut writer)?; + self.beta_h.write(&mut writer)?; + self.prepared_h.write(&mut writer)?; + self.prepared_beta_h.write(&mut writer) + } +} + +impl ToConstraintField<::BasePrimeField> for VerifierKey +where + E::G1Affine: ToConstraintField<::BasePrimeField>, + E::G2Affine: ToConstraintField<::BasePrimeField>, +{ + fn to_field_elements(&self) -> Option::BasePrimeField>> { + let mut res = Vec::new(); + + res.extend_from_slice(&self.g.to_field_elements().unwrap()); + res.extend_from_slice(&self.gamma_g.to_field_elements().unwrap()); + res.extend_from_slice(&self.h.to_field_elements().unwrap()); + res.extend_from_slice(&self.beta_h.to_field_elements().unwrap()); + + Some(res) + } +} + +/// `PreparedVerifierKey` is the fully prepared version for checking evaluation proofs for a given commitment. +/// We omit gamma here for simplicity. +#[derive(Derivative)] +#[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] +pub struct PreparedVerifierKey { + /// The generator of G1, prepared for power series. + pub prepared_g: Vec, + /// The generator of G2, prepared for use in pairings. + pub prepared_h: E::G2Prepared, + /// \beta times the above generator of G2, prepared for use in pairings. + pub prepared_beta_h: E::G2Prepared, +} + +impl PreparedVerifierKey { + /// prepare `PreparedVerifierKey` from `VerifierKey` + pub fn prepare(vk: &VerifierKey) -> Self { + let supported_bits = E::Fr::size_in_bits(); + + let mut prepared_g = Vec::::new(); + let mut g = E::G1Projective::from(vk.g.clone()); + for _ in 0..supported_bits { + prepared_g.push(g.clone().into()); + g.double_in_place(); + } + + Self { + prepared_g, + prepared_h: vk.prepared_h.clone(), + prepared_beta_h: vk.prepared_beta_h.clone(), + } + } +} + +/// `Commitment` commits to a polynomial. It is output by `KZG10::commit`. +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative( + Default(bound = ""), + Hash(bound = ""), + Clone(bound = ""), + Copy(bound = ""), + Debug(bound = ""), + PartialEq(bound = ""), + Eq(bound = "") +)] +pub struct Commitment( + /// The commitment is a group element. 
+ pub E::G1Affine, +); + +impl ToBytes for Commitment { + #[inline] + fn write(&self, writer: W) -> ark_std::io::Result<()> { + self.0.write(writer) + } +} + +impl PCCommitment for Commitment { + #[inline] + fn empty() -> Self { + Commitment(E::G1Affine::zero()) + } + + fn has_degree_bound(&self) -> bool { + false + } + + fn size_in_bytes(&self) -> usize { + ark_ff::to_bytes![E::G1Affine::zero()].unwrap().len() / 2 + } +} + +impl ToConstraintField<::BasePrimeField> for Commitment +where + E::G1Affine: ToConstraintField<::BasePrimeField>, +{ + fn to_field_elements(&self) -> Option::BasePrimeField>> { + self.0.to_field_elements() + } +} + +impl<'a, E: PairingEngine> AddAssign<(E::Fr, &'a Commitment)> for Commitment { + #[inline] + fn add_assign(&mut self, (f, other): (E::Fr, &'a Commitment)) { + let mut other = other.0.mul(f.into_repr()); + other.add_assign_mixed(&self.0); + self.0 = other.into(); + } +} + +/// `PreparedCommitment` commits to a polynomial and prepares for mul_bits. +#[derive(Derivative)] +#[derivative( + Default(bound = ""), + Hash(bound = ""), + Clone(bound = ""), + Debug(bound = ""), + PartialEq(bound = ""), + Eq(bound = "") +)] +pub struct PreparedCommitment( + /// The commitment is a group element. + pub Vec, +); + +impl PreparedCommitment { + /// prepare `PreparedCommitment` from `Commitment` + pub fn prepare(comm: &Commitment) -> Self { + let mut prepared_comm = Vec::::new(); + let mut cur = E::G1Projective::from(comm.0.clone()); + + let supported_bits = E::Fr::size_in_bits(); + + for _ in 0..supported_bits { + prepared_comm.push(cur.clone().into()); + cur.double_in_place(); + } + + Self { 0: prepared_comm } + } +} + +/// `Randomness` hides the polynomial inside a commitment. It is output by `KZG10::commit`. +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative( + Hash(bound = ""), + Clone(bound = ""), + Debug(bound = ""), + PartialEq(bound = ""), + Eq(bound = "") +)] +pub struct Randomness> { + /// For KZG10, the commitment randomness is a random polynomial. + pub blinding_polynomial: P, + _field: PhantomData, +} + +impl> Randomness { + /// Does `self` provide any hiding properties to the corresponding commitment? + /// `self.is_hiding() == true` only if the underlying polynomial is non-zero. + #[inline] + pub fn is_hiding(&self) -> bool { + !self.blinding_polynomial.is_zero() + } + + /// What is the degree of the hiding polynomial for a given hiding bound? 
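+    /// A hiding bound of `b` yields a blinding polynomial of degree `b + 1`,
+    /// i.e. `b + 2` uniformly random coefficients; `b` evaluation openings
+    /// impose only `b` linear constraints on them, so the commitment
+    /// remains hiding.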
+ #[inline] + pub fn calculate_hiding_polynomial_degree(hiding_bound: usize) -> usize { + hiding_bound + 1 + } +} + +impl> PCRandomness for Randomness { + fn empty() -> Self { + Self { + blinding_polynomial: P::zero(), + _field: PhantomData, + } + } + + fn rand(hiding_bound: usize, _: bool, _: Option, rng: &mut R) -> Self { + let mut randomness = Randomness::empty(); + let hiding_poly_degree = Self::calculate_hiding_polynomial_degree(hiding_bound); + randomness.blinding_polynomial = P::rand(hiding_poly_degree, rng); + randomness + } +} + +impl<'a, F: PrimeField, P: UVPolynomial> Add<&'a Randomness> for Randomness { + type Output = Self; + + #[inline] + fn add(mut self, other: &'a Self) -> Self { + self.blinding_polynomial += &other.blinding_polynomial; + self + } +} + +impl<'a, F: PrimeField, P: UVPolynomial> Add<(F, &'a Randomness)> for Randomness { + type Output = Self; + + #[inline] + fn add(mut self, other: (F, &'a Randomness)) -> Self { + self += other; + self + } +} + +impl<'a, F: PrimeField, P: UVPolynomial> AddAssign<&'a Randomness> for Randomness { + #[inline] + fn add_assign(&mut self, other: &'a Self) { + self.blinding_polynomial += &other.blinding_polynomial; + } +} + +impl<'a, F: PrimeField, P: UVPolynomial> AddAssign<(F, &'a Randomness)> + for Randomness +{ + #[inline] + fn add_assign(&mut self, (f, other): (F, &'a Randomness)) { + self.blinding_polynomial += (f, &other.blinding_polynomial); + } +} + +/// `Proof` is an evaluation proof that is output by `KZG10::open`. +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative( + Default(bound = ""), + Hash(bound = ""), + Clone(bound = ""), + Copy(bound = ""), + Debug(bound = ""), + PartialEq(bound = ""), + Eq(bound = "") +)] +pub struct Proof { + /// This is a commitment to the witness polynomial; see [KZG10] for more details. + pub w: E::G1Affine, + /// This is the evaluation of the random polynomial at the point for which + /// the evaluation proof was produced. + pub random_v: Option, +} + +impl PCProof for Proof { + fn size_in_bytes(&self) -> usize { + let hiding_size = if self.random_v.is_some() { + ark_ff::to_bytes![E::Fr::zero()].unwrap().len() + } else { + 0 + }; + ark_ff::to_bytes![E::G1Affine::zero()].unwrap().len() / 2 + hiding_size + } +} + +impl ToBytes for Proof { + #[inline] + fn write(&self, mut writer: W) -> ark_std::io::Result<()> { + self.w.write(&mut writer)?; + self.random_v + .as_ref() + .unwrap_or(&E::Fr::zero()) + .write(&mut writer) + } +} diff --git a/arkworks/poly-commit/src/kzg10/mod.rs b/arkworks/poly-commit/src/kzg10/mod.rs new file mode 100644 index 00000000..c1facc82 --- /dev/null +++ b/arkworks/poly-commit/src/kzg10/mod.rs @@ -0,0 +1,657 @@ +//! Here we construct a polynomial commitment that enables users to commit to a +//! single polynomial `p`, and then later provide an evaluation proof that +//! convinces verifiers that a claimed value `v` is the true evaluation of `p` +//! at a chosen point `x`. Our construction follows the template of the construction +//! proposed by Kate, Zaverucha, and Goldberg ([KZG11](http://cacr.uwaterloo.ca/techreports/2010/cacr2010-10.pdf)). +//! This construction achieves extractability in the algebraic group model (AGM). 
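+//!
+//! In brief: a commitment to `p` is `C = p(\beta) G` for a trapdoor `\beta`
+//! fixed at setup (plus a `\gamma G` blinding term in hiding mode), an
+//! evaluation proof at a point `x` is a commitment `W` to the witness
+//! polynomial `w(X) = (p(X) - p(x)) / (X - x)`, and the verifier accepts a
+//! claimed value `v` iff `e(C - v G, H) = e(W, \beta H - x H)`, which holds
+//! exactly when `p(X) - v` is divisible by `X - x`.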
+ +use crate::{BTreeMap, Error, LabeledPolynomial, PCRandomness, ToString, Vec}; +use ark_ec::msm::{FixedBaseMSM, VariableBaseMSM}; +use ark_ec::{group::Group, AffineCurve, PairingEngine, ProjectiveCurve}; +use ark_ff::{One, PrimeField, UniformRand, Zero}; +use ark_poly::UVPolynomial; +use ark_std::{format, marker::PhantomData, ops::Div, vec}; + +use ark_std::rand::RngCore; +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +mod data_structures; +pub use data_structures::*; + +/// `KZG10` is an implementation of the polynomial commitment scheme of +/// [Kate, Zaverucha and Goldbgerg][kzg10] +/// +/// [kzg10]: http://cacr.uwaterloo.ca/techreports/2010/cacr2010-10.pdf +pub struct KZG10> { + _engine: PhantomData, + _poly: PhantomData
<P>
, +} + +impl KZG10 +where + E: PairingEngine, + P: UVPolynomial, + for<'a, 'b> &'a P: Div<&'b P, Output = P>, +{ + /// Constructs public parameters when given as input the maximum degree `degree` + /// for the polynomial commitment scheme. + pub fn setup( + max_degree: usize, + produce_g2_powers: bool, + rng: &mut R, + ) -> Result, Error> { + if max_degree < 1 { + return Err(Error::DegreeIsZero); + } + let setup_time = start_timer!(|| format!("KZG10::Setup with degree {}", max_degree)); + let beta = E::Fr::rand(rng); + let g = E::G1Projective::rand(rng); + let gamma_g = E::G1Projective::rand(rng); + let h = E::G2Projective::rand(rng); + + let mut powers_of_beta = vec![E::Fr::one()]; + + let mut cur = beta; + for _ in 0..max_degree { + powers_of_beta.push(cur); + cur *= β + } + + let window_size = FixedBaseMSM::get_mul_window_size(max_degree + 1); + + let scalar_bits = E::Fr::size_in_bits(); + let g_time = start_timer!(|| "Generating powers of G"); + let g_table = FixedBaseMSM::get_window_table(scalar_bits, window_size, g); + let powers_of_g = FixedBaseMSM::multi_scalar_mul::( + scalar_bits, + window_size, + &g_table, + &powers_of_beta, + ); + end_timer!(g_time); + let gamma_g_time = start_timer!(|| "Generating powers of gamma * G"); + let gamma_g_table = FixedBaseMSM::get_window_table(scalar_bits, window_size, gamma_g); + let mut powers_of_gamma_g = FixedBaseMSM::multi_scalar_mul::( + scalar_bits, + window_size, + &gamma_g_table, + &powers_of_beta, + ); + // Add an additional power of gamma_g, because we want to be able to support + // up to D queries. + powers_of_gamma_g.push(powers_of_gamma_g.last().unwrap().mul(&beta)); + end_timer!(gamma_g_time); + + let powers_of_g = E::G1Projective::batch_normalization_into_affine(&powers_of_g); + let powers_of_gamma_g = + E::G1Projective::batch_normalization_into_affine(&powers_of_gamma_g) + .into_iter() + .enumerate() + .collect(); + + let neg_powers_of_h_time = start_timer!(|| "Generating negative powers of h in G2"); + let neg_powers_of_h = if produce_g2_powers { + let mut neg_powers_of_beta = vec![E::Fr::one()]; + let mut cur = E::Fr::one() / β + for _ in 0..max_degree { + neg_powers_of_beta.push(cur); + cur /= β + } + + let neg_h_table = FixedBaseMSM::get_window_table(scalar_bits, window_size, h); + let neg_powers_of_h = FixedBaseMSM::multi_scalar_mul::( + scalar_bits, + window_size, + &neg_h_table, + &neg_powers_of_beta, + ); + + let affines = E::G2Projective::batch_normalization_into_affine(&neg_powers_of_h); + let mut affines_map = BTreeMap::new(); + affines.into_iter().enumerate().for_each(|(i, a)| { + affines_map.insert(i, a); + }); + affines_map + } else { + BTreeMap::new() + }; + + end_timer!(neg_powers_of_h_time); + + let h = h.into_affine(); + let beta_h = h.scalar_mul(beta).into_affine(); + let prepared_h = h.into(); + let prepared_beta_h = beta_h.into(); + + let pp = UniversalParams { + powers_of_g, + powers_of_gamma_g, + h, + beta_h, + neg_powers_of_h, + prepared_h, + prepared_beta_h, + }; + end_timer!(setup_time); + Ok(pp) + } + + /// Outputs a commitment to `polynomial`. 
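+    /// The commitment is `C = p(\beta) G + r(\beta) \gamma G`, computed as
+    /// two multi-scalar multiplications over the precomputed powers
+    /// `{\beta^i G}` and `{\beta^i \gamma G}`; the blinding polynomial `r`
+    /// is sampled only when a hiding bound is supplied.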
+ pub fn commit( + powers: &Powers, + polynomial: &P, + hiding_bound: Option, + rng: Option<&mut dyn RngCore>, + ) -> Result<(Commitment, Randomness), Error> { + Self::check_degree_is_too_large(polynomial.degree(), powers.size())?; + + let commit_time = start_timer!(|| format!( + "Committing to polynomial of degree {} with hiding_bound: {:?}", + polynomial.degree(), + hiding_bound, + )); + + let (num_leading_zeros, plain_coeffs) = + skip_leading_zeros_and_convert_to_bigints(polynomial); + + let msm_time = start_timer!(|| "MSM to compute commitment to plaintext poly"); + let mut commitment = VariableBaseMSM::multi_scalar_mul( + &powers.powers_of_g[num_leading_zeros..], + &plain_coeffs, + ); + end_timer!(msm_time); + + let mut randomness = Randomness::::empty(); + if let Some(hiding_degree) = hiding_bound { + let mut rng = rng.ok_or(Error::MissingRng)?; + let sample_random_poly_time = start_timer!(|| format!( + "Sampling a random polynomial of degree {}", + hiding_degree + )); + + randomness = + as PCRandomness>::rand(hiding_degree, false, None, &mut rng); + Self::check_hiding_bound( + randomness.blinding_polynomial.degree(), + powers.powers_of_gamma_g.len(), + )?; + end_timer!(sample_random_poly_time); + } + + let random_ints = convert_to_bigints(&randomness.blinding_polynomial.coeffs()); + let msm_time = start_timer!(|| "MSM to compute commitment to random poly"); + let random_commitment = + VariableBaseMSM::multi_scalar_mul(&powers.powers_of_gamma_g, random_ints.as_slice()) + .into_affine(); + end_timer!(msm_time); + + commitment.add_assign_mixed(&random_commitment); + + end_timer!(commit_time); + Ok((Commitment(commitment.into()), randomness)) + } + + /// Compute witness polynomial. + /// + /// The witness polynomial w(x) the quotient of the division (p(x) - p(z)) / (x - z) + /// Observe that this quotient does not change with z because + /// p(z) is the remainder term. We can therefore omit p(z) when computing the quotient. + pub fn compute_witness_polynomial( + p: &P, + point: P::Point, + randomness: &Randomness, + ) -> Result<(P, Option
<P>
), Error> { + let divisor = P::from_coefficients_vec(vec![-point, E::Fr::one()]); + + let witness_time = start_timer!(|| "Computing witness polynomial"); + let witness_polynomial = p / &divisor; + end_timer!(witness_time); + + let random_witness_polynomial = if randomness.is_hiding() { + let random_p = &randomness.blinding_polynomial; + + let witness_time = start_timer!(|| "Computing random witness polynomial"); + let random_witness_polynomial = random_p / &divisor; + end_timer!(witness_time); + Some(random_witness_polynomial) + } else { + None + }; + + Ok((witness_polynomial, random_witness_polynomial)) + } + + pub(crate) fn open_with_witness_polynomial<'a>( + powers: &Powers, + point: P::Point, + randomness: &Randomness, + witness_polynomial: &P, + hiding_witness_polynomial: Option<&P>, + ) -> Result, Error> { + Self::check_degree_is_too_large(witness_polynomial.degree(), powers.size())?; + let (num_leading_zeros, witness_coeffs) = + skip_leading_zeros_and_convert_to_bigints(witness_polynomial); + + let witness_comm_time = start_timer!(|| "Computing commitment to witness polynomial"); + let mut w = VariableBaseMSM::multi_scalar_mul( + &powers.powers_of_g[num_leading_zeros..], + &witness_coeffs, + ); + end_timer!(witness_comm_time); + + let random_v = if let Some(hiding_witness_polynomial) = hiding_witness_polynomial { + let blinding_p = &randomness.blinding_polynomial; + let blinding_eval_time = start_timer!(|| "Evaluating random polynomial"); + let blinding_evaluation = blinding_p.evaluate(&point); + end_timer!(blinding_eval_time); + + let random_witness_coeffs = convert_to_bigints(&hiding_witness_polynomial.coeffs()); + let witness_comm_time = + start_timer!(|| "Computing commitment to random witness polynomial"); + w += &VariableBaseMSM::multi_scalar_mul( + &powers.powers_of_gamma_g, + &random_witness_coeffs, + ); + end_timer!(witness_comm_time); + Some(blinding_evaluation) + } else { + None + }; + + Ok(Proof { + w: w.into_affine(), + random_v, + }) + } + + /// On input a polynomial `p` and a point `point`, outputs a proof for the same. + pub(crate) fn open<'a>( + powers: &Powers, + p: &P, + point: P::Point, + rand: &Randomness, + ) -> Result, Error> { + Self::check_degree_is_too_large(p.degree(), powers.size())?; + let open_time = start_timer!(|| format!("Opening polynomial of degree {}", p.degree())); + + let witness_time = start_timer!(|| "Computing witness polynomials"); + let (witness_poly, hiding_witness_poly) = Self::compute_witness_polynomial(p, point, rand)?; + end_timer!(witness_time); + + let proof = Self::open_with_witness_polynomial( + powers, + point, + rand, + &witness_poly, + hiding_witness_poly.as_ref(), + ); + + end_timer!(open_time); + proof + } + + /// Verifies that `value` is the evaluation at `point` of the polynomial + /// committed inside `comm`. + pub fn check( + vk: &VerifierKey, + comm: &Commitment, + point: E::Fr, + value: E::Fr, + proof: &Proof, + ) -> Result { + let check_time = start_timer!(|| "Checking evaluation"); + let mut inner = comm.0.into_projective() - &vk.g.scalar_mul(value); + if let Some(random_v) = proof.random_v { + inner -= &vk.gamma_g.scalar_mul(random_v); + } + let lhs = E::pairing(inner, vk.h); + + let inner = vk.beta_h.into_projective() - &vk.h.scalar_mul(point); + let rhs = E::pairing(proof.w, inner); + + end_timer!(check_time, || format!("Result: {}", lhs == rhs)); + Ok(lhs == rhs) + } + + /// Check that each `proof_i` in `proofs` is a valid proof of evaluation for + /// `commitment_i` at `point_i`. 
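+    /// Rather than checking every opening separately, this draws 128-bit
+    /// random coefficients `r_i` and verifies the single combined equation
+    /// `e(\sum_i r_i (C_i + z_i W_i - v_i G - rv_i \gamma G), H) =
+    ///  e(\sum_i r_i W_i, \beta H)`,
+    /// so only one product of pairings is needed regardless of the number
+    /// of proofs.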
+ pub fn batch_check( + vk: &VerifierKey, + commitments: &[Commitment], + points: &[E::Fr], + values: &[E::Fr], + proofs: &[Proof], + rng: &mut R, + ) -> Result { + let check_time = + start_timer!(|| format!("Checking {} evaluation proofs", commitments.len())); + + let mut total_c = ::zero(); + let mut total_w = ::zero(); + + let combination_time = start_timer!(|| "Combining commitments and proofs"); + let mut randomizer = E::Fr::one(); + // Instead of multiplying g and gamma_g in each turn, we simply accumulate + // their coefficients and perform a final multiplication at the end. + let mut g_multiplier = E::Fr::zero(); + let mut gamma_g_multiplier = E::Fr::zero(); + for (((c, z), v), proof) in commitments.iter().zip(points).zip(values).zip(proofs) { + let w = proof.w; + let mut temp = w.scalar_mul(*z); + temp.add_assign_mixed(&c.0); + let c = temp; + g_multiplier += &(randomizer * v); + if let Some(random_v) = proof.random_v { + gamma_g_multiplier += &(randomizer * &random_v); + } + total_c += &c.mul(randomizer.into_repr()); + total_w += &w.mul(randomizer.into_repr()); + // We don't need to sample randomizers from the full field, + // only from 128-bit strings. + randomizer = u128::rand(rng).into(); + } + total_c -= &vk.g.scalar_mul(g_multiplier); + total_c -= &vk.gamma_g.scalar_mul(gamma_g_multiplier); + end_timer!(combination_time); + + let to_affine_time = start_timer!(|| "Converting results to affine for pairing"); + let affine_points = E::G1Projective::batch_normalization_into_affine(&[-total_w, total_c]); + let (total_w, total_c) = (affine_points[0], affine_points[1]); + end_timer!(to_affine_time); + + let pairing_time = start_timer!(|| "Performing product of pairings"); + let result = E::product_of_pairings(&[ + (total_w.into(), vk.prepared_beta_h.clone()), + (total_c.into(), vk.prepared_h.clone()), + ]) + .is_one(); + end_timer!(pairing_time); + end_timer!(check_time, || format!("Result: {}", result)); + Ok(result) + } + + pub(crate) fn check_degree_is_too_large(degree: usize, num_powers: usize) -> Result<(), Error> { + let num_coefficients = degree + 1; + if num_coefficients > num_powers { + Err(Error::TooManyCoefficients { + num_coefficients, + num_powers, + }) + } else { + Ok(()) + } + } + + pub(crate) fn check_hiding_bound( + hiding_poly_degree: usize, + num_powers: usize, + ) -> Result<(), Error> { + if hiding_poly_degree == 0 { + Err(Error::HidingBoundIsZero) + } else if hiding_poly_degree >= num_powers { + // The above check uses `>=` because committing to a hiding poly with + // degree `hiding_poly_degree` requires `hiding_poly_degree + 1` + // powers. 
+ Err(Error::HidingBoundToolarge { + hiding_poly_degree, + num_powers, + }) + } else { + Ok(()) + } + } + + pub(crate) fn check_degrees_and_bounds<'a>( + supported_degree: usize, + max_degree: usize, + enforced_degree_bounds: Option<&[usize]>, + p: &'a LabeledPolynomial, + ) -> Result<(), Error> { + if let Some(bound) = p.degree_bound() { + let enforced_degree_bounds = + enforced_degree_bounds.ok_or(Error::UnsupportedDegreeBound(bound))?; + + if enforced_degree_bounds.binary_search(&bound).is_err() { + Err(Error::UnsupportedDegreeBound(bound)) + } else if bound < p.degree() || bound > max_degree { + return Err(Error::IncorrectDegreeBound { + poly_degree: p.degree(), + degree_bound: p.degree_bound().unwrap(), + supported_degree, + label: p.label().to_string(), + }); + } else { + Ok(()) + } + } else { + Ok(()) + } + } +} + +fn skip_leading_zeros_and_convert_to_bigints>( + p: &P, +) -> (usize, Vec) { + let mut num_leading_zeros = 0; + while num_leading_zeros < p.coeffs().len() && p.coeffs()[num_leading_zeros].is_zero() { + num_leading_zeros += 1; + } + let coeffs = convert_to_bigints(&p.coeffs()[num_leading_zeros..]); + (num_leading_zeros, coeffs) +} + +fn convert_to_bigints(p: &[F]) -> Vec { + let to_bigint_time = start_timer!(|| "Converting polynomial coeffs to bigints"); + let coeffs = ark_std::cfg_iter!(p) + .map(|s| s.into_repr()) + .collect::>(); + end_timer!(to_bigint_time); + coeffs +} + +#[cfg(test)] +mod tests { + #![allow(non_camel_case_types)] + use crate::kzg10::*; + use crate::*; + + use ark_bls12_377::Bls12_377; + use ark_bls12_381::Bls12_381; + use ark_bls12_381::Fr; + use ark_ec::PairingEngine; + use ark_poly::univariate::DensePolynomial as DensePoly; + use ark_std::test_rng; + + type UniPoly_381 = DensePoly<::Fr>; + type UniPoly_377 = DensePoly<::Fr>; + type KZG_Bls12_381 = KZG10; + + impl> KZG10 { + /// Specializes the public parameters for a given maximum degree `d` for polynomials + /// `d` should be less that `pp.max_degree()`. 
+ pub(crate) fn trim( + pp: &UniversalParams, + mut supported_degree: usize, + ) -> Result<(Powers, VerifierKey), Error> { + if supported_degree == 1 { + supported_degree += 1; + } + let powers_of_g = pp.powers_of_g[..=supported_degree].to_vec(); + let powers_of_gamma_g = (0..=supported_degree) + .map(|i| pp.powers_of_gamma_g[&i]) + .collect(); + + let powers = Powers { + powers_of_g: ark_std::borrow::Cow::Owned(powers_of_g), + powers_of_gamma_g: ark_std::borrow::Cow::Owned(powers_of_gamma_g), + }; + let vk = VerifierKey { + g: pp.powers_of_g[0], + gamma_g: pp.powers_of_gamma_g[&0], + h: pp.h, + beta_h: pp.beta_h, + prepared_h: pp.prepared_h.clone(), + prepared_beta_h: pp.prepared_beta_h.clone(), + }; + Ok((powers, vk)) + } + } + + #[test] + fn add_commitments_test() { + let rng = &mut test_rng(); + let p = DensePoly::from_coefficients_slice(&[ + Fr::rand(rng), + Fr::rand(rng), + Fr::rand(rng), + Fr::rand(rng), + Fr::rand(rng), + ]); + let f = Fr::rand(rng); + let mut f_p = DensePoly::zero(); + f_p += (f, &p); + + let degree = 4; + let pp = KZG_Bls12_381::setup(degree, false, rng).unwrap(); + let (powers, _) = KZG_Bls12_381::trim(&pp, degree).unwrap(); + + let hiding_bound = None; + let (comm, _) = KZG10::commit(&powers, &p, hiding_bound, Some(rng)).unwrap(); + let (f_comm, _) = KZG10::commit(&powers, &f_p, hiding_bound, Some(rng)).unwrap(); + let mut f_comm_2 = Commitment::empty(); + f_comm_2 += (f, &comm); + + assert_eq!(f_comm, f_comm_2); + } + + fn end_to_end_test_template() -> Result<(), Error> + where + E: PairingEngine, + P: UVPolynomial, + for<'a, 'b> &'a P: Div<&'b P, Output = P>, + { + let rng = &mut test_rng(); + for _ in 0..100 { + let mut degree = 0; + while degree <= 1 { + degree = usize::rand(rng) % 20; + } + let pp = KZG10::::setup(degree, false, rng)?; + let (ck, vk) = KZG10::::trim(&pp, degree)?; + let p = P::rand(degree, rng); + let hiding_bound = Some(1); + let (comm, rand) = KZG10::::commit(&ck, &p, hiding_bound, Some(rng))?; + let point = E::Fr::rand(rng); + let value = p.evaluate(&point); + let proof = KZG10::::open(&ck, &p, point, &rand)?; + assert!( + KZG10::::check(&vk, &comm, point, value, &proof)?, + "proof was incorrect for max_degree = {}, polynomial_degree = {}, hiding_bound = {:?}", + degree, + p.degree(), + hiding_bound, + ); + } + Ok(()) + } + + fn linear_polynomial_test_template() -> Result<(), Error> + where + E: PairingEngine, + P: UVPolynomial, + for<'a, 'b> &'a P: Div<&'b P, Output = P>, + { + let rng = &mut test_rng(); + for _ in 0..100 { + let degree = 50; + let pp = KZG10::::setup(degree, false, rng)?; + let (ck, vk) = KZG10::::trim(&pp, 2)?; + let p = P::rand(1, rng); + let hiding_bound = Some(1); + let (comm, rand) = KZG10::::commit(&ck, &p, hiding_bound, Some(rng))?; + let point = E::Fr::rand(rng); + let value = p.evaluate(&point); + let proof = KZG10::::open(&ck, &p, point, &rand)?; + assert!( + KZG10::::check(&vk, &comm, point, value, &proof)?, + "proof was incorrect for max_degree = {}, polynomial_degree = {}, hiding_bound = {:?}", + degree, + p.degree(), + hiding_bound, + ); + } + Ok(()) + } + + fn batch_check_test_template() -> Result<(), Error> + where + E: PairingEngine, + P: UVPolynomial, + for<'a, 'b> &'a P: Div<&'b P, Output = P>, + { + let rng = &mut test_rng(); + for _ in 0..10 { + let mut degree = 0; + while degree <= 1 { + degree = usize::rand(rng) % 20; + } + let pp = KZG10::::setup(degree, false, rng)?; + let (ck, vk) = KZG10::::trim(&pp, degree)?; + let mut comms = Vec::new(); + let mut values = Vec::new(); + let mut points = 
Vec::new(); + let mut proofs = Vec::new(); + for _ in 0..10 { + let p = P::rand(degree, rng); + let hiding_bound = Some(1); + let (comm, rand) = KZG10::::commit(&ck, &p, hiding_bound, Some(rng))?; + let point = E::Fr::rand(rng); + let value = p.evaluate(&point); + let proof = KZG10::::open(&ck, &p, point, &rand)?; + + assert!(KZG10::::check(&vk, &comm, point, value, &proof)?); + comms.push(comm); + values.push(value); + points.push(point); + proofs.push(proof); + } + assert!(KZG10::::batch_check( + &vk, &comms, &points, &values, &proofs, rng + )?); + } + Ok(()) + } + + #[test] + fn end_to_end_test() { + end_to_end_test_template::().expect("test failed for bls12-377"); + end_to_end_test_template::().expect("test failed for bls12-381"); + } + + #[test] + fn linear_polynomial_test() { + linear_polynomial_test_template::() + .expect("test failed for bls12-377"); + linear_polynomial_test_template::() + .expect("test failed for bls12-381"); + } + #[test] + fn batch_check_test() { + batch_check_test_template::().expect("test failed for bls12-377"); + batch_check_test_template::().expect("test failed for bls12-381"); + } + + #[test] + fn test_degree_is_too_large() { + let rng = &mut test_rng(); + + let max_degree = 123; + let pp = KZG_Bls12_381::setup(max_degree, false, rng).unwrap(); + let (powers, _) = KZG_Bls12_381::trim(&pp, max_degree).unwrap(); + + let p = DensePoly::::rand(max_degree + 1, rng); + assert!(p.degree() > max_degree); + assert!(KZG_Bls12_381::check_degree_is_too_large(p.degree(), powers.size()).is_err()); + } +} diff --git a/arkworks/poly-commit/src/lib.rs b/arkworks/poly-commit/src/lib.rs new file mode 100644 index 00000000..bb6ef5b9 --- /dev/null +++ b/arkworks/poly-commit/src/lib.rs @@ -0,0 +1,1379 @@ +#![cfg_attr(not(feature = "std"), no_std)] +//! A crate for polynomial commitment schemes. +#![deny(unused_import_braces, unused_qualifications, trivial_casts)] +#![deny(trivial_numeric_casts, private_in_public, variant_size_differences)] +#![deny(stable_features, unreachable_pub, non_shorthand_field_patterns)] +#![deny(unused_attributes, unused_mut)] +#![deny(missing_docs)] +#![deny(unused_imports)] +#![deny(renamed_and_removed_lints, stable_features, unused_allocation)] +#![deny(unused_comparisons, bare_trait_objects, unused_must_use)] +#![forbid(unsafe_code)] + +#[macro_use] +extern crate derivative; +#[macro_use] +extern crate ark_std; + +use ark_ff::Field; +pub use ark_poly::{Polynomial, UVPolynomial}; +use ark_std::rand::RngCore; + +use ark_std::{ + collections::{BTreeMap, BTreeSet}, + fmt::Debug, + hash::Hash, + iter::FromIterator, + rc::Rc, + string::{String, ToString}, + vec::Vec, +}; + +/// Data structures used by a polynomial commitment scheme. +pub mod data_structures; +pub use data_structures::*; + +/// R1CS constraints for polynomial constraints. +#[cfg(feature = "r1cs")] +mod constraints; +#[cfg(feature = "r1cs")] +pub use constraints::*; + +/// Errors pertaining to query sets. +pub mod error; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +pub use error::*; + +/// Univariate and multivariate polynomial commitment schemes +/// which (optionally) enable hiding commitments by following +/// the approach outlined in [[CHMMVW20, "Marlin"]][marlin]. +/// +/// [marlin]: https://eprint.iacr.org/2019/1047 +pub mod marlin; + +/// A random number generator that bypasses some limitations of the Rust borrow +/// checker. +pub mod optional_rng; + +#[cfg(not(feature = "std"))] +macro_rules! 
eprintln {
+    () => {};
+    ($($arg: tt)*) => {};
+}
+#[cfg(not(feature = "std"))]
+macro_rules! println {
+    () => {};
+    ($($arg: tt)*) => {};
+}
+/// The core [[KZG10]][kzg] construction.
+///
+/// [kzg]: http://cacr.uwaterloo.ca/techreports/2010/cacr2010-10.pdf
+pub mod kzg10;
+
+/// Polynomial commitment scheme from [[KZG10]][kzg] that enforces
+/// strict degree bounds and (optionally) enables hiding commitments by
+/// following the approach outlined in [[CHMMVW20, "Marlin"]][marlin].
+///
+/// [kzg]: http://cacr.uwaterloo.ca/techreports/2010/cacr2010-10.pdf
+/// [marlin]: https://eprint.iacr.org/2019/1047
+pub use marlin::marlin_pc;
+
+/// Polynomial commitment scheme based on the construction in [[KZG10]][kzg],
+/// modified to obtain batching and to enforce strict
+/// degree bounds by following the approach outlined in [[MBKM19,
+/// "Sonic"]][sonic] (more precisely, via the variant in
+/// [[Gabizon19, "AuroraLight"]][al] that avoids negative G1 powers).
+///
+/// [kzg]: http://cacr.uwaterloo.ca/techreports/2010/cacr2010-10.pdf
+/// [sonic]: https://eprint.iacr.org/2019/099
+/// [al]: https://eprint.iacr.org/2019/601
+/// [marlin]: https://eprint.iacr.org/2019/1047
+pub mod sonic_pc;
+
+/// A polynomial commitment scheme based on the hardness of the
+/// discrete logarithm problem in prime-order groups.
+/// The construction is detailed in [[BCMS20]][pcdas].
+///
+/// [pcdas]: https://eprint.iacr.org/2020/499
+pub mod ipa_pc;
+
+/// A multilinear polynomial commitment scheme that converts an n-variate
+/// multilinear polynomial into n univariate quotient polynomials. The scheme
+/// is based on the hardness of the discrete logarithm problem in prime-order
+/// groups. The construction is detailed in [[XZZPD19]][xzzpd19] and
+/// [[ZGKPP18]][zgkpp18].
+///
+/// [xzzpd19]: https://eprint.iacr.org/2019/317
+/// [zgkpp18]: https://ieeexplore.ieee.org/document/8418645
+pub mod multilinear_pc;
+
+/// Multivariate polynomial commitment based on the construction in
+/// [[PST13]][pst] with batching and (optional) hiding property inspired
+/// by the univariate scheme in [[CHMMVW20, "Marlin"]][marlin].
+///
+/// [pst]: https://eprint.iacr.org/2011/587.pdf
+/// [marlin]: https://eprint.iacr.org/2019/1047
+pub use marlin::marlin_pst13_pc;
+
+/// `QuerySet` is the set of queries that are to be made to a set of labeled
+/// polynomials/equations `p` that have previously been committed to. Each
+/// element of a `QuerySet` is a pair `(label, (point_label, point))`, where
+/// `label` is the label of a polynomial in `p`, `point_label` is the label of
+/// the point (e.g., "beta"), and `point` is the location at which `p[label]`
+/// is to be queried.
+pub type QuerySet<T> = BTreeSet<(String, (String, T))>;
+
+/// `Evaluations` is the result of querying a set of labeled polynomials or
+/// equations `p` at a `QuerySet` `Q`. It maps each element of `Q` to the
+/// resulting evaluation. That is, if `(label, query)` is an element of `Q`,
+/// then `evaluations.get((label, query))` should equal `p[label].evaluate(query)`.
+pub type Evaluations<T, F> = BTreeMap<(String, T), F>;
+
+/// Describes the interface for a polynomial commitment scheme that allows
+/// a sender to commit to multiple polynomials and later provide a succinct proof
+/// of evaluation for the corresponding commitments at a query set `Q`, while
+/// enforcing per-polynomial degree bounds.
+pub trait PolynomialCommitment<F: Field, P: Polynomial<F>>: Sized {
+    /// The universal parameters for the commitment scheme. These are "trimmed"
+    /// down to `Self::CommitterKey` and `Self::VerifierKey` by `Self::trim`.
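As a concrete illustration of the `QuerySet` and `Evaluations` shapes defined above (hypothetical data; `u64` stands in for both the point type and the field element):

```rust
// Illustrative only: the real types are `QuerySet<P::Point>` and
// `Evaluations<P::Point, F>`.
use std::collections::{BTreeMap, BTreeSet};

type QuerySet = BTreeSet<(String, (String, u64))>;
type Evaluations = BTreeMap<(String, u64), u64>;

fn main() {
    let mut query_set = QuerySet::new();
    // Query polynomial "w" at the point labeled "beta" (value 42 here).
    query_set.insert(("w".to_string(), ("beta".to_string(), 42)));

    let mut evals = Evaluations::new();
    // The claimed value of w(42); keyed by (polynomial label, point).
    evals.insert(("w".to_string(), 42), 7);

    for (label, (point_label, point)) in &query_set {
        let v = evals.get(&(label.clone(), *point)).expect("missing evaluation");
        println!("{}({} = {}) = {}", label, point_label, point, v);
    }
}
```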
+    type UniversalParams: PCUniversalParams;
+    /// The committer key for the scheme; used to commit to a polynomial and then
+    /// open the commitment to produce an evaluation proof.
+    type CommitterKey: PCCommitterKey;
+    /// The verifier key for the scheme; used to check an evaluation proof.
+    type VerifierKey: PCVerifierKey;
+    /// The prepared verifier key for the scheme; used to check an evaluation proof.
+    type PreparedVerifierKey: PCPreparedVerifierKey<Self::VerifierKey> + Clone;
+    /// The commitment to a polynomial.
+    type Commitment: PCCommitment + Default;
+    /// The prepared commitment to a polynomial.
+    type PreparedCommitment: PCPreparedCommitment<Self::Commitment>;
+    /// The commitment randomness.
+    type Randomness: PCRandomness;
+    /// The evaluation proof for a single point.
+    type Proof: PCProof + Clone;
+    /// The evaluation proof for a query set.
+    type BatchProof: Clone
+        + From<Vec<Self::Proof>>
+        + Into<Vec<Self::Proof>>
+        + CanonicalSerialize
+        + CanonicalDeserialize;
+    /// The error type for the scheme.
+    type Error: ark_std::error::Error + From<Error>;
+
+    /// Constructs public parameters when given as input the maximum degree `max_degree`
+    /// for the polynomial commitment scheme. `num_vars` specifies the number of
+    /// variables for a multivariate setup.
+    fn setup<R: RngCore>(
+        max_degree: usize,
+        num_vars: Option<usize>,
+        rng: &mut R,
+    ) -> Result<Self::UniversalParams, Self::Error>;
+
+    /// Specializes the public parameters for polynomials up to the given `supported_degree`
+    /// and for enforcing degree bounds in the range `1..=supported_degree`.
+    fn trim(
+        pp: &Self::UniversalParams,
+        supported_degree: usize,
+        supported_hiding_bound: usize,
+        enforced_degree_bounds: Option<&[usize]>,
+    ) -> Result<(Self::CommitterKey, Self::VerifierKey), Self::Error>;
+
+    /// Outputs commitments to `polynomials`. If `polynomials[i].is_hiding()`,
+    /// then the `i`-th commitment is hiding up to `polynomials.hiding_bound()` queries.
+    /// `rng` should not be `None` if `polynomials[i].is_hiding() == true` for any `i`.
+    ///
+    /// If for some `i`, `polynomials[i].is_hiding() == false`, then the
+    /// corresponding randomness is `Self::Randomness::empty()`.
+    ///
+    /// If for some `i`, `polynomials[i].degree_bound().is_some()`, then that
+    /// polynomial will have the corresponding degree bound enforced.
+    fn commit<'a>(
+        ck: &Self::CommitterKey,
+        polynomials: impl IntoIterator<Item = &'a LabeledPolynomial<F, P>>,
+        rng: Option<&mut dyn RngCore>,
+    ) -> Result<
+        (
+            Vec<LabeledCommitment<Self::Commitment>>,
+            Vec<Self::Randomness>,
+        ),
+        Self::Error,
+    >
+    where
+        P: 'a;
+
+    /// On input a list of labeled polynomials and a query point, `open` outputs a
+    /// proof of evaluation of the polynomials at the query point.
+    fn open<'a>(
+        ck: &Self::CommitterKey,
+        labeled_polynomials: impl IntoIterator<Item = &'a LabeledPolynomial<F, P>>,
+        commitments: impl IntoIterator<Item = &'a LabeledCommitment<Self::Commitment>>,
+        point: &'a P::Point,
+        opening_challenge: F,
+        rands: impl IntoIterator<Item = &'a Self::Randomness>,
+        rng: Option<&mut dyn RngCore>,
+    ) -> Result<Self::Proof, Self::Error>
+    where
+        P: 'a,
+        Self::Randomness: 'a,
+        Self::Commitment: 'a,
+    {
+        let opening_challenges = |pow| opening_challenge.pow(&[pow]);
+        Self::open_individual_opening_challenges(
+            ck,
+            labeled_polynomials,
+            commitments,
+            point,
+            &opening_challenges,
+            rands,
+            rng,
+        )
+    }
+
+    /// On input a list of labeled polynomials and a query set, `batch_open` outputs a
+    /// proof of evaluation of the polynomials at the points in the query set.
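The default methods above and below turn the single `opening_challenge` into the power family `xi^0, xi^1, ...` via a closure, which the `*_individual_opening_challenges` variants then consume. A minimal sketch of that pattern, with `u128` arithmetic modulo a toy prime standing in for field arithmetic:

```rust
const MODULUS: u128 = (1u128 << 61) - 1; // a toy Mersenne prime, 2^61 - 1

// Square-and-multiply exponentiation, the integer analogue of `F::pow`.
fn pow_mod(mut base: u128, mut exp: u64) -> u128 {
    let mut acc = 1u128;
    base %= MODULUS;
    while exp > 0 {
        if exp & 1 == 1 {
            acc = acc * base % MODULUS;
        }
        base = base * base % MODULUS;
        exp >>= 1;
    }
    acc
}

fn main() {
    let opening_challenge: u128 = 123_456_789;
    // Mirrors `|pow| opening_challenge.pow(&[pow])` in the default methods.
    let opening_challenges = |pow: u64| pow_mod(opening_challenge, pow);

    assert_eq!(opening_challenges(0), 1);
    assert_eq!(opening_challenges(1), opening_challenge);
    assert_eq!(opening_challenges(3), pow_mod(opening_challenge, 3));
}
```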
+ fn batch_open<'a>( + ck: &Self::CommitterKey, + labeled_polynomials: impl IntoIterator>, + commitments: impl IntoIterator>, + query_set: &QuerySet, + opening_challenge: F, + rands: impl IntoIterator, + rng: Option<&mut dyn RngCore>, + ) -> Result + where + Self::Randomness: 'a, + Self::Commitment: 'a, + P: 'a, + { + let opening_challenges = |pow| opening_challenge.pow(&[pow]); + Self::batch_open_individual_opening_challenges( + ck, + labeled_polynomials, + commitments, + query_set, + &opening_challenges, + rands, + rng, + ) + } + + /// Verifies that `values` are the evaluations at `point` of the polynomials + /// committed inside `commitments`. + fn check<'a>( + vk: &Self::VerifierKey, + commitments: impl IntoIterator>, + point: &'a P::Point, + values: impl IntoIterator, + proof: &Self::Proof, + opening_challenge: F, + rng: Option<&mut dyn RngCore>, + ) -> Result + where + Self::Commitment: 'a, + { + let opening_challenges = |pow| opening_challenge.pow(&[pow]); + Self::check_individual_opening_challenges( + vk, + commitments, + &point, + values, + proof, + &opening_challenges, + rng, + ) + } + + /// Checks that `values` are the true evaluations at `query_set` of the polynomials + /// committed in `labeled_commitments`. + fn batch_check<'a, R: RngCore>( + vk: &Self::VerifierKey, + commitments: impl IntoIterator>, + query_set: &QuerySet, + evaluations: &Evaluations, + proof: &Self::BatchProof, + opening_challenge: F, + rng: &mut R, + ) -> Result + where + Self::Commitment: 'a, + { + let opening_challenges = |pow| opening_challenge.pow(&[pow]); + Self::batch_check_individual_opening_challenges( + vk, + commitments, + query_set, + evaluations, + proof, + &opening_challenges, + rng, + ) + } + + /// On input a list of polynomials, linear combinations of those polynomials, + /// and a query set, `open_combination` outputs a proof of evaluation of + /// the combinations at the points in the query set. + fn open_combinations<'a>( + ck: &Self::CommitterKey, + linear_combinations: impl IntoIterator>, + polynomials: impl IntoIterator>, + commitments: impl IntoIterator>, + query_set: &QuerySet, + opening_challenge: F, + rands: impl IntoIterator, + rng: Option<&mut dyn RngCore>, + ) -> Result, Self::Error> + where + P: 'a, + Self::Randomness: 'a, + Self::Commitment: 'a, + { + let opening_challenges = |pow| opening_challenge.pow(&[pow]); + Self::open_combinations_individual_opening_challenges( + ck, + linear_combinations, + polynomials, + commitments, + query_set, + &opening_challenges, + rands, + rng, + ) + } + + /// Checks that `evaluations` are the true evaluations at `query_set` of the + /// linear combinations of polynomials committed in `commitments`. 
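`check_combinations` (next) accepts only if each claimed linear-combination value equals the coefficient-weighted sum of its terms' evaluations. A toy version of that consistency check, with `u64` arithmetic modulo a hypothetical prime in place of `F` and hard-coded evaluations in place of committed polynomials:

```rust
const MODULUS: u64 = 1_000_000_007;

fn main() {
    // Evaluations of the underlying polynomials at the query point.
    let f_eval = 12u64;
    let g_eval = 34u64;

    // lc = 3*f + 5*g + 7*1; the trailing term uses the constant polynomial,
    // analogous to the `LCTerm::One` case in the real check.
    let terms: [(u64, Option<&str>); 3] = [(3, Some("f")), (5, Some("g")), (7, None)];

    let mut actual_rhs = 0u64;
    for (coeff, label) in terms {
        let eval = match label {
            Some("f") => f_eval,
            Some("g") => g_eval,
            None => 1, // the constant polynomial `1`
            _ => unreachable!(),
        };
        actual_rhs = (actual_rhs + coeff * eval) % MODULUS;
    }

    let claimed_rhs = (3 * f_eval + 5 * g_eval + 7) % MODULUS;
    assert_eq!(claimed_rhs, actual_rhs, "claimed evaluation is incorrect");
}
```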
+ fn check_combinations<'a, R: RngCore>( + vk: &Self::VerifierKey, + linear_combinations: impl IntoIterator>, + commitments: impl IntoIterator>, + eqn_query_set: &QuerySet, + eqn_evaluations: &Evaluations, + proof: &BatchLCProof, + opening_challenge: F, + rng: &mut R, + ) -> Result + where + Self::Commitment: 'a, + { + let opening_challenges = |pow| opening_challenge.pow(&[pow]); + Self::check_combinations_individual_opening_challenges( + vk, + linear_combinations, + commitments, + eqn_query_set, + eqn_evaluations, + proof, + &opening_challenges, + rng, + ) + } + + /// open but with individual challenges + fn open_individual_opening_challenges<'a>( + ck: &Self::CommitterKey, + labeled_polynomials: impl IntoIterator>, + commitments: impl IntoIterator>, + point: &'a P::Point, + opening_challenges: &dyn Fn(u64) -> F, + rands: impl IntoIterator, + rng: Option<&mut dyn RngCore>, + ) -> Result + where + P: 'a, + Self::Randomness: 'a, + Self::Commitment: 'a; + + /// check but with individual challenges + fn check_individual_opening_challenges<'a>( + vk: &Self::VerifierKey, + commitments: impl IntoIterator>, + point: &'a P::Point, + values: impl IntoIterator, + proof: &Self::Proof, + opening_challenges: &dyn Fn(u64) -> F, + rng: Option<&mut dyn RngCore>, + ) -> Result + where + Self::Commitment: 'a; + + /// batch_check but with individual challenges + fn batch_check_individual_opening_challenges<'a, R: RngCore>( + vk: &Self::VerifierKey, + commitments: impl IntoIterator>, + query_set: &QuerySet, + evaluations: &Evaluations, + proof: &Self::BatchProof, + opening_challenges: &dyn Fn(u64) -> F, + rng: &mut R, + ) -> Result + where + Self::Commitment: 'a, + { + let commitments: BTreeMap<_, _> = commitments.into_iter().map(|c| (c.label(), c)).collect(); + let mut query_to_labels_map = BTreeMap::new(); + for (label, (point_label, point)) in query_set.iter() { + let labels = query_to_labels_map + .entry(point_label) + .or_insert((point, BTreeSet::new())); + labels.1.insert(label); + } + + // Implicit assumption: proofs are order in same manner as queries in + // `query_to_labels_map`. 
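The ordering assumption noted in the comment above is load-bearing: `query_to_labels_map` is a `BTreeMap`, so iteration is sorted by point label, and the `Vec` obtained from `BatchProof` must list proofs in that same order. A toy demonstration with string stand-ins for proofs:

```rust
use std::collections::BTreeMap;

fn main() {
    let mut query_to_labels_map: BTreeMap<&str, u64> = BTreeMap::new();
    // Inserted out of order...
    query_to_labels_map.insert("gamma", 99);
    query_to_labels_map.insert("beta", 10);

    // ...but iterated in sorted order, so the prover must emit the proof for
    // "beta" before the proof for "gamma".
    let proofs = ["proof_at_beta", "proof_at_gamma"];
    for ((point_label, _point), proof) in query_to_labels_map.iter().zip(proofs) {
        println!("{} is checked against point label {}", proof, point_label);
    }
    assert_eq!(
        query_to_labels_map.keys().copied().collect::<Vec<_>>(),
        vec!["beta", "gamma"]
    );
}
```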
+ let proofs: Vec<_> = proof.clone().into(); + assert_eq!(proofs.len(), query_to_labels_map.len()); + + let mut result = true; + for ((_point_label, (point, labels)), proof) in query_to_labels_map.into_iter().zip(proofs) + { + let mut comms: Vec<&'_ LabeledCommitment<_>> = Vec::new(); + let mut values = Vec::new(); + for label in labels.into_iter() { + let commitment = commitments.get(label).ok_or(Error::MissingPolynomial { + label: label.to_string(), + })?; + + let v_i = evaluations.get(&(label.clone(), point.clone())).ok_or( + Error::MissingEvaluation { + label: label.to_string(), + }, + )?; + + comms.push(commitment); + values.push(*v_i); + } + + let proof_time = start_timer!(|| "Checking per-query proof"); + result &= Self::check_individual_opening_challenges( + vk, + comms, + &point, + values, + &proof, + opening_challenges, + Some(rng), + )?; + end_timer!(proof_time); + } + Ok(result) + } + + /// open_combinations but with individual challenges + fn open_combinations_individual_opening_challenges<'a>( + ck: &Self::CommitterKey, + linear_combinations: impl IntoIterator>, + polynomials: impl IntoIterator>, + commitments: impl IntoIterator>, + query_set: &QuerySet, + opening_challenges: &dyn Fn(u64) -> F, + rands: impl IntoIterator, + rng: Option<&mut dyn RngCore>, + ) -> Result, Self::Error> + where + Self::Randomness: 'a, + Self::Commitment: 'a, + P: 'a, + { + let linear_combinations: Vec<_> = linear_combinations.into_iter().collect(); + let polynomials: Vec<_> = polynomials.into_iter().collect(); + let poly_query_set = + lc_query_set_to_poly_query_set(linear_combinations.iter().copied(), query_set); + let poly_evals = evaluate_query_set(polynomials.iter().copied(), &poly_query_set); + let proof = Self::batch_open_individual_opening_challenges( + ck, + polynomials, + commitments, + &poly_query_set, + opening_challenges, + rands, + rng, + )?; + Ok(BatchLCProof { + proof, + evals: Some(poly_evals.values().copied().collect()), + }) + } + + /// check_combinations with individual challenges + fn check_combinations_individual_opening_challenges<'a, R: RngCore>( + vk: &Self::VerifierKey, + linear_combinations: impl IntoIterator>, + commitments: impl IntoIterator>, + eqn_query_set: &QuerySet, + eqn_evaluations: &Evaluations, + proof: &BatchLCProof, + opening_challenges: &dyn Fn(u64) -> F, + rng: &mut R, + ) -> Result + where + Self::Commitment: 'a, + { + let BatchLCProof { proof, evals } = proof; + + let lc_s = BTreeMap::from_iter(linear_combinations.into_iter().map(|lc| (lc.label(), lc))); + + let poly_query_set = lc_query_set_to_poly_query_set(lc_s.values().copied(), eqn_query_set); + let poly_evals = Evaluations::from_iter( + poly_query_set + .iter() + .map(|(_, point)| point) + .cloned() + .zip(evals.clone().unwrap()), + ); + + for &(ref lc_label, (_, ref point)) in eqn_query_set { + if let Some(lc) = lc_s.get(lc_label) { + let claimed_rhs = *eqn_evaluations + .get(&(lc_label.clone(), point.clone())) + .ok_or(Error::MissingEvaluation { + label: lc_label.to_string(), + })?; + + let mut actual_rhs = F::zero(); + + for (coeff, label) in lc.iter() { + let eval = match label { + LCTerm::One => F::one(), + LCTerm::PolyLabel(l) => *poly_evals + .get(&(l.clone().into(), point.clone())) + .ok_or(Error::MissingEvaluation { label: l.clone() })?, + }; + + actual_rhs += &(*coeff * eval); + } + if claimed_rhs != actual_rhs { + eprintln!("Claimed evaluation of {} is incorrect", lc.label()); + return Ok(false); + } + } + } + + let pc_result = Self::batch_check_individual_opening_challenges( + vk, + 
commitments, + &poly_query_set, + &poly_evals, + proof, + opening_challenges, + rng, + )?; + if !pc_result { + eprintln!("Evaluation proofs failed to verify"); + return Ok(false); + } + + Ok(true) + } + + /// batch_open with individual challenges + fn batch_open_individual_opening_challenges<'a>( + ck: &Self::CommitterKey, + labeled_polynomials: impl IntoIterator>, + commitments: impl IntoIterator>, + query_set: &QuerySet, + opening_challenges: &dyn Fn(u64) -> F, + rands: impl IntoIterator, + rng: Option<&mut dyn RngCore>, + ) -> Result + where + P: 'a, + Self::Randomness: 'a, + Self::Commitment: 'a, + { + let rng = &mut crate::optional_rng::OptionalRng(rng); + let poly_rand_comm: BTreeMap<_, _> = labeled_polynomials + .into_iter() + .zip(rands) + .zip(commitments.into_iter()) + .map(|((poly, r), comm)| (poly.label(), (poly, r, comm))) + .collect(); + + let open_time = start_timer!(|| format!( + "Opening {} polynomials at query set of size {}", + poly_rand_comm.len(), + query_set.len(), + )); + + let mut query_to_labels_map = BTreeMap::new(); + + for (label, (point_label, point)) in query_set.iter() { + let labels = query_to_labels_map + .entry(point_label) + .or_insert((point, BTreeSet::new())); + labels.1.insert(label); + } + + let mut proofs = Vec::new(); + for (_point_label, (point, labels)) in query_to_labels_map.into_iter() { + let mut query_polys: Vec<&'a LabeledPolynomial<_, _>> = Vec::new(); + let mut query_rands: Vec<&'a Self::Randomness> = Vec::new(); + let mut query_comms: Vec<&'a LabeledCommitment> = Vec::new(); + + for label in labels { + let (polynomial, rand, comm) = + poly_rand_comm.get(label).ok_or(Error::MissingPolynomial { + label: label.to_string(), + })?; + + query_polys.push(polynomial); + query_rands.push(rand); + query_comms.push(comm); + } + + let proof_time = start_timer!(|| "Creating proof"); + let proof = Self::open_individual_opening_challenges( + ck, + query_polys, + query_comms, + &point, + opening_challenges, + query_rands, + Some(rng), + )?; + + end_timer!(proof_time); + + proofs.push(proof); + } + end_timer!(open_time); + + Ok(proofs.into()) + } +} + +/// Evaluate the given polynomials at `query_set`. 
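A self-contained sketch of what `evaluate_query_set` (below) computes, with plain `i64` coefficient vectors standing in for labeled polynomials and Horner evaluation in place of `Polynomial::evaluate`:

```rust
use std::collections::{BTreeMap, BTreeSet};

// Horner evaluation of a dense coefficient slice.
fn evaluate(coeffs: &[i64], x: i64) -> i64 {
    coeffs.iter().rev().fold(0, |acc, c| acc * x + c)
}

fn main() {
    let mut polys: BTreeMap<&str, Vec<i64>> = BTreeMap::new();
    polys.insert("f", vec![1, 2]); // f(x) = 1 + 2x
    polys.insert("g", vec![0, 0, 1]); // g(x) = x^2

    let query_set: BTreeSet<(&str, (&str, i64))> =
        [("f", ("beta", 3)), ("g", ("beta", 3))].into_iter().collect();

    // Mirrors the lookup-then-evaluate loop in `evaluate_query_set`.
    let mut evaluations: BTreeMap<(&str, i64), i64> = BTreeMap::new();
    for (label, (_point_label, point)) in &query_set {
        let poly = polys.get(label).expect("polynomial not found");
        evaluations.insert((*label, *point), evaluate(poly, *point));
    }

    assert_eq!(evaluations[&("f", 3)], 7); // 1 + 2*3
    assert_eq!(evaluations[&("g", 3)], 9); // 3^2
}
```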
+pub fn evaluate_query_set<'a, F, P, T>( + polys: impl IntoIterator>, + query_set: &QuerySet, +) -> Evaluations +where + F: Field, + P: 'a + Polynomial, + T: Clone + Debug + Hash + Ord + Sync, +{ + let polys = BTreeMap::from_iter(polys.into_iter().map(|p| (p.label(), p))); + let mut evaluations = Evaluations::new(); + for (label, (_, point)) in query_set { + let poly = polys + .get(label) + .expect("polynomial in evaluated lc is not found"); + let eval = poly.evaluate(&point); + evaluations.insert((label.clone(), point.clone()), eval); + } + evaluations +} + +fn lc_query_set_to_poly_query_set<'a, F: Field, T: Clone + Ord>( + linear_combinations: impl IntoIterator>, + query_set: &QuerySet, +) -> QuerySet { + let mut poly_query_set = QuerySet::::new(); + let lc_s = linear_combinations.into_iter().map(|lc| (lc.label(), lc)); + let linear_combinations = BTreeMap::from_iter(lc_s); + for (lc_label, (point_label, point)) in query_set { + if let Some(lc) = linear_combinations.get(lc_label) { + for (_, poly_label) in lc.iter().filter(|(_, l)| !l.is_one()) { + if let LCTerm::PolyLabel(l) = poly_label { + poly_query_set.insert((l.into(), (point_label.clone(), point.clone()))); + } + } + } + } + poly_query_set +} + +#[cfg(test)] +pub mod tests { + use crate::*; + use ark_ff::Field; + use ark_poly::Polynomial; + use ark_std::rand::{ + distributions::{Distribution, Uniform}, + rngs::StdRng, + Rng, + }; + use ark_std::test_rng; + + struct TestInfo> { + num_iters: usize, + max_degree: Option, + supported_degree: Option, + num_vars: Option, + num_polynomials: usize, + enforce_degree_bounds: bool, + max_num_queries: usize, + num_equations: Option, + rand_poly: fn(usize, Option, &mut StdRng) -> P, + rand_point: fn(Option, &mut StdRng) -> P::Point, + } + + pub fn bad_degree_bound_test( + rand_poly: fn(usize, Option, &mut StdRng) -> P, + rand_point: fn(Option, &mut StdRng) -> P::Point, + ) -> Result<(), PC::Error> + where + F: Field, + P: Polynomial, + PC: PolynomialCommitment, + { + let rng = &mut test_rng(); + let max_degree = 100; + let pp = PC::setup(max_degree, None, rng)?; + for _ in 0..10 { + let supported_degree = Uniform::from(1..=max_degree).sample(rng); + assert!( + max_degree >= supported_degree, + "max_degree < supported_degree" + ); + + let mut labels = Vec::new(); + let mut polynomials = Vec::new(); + let mut degree_bounds = Vec::new(); + + for i in 0..10 { + let label = format!("Test{}", i); + labels.push(label.clone()); + let degree_bound = 1usize; + let hiding_bound = Some(1); + degree_bounds.push(degree_bound); + + polynomials.push(LabeledPolynomial::new( + label, + rand_poly(supported_degree, None, rng), + Some(degree_bound), + hiding_bound, + )); + } + + let supported_hiding_bound = polynomials + .iter() + .map(|p| p.hiding_bound().unwrap_or(0)) + .max() + .unwrap_or(0); + println!("supported degree: {:?}", supported_degree); + println!("supported hiding bound: {:?}", supported_hiding_bound); + let (ck, vk) = PC::trim( + &pp, + supported_degree, + supported_hiding_bound, + Some(degree_bounds.as_slice()), + )?; + println!("Trimmed"); + + let (comms, rands) = PC::commit(&ck, &polynomials, Some(rng))?; + + let mut query_set = QuerySet::new(); + let mut values = Evaluations::new(); + let point = rand_point(None, rng); + for (i, label) in labels.iter().enumerate() { + query_set.insert((label.clone(), (format!("{}", i), point.clone()))); + let value = polynomials[i].evaluate(&point); + values.insert((label.clone(), point.clone()), value); + } + println!("Generated query set"); + + let 
opening_challenge = F::rand(rng); + let proof = PC::batch_open( + &ck, + &polynomials, + &comms, + &query_set, + opening_challenge, + &rands, + Some(rng), + )?; + let result = PC::batch_check( + &vk, + &comms, + &query_set, + &values, + &proof, + opening_challenge, + rng, + )?; + assert!(result, "proof was incorrect, Query set: {:#?}", query_set); + } + Ok(()) + } + + fn test_template(info: TestInfo) -> Result<(), PC::Error> + where + F: Field, + P: Polynomial, + PC: PolynomialCommitment, + { + let TestInfo { + num_iters, + max_degree, + supported_degree, + num_vars, + num_polynomials, + enforce_degree_bounds, + max_num_queries, + num_equations: _, + rand_poly, + rand_point, + } = info; + + let rng = &mut test_rng(); + // If testing multivariate polynomials, make the max degree lower + let max_degree = match num_vars { + Some(_) => max_degree.unwrap_or(Uniform::from(2..=10).sample(rng)), + None => max_degree.unwrap_or(Uniform::from(2..=64).sample(rng)), + }; + let pp = PC::setup(max_degree, num_vars, rng)?; + + for _ in 0..num_iters { + let supported_degree = + supported_degree.unwrap_or(Uniform::from(1..=max_degree).sample(rng)); + assert!( + max_degree >= supported_degree, + "max_degree < supported_degree" + ); + let mut polynomials: Vec> = Vec::new(); + let mut degree_bounds = if enforce_degree_bounds { + Some(Vec::new()) + } else { + None + }; + + let mut labels = Vec::new(); + println!("Sampled supported degree"); + + // Generate polynomials + let num_points_in_query_set = Uniform::from(1..=max_num_queries).sample(rng); + for i in 0..num_polynomials { + let label = format!("Test{}", i); + labels.push(label.clone()); + let degree = Uniform::from(1..=supported_degree).sample(rng); + let degree_bound = if let Some(degree_bounds) = &mut degree_bounds { + let range = Uniform::from(degree..=supported_degree); + let degree_bound = range.sample(rng); + degree_bounds.push(degree_bound); + Some(degree_bound) + } else { + None + }; + + let hiding_bound = if num_points_in_query_set >= degree { + Some(degree) + } else { + Some(num_points_in_query_set) + }; + + polynomials.push(LabeledPolynomial::new( + label, + rand_poly(degree, num_vars, rng).into(), + degree_bound, + hiding_bound, + )) + } + let supported_hiding_bound = polynomials + .iter() + .map(|p| p.hiding_bound().unwrap_or(0)) + .max() + .unwrap_or(0); + println!("supported degree: {:?}", supported_degree); + println!("supported hiding bound: {:?}", supported_hiding_bound); + println!("num_points_in_query_set: {:?}", num_points_in_query_set); + let (ck, vk) = PC::trim( + &pp, + supported_degree, + supported_hiding_bound, + degree_bounds.as_ref().map(|s| s.as_slice()), + )?; + println!("Trimmed"); + + let (comms, rands) = PC::commit(&ck, &polynomials, Some(rng))?; + + // Construct query set + let mut query_set = QuerySet::new(); + let mut values = Evaluations::new(); + for _ in 0..num_points_in_query_set { + let point = rand_point(num_vars, rng); + for (i, label) in labels.iter().enumerate() { + query_set.insert((label.clone(), (format!("{}", i), point.clone()))); + let value = polynomials[i].evaluate(&point); + values.insert((label.clone(), point.clone()), value); + } + } + println!("Generated query set"); + + let opening_challenge = F::rand(rng); + let proof = PC::batch_open( + &ck, + &polynomials, + &comms, + &query_set, + opening_challenge, + &rands, + Some(rng), + )?; + let result = PC::batch_check( + &vk, + &comms, + &query_set, + &values, + &proof, + opening_challenge, + rng, + )?; + if !result { + println!( + "Failed with {} 
polynomials, num_points_in_query_set: {:?}", + num_polynomials, num_points_in_query_set + ); + println!("Degree of polynomials:",); + for poly in polynomials { + println!("Degree: {:?}", poly.degree()); + } + } + assert!(result, "proof was incorrect, Query set: {:#?}", query_set); + } + Ok(()) + } + + fn equation_test_template(info: TestInfo) -> Result<(), PC::Error> + where + F: Field, + P: Polynomial, + PC: PolynomialCommitment, + { + let TestInfo { + num_iters, + max_degree, + supported_degree, + num_vars, + num_polynomials, + enforce_degree_bounds, + max_num_queries, + num_equations, + rand_poly, + rand_point, + } = info; + + let rng = &mut test_rng(); + // If testing multivariate polynomials, make the max degree lower + let max_degree = match num_vars { + Some(_) => max_degree.unwrap_or(Uniform::from(2..=10).sample(rng)), + None => max_degree.unwrap_or(Uniform::from(2..=64).sample(rng)), + }; + let pp = PC::setup(max_degree, num_vars, rng)?; + + for _ in 0..num_iters { + let supported_degree = + supported_degree.unwrap_or(Uniform::from(1..=max_degree).sample(rng)); + assert!( + max_degree >= supported_degree, + "max_degree < supported_degree" + ); + let mut polynomials = Vec::new(); + let mut degree_bounds = if enforce_degree_bounds { + Some(Vec::new()) + } else { + None + }; + + let mut labels = Vec::new(); + println!("Sampled supported degree"); + + // Generate polynomials + let num_points_in_query_set = Uniform::from(1..=max_num_queries).sample(rng); + for i in 0..num_polynomials { + let label = format!("Test{}", i); + labels.push(label.clone()); + let degree = Uniform::from(1..=supported_degree).sample(rng); + let degree_bound = if let Some(degree_bounds) = &mut degree_bounds { + if rng.gen() { + let range = Uniform::from(degree..=supported_degree); + let degree_bound = range.sample(rng); + degree_bounds.push(degree_bound); + Some(degree_bound) + } else { + None + } + } else { + None + }; + + let hiding_bound = if num_points_in_query_set >= degree { + Some(degree) + } else { + Some(num_points_in_query_set) + }; + println!("Hiding bound: {:?}", hiding_bound); + + polynomials.push(LabeledPolynomial::new( + label, + rand_poly(degree, num_vars, rng), + degree_bound, + hiding_bound, + )) + } + println!("supported degree: {:?}", supported_degree); + println!("num_points_in_query_set: {:?}", num_points_in_query_set); + println!("{:?}", degree_bounds); + println!("{}", num_polynomials); + println!("{}", enforce_degree_bounds); + + let (ck, vk) = PC::trim( + &pp, + supported_degree, + supported_degree, + degree_bounds.as_ref().map(|s| s.as_slice()), + )?; + println!("Trimmed"); + + let (comms, rands) = PC::commit(&ck, &polynomials, Some(rng))?; + + // Let's construct our equations + let mut linear_combinations = Vec::new(); + let mut query_set = QuerySet::new(); + let mut values = Evaluations::new(); + for i in 0..num_points_in_query_set { + let point = rand_point(num_vars, rng); + for j in 0..num_equations.unwrap() { + let label = format!("query {} eqn {}", i, j); + let mut lc = LinearCombination::empty(label.clone()); + + let mut value = F::zero(); + let should_have_degree_bounds: bool = rng.gen(); + for (k, label) in labels.iter().enumerate() { + if should_have_degree_bounds { + value += &polynomials[k].evaluate(&point); + lc.push((F::one(), label.to_string().into())); + break; + } else { + let poly = &polynomials[k]; + if poly.degree_bound().is_some() { + continue; + } else { + assert!(poly.degree_bound().is_none()); + let coeff = F::rand(rng); + value += &(coeff * 
poly.evaluate(&point)); + lc.push((coeff, label.to_string().into())); + } + } + } + values.insert((label.clone(), point.clone()), value); + if !lc.is_empty() { + linear_combinations.push(lc); + // Insert query + query_set.insert((label.clone(), (format!("{}", i), point.clone()))); + } + } + } + if linear_combinations.is_empty() { + continue; + } + println!("Generated query set"); + println!("Linear combinations: {:?}", linear_combinations); + + let opening_challenge = F::rand(rng); + let proof = PC::open_combinations( + &ck, + &linear_combinations, + &polynomials, + &comms, + &query_set, + opening_challenge, + &rands, + Some(rng), + )?; + println!("Generated proof"); + let result = PC::check_combinations( + &vk, + &linear_combinations, + &comms, + &query_set, + &values, + &proof, + opening_challenge, + rng, + )?; + if !result { + println!( + "Failed with {} polynomials, num_points_in_query_set: {:?}", + num_polynomials, num_points_in_query_set + ); + println!("Degree of polynomials:",); + for poly in polynomials { + println!("Degree: {:?}", poly.degree()); + } + } + assert!( + result, + "proof was incorrect, equations: {:#?}", + linear_combinations + ); + } + Ok(()) + } + + pub fn single_poly_test( + num_vars: Option, + rand_poly: fn(usize, Option, &mut StdRng) -> P, + rand_point: fn(Option, &mut StdRng) -> P::Point, + ) -> Result<(), PC::Error> + where + F: Field, + P: Polynomial, + PC: PolynomialCommitment, + { + let info = TestInfo { + num_iters: 100, + max_degree: None, + supported_degree: None, + num_vars, + num_polynomials: 1, + enforce_degree_bounds: false, + max_num_queries: 1, + num_equations: None, + rand_poly, + rand_point, + }; + test_template::(info) + } + + pub fn linear_poly_degree_bound_test( + rand_poly: fn(usize, Option, &mut StdRng) -> P, + rand_point: fn(Option, &mut StdRng) -> P::Point, + ) -> Result<(), PC::Error> + where + F: Field, + P: Polynomial, + PC: PolynomialCommitment, + { + let info = TestInfo { + num_iters: 100, + max_degree: Some(2), + supported_degree: Some(1), + num_vars: None, + num_polynomials: 1, + enforce_degree_bounds: true, + max_num_queries: 1, + num_equations: None, + rand_poly, + rand_point, + }; + test_template::(info) + } + + pub fn single_poly_degree_bound_test( + rand_poly: fn(usize, Option, &mut StdRng) -> P, + rand_point: fn(Option, &mut StdRng) -> P::Point, + ) -> Result<(), PC::Error> + where + F: Field, + P: Polynomial, + PC: PolynomialCommitment, + { + let info = TestInfo { + num_iters: 100, + max_degree: None, + supported_degree: None, + num_vars: None, + num_polynomials: 1, + enforce_degree_bounds: true, + max_num_queries: 1, + num_equations: None, + rand_poly, + rand_point, + }; + test_template::(info) + } + + pub fn quadratic_poly_degree_bound_multiple_queries_test( + rand_poly: fn(usize, Option, &mut StdRng) -> P, + rand_point: fn(Option, &mut StdRng) -> P::Point, + ) -> Result<(), PC::Error> + where + F: Field, + P: Polynomial, + PC: PolynomialCommitment, + { + let info = TestInfo { + num_iters: 100, + max_degree: Some(3), + supported_degree: Some(2), + num_vars: None, + num_polynomials: 1, + enforce_degree_bounds: true, + max_num_queries: 2, + num_equations: None, + rand_poly, + rand_point, + }; + test_template::(info) + } + + pub fn single_poly_degree_bound_multiple_queries_test( + rand_poly: fn(usize, Option, &mut StdRng) -> P, + rand_point: fn(Option, &mut StdRng) -> P::Point, + ) -> Result<(), PC::Error> + where + F: Field, + P: Polynomial, + PC: PolynomialCommitment, + { + let info = TestInfo { + num_iters: 100, + 
max_degree: None, + supported_degree: None, + num_vars: None, + num_polynomials: 1, + enforce_degree_bounds: true, + max_num_queries: 2, + num_equations: None, + rand_poly, + rand_point, + }; + test_template::(info) + } + + pub fn two_polys_degree_bound_single_query_test( + rand_poly: fn(usize, Option, &mut StdRng) -> P, + rand_point: fn(Option, &mut StdRng) -> P::Point, + ) -> Result<(), PC::Error> + where + F: Field, + P: Polynomial, + PC: PolynomialCommitment, + { + let info = TestInfo { + num_iters: 100, + max_degree: None, + supported_degree: None, + num_vars: None, + num_polynomials: 2, + enforce_degree_bounds: true, + max_num_queries: 1, + num_equations: None, + rand_poly, + rand_point, + }; + test_template::(info) + } + + pub fn full_end_to_end_test( + num_vars: Option, + rand_poly: fn(usize, Option, &mut StdRng) -> P, + rand_point: fn(Option, &mut StdRng) -> P::Point, + ) -> Result<(), PC::Error> + where + F: Field, + P: Polynomial, + PC: PolynomialCommitment, + { + let info = TestInfo { + num_iters: 100, + max_degree: None, + supported_degree: None, + num_vars, + num_polynomials: 10, + enforce_degree_bounds: true, + max_num_queries: 5, + num_equations: None, + rand_poly, + rand_point, + }; + test_template::(info) + } + + pub fn full_end_to_end_equation_test( + num_vars: Option, + rand_poly: fn(usize, Option, &mut StdRng) -> P, + rand_point: fn(Option, &mut StdRng) -> P::Point, + ) -> Result<(), PC::Error> + where + F: Field, + P: Polynomial, + PC: PolynomialCommitment, + { + let info = TestInfo { + num_iters: 100, + max_degree: None, + supported_degree: None, + num_vars, + num_polynomials: 10, + enforce_degree_bounds: true, + max_num_queries: 5, + num_equations: Some(10), + rand_poly, + rand_point, + }; + equation_test_template::(info) + } + + pub fn single_equation_test( + num_vars: Option, + rand_poly: fn(usize, Option, &mut StdRng) -> P, + rand_point: fn(Option, &mut StdRng) -> P::Point, + ) -> Result<(), PC::Error> + where + F: Field, + P: Polynomial, + PC: PolynomialCommitment, + { + let info = TestInfo { + num_iters: 100, + max_degree: None, + supported_degree: None, + num_vars, + num_polynomials: 1, + enforce_degree_bounds: false, + max_num_queries: 1, + num_equations: Some(1), + rand_poly, + rand_point, + }; + equation_test_template::(info) + } + + pub fn two_equation_test( + num_vars: Option, + rand_poly: fn(usize, Option, &mut StdRng) -> P, + rand_point: fn(Option, &mut StdRng) -> P::Point, + ) -> Result<(), PC::Error> + where + F: Field, + P: Polynomial, + PC: PolynomialCommitment, + { + let info = TestInfo { + num_iters: 100, + max_degree: None, + supported_degree: None, + num_vars, + num_polynomials: 2, + enforce_degree_bounds: false, + max_num_queries: 1, + num_equations: Some(2), + rand_poly, + rand_point, + }; + equation_test_template::(info) + } + + pub fn two_equation_degree_bound_test( + rand_poly: fn(usize, Option, &mut StdRng) -> P, + rand_point: fn(Option, &mut StdRng) -> P::Point, + ) -> Result<(), PC::Error> + where + F: Field, + P: Polynomial, + PC: PolynomialCommitment, + { + let info = TestInfo { + num_iters: 100, + max_degree: None, + supported_degree: None, + num_vars: None, + num_polynomials: 2, + enforce_degree_bounds: true, + max_num_queries: 1, + num_equations: Some(2), + rand_poly, + rand_point, + }; + equation_test_template::(info) + } +} diff --git a/arkworks/poly-commit/src/marlin/marlin_pc/data_structures.rs b/arkworks/poly-commit/src/marlin/marlin_pc/data_structures.rs new file mode 100644 index 00000000..37e075db --- /dev/null +++ 
b/arkworks/poly-commit/src/marlin/marlin_pc/data_structures.rs @@ -0,0 +1,418 @@ +use crate::{ + PCCommitment, PCCommitterKey, PCPreparedCommitment, PCPreparedVerifierKey, PCRandomness, + PCVerifierKey, UVPolynomial, Vec, +}; +use ark_ec::{PairingEngine, ProjectiveCurve}; +use ark_ff::{Field, PrimeField, ToBytes, ToConstraintField}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError}; +use ark_std::io::{Read, Write}; +use ark_std::ops::{Add, AddAssign}; +use ark_std::rand::RngCore; + +use crate::kzg10; +/// `UniversalParams` are the universal parameters for the KZG10 scheme. +pub type UniversalParams = kzg10::UniversalParams; + +/// `CommitterKey` is used to commit to and create evaluation proofs for a given +/// polynomial. +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative( + Default(bound = ""), + Hash(bound = ""), + Clone(bound = ""), + Debug(bound = "") +)] +pub struct CommitterKey { + /// The key used to commit to polynomials. + pub powers: Vec, + + /// The key used to commit to shifted polynomials. + /// This is `None` if `self` does not support enforcing any degree bounds. + pub shifted_powers: Option>, + + /// The key used to commit to hiding polynomials. + pub powers_of_gamma_g: Vec, + + /// The degree bounds that are supported by `self`. + /// In ascending order from smallest to largest. + /// This is `None` if `self` does not support enforcing any degree bounds. + pub enforced_degree_bounds: Option>, + /// The maximum degree supported by the `UniversalParams` `self` was derived + /// from. + pub max_degree: usize, +} + +impl CommitterKey { + /// Obtain powers for the underlying KZG10 construction + pub fn powers<'a>(&'a self) -> kzg10::Powers<'a, E> { + kzg10::Powers { + powers_of_g: self.powers.as_slice().into(), + powers_of_gamma_g: self.powers_of_gamma_g.as_slice().into(), + } + } + + /// Obtain powers for committing to shifted polynomials. + pub fn shifted_powers<'a>( + &'a self, + degree_bound: impl Into>, + ) -> Option> { + self.shifted_powers.as_ref().map(|shifted_powers| { + let powers_range = if let Some(degree_bound) = degree_bound.into() { + assert!(self + .enforced_degree_bounds + .as_ref() + .unwrap() + .contains(°ree_bound)); + let max_bound = self + .enforced_degree_bounds + .as_ref() + .unwrap() + .last() + .unwrap(); + (max_bound - degree_bound).. + } else { + 0.. + }; + let ck = kzg10::Powers { + powers_of_g: (&shifted_powers[powers_range]).into(), + powers_of_gamma_g: self.powers_of_gamma_g.as_slice().into(), + }; + ck + }) + } +} + +impl PCCommitterKey for CommitterKey { + fn max_degree(&self) -> usize { + self.max_degree + } + + fn supported_degree(&self) -> usize { + self.powers.len() - 1 + } +} + +/// `VerifierKey` is used to check evaluation proofs for a given commitment. +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] +pub struct VerifierKey { + /// The verification key for the underlying KZG10 scheme. + pub vk: kzg10::VerifierKey, + /// Information required to enforce degree bounds. Each pair + /// is of the form `(degree_bound, shifting_advice)`. + /// The vector is sorted in ascending order of `degree_bound`. + /// This is `None` if `self` does not support enforcing any degree bounds. + pub degree_bounds_and_shift_powers: Option>, + /// The maximum degree supported by the `UniversalParams` `self` was derived + /// from. 
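The range computation in `shifted_powers` above is easy to get wrong, so here is a toy model of it: shifted powers are laid out for the largest enforced bound, and a smaller bound `d` starts reading at offset `max_bound - d`. Indices stand in for the actual group elements; this is a sketch, not the crate's API:

```rust
fn main() {
    let enforced_degree_bounds = [2usize, 5, 7]; // sorted ascending
    let max_bound = *enforced_degree_bounds.last().unwrap();

    // Pretend these are the powers used for shifted commitments.
    let shifted_powers: Vec<usize> = (0..=max_bound).collect();

    for &degree_bound in &enforced_degree_bounds {
        let slice = &shifted_powers[(max_bound - degree_bound)..];
        // A bound of `d` leaves `d + 1` usable powers.
        assert_eq!(slice.len(), degree_bound + 1);
    }
}
```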
+ pub max_degree: usize, + /// The maximum degree supported by the trimmed parameters that `self` is + /// a part of. + pub supported_degree: usize, +} + +impl VerifierKey { + /// Find the appropriate shift for the degree bound. + pub fn get_shift_power(&self, bound: usize) -> Option { + self.degree_bounds_and_shift_powers.as_ref().and_then(|v| { + v.binary_search_by(|(d, _)| d.cmp(&bound)) + .ok() + .map(|i| v[i].1) + }) + } +} + +impl PCVerifierKey for VerifierKey { + fn max_degree(&self) -> usize { + self.max_degree + } + + fn supported_degree(&self) -> usize { + self.supported_degree + } +} + +impl ToBytes for VerifierKey { + #[inline] + fn write(&self, mut writer: W) -> ark_std::io::Result<()> { + self.vk.write(&mut writer)?; + if let Some(degree_bounds_and_shift_powers) = &self.degree_bounds_and_shift_powers { + writer.write_all(°ree_bounds_and_shift_powers.len().to_le_bytes())?; + for (degree_bound, shift_power) in degree_bounds_and_shift_powers { + writer.write_all(°ree_bound.to_le_bytes())?; + shift_power.write(&mut writer)?; + } + } + writer.write_all(&self.supported_degree.to_le_bytes())?; + writer.write_all(&self.max_degree.to_le_bytes()) + } +} + +impl ToConstraintField<::BasePrimeField> for VerifierKey +where + E::G1Affine: ToConstraintField<::BasePrimeField>, + E::G2Affine: ToConstraintField<::BasePrimeField>, +{ + fn to_field_elements(&self) -> Option::BasePrimeField>> { + let mut res = Vec::new(); + res.extend_from_slice(&self.vk.to_field_elements().unwrap()); + + if let Some(degree_bounds_and_shift_powers) = &self.degree_bounds_and_shift_powers { + for (d, shift_power) in degree_bounds_and_shift_powers.iter() { + let d_elem: ::BasePrimeField = (*d as u64).into(); + + res.push(d_elem); + res.extend_from_slice(&shift_power.to_field_elements().unwrap()); + } + } + + Some(res) + } +} + +/// `PreparedVerifierKey` is used to check evaluation proofs for a given commitment. +#[derive(Derivative)] +#[derivative(Clone(bound = ""), Debug(bound = ""))] +pub struct PreparedVerifierKey { + /// The verification key for the underlying KZG10 scheme. + pub prepared_vk: kzg10::PreparedVerifierKey, + /// Information required to enforce degree bounds. Each pair + /// is of the form `(degree_bound, shifting_advice)`. + /// This is `None` if `self` does not support enforcing any degree bounds. + pub prepared_degree_bounds_and_shift_powers: Option)>>, + /// The maximum degree supported by the `UniversalParams` `self` was derived + /// from. + pub max_degree: usize, + /// The maximum degree supported by the trimmed parameters that `self` is + /// a part of. 
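`get_shift_power` above relies on `degree_bounds_and_shift_powers` being sorted by bound, so a lookup is a binary search. A minimal stand-alone version, with `u32` values standing in for the `E::G1Affine` shift powers:

```rust
fn get_shift_power(table: &[(usize, u32)], bound: usize) -> Option<u32> {
    table
        .binary_search_by(|(d, _)| d.cmp(&bound))
        .ok()
        .map(|i| table[i].1)
}

fn main() {
    // (degree_bound, shift_power) pairs, sorted by bound.
    let table = [(2usize, 200u32), (5, 500), (7, 700)];
    assert_eq!(get_shift_power(&table, 5), Some(500));
    assert_eq!(get_shift_power(&table, 3), None); // unsupported bound
}
```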
+ pub supported_degree: usize, +} + +impl PCPreparedVerifierKey> for PreparedVerifierKey { + /// prepare `PreparedVerifierKey` from `VerifierKey` + fn prepare(vk: &VerifierKey) -> Self { + let prepared_vk = kzg10::PreparedVerifierKey::::prepare(&vk.vk); + + let supported_bits = E::Fr::size_in_bits(); + + let prepared_degree_bounds_and_shift_powers: Option)>> = + if vk.degree_bounds_and_shift_powers.is_some() { + let mut res = Vec::<(usize, Vec)>::new(); + + let degree_bounds_and_shift_powers = + vk.degree_bounds_and_shift_powers.as_ref().unwrap(); + + for (d, shift_power) in degree_bounds_and_shift_powers { + let mut prepared_shift_power = Vec::::new(); + + let mut cur = E::G1Projective::from(shift_power.clone()); + for _ in 0..supported_bits { + prepared_shift_power.push(cur.clone().into()); + cur.double_in_place(); + } + + res.push((d.clone(), prepared_shift_power)); + } + + Some(res) + } else { + None + }; + + Self { + prepared_vk, + prepared_degree_bounds_and_shift_powers, + max_degree: vk.max_degree, + supported_degree: vk.supported_degree, + } + } +} + +/// Commitment to a polynomial that optionally enforces a degree bound. +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative( + Default(bound = ""), + Hash(bound = ""), + Clone(bound = ""), + Copy(bound = ""), + Debug(bound = ""), + PartialEq(bound = ""), + Eq(bound = "") +)] +pub struct Commitment { + /// A KZG10 commitment to the polynomial. + pub comm: kzg10::Commitment, + + /// A KZG10 commitment to the shifted polynomial. + /// This is `none` if the committed polynomial does not + /// enforce a strict degree bound. + pub shifted_comm: Option>, +} + +impl ToBytes for Commitment { + #[inline] + fn write(&self, mut writer: W) -> ark_std::io::Result<()> { + self.comm.write(&mut writer)?; + let shifted_exists = self.shifted_comm.is_some(); + shifted_exists.write(&mut writer)?; + self.shifted_comm + .as_ref() + .unwrap_or(&kzg10::Commitment::empty()) + .write(&mut writer) + } +} + +impl ToConstraintField<::BasePrimeField> for Commitment +where + E::G1Affine: ToConstraintField<::BasePrimeField>, +{ + fn to_field_elements(&self) -> Option::BasePrimeField>> { + let mut res = Vec::new(); + res.extend_from_slice(&self.comm.to_field_elements().unwrap()); + + if let Some(shifted_comm) = &self.shifted_comm { + res.extend_from_slice(&shifted_comm.to_field_elements().unwrap()); + } + + Some(res) + } +} + +impl PCCommitment for Commitment { + #[inline] + fn empty() -> Self { + Self { + comm: kzg10::Commitment::empty(), + shifted_comm: Some(kzg10::Commitment::empty()), + } + } + + fn has_degree_bound(&self) -> bool { + self.shifted_comm.is_some() + } + + fn size_in_bytes(&self) -> usize { + self.comm.size_in_bytes() + self.shifted_comm.as_ref().map_or(0, |c| c.size_in_bytes()) + } +} + +/// Prepared commitment to a polynomial that optionally enforces a degree bound. +#[derive(Derivative)] +#[derivative( + Hash(bound = ""), + Clone(bound = ""), + Debug(bound = ""), + PartialEq(bound = ""), + Eq(bound = "") +)] +pub struct PreparedCommitment { + pub(crate) prepared_comm: kzg10::PreparedCommitment, + pub(crate) shifted_comm: Option>, +} + +impl PCPreparedCommitment> for PreparedCommitment { + /// Prepare commitment to a polynomial that optionally enforces a degree bound. 
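The doubling loop inside `prepare` above precomputes the table `S, 2S, 4S, ...` so that later scalar multiplications reduce to sums of table entries selected by the scalar's bits. A toy model using wrapping `u64` addition in place of the group law on `E::G1Projective`:

```rust
fn main() {
    let supported_bits = 8; // E::Fr::size_in_bits() in the real code
    let shift_power = 3u64; // stand-in for a G1 point

    let mut prepared_shift_power = Vec::with_capacity(supported_bits);
    let mut cur = shift_power;
    for _ in 0..supported_bits {
        prepared_shift_power.push(cur);
        cur = cur.wrapping_add(cur); // `double_in_place` in the real code
    }

    // Multiplying by a scalar is now a subset sum over the table entries
    // picked out by the scalar's set bits.
    let scalar = 0b1011u64; // 11
    let product: u64 = (0..supported_bits)
        .filter(|i| (scalar >> i) & 1 == 1)
        .map(|i| prepared_shift_power[i])
        .sum();
    assert_eq!(product, 11 * shift_power);
}
```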
+ fn prepare(comm: &Commitment) -> Self { + let prepared_comm = kzg10::PreparedCommitment::::prepare(&comm.comm); + + let shifted_comm = comm.shifted_comm.clone(); + + Self { + prepared_comm, + shifted_comm, + } + } +} + +/// `Randomness` hides the polynomial inside a commitment. It is output by `KZG10::commit`. +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative( + Hash(bound = ""), + Clone(bound = ""), + Debug(bound = ""), + PartialEq(bound = ""), + Eq(bound = "") +)] +pub struct Randomness> { + /// Commitment randomness for a KZG10 commitment. + pub rand: kzg10::Randomness, + /// Commitment randomness for a KZG10 commitment to the shifted polynomial. + /// This is `None` if the committed polynomial does not enforce a strict + /// degree bound. + pub shifted_rand: Option>, +} + +impl<'a, F: PrimeField, P: UVPolynomial> Add<&'a Self> for Randomness { + type Output = Self; + + fn add(mut self, other: &'a Self) -> Self { + self += other; + self + } +} + +impl<'a, F: PrimeField, P: UVPolynomial> AddAssign<&'a Self> for Randomness { + #[inline] + fn add_assign(&mut self, other: &'a Self) { + self.rand += &other.rand; + if let Some(r1) = &mut self.shifted_rand { + *r1 += other + .shifted_rand + .as_ref() + .unwrap_or(&kzg10::Randomness::empty()); + } else { + self.shifted_rand = other.shifted_rand.as_ref().map(|r| r.clone()); + } + } +} + +impl<'a, F: PrimeField, P: UVPolynomial> Add<(F, &'a Randomness)> for Randomness { + type Output = Self; + + #[inline] + fn add(mut self, other: (F, &'a Randomness)) -> Self { + self += other; + self + } +} + +impl<'a, F: PrimeField, P: UVPolynomial> AddAssign<(F, &'a Randomness)> + for Randomness +{ + #[inline] + fn add_assign(&mut self, (f, other): (F, &'a Randomness)) { + self.rand += (f, &other.rand); + let empty = kzg10::Randomness::empty(); + if let Some(r1) = &mut self.shifted_rand { + *r1 += (f, other.shifted_rand.as_ref().unwrap_or(&empty)); + } else { + self.shifted_rand = other.shifted_rand.as_ref().map(|r| empty + (f, r)); + } + } +} + +impl> PCRandomness for Randomness { + fn empty() -> Self { + Self { + rand: kzg10::Randomness::empty(), + shifted_rand: None, + } + } + + fn rand( + hiding_bound: usize, + has_degree_bound: bool, + _: Option, + rng: &mut R, + ) -> Self { + let shifted_rand = if has_degree_bound { + Some(kzg10::Randomness::rand(hiding_bound, false, None, rng)) + } else { + None + }; + Self { + rand: kzg10::Randomness::rand(hiding_bound, false, None, rng), + shifted_rand, + } + } +} diff --git a/arkworks/poly-commit/src/marlin/marlin_pc/mod.rs b/arkworks/poly-commit/src/marlin/marlin_pc/mod.rs new file mode 100644 index 00000000..d101e3ae --- /dev/null +++ b/arkworks/poly-commit/src/marlin/marlin_pc/mod.rs @@ -0,0 +1,790 @@ +use crate::{kzg10, marlin::Marlin, PCCommitterKey}; +use crate::{BTreeMap, BTreeSet, ToString, Vec}; +use crate::{BatchLCProof, Error, Evaluations, QuerySet}; +use crate::{LabeledCommitment, LabeledPolynomial, LinearCombination}; +use crate::{PCRandomness, PCUniversalParams, PolynomialCommitment}; +use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve}; +use ark_ff::Zero; +use ark_poly::UVPolynomial; +use ark_std::rand::RngCore; +use ark_std::{marker::PhantomData, ops::Div, vec}; + +mod data_structures; +pub use data_structures::*; + +/// Polynomial commitment based on [[KZG10]][kzg], with degree enforcement, batching, +/// and (optional) hiding property taken from [[CHMMVW20, “Marlin”]][marlin]. 
+///
+/// Degree bound enforcement requires that (at least one of) the points at
+/// which a committed polynomial is evaluated are from a distribution that is
+/// random conditioned on the polynomial. This is because degree bound
+/// enforcement relies on checking a polynomial identity at this point.
+/// More formally, the points must be sampled from an admissible query sampler,
+/// as detailed in [[CHMMVW20]][marlin].
+///
+/// [kzg]: http://cacr.uwaterloo.ca/techreports/2010/cacr2010-10.pdf
+/// [marlin]: https://eprint.iacr.org/2019/1047
+pub struct MarlinKZG10<E: PairingEngine, P: UVPolynomial<E::Fr>> {
+    _engine: PhantomData<E>,
+    _poly: PhantomData<P>,
+}
+
+pub(crate) fn shift_polynomial<E: PairingEngine, P: UVPolynomial<E::Fr>>(
+    ck: &CommitterKey<E>,
+    p: &P,
+    degree_bound: usize,
+) -> P {
+    if p.is_zero() {
+        P::zero()
+    } else {
+        let enforced_degree_bounds = ck
+            .enforced_degree_bounds
+            .as_ref()
+            .expect("Polynomial requires degree bounds, but `ck` does not support any");
+        let largest_enforced_degree_bound = enforced_degree_bounds.last().unwrap();
+
+        let mut shifted_polynomial_coeffs =
+            vec![E::Fr::zero(); largest_enforced_degree_bound - degree_bound];
+        shifted_polynomial_coeffs.extend_from_slice(&p.coeffs());
+        P::from_coefficients_vec(shifted_polynomial_coeffs)
+    }
+}
+
+impl<E, P> PolynomialCommitment<E::Fr, P> for MarlinKZG10<E, P>
+where
+    E: PairingEngine,
+    P: UVPolynomial<E::Fr>,
+    for<'a, 'b> &'a P: Div<&'b P, Output = P>,
+{
+    type UniversalParams = UniversalParams<E>;
+    type CommitterKey = CommitterKey<E>;
+    type VerifierKey = VerifierKey<E>;
+    type PreparedVerifierKey = PreparedVerifierKey<E>;
+    type Commitment = Commitment<E>;
+    type PreparedCommitment = PreparedCommitment<E>;
+    type Randomness = Randomness<E::Fr, P>;
+    type Proof = kzg10::Proof<E>;
+    type BatchProof = Vec<Self::Proof>;
+    type Error = Error;
+
+    /// Constructs public parameters when given as input the maximum degree `max_degree`
+    /// for the polynomial commitment scheme.
+    fn setup<R: RngCore>(
+        max_degree: usize,
+        _num_vars: Option<usize>,
+        rng: &mut R,
+    ) -> Result<Self::UniversalParams, Self::Error> {
+        kzg10::KZG10::<E, P>::setup(max_degree, false, rng).map_err(Into::into)
+    }
+
+    fn trim(
+        pp: &Self::UniversalParams,
+        supported_degree: usize,
+        supported_hiding_bound: usize,
+        enforced_degree_bounds: Option<&[usize]>,
+    ) -> Result<(Self::CommitterKey, Self::VerifierKey), Self::Error> {
+        let max_degree = pp.max_degree();
+        if supported_degree > max_degree {
+            return Err(Error::TrimmingDegreeTooLarge);
+        }
+
+        // Construct the KZG10 committer key for committing to unshifted polynomials.
+        let ck_time = start_timer!(|| format!(
+            "Constructing `powers` of size {} for unshifted polys",
+            supported_degree
+        ));
+        let powers = pp.powers_of_g[..=supported_degree].to_vec();
+        // We want to support making up to `supported_hiding_bound` queries to committed
+        // polynomials.
+        let powers_of_gamma_g = (0..=supported_hiding_bound + 1)
+            .map(|i| pp.powers_of_gamma_g[&i])
+            .collect::<Vec<_>>();
+
+        end_timer!(ck_time);
+
+        // Construct the core KZG10 verifier key.
+ let vk = kzg10::VerifierKey { + g: pp.powers_of_g[0].clone(), + gamma_g: pp.powers_of_gamma_g[&0], + h: pp.h.clone(), + beta_h: pp.beta_h.clone(), + prepared_h: pp.prepared_h.clone(), + prepared_beta_h: pp.prepared_beta_h.clone(), + }; + + let enforced_degree_bounds = enforced_degree_bounds.map(|v| { + let mut v = v.to_vec(); + v.sort(); + v.dedup(); + v + }); + + // Check whether we have some degree bounds to enforce + let (shifted_powers, degree_bounds_and_shift_powers) = + if let Some(enforced_degree_bounds) = enforced_degree_bounds.as_ref() { + if enforced_degree_bounds.is_empty() { + (None, None) + } else { + let mut sorted_enforced_degree_bounds = enforced_degree_bounds.clone(); + sorted_enforced_degree_bounds.sort(); + + let lowest_shifted_power = max_degree + - sorted_enforced_degree_bounds + .last() + .ok_or(Error::EmptyDegreeBounds)?; + + let shifted_ck_time = start_timer!(|| format!( + "Constructing `shifted_powers` of size {}", + max_degree - lowest_shifted_power + 1 + )); + + let shifted_powers = pp.powers_of_g[lowest_shifted_power..].to_vec(); + end_timer!(shifted_ck_time); + + let degree_bounds_and_shift_powers = enforced_degree_bounds + .iter() + .map(|d| (*d, pp.powers_of_g[max_degree - *d])) + .collect(); + (Some(shifted_powers), Some(degree_bounds_and_shift_powers)) + } + } else { + (None, None) + }; + + let ck = CommitterKey { + powers, + shifted_powers, + powers_of_gamma_g, + enforced_degree_bounds: enforced_degree_bounds, + max_degree, + }; + + let vk = VerifierKey { + vk, + degree_bounds_and_shift_powers, + supported_degree, + max_degree, + }; + Ok((ck, vk)) + } + + /// Outputs a commitment to `polynomial`. + fn commit<'a>( + ck: &Self::CommitterKey, + polynomials: impl IntoIterator>, + rng: Option<&mut dyn RngCore>, + ) -> Result< + ( + Vec>, + Vec, + ), + Self::Error, + > + where + P: 'a, + { + let rng = &mut crate::optional_rng::OptionalRng(rng); + let commit_time = start_timer!(|| "Committing to polynomials"); + + let mut commitments = Vec::new(); + let mut randomness = Vec::new(); + + for p in polynomials { + let label = p.label(); + let degree_bound = p.degree_bound(); + let hiding_bound = p.hiding_bound(); + let polynomial: &P = p.polynomial(); + + let enforced_degree_bounds: Option<&[usize]> = ck + .enforced_degree_bounds + .as_ref() + .map(|bounds| bounds.as_slice()); + kzg10::KZG10::::check_degrees_and_bounds( + ck.supported_degree(), + ck.max_degree, + enforced_degree_bounds, + &p, + )?; + + let commit_time = start_timer!(|| format!( + "Polynomial {} of degree {}, degree bound {:?}, and hiding bound {:?}", + label, + polynomial.degree(), + degree_bound, + hiding_bound, + )); + + let (comm, rand) = + kzg10::KZG10::commit(&ck.powers(), polynomial, hiding_bound, Some(rng))?; + let (shifted_comm, shifted_rand) = if let Some(degree_bound) = degree_bound { + let shifted_powers = ck + .shifted_powers(degree_bound) + .ok_or(Error::UnsupportedDegreeBound(degree_bound))?; + let (shifted_comm, shifted_rand) = + kzg10::KZG10::commit(&shifted_powers, &polynomial, hiding_bound, Some(rng))?; + (Some(shifted_comm), Some(shifted_rand)) + } else { + (None, None) + }; + + let comm = Commitment { comm, shifted_comm }; + let rand = Randomness { rand, shifted_rand }; + commitments.push(LabeledCommitment::new( + label.to_string(), + comm, + degree_bound, + )); + randomness.push(rand); + end_timer!(commit_time); + } + end_timer!(commit_time); + Ok((commitments, randomness)) + } + + /// On input a polynomial `p` and a point `point`, outputs a proof for the same. 
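Before the opening logic below, it may help to see the degree-bound mechanics of `shift_polynomial` and `commit` on plain coefficient vectors. This is a minimal sketch with hypothetical names and `u64` coefficients standing in for field elements, not part of the crate API:

```rust
/// Prepending `largest_bound - degree_bound` zeros multiplies p(x) by
/// x^(largest_bound - degree_bound), exactly as `shift_polynomial` does.
fn shift_coeffs(p: &[u64], degree_bound: usize, largest_bound: usize) -> Vec<u64> {
    let mut shifted = vec![0u64; largest_bound - degree_bound];
    shifted.extend_from_slice(p);
    shifted
}

fn main() {
    // p(x) = 1 + 2x with bound d = 1 and largest enforced bound D = 3:
    // the shifted polynomial is x^2 * p(x) = x^2 + 2x^3.
    assert_eq!(shift_coeffs(&[1, 2], 1, 3), vec![0, 0, 1, 2]);
}
```

In effect the committer publishes commitments to both `p(x)` and a shifted `x^{max_degree - d} * p(x)`, and the verifier compares them against the shift power `β^{max_degree - d} G` stored in `degree_bounds_and_shift_powers`. The comparison happens at a random evaluation point, which is why the module documentation above insists on an admissible query sampler.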
+ fn open_individual_opening_challenges<'a>( + ck: &Self::CommitterKey, + labeled_polynomials: impl IntoIterator>, + _commitments: impl IntoIterator>, + point: &'a P::Point, + opening_challenges: &dyn Fn(u64) -> E::Fr, + rands: impl IntoIterator, + _rng: Option<&mut dyn RngCore>, + ) -> Result + where + P: 'a, + Self::Randomness: 'a, + Self::Commitment: 'a, + { + let mut p = P::zero(); + let mut r = kzg10::Randomness::empty(); + let mut shifted_w = P::zero(); + let mut shifted_r = kzg10::Randomness::empty(); + let mut shifted_r_witness = P::zero(); + + let mut enforce_degree_bound = false; + let mut opening_challenge_counter = 0; + for (polynomial, rand) in labeled_polynomials.into_iter().zip(rands) { + let degree_bound = polynomial.degree_bound(); + assert_eq!(degree_bound.is_some(), rand.shifted_rand.is_some()); + + let enforced_degree_bounds: Option<&[usize]> = ck + .enforced_degree_bounds + .as_ref() + .map(|bounds| bounds.as_slice()); + kzg10::KZG10::::check_degrees_and_bounds( + ck.supported_degree(), + ck.max_degree, + enforced_degree_bounds, + &polynomial, + )?; + + // compute challenge^j and challenge^{j+1}. + let challenge_j = opening_challenges(opening_challenge_counter); + opening_challenge_counter += 1; + + assert_eq!(degree_bound.is_some(), rand.shifted_rand.is_some()); + + p += (challenge_j, polynomial.polynomial()); + r += (challenge_j, &rand.rand); + + if let Some(degree_bound) = degree_bound { + enforce_degree_bound = true; + let shifted_rand = rand.shifted_rand.as_ref().unwrap(); + let (witness, shifted_rand_witness) = + kzg10::KZG10::::compute_witness_polynomial( + polynomial.polynomial(), + *point, + &shifted_rand, + )?; + let challenge_j_1 = opening_challenges(opening_challenge_counter); + opening_challenge_counter += 1; + + let shifted_witness = shift_polynomial(ck, &witness, degree_bound); + + shifted_w += (challenge_j_1, &shifted_witness); + shifted_r += (challenge_j_1, shifted_rand); + if let Some(shifted_rand_witness) = shifted_rand_witness { + shifted_r_witness += (challenge_j_1, &shifted_rand_witness); + } + } + } + let proof_time = start_timer!(|| "Creating proof for unshifted polynomials"); + let proof = kzg10::KZG10::open(&ck.powers(), &p, *point, &r)?; + let mut w = proof.w.into_projective(); + let mut random_v = proof.random_v; + end_timer!(proof_time); + + if enforce_degree_bound { + let proof_time = start_timer!(|| "Creating proof for shifted polynomials"); + let shifted_proof = kzg10::KZG10::open_with_witness_polynomial( + &ck.shifted_powers(None).unwrap(), + *point, + &shifted_r, + &shifted_w, + Some(&shifted_r_witness), + )?; + end_timer!(proof_time); + + w += &shifted_proof.w.into_projective(); + if let Some(shifted_random_v) = shifted_proof.random_v { + random_v = random_v.map(|v| v + &shifted_random_v); + } + } + + Ok(kzg10::Proof { + w: w.into_affine(), + random_v, + }) + } + + /// Verifies that `value` is the evaluation at `x` of the polynomial + /// committed inside `comm`. 
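The `check` implementation that follows reduces everything to the standard KZG pairing test. Writing `C` for the challenge-combined commitment, `v` for the combined claimed evaluation, `W` for the proof's witness commitment, and `\hat{v}` for the optional hiding term `random_v`, the verifier accepts iff

```latex
e\bigl(C - v\,G - \hat{v}\,\gamma G,\; H\bigr) \;=\; e\bigl(W,\; \beta H - z\,H\bigr)
```

which is the pairing image of the identity `p(X) - p(z) = (X - z) w(X)` evaluated at the trapdoor `β`.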
+ fn check_individual_opening_challenges<'a>( + vk: &Self::VerifierKey, + commitments: impl IntoIterator>, + point: &'a P::Point, + values: impl IntoIterator, + proof: &Self::Proof, + opening_challenges: &dyn Fn(u64) -> E::Fr, + _rng: Option<&mut dyn RngCore>, + ) -> Result + where + Self::Commitment: 'a, + { + let check_time = start_timer!(|| "Checking evaluations"); + let (combined_comm, combined_value) = + Marlin::accumulate_commitments_and_values_individual_opening_challenges( + commitments, + values, + opening_challenges, + Some(vk), + )?; + let combined_comm = kzg10::Commitment(combined_comm.into()); + let result = kzg10::KZG10::check(&vk.vk, &combined_comm, *point, combined_value, proof)?; + end_timer!(check_time); + Ok(result) + } + + fn batch_check_individual_opening_challenges<'a, R: RngCore>( + vk: &Self::VerifierKey, + commitments: impl IntoIterator>, + query_set: &QuerySet, + values: &Evaluations, + proof: &Self::BatchProof, + opening_challenges: &dyn Fn(u64) -> E::Fr, + rng: &mut R, + ) -> Result + where + Self::Commitment: 'a, + { + let (combined_comms, combined_queries, combined_evals) = Marlin::combine_and_normalize( + commitments, + query_set, + values, + opening_challenges, + Some(vk), + )?; + assert_eq!(proof.len(), combined_queries.len()); + let proof_time = start_timer!(|| "Checking KZG10::Proof"); + let result = kzg10::KZG10::batch_check( + &vk.vk, + &combined_comms, + &combined_queries, + &combined_evals, + &proof, + rng, + )?; + end_timer!(proof_time); + Ok(result) + } + + fn open_combinations_individual_opening_challenges<'a>( + ck: &Self::CommitterKey, + lc_s: impl IntoIterator>, + polynomials: impl IntoIterator>, + commitments: impl IntoIterator>, + query_set: &QuerySet, + opening_challenges: &dyn Fn(u64) -> E::Fr, + rands: impl IntoIterator, + rng: Option<&mut dyn RngCore>, + ) -> Result, Self::Error> + where + P: 'a, + Self::Randomness: 'a, + Self::Commitment: 'a, + { + Marlin::open_combinations_individual_opening_challenges( + ck, + lc_s, + polynomials, + commitments, + query_set, + opening_challenges, + rands, + rng, + ) + } + + /// Checks that `values` are the true evaluations at `query_set` of the polynomials + /// committed in `labeled_commitments`. + fn check_combinations_individual_opening_challenges<'a, R: RngCore>( + vk: &Self::VerifierKey, + lc_s: impl IntoIterator>, + commitments: impl IntoIterator>, + query_set: &QuerySet, + evaluations: &Evaluations, + proof: &BatchLCProof, + opening_challenges: &dyn Fn(u64) -> E::Fr, + rng: &mut R, + ) -> Result + where + Self::Commitment: 'a, + { + Marlin::check_combinations_individual_opening_challenges( + vk, + lc_s, + commitments, + query_set, + evaluations, + proof, + opening_challenges, + rng, + ) + } + + /// On input a list of labeled polynomials and a query set, `open` outputs a proof of evaluation + /// of the polynomials at the points in the query set. 
+ fn batch_open_individual_opening_challenges<'a>( + ck: &CommitterKey, + labeled_polynomials: impl IntoIterator>, + commitments: impl IntoIterator>>, + query_set: &QuerySet, + opening_challenges: &dyn Fn(u64) -> E::Fr, + rands: impl IntoIterator, + rng: Option<&mut dyn RngCore>, + ) -> Result>, Error> + where + P: 'a, + Self::Randomness: 'a, + Self::Commitment: 'a, + { + let rng = &mut crate::optional_rng::OptionalRng(rng); + let poly_rand_comm: BTreeMap<_, _> = labeled_polynomials + .into_iter() + .zip(rands) + .zip(commitments.into_iter()) + .map(|((poly, r), comm)| (poly.label(), (poly, r, comm))) + .collect(); + + let open_time = start_timer!(|| format!( + "Opening {} polynomials at query set of size {}", + poly_rand_comm.len(), + query_set.len(), + )); + + let mut query_to_labels_map = BTreeMap::new(); + + for (label, (point_label, point)) in query_set.iter() { + let labels = query_to_labels_map + .entry(point_label) + .or_insert((point, BTreeSet::new())); + labels.1.insert(label); + } + + let mut proofs = Vec::new(); + for (_point_label, (point, labels)) in query_to_labels_map.into_iter() { + let mut query_polys: Vec<&'a LabeledPolynomial<_, _>> = Vec::new(); + let mut query_rands: Vec<&'a Self::Randomness> = Vec::new(); + let mut query_comms: Vec<&'a LabeledCommitment> = Vec::new(); + + for label in labels { + let (polynomial, rand, comm) = + poly_rand_comm.get(&label).ok_or(Error::MissingPolynomial { + label: label.to_string(), + })?; + + query_polys.push(polynomial); + query_rands.push(rand); + query_comms.push(comm); + } + + let proof_time = start_timer!(|| "Creating proof"); + let proof = Self::open_individual_opening_challenges( + ck, + query_polys, + query_comms, + point, + opening_challenges, + query_rands, + Some(rng), + )?; + + end_timer!(proof_time); + + proofs.push(proof); + } + end_timer!(open_time); + + Ok(proofs.into()) + } +} + +#[cfg(test)] +mod tests { + #![allow(non_camel_case_types)] + use super::MarlinKZG10; + use ark_bls12_377::Bls12_377; + use ark_bls12_381::Bls12_381; + use ark_ec::PairingEngine; + use ark_ff::UniformRand; + use ark_poly::{univariate::DensePolynomial as DensePoly, UVPolynomial}; + use ark_std::rand::rngs::StdRng; + + type UniPoly_381 = DensePoly<::Fr>; + type UniPoly_377 = DensePoly<::Fr>; + + type PC = MarlinKZG10; + type PC_Bls12_381 = PC; + type PC_Bls12_377 = PC; + + fn rand_poly( + degree: usize, + _: Option, + rng: &mut StdRng, + ) -> DensePoly { + DensePoly::::rand(degree, rng) + } + + fn constant_poly( + _: usize, + _: Option, + rng: &mut StdRng, + ) -> DensePoly { + DensePoly::::from_coefficients_slice(&[E::Fr::rand(rng)]) + } + + fn rand_point(_: Option, rng: &mut StdRng) -> E::Fr { + E::Fr::rand(rng) + } + + #[test] + fn single_poly_test() { + use crate::tests::*; + single_poly_test::<_, _, PC_Bls12_377>( + None, + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-377"); + single_poly_test::<_, _, PC_Bls12_381>( + None, + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-381"); + } + + #[test] + fn constant_poly_test() { + use crate::tests::*; + single_poly_test::<_, _, PC_Bls12_377>( + None, + constant_poly::, + rand_point::, + ) + .expect("test failed for bls12-377"); + single_poly_test::<_, _, PC_Bls12_381>( + None, + constant_poly::, + rand_point::, + ) + .expect("test failed for bls12-381"); + } + + #[test] + fn quadratic_poly_degree_bound_multiple_queries_test() { + use crate::tests::*; + quadratic_poly_degree_bound_multiple_queries_test::<_, _, PC_Bls12_377>( + rand_poly::, + rand_point::, + ) 
+ .expect("test failed for bls12-377"); + quadratic_poly_degree_bound_multiple_queries_test::<_, _, PC_Bls12_381>( + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-381"); + } + + #[test] + fn linear_poly_degree_bound_test() { + use crate::tests::*; + linear_poly_degree_bound_test::<_, _, PC_Bls12_377>( + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-377"); + linear_poly_degree_bound_test::<_, _, PC_Bls12_381>( + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-381"); + } + + #[test] + fn single_poly_degree_bound_test() { + use crate::tests::*; + single_poly_degree_bound_test::<_, _, PC_Bls12_377>( + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-377"); + single_poly_degree_bound_test::<_, _, PC_Bls12_381>( + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-381"); + } + + #[test] + fn single_poly_degree_bound_multiple_queries_test() { + use crate::tests::*; + single_poly_degree_bound_multiple_queries_test::<_, _, PC_Bls12_377>( + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-377"); + single_poly_degree_bound_multiple_queries_test::<_, _, PC_Bls12_381>( + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-381"); + } + + #[test] + fn two_polys_degree_bound_single_query_test() { + use crate::tests::*; + two_polys_degree_bound_single_query_test::<_, _, PC_Bls12_377>( + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-377"); + two_polys_degree_bound_single_query_test::<_, _, PC_Bls12_381>( + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-381"); + } + + #[test] + fn full_end_to_end_test() { + use crate::tests::*; + full_end_to_end_test::<_, _, PC_Bls12_377>( + None, + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-377"); + println!("Finished bls12-377"); + full_end_to_end_test::<_, _, PC_Bls12_381>( + None, + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-381"); + println!("Finished bls12-381"); + } + + #[test] + fn single_equation_test() { + use crate::tests::*; + single_equation_test::<_, _, PC_Bls12_377>( + None, + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-377"); + println!("Finished bls12-377"); + single_equation_test::<_, _, PC_Bls12_381>( + None, + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-381"); + println!("Finished bls12-381"); + } + + #[test] + fn two_equation_test() { + use crate::tests::*; + two_equation_test::<_, _, PC_Bls12_377>( + None, + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-377"); + println!("Finished bls12-377"); + two_equation_test::<_, _, PC_Bls12_381>( + None, + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-381"); + println!("Finished bls12-381"); + } + + #[test] + fn two_equation_degree_bound_test() { + use crate::tests::*; + two_equation_degree_bound_test::<_, _, PC_Bls12_377>( + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-377"); + println!("Finished bls12-377"); + two_equation_degree_bound_test::<_, _, PC_Bls12_381>( + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-381"); + println!("Finished bls12-381"); + } + + #[test] + fn full_end_to_end_equation_test() { + use crate::tests::*; + full_end_to_end_equation_test::<_, _, PC_Bls12_377>( + None, + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-377"); + println!("Finished bls12-377"); + full_end_to_end_equation_test::<_, _, PC_Bls12_381>( + None, + rand_poly::, + rand_point::, 
+ ) + .expect("test failed for bls12-381"); + println!("Finished bls12-381"); + } + + #[test] + #[should_panic] + fn bad_degree_bound_test() { + use crate::tests::*; + bad_degree_bound_test::<_, _, PC_Bls12_377>( + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-377"); + println!("Finished bls12-377"); + bad_degree_bound_test::<_, _, PC_Bls12_381>( + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-381"); + println!("Finished bls12-381"); + } +} diff --git a/arkworks/poly-commit/src/marlin/marlin_pst13_pc/combinations.rs b/arkworks/poly-commit/src/marlin/marlin_pst13_pc/combinations.rs new file mode 100644 index 00000000..dd1aaf14 --- /dev/null +++ b/arkworks/poly-commit/src/marlin/marlin_pst13_pc/combinations.rs @@ -0,0 +1,138 @@ +//! Compute all combinations of values in a given list +//! Credit: https://github.com/meltinglava/uniquecombinations/ +use crate::Vec; + +/// Compute all combinations of values in a given list. +pub(crate) struct Combinations +where + T: Ord + Clone, +{ + original: Vec, + possition: Vec, + len: usize, + started: bool, +} + +impl Combinations +where + T: Ord + Clone, +{ + /// Initialize the permutations. + pub(crate) fn new(mut original: Vec, len: usize) -> Self { + if original.len() > len && len >= 1 { + original.sort_unstable(); + Self { + original, + possition: (0..len).collect(), + len, + started: false, + } + } else { + panic!("the length has to be smaller then the datasets len"); + } + } + + #[inline] + fn insert(&self, col: &mut Vec) { + col.clear(); + self.possition + .iter() + .enumerate() + .for_each(|(p, n)| col.insert(p, self.original[*n].clone())) + } + + /// Clear the contents of the comb vector and insert the next combination. + fn next_combination(&mut self, mut comb: &mut Vec) -> bool { + if !self.started { + // first pass throught + self.started = true; + self.insert(&mut comb); + true + } else { + let org_len = self.original.len(); + // check if we cant bump the back number + if self.original[self.possition[self.len - 1]] == self.original[org_len - 1] { + // locate the number closest behind that needs to be bumped + for i in 2..=self.len { + if self.original[self.possition[self.len - i]] < self.original[org_len - i] { + //find the value of the + let lastpos = self.possition[self.len - i]; + let val = &self.original[lastpos]; + for j in lastpos + 1..org_len { + if *val < self.original[j] { + for k in 0..i { + self.possition[self.len - i + k] = j + k; + } + self.insert(&mut comb); + return true; + } + } + } + } + false + } else { + let mut i = self.possition[self.len - 1]; + let current = &self.original[i]; + let mut next = current; + while current == next { + i += 1; + next = &self.original[i]; + } + self.possition[self.len - 1] = i; + self.insert(&mut comb); + true + } + } + } +} + +impl Iterator for Combinations +where + T: Ord + Clone, +{ + type Item = Vec; + + fn next(&mut self) -> Option { + let mut vals = Vec::with_capacity(self.len); + if self.next_combination(&mut vals) { + Some(vals) + } else { + None + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn equals() { + assert!(Combinations::new(vec![2, 2, 2], 2).next().unwrap() == vec![2, 2]) + } + + #[test] + fn t_123() { + assert!( + dbg!(Combinations::new(vec![1, 2, 3], 2) + .take(10) + .collect::>()) + == vec![vec![1, 2], vec![1, 3], vec![2, 3]] + ) + } + + #[test] + fn complicated() { + let actual: Vec<_> = Combinations::new(vec![1, 2, 2, 3, 4], 3).collect(); + let expected = vec![ + vec![1, 2, 2], + vec![1, 2, 3], + 
vec![1, 2, 4], + vec![1, 3, 4], + vec![2, 2, 3], + vec![2, 2, 4], + vec![2, 3, 4], + ]; + assert!(actual == expected) + } +} diff --git a/arkworks/poly-commit/src/marlin/marlin_pst13_pc/data_structures.rs b/arkworks/poly-commit/src/marlin/marlin_pst13_pc/data_structures.rs new file mode 100644 index 00000000..ca8ddc61 --- /dev/null +++ b/arkworks/poly-commit/src/marlin/marlin_pst13_pc/data_structures.rs @@ -0,0 +1,572 @@ +use crate::{BTreeMap, Vec}; +use crate::{ + PCCommitterKey, PCPreparedVerifierKey, PCProof, PCRandomness, PCUniversalParams, PCVerifierKey, +}; +use ark_ec::PairingEngine; +use ark_ff::{ToBytes, Zero}; +use ark_poly::MVPolynomial; +use ark_std::{ + io::{Read, Write}, + marker::PhantomData, + ops::{Add, AddAssign, Index}, +}; + +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError}; +use ark_std::rand::RngCore; + +/// `UniversalParams` are the universal parameters for the MarlinPST13 scheme. +#[derive(Derivative)] +#[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] +pub struct UniversalParams +where + E: PairingEngine, + P: MVPolynomial, + P::Point: Index, +{ + /// Contains group elements corresponding to all possible monomials with + /// `num_vars` and maximum degree `max_degree` evaluated at `\beta` + pub powers_of_g: BTreeMap, + /// `\gamma` times the generater of G1 + pub gamma_g: E::G1Affine, + /// Group elements of the form `{ \beta_i^j \gamma G }`, where `i` ranges + /// from 0 to `num_vars-1` and `j` ranges from `1` to `max_degree+1`. + pub powers_of_gamma_g: Vec>, + /// The generator of G2. + pub h: E::G2Affine, + /// Group elements of the form `{ \beta_i H }`, where `i` ranges from 0 to `num_vars-1` + pub beta_h: Vec, + /// The generator of G2, prepared for use in pairings. 
+ #[derivative(Debug = "ignore")] + pub prepared_h: E::G2Prepared, + /// Group elements of the form `{ \beta_i H }`, where `i` ranges from 0 to `num_vars-1`, + /// prepared for use in pairings + #[derivative(Debug = "ignore")] + pub prepared_beta_h: Vec, + /// The number of variables `self` is initialized for + pub num_vars: usize, + /// The maximum degree supported by `self` + pub max_degree: usize, +} + +impl CanonicalSerialize for UniversalParams +where + E: PairingEngine, + P: MVPolynomial, + P::Point: Index, +{ + fn serialize(&self, mut writer: W) -> Result<(), SerializationError> { + self.powers_of_g.serialize(&mut writer)?; + self.gamma_g.serialize(&mut writer)?; + self.powers_of_gamma_g.serialize(&mut writer)?; + self.h.serialize(&mut writer)?; + self.beta_h.serialize(&mut writer)?; + self.num_vars.serialize(&mut writer)?; + self.max_degree.serialize(&mut writer) + } + + fn serialized_size(&self) -> usize { + self.powers_of_g.serialized_size() + + self.gamma_g.serialized_size() + + self.powers_of_gamma_g.serialized_size() + + self.h.serialized_size() + + self.beta_h.serialized_size() + + self.num_vars.serialized_size() + + self.max_degree.serialized_size() + } + + fn serialize_uncompressed(&self, mut writer: W) -> Result<(), SerializationError> { + self.powers_of_g.serialize_uncompressed(&mut writer)?; + self.gamma_g.serialize_uncompressed(&mut writer)?; + self.powers_of_gamma_g.serialize_uncompressed(&mut writer)?; + self.h.serialize_uncompressed(&mut writer)?; + self.beta_h.serialize_uncompressed(&mut writer)?; + self.num_vars.serialize_uncompressed(&mut writer)?; + self.max_degree.serialize_uncompressed(&mut writer) + } + + fn serialize_unchecked(&self, mut writer: W) -> Result<(), SerializationError> { + self.powers_of_g.serialize_unchecked(&mut writer)?; + self.gamma_g.serialize_unchecked(&mut writer)?; + self.powers_of_gamma_g.serialize_unchecked(&mut writer)?; + self.h.serialize_unchecked(&mut writer)?; + self.beta_h.serialize_unchecked(&mut writer)?; + self.num_vars.serialize_unchecked(&mut writer)?; + self.max_degree.serialize_unchecked(&mut writer) + } + + fn uncompressed_size(&self) -> usize { + self.powers_of_g.uncompressed_size() + + self.gamma_g.uncompressed_size() + + self.powers_of_gamma_g.uncompressed_size() + + self.h.uncompressed_size() + + self.beta_h.uncompressed_size() + + self.num_vars.uncompressed_size() + + self.max_degree.uncompressed_size() + } +} + +impl CanonicalDeserialize for UniversalParams +where + E: PairingEngine, + P: MVPolynomial, + P::Point: Index, +{ + fn deserialize(mut reader: R) -> Result { + let powers_of_g = BTreeMap::::deserialize(&mut reader)?; + let gamma_g = E::G1Affine::deserialize(&mut reader)?; + let powers_of_gamma_g = Vec::>::deserialize(&mut reader)?; + let h = E::G2Affine::deserialize(&mut reader)?; + let beta_h = Vec::::deserialize(&mut reader)?; + let num_vars = usize::deserialize(&mut reader)?; + let max_degree = usize::deserialize(&mut reader)?; + + let prepared_beta_h = beta_h.iter().map(|x| x.clone().into()).collect(); + Ok(Self { + powers_of_g, + gamma_g, + powers_of_gamma_g, + h, + beta_h, + prepared_h: h.into(), + prepared_beta_h, + num_vars, + max_degree, + }) + } + + fn deserialize_uncompressed(mut reader: R) -> Result { + let powers_of_g = BTreeMap::::deserialize_uncompressed(&mut reader)?; + let gamma_g = E::G1Affine::deserialize_uncompressed(&mut reader)?; + let powers_of_gamma_g = Vec::>::deserialize_uncompressed(&mut reader)?; + let h = E::G2Affine::deserialize_uncompressed(&mut reader)?; + let beta_h = 
Vec::::deserialize_uncompressed(&mut reader)?; + let num_vars = usize::deserialize_uncompressed(&mut reader)?; + let max_degree = usize::deserialize_uncompressed(&mut reader)?; + + let prepared_beta_h = beta_h.iter().map(|x| x.clone().into()).collect(); + Ok(Self { + powers_of_g, + gamma_g, + powers_of_gamma_g, + h, + beta_h, + prepared_h: h.into(), + prepared_beta_h, + num_vars, + max_degree, + }) + } + + fn deserialize_unchecked(mut reader: R) -> Result { + let powers_of_g = BTreeMap::::deserialize_unchecked(&mut reader)?; + let gamma_g = E::G1Affine::deserialize_unchecked(&mut reader)?; + let powers_of_gamma_g = Vec::>::deserialize_unchecked(&mut reader)?; + let h = E::G2Affine::deserialize_unchecked(&mut reader)?; + let beta_h = Vec::::deserialize_unchecked(&mut reader)?; + let num_vars = usize::deserialize_unchecked(&mut reader)?; + let max_degree = usize::deserialize_unchecked(&mut reader)?; + + let prepared_beta_h = beta_h.iter().map(|x| x.clone().into()).collect(); + Ok(Self { + powers_of_g, + gamma_g, + powers_of_gamma_g, + h, + beta_h, + prepared_h: h.into(), + prepared_beta_h, + num_vars, + max_degree, + }) + } +} + +impl PCUniversalParams for UniversalParams +where + E: PairingEngine, + P: MVPolynomial, + P::Point: Index, +{ + fn max_degree(&self) -> usize { + self.max_degree + } +} + +/// `CommitterKey` is used to commit to and create evaluation proofs for a given +/// polynomial. +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative(Hash(bound = ""), Clone(bound = ""), Debug(bound = ""))] +pub struct CommitterKey +where + E: PairingEngine, + P: MVPolynomial, + P::Point: Index, +{ + /// Contains group elements corresponding to all possible monomials with + /// `num_vars` and maximum degree `supported_degree` evaluated at `\beta` + pub powers_of_g: BTreeMap, + /// `\gamma` times the generater of G1 + pub gamma_g: E::G1Affine, + /// Group elements of the form `{ \beta_i^j \gamma G }`, where `i` ranges + /// from 0 to `num_vars-1` and `j` ranges from `1` to `supported_degree+1`. + pub powers_of_gamma_g: Vec>, + /// The number of variables `self` is initialized for + pub num_vars: usize, + /// The maximum degree supported by the trimmed parameters that `self` is + /// a part of + pub supported_degree: usize, + /// The maximum degree supported by the `UniversalParams` `self` was derived + /// from. + pub max_degree: usize, +} + +impl PCCommitterKey for CommitterKey +where + E: PairingEngine, + P: MVPolynomial, + P::Point: Index, +{ + fn max_degree(&self) -> usize { + self.max_degree + } + + fn supported_degree(&self) -> usize { + self.supported_degree + } +} + +/// `VerifierKey` is used to check evaluation proofs for a given commitment. +#[derive(Derivative)] +#[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] +pub struct VerifierKey { + /// The generator of G1. + pub g: E::G1Affine, + /// The generator of G1 that is used for making a commitment hiding. + pub gamma_g: E::G1Affine, + /// The generator of G2. + pub h: E::G2Affine, + /// `\beta_i` times the above generator of G2 where `i` ranges from 0 to `num_vars-1` + pub beta_h: Vec, + /// The generator of G2, prepared for use in pairings. 
+ #[derivative(Debug = "ignore")] + pub prepared_h: E::G2Prepared, + /// `\beta_i` times the above generator of G2 where `i` ranges from 0 to `num_vars-1`, + /// prepared for use in pairings + #[derivative(Debug = "ignore")] + pub prepared_beta_h: Vec, + /// The number of variables `self` is initialized for + pub num_vars: usize, + /// The maximum degree supported by the trimmed parameters that `self` is + /// a part of. + pub supported_degree: usize, + /// The maximum degree supported by the `UniversalParams` `self` was derived + /// from. + pub max_degree: usize, +} + +impl CanonicalSerialize for VerifierKey { + fn serialize(&self, mut writer: W) -> Result<(), SerializationError> { + self.g.serialize(&mut writer)?; + self.gamma_g.serialize(&mut writer)?; + self.h.serialize(&mut writer)?; + self.beta_h.serialize(&mut writer)?; + self.num_vars.serialize(&mut writer)?; + self.supported_degree.serialize(&mut writer)?; + self.max_degree.serialize(&mut writer) + } + + fn serialized_size(&self) -> usize { + self.g.serialized_size() + + self.gamma_g.serialized_size() + + self.h.serialized_size() + + self.beta_h.serialized_size() + + self.num_vars.serialized_size() + + self.supported_degree.serialized_size() + + self.max_degree.serialized_size() + } + + fn serialize_uncompressed(&self, mut writer: W) -> Result<(), SerializationError> { + self.g.serialize_uncompressed(&mut writer)?; + self.gamma_g.serialize_uncompressed(&mut writer)?; + self.h.serialize_uncompressed(&mut writer)?; + self.beta_h.serialize_uncompressed(&mut writer)?; + self.num_vars.serialize_uncompressed(&mut writer)?; + self.supported_degree.serialize_uncompressed(&mut writer)?; + self.max_degree.serialize_uncompressed(&mut writer) + } + + fn serialize_unchecked(&self, mut writer: W) -> Result<(), SerializationError> { + self.g.serialize_unchecked(&mut writer)?; + self.gamma_g.serialize_unchecked(&mut writer)?; + self.h.serialize_unchecked(&mut writer)?; + self.beta_h.serialize_unchecked(&mut writer)?; + self.num_vars.serialize_unchecked(&mut writer)?; + self.supported_degree.serialize_unchecked(&mut writer)?; + self.max_degree.serialize_unchecked(&mut writer) + } + + fn uncompressed_size(&self) -> usize { + self.g.uncompressed_size() + + self.gamma_g.uncompressed_size() + + self.h.uncompressed_size() + + self.beta_h.uncompressed_size() + + self.num_vars.uncompressed_size() + + self.supported_degree.uncompressed_size() + + self.max_degree.uncompressed_size() + } +} + +impl CanonicalDeserialize for VerifierKey { + fn deserialize(mut reader: R) -> Result { + let g = E::G1Affine::deserialize(&mut reader)?; + let gamma_g = E::G1Affine::deserialize(&mut reader)?; + let h = E::G2Affine::deserialize(&mut reader)?; + let beta_h = Vec::::deserialize(&mut reader)?; + let num_vars = usize::deserialize(&mut reader)?; + let supported_degree = usize::deserialize(&mut reader)?; + let max_degree = usize::deserialize(&mut reader)?; + + let prepared_beta_h = beta_h.iter().map(|x| x.clone().into()).collect(); + Ok(Self { + g, + gamma_g, + h, + beta_h, + prepared_h: h.into(), + prepared_beta_h, + num_vars, + supported_degree, + max_degree, + }) + } + + fn deserialize_uncompressed(mut reader: R) -> Result { + let g = E::G1Affine::deserialize_uncompressed(&mut reader)?; + let gamma_g = E::G1Affine::deserialize_uncompressed(&mut reader)?; + let h = E::G2Affine::deserialize_uncompressed(&mut reader)?; + let beta_h = Vec::::deserialize_uncompressed(&mut reader)?; + let num_vars = usize::deserialize_uncompressed(&mut reader)?; + let supported_degree = 
usize::deserialize_uncompressed(&mut reader)?; + let max_degree = usize::deserialize_uncompressed(&mut reader)?; + + let prepared_beta_h = beta_h.iter().map(|x| x.clone().into()).collect(); + Ok(Self { + g, + gamma_g, + h, + beta_h, + prepared_h: h.into(), + prepared_beta_h, + num_vars, + supported_degree, + max_degree, + }) + } + + fn deserialize_unchecked(mut reader: R) -> Result { + let g = E::G1Affine::deserialize_unchecked(&mut reader)?; + let gamma_g = E::G1Affine::deserialize_unchecked(&mut reader)?; + let h = E::G2Affine::deserialize_unchecked(&mut reader)?; + let beta_h = Vec::::deserialize_unchecked(&mut reader)?; + let num_vars = usize::deserialize_unchecked(&mut reader)?; + let supported_degree = usize::deserialize_unchecked(&mut reader)?; + let max_degree = usize::deserialize_unchecked(&mut reader)?; + + let prepared_beta_h = beta_h.iter().map(|x| x.clone().into()).collect(); + Ok(Self { + g, + gamma_g, + h, + beta_h, + prepared_h: h.into(), + prepared_beta_h, + num_vars, + supported_degree, + max_degree, + }) + } +} + +impl PCVerifierKey for VerifierKey { + fn max_degree(&self) -> usize { + self.max_degree + } + + fn supported_degree(&self) -> usize { + self.supported_degree + } +} + +/// Nothing to do to prepare this verifier key (for now). +pub type PreparedVerifierKey = VerifierKey; + +impl PCPreparedVerifierKey> for PreparedVerifierKey { + /// prepare `PreparedVerifierKey` from `VerifierKey` + fn prepare(vk: &VerifierKey) -> Self { + vk.clone() + } +} + +/// `Randomness` hides the polynomial inside a commitment`. +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative( + Hash(bound = ""), + Clone(bound = ""), + Debug(bound = ""), + PartialEq(bound = ""), + Eq(bound = "") +)] +pub struct Randomness +where + E: PairingEngine, + P: MVPolynomial, + P::Point: Index, +{ + /// A multivariate polynomial where each monomial is univariate with random coefficient + pub blinding_polynomial: P, + _engine: PhantomData, +} + +impl Randomness +where + E: PairingEngine, + P: MVPolynomial, + P::Point: Index, +{ + /// Does `self` provide any hiding properties to the corresponding commitment? + /// `self.is_hiding() == true` only if the underlying polynomial is non-zero. + #[inline] + pub fn is_hiding(&self) -> bool { + !self.blinding_polynomial.is_zero() + } + + /// What is the degree of the hiding polynomial for a given hiding bound? 
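The answer given below is `hiding_bound + 1`: a blinding polynomial of that degree has `hiding_bound + 2` random coefficients, so even after `hiding_bound` openings (each revealing one evaluation of the blinder) plus the linear constraint imposed by the commitment itself, it remains underdetermined and the commitment stays hiding. A trivial restatement of the rule (hypothetical free function mirroring `calculate_hiding_polynomial_degree`):

```rust
/// deg = hiding_bound + 1: supports `hiding_bound` evaluation queries while
/// leaving the blinding polynomial underdetermined.
fn hiding_poly_degree(hiding_bound: usize) -> usize {
    hiding_bound + 1
}

fn main() {
    assert_eq!(hiding_poly_degree(1), 2); // one query -> quadratic blinder
}
```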
+ #[inline] + pub fn calculate_hiding_polynomial_degree(hiding_bound: usize) -> usize { + hiding_bound + 1 + } +} + +impl PCRandomness for Randomness +where + E: PairingEngine, + P: MVPolynomial, + P::Point: Index, +{ + fn empty() -> Self { + Self { + blinding_polynomial: P::zero(), + _engine: PhantomData, + } + } + + fn rand( + hiding_bound: usize, + _: bool, + num_vars: Option, + rng: &mut R, + ) -> Self { + let hiding_poly_degree = Self::calculate_hiding_polynomial_degree(hiding_bound); + Randomness { + blinding_polynomial: P::rand(hiding_poly_degree, num_vars.unwrap(), rng), + _engine: PhantomData, + } + } +} + +impl<'a, E: PairingEngine, P: MVPolynomial> Add<&'a Randomness> for Randomness +where + E: PairingEngine, + P: MVPolynomial, + P::Point: Index, +{ + type Output = Self; + + #[inline] + fn add(mut self, other: &'a Self) -> Self { + self.blinding_polynomial += &other.blinding_polynomial; + self + } +} + +impl<'a, E, P> Add<(E::Fr, &'a Randomness)> for Randomness +where + E: PairingEngine, + P: MVPolynomial, + P::Point: Index, +{ + type Output = Self; + + #[inline] + fn add(mut self, other: (E::Fr, &'a Randomness)) -> Self { + self += other; + self + } +} + +impl<'a, E, P> AddAssign<&'a Randomness> for Randomness +where + E: PairingEngine, + P: MVPolynomial, + P::Point: Index, +{ + #[inline] + fn add_assign(&mut self, other: &'a Self) { + self.blinding_polynomial += &other.blinding_polynomial; + } +} + +impl<'a, E, P> AddAssign<(E::Fr, &'a Randomness)> for Randomness +where + E: PairingEngine, + P: MVPolynomial, + P::Point: Index, +{ + #[inline] + fn add_assign(&mut self, (f, other): (E::Fr, &'a Randomness)) { + self.blinding_polynomial += (f, &other.blinding_polynomial); + } +} + +/// `Proof` is an evaluation proof that is output by `KZG10::open`. +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative( + Default(bound = ""), + Hash(bound = ""), + Clone(bound = ""), + Debug(bound = ""), + PartialEq(bound = ""), + Eq(bound = "") +)] +pub struct Proof { + /// Commitments to the witness polynomials + pub w: Vec, + /// Evaluation of the random polynomial at the point for which + /// the evaluation proof was produced. 
+ pub random_v: Option, +} + +impl PCProof for Proof { + fn size_in_bytes(&self) -> usize { + let hiding_size = if self.random_v.is_some() { + ark_ff::to_bytes![E::Fr::zero()].unwrap().len() + } else { + 0 + }; + (self.w.len() * ark_ff::to_bytes![E::G1Affine::zero()].unwrap().len()) / 2 + hiding_size + } +} + +impl ToBytes for Proof { + #[inline] + fn write(&self, mut writer: W) -> ark_std::io::Result<()> { + self.w + .iter() + .map(|e| e.write(&mut writer)) + .collect::>()?; + self.random_v + .as_ref() + .unwrap_or(&E::Fr::zero()) + .write(&mut writer) + } +} diff --git a/arkworks/poly-commit/src/marlin/marlin_pst13_pc/mod.rs b/arkworks/poly-commit/src/marlin/marlin_pst13_pc/mod.rs new file mode 100644 index 00000000..aef7705d --- /dev/null +++ b/arkworks/poly-commit/src/marlin/marlin_pst13_pc/mod.rs @@ -0,0 +1,853 @@ +use crate::{ + kzg10, + marlin::{marlin_pc, Marlin}, +}; +use crate::{BatchLCProof, Error, Evaluations, QuerySet}; +use crate::{LabeledCommitment, LabeledPolynomial, LinearCombination}; +use crate::{PCRandomness, PCUniversalParams, PolynomialCommitment}; +use crate::{ToString, Vec}; +use ark_ec::{ + msm::{FixedBaseMSM, VariableBaseMSM}, + AffineCurve, PairingEngine, ProjectiveCurve, +}; +use ark_ff::{One, PrimeField, UniformRand, Zero}; +use ark_poly::{multivariate::Term, MVPolynomial}; +use ark_std::rand::RngCore; +use ark_std::{marker::PhantomData, ops::Index, vec}; + +mod data_structures; +pub use data_structures::*; + +mod combinations; +use combinations::*; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +/// Multivariate polynomial commitment based on the construction in [[PST13]][pst] +/// with batching and (optional) hiding property inspired by the univariate scheme +/// in [[CHMMVW20, "Marlin"]][marlin] +/// +/// [pst]: https://eprint.iacr.org/2011/587 +/// [marlin]: https://eprint.iacr.org/2019/104 +pub struct MarlinPST13> { + _engine: PhantomData, + _poly: PhantomData
<P>
, +} + +impl> MarlinPST13 { + /// Given some point `z`, compute the quotients `w_i(X)` s.t + /// + /// `p(X) - p(z) = (X_1-z_1)*w_1(X) + (X_2-z_2)*w_2(X) + ... + (X_l-z_l)*w_l(X)` + /// + /// These quotients can always be found with no remainder. + fn divide_at_point(p: &P, point: &P::Point) -> Vec
<P>
+ where + P::Point: Index, + { + let num_vars = p.num_vars(); + if p.is_zero() { + return vec![P::zero(); num_vars]; + } + let mut quotients = Vec::with_capacity(num_vars); + // `cur` represents the current dividend + let mut cur = p.clone(); + // Divide `cur` by `X_i - z_i` + for i in 0..num_vars { + let mut quotient_terms = Vec::new(); + let mut remainder_terms = Vec::new(); + for (mut coeff, term) in cur.terms() { + // Since the final remainder is guaranteed to be 0, all the constant terms + // cancel out so we don't need to keep track of them + if term.is_constant() { + continue; + } + // If the current term contains `X_i` then divide appropiately, + // otherwise add it to the remainder + let mut term_vec = (&*term).to_vec(); + match term_vec.binary_search_by(|(var, _)| var.cmp(&i)) { + Ok(idx) => { + // Repeatedly divide the term by `X_i - z_i` until the remainder + // doesn't contain any `X_i`s + while term_vec[idx].1 > 1 { + // First divide by `X_i` and add the term to the quotient + term_vec[idx] = (i, term_vec[idx].1 - 1); + quotient_terms.push((coeff, P::Term::new(term_vec.clone()))); + // Then compute the remainder term in-place + coeff *= &point[i]; + } + // Since `X_i` is power 1, we can remove it entirely + term_vec.remove(idx); + quotient_terms.push((coeff, P::Term::new(term_vec.clone()))); + remainder_terms.push((point[i] * &coeff, P::Term::new(term_vec))); + } + Err(_) => remainder_terms.push((coeff, term.clone())), + } + } + quotients.push(P::from_coefficients_vec(num_vars, quotient_terms)); + // Set the current dividend to be the remainder of this division + cur = P::from_coefficients_vec(num_vars, remainder_terms); + } + quotients + } + + /// Check that the powers support the hiding bound + fn check_hiding_bound(hiding_poly_degree: usize, num_powers: usize) -> Result<(), Error> { + if hiding_poly_degree == 0 { + Err(Error::HidingBoundIsZero) + } else if hiding_poly_degree >= num_powers { + // The above check uses `>=` because committing to a hiding poly with + // degree `hiding_poly_degree` requires `hiding_poly_degree + 1` + // powers. + Err(Error::HidingBoundToolarge { + hiding_poly_degree, + num_powers, + }) + } else { + Ok(()) + } + } + + /// Check that a given polynomial is supported by parameters + fn check_degrees_and_bounds<'a>( + supported_degree: usize, + p: &'a LabeledPolynomial, + ) -> Result<(), Error> + where + P: 'a, + { + if p.degree() > supported_degree { + return Err(Error::PolynomialDegreeTooLarge { + poly_degree: p.degree(), + supported_degree, + label: p.label().to_string(), + }); + } else { + Ok(()) + } + } + + /// Convert polynomial coefficients to `BigInt` + fn convert_to_bigints(p: &P) -> Vec<::BigInt> { + let plain_coeffs = ark_std::cfg_into_iter!(p.terms()) + .map(|(coeff, _)| coeff.into_repr()) + .collect(); + plain_coeffs + } +} + +impl PolynomialCommitment for MarlinPST13 +where + E: PairingEngine, + P: MVPolynomial + Sync, + P::Point: Index, +{ + type UniversalParams = UniversalParams; + type CommitterKey = CommitterKey; + type VerifierKey = VerifierKey; + type PreparedVerifierKey = PreparedVerifierKey; + type Commitment = marlin_pc::Commitment; + type PreparedCommitment = marlin_pc::PreparedCommitment; + type Randomness = Randomness; + type Proof = Proof; + type BatchProof = Vec; + type Error = Error; + + /// Constructs public parameters when given as input the maximum degree `max_degree` + /// and number of variables `num_vars` for the polynomial commitment scheme. 
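A tiny worked instance of `divide_at_point` (notation as in its doc comment): take `p(X_1, X_2) = X_1 X_2 + X_2` and `z = (2, 3)`, so `p(z) = 9`. Then

```latex
p(X) - p(z) \;=\; X_1 X_2 + X_2 - 9
           \;=\; (X_1 - 2)\,\underbrace{X_2}_{w_1(X)} + (X_2 - 3)\,\underbrace{3}_{w_2(X)}.
```

The first pass divides out `(X_1 - 2)` with quotient `w_1 = X_2` and remainder `3 X_2 - 9`; the second pass divides that remainder by `(X_2 - 3)` with quotient `w_2 = 3` and, as the doc comment promises, no final remainder.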
+ fn setup( + max_degree: usize, + num_vars: Option, + rng: &mut R, + ) -> Result, Error> { + let num_vars = num_vars.ok_or(Error::InvalidNumberOfVariables)?; + if num_vars < 1 { + return Err(Error::InvalidNumberOfVariables); + } + if max_degree < 1 { + return Err(Error::DegreeIsZero); + } + let setup_time = start_timer!(|| format!( + "MarlinPST13::Setup with {} variables and max degree {}", + num_vars, max_degree + )); + // Trapdoor evaluation points + let mut betas = Vec::with_capacity(num_vars); + for _ in 0..num_vars { + betas.push(E::Fr::rand(rng)); + } + // Generators + let g = E::G1Projective::rand(rng); + let gamma_g = E::G1Projective::rand(rng); + let h = E::G2Projective::rand(rng); + + // A list of all variable numbers of multiplicity `max_degree` + let variable_set: Vec<_> = (0..num_vars) + .flat_map(|var| vec![var; max_degree]) + .collect(); + // Generate all possible monomials with `1 <= degree <= max_degree` + let (powers_of_beta, mut powers_of_beta_terms): (Vec<_>, Vec<_>) = (1..=max_degree) + .flat_map(|degree| { + // Sample all combinations of `degree` variables from `variable_set` + let terms: Vec> = if variable_set.len() == degree { + vec![variable_set.clone()] + } else { + Combinations::new(variable_set.clone(), degree).collect() + }; + // For each multiset in `terms` evaluate the corresponding monomial at the + // trapdoor and generate a `P::Term` object to index it + ark_std::cfg_into_iter!(terms) + .map(|term| { + let value: E::Fr = term.iter().map(|e| betas[*e]).product(); + let term = (0..num_vars) + .map(|var| (var, term.iter().filter(|e| **e == var).count())) + .collect(); + (value, P::Term::new(term)) + }) + .collect::>() + }) + .unzip(); + + let scalar_bits = E::Fr::size_in_bits(); + let g_time = start_timer!(|| "Generating powers of G"); + let window_size = FixedBaseMSM::get_mul_window_size(max_degree + 1); + let g_table = FixedBaseMSM::get_window_table(scalar_bits, window_size, g); + let mut powers_of_g = FixedBaseMSM::multi_scalar_mul::( + scalar_bits, + window_size, + &g_table, + &powers_of_beta, + ); + powers_of_g.push(g); + powers_of_beta_terms.push(P::Term::new(vec![])); + end_timer!(g_time); + + let gamma_g_time = start_timer!(|| "Generating powers of gamma * G"); + let window_size = FixedBaseMSM::get_mul_window_size(max_degree + 2); + let gamma_g_table = FixedBaseMSM::get_window_table(scalar_bits, window_size, gamma_g); + // Each element `i` of `powers_of_gamma_g` is a vector of length `max_degree+1` + // containing `betas[i]^j \gamma G` for `j` from 1 to `max_degree+1` to support + // up to `max_degree` queries + let mut powers_of_gamma_g = vec![Vec::new(); num_vars]; + ark_std::cfg_iter_mut!(powers_of_gamma_g) + .enumerate() + .for_each(|(i, v)| { + let mut powers_of_beta = Vec::with_capacity(max_degree); + let mut cur = E::Fr::one(); + for _ in 0..=max_degree { + cur *= &betas[i]; + powers_of_beta.push(cur); + } + *v = FixedBaseMSM::multi_scalar_mul::( + scalar_bits, + window_size, + &gamma_g_table, + &powers_of_beta, + ); + }); + end_timer!(gamma_g_time); + + let powers_of_g = E::G1Projective::batch_normalization_into_affine(&powers_of_g); + let gamma_g = gamma_g.into_affine(); + let powers_of_gamma_g = powers_of_gamma_g + .into_iter() + .map(|v| E::G1Projective::batch_normalization_into_affine(&v)) + .collect(); + let beta_h: Vec<_> = betas + .iter() + .map(|b| h.mul(&(*b).into_repr()).into_affine()) + .collect(); + let h = h.into_affine(); + let prepared_h = h.into(); + let prepared_beta_h = beta_h.iter().map(|bh| (*bh).into()).collect(); + + // 
Convert `powers_of_g` to a BTreeMap indexed by `powers_of_beta_terms` + let powers_of_g = powers_of_beta_terms + .into_iter() + .zip(powers_of_g.into_iter()) + .collect(); + + let pp = UniversalParams { + num_vars, + max_degree, + powers_of_g, + gamma_g, + powers_of_gamma_g, + h, + beta_h, + prepared_h, + prepared_beta_h, + }; + end_timer!(setup_time); + Ok(pp) + } + + /// Specializes the public parameters for polynomials up to the given `supported_degree` + /// + /// TODO: Add the ability to trim the number of variables + /// TODO: Update for support_hiding_bound + fn trim( + pp: &Self::UniversalParams, + supported_degree: usize, + _supported_hiding_bound: usize, + _enforced_degree_bounds: Option<&[usize]>, + ) -> Result<(Self::CommitterKey, Self::VerifierKey), Self::Error> { + let max_degree = pp.max_degree(); + if supported_degree > max_degree { + return Err(Error::TrimmingDegreeTooLarge); + } + + let ck_time = start_timer!(|| format!( + "Constructing CommitterKey of size {} for unshifted polys", + supported_degree + )); + // We want to support making up to supported_degree queries to committed + // polynomials. + let powers_of_g = pp + .powers_of_g + .iter() + .filter(|(k, _)| k.degree() <= supported_degree) + .map(|(k, v)| (k.clone(), v.clone())) + .collect(); + let powers_of_gamma_g = pp + .powers_of_gamma_g + .iter() + .map(|e| e[..=supported_degree].to_vec()) + .collect(); + end_timer!(ck_time); + + let ck = CommitterKey { + powers_of_g, + gamma_g: pp.gamma_g, + powers_of_gamma_g, + num_vars: pp.num_vars, + supported_degree, + max_degree, + }; + + let vk = VerifierKey { + g: pp.powers_of_g[&P::Term::new(vec![])], + gamma_g: pp.gamma_g, + h: pp.h, + beta_h: pp.beta_h.clone(), + prepared_h: pp.prepared_h.clone(), + prepared_beta_h: pp.prepared_beta_h.clone(), + num_vars: pp.num_vars, + supported_degree, + max_degree, + }; + Ok((ck, vk)) + } + + /// Outputs a commitments to `polynomials`. 
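For sizing intuition on the parameters constructed above: the term-indexed table `powers_of_g` holds one G1 element per monomial of total degree at most `max_degree` in `num_vars` variables, i.e. `binomial(num_vars + max_degree, max_degree)` elements, constant term included. A quick, hypothetical helper to compute that count:

```rust
/// Number of monomials of total degree <= d in n variables: C(n + d, d).
/// Each step computes C(n + i, i) exactly, so the division never truncates.
fn srs_size(n: usize, d: usize) -> usize {
    let mut c: usize = 1;
    for i in 1..=d {
        c = c * (n + i) / i;
    }
    c
}

fn main() {
    assert_eq!(srs_size(2, 2), 6); // {1, x, y, x^2, xy, y^2}
    assert_eq!(srs_size(3, 1), 4); // {1, x, y, z}
}
```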
+ fn commit<'a>( + ck: &Self::CommitterKey, + polynomials: impl IntoIterator>, + rng: Option<&mut dyn RngCore>, + ) -> Result< + ( + Vec>, + Vec, + ), + Self::Error, + > + where + P: 'a, + { + let rng = &mut crate::optional_rng::OptionalRng(rng); + let commit_time = start_timer!(|| "Committing to polynomials"); + let mut commitments = Vec::new(); + let mut randomness = Vec::new(); + for p in polynomials { + let label = p.label(); + let hiding_bound = p.hiding_bound(); + let polynomial: &P = p.polynomial(); + Self::check_degrees_and_bounds(ck.supported_degree, &p)?; + + let commit_time = start_timer!(|| { + format!( + "Polynomial {} with degree {} and hiding bound {:?}", + label, + polynomial.degree(), + hiding_bound, + ) + }); + // Get the powers of `G` corresponding to the terms of `polynomial` + let powers_of_g = ark_std::cfg_iter!(polynomial.terms()) + .map(|(_, term)| *ck.powers_of_g.get(term).unwrap()) + .collect::>(); + // Convert coefficients of `polynomial` to BigInts + let to_bigint_time = start_timer!(|| "Converting polynomial coeffs to bigints"); + let plain_ints = Self::convert_to_bigints(&polynomial); + end_timer!(to_bigint_time); + + let msm_time = start_timer!(|| "MSM to compute commitment to plaintext poly"); + let mut commitment = VariableBaseMSM::multi_scalar_mul(&powers_of_g, &plain_ints); + end_timer!(msm_time); + + // Sample random polynomial + let mut rand = Randomness::::empty(); + if let Some(hiding_degree) = hiding_bound { + let sample_random_poly_time = start_timer!(|| format!( + "Sampling a random polynomial of degree {}", + hiding_degree + )); + rand = as PCRandomness>::rand( + hiding_degree, + false, + Some(ck.num_vars), + rng, + ); + Self::check_hiding_bound(hiding_degree, ck.supported_degree + 1)?; + end_timer!(sample_random_poly_time); + } + + // Get the powers of `\gamma G` corresponding to the terms of `rand` + let powers_of_gamma_g = rand + .blinding_polynomial + .terms() + .iter() + .map(|(_, term)| { + // Implicit Assumption: Each monomial in `rand` is univariate + let vars = term.vars(); + match term.is_constant() { + true => ck.gamma_g, + false => ck.powers_of_gamma_g[vars[0]][term.degree() - 1], + } + }) + .collect::>(); + // Convert coefficients of `rand` to BigInt + let to_bigint_time = start_timer!(|| "Converting polynomial coeffs to bigints"); + let random_ints = Self::convert_to_bigints(&rand.blinding_polynomial); + end_timer!(to_bigint_time); + + let msm_time = start_timer!(|| "MSM to compute commitment to random poly"); + let random_commitment = + VariableBaseMSM::multi_scalar_mul(&powers_of_gamma_g, &random_ints).into_affine(); + end_timer!(msm_time); + + // Mask commitment with random poly + commitment.add_assign_mixed(&random_commitment); + + let comm = Self::Commitment { + comm: kzg10::Commitment(commitment.into()), + shifted_comm: None, + }; + + commitments.push(LabeledCommitment::new(label.to_string(), comm, None)); + randomness.push(rand); + end_timer!(commit_time); + } + end_timer!(commit_time); + Ok((commitments, randomness)) + } + + /// On input a polynomial `p` and a point `point`, outputs a proof for the same. 
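Pulling `setup`, `trim`, and `commit` together, here is a rough usage sketch in the spirit of this file's tests. It assumes the same `ark-bls12-381`/`ark-poly` dev-dependencies and the crate-level `PolynomialCommitment`, `LabeledPolynomial`, and `Error` items being in scope, and it elides the open/check steps:

```rust
use ark_bls12_381::Bls12_381;
use ark_ec::PairingEngine;
use ark_poly::multivariate::{SparsePolynomial, SparseTerm};
use ark_poly::MVPolynomial;

type F = <Bls12_381 as PairingEngine>::Fr;
type P = SparsePolynomial<F, SparseTerm>;
type PC = MarlinPST13<Bls12_381, P>;

fn demo() -> Result<(), Error> {
    let rng = &mut ark_std::test_rng();
    // Universal parameters for 3 variables and total degree up to 4.
    let pp = PC::setup(4, Some(3), rng)?;
    // Trim to the full degree with hiding bound 1; no degree bounds supported.
    let (ck, _vk) = PC::trim(&pp, 4, 1, None)?;
    // Commit to one labeled random polynomial with hiding bound 1.
    let poly = LabeledPolynomial::new("f".to_string(), P::rand(4, 3, rng), None, Some(1));
    let (_comms, _rands) = PC::commit(&ck, [&poly], Some(rng))?;
    // ...then open at a point and check, exactly as the tests below exercise.
    Ok(())
}
```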
+ fn open_individual_opening_challenges<'a>( + ck: &Self::CommitterKey, + labeled_polynomials: impl IntoIterator>, + _commitments: impl IntoIterator>, + point: &P::Point, + opening_challenges: &dyn Fn(u64) -> E::Fr, + rands: impl IntoIterator, + _rng: Option<&mut dyn RngCore>, + ) -> Result + where + P: 'a, + Self::Randomness: 'a, + Self::Commitment: 'a, + { + // Compute random linear combinations of committed polynomials and randomness + let mut p = P::zero(); + let mut r = Randomness::empty(); + let mut opening_challenge_counter = 0; + for (polynomial, rand) in labeled_polynomials.into_iter().zip(rands) { + Self::check_degrees_and_bounds(ck.supported_degree, &polynomial)?; + + // compute challenge^j and challenge^{j+1}. + let challenge_j = opening_challenges(opening_challenge_counter); + opening_challenge_counter += 1; + + p += (challenge_j, polynomial.polynomial()); + r += (challenge_j, rand); + } + + let open_time = start_timer!(|| format!("Opening polynomial of degree {}", p.degree())); + let witness_time = start_timer!(|| "Computing witness polynomials"); + let witnesses = Self::divide_at_point(&p, point); + let hiding_witnesses = if r.is_hiding() { + Some(Self::divide_at_point(&r.blinding_polynomial, point)) + } else { + None + }; + end_timer!(witness_time); + + let witness_comm_time = start_timer!(|| "Computing commitment to witness polynomials"); + let mut w = witnesses + .iter() + .map(|w| { + // Get the powers of `G` corresponding to the witness poly + let powers_of_g = ark_std::cfg_iter!(w.terms()) + .map(|(_, term)| *ck.powers_of_g.get(term).unwrap()) + .collect::>(); + // Convert coefficients to BigInt + let witness_ints = Self::convert_to_bigints(&w); + // Compute MSM + VariableBaseMSM::multi_scalar_mul(&powers_of_g, &witness_ints) + }) + .collect::>(); + end_timer!(witness_comm_time); + + // If the evaluation should be hiding, compute the MSM for `hiding_witnesses` and add + // to the `w`. Additionally, compute the evaluation of `r` at `point`. + let random_v = if let Some(hiding_witnesses) = hiding_witnesses { + let witness_comm_time = + start_timer!(|| "Computing commitment to hiding witness polynomials"); + ark_std::cfg_iter_mut!(w) + .enumerate() + .for_each(|(i, witness)| { + let hiding_witness = &hiding_witnesses[i]; + // Get the powers of `\gamma G` corresponding to the terms of `hiding_witness` + let powers_of_gamma_g = hiding_witness + .terms() + .iter() + .map(|(_, term)| { + // Implicit Assumption: Each monomial in `hiding_witness` is univariate + let vars = term.vars(); + match term.is_constant() { + true => ck.gamma_g, + false => ck.powers_of_gamma_g[vars[0]][term.degree() - 1], + } + }) + .collect::>(); + // Convert coefficients to BigInt + let hiding_witness_ints = Self::convert_to_bigints(hiding_witness); + // Compute MSM and add result to witness + *witness += &VariableBaseMSM::multi_scalar_mul( + &powers_of_gamma_g, + &hiding_witness_ints, + ); + }); + end_timer!(witness_comm_time); + Some(r.blinding_polynomial.evaluate(point)) + } else { + None + }; + end_timer!(open_time); + Ok(Proof { + w: w.into_iter().map(|w| w.into_affine()).collect(), + random_v, + }) + } + + /// Verifies that `value` is the evaluation at `x` of the polynomial + /// committed inside `comm`. 
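The verification that follows generalizes the univariate KZG equation to one pairing per variable. With combined commitment `C`, claimed value `v`, optional hiding term `\hat{v} = random_v`, and witness commitments `W_j`, the verifier accepts iff

```latex
e\bigl(C - v\,G - \hat{v}\,\gamma G,\; H\bigr)
\;=\; \prod_{j} e\bigl(W_j,\; \beta_j H - z_j H\bigr),
```

the pairing image of `p(X) - v = \sum_j (X_j - z_j) w_j(X)` at the trapdoor `(β_1, ..., β_n)`.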
+ fn check_individual_opening_challenges<'a>( + vk: &Self::VerifierKey, + commitments: impl IntoIterator>, + point: &'a P::Point, + values: impl IntoIterator, + proof: &Self::Proof, + opening_challenges: &dyn Fn(u64) -> E::Fr, + _rng: Option<&mut dyn RngCore>, + ) -> Result + where + Self::Commitment: 'a, + { + let check_time = start_timer!(|| "Checking evaluations"); + // Accumulate commitments and values + let (combined_comm, combined_value) = + Marlin::accumulate_commitments_and_values_individual_opening_challenges( + commitments, + values, + opening_challenges, + None, + )?; + // Compute both sides of the pairing equation + let mut inner = combined_comm.into().into_projective() - &vk.g.scalar_mul(combined_value); + if let Some(random_v) = proof.random_v { + inner -= &vk.gamma_g.scalar_mul(random_v); + } + let lhs = E::pairing(inner, vk.h); + + // Create a list of elements corresponding to each pairing in the product on the rhs + let rhs_product: Vec<(E::G1Prepared, E::G2Prepared)> = ark_std::cfg_iter!(proof.w) + .enumerate() + .map(|(j, w_j)| { + let beta_minus_z: E::G2Affine = + (vk.beta_h[j].into_projective() - &vk.h.scalar_mul(point[j])).into(); + ((*w_j).into(), beta_minus_z.into()) + }) + .collect(); + let rhs = E::product_of_pairings(&rhs_product); + end_timer!(check_time); + + Ok(lhs == rhs) + } + + fn batch_check_individual_opening_challenges<'a, R: RngCore>( + vk: &Self::VerifierKey, + commitments: impl IntoIterator>, + query_set: &QuerySet, + values: &Evaluations, + proof: &Self::BatchProof, + opening_challenges: &dyn Fn(u64) -> E::Fr, + rng: &mut R, + ) -> Result + where + Self::Commitment: 'a, + { + let (combined_comms, combined_queries, combined_evals) = Marlin::combine_and_normalize( + commitments, + query_set, + values, + opening_challenges, + None, + )?; + let check_time = + start_timer!(|| format!("Checking {} evaluation proofs", combined_comms.len())); + let g = vk.g.into_projective(); + let gamma_g = vk.gamma_g.into_projective(); + let mut total_c = ::zero(); + let mut total_w = vec![::zero(); vk.num_vars]; + let combination_time = start_timer!(|| "Combining commitments and proofs"); + let mut randomizer = E::Fr::one(); + // Instead of multiplying g and gamma_g in each turn, we simply accumulate + // their coefficients and perform a final multiplication at the end. + let mut g_multiplier = E::Fr::zero(); + let mut gamma_g_multiplier = E::Fr::zero(); + for (((c, z), v), proof) in combined_comms + .iter() + .zip(combined_queries) + .zip(combined_evals) + .zip(proof) + { + let w = &proof.w; + let mut temp: E::G1Projective = ark_std::cfg_iter!(w) + .enumerate() + .map(|(j, w_j)| w_j.scalar_mul(z[j])) + .sum(); + temp.add_assign_mixed(&c.0); + let c = temp; + g_multiplier += &(randomizer * &v); + if let Some(random_v) = proof.random_v { + gamma_g_multiplier += &(randomizer * &random_v); + } + total_c += &c.mul(&randomizer.into_repr()); + ark_std::cfg_iter_mut!(total_w) + .enumerate() + .for_each(|(i, w_i)| *w_i += &w[i].scalar_mul(randomizer)); + // We don't need to sample randomizers from the full field, + // only from 128-bit strings. 
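+ // (Concretely, each accumulated proof is scaled by an independent randomizer
+ // drawn uniformly from [0, 2^128); a single invalid opening then survives
+ // the combined pairing check with probability at most about 2^-128, while
+ // the short randomizers keep the extra scalar multiplications cheap.)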
+ randomizer = u128::rand(rng).into(); + } + total_c -= &g.mul(&g_multiplier.into_repr()); + total_c -= &gamma_g.mul(&gamma_g_multiplier.into_repr()); + end_timer!(combination_time); + + let to_affine_time = start_timer!(|| "Converting results to affine for pairing"); + let mut pairings = Vec::new(); + total_w.into_iter().enumerate().for_each(|(j, w_j)| { + pairings.push(((-w_j).into_affine().into(), vk.prepared_beta_h[j].clone())) + }); + pairings.push((total_c.into_affine().into(), vk.prepared_h.clone())); + end_timer!(to_affine_time); + + let pairing_time = start_timer!(|| "Performing product of pairings"); + let result = E::product_of_pairings(&pairings).is_one(); + end_timer!(pairing_time); + end_timer!(check_time); + Ok(result) + } + + fn open_combinations_individual_opening_challenges<'a>( + ck: &Self::CommitterKey, + lc_s: impl IntoIterator>, + polynomials: impl IntoIterator>, + commitments: impl IntoIterator>, + query_set: &QuerySet, + opening_challenges: &dyn Fn(u64) -> E::Fr, + rands: impl IntoIterator, + rng: Option<&mut dyn RngCore>, + ) -> Result, Self::Error> + where + P: 'a, + Self::Randomness: 'a, + Self::Commitment: 'a, + { + Marlin::open_combinations_individual_opening_challenges( + ck, + lc_s, + polynomials, + commitments, + query_set, + opening_challenges, + rands, + rng, + ) + } + + /// Checks that `values` are the true evaluations at `query_set` of the polynomials + /// committed in `labeled_commitments`. + fn check_combinations_individual_opening_challenges<'a, R: RngCore>( + vk: &Self::VerifierKey, + lc_s: impl IntoIterator>, + commitments: impl IntoIterator>, + query_set: &QuerySet, + evaluations: &Evaluations, + proof: &BatchLCProof, + opening_challenges: &dyn Fn(u64) -> E::Fr, + rng: &mut R, + ) -> Result + where + Self::Commitment: 'a, + { + Marlin::check_combinations_individual_opening_challenges( + vk, + lc_s, + commitments, + query_set, + evaluations, + proof, + opening_challenges, + rng, + ) + } +} + +#[cfg(test)] +mod tests { + #![allow(non_camel_case_types)] + use super::MarlinPST13; + use ark_bls12_377::Bls12_377; + use ark_bls12_381::Bls12_381; + use ark_ec::PairingEngine; + use ark_ff::UniformRand; + use ark_poly::{ + multivariate::{SparsePolynomial as SparsePoly, SparseTerm}, + MVPolynomial, + }; + use ark_std::rand::rngs::StdRng; + + type MVPoly_381 = SparsePoly<::Fr, SparseTerm>; + type MVPoly_377 = SparsePoly<::Fr, SparseTerm>; + + type PC = MarlinPST13; + type PC_Bls12_381 = PC; + type PC_Bls12_377 = PC; + + fn rand_poly( + degree: usize, + num_vars: Option, + rng: &mut StdRng, + ) -> SparsePoly { + as MVPolynomial>::rand(degree, num_vars.unwrap(), rng) + } + + fn rand_point(num_vars: Option, rng: &mut StdRng) -> Vec { + let num_vars = num_vars.unwrap(); + let mut point = Vec::with_capacity(num_vars); + for _ in 0..num_vars { + point.push(E::Fr::rand(rng)); + } + point + } + + #[test] + fn single_poly_test() { + use crate::tests::*; + let num_vars = Some(10); + single_poly_test::<_, _, PC_Bls12_377>( + num_vars, + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-377"); + single_poly_test::<_, _, PC_Bls12_381>( + num_vars, + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-381"); + } + + #[test] + fn full_end_to_end_test() { + use crate::tests::*; + let num_vars = Some(10); + full_end_to_end_test::<_, _, PC_Bls12_377>( + num_vars, + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-377"); + println!("Finished bls12-377"); + full_end_to_end_test::<_, _, PC_Bls12_381>( + num_vars, + rand_poly::, + 
rand_point::, + ) + .expect("test failed for bls12-381"); + println!("Finished bls12-381"); + } + + #[test] + fn single_equation_test() { + use crate::tests::*; + let num_vars = Some(10); + single_equation_test::<_, _, PC_Bls12_377>( + num_vars, + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-377"); + println!("Finished bls12-377"); + single_equation_test::<_, _, PC_Bls12_381>( + num_vars, + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-381"); + println!("Finished bls12-381"); + } + + #[test] + fn two_equation_test() { + use crate::tests::*; + let num_vars = Some(10); + two_equation_test::<_, _, PC_Bls12_377>( + num_vars, + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-377"); + println!("Finished bls12-377"); + two_equation_test::<_, _, PC_Bls12_381>( + num_vars, + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-381"); + println!("Finished bls12-381"); + } + + #[test] + fn full_end_to_end_equation_test() { + use crate::tests::*; + let num_vars = Some(10); + full_end_to_end_equation_test::<_, _, PC_Bls12_377>( + num_vars, + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-377"); + println!("Finished bls12-377"); + full_end_to_end_equation_test::<_, _, PC_Bls12_381>( + num_vars, + rand_poly::, + rand_point::, + ) + .expect("test failed for bls12-381"); + println!("Finished bls12-381"); + } +} diff --git a/arkworks/poly-commit/src/marlin/mod.rs b/arkworks/poly-commit/src/marlin/mod.rs new file mode 100644 index 00000000..98a2333d --- /dev/null +++ b/arkworks/poly-commit/src/marlin/mod.rs @@ -0,0 +1,400 @@ +use crate::{kzg10, Error}; +use crate::{BTreeMap, BTreeSet, Debug, RngCore, String, ToString, Vec}; +use crate::{BatchLCProof, LabeledPolynomial, LinearCombination}; +use crate::{Evaluations, LabeledCommitment, QuerySet}; +use crate::{PCRandomness, Polynomial, PolynomialCommitment}; +use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve}; +use ark_ff::{One, Zero}; +use ark_std::{convert::TryInto, hash::Hash, ops::AddAssign}; + +/// Polynomial commitment scheme from [[KZG10]][kzg] that enforces +/// strict degree bounds and (optionally) enables hiding commitments by +/// following the approach outlined in [[CHMMVW20, "Marlin"]][marlin]. 
+///
+/// [kzg]: http://cacr.uwaterloo.ca/techreports/2010/cacr2010-10.pdf
+/// [marlin]: https://eprint.iacr.org/2019/1047
+pub mod marlin_pc;
+
+/// Multivariate polynomial commitment based on the construction in
+/// [[PST13]][pst] with batching and (optional) hiding property inspired
+/// by the univariate scheme in [[CHMMVW20, "Marlin"]][marlin].
+///
+/// [pst]: https://eprint.iacr.org/2011/587.pdf
+/// [marlin]: https://eprint.iacr.org/2019/1047
+pub mod marlin_pst13_pc;
+
+/// Common functionalities between `marlin_pc` and `marlin_pst13_pc`.
+struct Marlin<E: PairingEngine> {
+    _engine: core::marker::PhantomData<E>,
+}
+
+impl<E: PairingEngine> Marlin<E> {
+    /// MSM for `commitments` and `coeffs`.
+    fn combine_commitments<'a>(
+        coeffs_and_comms: impl IntoIterator<Item = (E::Fr, &'a marlin_pc::Commitment<E>)>,
+    ) -> (E::G1Projective, Option<E::G1Projective>) {
+        let mut combined_comm = E::G1Projective::zero();
+        let mut combined_shifted_comm = None;
+        for (coeff, comm) in coeffs_and_comms {
+            if coeff.is_one() {
+                combined_comm.add_assign_mixed(&comm.comm.0);
+            } else {
+                combined_comm += &comm.comm.0.scalar_mul(coeff);
+            }
+
+            if let Some(shifted_comm) = &comm.shifted_comm {
+                let cur = shifted_comm.0.scalar_mul(coeff);
+                combined_shifted_comm = Some(combined_shifted_comm.map_or(cur, |c| c + cur));
+            }
+        }
+        (combined_comm, combined_shifted_comm)
+    }
+
+    /// Normalize a list of commitments.
+    fn normalize_commitments(
+        commitments: Vec<(E::G1Projective, Option<E::G1Projective>)>,
+    ) -> Vec<marlin_pc::Commitment<E>> {
+        let mut comms = Vec::with_capacity(commitments.len());
+        let mut s_comms = Vec::with_capacity(commitments.len());
+        let mut s_flags = Vec::with_capacity(commitments.len());
+        for (comm, s_comm) in commitments {
+            comms.push(comm);
+            if let Some(c) = s_comm {
+                s_comms.push(c);
+                s_flags.push(true);
+            } else {
+                s_comms.push(E::G1Projective::zero());
+                s_flags.push(false);
+            }
+        }
+        let comms = E::G1Projective::batch_normalization_into_affine(&comms);
+        let s_comms = E::G1Projective::batch_normalization_into_affine(&s_comms);
+        comms
+            .into_iter()
+            .zip(s_comms)
+            .zip(s_flags)
+            .map(|((c, s_c), flag)| {
+                let shifted_comm = if flag {
+                    Some(kzg10::Commitment(s_c))
+                } else {
+                    None
+                };
+                marlin_pc::Commitment {
+                    comm: kzg10::Commitment(c),
+                    shifted_comm,
+                }
+            })
+            .collect()
+    }
+
+    /// Accumulate `commitments` and `values` according to `opening_challenges`.
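// ---------------------------------------------------------------------------
// [Editor's note, not part of the diff] `combine_commitments` above
// special-cases a coefficient of one: adding an affine point into a projective
// accumulator (`add_assign_mixed`) costs a single mixed addition, while the
// general path pays for a scalar multiplication first. The same two paths with
// the stock `AffineCurve::mul` (a sketch; the diff's `scalar_mul` is this
// repository's own helper):
//
//     use ark_bls12_381::{Fr, G1Affine, G1Projective};
//     use ark_ec::{AffineCurve, ProjectiveCurve};
//     use ark_ff::Zero;
//     use ark_std::{test_rng, UniformRand};
//
//     let mut rng = test_rng();
//     let p: G1Affine = G1Projective::rand(&mut rng).into_affine();
//     let mut acc = G1Projective::zero();
//     acc.add_assign_mixed(&p);  // coeff == 1: one mixed addition
//     let c = Fr::rand(&mut rng);
//     acc += &p.mul(c);          // general coeff: scalar mul, then add
// ---------------------------------------------------------------------------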
+ fn accumulate_commitments_and_values_individual_opening_challenges<'a>( + commitments: impl IntoIterator>>, + values: impl IntoIterator, + opening_challenges: &dyn Fn(u64) -> E::Fr, + vk: Option<&marlin_pc::VerifierKey>, + ) -> Result<(E::G1Projective, E::Fr), Error> { + let acc_time = start_timer!(|| "Accumulating commitments and values"); + let mut combined_comm = E::G1Projective::zero(); + let mut combined_value = E::Fr::zero(); + let mut opening_challenge_counter = 0; + for (labeled_commitment, value) in commitments.into_iter().zip(values) { + let degree_bound = labeled_commitment.degree_bound(); + let commitment = labeled_commitment.commitment(); + assert_eq!(degree_bound.is_some(), commitment.shifted_comm.is_some()); + + let challenge_i = opening_challenges(opening_challenge_counter); + opening_challenge_counter += 1; + + combined_comm += &commitment.comm.0.scalar_mul(challenge_i); + combined_value += &(value * &challenge_i); + + if let Some(degree_bound) = degree_bound { + let challenge_i_1 = opening_challenges(opening_challenge_counter); + opening_challenge_counter += 1; + + let shifted_comm = commitment + .shifted_comm + .as_ref() + .unwrap() + .0 + .into_projective(); + + let shift_power = vk + .unwrap() + .get_shift_power(degree_bound) + .ok_or(Error::UnsupportedDegreeBound(degree_bound))?; + + let mut adjusted_comm = shifted_comm - &shift_power.scalar_mul(value); + + adjusted_comm *= challenge_i_1; + combined_comm += &adjusted_comm; + } + } + + end_timer!(acc_time); + Ok((combined_comm, combined_value)) + } + + /// Combine and normalize a set of commitments + fn combine_and_normalize<'a, D: Clone + Ord + Sync>( + commitments: impl IntoIterator>>, + query_set: &QuerySet, + evaluations: &Evaluations, + opening_challenges: &dyn Fn(u64) -> E::Fr, + vk: Option<&marlin_pc::VerifierKey>, + ) -> Result<(Vec>, Vec, Vec), Error> + where + marlin_pc::Commitment: 'a, + { + let commitments: BTreeMap<_, _> = commitments.into_iter().map(|c| (c.label(), c)).collect(); + let mut query_to_labels_map = BTreeMap::new(); + + for (label, (point_label, point)) in query_set.iter() { + let labels = query_to_labels_map + .entry(point_label) + .or_insert((point, BTreeSet::new())); + labels.1.insert(label); + } + + let mut combined_comms = Vec::new(); + let mut combined_queries = Vec::new(); + let mut combined_evals = Vec::new(); + for (_, (point, labels)) in query_to_labels_map.into_iter() { + let lc_time = + start_timer!(|| format!("Randomly combining {} commitments", labels.len())); + let mut comms_to_combine: Vec<&'_ LabeledCommitment<_>> = Vec::new(); + let mut values_to_combine = Vec::new(); + for label in labels.into_iter() { + let commitment = commitments.get(label).ok_or(Error::MissingPolynomial { + label: label.to_string(), + })?; + let degree_bound = commitment.degree_bound(); + assert_eq!( + degree_bound.is_some(), + commitment.commitment().shifted_comm.is_some() + ); + + let v_i = evaluations.get(&(label.clone(), point.clone())).ok_or( + Error::MissingEvaluation { + label: label.to_string(), + }, + )?; + + comms_to_combine.push(commitment); + values_to_combine.push(*v_i); + } + + let (c, v) = Marlin::accumulate_commitments_and_values_individual_opening_challenges( + comms_to_combine, + values_to_combine, + opening_challenges, + vk, + )?; + end_timer!(lc_time); + + combined_comms.push(c); + combined_queries.push(point.clone()); + combined_evals.push(v); + } + let norm_time = start_timer!(|| "Normalizing combined commitments"); + E::G1Projective::batch_normalization(&mut combined_comms); + 
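// ---------------------------------------------------------------------------
// [Editor's note, not part of the diff] `batch_normalization` rescales many
// projective points to z = 1 with Montgomery's batch-inversion trick: one
// shared field inversion plus a few multiplications per point, instead of one
// inversion per point. The `c.into()` conversions to affine right below are
// then cheap. In isolation:
//
//     use ark_bls12_381::{G1Affine, G1Projective};
//     use ark_ec::ProjectiveCurve;
//     use ark_std::{test_rng, UniformRand};
//
//     let mut rng = test_rng();
//     let mut pts: Vec<G1Projective> =
//         (0..8).map(|_| G1Projective::rand(&mut rng)).collect();
//     G1Projective::batch_normalization(&mut pts); // one shared inversion
//     let affine: Vec<G1Affine> = pts.into_iter().map(|p| p.into()).collect();
// ---------------------------------------------------------------------------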
let combined_comms = combined_comms + .into_iter() + .map(|c| kzg10::Commitment(c.into())) + .collect::>(); + end_timer!(norm_time); + Ok((combined_comms, combined_queries, combined_evals)) + } + + /// On input a list of polynomials, linear combinations of those polynomials, + /// and a query set, `open_combination` outputs a proof of evaluation of + /// the combinations at the points in the query set. + fn open_combinations_individual_opening_challenges<'a, P, D, PC>( + ck: &PC::CommitterKey, + lc_s: impl IntoIterator>, + polynomials: impl IntoIterator>, + commitments: impl IntoIterator>, + query_set: &QuerySet, + opening_challenges: &dyn Fn(u64) -> E::Fr, + rands: impl IntoIterator, + rng: Option<&mut dyn RngCore>, + ) -> Result, Error> + where + P: 'a + Polynomial, + D: Debug + Clone + Hash + Ord + Sync, + PC: PolynomialCommitment< + E::Fr, + P, + Commitment = marlin_pc::Commitment, + PreparedCommitment = marlin_pc::PreparedCommitment, + Error = Error, + >, + PC::Randomness: 'a + AddAssign<(E::Fr, &'a PC::Randomness)>, + PC::Commitment: 'a, + { + let label_map = polynomials + .into_iter() + .zip(rands) + .zip(commitments) + .map(|((p, r), c)| (p.label(), (p, r, c))) + .collect::>(); + + let mut lc_polynomials = Vec::new(); + let mut lc_randomness = Vec::new(); + let mut lc_commitments = Vec::new(); + let mut lc_info = Vec::new(); + + for lc in lc_s { + let lc_label = lc.label().clone(); + let mut poly = P::zero(); + let mut degree_bound = None; + let mut hiding_bound = None; + + let mut randomness = PC::Randomness::empty(); + let mut coeffs_and_comms = Vec::new(); + + let num_polys = lc.len(); + for (coeff, label) in lc.iter().filter(|(_, l)| !l.is_one()) { + let label: &String = label.try_into().expect("cannot be one!"); + let &(cur_poly, cur_rand, cur_comm) = + label_map.get(label).ok_or(Error::MissingPolynomial { + label: label.to_string(), + })?; + if num_polys == 1 && cur_poly.degree_bound().is_some() { + assert!( + coeff.is_one(), + "Coefficient must be one for degree-bounded equations" + ); + degree_bound = cur_poly.degree_bound(); + } else if cur_poly.degree_bound().is_some() { + return Err(Error::EquationHasDegreeBounds(lc_label)); + } + // Some(_) > None, always. 
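// ---------------------------------------------------------------------------
// [Editor's note, not part of the diff] The derived `Ord` on `Option` treats
// `None` as smaller than any `Some`, so the `max` below keeps the largest
// hiding bound seen so far and stays `None` only if no polynomial in the
// combination is hiding:
//
//     assert!(Some(0usize) > None);
//     assert_eq!(core::cmp::max(None, Some(2usize)), Some(2));
//     assert_eq!(core::cmp::max(Some(3usize), Some(2)), Some(3));
// ---------------------------------------------------------------------------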
+ hiding_bound = core::cmp::max(hiding_bound, cur_poly.hiding_bound()); + poly += (*coeff, cur_poly.polynomial()); + randomness += (*coeff, cur_rand); + coeffs_and_comms.push((*coeff, cur_comm.commitment())); + } + + let lc_poly = + LabeledPolynomial::new(lc_label.clone(), poly, degree_bound, hiding_bound); + lc_polynomials.push(lc_poly); + lc_randomness.push(randomness); + lc_commitments.push(Marlin::combine_commitments(coeffs_and_comms)); + lc_info.push((lc_label, degree_bound)); + } + + let comms = Marlin::normalize_commitments(lc_commitments); + let lc_commitments = lc_info + .into_iter() + .zip(comms) + .map(|((label, d), c)| LabeledCommitment::new(label, c, d)) + .collect::>(); + + let proof = PC::batch_open_individual_opening_challenges( + ck, + lc_polynomials.iter(), + lc_commitments.iter(), + &query_set, + opening_challenges, + lc_randomness.iter(), + rng, + )?; + + Ok(BatchLCProof { proof, evals: None }) + } + + fn check_combinations_individual_opening_challenges<'a, R, P, D, PC>( + vk: &PC::VerifierKey, + lc_s: impl IntoIterator>, + commitments: impl IntoIterator>, + query_set: &QuerySet, + evaluations: &Evaluations, + proof: &BatchLCProof, + opening_challenges: &dyn Fn(u64) -> E::Fr, + rng: &mut R, + ) -> Result + where + R: RngCore, + P: Polynomial, + D: Debug + Clone + Hash + Ord + Sync, + PC: PolynomialCommitment< + E::Fr, + P, + Commitment = marlin_pc::Commitment, + PreparedCommitment = marlin_pc::PreparedCommitment, + Error = Error, + >, + PC::Commitment: 'a, + { + let BatchLCProof { proof, .. } = proof; + let label_comm_map = commitments + .into_iter() + .map(|c| (c.label(), c)) + .collect::>(); + + let mut lc_commitments = Vec::new(); + let mut lc_info = Vec::new(); + let mut evaluations = evaluations.clone(); + + let lc_processing_time = start_timer!(|| "Combining commitments"); + for lc in lc_s { + let lc_label = lc.label().clone(); + let num_polys = lc.len(); + + let mut degree_bound = None; + let mut coeffs_and_comms = Vec::new(); + + for (coeff, label) in lc.iter() { + if label.is_one() { + for (&(ref label, _), ref mut eval) in evaluations.iter_mut() { + if label == &lc_label { + **eval -= coeff; + } + } + } else { + let label: &String = label.try_into().unwrap(); + let &cur_comm = label_comm_map.get(label).ok_or(Error::MissingPolynomial { + label: label.to_string(), + })?; + + if num_polys == 1 && cur_comm.degree_bound().is_some() { + assert!( + coeff.is_one(), + "Coefficient must be one for degree-bounded equations" + ); + degree_bound = cur_comm.degree_bound(); + } else if cur_comm.degree_bound().is_some() { + return Err(Error::EquationHasDegreeBounds(lc_label)); + } + coeffs_and_comms.push((*coeff, cur_comm.commitment())); + } + } + let lc_time = + start_timer!(|| format!("Combining {} commitments for {}", num_polys, lc_label)); + lc_commitments.push(Marlin::combine_commitments(coeffs_and_comms)); + end_timer!(lc_time); + lc_info.push((lc_label, degree_bound)); + } + end_timer!(lc_processing_time); + let combined_comms_norm_time = start_timer!(|| "Normalizing commitments"); + let comms = Marlin::normalize_commitments(lc_commitments); + let lc_commitments = lc_info + .into_iter() + .zip(comms) + .map(|((label, d), c)| LabeledCommitment::new(label, c, d)) + .collect::>(); + end_timer!(combined_comms_norm_time); + + PC::batch_check_individual_opening_challenges( + vk, + &lc_commitments, + &query_set, + &evaluations, + proof, + opening_challenges, + rng, + ) + } +} diff --git a/arkworks/poly-commit/src/multilinear_pc/data_structures.rs 
b/arkworks/poly-commit/src/multilinear_pc/data_structures.rs new file mode 100644 index 00000000..e52ef66b --- /dev/null +++ b/arkworks/poly-commit/src/multilinear_pc/data_structures.rs @@ -0,0 +1,70 @@ +use ark_ec::PairingEngine; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write}; +use ark_std::vec::Vec; +#[allow(type_alias_bounds)] +/// Evaluations over {0,1}^n for G1 +pub type EvaluationHyperCubeOnG1 = Vec; +#[allow(type_alias_bounds)] +/// Evaluations over {0,1}^n for G2 +pub type EvaluationHyperCubeOnG2 = Vec; + +/// Public Parameter used by prover +#[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug)] +pub struct UniversalParams { + /// number of variables + pub num_vars: usize, + /// `pp_{num_vars}`, `pp_{num_vars - 1}`, `pp_{num_vars - 2}`, ..., defined by XZZPD19 + pub powers_of_g: Vec>, + /// `pp_{num_vars}`, `pp_{num_vars - 1}`, `pp_{num_vars - 2}`, ..., defined by XZZPD19 + pub powers_of_h: Vec>, + /// generator for G1 + pub g: E::G1Affine, + /// generator for G2 + pub h: E::G2Affine, + /// g^randomness + pub g_mask: Vec, +} + +/// Public Parameter used by prover +#[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug)] +pub struct CommitterKey { + /// number of variables + pub nv: usize, + /// pp_k defined by libra + pub powers_of_g: Vec>, + /// pp_h defined by libra + pub powers_of_h: Vec>, + /// generator for G1 + pub g: E::G1Affine, + /// generator for G2 + pub h: E::G2Affine, +} + +/// Public Parameter used by prover +#[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug)] +pub struct VerifierKey { + /// number of variables + pub nv: usize, + /// generator of G1 + pub g: E::G1Affine, + /// generator of G2 + pub h: E::G2Affine, + /// g^t1, g^t2, ... + pub g_mask_random: Vec, +} + +#[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug)] +/// commitment +pub struct Commitment { + /// number of variables + pub nv: usize, + /// product of g as described by the vRAM paper + pub g_product: E::G1Affine, +} + +#[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug)] +/// proof of opening +pub struct Proof { + /// Evaluation of quotients + pub proofs: Vec, +} diff --git a/arkworks/poly-commit/src/multilinear_pc/mod.rs b/arkworks/poly-commit/src/multilinear_pc/mod.rs new file mode 100644 index 00000000..bc25406d --- /dev/null +++ b/arkworks/poly-commit/src/multilinear_pc/mod.rs @@ -0,0 +1,341 @@ +use crate::multilinear_pc::data_structures::{ + Commitment, CommitterKey, Proof, UniversalParams, VerifierKey, +}; +use ark_ec::msm::{FixedBaseMSM, VariableBaseMSM}; +use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve}; +use ark_ff::{Field, PrimeField}; +use ark_ff::{One, Zero}; +use ark_poly::{DenseMultilinearExtension, MultilinearExtension}; +use ark_std::collections::LinkedList; +use ark_std::iter::FromIterator; +use ark_std::marker::PhantomData; +use ark_std::rand::RngCore; +use ark_std::vec::Vec; +use ark_std::UniformRand; + +/// data structures used by multilinear extension commitment scheme +pub mod data_structures; + +/// Polynomial Commitment Scheme on multilinear extensions. 
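// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the diff] End-to-end shape of the scheme
// defined below, mirroring the tests at the bottom of this file (BLS12-381,
// four variables):
//
//     use ark_bls12_381::Bls12_381;
//     use ark_ec::PairingEngine;
//     use ark_poly::{DenseMultilinearExtension, MultilinearExtension};
//     use ark_std::{test_rng, UniformRand};
//     type Fr = <Bls12_381 as PairingEngine>::Fr;
//
//     let mut rng = test_rng();
//     let params = MultilinearPC::<Bls12_381>::setup(4, &mut rng);
//     let (ck, vk) = MultilinearPC::<Bls12_381>::trim(&params, 4);
//     let poly = DenseMultilinearExtension::rand(4, &mut rng);
//     let point: Vec<_> = (0..4).map(|_| Fr::rand(&mut rng)).collect();
//     let com = MultilinearPC::commit(&ck, &poly);
//     let proof = MultilinearPC::open(&ck, &poly, &point);
//     let value = poly.evaluate(&point).unwrap();
//     assert!(MultilinearPC::check(&vk, &com, &point, value, &proof));
// ---------------------------------------------------------------------------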
+pub struct MultilinearPC { + _engine: PhantomData, +} + +impl MultilinearPC { + /// setup + pub fn setup(num_vars: usize, rng: &mut R) -> UniversalParams { + assert!(num_vars > 0, "constant polynomial not supported"); + let g: E::G1Projective = E::G1Projective::rand(rng); + let h: E::G2Projective = E::G2Projective::rand(rng); + let g = g.into_affine(); + let h = h.into_affine(); + let mut powers_of_g = Vec::new(); + let mut powers_of_h = Vec::new(); + let t: Vec<_> = (0..num_vars).map(|_| E::Fr::rand(rng)).collect(); + let scalar_bits = E::Fr::size_in_bits(); + + let mut eq: LinkedList> = + LinkedList::from_iter(eq_extension(&t).into_iter()); + let mut eq_arr = LinkedList::new(); + let mut base = eq.pop_back().unwrap().evaluations; + + for i in (0..num_vars).rev() { + eq_arr.push_front(remove_dummy_variable(&base, i)); + if i != 0 { + let mul = eq.pop_back().unwrap().evaluations; + base = base + .into_iter() + .zip(mul.into_iter()) + .map(|(a, b)| a * &b) + .collect(); + } + } + + let mut pp_powers = Vec::new(); + let mut total_scalars = 0; + for i in 0..num_vars { + let eq = eq_arr.pop_front().unwrap(); + let pp_k_powers = (0..(1 << (num_vars - i))).map(|x| eq[x]); + pp_powers.extend(pp_k_powers); + total_scalars += 1 << (num_vars - i); + } + let window_size = FixedBaseMSM::get_mul_window_size(total_scalars); + let g_table = FixedBaseMSM::get_window_table(scalar_bits, window_size, g.into_projective()); + let h_table = FixedBaseMSM::get_window_table(scalar_bits, window_size, h.into_projective()); + + let pp_g = E::G1Projective::batch_normalization_into_affine( + &FixedBaseMSM::multi_scalar_mul(scalar_bits, window_size, &g_table, &pp_powers), + ); + let pp_h = E::G2Projective::batch_normalization_into_affine( + &FixedBaseMSM::multi_scalar_mul(scalar_bits, window_size, &h_table, &pp_powers), + ); + let mut start = 0; + for i in 0..num_vars { + let size = 1 << (num_vars - i); + let pp_k_g = (&pp_g[start..(start + size)]).to_vec(); + let pp_k_h = (&pp_h[start..(start + size)]).to_vec(); + powers_of_g.push(pp_k_g); + powers_of_h.push(pp_k_h); + start += size; + } + + // uncomment to measure the time for calculating vp + // let vp_generation_timer = start_timer!(|| "VP generation"); + let g_mask = { + let window_size = FixedBaseMSM::get_mul_window_size(num_vars); + let g_table = + FixedBaseMSM::get_window_table(scalar_bits, window_size, g.into_projective()); + E::G1Projective::batch_normalization_into_affine(&FixedBaseMSM::multi_scalar_mul( + scalar_bits, + window_size, + &g_table, + &t, + )) + }; + // end_timer!(vp_generation_timer); + + UniversalParams { + num_vars, + g, + g_mask, + h, + powers_of_g, + powers_of_h, + } + } + + /// Trim the universal parameters to specialize the public parameters + /// for multilinear polynomials to the given `supported_num_vars`, and returns committer key and verifier key. 
+ /// `supported_num_vars` should be in range `1..=params.num_vars` + pub fn trim( + params: &UniversalParams, + supported_num_vars: usize, + ) -> (CommitterKey, VerifierKey) { + assert!(supported_num_vars <= params.num_vars); + let to_reduce = params.num_vars - supported_num_vars; + let ck = CommitterKey { + powers_of_h: (¶ms.powers_of_h[to_reduce..]).to_vec(), + powers_of_g: (¶ms.powers_of_g[to_reduce..]).to_vec(), + g: params.g, + h: params.h, + nv: supported_num_vars, + }; + let vk = VerifierKey { + nv: supported_num_vars, + g: params.g, + h: params.h, + g_mask_random: (¶ms.g_mask[to_reduce..]).to_vec(), + }; + (ck, vk) + } + + /// commit + pub fn commit( + ck: &CommitterKey, + polynomial: &impl MultilinearExtension, + ) -> Commitment { + let nv = polynomial.num_vars(); + let scalars: Vec<_> = polynomial + .to_evaluations() + .into_iter() + .map(|x| x.into_repr()) + .collect(); + let g_product = + VariableBaseMSM::multi_scalar_mul(&ck.powers_of_g[0], scalars.as_slice()).into_affine(); + Commitment { nv, g_product } + } + + /// On input a polynomial `p` and a point `point`, outputs a proof for the same. + pub fn open( + ck: &CommitterKey, + polynomial: &impl MultilinearExtension, + point: &[E::Fr], + ) -> Proof { + assert_eq!(polynomial.num_vars(), ck.nv, "Invalid size of polynomial"); + let nv = polynomial.num_vars(); + let mut r: Vec> = (0..nv + 1).map(|_| Vec::new()).collect(); + let mut q: Vec> = (0..nv + 1).map(|_| Vec::new()).collect(); + + r[nv] = polynomial.to_evaluations(); + + let mut proofs = Vec::new(); + for i in 0..nv { + let k = nv - i; + let point_at_k = point[i]; + q[k] = (0..(1 << (k - 1))).map(|_| E::Fr::zero()).collect(); + r[k - 1] = (0..(1 << (k - 1))).map(|_| E::Fr::zero()).collect(); + for b in 0..(1 << (k - 1)) { + q[k][b] = r[k][(b << 1) + 1] - &r[k][b << 1]; + r[k - 1][b] = r[k][b << 1] * &(E::Fr::one() - &point_at_k) + + &(r[k][(b << 1) + 1] * &point_at_k); + } + let scalars: Vec<_> = (0..(1 << k)) + .map(|x| q[k][x >> 1].into_repr()) // fine + .collect(); + + let pi_h = + VariableBaseMSM::multi_scalar_mul(&ck.powers_of_h[i], &scalars).into_affine(); // no need to move outside and partition + proofs.push(pi_h); + } + + Proof { proofs } + } + + /// Verifies that `value` is the evaluation at `x` of the polynomial + /// committed inside `comm`. 
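// ---------------------------------------------------------------------------
// [Editor's note, not part of the diff] The `open` loop above fixes one
// variable per round: from the evaluation table r[k] of a k-variate
// multilinear polynomial it computes the quotient q[k] with respect to
// (x - z_k) and the restricted table r[k-1] via
//
//     p(z, b) = (1 - z) * p(0, b) + z * p(1, b),    q(b) = p(1, b) - p(0, b).
//
// One folding round in isolation (`fold_once` is a hypothetical helper):
//
//     use ark_bls12_381::Fr;
//     use ark_ff::One;
//
//     fn fold_once(evals: &[Fr], z: Fr) -> (Vec<Fr>, Vec<Fr>) {
//         let half = evals.len() / 2;
//         let (mut rest, mut quot) = (Vec::with_capacity(half), Vec::with_capacity(half));
//         for b in 0..half {
//             let (p0, p1) = (evals[b << 1], evals[(b << 1) + 1]);
//             quot.push(p1 - p0);
//             rest.push(p0 * (Fr::one() - z) + p1 * z);
//         }
//         (rest, quot)
//     }
// ---------------------------------------------------------------------------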
+    pub fn check<'a>(
+        vk: &VerifierKey<E>,
+        commitment: &Commitment<E>,
+        point: &[E::Fr],
+        value: E::Fr,
+        proof: &Proof<E>,
+    ) -> bool {
+        let left = E::pairing(
+            commitment.g_product.into_projective() - &vk.g.scalar_mul(value),
+            vk.h,
+        );
+
+        let scalar_size = E::Fr::size_in_bits();
+        let window_size = FixedBaseMSM::get_mul_window_size(vk.nv);
+
+        let g_table =
+            FixedBaseMSM::get_window_table(scalar_size, window_size, vk.g.into_projective());
+        let g_mul: Vec<E::G1Projective> =
+            FixedBaseMSM::multi_scalar_mul(scalar_size, window_size, &g_table, point);
+
+        let pairing_lefts: Vec<_> = (0..vk.nv)
+            .map(|i| vk.g_mask_random[i].into_projective() - &g_mul[i])
+            .collect();
+        let pairing_lefts: Vec<E::G1Affine> =
+            E::G1Projective::batch_normalization_into_affine(&pairing_lefts);
+        let pairing_lefts: Vec<E::G1Prepared> = pairing_lefts
+            .into_iter()
+            .map(|x| E::G1Prepared::from(x))
+            .collect();
+
+        let pairing_rights: Vec<E::G2Prepared> = proof
+            .proofs
+            .iter()
+            .map(|x| E::G2Prepared::from(*x))
+            .collect();
+
+        let pairings: Vec<_> = pairing_lefts
+            .into_iter()
+            .zip(pairing_rights.into_iter())
+            .collect();
+        let right = E::product_of_pairings(pairings.iter());
+        left == right
+    }
+}
+
+/// Fix the first `pad` variables of `poly`, represented in evaluation form, to zero.
+fn remove_dummy_variable<F: Field>(poly: &[F], pad: usize) -> Vec<F> {
+    if pad == 0 {
+        return poly.to_vec();
+    }
+    if !poly.len().is_power_of_two() {
+        panic!("Size of polynomial should be power of two.")
+    }
+    let nv = ark_std::log2(poly.len()) as usize - pad;
+    let table: Vec<_> = (0..(1 << nv)).map(|x| poly[x << pad]).collect();
+    table
+}
+
+/// Generate eq(t,x), a product of multilinear polynomials with fixed t.
+/// eq(a,b) takes extensions of a,b in {0,1}^num_vars; if a and b in
+/// {0,1}^num_vars are equal, then this polynomial evaluates to 1.
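// ---------------------------------------------------------------------------
// [Editor's note, not part of the diff] Each factor built in the loop below is
//
//     t_i * x_i + (1 - t_i) * (1 - x_i) = 2*t_i*x_i - t_i - x_i + 1,
//
// which matches the expression `ti_xi + ti_xi - xi - ti + F::one()`. Over the
// Boolean domain it is the equality indicator for one coordinate:
//
//     use ark_bls12_381::Fr;
//     use ark_ff::{One, Zero};
//
//     let (one, zero) = (Fr::one(), Fr::zero());
//     let f = |t: Fr, x: Fr| t * x + t * x - x - t + one;
//     assert_eq!(f(one, one), one);
//     assert_eq!(f(zero, zero), one);
//     assert_eq!(f(one, zero), zero);
//     assert_eq!(f(zero, one), zero);
// ---------------------------------------------------------------------------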
+fn eq_extension(t: &[F]) -> Vec> { + let dim = t.len(); + let mut result = Vec::new(); + for i in 0..dim { + let mut poly = Vec::with_capacity(1 << dim); + for x in 0..(1 << dim) { + let xi = if x >> i & 1 == 1 { F::one() } else { F::zero() }; + let ti = t[i]; + let ti_xi = ti * xi; + poly.push(ti_xi + ti_xi - xi - ti + F::one()); + } + result.push(DenseMultilinearExtension::from_evaluations_vec(dim, poly)); + } + + result +} + +#[cfg(test)] +mod tests { + use crate::multilinear_pc::data_structures::UniversalParams; + use crate::multilinear_pc::MultilinearPC; + use ark_bls12_381::Bls12_381; + use ark_ec::PairingEngine; + use ark_poly::{DenseMultilinearExtension, MultilinearExtension, SparseMultilinearExtension}; + use ark_std::rand::RngCore; + use ark_std::vec::Vec; + use ark_std::{test_rng, UniformRand}; + type E = Bls12_381; + type Fr = ::Fr; + + fn test_polynomial( + uni_params: &UniversalParams, + poly: &impl MultilinearExtension, + rng: &mut R, + ) { + let nv = poly.num_vars(); + assert_ne!(nv, 0); + let (ck, vk) = MultilinearPC::::trim(&uni_params, nv); + let point: Vec<_> = (0..nv).map(|_| Fr::rand(rng)).collect(); + let com = MultilinearPC::commit(&ck, poly); + let proof = MultilinearPC::open(&ck, poly, &point); + + let value = poly.evaluate(&point).unwrap(); + let result = MultilinearPC::check(&vk, &com, &point, value, &proof); + assert!(result); + } + + #[test] + fn setup_commit_verify_correct_polynomials() { + let mut rng = test_rng(); + + // normal polynomials + let uni_params = MultilinearPC::setup(10, &mut rng); + + let poly1 = DenseMultilinearExtension::rand(8, &mut rng); + test_polynomial(&uni_params, &poly1, &mut rng); + + let poly2 = SparseMultilinearExtension::rand_with_config(9, 1 << 5, &mut rng); + test_polynomial(&uni_params, &poly2, &mut rng); + + // single-variate polynomials + + let poly3 = DenseMultilinearExtension::rand(1, &mut rng); + test_polynomial(&uni_params, &poly3, &mut rng); + + let poly4 = SparseMultilinearExtension::rand_with_config(1, 1 << 1, &mut rng); + test_polynomial(&uni_params, &poly4, &mut rng); + } + + #[test] + #[should_panic] + fn setup_commit_verify_constant_polynomial() { + let mut rng = test_rng(); + + // normal polynomials + MultilinearPC::::setup(0, &mut rng); + } + + #[test] + fn setup_commit_verify_incorrect_polynomial_should_return_false() { + let mut rng = test_rng(); + let nv = 8; + let uni_params = MultilinearPC::setup(nv, &mut rng); + let poly = DenseMultilinearExtension::rand(nv, &mut rng); + let nv = uni_params.num_vars; + let (ck, vk) = MultilinearPC::::trim(&uni_params, nv); + let point: Vec<_> = (0..nv).map(|_| Fr::rand(&mut rng)).collect(); + let com = MultilinearPC::commit(&ck, &poly); + let proof = MultilinearPC::open(&ck, &poly, &point); + + let value = poly.evaluate(&point).unwrap(); + let result = MultilinearPC::check(&vk, &com, &point, value + &(1u16.into()), &proof); + assert!(!result); + } +} diff --git a/arkworks/poly-commit/src/optional_rng.rs b/arkworks/poly-commit/src/optional_rng.rs new file mode 100644 index 00000000..44c4fe1c --- /dev/null +++ b/arkworks/poly-commit/src/optional_rng.rs @@ -0,0 +1,52 @@ +use ark_std::rand::RngCore; +use core::num::NonZeroU32; + +/// `OptionalRng` is a hack that is necessary because `Option<&mut R>` is not implicitly reborrowed +/// like `&mut R` is. This causes problems when a variable of type `Option<&mut R>` +/// is moved (eg, in a loop). +/// +/// To overcome this, we define the wrapper `OptionalRng` here that can be borrowed +/// mutably, without fear of being moved. 
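// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the diff] The reborrow problem and the fix,
// with hypothetical helper names:
//
//     use ark_std::rand::RngCore;
//
//     fn use_rng<R: RngCore>(_rng: &mut R) {}
//
//     fn demo<R: RngCore>(rng: Option<&mut R>) {
//         // for _ in 0..3 { rng.map(|r| use_rng(r)); } // error: `rng` moved
//         let mut rng = OptionalRng(rng); // wrap once...
//         for _ in 0..3 {
//             use_rng(&mut rng); // ...then reborrow freely each iteration
//         }
//     }
// ---------------------------------------------------------------------------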
+pub struct OptionalRng(pub Option); + +impl RngCore for OptionalRng { + #[inline] + fn next_u32(&mut self) -> u32 { + (&mut self.0) + .as_mut() + .map(|r| r.next_u32()) + .expect("Rng was invoked in a non-hiding context") + } + + #[inline] + fn next_u64(&mut self) -> u64 { + (&mut self.0) + .as_mut() + .map(|r| r.next_u64()) + .expect("Rng was invoked in a non-hiding context") + } + + #[inline] + fn fill_bytes(&mut self, dest: &mut [u8]) { + (&mut self.0) + .as_mut() + .map(|r| r.fill_bytes(dest)) + .expect("Rng was invoked in a non-hiding context") + } + + #[inline] + fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), ark_std::rand::Error> { + match &mut self.0 { + Some(r) => r.try_fill_bytes(dest), + None => Err(NonZeroU32::new(ark_std::rand::Error::CUSTOM_START) + .unwrap() + .into()), + } + } +} + +impl From for OptionalRng { + fn from(other: R) -> Self { + Self(Some(other)) + } +} diff --git a/arkworks/poly-commit/src/sonic_pc/data_structures.rs b/arkworks/poly-commit/src/sonic_pc/data_structures.rs new file mode 100644 index 00000000..07a9b777 --- /dev/null +++ b/arkworks/poly-commit/src/sonic_pc/data_structures.rs @@ -0,0 +1,337 @@ +use crate::kzg10; +use crate::{ + BTreeMap, PCCommitterKey, PCPreparedCommitment, PCPreparedVerifierKey, PCVerifierKey, Vec, +}; +use ark_ec::{PairingEngine, ProjectiveCurve}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError}; +use ark_std::io::{Read, Write}; + +/// `UniversalParams` are the universal parameters for the KZG10 scheme. +pub type UniversalParams = kzg10::UniversalParams; + +/// `Randomness` is the randomness for the KZG10 scheme. +pub type Randomness = kzg10::Randomness; + +/// `Commitment` is the commitment for the KZG10 scheme. +pub type Commitment = kzg10::Commitment; + +/// `PreparedCommitment` is the prepared commitment for the KZG10 scheme. +pub type PreparedCommitment = kzg10::PreparedCommitment; + +impl PCPreparedCommitment> for PreparedCommitment { + /// prepare `PreparedCommitment` from `Commitment` + fn prepare(comm: &Commitment) -> Self { + let mut prepared_comm = Vec::::new(); + let mut cur = E::G1Projective::from(comm.0.clone()); + for _ in 0..128 { + prepared_comm.push(cur.clone().into()); + cur.double_in_place(); + } + + Self { 0: prepared_comm } + } +} + +/// `ComitterKey` is used to commit to, and create evaluation proofs for, a given +/// polynomial. +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative( + Default(bound = ""), + Hash(bound = ""), + Clone(bound = ""), + Debug(bound = "") +)] +pub struct CommitterKey { + /// The key used to commit to polynomials. + pub powers_of_g: Vec, + + /// The key used to commit to hiding polynomials. + pub powers_of_gamma_g: Vec, + + /// The powers used to commit to shifted polynomials. + /// This is `None` if `self` does not support enforcing any degree bounds. + pub shifted_powers_of_g: Option>, + + /// The powers used to commit to shifted hiding polynomials. + /// This is `None` if `self` does not support enforcing any degree bounds. + pub shifted_powers_of_gamma_g: Option>>, + + /// The degree bounds that are supported by `self`. + /// Sorted in ascending order from smallest bound to largest bound. + /// This is `None` if `self` does not support enforcing any degree bounds. 
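// ---------------------------------------------------------------------------
// [Editor's note, not part of the diff] `prepare` above stores the 128
// doublings 2^i * C of a commitment, so a later multiplication by a 128-bit
// challenge reduces to additions over the set bits of the scalar. How such a
// table is consumed (`mul_via_table` is a hypothetical helper):
//
//     use ark_bls12_381::{G1Affine, G1Projective};
//     use ark_ec::ProjectiveCurve;
//     use ark_ff::Zero;
//
//     fn mul_via_table(table: &[G1Affine], s: u128) -> G1Projective {
//         let mut acc = G1Projective::zero();
//         for (i, base) in table.iter().enumerate().take(128) {
//             if (s >> i) & 1 == 1 {
//                 acc.add_assign_mixed(base); // acc += 2^i * C
//             }
//         }
//         acc
//     }
// ---------------------------------------------------------------------------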
+ pub enforced_degree_bounds: Option>, + + /// The maximum degree supported by the `UniversalParams` from which `self` was derived + pub max_degree: usize, +} + +impl CommitterKey { + /// Obtain powers for the underlying KZG10 construction + pub fn powers(&self) -> kzg10::Powers { + kzg10::Powers { + powers_of_g: self.powers_of_g.as_slice().into(), + powers_of_gamma_g: self.powers_of_gamma_g.as_slice().into(), + } + } + + /// Obtain powers for committing to shifted polynomials. + pub fn shifted_powers( + &self, + degree_bound: impl Into>, + ) -> Option> { + match (&self.shifted_powers_of_g, &self.shifted_powers_of_gamma_g) { + (Some(shifted_powers_of_g), Some(shifted_powers_of_gamma_g)) => { + let max_bound = self + .enforced_degree_bounds + .as_ref() + .unwrap() + .last() + .unwrap(); + let (bound, powers_range) = if let Some(degree_bound) = degree_bound.into() { + assert!(self + .enforced_degree_bounds + .as_ref() + .unwrap() + .contains(°ree_bound)); + (degree_bound, (max_bound - degree_bound)..) + } else { + (*max_bound, 0..) + }; + + let ck = kzg10::Powers { + powers_of_g: shifted_powers_of_g[powers_range.clone()].into(), + powers_of_gamma_g: shifted_powers_of_gamma_g[&bound].clone().into(), + }; + + Some(ck) + } + + (_, _) => None, + } + } +} + +impl PCCommitterKey for CommitterKey { + fn max_degree(&self) -> usize { + self.max_degree + } + + fn supported_degree(&self) -> usize { + self.powers_of_g.len() - 1 + } +} + +/// `VerifierKey` is used to check evaluation proofs for a given commitment. +#[derive(Derivative)] +#[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] +pub struct VerifierKey { + /// The generator of G1. + pub g: E::G1Affine, + + /// The generator of G1 that is used for making a commitment hiding. + pub gamma_g: E::G1Affine, + + /// The generator of G2. + pub h: E::G2Affine, + + /// \beta times the generator of G2. + pub beta_h: E::G2Affine, + + /// The generator of G2, prepared for use in pairings. + pub prepared_h: E::G2Prepared, + + /// The \beta times the generator of G2, prepared for use in pairings. + pub prepared_beta_h: E::G2Prepared, + + /// Pairs a degree_bound with its corresponding G2 element, which has been prepared for use in pairings. + /// Each pair is in the form `(degree_bound, \beta^{degree_bound - max_degree} h),` where `h` is the generator of G2 above + pub degree_bounds_and_neg_powers_of_h: Option>, + + /// The maximum degree supported by the trimmed parameters that `self` is + /// a part of. + pub supported_degree: usize, + + /// The maximum degree supported by the `UniversalParams` `self` was derived + /// from. + pub max_degree: usize, +} + +impl VerifierKey { + /// Find the appropriate shift for the degree bound. 
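// ---------------------------------------------------------------------------
// [Editor's note, not part of the diff] `degree_bounds_and_neg_powers_of_h` is
// kept sorted by degree bound, so `get_shift_power` below is a binary search
// that succeeds only for bounds explicitly enforced at trim time. Plain-Rust
// analogue of the lookup:
//
//     let v: Vec<(usize, &str)> = vec![(2, "h2"), (8, "h8"), (16, "h16")];
//     let find =
//         |d: usize| v.binary_search_by(|(b, _)| b.cmp(&d)).ok().map(|i| v[i].1);
//     assert_eq!(find(8), Some("h8"));
//     assert_eq!(find(9), None); // un-enforced bounds are rejected
// ---------------------------------------------------------------------------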
+ pub fn get_shift_power(&self, degree_bound: usize) -> Option { + self.degree_bounds_and_neg_powers_of_h + .as_ref() + .and_then(|v| { + v.binary_search_by(|(d, _)| d.cmp(°ree_bound)) + .ok() + .map(|i| v[i].1.clone().into()) + }) + } +} + +impl CanonicalSerialize for VerifierKey { + fn serialize(&self, mut writer: W) -> Result<(), SerializationError> { + self.g.serialize(&mut writer)?; + self.gamma_g.serialize(&mut writer)?; + self.h.serialize(&mut writer)?; + self.beta_h.serialize(&mut writer)?; + self.degree_bounds_and_neg_powers_of_h + .serialize(&mut writer)?; + self.supported_degree.serialize(&mut writer)?; + self.max_degree.serialize(&mut writer) + } + + fn serialized_size(&self) -> usize { + self.g.serialized_size() + + self.gamma_g.serialized_size() + + self.h.serialized_size() + + self.beta_h.serialized_size() + + self.degree_bounds_and_neg_powers_of_h.serialized_size() + + self.supported_degree.serialized_size() + + self.max_degree.serialized_size() + } + + fn serialize_uncompressed(&self, mut writer: W) -> Result<(), SerializationError> { + self.g.serialize_uncompressed(&mut writer)?; + self.gamma_g.serialize_uncompressed(&mut writer)?; + self.h.serialize_uncompressed(&mut writer)?; + self.beta_h.serialize_uncompressed(&mut writer)?; + self.degree_bounds_and_neg_powers_of_h + .serialize_uncompressed(&mut writer)?; + self.supported_degree.serialize_uncompressed(&mut writer)?; + self.max_degree.serialize_uncompressed(&mut writer) + } + + fn serialize_unchecked(&self, mut writer: W) -> Result<(), SerializationError> { + self.g.serialize_unchecked(&mut writer)?; + self.gamma_g.serialize_unchecked(&mut writer)?; + self.h.serialize_unchecked(&mut writer)?; + self.beta_h.serialize_unchecked(&mut writer)?; + self.degree_bounds_and_neg_powers_of_h + .serialize_unchecked(&mut writer)?; + self.supported_degree.serialize_unchecked(&mut writer)?; + self.max_degree.serialize_unchecked(&mut writer) + } + + fn uncompressed_size(&self) -> usize { + self.g.uncompressed_size() + + self.gamma_g.uncompressed_size() + + self.h.uncompressed_size() + + self.beta_h.uncompressed_size() + + self.degree_bounds_and_neg_powers_of_h.uncompressed_size() + + self.supported_degree.uncompressed_size() + + self.max_degree.uncompressed_size() + } +} + +impl CanonicalDeserialize for VerifierKey { + fn deserialize(mut reader: R) -> Result { + let g = E::G1Affine::deserialize(&mut reader)?; + let gamma_g = E::G1Affine::deserialize(&mut reader)?; + let h = E::G2Affine::deserialize(&mut reader)?; + let beta_h = E::G2Affine::deserialize(&mut reader)?; + let degree_bounds_and_neg_powers_of_h = + Option::>::deserialize(&mut reader)?; + let supported_degree = usize::deserialize(&mut reader)?; + let max_degree = usize::deserialize(&mut reader)?; + + let prepared_h = E::G2Prepared::from(h.clone()); + let prepared_beta_h = E::G2Prepared::from(beta_h.clone()); + + Ok(Self { + g, + gamma_g, + h, + beta_h, + prepared_h, + prepared_beta_h, + degree_bounds_and_neg_powers_of_h, + supported_degree, + max_degree, + }) + } + + fn deserialize_uncompressed(mut reader: R) -> Result { + let g = E::G1Affine::deserialize_uncompressed(&mut reader)?; + let gamma_g = E::G1Affine::deserialize_uncompressed(&mut reader)?; + let h = E::G2Affine::deserialize_uncompressed(&mut reader)?; + let beta_h = E::G2Affine::deserialize_uncompressed(&mut reader)?; + let degree_bounds_and_neg_powers_of_h = + Option::>::deserialize_uncompressed(&mut reader)?; + let supported_degree = usize::deserialize_uncompressed(&mut reader)?; + let max_degree = 
usize::deserialize_uncompressed(&mut reader)?; + + let prepared_h = E::G2Prepared::from(h.clone()); + let prepared_beta_h = E::G2Prepared::from(beta_h.clone()); + + Ok(Self { + g, + gamma_g, + h, + beta_h, + prepared_h, + prepared_beta_h, + degree_bounds_and_neg_powers_of_h, + supported_degree, + max_degree, + }) + } + + fn deserialize_unchecked(mut reader: R) -> Result { + let g = E::G1Affine::deserialize_unchecked(&mut reader)?; + let gamma_g = E::G1Affine::deserialize_unchecked(&mut reader)?; + let h = E::G2Affine::deserialize_unchecked(&mut reader)?; + let beta_h = E::G2Affine::deserialize_unchecked(&mut reader)?; + let degree_bounds_and_neg_powers_of_h = + Option::>::deserialize_unchecked(&mut reader)?; + let supported_degree = usize::deserialize_unchecked(&mut reader)?; + let max_degree = usize::deserialize_unchecked(&mut reader)?; + + let prepared_h = E::G2Prepared::from(h.clone()); + let prepared_beta_h = E::G2Prepared::from(beta_h.clone()); + + Ok(Self { + g, + gamma_g, + h, + beta_h, + prepared_h, + prepared_beta_h, + degree_bounds_and_neg_powers_of_h, + supported_degree, + max_degree, + }) + } +} + +impl PCVerifierKey for VerifierKey { + fn max_degree(&self) -> usize { + self.max_degree + } + + fn supported_degree(&self) -> usize { + self.supported_degree + } +} + +/// Nothing to do to prepare this verifier key (for now). +pub type PreparedVerifierKey = VerifierKey; + +impl PCPreparedVerifierKey> for PreparedVerifierKey { + /// prepare `PreparedVerifierKey` from `VerifierKey` + fn prepare(vk: &VerifierKey) -> Self { + vk.clone() + } +} + +/// Evaluation proof at a query set. +#[derive(Derivative)] +#[derivative( + Default(bound = ""), + Hash(bound = ""), + Clone(bound = ""), + Debug(bound = ""), + PartialEq(bound = ""), + Eq(bound = "") +)] +pub struct BatchProof(pub(crate) Vec>); diff --git a/arkworks/poly-commit/src/sonic_pc/mod.rs b/arkworks/poly-commit/src/sonic_pc/mod.rs new file mode 100644 index 00000000..cc5087b5 --- /dev/null +++ b/arkworks/poly-commit/src/sonic_pc/mod.rs @@ -0,0 +1,908 @@ +use crate::{kzg10, PCCommitterKey}; +use crate::{BTreeMap, BTreeSet, String, ToString, Vec}; +use crate::{BatchLCProof, Error, Evaluations, QuerySet, UVPolynomial}; +use crate::{LabeledCommitment, LabeledPolynomial, LinearCombination}; +use crate::{PCRandomness, PCUniversalParams, PolynomialCommitment}; + +use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve}; +use ark_ff::{One, PrimeField, UniformRand, Zero}; +use ark_std::rand::RngCore; +use ark_std::{convert::TryInto, marker::PhantomData, ops::Div, vec}; + +mod data_structures; +pub use data_structures::*; + +/// Polynomial commitment based on [[KZG10]][kzg], with degree enforcement and +/// batching taken from [[MBKM19, “Sonic”]][sonic] (more precisely, their +/// counterparts in [[Gabizon19, “AuroraLight”]][al] that avoid negative G1 powers). +/// The (optional) hiding property of the commitment scheme follows the approach +/// described in [[CHMMVW20, “Marlin”]][marlin]. +/// +/// [kzg]: http://cacr.uwaterloo.ca/techreports/2010/cacr2010-10.pdf +/// [sonic]: https://eprint.iacr.org/2019/099 +/// [al]: https://eprint.iacr.org/2019/601 +/// [marlin]: https://eprint.iacr.org/2019/1047 +pub struct SonicKZG10> { + _engine: PhantomData, + _poly: PhantomData

<P>,
+}
+
+impl<E: PairingEngine, P: UVPolynomial<E::Fr>> SonicKZG10<E, P> {
+    fn accumulate_elems_individual_opening_challenges<'a>(
+        combined_comms: &mut BTreeMap<Option<usize>, E::G1Projective>,
+        combined_witness: &mut E::G1Projective,
+        combined_adjusted_witness: &mut E::G1Projective,
+        vk: &VerifierKey<E>,
+        commitments: impl IntoIterator<Item = &'a LabeledCommitment<Commitment<E>>>,
+        point: P::Point,
+        values: impl IntoIterator<Item = E::Fr>,
+        proof: &kzg10::Proof<E>,
+        opening_challenges: &dyn Fn(u64) -> E::Fr,
+        randomizer: Option<E::Fr>,
+    ) {
+        let acc_time = start_timer!(|| "Accumulating elements");
+
+        let mut opening_challenge_counter = 0;
+        let mut curr_challenge = opening_challenges(opening_challenge_counter);
+        opening_challenge_counter += 1;
+
+        // Keeps track of running combination of values
+        let mut combined_values = E::Fr::zero();
+
+        // Iterates through all of the commitments and accumulates common degree_bound elements in a BTreeMap
+        for (labeled_comm, value) in commitments.into_iter().zip(values) {
+            combined_values += &(value * &curr_challenge);
+
+            let comm = labeled_comm.commitment();
+            let degree_bound = labeled_comm.degree_bound();
+
+            // Applying opening challenge and randomness (used in batch_checking)
+            let mut comm_with_challenge: E::G1Projective = comm.0.scalar_mul(curr_challenge);
+
+            if let Some(randomizer) = randomizer {
+                comm_with_challenge = comm_with_challenge.mul(&randomizer.into_repr());
+            }
+
+            // Accumulate values in the BTreeMap
+            *combined_comms
+                .entry(degree_bound)
+                .or_insert(E::G1Projective::zero()) += &comm_with_challenge;
+            curr_challenge = opening_challenges(opening_challenge_counter);
+            opening_challenge_counter += 1;
+        }
+
+        // Push expected results into list of elems. Power will be the negative of the expected power
+        let mut witness: E::G1Projective = proof.w.into_projective();
+        let mut adjusted_witness = vk.g.scalar_mul(combined_values) - &proof.w.scalar_mul(point);
+        if let Some(random_v) = proof.random_v {
+            adjusted_witness += &vk.gamma_g.scalar_mul(random_v);
+        }
+
+        if let Some(randomizer) = randomizer {
+            witness = proof.w.scalar_mul(randomizer);
+            adjusted_witness = adjusted_witness.mul(&randomizer.into_repr());
+        }
+
+        *combined_witness += &witness;
+        *combined_adjusted_witness += &adjusted_witness;
+        end_timer!(acc_time);
+    }
+
+    fn check_elems(
+        combined_comms: BTreeMap<Option<usize>, E::G1Projective>,
+        combined_witness: E::G1Projective,
+        combined_adjusted_witness: E::G1Projective,
+        vk: &VerifierKey<E>,
+    ) -> Result<bool, Error> {
+        let check_time = start_timer!(|| "Checking elems");
+        let mut g1_projective_elems: Vec<E::G1Projective> = Vec::new();
+        let mut g2_prepared_elems: Vec<E::G2Prepared> = Vec::new();
+
+        for (degree_bound, comm) in combined_comms.into_iter() {
+            let shift_power = if let Some(degree_bound) = degree_bound {
+                vk.get_shift_power(degree_bound)
+                    .ok_or(Error::UnsupportedDegreeBound(degree_bound))?
+ } else { + vk.prepared_h.clone() + }; + + g1_projective_elems.push(comm); + g2_prepared_elems.push(shift_power); + } + + g1_projective_elems.push(-combined_adjusted_witness); + g2_prepared_elems.push(vk.prepared_h.clone()); + + g1_projective_elems.push(-combined_witness); + g2_prepared_elems.push(vk.prepared_beta_h.clone()); + + let g1_prepared_elems_iter = + E::G1Projective::batch_normalization_into_affine(g1_projective_elems.as_slice()) + .into_iter() + .map(|a| a.into()); + + let g1_g2_prepared: Vec<(E::G1Prepared, E::G2Prepared)> = + g1_prepared_elems_iter.zip(g2_prepared_elems).collect(); + let is_one: bool = E::product_of_pairings(g1_g2_prepared.iter()).is_one(); + end_timer!(check_time); + Ok(is_one) + } +} + +impl PolynomialCommitment for SonicKZG10 +where + E: PairingEngine, + P: UVPolynomial, + for<'a, 'b> &'a P: Div<&'b P, Output = P>, +{ + type UniversalParams = UniversalParams; + type CommitterKey = CommitterKey; + type VerifierKey = VerifierKey; + type PreparedVerifierKey = PreparedVerifierKey; + type Commitment = Commitment; + type PreparedCommitment = PreparedCommitment; + type Randomness = Randomness; + type Proof = kzg10::Proof; + type BatchProof = Vec; + type Error = Error; + + fn setup( + max_degree: usize, + _: Option, + rng: &mut R, + ) -> Result { + kzg10::KZG10::::setup(max_degree, true, rng).map_err(Into::into) + } + + fn trim( + pp: &Self::UniversalParams, + supported_degree: usize, + supported_hiding_bound: usize, + enforced_degree_bounds: Option<&[usize]>, + ) -> Result<(Self::CommitterKey, Self::VerifierKey), Self::Error> { + let trim_time = start_timer!(|| "Trimming public parameters"); + let neg_powers_of_h = &pp.neg_powers_of_h; + let max_degree = pp.max_degree(); + if supported_degree > max_degree { + return Err(Error::TrimmingDegreeTooLarge); + } + + let enforced_degree_bounds = enforced_degree_bounds.map(|bounds| { + let mut v = bounds.to_vec(); + v.sort(); + v.dedup(); + v + }); + + let (shifted_powers_of_g, shifted_powers_of_gamma_g, degree_bounds_and_neg_powers_of_h) = + if let Some(enforced_degree_bounds) = enforced_degree_bounds.as_ref() { + if enforced_degree_bounds.is_empty() { + (None, None, None) + } else { + let highest_enforced_degree_bound = *enforced_degree_bounds.last().unwrap(); + if highest_enforced_degree_bound > supported_degree { + return Err(Error::UnsupportedDegreeBound(highest_enforced_degree_bound)); + } + + let lowest_shift_degree = max_degree - highest_enforced_degree_bound; + + let shifted_ck_time = start_timer!(|| format!( + "Constructing `shifted_powers` of size {}", + max_degree - lowest_shift_degree + 1 + )); + + let shifted_powers_of_g = pp.powers_of_g[lowest_shift_degree..].to_vec(); + let mut shifted_powers_of_gamma_g = BTreeMap::new(); + // Also add degree 0. + for degree_bound in enforced_degree_bounds { + let shift_degree = max_degree - degree_bound; + let mut powers_for_degree_bound = vec![]; + for i in 0..=(supported_hiding_bound + 1) { + // We have an additional degree in `powers_of_gamma_g` beyond `powers_of_g`. 
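// ---------------------------------------------------------------------------
// [Editor's note, not part of the diff] `powers_of_gamma_g` extends one degree
// past `powers_of_g`: the unshifted key below is likewise built from the range
// 0..=(supported_hiding_bound + 1), and setup provides gamma powers for
// degrees 0..=max_degree + 1. The guard on the next line therefore admits
// exactly the indices with shift_degree + i <= max_degree + 1.
// ---------------------------------------------------------------------------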
+ if shift_degree + i < max_degree + 2 { + powers_for_degree_bound + .push(pp.powers_of_gamma_g[&(shift_degree + i)]); + } + } + shifted_powers_of_gamma_g.insert(*degree_bound, powers_for_degree_bound); + } + + end_timer!(shifted_ck_time); + + let neg_powers_of_h_time = start_timer!(|| format!( + "Constructing `neg_powers_of_h` of size {}", + enforced_degree_bounds.len() + )); + + let degree_bounds_and_neg_powers_of_h = enforced_degree_bounds + .iter() + .map(|bound| (*bound, neg_powers_of_h[&(max_degree - *bound)].clone())) + .collect(); + + end_timer!(neg_powers_of_h_time); + + ( + Some(shifted_powers_of_g), + Some(shifted_powers_of_gamma_g), + Some(degree_bounds_and_neg_powers_of_h), + ) + } + } else { + (None, None, None) + }; + + let powers_of_g = pp.powers_of_g[..=supported_degree].to_vec(); + let powers_of_gamma_g = (0..=(supported_hiding_bound + 1)) + .map(|i| pp.powers_of_gamma_g[&i]) + .collect(); + + let ck = CommitterKey { + powers_of_g, + powers_of_gamma_g, + shifted_powers_of_g, + shifted_powers_of_gamma_g, + enforced_degree_bounds, + max_degree, + }; + + let g = pp.powers_of_g[0]; + let h = pp.h; + let beta_h = pp.beta_h; + let gamma_g = pp.powers_of_gamma_g[&0]; + let prepared_h = (&pp.prepared_h).clone(); + let prepared_beta_h = (&pp.prepared_beta_h).clone(); + + let vk = VerifierKey { + g, + gamma_g, + h, + beta_h, + prepared_h, + prepared_beta_h, + degree_bounds_and_neg_powers_of_h, + supported_degree, + max_degree, + }; + + end_timer!(trim_time); + Ok((ck, vk)) + } + + /// Outputs a commitment to `polynomial`. + fn commit<'a>( + ck: &Self::CommitterKey, + polynomials: impl IntoIterator>, + rng: Option<&mut dyn RngCore>, + ) -> Result< + ( + Vec>, + Vec, + ), + Self::Error, + > + where + P: 'a, + { + let rng = &mut crate::optional_rng::OptionalRng(rng); + let commit_time = start_timer!(|| "Committing to polynomials"); + let mut labeled_comms: Vec> = Vec::new(); + let mut randomness: Vec = Vec::new(); + + for labeled_polynomial in polynomials { + let enforced_degree_bounds: Option<&[usize]> = ck + .enforced_degree_bounds + .as_ref() + .map(|bounds| bounds.as_slice()); + + kzg10::KZG10::::check_degrees_and_bounds( + ck.supported_degree(), + ck.max_degree, + enforced_degree_bounds, + &labeled_polynomial, + )?; + + let polynomial: &P = labeled_polynomial.polynomial(); + let degree_bound = labeled_polynomial.degree_bound(); + let hiding_bound = labeled_polynomial.hiding_bound(); + let label = labeled_polynomial.label(); + + let commit_time = start_timer!(|| format!( + "Polynomial {} of degree {}, degree bound {:?}, and hiding bound {:?}", + label, + polynomial.degree(), + degree_bound, + hiding_bound, + )); + + let powers = if let Some(degree_bound) = degree_bound { + ck.shifted_powers(degree_bound).unwrap() + } else { + ck.powers() + }; + + let (comm, rand) = kzg10::KZG10::commit(&powers, polynomial, hiding_bound, Some(rng))?; + + labeled_comms.push(LabeledCommitment::new( + label.to_string(), + comm, + degree_bound, + )); + randomness.push(rand); + end_timer!(commit_time); + } + + end_timer!(commit_time); + Ok((labeled_comms, randomness)) + } + + fn open_individual_opening_challenges<'a>( + ck: &Self::CommitterKey, + labeled_polynomials: impl IntoIterator>, + _commitments: impl IntoIterator>, + point: &'a P::Point, + opening_challenges: &dyn Fn(u64) -> E::Fr, + rands: impl IntoIterator, + _rng: Option<&mut dyn RngCore>, + ) -> Result + where + Self::Randomness: 'a, + Self::Commitment: 'a, + P: 'a, + { + let mut combined_polynomial = P::zero(); + let mut combined_rand = 
kzg10::Randomness::empty(); + + let mut opening_challenge_counter = 0; + + let mut curr_challenge = opening_challenges(opening_challenge_counter); + opening_challenge_counter += 1; + + for (polynomial, rand) in labeled_polynomials.into_iter().zip(rands) { + let enforced_degree_bounds: Option<&[usize]> = ck + .enforced_degree_bounds + .as_ref() + .map(|bounds| bounds.as_slice()); + + kzg10::KZG10::::check_degrees_and_bounds( + ck.supported_degree(), + ck.max_degree, + enforced_degree_bounds, + &polynomial, + )?; + + combined_polynomial += (curr_challenge, polynomial.polynomial()); + combined_rand += (curr_challenge, rand); + curr_challenge = opening_challenges(opening_challenge_counter); + opening_challenge_counter += 1; + } + + let proof_time = start_timer!(|| "Creating proof for polynomials"); + let proof = kzg10::KZG10::open(&ck.powers(), &combined_polynomial, *point, &combined_rand)?; + end_timer!(proof_time); + + Ok(proof) + } + + fn check_individual_opening_challenges<'a>( + vk: &Self::VerifierKey, + commitments: impl IntoIterator>, + point: &'a P::Point, + values: impl IntoIterator, + proof: &Self::Proof, + opening_challenges: &dyn Fn(u64) -> E::Fr, + _rng: Option<&mut dyn RngCore>, + ) -> Result + where + Self::Commitment: 'a, + { + let check_time = start_timer!(|| "Checking evaluations"); + let mut combined_comms: BTreeMap, E::G1Projective> = BTreeMap::new(); + let mut combined_witness: E::G1Projective = E::G1Projective::zero(); + let mut combined_adjusted_witness: E::G1Projective = E::G1Projective::zero(); + + Self::accumulate_elems_individual_opening_challenges( + &mut combined_comms, + &mut combined_witness, + &mut combined_adjusted_witness, + vk, + commitments, + *point, + values, + proof, + opening_challenges, + None, + ); + + let res = Self::check_elems( + combined_comms, + combined_witness, + combined_adjusted_witness, + vk, + ); + end_timer!(check_time); + res + } + + fn batch_check_individual_opening_challenges<'a, R: RngCore>( + vk: &Self::VerifierKey, + commitments: impl IntoIterator>, + query_set: &QuerySet, + values: &Evaluations, + proof: &Self::BatchProof, + opening_challenges: &dyn Fn(u64) -> E::Fr, + rng: &mut R, + ) -> Result + where + Self::Commitment: 'a, + { + let commitments: BTreeMap<_, _> = commitments.into_iter().map(|c| (c.label(), c)).collect(); + let mut query_to_labels_map = BTreeMap::new(); + + for (label, (point_label, point)) in query_set.iter() { + let labels = query_to_labels_map + .entry(point_label) + .or_insert((point, BTreeSet::new())); + labels.1.insert(label); + } + + assert_eq!(proof.len(), query_to_labels_map.len()); + + let mut randomizer = E::Fr::one(); + + let mut combined_comms: BTreeMap, E::G1Projective> = BTreeMap::new(); + let mut combined_witness: E::G1Projective = E::G1Projective::zero(); + let mut combined_adjusted_witness: E::G1Projective = E::G1Projective::zero(); + + for ((_point_label, (point, labels)), p) in query_to_labels_map.into_iter().zip(proof) { + let mut comms_to_combine: Vec<&'_ LabeledCommitment<_>> = Vec::new(); + let mut values_to_combine = Vec::new(); + for label in labels.into_iter() { + let commitment = commitments.get(label).ok_or(Error::MissingPolynomial { + label: label.to_string(), + })?; + + let v_i = values + .get(&(label.clone(), *point)) + .ok_or(Error::MissingEvaluation { + label: label.to_string(), + })?; + + comms_to_combine.push(commitment); + values_to_combine.push(*v_i); + } + + Self::accumulate_elems_individual_opening_challenges( + &mut combined_comms, + &mut combined_witness, + &mut 
combined_adjusted_witness, + vk, + comms_to_combine.into_iter(), + *point, + values_to_combine.into_iter(), + p, + opening_challenges, + Some(randomizer), + ); + + randomizer = u128::rand(rng).into(); + } + + Self::check_elems( + combined_comms, + combined_witness, + combined_adjusted_witness, + vk, + ) + } + + fn open_combinations_individual_opening_challenges<'a>( + ck: &Self::CommitterKey, + lc_s: impl IntoIterator>, + polynomials: impl IntoIterator>, + commitments: impl IntoIterator>, + query_set: &QuerySet, + opening_challenges: &dyn Fn(u64) -> E::Fr, + rands: impl IntoIterator, + rng: Option<&mut dyn RngCore>, + ) -> Result, Self::Error> + where + Self::Randomness: 'a, + Self::Commitment: 'a, + P: 'a, + { + let label_map = polynomials + .into_iter() + .zip(rands) + .zip(commitments) + .map(|((p, r), c)| (p.label(), (p, r, c))) + .collect::>(); + + let mut lc_polynomials = Vec::new(); + let mut lc_randomness = Vec::new(); + let mut lc_commitments = Vec::new(); + let mut lc_info = Vec::new(); + + for lc in lc_s { + let lc_label = lc.label().clone(); + let mut poly = P::zero(); + let mut degree_bound = None; + let mut hiding_bound = None; + let mut randomness = Self::Randomness::empty(); + let mut comm = E::G1Projective::zero(); + + let num_polys = lc.len(); + for (coeff, label) in lc.iter().filter(|(_, l)| !l.is_one()) { + let label: &String = label.try_into().expect("cannot be one!"); + let &(cur_poly, cur_rand, curr_comm) = + label_map.get(label).ok_or(Error::MissingPolynomial { + label: label.to_string(), + })?; + + if num_polys == 1 && cur_poly.degree_bound().is_some() { + assert!( + coeff.is_one(), + "Coefficient must be one for degree-bounded equations" + ); + degree_bound = cur_poly.degree_bound(); + } else if cur_poly.degree_bound().is_some() { + eprintln!("Degree bound when number of equations is non-zero"); + return Err(Self::Error::EquationHasDegreeBounds(lc_label)); + } + + // Some(_) > None, always. + hiding_bound = core::cmp::max(hiding_bound, cur_poly.hiding_bound()); + poly += (*coeff, cur_poly.polynomial()); + randomness += (*coeff, cur_rand); + comm += &curr_comm.commitment().0.scalar_mul(*coeff); + } + + let lc_poly = + LabeledPolynomial::new(lc_label.clone(), poly, degree_bound, hiding_bound); + lc_polynomials.push(lc_poly); + lc_randomness.push(randomness); + lc_commitments.push(comm); + lc_info.push((lc_label, degree_bound)); + } + + let comms: Vec = + E::G1Projective::batch_normalization_into_affine(&lc_commitments) + .into_iter() + .map(|c| kzg10::Commitment::(c)) + .collect(); + + let lc_commitments = lc_info + .into_iter() + .zip(comms) + .map(|((label, d), c)| LabeledCommitment::new(label, c, d)) + .collect::>(); + + let proof = Self::batch_open_individual_opening_challenges( + ck, + lc_polynomials.iter(), + lc_commitments.iter(), + &query_set, + opening_challenges, + lc_randomness.iter(), + rng, + )?; + Ok(BatchLCProof { proof, evals: None }) + } + + /// Checks that `values` are the true evaluations at `query_set` of the polynomials + /// committed in `labeled_commitments`. + fn check_combinations_individual_opening_challenges<'a, R: RngCore>( + vk: &Self::VerifierKey, + lc_s: impl IntoIterator>, + commitments: impl IntoIterator>, + query_set: &QuerySet, + evaluations: &Evaluations, + proof: &BatchLCProof, + opening_challenges: &dyn Fn(u64) -> E::Fr, + rng: &mut R, + ) -> Result + where + Self::Commitment: 'a, + { + let BatchLCProof { proof, .. 
+
+        Self::check_elems(
+            combined_comms,
+            combined_witness,
+            combined_adjusted_witness,
+            vk,
+        )
+    }
+
+    fn open_combinations_individual_opening_challenges<'a>(
+        ck: &Self::CommitterKey,
+        lc_s: impl IntoIterator<Item = &'a LinearCombination<E::Fr>>,
+        polynomials: impl IntoIterator<Item = &'a LabeledPolynomial<E::Fr, P>>,
+        commitments: impl IntoIterator<Item = &'a LabeledCommitment<Self::Commitment>>,
+        query_set: &QuerySet<P::Point>,
+        opening_challenges: &dyn Fn(u64) -> E::Fr,
+        rands: impl IntoIterator<Item = &'a Self::Randomness>,
+        rng: Option<&mut dyn RngCore>,
+    ) -> Result<BatchLCProof<E::Fr, P, Self>, Self::Error>
+    where
+        Self::Randomness: 'a,
+        Self::Commitment: 'a,
+        P: 'a,
+    {
+        let label_map = polynomials
+            .into_iter()
+            .zip(rands)
+            .zip(commitments)
+            .map(|((p, r), c)| (p.label(), (p, r, c)))
+            .collect::<BTreeMap<_, _>>();
+
+        let mut lc_polynomials = Vec::new();
+        let mut lc_randomness = Vec::new();
+        let mut lc_commitments = Vec::new();
+        let mut lc_info = Vec::new();
+
+        for lc in lc_s {
+            let lc_label = lc.label().clone();
+            let mut poly = P::zero();
+            let mut degree_bound = None;
+            let mut hiding_bound = None;
+            let mut randomness = Self::Randomness::empty();
+            let mut comm = E::G1Projective::zero();
+
+            let num_polys = lc.len();
+            for (coeff, label) in lc.iter().filter(|(_, l)| !l.is_one()) {
+                let label: &String = label.try_into().expect("cannot be one!");
+                let &(cur_poly, cur_rand, curr_comm) =
+                    label_map.get(label).ok_or(Error::MissingPolynomial {
+                        label: label.to_string(),
+                    })?;
+
+                if num_polys == 1 && cur_poly.degree_bound().is_some() {
+                    assert!(
+                        coeff.is_one(),
+                        "Coefficient must be one for degree-bounded equations"
+                    );
+                    degree_bound = cur_poly.degree_bound();
+                } else if cur_poly.degree_bound().is_some() {
+                    eprintln!("Degree bound when number of equations is non-zero");
+                    return Err(Self::Error::EquationHasDegreeBounds(lc_label));
+                }
+
+                // Some(_) > None, always.
+                hiding_bound = core::cmp::max(hiding_bound, cur_poly.hiding_bound());
+                poly += (*coeff, cur_poly.polynomial());
+                randomness += (*coeff, cur_rand);
+                comm += &curr_comm.commitment().0.scalar_mul(*coeff);
+            }
+
+            let lc_poly =
+                LabeledPolynomial::new(lc_label.clone(), poly, degree_bound, hiding_bound);
+            lc_polynomials.push(lc_poly);
+            lc_randomness.push(randomness);
+            lc_commitments.push(comm);
+            lc_info.push((lc_label, degree_bound));
+        }
+
+        let comms: Vec<Self::Commitment> =
+            E::G1Projective::batch_normalization_into_affine(&lc_commitments)
+                .into_iter()
+                .map(|c| kzg10::Commitment::<E>(c))
+                .collect();
+
+        let lc_commitments = lc_info
+            .into_iter()
+            .zip(comms)
+            .map(|((label, d), c)| LabeledCommitment::new(label, c, d))
+            .collect::<Vec<_>>();
+
+        let proof = Self::batch_open_individual_opening_challenges(
+            ck,
+            lc_polynomials.iter(),
+            lc_commitments.iter(),
+            &query_set,
+            opening_challenges,
+            lc_randomness.iter(),
+            rng,
+        )?;
+        Ok(BatchLCProof { proof, evals: None })
+    }
+
+    /// Checks that `values` are the true evaluations at `query_set` of the polynomials
+    /// committed in `labeled_commitments`.
+    fn check_combinations_individual_opening_challenges<'a, R: RngCore>(
+        vk: &Self::VerifierKey,
+        lc_s: impl IntoIterator<Item = &'a LinearCombination<E::Fr>>,
+        commitments: impl IntoIterator<Item = &'a LabeledCommitment<Self::Commitment>>,
+        query_set: &QuerySet<P::Point>,
+        evaluations: &Evaluations<P::Point, E::Fr>,
+        proof: &BatchLCProof<E::Fr, P, Self>,
+        opening_challenges: &dyn Fn(u64) -> E::Fr,
+        rng: &mut R,
+    ) -> Result<bool, Self::Error>
+    where
+        Self::Commitment: 'a,
+    {
+        let BatchLCProof { proof, .. } = proof;
+        let label_comm_map = commitments
+            .into_iter()
+            .map(|c| (c.label(), c))
+            .collect::<BTreeMap<_, _>>();
+
+        let mut lc_commitments = Vec::new();
+        let mut lc_info = Vec::new();
+        let mut evaluations = evaluations.clone();
+        for lc in lc_s {
+            let lc_label = lc.label().clone();
+            let num_polys = lc.len();
+
+            let mut degree_bound = None;
+            let mut combined_comm = E::G1Projective::zero();
+
+            for (coeff, label) in lc.iter() {
+                if label.is_one() {
+                    for (&(ref label, _), ref mut eval) in evaluations.iter_mut() {
+                        if label == &lc_label {
+                            **eval -= coeff;
+                        }
+                    }
+                } else {
+                    let label: &String = label.try_into().unwrap();
+                    let &cur_comm = label_comm_map.get(label).ok_or(Error::MissingPolynomial {
+                        label: label.to_string(),
+                    })?;
+
+                    if num_polys == 1 && cur_comm.degree_bound().is_some() {
+                        assert!(
+                            coeff.is_one(),
+                            "Coefficient must be one for degree-bounded equations"
+                        );
+                        degree_bound = cur_comm.degree_bound();
+                    } else if cur_comm.degree_bound().is_some() {
+                        return Err(Self::Error::EquationHasDegreeBounds(lc_label));
+                    }
+                    combined_comm += &cur_comm.commitment().0.scalar_mul(*coeff);
+                }
+            }
+
+            lc_commitments.push(combined_comm);
+            lc_info.push((lc_label, degree_bound));
+        }
+
+        let comms: Vec<Self::Commitment> =
+            E::G1Projective::batch_normalization_into_affine(&lc_commitments)
+                .into_iter()
+                .map(|c| kzg10::Commitment(c))
+                .collect();
+
+        let lc_commitments = lc_info
+            .into_iter()
+            .zip(comms)
+            .map(|((label, d), c)| LabeledCommitment::new(label, c, d))
+            .collect::<Vec<_>>();
+
+        Self::batch_check_individual_opening_challenges(
+            vk,
+            &lc_commitments,
+            &query_set,
+            &evaluations,
+            proof,
+            opening_challenges,
+            rng,
+        )
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    #![allow(non_camel_case_types)]
+    use super::SonicKZG10;
+    use ark_bls12_377::Bls12_377;
+    use ark_bls12_381::Bls12_381;
+    use ark_ec::PairingEngine;
+    use ark_ff::UniformRand;
+    use ark_poly::{univariate::DensePolynomial as DensePoly, UVPolynomial};
+    use ark_std::rand::rngs::StdRng;
+
+    type UniPoly_381 = DensePoly<<Bls12_381 as PairingEngine>::Fr>;
+    type UniPoly_377 = DensePoly<<Bls12_377 as PairingEngine>::Fr>;
+
+    type PC<E, P> = SonicKZG10<E, P>;
+    type PC_Bls12_377 = PC<Bls12_377, UniPoly_377>;
+    type PC_Bls12_381 = PC<Bls12_381, UniPoly_381>;
+
+    fn rand_poly<E: PairingEngine>(
+        degree: usize,
+        _: Option<usize>,
+        rng: &mut StdRng,
+    ) -> DensePoly<E::Fr> {
+        <DensePoly<E::Fr> as UVPolynomial<E::Fr>>::rand(degree, rng)
+    }
+
+    fn rand_point<E: PairingEngine>(_: Option<usize>, rng: &mut StdRng) -> E::Fr {
+        E::Fr::rand(rng)
+    }
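+
+    // Each test below drives the generic harness in `crate::tests` twice:
+    // once with the BLS12-377 instantiation and once with BLS12-381, using
+    // random dense univariate polynomials and random evaluation points.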
+
+    #[test]
+    fn single_poly_test() {
+        use crate::tests::*;
+        single_poly_test::<_, _, PC_Bls12_377>(
+            None,
+            rand_poly::<Bls12_377>,
+            rand_point::<Bls12_377>,
+        )
+        .expect("test failed for bls12-377");
+        single_poly_test::<_, _, PC_Bls12_381>(
+            None,
+            rand_poly::<Bls12_381>,
+            rand_point::<Bls12_381>,
+        )
+        .expect("test failed for bls12-381");
+    }
+
+    #[test]
+    fn quadratic_poly_degree_bound_multiple_queries_test() {
+        use crate::tests::*;
+        quadratic_poly_degree_bound_multiple_queries_test::<_, _, PC_Bls12_377>(
+            rand_poly::<Bls12_377>,
+            rand_point::<Bls12_377>,
+        )
+        .expect("test failed for bls12-377");
+        quadratic_poly_degree_bound_multiple_queries_test::<_, _, PC_Bls12_381>(
+            rand_poly::<Bls12_381>,
+            rand_point::<Bls12_381>,
+        )
+        .expect("test failed for bls12-381");
+    }
+
+    #[test]
+    fn linear_poly_degree_bound_test() {
+        use crate::tests::*;
+        linear_poly_degree_bound_test::<_, _, PC_Bls12_377>(
+            rand_poly::<Bls12_377>,
+            rand_point::<Bls12_377>,
+        )
+        .expect("test failed for bls12-377");
+        linear_poly_degree_bound_test::<_, _, PC_Bls12_381>(
+            rand_poly::<Bls12_381>,
+            rand_point::<Bls12_381>,
+        )
+        .expect("test failed for bls12-381");
+    }
+
+    #[test]
+    fn single_poly_degree_bound_test() {
+        use crate::tests::*;
+        single_poly_degree_bound_test::<_, _, PC_Bls12_377>(
+            rand_poly::<Bls12_377>,
+            rand_point::<Bls12_377>,
+        )
+        .expect("test failed for bls12-377");
+        single_poly_degree_bound_test::<_, _, PC_Bls12_381>(
+            rand_poly::<Bls12_381>,
+            rand_point::<Bls12_381>,
+        )
+        .expect("test failed for bls12-381");
+    }
+
+    #[test]
+    fn single_poly_degree_bound_multiple_queries_test() {
+        use crate::tests::*;
+        single_poly_degree_bound_multiple_queries_test::<_, _, PC_Bls12_377>(
+            rand_poly::<Bls12_377>,
+            rand_point::<Bls12_377>,
+        )
+        .expect("test failed for bls12-377");
+        single_poly_degree_bound_multiple_queries_test::<_, _, PC_Bls12_381>(
+            rand_poly::<Bls12_381>,
+            rand_point::<Bls12_381>,
+        )
+        .expect("test failed for bls12-381");
+    }
+
+    #[test]
+    fn two_polys_degree_bound_single_query_test() {
+        use crate::tests::*;
+        two_polys_degree_bound_single_query_test::<_, _, PC_Bls12_377>(
+            rand_poly::<Bls12_377>,
+            rand_point::<Bls12_377>,
+        )
+        .expect("test failed for bls12-377");
+        two_polys_degree_bound_single_query_test::<_, _, PC_Bls12_381>(
+            rand_poly::<Bls12_381>,
+            rand_point::<Bls12_381>,
+        )
+        .expect("test failed for bls12-381");
+    }
+
+    #[test]
+    fn full_end_to_end_test() {
+        use crate::tests::*;
+        full_end_to_end_test::<_, _, PC_Bls12_377>(
+            None,
+            rand_poly::<Bls12_377>,
+            rand_point::<Bls12_377>,
+        )
+        .expect("test failed for bls12-377");
+        println!("Finished bls12-377");
+        full_end_to_end_test::<_, _, PC_Bls12_381>(
+            None,
+            rand_poly::<Bls12_381>,
+            rand_point::<Bls12_381>,
+        )
+        .expect("test failed for bls12-381");
+        println!("Finished bls12-381");
+    }
+
+    #[test]
+    fn single_equation_test() {
+        use crate::tests::*;
+        single_equation_test::<_, _, PC_Bls12_377>(
+            None,
+            rand_poly::<Bls12_377>,
+            rand_point::<Bls12_377>,
+        )
+        .expect("test failed for bls12-377");
+        println!("Finished bls12-377");
+        single_equation_test::<_, _, PC_Bls12_381>(
+            None,
+            rand_poly::<Bls12_381>,
+            rand_point::<Bls12_381>,
+        )
+        .expect("test failed for bls12-381");
+        println!("Finished bls12-381");
+    }
+
+    #[test]
+    fn two_equation_test() {
+        use crate::tests::*;
+        two_equation_test::<_, _, PC_Bls12_377>(
+            None,
+            rand_poly::<Bls12_377>,
+            rand_point::<Bls12_377>,
+        )
+        .expect("test failed for bls12-377");
+        println!("Finished bls12-377");
+        two_equation_test::<_, _, PC_Bls12_381>(
+            None,
+            rand_poly::<Bls12_381>,
+            rand_point::<Bls12_381>,
+        )
+        .expect("test failed for bls12-381");
+        println!("Finished bls12-381");
+    }
+
+    #[test]
+    fn two_equation_degree_bound_test() {
+        use crate::tests::*;
+        two_equation_degree_bound_test::<_, _, PC_Bls12_377>(
+            rand_poly::<Bls12_377>,
+            rand_point::<Bls12_377>,
+        )
+        .expect("test failed for bls12-377");
+        println!("Finished bls12-377");
+        two_equation_degree_bound_test::<_, _, PC_Bls12_381>(
+            rand_poly::<Bls12_381>,
+            rand_point::<Bls12_381>,
+        )
+        .expect("test failed for bls12-381");
+        println!("Finished bls12-381");
+    }
+
+    #[test]
+    fn full_end_to_end_equation_test() {
+        use crate::tests::*;
+        full_end_to_end_equation_test::<_, _, PC_Bls12_377>(
+            None,
+            rand_poly::<Bls12_377>,
+            rand_point::<Bls12_377>,
+        )
+        .expect("test failed for bls12-377");
+        println!("Finished bls12-377");
+        full_end_to_end_equation_test::<_, _, PC_Bls12_381>(
+            None,
+            rand_poly::<Bls12_381>,
+            rand_point::<Bls12_381>,
+        )
+        .expect("test failed for bls12-381");
+        println!("Finished bls12-381");
+    }
+
+    #[test]
+    #[should_panic]
+    fn bad_degree_bound_test() {
+        use crate::tests::*;
+        bad_degree_bound_test::<_, _, PC_Bls12_377>(
+            rand_poly::<Bls12_377>,
+            rand_point::<Bls12_377>,
+        )
+        .expect("test failed for bls12-377");
+        println!("Finished bls12-377");
+        bad_degree_bound_test::<_, _, PC_Bls12_381>(
+            rand_poly::<Bls12_381>,
+            rand_point::<Bls12_381>,
+        )
+        .expect("test failed for bls12-381");
+        println!("Finished bls12-381");
+    }
+}
diff --git a/arkworks/r1cs-std/.github/ISSUE_TEMPLATE/bug_report.md b/arkworks/r1cs-std/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 00000000..e01ca941
--- /dev/null
+++ b/arkworks/r1cs-std/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,25 @@
+---
+name: Bug Report
+about: Create a report to help us squash bugs!
+
+---
+
+## Summary of Bug
+
+
+
+## Version
+
+
+
+## Steps to Reproduce
+
+
+
diff --git a/arkworks/r1cs-std/.github/ISSUE_TEMPLATE/feature_request.md b/arkworks/r1cs-std/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 00000000..7d5ed5df
--- /dev/null
+++ b/arkworks/r1cs-std/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,35 @@
+---
+name: Feature Request
+about: Create a proposal to request a feature
+
+---
+
+
+
+## Summary
+
+
+
+## Problem Definition
+
+
+
+## Proposal
+
+
+
+____
+
+#### For Admin Use
+
+- [ ] Not duplicate issue
+- [ ] Appropriate labels applied
+- [ ] Appropriate contributors tagged
+- [ ] Contributor assigned/self-assigned
diff --git a/arkworks/r1cs-std/.github/PULL_REQUEST_TEMPLATE.md b/arkworks/r1cs-std/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 00000000..37f2f6c1
--- /dev/null
+++ b/arkworks/r1cs-std/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,26 @@
+
+
+## Description
+
+
+
+closes: #XXXX
+
+---
+
+Before we can merge this PR, please make sure that all the following items have been
+checked off. If any of the checklist items are not applicable, please leave them but
+write a little note why.
+
+- [ ] Targeted PR against correct branch (master)
+- [ ] Linked to Github issue with discussion and accepted design OR have an explanation in the PR that describes this work.
+- [ ] Wrote unit tests
+- [ ] Updated relevant documentation in the code
+- [ ] Added a relevant changelog entry to the `Pending` section in `CHANGELOG.md`
+- [ ] Re-reviewed `Files changed` in the Github PR explorer
diff --git a/arkworks/r1cs-std/.github/dependabot.yml b/arkworks/r1cs-std/.github/dependabot.yml
new file mode 100644
index 00000000..d38ce390
--- /dev/null
+++ b/arkworks/r1cs-std/.github/dependabot.yml
@@ -0,0 +1,14 @@
+version: 2
+updates:
+- package-ecosystem: cargo
+  directory: "/"
+  schedule:
+    interval: daily
+  open-pull-requests-limit: 10
+  ignore:
+  - dependency-name: rand_xorshift
+    versions:
+    - 0.3.0
+  - dependency-name: rand
+    versions:
+    - 0.8.0
diff --git a/arkworks/r1cs-std/.github/workflows/ci.yml b/arkworks/r1cs-std/.github/workflows/ci.yml
new file mode 100644
index 00000000..4811ae6f
--- /dev/null
+++ b/arkworks/r1cs-std/.github/workflows/ci.yml
@@ -0,0 +1,142 @@
+name: CI
+on:
+  pull_request:
+  push:
+    branches:
+      - master
+env:
+  RUST_BACKTRACE: 1
+
+jobs:
+  style:
+    name: Check Style
+    runs-on: ubuntu-latest
+    steps:
+
+      - name: Checkout
+        uses: actions/checkout@v1
+      - name: Install Rust
+        uses: actions-rs/toolchain@v1
+        with:
+          profile: minimal
+          toolchain: stable
+          override: true
+          components: rustfmt
+
+      - name: cargo fmt --check
+        uses: 
actions-rs/cargo@v1 + with: + command: check + args: --examples --all-features --all + if: matrix.rust == 'stable' + + - name: Check benchmarks on nightly + uses: actions-rs/cargo@v1 + with: + command: check + args: --all-features --examples --all --benches + if: matrix.rust == 'nightly' + + - name: Test + uses: actions-rs/cargo@v1 + with: + command: test + args: "--all \ + --all-features \ + --exclude cp-benches " + + docs: + name: Check Documentation + runs-on: ubuntu-latest + steps: + + - name: Checkout + uses: actions/checkout@v1 + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + components: rustfmt + + - name: cargo doc --all --no-deps --document-private-items --all-features + uses: actions-rs/cargo@v1 + with: + command: doc + args: --all --no-deps --document-private-items --all-features + + check_no_std: + name: Check no_std + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Install Rust (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + target: thumbv6m-none-eabi + override: true + + - name: Install Rust ARM64 (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + target: aarch64-unknown-none + override: true + + - uses: actions/cache@v2 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: r1cs-std + run: | + cargo build --no-default-features --target aarch64-unknown-none + cargo check --examples --no-default-features --target aarch64-unknown-none diff --git a/arkworks/r1cs-std/.github/workflows/linkify_changelog.yml b/arkworks/r1cs-std/.github/workflows/linkify_changelog.yml new file mode 100644 index 00000000..8f3086e0 --- /dev/null +++ b/arkworks/r1cs-std/.github/workflows/linkify_changelog.yml @@ -0,0 +1,20 @@ +name: Linkify Changelog + +on: + workflow_dispatch + +jobs: + linkify: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Add links + run: python3 scripts/linkify_changelog.py CHANGELOG.md + - name: Commit + run: | + git config user.name github-actions + git config user.email github-actions@github.com + git add . + git commit -m "Linkify Changelog" + git push diff --git a/arkworks/r1cs-std/.gitignore b/arkworks/r1cs-std/.gitignore new file mode 100644 index 00000000..9b5e101e --- /dev/null +++ b/arkworks/r1cs-std/.gitignore @@ -0,0 +1,11 @@ +target +Cargo.lock +.DS_Store +.idea +*.iml +*.ipynb_checkpoints +*.pyc +*.sage.py +params +*.swp +*.swo diff --git a/arkworks/r1cs-std/.hooks/pre-commit b/arkworks/r1cs-std/.hooks/pre-commit new file mode 100755 index 00000000..8d4d19fe --- /dev/null +++ b/arkworks/r1cs-std/.hooks/pre-commit @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +rustfmt --version &>/dev/null +if [ $? != 0 ]; then + printf "[pre_commit] \033[0;31merror\033[0m: \"rustfmt\" not available. \n" + printf "[pre_commit] \033[0;31merror\033[0m: rustfmt can be installed via - \n" + printf "[pre_commit] $ rustup component add rustfmt \n" + exit 1 +fi + +problem_files=() + +# collect ill-formatted files +for file in $(git diff --name-only --cached); do + if [ ${file: -3} == ".rs" ]; then + rustfmt +stable --check $file &>/dev/null + if [ $? != 0 ]; then + problem_files+=($file) + fi + fi +done + +if [ ${#problem_files[@]} == 0 ]; then + # done + printf "[pre_commit] rustfmt \033[0;32mok\033[0m \n" +else + # reformat the files that need it and re-stage them. 
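+    # Note: rather than aborting the commit, the hook rewrites the offending
+    # files in place and re-stages them, so the commit proceeds with
+    # rustfmt-clean contents.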
+ printf "[pre_commit] the following files were rustfmt'd before commit: \n" + for file in ${problem_files[@]}; do + rustfmt +stable $file + git add $file + printf "\033[0;32m $file\033[0m \n" + done +fi + +exit 0 diff --git a/arkworks/r1cs-std/CHANGELOG.md b/arkworks/r1cs-std/CHANGELOG.md new file mode 100644 index 00000000..1eaa4276 --- /dev/null +++ b/arkworks/r1cs-std/CHANGELOG.md @@ -0,0 +1,69 @@ +# CHANGELOG + +## Pending + +### Breaking changes + +### Features + +### Improvements + +### Bug Fixes + +## v0.3.0 + +### Breaking changes + +- [\#60](https://github.com/arkworks-rs/r1cs-std/pull/60) Rename `AllocatedBit` to `AllocatedBool` for consistency with the `Boolean` variable. + You can update downstream usage with `grep -rl 'AllocatedBit' . | xargs env LANG=C env LC_CTYPE=C sed -i '' 's/AllocatedBit/AllocatedBool/g'`. +- [\#65](https://github.com/arkworks-rs/r1cs-std/pull/65) Rename `Radix2Domain` in `r1cs-std` to `Radix2DomainVar`. + +### Features + +- [\#53](https://github.com/arkworks-rs/r1cs-std/pull/53) Add univariate evaluation domain and Lagrange interpolation. + +### Improvements + +- [\#65](https://github.com/arkworks-rs/r1cs-std/pull/65) Add support for non-constant coset offset in `Radix2DomainVar`. + +### Bug Fixes + +## v0.2.0 + +### Breaking changes + +- [\#12](https://github.com/arkworks-rs/r1cs-std/pull/12) Make the output of the `ToBitsGadget` impl for `FpVar` fixed-size +- [\#48](https://github.com/arkworks-rs/r1cs-std/pull/48) Add `Clone` trait bound to `CondSelectGadget`. + +### Features + +- [\#21](https://github.com/arkworks-rs/r1cs-std/pull/21) Add `UInt128` +- [\#50](https://github.com/arkworks-rs/r1cs-std/pull/50) Add `DensePolynomialVar` + +### Improvements + +- [\#5](https://github.com/arkworks-rs/r1cs-std/pull/5) Speedup BLS-12 pairing +- [\#13](https://github.com/arkworks-rs/r1cs-std/pull/13) Add `ToConstraintFieldGadget` to `ProjectiveVar` +- [\#15](https://github.com/arkworks-rs/r1cs-std/pull/15), #16 Allow `cs` to be `None` when converting a Montgomery point into a Twisted Edwards point +- [\#20](https://github.com/arkworks-rs/r1cs-std/pull/20) Add `CondSelectGadget` impl for `UInt`s +- [\#22](https://github.com/arkworks-rs/r1cs-std/pull/22) Reduce density of `three_bit_cond_neg_lookup` +- [\#23](https://github.com/arkworks-rs/r1cs-std/pull/23) Reduce allocations in `UInt`s +- [\#33](https://github.com/arkworks-rs/r1cs-std/pull/33) Speedup scalar multiplication by a constant +- [\#35](https://github.com/arkworks-rs/r1cs-std/pull/35) Construct a `FpVar` from bits +- [\#36](https://github.com/arkworks-rs/r1cs-std/pull/36) Implement `ToConstraintFieldGadget` for `Vec` +- [\#40](https://github.com/arkworks-rs/r1cs-std/pull/40), #43 Faster scalar multiplication for Short Weierstrass curves by relying on affine formulae +- [\#46](https://github.com/arkworks-rs/r1cs-std/pull/46) Add mux gadget as an auto-impl in `CondSelectGadget` to support random access of an array + +### Bug fixes + +- [\#8](https://github.com/arkworks-rs/r1cs-std/pull/8) Fix bug in `three_bit_cond_neg_lookup` when using a constant lookup bit +- [\#9](https://github.com/arkworks-rs/r1cs-std/pull/9) Fix bug in `short_weierstrass::ProjectiveVar::to_affine` +- [\#29](https://github.com/arkworks-rs/r1cs-std/pull/29) Fix `to_non_unique_bytes` for `BLS12::G1Prepared` +- 
[\#34](https://github.com/arkworks-rs/r1cs-std/pull/34) Fix `mul_by_inverse` for constants
+- [\#42](https://github.com/arkworks-rs/r1cs-std/pull/42) Fix regression in `mul_by_inverse` constraint count
+- [\#47](https://github.com/arkworks-rs/r1cs-std/pull/47) Compile with `panic='abort'` in release mode, for safety of the library across FFI boundaries
+- [\#57](https://github.com/arkworks-rs/r1cs-std/pull/57) Clean up `UInt` docs
+
+## v0.1.0
+
+Initial release
diff --git a/arkworks/r1cs-std/CONTRIBUTING.md b/arkworks/r1cs-std/CONTRIBUTING.md
new file mode 100644
index 00000000..d656528f
--- /dev/null
+++ b/arkworks/r1cs-std/CONTRIBUTING.md
@@ -0,0 +1,65 @@
+# Contributing
+
+Thank you for considering making contributions to `arkworks-rs/r1cs-std`!
+
+Contributing to this repo can be done in several forms, such as participating in discussion or proposing code changes.
+To ensure a smooth workflow for all contributors, the following general procedure for contributing has been established:
+
+1) Either open or find an issue you'd like to help with
+2) Participate in thoughtful discussion on that issue
+3) If you would like to contribute:
+   * If the issue is a feature proposal, ensure that the proposal has been accepted
+   * Ensure that nobody else has already begun working on this issue.
+     If they have, please try to contact them to collaborate
+   * If nobody has been assigned to the issue and you would like to work on it, make a comment on the issue to inform the community of your intention to begin work (so we can avoid duplicating effort)
+   * We suggest using standard Github best practices for contributing: fork the repo, branch from the HEAD of `main`, make some commits on your branch, and submit a PR from the branch to `main`.
+     More detail on this is below
+   * Be sure to include a relevant changelog entry in the `Pending` section of `CHANGELOG.md` (see the file for the log format)
+   * If the change is breaking, we may add migration instructions.
+
+Note that for very small or clear problems (such as typos), or for well-isolated improvements, it is not required to open an issue to submit a PR.
+But be aware that for more complex problems/features touching multiple parts of the codebase, if a PR is opened before an adequate design discussion has taken place in a GitHub issue, that PR runs a larger likelihood of being rejected.
+
+Looking for a good place to start contributing? How about checking out some good first issues?
+
+## Branch Structure
+
+`r1cs-std` has its default branch as `main`, which is where PRs are merged into. Releases will be made periodically, on no set schedule.
+All other branches should be assumed to be miscellaneous feature-development branches.
+
+All downstream users of the library should be using tagged versions of the library pulled from cargo.
+
+## How to work on a fork
+
+Please skip this section if you're familiar with contributing to open-source GitHub projects.
+
+First fork the repo from the GitHub UI, and clone it locally.
+Then in the repo, you want to add the repo you forked from as a new remote. You do this as:
+```bash
+git remote add upstream git@github.com:arkworks-rs/r1cs-std.git
+```
+
+Then the way you make code contributions is to first think of a branch name that describes your change.
+Then do the following:
+```bash
+git checkout main
+git pull upstream main
+git checkout -b $NEW_BRANCH_NAME
+```
+and then work as normal on that branch, and open a pull request to upstream `main` when you're done =)
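+
+If `main` advances while you work, one way to bring your branch up to date (using the `upstream` remote configured above) is:
+```bash
+git fetch upstream
+git rebase upstream/main
+```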
+
+## Updating documentation
+
+All PRs should aim to leave the code more documented than it started with.
+Please don't assume that it's easy to infer what the code is doing,
+as that is almost never the case for these complex protocols.
+(Even when you understand the paper!)
+
+It's often very useful to describe the high-level view of what a code block is doing,
+and to either refer to the relevant section of a paper or include a short proof/argument for why it makes sense, before the actual logic.
+
+## Performance improvements
+
+All performance improvements should be accompanied by improved benchmarks, or it should otherwise be clear that things have improved.
+For some areas of the codebase, performance roughly follows the number of field multiplications, but there are also many areas where
+hard-to-predict low-level system effects, such as cache locality and superscalar operations, become important for performance.
+Thus performance can often be non-intuitive and diverge from simply minimizing the number of arithmetic operations.
\ No newline at end of file
diff --git a/arkworks/r1cs-std/Cargo.toml b/arkworks/r1cs-std/Cargo.toml
new file mode 100644
index 00000000..d62ba1f2
--- /dev/null
+++ b/arkworks/r1cs-std/Cargo.toml
@@ -0,0 +1,39 @@
+[package]
+name = "ark-r1cs-std"
+version = "0.3.0"
+authors = [ "arkworks contributors" ]
+description = "A standard library for constraint system gadgets"
+homepage = "https://arkworks.rs"
+repository = "https://github.com/arkworks-rs/r1cs-std"
+documentation = "https://docs.rs/ark-r1cs-std/"
+keywords = ["zero-knowledge", "cryptography", "zkSNARK", "SNARK", "r1cs"]
+categories = ["cryptography"]
+include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"]
+license = "MIT/Apache-2.0"
+edition = "2018"
+
+[profile.release]
+panic = 'abort'
+
+[profile.dev]
+panic = 'abort'
+
+[dependencies]
+ark-ff = { path = "../algebra/ff", version = "^0.3.0", default-features = false }
+ark-ec = { path = "../algebra/ec", version = "^0.3.0", default-features = false }
+ark-std = { path = "../std", version = "^0.3.0", default-features = false }
+ark-relations = { path = "../snark/relations", version = "^0.3.0", default-features = false }
+
+derivative = { version = "2", features = ["use_core"] }
+tracing = { version = "0.1", default-features = false, features = [ "attributes" ] }
+num-bigint = {version = "0.4", default-features = false }
+num-traits = {version = "0.2", default-features = false }
+
+[dev-dependencies]
+ark-test-curves = { version = "^0.3.0", default-features = false, features = ["bls12_381_scalar_field", "mnt4_753_scalar_field"] }
+ark-poly = { path = "../algebra/poly", version = "^0.3.0", default-features = false }
+
+[features]
+default = ["std"]
+std = [ "ark-ff/std", "ark-relations/std", "ark-std/std", "num-bigint/std" ]
+parallel = [ "std", "ark-ff/parallel", "ark-std/parallel"]
diff --git a/arkworks/r1cs-std/LICENSE-APACHE b/arkworks/r1cs-std/LICENSE-APACHE
new file mode 100644
index 00000000..16fe87b0
--- /dev/null
+++ b/arkworks/r1cs-std/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/arkworks/r1cs-std/LICENSE-MIT b/arkworks/r1cs-std/LICENSE-MIT new file mode 100644 index 00000000..72dc60d8 --- /dev/null +++ b/arkworks/r1cs-std/LICENSE-MIT @@ -0,0 +1,19 @@ +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/arkworks/r1cs-std/README.md b/arkworks/r1cs-std/README.md new file mode 100644 index 00000000..9312c852 --- /dev/null +++ b/arkworks/r1cs-std/README.md @@ -0,0 +1,53 @@ +

<h1 align="center">ark-r1cs-std</h1>
+
+
+The arkworks ecosystem consists of Rust libraries for designing and working with __zero knowledge succinct non-interactive arguments (zkSNARKs)__. This repository contains efficient implementations of constraint "gadgets" that enable checking common computations inside SNARKs, such as bit operations, finite field arithmetic, elliptic curve arithmetic, and pairings.
+
+This library is released under the MIT License and the Apache v2 License (see [License](#license)).
+
+**WARNING:** This is an academic proof-of-concept prototype, and in particular has not received careful code review. This implementation is NOT ready for production use.
+
+## Build guide
+
+The library compiles on the `stable` toolchain of the Rust compiler. To install the latest version of Rust, first install `rustup` by following the instructions [here](https://rustup.rs/), or via your platform's package manager. Once `rustup` is installed, install the Rust toolchain by invoking:
+```bash
+rustup install stable
+```
+
+After that, use `cargo`, the standard Rust build tool, to build the library:
+```bash
+git clone https://github.com/arkworks-rs/r1cs-std.git
+cargo build --release
+```
+
+This library comes with unit tests for each of the provided crates. Run the tests with:
+```bash
+cargo test
+```
+
+## License
+
+This library is licensed under either of the following licenses, at your discretion.
+
+ * Apache License Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
+ * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
+
+Unless you explicitly state otherwise, any contribution submitted for inclusion in this library by you shall be dual licensed as above (as defined in the Apache v2 License), without any additional terms or conditions.
+
+## Acknowledgements
+
+This work was supported by:
+a Google Faculty Award;
+the National Science Foundation;
+the UC Berkeley Center for Long-Term Cybersecurity;
+and donations from the Ethereum Foundation, the Interchain Foundation, and Qtum.
+
+An earlier version of this library was developed as part of the paper *"[ZEXE: Enabling Decentralized Private Computation][zexe]"*.
+
+[zexe]: https://ia.cr/2018/962
\ No newline at end of file
diff --git a/arkworks/r1cs-std/rustfmt.toml b/arkworks/r1cs-std/rustfmt.toml
new file mode 100644
index 00000000..71712138
--- /dev/null
+++ b/arkworks/r1cs-std/rustfmt.toml
@@ -0,0 +1,9 @@
+reorder_imports = true
+wrap_comments = true
+normalize_comments = true
+use_try_shorthand = true
+match_block_trailing_comma = true
+use_field_init_shorthand = true
+edition = "2018"
+condense_wildcard_suffixes = true
+merge_imports = true
diff --git a/arkworks/r1cs-std/scripts/install-hook.sh b/arkworks/r1cs-std/scripts/install-hook.sh
new file mode 100755
index 00000000..eafcf818
--- /dev/null
+++ b/arkworks/r1cs-std/scripts/install-hook.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+# This script will install the provided directory ../.hooks as the hook
+# directory for the present repo. See there for hooks, including a pre-commit
+# hook that runs rustfmt on files before a commit.
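+#
+# Run it once after cloning (e.g. `bash scripts/install-hook.sh` from the
+# repository root); the hooks path is resolved relative to this script, so
+# the working directory does not matter.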
+ +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +HOOKS_DIR="${DIR}/../.hooks" + +git config core.hooksPath "$HOOKS_DIR" diff --git a/arkworks/r1cs-std/scripts/linkify_changelog.py b/arkworks/r1cs-std/scripts/linkify_changelog.py new file mode 100644 index 00000000..867ae14d --- /dev/null +++ b/arkworks/r1cs-std/scripts/linkify_changelog.py @@ -0,0 +1,31 @@ +import re +import sys +import fileinput +import os + +# Set this to the name of the repo, if you don't want it to be read from the filesystem. +# It assumes the changelog file is in the root of the repo. +repo_name = "" + +# This script goes through the provided file, and replaces any " \#", +# with the valid mark down formatted link to it. e.g. +# " [\#number](https://github.com/arkworks-rs/template/pull/) +# Note that if the number is for a an issue, github will auto-redirect you when you click the link. +# It is safe to run the script multiple times in succession. +# +# Example usage $ python3 linkify_changelog.py ../CHANGELOG.md +if len(sys.argv) < 2: + print("Must include path to changelog as the first argument to the script") + print("Example Usage: python3 linkify_changelog.py ../CHANGELOG.md") + exit() + +changelog_path = sys.argv[1] +if repo_name == "": + path = os.path.abspath(changelog_path) + components = path.split(os.path.sep) + repo_name = components[-2] + +for line in fileinput.input(inplace=True): + line = re.sub(r"\- #([0-9]*)", r"- [\\#\1](https://github.com/arkworks-rs/" + repo_name + r"/pull/\1)", line.rstrip()) + # edits the current file + print(line) \ No newline at end of file diff --git a/arkworks/r1cs-std/src/alloc.rs b/arkworks/r1cs-std/src/alloc.rs new file mode 100644 index 00000000..55aa5d2a --- /dev/null +++ b/arkworks/r1cs-std/src/alloc.rs @@ -0,0 +1,101 @@ +use crate::Vec; +use ark_ff::Field; +use ark_relations::r1cs::{Namespace, SynthesisError}; +use core::borrow::Borrow; + +/// Describes the mode that a variable should be allocated in within +/// a `ConstraintSystem`. +#[derive(Eq, PartialEq, Ord, PartialOrd, Debug, Copy, Clone)] +pub enum AllocationMode { + /// Indicate to the `ConstraintSystem` that the high-level variable should + /// be allocated as a constant. That is, no `Variable`s should be + /// generated. + Constant = 0, + + /// Indicate to the `ConstraintSystem` that the high-level variable should + /// be allocated as a public input to the `ConstraintSystem`. + Input = 1, + + /// Indicate to the `ConstraintSystem` that the high-level variable should + /// be allocated as a private witness to the `ConstraintSystem`. + Witness = 2, +} + +impl AllocationMode { + /// Outputs the maximum according to the relation `Constant < Input < + /// Witness`. + pub fn max(&self, other: Self) -> Self { + use AllocationMode::*; + match (self, other) { + (Constant, _) => other, + (Input, Constant) => *self, + (Input, _) => other, + (Witness, _) => *self, + } + } +} + +/// Specifies how variables of type `Self` should be allocated in a +/// `ConstraintSystem`. +pub trait AllocVar +where + Self: Sized, + V: ?Sized, +{ + /// Allocates a new variable of type `Self` in the `ConstraintSystem` `cs`. + /// The mode of allocation is decided by `mode`. + fn new_variable>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result; + + /// Allocates a new constant of type `Self` in the `ConstraintSystem` `cs`. + /// + /// This should *not* allocate any new variables or constraints in `cs`. 
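+    /// (Constants are typically folded into the coefficients of later
+    /// linear combinations, so they add no witnesses and no constraints.)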
+ #[tracing::instrument(target = "r1cs", skip(cs, t))] + fn new_constant( + cs: impl Into>, + t: impl Borrow, + ) -> Result { + Self::new_variable(cs, || Ok(t), AllocationMode::Constant) + } + + /// Allocates a new public input of type `Self` in the `ConstraintSystem` + /// `cs`. + #[tracing::instrument(target = "r1cs", skip(cs, f))] + fn new_input>( + cs: impl Into>, + f: impl FnOnce() -> Result, + ) -> Result { + Self::new_variable(cs, f, AllocationMode::Input) + } + + /// Allocates a new private witness of type `Self` in the `ConstraintSystem` + /// `cs`. + #[tracing::instrument(target = "r1cs", skip(cs, f))] + fn new_witness>( + cs: impl Into>, + f: impl FnOnce() -> Result, + ) -> Result { + Self::new_variable(cs, f, AllocationMode::Witness) + } +} + +/// This blanket implementation just allocates variables in `Self` +/// element by element. +impl> AllocVar<[I], F> for Vec { + fn new_variable>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + let mut vec = Vec::new(); + for value in f()?.borrow().iter() { + vec.push(A::new_variable(cs.clone(), || Ok(value), mode)?); + } + Ok(vec) + } +} diff --git a/arkworks/r1cs-std/src/bits/boolean.rs b/arkworks/r1cs-std/src/bits/boolean.rs new file mode 100644 index 00000000..3e25b3f6 --- /dev/null +++ b/arkworks/r1cs-std/src/bits/boolean.rs @@ -0,0 +1,1821 @@ +use ark_ff::{BitIteratorBE, Field, FpParameters, PrimeField}; + +use crate::{fields::fp::FpVar, prelude::*, Assignment, ToConstraintFieldGadget, Vec}; +use ark_relations::r1cs::{ + ConstraintSystemRef, LinearCombination, Namespace, SynthesisError, Variable, +}; +use core::borrow::Borrow; + +/// Represents a variable in the constraint system which is guaranteed +/// to be either zero or one. +/// +/// In general, one should prefer using `Boolean` instead of `AllocatedBool`, +/// as `Boolean` offers better support for constant values, and implements +/// more traits. +#[derive(Clone, Debug, Eq, PartialEq)] +#[must_use] +pub struct AllocatedBool { + variable: Variable, + cs: ConstraintSystemRef, +} + +pub(crate) fn bool_to_field(val: impl Borrow) -> F { + if *val.borrow() { + F::one() + } else { + F::zero() + } +} + +impl AllocatedBool { + /// Get the assigned value for `self`. + pub fn value(&self) -> Result { + let value = self.cs.assigned_value(self.variable).get()?; + if value.is_zero() { + Ok(false) + } else if value.is_one() { + Ok(true) + } else { + unreachable!("Incorrect value assigned: {:?}", value); + } + } + + /// Get the R1CS variable for `self`. + pub fn variable(&self) -> Variable { + self.variable + } + + /// Allocate a witness variable without a booleanity check. + fn new_witness_without_booleanity_check>( + cs: ConstraintSystemRef, + f: impl FnOnce() -> Result, + ) -> Result { + let variable = cs.new_witness_variable(|| f().map(bool_to_field))?; + Ok(Self { variable, cs }) + } + + /// Performs an XOR operation over the two operands, returning + /// an `AllocatedBool`. + #[tracing::instrument(target = "r1cs")] + pub fn xor(&self, b: &Self) -> Result { + let result = Self::new_witness_without_booleanity_check(self.cs.clone(), || { + Ok(self.value()? ^ b.value()?) + })?; + + // Constrain (a + a) * (b) = (a + b - c) + // Given that a and b are boolean constrained, if they + // are equal, the only solution for c is 0, and if they + // are different, the only solution for c is 1. 
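+        // On {0, 1}-valued a and b, a + b - 2ab is exactly a XOR b, as the
+        // derivation below shows.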
+ // + // ¬(a ∧ b) ∧ ¬(¬a ∧ ¬b) = c + // (1 - (a * b)) * (1 - ((1 - a) * (1 - b))) = c + // (1 - ab) * (1 - (1 - a - b + ab)) = c + // (1 - ab) * (a + b - ab) = c + // a + b - ab - (a^2)b - (b^2)a + (a^2)(b^2) = c + // a + b - ab - ab - ab + ab = c + // a + b - 2ab = c + // -2a * b = c - a - b + // 2a * b = a + b - c + // (a + a) * b = a + b - c + self.cs.enforce_constraint( + lc!() + self.variable + self.variable, + lc!() + b.variable, + lc!() + self.variable + b.variable - result.variable, + )?; + + Ok(result) + } + + /// Performs an AND operation over the two operands, returning + /// an `AllocatedBool`. + #[tracing::instrument(target = "r1cs")] + pub fn and(&self, b: &Self) -> Result { + let result = Self::new_witness_without_booleanity_check(self.cs.clone(), || { + Ok(self.value()? & b.value()?) + })?; + + // Constrain (a) * (b) = (c), ensuring c is 1 iff + // a AND b are both 1. + self.cs.enforce_constraint( + lc!() + self.variable, + lc!() + b.variable, + lc!() + result.variable, + )?; + + Ok(result) + } + + /// Performs an OR operation over the two operands, returning + /// an `AllocatedBool`. + #[tracing::instrument(target = "r1cs")] + pub fn or(&self, b: &Self) -> Result { + let result = Self::new_witness_without_booleanity_check(self.cs.clone(), || { + Ok(self.value()? | b.value()?) + })?; + + // Constrain (1 - a) * (1 - b) = (c), ensuring c is 1 iff + // a and b are both false, and otherwise c is 0. + self.cs.enforce_constraint( + lc!() + Variable::One - self.variable, + lc!() + Variable::One - b.variable, + lc!() + Variable::One - result.variable, + )?; + + Ok(result) + } + + /// Calculates `a AND (NOT b)`. + #[tracing::instrument(target = "r1cs")] + pub fn and_not(&self, b: &Self) -> Result { + let result = Self::new_witness_without_booleanity_check(self.cs.clone(), || { + Ok(self.value()? & !b.value()?) + })?; + + // Constrain (a) * (1 - b) = (c), ensuring c is 1 iff + // a is true and b is false, and otherwise c is 0. + self.cs.enforce_constraint( + lc!() + self.variable, + lc!() + Variable::One - b.variable, + lc!() + result.variable, + )?; + + Ok(result) + } + + /// Calculates `(NOT a) AND (NOT b)`. + #[tracing::instrument(target = "r1cs")] + pub fn nor(&self, b: &Self) -> Result { + let result = Self::new_witness_without_booleanity_check(self.cs.clone(), || { + Ok(!(self.value()? | b.value()?)) + })?; + + // Constrain (1 - a) * (1 - b) = (c), ensuring c is 1 iff + // a and b are both false, and otherwise c is 0. + self.cs.enforce_constraint( + lc!() + Variable::One - self.variable, + lc!() + Variable::One - b.variable, + lc!() + result.variable, + )?; + + Ok(result) + } +} + +impl AllocVar for AllocatedBool { + /// Produces a new variable of the appropriate kind + /// (instance or witness), with a booleanity check. + /// + /// N.B.: we could omit the booleanity check when allocating `self` + /// as a new public input, but that places an additional burden on + /// protocol designers. Better safe than sorry! + fn new_variable>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + if mode == AllocationMode::Constant { + let variable = if *f()?.borrow() { + Variable::One + } else { + Variable::Zero + }; + Ok(Self { variable, cs }) + } else { + let variable = if mode == AllocationMode::Input { + cs.new_input_variable(|| f().map(bool_to_field))? + } else { + cs.new_witness_variable(|| f().map(bool_to_field))? + }; + + // Constrain: (1 - a) * a = 0 + // This constrains a to be either 0 or 1. 
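+            // (A field has no zero divisors, so (1 - a) * a = 0 admits
+            // exactly the two solutions a = 0 and a = 1.)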
+ + cs.enforce_constraint(lc!() + Variable::One - variable, lc!() + variable, lc!())?; + + Ok(Self { variable, cs }) + } + } +} + +impl CondSelectGadget for AllocatedBool { + #[tracing::instrument(target = "r1cs")] + fn conditionally_select( + cond: &Boolean, + true_val: &Self, + false_val: &Self, + ) -> Result { + let res = Boolean::conditionally_select( + cond, + &true_val.clone().into(), + &false_val.clone().into(), + )?; + match res { + Boolean::Is(a) => Ok(a), + _ => unreachable!("Impossible"), + } + } +} + +/// Represents a boolean value in the constraint system which is guaranteed +/// to be either zero or one. +#[derive(Clone, Debug, Eq, PartialEq)] +#[must_use] +pub enum Boolean { + /// Existential view of the boolean variable. + Is(AllocatedBool), + /// Negated view of the boolean variable. + Not(AllocatedBool), + /// Constant (not an allocated variable). + Constant(bool), +} + +impl R1CSVar for Boolean { + type Value = bool; + + fn cs(&self) -> ConstraintSystemRef { + match self { + Self::Is(a) | Self::Not(a) => a.cs.clone(), + _ => ConstraintSystemRef::None, + } + } + + fn value(&self) -> Result { + match self { + Boolean::Constant(c) => Ok(*c), + Boolean::Is(ref v) => v.value(), + Boolean::Not(ref v) => v.value().map(|b| !b), + } + } +} + +impl Boolean { + /// The constant `true`. + pub const TRUE: Self = Boolean::Constant(true); + + /// The constant `false`. + pub const FALSE: Self = Boolean::Constant(false); + + /// Constructs a `LinearCombination` from `Self`'s variables according + /// to the following map. + /// + /// * `Boolean::Constant(true) => lc!() + Variable::One` + /// * `Boolean::Constant(false) => lc!()` + /// * `Boolean::Is(v) => lc!() + v.variable()` + /// * `Boolean::Not(v) => lc!() + Variable::One - v.variable()` + pub fn lc(&self) -> LinearCombination { + match self { + Boolean::Constant(false) => lc!(), + Boolean::Constant(true) => lc!() + Variable::One, + Boolean::Is(v) => v.variable().into(), + Boolean::Not(v) => lc!() + Variable::One - v.variable(), + } + } + + /// Constructs a `Boolean` vector from a slice of constant `u8`. + /// The `u8`s are decomposed in little-endian manner. + /// + /// This *does not* create any new variables or constraints. + /// + /// ``` + /// # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { + /// // We'll use the BLS12-381 scalar field for our constraints. + /// use ark_test_curves::bls12_381::Fr; + /// use ark_relations::r1cs::*; + /// use ark_r1cs_std::prelude::*; + /// + /// let cs = ConstraintSystem::::new_ref(); + /// let t = Boolean::::TRUE; + /// let f = Boolean::::FALSE; + /// + /// let bits = vec![f, t]; + /// let generated_bits = Boolean::constant_vec_from_bytes(&[2]); + /// bits[..2].enforce_equal(&generated_bits[..2])?; + /// assert!(cs.is_satisfied().unwrap()); + /// # Ok(()) + /// # } + /// ``` + pub fn constant_vec_from_bytes(values: &[u8]) -> Vec { + let mut bits = vec![]; + for byte in values { + for i in 0..8 { + bits.push(Self::Constant(((byte >> i) & 1u8) == 1u8)); + } + } + bits + } + + /// Constructs a constant `Boolean` with value `b`. + /// + /// This *does not* create any new variables or constraints. + /// ``` + /// # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { + /// // We'll use the BLS12-381 scalar field for our constraints. 
+ /// use ark_test_curves::bls12_381::Fr; + /// use ark_r1cs_std::prelude::*; + /// + /// let true_var = Boolean::::TRUE; + /// let false_var = Boolean::::FALSE; + /// + /// true_var.enforce_equal(&Boolean::constant(true))?; + /// false_var.enforce_equal(&Boolean::constant(false))?; + /// # Ok(()) + /// # } + /// ``` + pub fn constant(b: bool) -> Self { + Boolean::Constant(b) + } + + /// Negates `self`. + /// + /// This *does not* create any new variables or constraints. + /// ``` + /// # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { + /// // We'll use the BLS12-381 scalar field for our constraints. + /// use ark_test_curves::bls12_381::Fr; + /// use ark_relations::r1cs::*; + /// use ark_r1cs_std::prelude::*; + /// + /// let cs = ConstraintSystem::::new_ref(); + /// + /// let a = Boolean::new_witness(cs.clone(), || Ok(true))?; + /// let b = Boolean::new_witness(cs.clone(), || Ok(false))?; + /// + /// a.not().enforce_equal(&b)?; + /// b.not().enforce_equal(&a)?; + /// + /// a.not().enforce_equal(&Boolean::FALSE)?; + /// b.not().enforce_equal(&Boolean::TRUE)?; + /// + /// assert!(cs.is_satisfied().unwrap()); + /// # Ok(()) + /// # } + /// ``` + pub fn not(&self) -> Self { + match *self { + Boolean::Constant(c) => Boolean::Constant(!c), + Boolean::Is(ref v) => Boolean::Not(v.clone()), + Boolean::Not(ref v) => Boolean::Is(v.clone()), + } + } + + /// Outputs `self ^ other`. + /// + /// If at least one of `self` and `other` are constants, then this method + /// *does not* create any constraints or variables. + /// + /// ``` + /// # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { + /// // We'll use the BLS12-381 scalar field for our constraints. + /// use ark_test_curves::bls12_381::Fr; + /// use ark_relations::r1cs::*; + /// use ark_r1cs_std::prelude::*; + /// + /// let cs = ConstraintSystem::::new_ref(); + /// + /// let a = Boolean::new_witness(cs.clone(), || Ok(true))?; + /// let b = Boolean::new_witness(cs.clone(), || Ok(false))?; + /// + /// a.xor(&b)?.enforce_equal(&Boolean::TRUE)?; + /// b.xor(&a)?.enforce_equal(&Boolean::TRUE)?; + /// + /// a.xor(&a)?.enforce_equal(&Boolean::FALSE)?; + /// b.xor(&b)?.enforce_equal(&Boolean::FALSE)?; + /// + /// assert!(cs.is_satisfied().unwrap()); + /// # Ok(()) + /// # } + /// ``` + #[tracing::instrument(target = "r1cs")] + pub fn xor<'a>(&'a self, other: &'a Self) -> Result { + use Boolean::*; + match (self, other) { + (&Constant(false), x) | (x, &Constant(false)) => Ok(x.clone()), + (&Constant(true), x) | (x, &Constant(true)) => Ok(x.not()), + // a XOR (NOT b) = NOT(a XOR b) + (is @ &Is(_), not @ &Not(_)) | (not @ &Not(_), is @ &Is(_)) => { + Ok(is.xor(¬.not())?.not()) + } + // a XOR b = (NOT a) XOR (NOT b) + (&Is(ref a), &Is(ref b)) | (&Not(ref a), &Not(ref b)) => Ok(Is(a.xor(b)?)), + } + } + + /// Outputs `self | other`. + /// + /// If at least one of `self` and `other` are constants, then this method + /// *does not* create any constraints or variables. + /// + /// ``` + /// # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { + /// // We'll use the BLS12-381 scalar field for our constraints. 
+ /// use ark_test_curves::bls12_381::Fr; + /// use ark_relations::r1cs::*; + /// use ark_r1cs_std::prelude::*; + /// + /// let cs = ConstraintSystem::::new_ref(); + /// + /// let a = Boolean::new_witness(cs.clone(), || Ok(true))?; + /// let b = Boolean::new_witness(cs.clone(), || Ok(false))?; + /// + /// a.or(&b)?.enforce_equal(&Boolean::TRUE)?; + /// b.or(&a)?.enforce_equal(&Boolean::TRUE)?; + /// + /// a.or(&a)?.enforce_equal(&Boolean::TRUE)?; + /// b.or(&b)?.enforce_equal(&Boolean::FALSE)?; + /// + /// assert!(cs.is_satisfied().unwrap()); + /// # Ok(()) + /// # } + /// ``` + #[tracing::instrument(target = "r1cs")] + pub fn or<'a>(&'a self, other: &'a Self) -> Result { + use Boolean::*; + match (self, other) { + (&Constant(false), x) | (x, &Constant(false)) => Ok(x.clone()), + (&Constant(true), _) | (_, &Constant(true)) => Ok(Constant(true)), + // a OR b = NOT ((NOT a) AND (NOT b)) + (a @ &Is(_), b @ &Not(_)) | (b @ &Not(_), a @ &Is(_)) | (b @ &Not(_), a @ &Not(_)) => { + Ok(a.not().and(&b.not())?.not()) + } + (&Is(ref a), &Is(ref b)) => a.or(b).map(From::from), + } + } + + /// Outputs `self & other`. + /// + /// If at least one of `self` and `other` are constants, then this method + /// *does not* create any constraints or variables. + /// + /// ``` + /// # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { + /// // We'll use the BLS12-381 scalar field for our constraints. + /// use ark_test_curves::bls12_381::Fr; + /// use ark_relations::r1cs::*; + /// use ark_r1cs_std::prelude::*; + /// + /// let cs = ConstraintSystem::::new_ref(); + /// + /// let a = Boolean::new_witness(cs.clone(), || Ok(true))?; + /// let b = Boolean::new_witness(cs.clone(), || Ok(false))?; + /// + /// a.and(&a)?.enforce_equal(&Boolean::TRUE)?; + /// + /// a.and(&b)?.enforce_equal(&Boolean::FALSE)?; + /// b.and(&a)?.enforce_equal(&Boolean::FALSE)?; + /// b.and(&b)?.enforce_equal(&Boolean::FALSE)?; + /// + /// assert!(cs.is_satisfied().unwrap()); + /// # Ok(()) + /// # } + /// ``` + #[tracing::instrument(target = "r1cs")] + pub fn and<'a>(&'a self, other: &'a Self) -> Result { + use Boolean::*; + match (self, other) { + // false AND x is always false + (&Constant(false), _) | (_, &Constant(false)) => Ok(Constant(false)), + // true AND x is always x + (&Constant(true), x) | (x, &Constant(true)) => Ok(x.clone()), + // a AND (NOT b) + (&Is(ref is), &Not(ref not)) | (&Not(ref not), &Is(ref is)) => Ok(Is(is.and_not(not)?)), + // (NOT a) AND (NOT b) = a NOR b + (&Not(ref a), &Not(ref b)) => Ok(Is(a.nor(b)?)), + // a AND b + (&Is(ref a), &Is(ref b)) => Ok(Is(a.and(b)?)), + } + } + + /// Outputs `bits[0] & bits[1] & ... & bits.last().unwrap()`. + /// + /// ``` + /// # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { + /// // We'll use the BLS12-381 scalar field for our constraints. 
+    /// use ark_test_curves::bls12_381::Fr;
+    /// use ark_relations::r1cs::*;
+    /// use ark_r1cs_std::prelude::*;
+    ///
+    /// let cs = ConstraintSystem::<Fr>::new_ref();
+    ///
+    /// let a = Boolean::new_witness(cs.clone(), || Ok(true))?;
+    /// let b = Boolean::new_witness(cs.clone(), || Ok(false))?;
+    /// let c = Boolean::new_witness(cs.clone(), || Ok(true))?;
+    ///
+    /// Boolean::kary_and(&[a.clone(), b.clone(), c.clone()])?.enforce_equal(&Boolean::FALSE)?;
+    /// Boolean::kary_and(&[a.clone(), c.clone()])?.enforce_equal(&Boolean::TRUE)?;
+    ///
+    /// assert!(cs.is_satisfied().unwrap());
+    /// # Ok(())
+    /// # }
+    /// ```
+    #[tracing::instrument(target = "r1cs")]
+    pub fn kary_and(bits: &[Self]) -> Result<Self, SynthesisError> {
+        assert!(!bits.is_empty());
+        let mut cur: Option<Self> = None;
+        for next in bits {
+            cur = if let Some(b) = cur {
+                Some(b.and(next)?)
+            } else {
+                Some(next.clone())
+            };
+        }
+
+        Ok(cur.expect("should not be empty"))
+    }
+
+    /// Outputs `bits[0] | bits[1] | ... | bits.last().unwrap()`.
+    ///
+    /// ```
+    /// # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
+    /// // We'll use the BLS12-381 scalar field for our constraints.
+    /// use ark_test_curves::bls12_381::Fr;
+    /// use ark_relations::r1cs::*;
+    /// use ark_r1cs_std::prelude::*;
+    ///
+    /// let cs = ConstraintSystem::<Fr>::new_ref();
+    ///
+    /// let a = Boolean::new_witness(cs.clone(), || Ok(true))?;
+    /// let b = Boolean::new_witness(cs.clone(), || Ok(false))?;
+    /// let c = Boolean::new_witness(cs.clone(), || Ok(false))?;
+    ///
+    /// Boolean::kary_or(&[a.clone(), b.clone(), c.clone()])?.enforce_equal(&Boolean::TRUE)?;
+    /// Boolean::kary_or(&[a.clone(), c.clone()])?.enforce_equal(&Boolean::TRUE)?;
+    /// Boolean::kary_or(&[b.clone(), c.clone()])?.enforce_equal(&Boolean::FALSE)?;
+    ///
+    /// assert!(cs.is_satisfied().unwrap());
+    /// # Ok(())
+    /// # }
+    /// ```
+    #[tracing::instrument(target = "r1cs")]
+    pub fn kary_or(bits: &[Self]) -> Result<Self, SynthesisError> {
+        assert!(!bits.is_empty());
+        let mut cur: Option<Self> = None;
+        for next in bits {
+            cur = if let Some(b) = cur {
+                Some(b.or(next)?)
+            } else {
+                Some(next.clone())
+            };
+        }
+
+        Ok(cur.expect("should not be empty"))
+    }
+
+    /// Outputs `(bits[0] & bits[1] & ... & bits.last().unwrap()).not()`.
+    ///
+    /// ```
+    /// # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
+    /// // We'll use the BLS12-381 scalar field for our constraints.
+    /// use ark_test_curves::bls12_381::Fr;
+    /// use ark_relations::r1cs::*;
+    /// use ark_r1cs_std::prelude::*;
+    ///
+    /// let cs = ConstraintSystem::<Fr>::new_ref();
+    ///
+    /// let a = Boolean::new_witness(cs.clone(), || Ok(true))?;
+    /// let b = Boolean::new_witness(cs.clone(), || Ok(false))?;
+    /// let c = Boolean::new_witness(cs.clone(), || Ok(true))?;
+    ///
+    /// Boolean::kary_nand(&[a.clone(), b.clone(), c.clone()])?.enforce_equal(&Boolean::TRUE)?;
+    /// Boolean::kary_nand(&[a.clone(), c.clone()])?.enforce_equal(&Boolean::FALSE)?;
+    /// Boolean::kary_nand(&[b.clone(), c.clone()])?.enforce_equal(&Boolean::TRUE)?;
+    ///
+    /// assert!(cs.is_satisfied().unwrap());
+    /// # Ok(())
+    /// # }
+    /// ```
+    #[tracing::instrument(target = "r1cs")]
+    pub fn kary_nand(bits: &[Self]) -> Result<Self, SynthesisError> {
+        Ok(Self::kary_and(bits)?.not())
+    }
+
+    /// Enforces that `Self::kary_nand(bits).is_eq(&Boolean::TRUE)`.
+    ///
+    /// Informally, this means that at least one element in `bits` must be
+    /// `false`.
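+    ///
+    /// A minimal sketch of the intended behavior, exercised through the
+    /// public `kary_nand` (the field choice is illustrative):
+    ///
+    /// ```
+    /// # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
+    /// use ark_test_curves::bls12_381::Fr;
+    /// use ark_relations::r1cs::*;
+    /// use ark_r1cs_std::prelude::*;
+    ///
+    /// let cs = ConstraintSystem::<Fr>::new_ref();
+    /// let a = Boolean::new_witness(cs.clone(), || Ok(true))?;
+    /// let b = Boolean::new_witness(cs.clone(), || Ok(false))?;
+    /// // `b` is false, so the NAND of `[a, b]` is true.
+    /// Boolean::kary_nand(&[a, b])?.enforce_equal(&Boolean::TRUE)?;
+    /// assert!(cs.is_satisfied().unwrap());
+    /// # Ok(())
+    /// # }
+    /// ```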
+ #[tracing::instrument(target = "r1cs")] + fn enforce_kary_nand(bits: &[Self]) -> Result<(), SynthesisError> { + use Boolean::*; + let r = Self::kary_nand(bits)?; + match r { + Constant(true) => Ok(()), + Constant(false) => Err(SynthesisError::AssignmentMissing), + Is(_) | Not(_) => { + r.cs() + .enforce_constraint(r.lc(), lc!() + Variable::One, lc!() + Variable::One) + } + } + } + + /// Convert a little-endian bitwise representation of a field element to `FpVar` + #[tracing::instrument(target = "r1cs", skip(bits))] + pub fn le_bits_to_fp_var(bits: &[Self]) -> Result, SynthesisError> + where + F: PrimeField, + { + // Compute the value of the `FpVar` variable via double-and-add. + let mut value = None; + let cs = bits.cs(); + // Assign a value only when `cs` is in setup mode, or if we are constructing + // a constant. + let should_construct_value = (!cs.is_in_setup_mode()) || bits.is_constant(); + if should_construct_value { + let bits = bits.iter().map(|b| b.value().unwrap()).collect::>(); + let bytes = bits + .chunks(8) + .map(|c| { + let mut value = 0u8; + for (i, &bit) in c.iter().enumerate() { + value += (bit as u8) << i; + } + value + }) + .collect::>(); + value = Some(F::from_le_bytes_mod_order(&bytes)); + } + + if bits.is_constant() { + Ok(FpVar::constant(value.unwrap())) + } else { + let mut power = F::one(); + // Compute a linear combination for the new field variable, again + // via double and add. + let mut combined_lc = LinearCombination::zero(); + bits.iter().for_each(|b| { + combined_lc = &combined_lc + (power, b.lc()); + power.double_in_place(); + }); + // Allocate the new variable as a SymbolicLc + let variable = cs.new_lc(combined_lc)?; + // If the number of bits is less than the size of the field, + // then we do not need to enforce that the element is less than + // the modulus. + if bits.len() >= F::Params::MODULUS_BITS as usize { + Self::enforce_in_field_le(bits)?; + } + Ok(crate::fields::fp::AllocatedFp::new(value, variable, cs.clone()).into()) + } + } + + /// Enforces that `bits`, when interpreted as a integer, is less than + /// `F::characteristic()`, That is, interpret bits as a little-endian + /// integer, and enforce that this integer is "in the field Z_p", where + /// `p = F::characteristic()` . + #[tracing::instrument(target = "r1cs")] + pub fn enforce_in_field_le(bits: &[Self]) -> Result<(), SynthesisError> { + // `bits` < F::characteristic() <==> `bits` <= F::characteristic() -1 + let mut b = F::characteristic().to_vec(); + assert_eq!(b[0] % 2, 1); + b[0] -= 1; // This works, because the LSB is one, so there's no borrows. + let run = Self::enforce_smaller_or_equal_than_le(bits, b)?; + + // We should always end in a "run" of zeros, because + // the characteristic is an odd prime. So, this should + // be empty. + assert!(run.is_empty()); + + Ok(()) + } + + /// Enforces that `bits` is less than or equal to `element`, + /// when both are interpreted as (little-endian) integers. 
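+    ///
+    /// A minimal usage sketch (the bit pattern and bound below are
+    /// illustrative):
+    ///
+    /// ```
+    /// # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
+    /// use ark_test_curves::bls12_381::Fr;
+    /// use ark_relations::r1cs::*;
+    /// use ark_r1cs_std::prelude::*;
+    ///
+    /// let cs = ConstraintSystem::<Fr>::new_ref();
+    /// // 5 = 0b101, so its little-endian bits are [1, 0, 1].
+    /// let bits = Vec::<Boolean<Fr>>::new_witness(cs.clone(), || Ok(vec![true, false, true]))?;
+    /// // Enforce that the encoded integer is at most 7.
+    /// Boolean::enforce_smaller_or_equal_than_le(&bits, &[7u64])?;
+    /// assert!(cs.is_satisfied().unwrap());
+    /// # Ok(())
+    /// # }
+    /// ```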
+ #[tracing::instrument(target = "r1cs", skip(element))] + pub fn enforce_smaller_or_equal_than_le<'a>( + bits: &[Self], + element: impl AsRef<[u64]>, + ) -> Result, SynthesisError> { + let b: &[u64] = element.as_ref(); + + let mut bits_iter = bits.iter().rev(); // Iterate in big-endian + + // Runs of ones in r + let mut last_run = Boolean::constant(true); + let mut current_run = vec![]; + + let mut element_num_bits = 0; + for _ in BitIteratorBE::without_leading_zeros(b) { + element_num_bits += 1; + } + + if bits.len() > element_num_bits { + let mut or_result = Boolean::constant(false); + for should_be_zero in &bits[element_num_bits..] { + or_result = or_result.or(should_be_zero)?; + let _ = bits_iter.next().unwrap(); + } + or_result.enforce_equal(&Boolean::constant(false))?; + } + + for (b, a) in BitIteratorBE::without_leading_zeros(b).zip(bits_iter.by_ref()) { + if b { + // This is part of a run of ones. + current_run.push(a.clone()); + } else { + if !current_run.is_empty() { + // This is the start of a run of zeros, but we need + // to k-ary AND against `last_run` first. + + current_run.push(last_run.clone()); + last_run = Self::kary_and(¤t_run)?; + current_run.truncate(0); + } + + // If `last_run` is true, `a` must be false, or it would + // not be in the field. + // + // If `last_run` is false, `a` can be true or false. + // + // Ergo, at least one of `last_run` and `a` must be false. + Self::enforce_kary_nand(&[last_run.clone(), a.clone()])?; + } + } + assert!(bits_iter.next().is_none()); + + Ok(current_run) + } + + /// Conditionally selects one of `first` and `second` based on the value of + /// `self`: + /// + /// If `self.is_eq(&Boolean::TRUE)`, this outputs `first`; else, it outputs + /// `second`. + /// ``` + /// # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { + /// // We'll use the BLS12-381 scalar field for our constraints. 
+    /// use ark_test_curves::bls12_381::Fr;
+    /// use ark_relations::r1cs::*;
+    /// use ark_r1cs_std::prelude::*;
+    ///
+    /// let cs = ConstraintSystem::<Fr>::new_ref();
+    ///
+    /// let a = Boolean::new_witness(cs.clone(), || Ok(true))?;
+    /// let b = Boolean::new_witness(cs.clone(), || Ok(false))?;
+    ///
+    /// let cond = Boolean::new_witness(cs.clone(), || Ok(true))?;
+    ///
+    /// cond.select(&a, &b)?.enforce_equal(&Boolean::TRUE)?;
+    /// cond.select(&b, &a)?.enforce_equal(&Boolean::FALSE)?;
+    ///
+    /// assert!(cs.is_satisfied().unwrap());
+    /// # Ok(())
+    /// # }
+    /// ```
+    #[tracing::instrument(target = "r1cs", skip(first, second))]
+    pub fn select<T: CondSelectGadget<F>>(
+        &self,
+        first: &T,
+        second: &T,
+    ) -> Result<T, SynthesisError> {
+        T::conditionally_select(&self, first, second)
+    }
+}
+
+impl<F: Field> From<AllocatedBool<F>> for Boolean<F> {
+    fn from(b: AllocatedBool<F>) -> Self {
+        Boolean::Is(b)
+    }
+}
+
+impl<F: Field> AllocVar<bool, F> for Boolean<F> {
+    fn new_variable<T: Borrow<bool>>(
+        cs: impl Into<Namespace<F>>,
+        f: impl FnOnce() -> Result<T, SynthesisError>,
+        mode: AllocationMode,
+    ) -> Result<Self, SynthesisError> {
+        if mode == AllocationMode::Constant {
+            Ok(Boolean::Constant(*f()?.borrow()))
+        } else {
+            AllocatedBool::new_variable(cs, f, mode).map(Boolean::from)
+        }
+    }
+}
+
+impl<F: Field> EqGadget<F> for Boolean<F> {
+    #[tracing::instrument(target = "r1cs")]
+    fn is_eq(&self, other: &Self) -> Result<Boolean<F>, SynthesisError> {
+        // self | other | XNOR(self, other) | self == other
+        // -----|-------|-------------------|--------------
+        //   0  |   0   |         1         |      1
+        //   0  |   1   |         0         |      0
+        //   1  |   0   |         0         |      0
+        //   1  |   1   |         1         |      1
+        Ok(self.xor(other)?.not())
+    }
+
+    #[tracing::instrument(target = "r1cs")]
+    fn conditional_enforce_equal(
+        &self,
+        other: &Self,
+        condition: &Boolean<F>,
+    ) -> Result<(), SynthesisError> {
+        use Boolean::*;
+        let one = Variable::One;
+        let difference = match (self, other) {
+            // 1 == 1; 0 == 0
+            (Constant(true), Constant(true)) | (Constant(false), Constant(false)) => return Ok(()),
+            // false != true
+            (Constant(_), Constant(_)) => return Err(SynthesisError::Unsatisfiable),
+            // 1 - a
+            (Constant(true), Is(a)) | (Is(a), Constant(true)) => lc!() + one - a.variable(),
+            // a - 0 = a
+            (Constant(false), Is(a)) | (Is(a), Constant(false)) => lc!() + a.variable(),
+            // 1 - !a = 1 - (1 - a) = a
+            (Constant(true), Not(a)) | (Not(a), Constant(true)) => lc!() + a.variable(),
+            // !a - 0 = !a = 1 - a
+            (Constant(false), Not(a)) | (Not(a), Constant(false)) => lc!() + one - a.variable(),
+            // b - a,
+            (Is(a), Is(b)) => lc!() + b.variable() - a.variable(),
+            // !b - a = (1 - b) - a
+            (Is(a), Not(b)) | (Not(b), Is(a)) => lc!() + one - b.variable() - a.variable(),
+            // !b - !a = (1 - b) - (1 - a) = a - b,
+            (Not(a), Not(b)) => lc!() + a.variable() - b.variable(),
+        };
+
+        if condition != &Constant(false) {
+            let cs = self.cs().or(other.cs()).or(condition.cs());
+            cs.enforce_constraint(lc!() + difference, condition.lc(), lc!())?;
+        }
+        Ok(())
+    }
+
+    #[tracing::instrument(target = "r1cs")]
+    fn conditional_enforce_not_equal(
+        &self,
+        other: &Self,
+        should_enforce: &Boolean<F>,
+    ) -> Result<(), SynthesisError> {
+        use Boolean::*;
+        let one = Variable::One;
+        let difference = match (self, other) {
+            // 1 != 0; 0 != 1
+            (Constant(true), Constant(false)) | (Constant(false), Constant(true)) => return Ok(()),
+            // false == false and true == true
+            (Constant(_), Constant(_)) => return Err(SynthesisError::Unsatisfiable),
+            // 1 - a
+            (Constant(true), Is(a)) | (Is(a), Constant(true)) => lc!() + one - a.variable(),
+            // a - 0 = a
+            (Constant(false), Is(a)) | (Is(a), Constant(false)) => lc!() + a.variable(),
+            // 1 - !a = 1 - (1 - a) = a
+            (Constant(true), Not(a)) | (Not(a), Constant(true)) => lc!() + a.variable(),
+            // !a - 0 = !a = 1 - a
+            (Constant(false), Not(a)) | (Not(a), Constant(false)) => lc!() + one - a.variable(),
+            // b - a,
+            (Is(a), Is(b)) => lc!() + b.variable() - a.variable(),
+            // !b - a = (1 - b) - a
+            (Is(a), Not(b)) | (Not(b), Is(a)) => lc!() + one - b.variable() - a.variable(),
+            // !b - !a = (1 - b) - (1 - a) = a - b,
+            (Not(a), Not(b)) => lc!() + a.variable() - b.variable(),
+        };
+
+        if should_enforce != &Constant(false) {
+            let cs = self.cs().or(other.cs()).or(should_enforce.cs());
+            cs.enforce_constraint(difference, should_enforce.lc(), should_enforce.lc())?;
+        }
+        Ok(())
+    }
+}
+
+impl<F: Field> ToBytesGadget<F> for Boolean<F> {
+    /// Outputs `1u8` if `self` is true, and `0u8` otherwise.
+    #[tracing::instrument(target = "r1cs")]
+    fn to_bytes(&self) -> Result<Vec<UInt8<F>>, SynthesisError> {
+        let value = self.value().map(u8::from).ok();
+        let mut bits = [Boolean::FALSE; 8];
+        bits[0] = self.clone();
+        Ok(vec![UInt8 { bits, value }])
+    }
+}
+
+impl<F: PrimeField> ToConstraintFieldGadget<F> for Boolean<F> {
+    #[tracing::instrument(target = "r1cs")]
+    fn to_constraint_field(&self) -> Result<Vec<FpVar<F>>, SynthesisError> {
+        let var = From::from(self.clone());
+        Ok(vec![var])
+    }
+}
+
+impl<F: Field> CondSelectGadget<F> for Boolean<F> {
+    #[tracing::instrument(target = "r1cs")]
+    fn conditionally_select(
+        cond: &Boolean<F>,
+        true_val: &Self,
+        false_val: &Self,
+    ) -> Result<Self, SynthesisError> {
+        use Boolean::*;
+        match cond {
+            Constant(true) => Ok(true_val.clone()),
+            Constant(false) => Ok(false_val.clone()),
+            cond @ Not(_) => Self::conditionally_select(&cond.not(), false_val, true_val),
+            cond @ Is(_) => match (true_val, false_val) {
+                (x, &Constant(false)) => cond.and(x),
+                (&Constant(false), x) => cond.not().and(x),
+                (&Constant(true), x) => cond.or(x),
+                (x, &Constant(true)) => cond.not().or(x),
+                (a, b) => {
+                    let cs = cond.cs();
+                    let result: Boolean<F> =
+                        AllocatedBool::new_witness_without_booleanity_check(cs.clone(), || {
+                            let cond = cond.value()?;
+                            Ok(if cond { a.value()? } else { b.value()? })
+                        })?
+                        .into();
+                    // a = self; b = other; c = cond;
+                    //
+                    // r = c * a + (1 - c) * b
+                    // r = b + c * (a - b)
+                    // c * (a - b) = r - b
+                    //
+                    // If a, b, cond are all boolean, so is r.
+                    //
+                    // self | other | cond | result
+                    // -----|-------|------|-------
+                    //   0  |   0   |   1  |   0
+                    //   0  |   1   |   1  |   0
+                    //   1  |   0   |   1  |   1
+                    //   1  |   1   |   1  |   1
+                    //   0  |   0   |   0  |   0
+                    //   0  |   1   |   0  |   1
+                    //   1  |   0   |   0  |   0
+                    //   1  |   1   |   0  |   1
+                    cs.enforce_constraint(
+                        cond.lc(),
+                        lc!() + a.lc() - b.lc(),
+                        lc!() + result.lc() - b.lc(),
+                    )?;
+
+                    Ok(result)
+                }
+            },
+        }
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::{AllocatedBool, Boolean};
+    use crate::prelude::*;
+    use ark_ff::{BitIteratorBE, BitIteratorLE, Field, One, PrimeField, UniformRand, Zero};
+    use ark_relations::r1cs::{ConstraintSystem, Namespace, SynthesisError};
+    use ark_test_curves::bls12_381::Fr;
+
+    #[test]
+    fn test_boolean_to_byte() -> Result<(), SynthesisError> {
+        for val in [true, false].iter() {
+            let cs = ConstraintSystem::<Fr>::new_ref();
+            let a = Boolean::new_witness(cs.clone(), || Ok(*val))?;
+            let bytes = a.to_bytes()?;
+            assert_eq!(bytes.len(), 1);
+            let byte = &bytes[0];
+            assert_eq!(byte.value()?, *val as u8);
+
+            for (i, bit) in byte.bits.iter().enumerate() {
+                assert_eq!(bit.value()?, (byte.value()?
>> i) & 1 == 1); + } + } + Ok(()) + } + + #[test] + fn test_xor() -> Result<(), SynthesisError> { + for a_val in [false, true].iter().copied() { + for b_val in [false, true].iter().copied() { + let cs = ConstraintSystem::::new_ref(); + let a = AllocatedBool::new_witness(cs.clone(), || Ok(a_val))?; + let b = AllocatedBool::new_witness(cs.clone(), || Ok(b_val))?; + let c = AllocatedBool::xor(&a, &b)?; + assert_eq!(c.value()?, a_val ^ b_val); + + assert!(cs.is_satisfied().unwrap()); + assert_eq!(a.value()?, (a_val)); + assert_eq!(b.value()?, (b_val)); + assert_eq!(c.value()?, (a_val ^ b_val)); + } + } + Ok(()) + } + + #[test] + fn test_or() -> Result<(), SynthesisError> { + for a_val in [false, true].iter().copied() { + for b_val in [false, true].iter().copied() { + let cs = ConstraintSystem::::new_ref(); + let a = AllocatedBool::new_witness(cs.clone(), || Ok(a_val))?; + let b = AllocatedBool::new_witness(cs.clone(), || Ok(b_val))?; + let c = AllocatedBool::or(&a, &b)?; + assert_eq!(c.value()?, a_val | b_val); + + assert!(cs.is_satisfied().unwrap()); + assert_eq!(a.value()?, (a_val)); + assert_eq!(b.value()?, (b_val)); + assert_eq!(c.value()?, (a_val | b_val)); + } + } + Ok(()) + } + + #[test] + fn test_and() -> Result<(), SynthesisError> { + for a_val in [false, true].iter().copied() { + for b_val in [false, true].iter().copied() { + let cs = ConstraintSystem::::new_ref(); + let a = AllocatedBool::new_witness(cs.clone(), || Ok(a_val))?; + let b = AllocatedBool::new_witness(cs.clone(), || Ok(b_val))?; + let c = AllocatedBool::and(&a, &b)?; + assert_eq!(c.value()?, a_val & b_val); + + assert!(cs.is_satisfied().unwrap()); + assert_eq!(a.value()?, (a_val)); + assert_eq!(b.value()?, (b_val)); + assert_eq!(c.value()?, (a_val & b_val)); + } + } + Ok(()) + } + + #[test] + fn test_and_not() -> Result<(), SynthesisError> { + for a_val in [false, true].iter().copied() { + for b_val in [false, true].iter().copied() { + let cs = ConstraintSystem::::new_ref(); + let a = AllocatedBool::new_witness(cs.clone(), || Ok(a_val))?; + let b = AllocatedBool::new_witness(cs.clone(), || Ok(b_val))?; + let c = AllocatedBool::and_not(&a, &b)?; + assert_eq!(c.value()?, a_val & !b_val); + + assert!(cs.is_satisfied().unwrap()); + assert_eq!(a.value()?, (a_val)); + assert_eq!(b.value()?, (b_val)); + assert_eq!(c.value()?, (a_val & !b_val)); + } + } + Ok(()) + } + + #[test] + fn test_nor() -> Result<(), SynthesisError> { + for a_val in [false, true].iter().copied() { + for b_val in [false, true].iter().copied() { + let cs = ConstraintSystem::::new_ref(); + let a = AllocatedBool::new_witness(cs.clone(), || Ok(a_val))?; + let b = AllocatedBool::new_witness(cs.clone(), || Ok(b_val))?; + let c = AllocatedBool::nor(&a, &b)?; + assert_eq!(c.value()?, !a_val & !b_val); + + assert!(cs.is_satisfied().unwrap()); + assert_eq!(a.value()?, (a_val)); + assert_eq!(b.value()?, (b_val)); + assert_eq!(c.value()?, (!a_val & !b_val)); + } + } + Ok(()) + } + + #[test] + fn test_enforce_equal() -> Result<(), SynthesisError> { + for a_bool in [false, true].iter().cloned() { + for b_bool in [false, true].iter().cloned() { + for a_neg in [false, true].iter().cloned() { + for b_neg in [false, true].iter().cloned() { + let cs = ConstraintSystem::::new_ref(); + + let mut a = Boolean::new_witness(cs.clone(), || Ok(a_bool))?; + let mut b = Boolean::new_witness(cs.clone(), || Ok(b_bool))?; + + if a_neg { + a = a.not(); + } + if b_neg { + b = b.not(); + } + + a.enforce_equal(&b)?; + + assert_eq!( + cs.is_satisfied().unwrap(), + (a_bool ^ a_neg) == (b_bool 
^ b_neg) + ); + } + } + } + } + Ok(()) + } + + #[test] + fn test_conditional_enforce_equal() -> Result<(), SynthesisError> { + for a_bool in [false, true].iter().cloned() { + for b_bool in [false, true].iter().cloned() { + for a_neg in [false, true].iter().cloned() { + for b_neg in [false, true].iter().cloned() { + let cs = ConstraintSystem::::new_ref(); + + // First test if constraint system is satisfied + // when we do want to enforce the condition. + let mut a = Boolean::new_witness(cs.clone(), || Ok(a_bool))?; + let mut b = Boolean::new_witness(cs.clone(), || Ok(b_bool))?; + + if a_neg { + a = a.not(); + } + if b_neg { + b = b.not(); + } + + a.conditional_enforce_equal(&b, &Boolean::constant(true))?; + + assert_eq!( + cs.is_satisfied().unwrap(), + (a_bool ^ a_neg) == (b_bool ^ b_neg) + ); + + // Now test if constraint system is satisfied even + // when we don't want to enforce the condition. + let cs = ConstraintSystem::::new_ref(); + + let mut a = Boolean::new_witness(cs.clone(), || Ok(a_bool))?; + let mut b = Boolean::new_witness(cs.clone(), || Ok(b_bool))?; + + if a_neg { + a = a.not(); + } + if b_neg { + b = b.not(); + } + + let false_cond = + Boolean::new_witness(ark_relations::ns!(cs, "cond"), || Ok(false))?; + a.conditional_enforce_equal(&b, &false_cond)?; + + assert!(cs.is_satisfied().unwrap()); + } + } + } + } + Ok(()) + } + + #[test] + fn test_boolean_negation() -> Result<(), SynthesisError> { + let cs = ConstraintSystem::::new_ref(); + + let mut b = Boolean::new_witness(cs.clone(), || Ok(true))?; + assert!(matches!(b, Boolean::Is(_))); + + b = b.not(); + assert!(matches!(b, Boolean::Not(_))); + + b = b.not(); + assert!(matches!(b, Boolean::Is(_))); + + b = Boolean::Constant(true); + assert!(matches!(b, Boolean::Constant(true))); + + b = b.not(); + assert!(matches!(b, Boolean::Constant(false))); + + b = b.not(); + assert!(matches!(b, Boolean::Constant(true))); + Ok(()) + } + + #[derive(Eq, PartialEq, Copy, Clone, Debug)] + enum OpType { + True, + False, + AllocatedTrue, + AllocatedFalse, + NegatedAllocatedTrue, + NegatedAllocatedFalse, + } + + const VARIANTS: [OpType; 6] = [ + OpType::True, + OpType::False, + OpType::AllocatedTrue, + OpType::AllocatedFalse, + OpType::NegatedAllocatedTrue, + OpType::NegatedAllocatedFalse, + ]; + + fn construct( + ns: Namespace, + operand: OpType, + ) -> Result, SynthesisError> { + let cs = ns.cs(); + + let b = match operand { + OpType::True => Boolean::constant(true), + OpType::False => Boolean::constant(false), + OpType::AllocatedTrue => Boolean::new_witness(cs, || Ok(true))?, + OpType::AllocatedFalse => Boolean::new_witness(cs, || Ok(false))?, + OpType::NegatedAllocatedTrue => Boolean::new_witness(cs, || Ok(true))?.not(), + OpType::NegatedAllocatedFalse => Boolean::new_witness(cs, || Ok(false))?.not(), + }; + Ok(b) + } + + #[test] + fn test_boolean_xor() -> Result<(), SynthesisError> { + for first_operand in VARIANTS.iter().cloned() { + for second_operand in VARIANTS.iter().cloned() { + let cs = ConstraintSystem::::new_ref(); + + let a = construct(ark_relations::ns!(cs, "a"), first_operand)?; + let b = construct(ark_relations::ns!(cs, "b"), second_operand)?; + let c = Boolean::xor(&a, &b)?; + + assert!(cs.is_satisfied().unwrap()); + + match (first_operand, second_operand, c) { + (OpType::True, OpType::True, Boolean::Constant(false)) => (), + (OpType::True, OpType::False, Boolean::Constant(true)) => (), + (OpType::True, OpType::AllocatedTrue, Boolean::Not(_)) => (), + (OpType::True, OpType::AllocatedFalse, Boolean::Not(_)) => (), + 
(OpType::True, OpType::NegatedAllocatedTrue, Boolean::Is(_)) => (), + (OpType::True, OpType::NegatedAllocatedFalse, Boolean::Is(_)) => (), + + (OpType::False, OpType::True, Boolean::Constant(true)) => (), + (OpType::False, OpType::False, Boolean::Constant(false)) => (), + (OpType::False, OpType::AllocatedTrue, Boolean::Is(_)) => (), + (OpType::False, OpType::AllocatedFalse, Boolean::Is(_)) => (), + (OpType::False, OpType::NegatedAllocatedTrue, Boolean::Not(_)) => (), + (OpType::False, OpType::NegatedAllocatedFalse, Boolean::Not(_)) => (), + + (OpType::AllocatedTrue, OpType::True, Boolean::Not(_)) => (), + (OpType::AllocatedTrue, OpType::False, Boolean::Is(_)) => (), + (OpType::AllocatedTrue, OpType::AllocatedTrue, Boolean::Is(ref v)) => { + assert_eq!(v.value(), Ok(false)); + } + (OpType::AllocatedTrue, OpType::AllocatedFalse, Boolean::Is(ref v)) => { + assert_eq!(v.value(), Ok(true)); + } + (OpType::AllocatedTrue, OpType::NegatedAllocatedTrue, Boolean::Not(ref v)) => { + assert_eq!(v.value(), Ok(false)); + } + (OpType::AllocatedTrue, OpType::NegatedAllocatedFalse, Boolean::Not(ref v)) => { + assert_eq!(v.value(), Ok(true)); + } + (OpType::AllocatedFalse, OpType::True, Boolean::Not(_)) => (), + (OpType::AllocatedFalse, OpType::False, Boolean::Is(_)) => (), + (OpType::AllocatedFalse, OpType::AllocatedTrue, Boolean::Is(ref v)) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::one()); + assert_eq!(v.value(), Ok(true)); + } + (OpType::AllocatedFalse, OpType::AllocatedFalse, Boolean::Is(ref v)) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::zero()); + assert_eq!(v.value(), Ok(false)); + } + (OpType::AllocatedFalse, OpType::NegatedAllocatedTrue, Boolean::Not(ref v)) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::one()); + assert_eq!(v.value(), Ok(true)); + } + ( + OpType::AllocatedFalse, + OpType::NegatedAllocatedFalse, + Boolean::Not(ref v), + ) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::zero()); + assert_eq!(v.value(), Ok(false)); + } + + (OpType::NegatedAllocatedTrue, OpType::True, Boolean::Is(_)) => (), + (OpType::NegatedAllocatedTrue, OpType::False, Boolean::Not(_)) => (), + (OpType::NegatedAllocatedTrue, OpType::AllocatedTrue, Boolean::Not(ref v)) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::zero()); + assert_eq!(v.value(), Ok(false)); + } + (OpType::NegatedAllocatedTrue, OpType::AllocatedFalse, Boolean::Not(ref v)) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::one()); + assert_eq!(v.value(), Ok(true)); + } + ( + OpType::NegatedAllocatedTrue, + OpType::NegatedAllocatedTrue, + Boolean::Is(ref v), + ) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::zero()); + assert_eq!(v.value(), Ok(false)); + } + ( + OpType::NegatedAllocatedTrue, + OpType::NegatedAllocatedFalse, + Boolean::Is(ref v), + ) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::one()); + assert_eq!(v.value(), Ok(true)); + } + + (OpType::NegatedAllocatedFalse, OpType::True, Boolean::Is(_)) => (), + (OpType::NegatedAllocatedFalse, OpType::False, Boolean::Not(_)) => (), + (OpType::NegatedAllocatedFalse, OpType::AllocatedTrue, Boolean::Not(ref v)) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::one()); + assert_eq!(v.value(), Ok(true)); + } + ( + OpType::NegatedAllocatedFalse, + OpType::AllocatedFalse, + Boolean::Not(ref v), + ) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::zero()); + assert_eq!(v.value(), Ok(false)); + } + ( + OpType::NegatedAllocatedFalse, + 
OpType::NegatedAllocatedTrue, + Boolean::Is(ref v), + ) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::one()); + assert_eq!(v.value(), Ok(true)); + } + ( + OpType::NegatedAllocatedFalse, + OpType::NegatedAllocatedFalse, + Boolean::Is(ref v), + ) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::zero()); + assert_eq!(v.value(), Ok(false)); + } + + _ => unreachable!(), + } + } + } + Ok(()) + } + + #[test] + fn test_boolean_cond_select() -> Result<(), SynthesisError> { + for condition in VARIANTS.iter().cloned() { + for first_operand in VARIANTS.iter().cloned() { + for second_operand in VARIANTS.iter().cloned() { + let cs = ConstraintSystem::::new_ref(); + + let cond = construct(ark_relations::ns!(cs, "cond"), condition)?; + let a = construct(ark_relations::ns!(cs, "a"), first_operand)?; + let b = construct(ark_relations::ns!(cs, "b"), second_operand)?; + let c = cond.select(&a, &b)?; + + assert!( + cs.is_satisfied().unwrap(), + "failed with operands: cond: {:?}, a: {:?}, b: {:?}", + condition, + first_operand, + second_operand, + ); + assert_eq!( + c.value()?, + if cond.value()? { + a.value()? + } else { + b.value()? + } + ); + } + } + } + Ok(()) + } + + #[test] + fn test_boolean_or() -> Result<(), SynthesisError> { + for first_operand in VARIANTS.iter().cloned() { + for second_operand in VARIANTS.iter().cloned() { + let cs = ConstraintSystem::::new_ref(); + + let a = construct(ark_relations::ns!(cs, "a"), first_operand)?; + let b = construct(ark_relations::ns!(cs, "b"), second_operand)?; + let c = a.or(&b)?; + + assert!(cs.is_satisfied().unwrap()); + + match (first_operand, second_operand, c.clone()) { + (OpType::True, OpType::True, Boolean::Constant(true)) => (), + (OpType::True, OpType::False, Boolean::Constant(true)) => (), + (OpType::True, OpType::AllocatedTrue, Boolean::Constant(true)) => (), + (OpType::True, OpType::AllocatedFalse, Boolean::Constant(true)) => (), + (OpType::True, OpType::NegatedAllocatedTrue, Boolean::Constant(true)) => (), + (OpType::True, OpType::NegatedAllocatedFalse, Boolean::Constant(true)) => (), + + (OpType::False, OpType::True, Boolean::Constant(true)) => (), + (OpType::False, OpType::False, Boolean::Constant(false)) => (), + (OpType::False, OpType::AllocatedTrue, Boolean::Is(_)) => (), + (OpType::False, OpType::AllocatedFalse, Boolean::Is(_)) => (), + (OpType::False, OpType::NegatedAllocatedTrue, Boolean::Not(_)) => (), + (OpType::False, OpType::NegatedAllocatedFalse, Boolean::Not(_)) => (), + + (OpType::AllocatedTrue, OpType::True, Boolean::Constant(true)) => (), + (OpType::AllocatedTrue, OpType::False, Boolean::Is(_)) => (), + (OpType::AllocatedTrue, OpType::AllocatedTrue, Boolean::Is(ref v)) => { + assert_eq!(v.value(), Ok(true)); + } + (OpType::AllocatedTrue, OpType::AllocatedFalse, Boolean::Is(ref v)) => { + assert_eq!(v.value(), Ok(true)); + } + (OpType::AllocatedTrue, OpType::NegatedAllocatedTrue, Boolean::Not(ref v)) => { + assert_eq!(v.value(), Ok(false)); + } + (OpType::AllocatedTrue, OpType::NegatedAllocatedFalse, Boolean::Not(ref v)) => { + assert_eq!(v.value(), Ok(false)); + } + + (OpType::AllocatedFalse, OpType::True, Boolean::Constant(true)) => (), + (OpType::AllocatedFalse, OpType::False, Boolean::Is(_)) => (), + (OpType::AllocatedFalse, OpType::AllocatedTrue, Boolean::Is(ref v)) => { + assert_eq!(v.value(), Ok(true)); + } + (OpType::AllocatedFalse, OpType::AllocatedFalse, Boolean::Is(ref v)) => { + assert_eq!(v.value(), Ok(false)); + } + (OpType::AllocatedFalse, OpType::NegatedAllocatedTrue, Boolean::Not(ref v)) 
=> { + assert_eq!(v.value(), Ok(true)); + } + ( + OpType::AllocatedFalse, + OpType::NegatedAllocatedFalse, + Boolean::Not(ref v), + ) => { + assert_eq!(v.value(), Ok(false)); + } + + (OpType::NegatedAllocatedTrue, OpType::True, Boolean::Constant(true)) => (), + (OpType::NegatedAllocatedTrue, OpType::False, Boolean::Not(_)) => (), + (OpType::NegatedAllocatedTrue, OpType::AllocatedTrue, Boolean::Not(ref v)) => { + assert_eq!(v.value(), Ok(false)); + } + (OpType::NegatedAllocatedTrue, OpType::AllocatedFalse, Boolean::Not(ref v)) => { + assert_eq!(v.value(), Ok(true)); + } + ( + OpType::NegatedAllocatedTrue, + OpType::NegatedAllocatedTrue, + Boolean::Not(ref v), + ) => { + assert_eq!(v.value(), Ok(true)); + } + ( + OpType::NegatedAllocatedTrue, + OpType::NegatedAllocatedFalse, + Boolean::Not(ref v), + ) => { + assert_eq!(v.value(), Ok(false)); + } + + (OpType::NegatedAllocatedFalse, OpType::True, Boolean::Constant(true)) => (), + (OpType::NegatedAllocatedFalse, OpType::False, Boolean::Not(_)) => (), + (OpType::NegatedAllocatedFalse, OpType::AllocatedTrue, Boolean::Not(ref v)) => { + assert_eq!(v.value(), Ok(false)); + } + ( + OpType::NegatedAllocatedFalse, + OpType::AllocatedFalse, + Boolean::Not(ref v), + ) => { + assert_eq!(v.value(), Ok(false)); + } + ( + OpType::NegatedAllocatedFalse, + OpType::NegatedAllocatedTrue, + Boolean::Not(ref v), + ) => { + assert_eq!(v.value(), Ok(false)); + } + ( + OpType::NegatedAllocatedFalse, + OpType::NegatedAllocatedFalse, + Boolean::Not(ref v), + ) => { + assert_eq!(v.value(), Ok(false)); + } + + _ => panic!( + "this should never be encountered, in case: (a = {:?}, b = {:?}, c = {:?})", + a, b, c + ), + } + } + } + Ok(()) + } + + #[test] + fn test_boolean_and() -> Result<(), SynthesisError> { + for first_operand in VARIANTS.iter().cloned() { + for second_operand in VARIANTS.iter().cloned() { + let cs = ConstraintSystem::::new_ref(); + + let a = construct(ark_relations::ns!(cs, "a"), first_operand)?; + let b = construct(ark_relations::ns!(cs, "b"), second_operand)?; + let c = a.and(&b)?; + + assert!(cs.is_satisfied().unwrap()); + + match (first_operand, second_operand, c) { + (OpType::True, OpType::True, Boolean::Constant(true)) => (), + (OpType::True, OpType::False, Boolean::Constant(false)) => (), + (OpType::True, OpType::AllocatedTrue, Boolean::Is(_)) => (), + (OpType::True, OpType::AllocatedFalse, Boolean::Is(_)) => (), + (OpType::True, OpType::NegatedAllocatedTrue, Boolean::Not(_)) => (), + (OpType::True, OpType::NegatedAllocatedFalse, Boolean::Not(_)) => (), + + (OpType::False, OpType::True, Boolean::Constant(false)) => (), + (OpType::False, OpType::False, Boolean::Constant(false)) => (), + (OpType::False, OpType::AllocatedTrue, Boolean::Constant(false)) => (), + (OpType::False, OpType::AllocatedFalse, Boolean::Constant(false)) => (), + (OpType::False, OpType::NegatedAllocatedTrue, Boolean::Constant(false)) => (), + (OpType::False, OpType::NegatedAllocatedFalse, Boolean::Constant(false)) => (), + + (OpType::AllocatedTrue, OpType::True, Boolean::Is(_)) => (), + (OpType::AllocatedTrue, OpType::False, Boolean::Constant(false)) => (), + (OpType::AllocatedTrue, OpType::AllocatedTrue, Boolean::Is(ref v)) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::one()); + assert_eq!(v.value(), Ok(true)); + } + (OpType::AllocatedTrue, OpType::AllocatedFalse, Boolean::Is(ref v)) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::zero()); + assert_eq!(v.value(), Ok(false)); + } + (OpType::AllocatedTrue, OpType::NegatedAllocatedTrue, 
Boolean::Is(ref v)) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::zero()); + assert_eq!(v.value(), Ok(false)); + } + (OpType::AllocatedTrue, OpType::NegatedAllocatedFalse, Boolean::Is(ref v)) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::one()); + assert_eq!(v.value(), Ok(true)); + } + + (OpType::AllocatedFalse, OpType::True, Boolean::Is(_)) => (), + (OpType::AllocatedFalse, OpType::False, Boolean::Constant(false)) => (), + (OpType::AllocatedFalse, OpType::AllocatedTrue, Boolean::Is(ref v)) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::zero()); + assert_eq!(v.value(), Ok(false)); + } + (OpType::AllocatedFalse, OpType::AllocatedFalse, Boolean::Is(ref v)) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::zero()); + assert_eq!(v.value(), Ok(false)); + } + (OpType::AllocatedFalse, OpType::NegatedAllocatedTrue, Boolean::Is(ref v)) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::zero()); + assert_eq!(v.value(), Ok(false)); + } + (OpType::AllocatedFalse, OpType::NegatedAllocatedFalse, Boolean::Is(ref v)) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::zero()); + assert_eq!(v.value(), Ok(false)); + } + + (OpType::NegatedAllocatedTrue, OpType::True, Boolean::Not(_)) => (), + (OpType::NegatedAllocatedTrue, OpType::False, Boolean::Constant(false)) => (), + (OpType::NegatedAllocatedTrue, OpType::AllocatedTrue, Boolean::Is(ref v)) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::zero()); + assert_eq!(v.value(), Ok(false)); + } + (OpType::NegatedAllocatedTrue, OpType::AllocatedFalse, Boolean::Is(ref v)) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::zero()); + assert_eq!(v.value(), Ok(false)); + } + ( + OpType::NegatedAllocatedTrue, + OpType::NegatedAllocatedTrue, + Boolean::Is(ref v), + ) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::zero()); + assert_eq!(v.value(), Ok(false)); + } + ( + OpType::NegatedAllocatedTrue, + OpType::NegatedAllocatedFalse, + Boolean::Is(ref v), + ) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::zero()); + assert_eq!(v.value(), Ok(false)); + } + + (OpType::NegatedAllocatedFalse, OpType::True, Boolean::Not(_)) => (), + (OpType::NegatedAllocatedFalse, OpType::False, Boolean::Constant(false)) => (), + (OpType::NegatedAllocatedFalse, OpType::AllocatedTrue, Boolean::Is(ref v)) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::one()); + assert_eq!(v.value(), Ok(true)); + } + (OpType::NegatedAllocatedFalse, OpType::AllocatedFalse, Boolean::Is(ref v)) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::zero()); + assert_eq!(v.value(), Ok(false)); + } + ( + OpType::NegatedAllocatedFalse, + OpType::NegatedAllocatedTrue, + Boolean::Is(ref v), + ) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::zero()); + assert_eq!(v.value(), Ok(false)); + } + ( + OpType::NegatedAllocatedFalse, + OpType::NegatedAllocatedFalse, + Boolean::Is(ref v), + ) => { + assert_eq!(cs.assigned_value(v.variable()).unwrap(), Fr::one()); + assert_eq!(v.value(), Ok(true)); + } + + _ => { + panic!( + "unexpected behavior at {:?} AND {:?}", + first_operand, second_operand + ); + } + } + } + } + Ok(()) + } + + #[test] + fn test_smaller_than_or_equal_to() -> Result<(), SynthesisError> { + let mut rng = ark_std::test_rng(); + for _ in 0..1000 { + let mut r = Fr::rand(&mut rng); + let mut s = Fr::rand(&mut rng); + if r > s { + core::mem::swap(&mut r, &mut s) + } + + let cs = ConstraintSystem::::new_ref(); + + let 
native_bits: Vec<_> = BitIteratorLE::new(r.into_repr()).collect(); + let bits = Vec::new_witness(cs.clone(), || Ok(native_bits))?; + Boolean::enforce_smaller_or_equal_than_le(&bits, s.into_repr())?; + + assert!(cs.is_satisfied().unwrap()); + } + + for _ in 0..1000 { + let r = Fr::rand(&mut rng); + if r == -Fr::one() { + continue; + } + let s = r + Fr::one(); + let s2 = r.double(); + let cs = ConstraintSystem::::new_ref(); + + let native_bits: Vec<_> = BitIteratorLE::new(r.into_repr()).collect(); + let bits = Vec::new_witness(cs.clone(), || Ok(native_bits))?; + Boolean::enforce_smaller_or_equal_than_le(&bits, s.into_repr())?; + if r < s2 { + Boolean::enforce_smaller_or_equal_than_le(&bits, s2.into_repr())?; + } + + assert!(cs.is_satisfied().unwrap()); + } + Ok(()) + } + + #[test] + fn test_enforce_in_field() -> Result<(), SynthesisError> { + { + let cs = ConstraintSystem::::new_ref(); + + let mut bits = vec![]; + for b in BitIteratorBE::new(Fr::characteristic()).skip(1) { + bits.push(Boolean::new_witness(cs.clone(), || Ok(b))?); + } + bits.reverse(); + + Boolean::enforce_in_field_le(&bits)?; + + assert!(!cs.is_satisfied().unwrap()); + } + + let mut rng = ark_std::test_rng(); + + for _ in 0..1000 { + let r = Fr::rand(&mut rng); + let cs = ConstraintSystem::::new_ref(); + + let mut bits = vec![]; + for b in BitIteratorBE::new(r.into_repr()).skip(1) { + bits.push(Boolean::new_witness(cs.clone(), || Ok(b))?); + } + bits.reverse(); + + Boolean::enforce_in_field_le(&bits)?; + + assert!(cs.is_satisfied().unwrap()); + } + Ok(()) + } + + #[test] + fn test_enforce_nand() -> Result<(), SynthesisError> { + { + let cs = ConstraintSystem::::new_ref(); + + assert!( + Boolean::enforce_kary_nand(&[Boolean::new_constant(cs.clone(), false)?]).is_ok() + ); + assert!( + Boolean::enforce_kary_nand(&[Boolean::new_constant(cs.clone(), true)?]).is_err() + ); + } + + for i in 1..5 { + // with every possible assignment for them + for mut b in 0..(1 << i) { + // with every possible negation + for mut n in 0..(1 << i) { + let cs = ConstraintSystem::::new_ref(); + + let mut expected = true; + + let mut bits = vec![]; + for _ in 0..i { + expected &= b & 1 == 1; + + let bit = if n & 1 == 1 { + Boolean::new_witness(cs.clone(), || Ok(b & 1 == 1))? 
+ } else { + Boolean::new_witness(cs.clone(), || Ok(b & 1 == 0))?.not() + }; + bits.push(bit); + + b >>= 1; + n >>= 1; + } + + let expected = !expected; + + Boolean::enforce_kary_nand(&bits)?; + + if expected { + assert!(cs.is_satisfied().unwrap()); + } else { + assert!(!cs.is_satisfied().unwrap()); + } + } + } + } + Ok(()) + } + + #[test] + fn test_kary_and() -> Result<(), SynthesisError> { + // test different numbers of operands + for i in 1..15 { + // with every possible assignment for them + for mut b in 0..(1 << i) { + let cs = ConstraintSystem::::new_ref(); + + let mut expected = true; + + let mut bits = vec![]; + for _ in 0..i { + expected &= b & 1 == 1; + bits.push(Boolean::new_witness(cs.clone(), || Ok(b & 1 == 1))?); + b >>= 1; + } + + let r = Boolean::kary_and(&bits)?; + + assert!(cs.is_satisfied().unwrap()); + + if let Boolean::Is(ref r) = r { + assert_eq!(r.value()?, expected); + } + } + } + Ok(()) + } + + #[test] + fn test_bits_to_fp() -> Result<(), SynthesisError> { + use AllocationMode::*; + let rng = &mut ark_std::test_rng(); + let cs = ConstraintSystem::::new_ref(); + + let modes = [Input, Witness, Constant]; + for &mode in modes.iter() { + for _ in 0..1000 { + let f = Fr::rand(rng); + let bits = BitIteratorLE::new(f.into_repr()).collect::>(); + let bits: Vec<_> = + AllocVar::new_variable(cs.clone(), || Ok(bits.as_slice()), mode)?; + let f = AllocVar::new_variable(cs.clone(), || Ok(f), mode)?; + let claimed_f = Boolean::le_bits_to_fp_var(&bits)?; + claimed_f.enforce_equal(&f)?; + } + + for _ in 0..1000 { + let f = Fr::from(u64::rand(rng)); + let bits = BitIteratorLE::new(f.into_repr()).collect::>(); + let bits: Vec<_> = + AllocVar::new_variable(cs.clone(), || Ok(bits.as_slice()), mode)?; + let f = AllocVar::new_variable(cs.clone(), || Ok(f), mode)?; + let claimed_f = Boolean::le_bits_to_fp_var(&bits)?; + claimed_f.enforce_equal(&f)?; + } + assert!(cs.is_satisfied().unwrap()); + } + + Ok(()) + } +} diff --git a/arkworks/r1cs-std/src/bits/mod.rs b/arkworks/r1cs-std/src/bits/mod.rs new file mode 100644 index 00000000..3878c84f --- /dev/null +++ b/arkworks/r1cs-std/src/bits/mod.rs @@ -0,0 +1,129 @@ +use crate::{ + bits::{boolean::Boolean, uint8::UInt8}, + Vec, +}; +use ark_ff::Field; +use ark_relations::r1cs::SynthesisError; + +/// This module contains `Boolean`, a R1CS equivalent of the `bool` type. +pub mod boolean; +/// This module contains `UInt8`, a R1CS equivalent of the `u8` type. +pub mod uint8; +/// This module contains a macro for generating `UIntN` types, which are R1CS +/// equivalents of `N`-bit unsigned integers. +#[macro_use] +pub mod uint; + +make_uint!(UInt16, 16, u16, uint16, "`U16`", "`u16`", "16"); +make_uint!(UInt32, 32, u32, uint32, "`U32`", "`u32`", "32"); +make_uint!(UInt64, 64, u64, uint64, "`U64`", "`u64`", "64"); +make_uint!(UInt128, 128, u128, uint128, "`U128`", "`u128`", "128"); + +/// Specifies constraints for conversion to a little-endian bit representation +/// of `self`. +pub trait ToBitsGadget { + /// Outputs the canonical little-endian bit-wise representation of `self`. + /// + /// This is the correct default for 99% of use cases. + fn to_bits_le(&self) -> Result>, SynthesisError>; + + /// Outputs a possibly non-unique little-endian bit-wise representation of + /// `self`. + /// + /// If you're not absolutely certain that your usecase can get away with a + /// non-canonical representation, please use `self.to_bits()` instead. 
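+    ///
+    /// A small sketch of the contrast: for `Boolean`, whose single-bit
+    /// representation is already canonical, the two methods agree.
+    ///
+    /// ```
+    /// # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> {
+    /// use ark_test_curves::bls12_381::Fr;
+    /// use ark_relations::r1cs::*;
+    /// use ark_r1cs_std::prelude::*;
+    ///
+    /// let cs = ConstraintSystem::<Fr>::new_ref();
+    /// let b = Boolean::new_witness(cs.clone(), || Ok(true))?;
+    /// let canonical = b.to_bits_le()?;
+    /// let loose = b.to_non_unique_bits_le()?;
+    /// canonical.enforce_equal(&loose)?;
+    /// assert!(cs.is_satisfied().unwrap());
+    /// # Ok(())
+    /// # }
+    /// ```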
+ fn to_non_unique_bits_le(&self) -> Result>, SynthesisError> { + self.to_bits_le() + } + + /// Outputs the canonical big-endian bit-wise representation of `self`. + fn to_bits_be(&self) -> Result>, SynthesisError> { + let mut res = self.to_bits_le()?; + res.reverse(); + Ok(res) + } + + /// Outputs a possibly non-unique big-endian bit-wise representation of + /// `self`. + fn to_non_unique_bits_be(&self) -> Result>, SynthesisError> { + let mut res = self.to_non_unique_bits_le()?; + res.reverse(); + Ok(res) + } +} + +impl ToBitsGadget for Boolean { + fn to_bits_le(&self) -> Result>, SynthesisError> { + Ok(vec![self.clone()]) + } +} + +impl ToBitsGadget for [Boolean] { + /// Outputs `self`. + fn to_bits_le(&self) -> Result>, SynthesisError> { + Ok(self.to_vec()) + } +} + +impl ToBitsGadget for UInt8 { + fn to_bits_le(&self) -> Result>, SynthesisError> { + Ok(self.bits.to_vec()) + } +} + +impl ToBitsGadget for [UInt8] { + /// Interprets `self` as an integer, and outputs the little-endian + /// bit-wise decomposition of that integer. + fn to_bits_le(&self) -> Result>, SynthesisError> { + let bits = self.iter().flat_map(|b| &b.bits).cloned().collect(); + Ok(bits) + } +} + +impl ToBitsGadget for Vec +where + [T]: ToBitsGadget, +{ + fn to_bits_le(&self) -> Result>, SynthesisError> { + self.as_slice().to_bits_le().map(|v| v.to_vec()) + } + + fn to_non_unique_bits_le(&self) -> Result>, SynthesisError> { + self.as_slice().to_non_unique_bits_le().map(|v| v.to_vec()) + } +} + +/// Specifies constraints for conversion to a little-endian byte representation +/// of `self`. +pub trait ToBytesGadget { + /// Outputs a canonical, little-endian, byte decomposition of `self`. + /// + /// This is the correct default for 99% of use cases. + fn to_bytes(&self) -> Result>, SynthesisError>; + + /// Outputs a possibly non-unique byte decomposition of `self`. + /// + /// If you're not absolutely certain that your usecase can get away with a + /// non-canonical representation, please use `self.to_bytes(cs)` instead. + fn to_non_unique_bytes(&self) -> Result>, SynthesisError> { + self.to_bytes() + } +} + +impl ToBytesGadget for [UInt8] { + fn to_bytes(&self) -> Result>, SynthesisError> { + Ok(self.to_vec()) + } +} + +impl<'a, F: Field, T: 'a + ToBytesGadget> ToBytesGadget for &'a T { + fn to_bytes(&self) -> Result>, SynthesisError> { + (*self).to_bytes() + } +} + +impl<'a, F: Field> ToBytesGadget for &'a [UInt8] { + fn to_bytes(&self) -> Result>, SynthesisError> { + Ok(self.to_vec()) + } +} diff --git a/arkworks/r1cs-std/src/bits/uint.rs b/arkworks/r1cs-std/src/bits/uint.rs new file mode 100644 index 00000000..cc5053e8 --- /dev/null +++ b/arkworks/r1cs-std/src/bits/uint.rs @@ -0,0 +1,568 @@ +macro_rules! 
make_uint { + ($name:ident, $size:expr, $native:ident, $mod_name:ident, $r1cs_doc_name:expr, $native_doc_name:expr, $num_bits_doc:expr) => { + #[doc = "This module contains the "] + #[doc = $r1cs_doc_name] + #[doc = "type, which is the R1CS equivalent of the "] + #[doc = $native_doc_name] + #[doc = " type."] + pub mod $mod_name { + use ark_ff::{Field, FpParameters, One, PrimeField, Zero}; + use core::borrow::Borrow; + use core::convert::TryFrom; + use num_bigint::BigUint; + use num_traits::cast::ToPrimitive; + + use ark_relations::r1cs::{ + ConstraintSystemRef, LinearCombination, Namespace, SynthesisError, Variable, + }; + + use crate::{ + boolean::{AllocatedBool, Boolean}, + prelude::*, + Assignment, Vec, + }; + + #[doc = "This struct represent an unsigned"] + #[doc = $num_bits_doc] + #[doc = " bit integer as a sequence of "] + #[doc = $num_bits_doc] + #[doc = " `Boolean`s. \n"] + #[doc = "This is the R1CS equivalent of the native "] + #[doc = $native_doc_name] + #[doc = " unsigned integer type."] + #[derive(Clone, Debug)] + pub struct $name { + // Least significant bit first + bits: [Boolean; $size], + value: Option<$native>, + } + + impl R1CSVar for $name { + type Value = $native; + + fn cs(&self) -> ConstraintSystemRef { + self.bits.as_ref().cs() + } + + fn value(&self) -> Result { + let mut value = None; + for (i, bit) in self.bits.iter().enumerate() { + let b = $native::from(bit.value()?); + value = match value { + Some(value) => Some(value + (b << i)), + None => Some(b << i), + }; + } + debug_assert_eq!(self.value, value); + value.get() + } + } + + impl $name { + #[doc = "Construct a constant "] + #[doc = $r1cs_doc_name] + #[doc = " from the native "] + #[doc = $native_doc_name] + #[doc = " type."] + pub fn constant(value: $native) -> Self { + let mut bits = [Boolean::FALSE; $size]; + + let mut tmp = value; + for i in 0..$size { + bits[i] = Boolean::constant((tmp & 1) == 1); + tmp >>= 1; + } + + $name { + bits, + value: Some(value), + } + } + + /// Turns `self` into the underlying little-endian bits. + pub fn to_bits_le(&self) -> Vec> { + self.bits.to_vec() + } + + /// Construct `Self` from a slice of `Boolean`s. + /// + /// # Panics + /// + #[doc = "This method panics if `bits.len() != "] + #[doc = $num_bits_doc] + #[doc = "`."] + pub fn from_bits_le(bits: &[Boolean]) -> Self { + assert_eq!(bits.len(), $size); + + let bits = <&[Boolean; $size]>::try_from(bits).unwrap().clone(); + + let mut value = Some(0); + for b in bits.iter().rev() { + value.as_mut().map(|v| *v <<= 1); + + match *b { + Boolean::Constant(b) => { + value.as_mut().map(|v| *v |= $native::from(b)); + } + Boolean::Is(ref b) => match b.value() { + Ok(b) => { + value.as_mut().map(|v| *v |= $native::from(b)); + } + Err(_) => value = None, + }, + Boolean::Not(ref b) => match b.value() { + Ok(b) => { + value.as_mut().map(|v| *v |= $native::from(!b)); + } + Err(_) => value = None, + }, + } + } + + Self { value, bits } + } + + /// Rotates `self` to the right by `by` steps, wrapping around. + #[tracing::instrument(target = "r1cs", skip(self))] + pub fn rotr(&self, by: usize) -> Self { + let mut result = self.clone(); + let by = by % $size; + + let new_bits = self.bits.iter().skip(by).chain(&self.bits).take($size); + + for (res, new) in result.bits.iter_mut().zip(new_bits) { + *res = new.clone(); + } + + result.value = self + .value + .map(|v| v.rotate_right(u32::try_from(by).unwrap())); + result + } + + /// Outputs `self ^ other`. 
+ /// + /// If at least one of `self` and `other` are constants, then this method + /// *does not* create any constraints or variables. + #[tracing::instrument(target = "r1cs", skip(self, other))] + pub fn xor(&self, other: &Self) -> Result { + let mut result = self.clone(); + result.value = match (self.value, other.value) { + (Some(a), Some(b)) => Some(a ^ b), + _ => None, + }; + + let new_bits = self.bits.iter().zip(&other.bits).map(|(a, b)| a.xor(b)); + + for (res, new) in result.bits.iter_mut().zip(new_bits) { + *res = new?; + } + + Ok(result) + } + + /// Perform modular addition of `operands`. + /// + /// The user must ensure that overflow does not occur. + #[tracing::instrument(target = "r1cs", skip(operands))] + pub fn addmany(operands: &[Self]) -> Result + where + F: PrimeField, + { + // Make some arbitrary bounds for ourselves to avoid overflows + // in the scalar field + assert!(F::Params::MODULUS_BITS >= 2 * $size); + + // Support up to 128 + assert!($size <= 128); + + assert!(operands.len() >= 1); + assert!($size * operands.len() <= F::Params::MODULUS_BITS as usize); + + if operands.len() == 1 { + return Ok(operands[0].clone()); + } + + // Compute the maximum value of the sum so we allocate enough bits for + // the result + let mut max_value = + BigUint::from($native::max_value()) * BigUint::from(operands.len()); + + // Keep track of the resulting value + let mut result_value = Some(BigUint::zero()); + + // This is a linear combination that we will enforce to be "zero" + let mut lc = LinearCombination::zero(); + + let mut all_constants = true; + + // Iterate over the operands + for op in operands { + // Accumulate the value + match op.value { + Some(val) => { + result_value.as_mut().map(|v| *v += BigUint::from(val)); + } + + None => { + // If any of our operands have unknown value, we won't + // know the value of the result + result_value = None; + } + } + + // Iterate over each bit_gadget of the operand and add the operand to + // the linear combination + let mut coeff = F::one(); + for bit in &op.bits { + match *bit { + Boolean::Is(ref bit) => { + all_constants = false; + + // Add coeff * bit_gadget + lc += (coeff, bit.variable()); + } + Boolean::Not(ref bit) => { + all_constants = false; + + // Add coeff * (1 - bit_gadget) = coeff * ONE - coeff * bit_gadget + lc = lc + (coeff, Variable::One) - (coeff, bit.variable()); + } + Boolean::Constant(bit) => { + if bit { + lc += (coeff, Variable::One); + } + } + } + + coeff.double_in_place(); + } + } + + // The value of the actual result is modulo 2^$size + let modular_value = result_value.clone().map(|v| { + let modulus = BigUint::from(1u64) << ($size as u32); + (v % modulus).to_u128().unwrap() as $native + }); + + if all_constants && modular_value.is_some() { + // We can just return a constant, rather than + // unpacking the result into allocated bits. 
+ + return Ok($name::constant(modular_value.unwrap())); + } + let cs = operands.cs(); + + // Storage area for the resulting bits + let mut result_bits = vec![]; + + // Allocate each bit_gadget of the result + let mut coeff = F::one(); + let mut i = 0; + while max_value != BigUint::zero() { + // Allocate the bit_gadget + let b = AllocatedBool::new_witness(cs.clone(), || { + result_value + .clone() + .map(|v| (v >> i) & BigUint::one() == BigUint::one()) + .get() + })?; + + // Subtract this bit_gadget from the linear combination to ensure the sums + // balance out + lc = lc - (coeff, b.variable()); + + result_bits.push(b.into()); + + max_value >>= 1; + i += 1; + coeff.double_in_place(); + } + + // Enforce that the linear combination equals zero + cs.enforce_constraint(lc!(), lc!(), lc)?; + + // Discard carry bits that we don't care about + result_bits.truncate($size); + let bits = TryFrom::try_from(result_bits).unwrap(); + + Ok($name { + bits, + value: modular_value, + }) + } + } + + impl ToBytesGadget for $name { + #[tracing::instrument(target = "r1cs", skip(self))] + fn to_bytes(&self) -> Result>, SynthesisError> { + Ok(self + .to_bits_le() + .chunks(8) + .map(UInt8::from_bits_le) + .collect()) + } + } + + impl EqGadget for $name { + #[tracing::instrument(target = "r1cs", skip(self))] + fn is_eq(&self, other: &Self) -> Result, SynthesisError> { + self.bits.as_ref().is_eq(&other.bits) + } + + #[tracing::instrument(target = "r1cs", skip(self))] + fn conditional_enforce_equal( + &self, + other: &Self, + condition: &Boolean, + ) -> Result<(), SynthesisError> { + self.bits.conditional_enforce_equal(&other.bits, condition) + } + + #[tracing::instrument(target = "r1cs", skip(self))] + fn conditional_enforce_not_equal( + &self, + other: &Self, + condition: &Boolean, + ) -> Result<(), SynthesisError> { + self.bits + .conditional_enforce_not_equal(&other.bits, condition) + } + } + + impl CondSelectGadget for $name { + #[tracing::instrument(target = "r1cs", skip(cond, true_value, false_value))] + fn conditionally_select( + cond: &Boolean, + true_value: &Self, + false_value: &Self, + ) -> Result { + let selected_bits = true_value + .bits + .iter() + .zip(&false_value.bits) + .map(|(t, f)| cond.select(t, f)); + let mut bits = [Boolean::FALSE; $size]; + for (result, new) in bits.iter_mut().zip(selected_bits) { + *result = new?; + } + + let value = cond.value().ok().and_then(|cond| { + if cond { + true_value.value().ok() + } else { + false_value.value().ok() + } + }); + Ok(Self { bits, value }) + } + } + + impl AllocVar<$native, ConstraintF> for $name { + fn new_variable>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + let value = f().map(|f| *f.borrow()).ok(); + + let mut values = [None; $size]; + if let Some(val) = value { + values + .iter_mut() + .enumerate() + .for_each(|(i, v)| *v = Some((val >> i) & 1 == 1)); + } + + let mut bits = [Boolean::FALSE; $size]; + for (b, v) in bits.iter_mut().zip(&values) { + *b = Boolean::new_variable(cs.clone(), || v.get(), mode)?; + } + Ok(Self { bits, value }) + } + } + + #[cfg(test)] + mod test { + use super::$name; + use crate::{bits::boolean::Boolean, prelude::*, Vec}; + use ark_relations::r1cs::{ConstraintSystem, SynthesisError}; + use ark_std::rand::Rng; + use ark_test_curves::mnt4_753::Fr; + + #[test] + fn test_from_bits() -> Result<(), SynthesisError> { + let mut rng = ark_std::test_rng(); + + for _ in 0..1000 { + let v = (0..$size) + .map(|_| Boolean::constant(rng.gen())) + 
.collect::>>(); + + let b = $name::from_bits_le(&v); + + for (i, bit) in b.bits.iter().enumerate() { + match bit { + &Boolean::Constant(bit) => { + assert_eq!(bit, ((b.value()? >> i) & 1 == 1)); + } + _ => unreachable!(), + } + } + + let expected_to_be_same = b.to_bits_le(); + + for x in v.iter().zip(expected_to_be_same.iter()) { + match x { + (&Boolean::Constant(true), &Boolean::Constant(true)) => {} + (&Boolean::Constant(false), &Boolean::Constant(false)) => {} + _ => unreachable!(), + } + } + } + Ok(()) + } + + #[test] + fn test_xor() -> Result<(), SynthesisError> { + use Boolean::*; + let mut rng = ark_std::test_rng(); + + for _ in 0..1000 { + let cs = ConstraintSystem::::new_ref(); + + let a: $native = rng.gen(); + let b: $native = rng.gen(); + let c: $native = rng.gen(); + + let mut expected = a ^ b ^ c; + + let a_bit = $name::new_witness(cs.clone(), || Ok(a))?; + let b_bit = $name::constant(b); + let c_bit = $name::new_witness(cs.clone(), || Ok(c))?; + + let r = a_bit.xor(&b_bit).unwrap(); + let r = r.xor(&c_bit).unwrap(); + + assert!(cs.is_satisfied().unwrap()); + + assert!(r.value == Some(expected)); + + for b in r.bits.iter() { + match b { + Is(b) => assert_eq!(b.value()?, (expected & 1 == 1)), + Not(b) => assert_eq!(!b.value()?, (expected & 1 == 1)), + Constant(b) => assert_eq!(*b, (expected & 1 == 1)), + } + + expected >>= 1; + } + } + Ok(()) + } + + #[test] + fn test_addmany_constants() -> Result<(), SynthesisError> { + let mut rng = ark_std::test_rng(); + + for _ in 0..1000 { + let cs = ConstraintSystem::::new_ref(); + + let a: $native = rng.gen(); + let b: $native = rng.gen(); + let c: $native = rng.gen(); + + let a_bit = $name::new_constant(cs.clone(), a)?; + let b_bit = $name::new_constant(cs.clone(), b)?; + let c_bit = $name::new_constant(cs.clone(), c)?; + + let mut expected = a.wrapping_add(b).wrapping_add(c); + + let r = $name::addmany(&[a_bit, b_bit, c_bit]).unwrap(); + + assert!(r.value == Some(expected)); + + for b in r.bits.iter() { + match b { + Boolean::Is(_) => unreachable!(), + Boolean::Not(_) => unreachable!(), + Boolean::Constant(b) => assert_eq!(*b, (expected & 1 == 1)), + } + + expected >>= 1; + } + } + Ok(()) + } + + #[test] + fn test_addmany() -> Result<(), SynthesisError> { + let mut rng = ark_std::test_rng(); + + for _ in 0..1000 { + let cs = ConstraintSystem::::new_ref(); + + let a: $native = rng.gen(); + let b: $native = rng.gen(); + let c: $native = rng.gen(); + let d: $native = rng.gen(); + + let mut expected = (a ^ b).wrapping_add(c).wrapping_add(d); + + let a_bit = $name::new_witness(ark_relations::ns!(cs, "a_bit"), || Ok(a))?; + let b_bit = $name::constant(b); + let c_bit = $name::constant(c); + let d_bit = $name::new_witness(ark_relations::ns!(cs, "d_bit"), || Ok(d))?; + + let r = a_bit.xor(&b_bit).unwrap(); + let r = $name::addmany(&[r, c_bit, d_bit]).unwrap(); + + assert!(cs.is_satisfied().unwrap()); + assert!(r.value == Some(expected)); + + for b in r.bits.iter() { + match b { + Boolean::Is(b) => assert_eq!(b.value()?, (expected & 1 == 1)), + Boolean::Not(b) => assert_eq!(!b.value()?, (expected & 1 == 1)), + Boolean::Constant(_) => unreachable!(), + } + + expected >>= 1; + } + } + Ok(()) + } + + #[test] + fn test_rotr() -> Result<(), SynthesisError> { + let mut rng = ark_std::test_rng(); + + let mut num = rng.gen(); + + let a: $name = $name::constant(num); + + for i in 0..$size { + let b = a.rotr(i); + + assert!(b.value.unwrap() == num); + + let mut tmp = num; + for b in &b.bits { + match b { + Boolean::Constant(b) => assert_eq!(*b, tmp & 1 
== 1), + _ => unreachable!(), + } + + tmp >>= 1; + } + + num = num.rotate_right(1); + } + Ok(()) + } + } + } + }; +} diff --git a/arkworks/r1cs-std/src/bits/uint8.rs b/arkworks/r1cs-std/src/bits/uint8.rs new file mode 100644 index 00000000..dd479d0a --- /dev/null +++ b/arkworks/r1cs-std/src/bits/uint8.rs @@ -0,0 +1,543 @@ +use ark_ff::{Field, FpParameters, PrimeField, ToConstraintField}; + +use ark_relations::r1cs::{ConstraintSystemRef, Namespace, SynthesisError}; + +use crate::fields::fp::{AllocatedFp, FpVar}; +use crate::{prelude::*, Assignment, ToConstraintFieldGadget, Vec}; +use core::{borrow::Borrow, convert::TryFrom}; + +/// Represents an interpretation of 8 `Boolean` objects as an +/// unsigned integer. +#[derive(Clone, Debug)] +pub struct UInt8 { + /// Little-endian representation: least significant bit first + pub(crate) bits: [Boolean; 8], + pub(crate) value: Option, +} + +impl R1CSVar for UInt8 { + type Value = u8; + + fn cs(&self) -> ConstraintSystemRef { + self.bits.as_ref().cs() + } + + fn value(&self) -> Result { + let mut value = None; + for (i, bit) in self.bits.iter().enumerate() { + let b = u8::from(bit.value()?); + value = match value { + Some(value) => Some(value + (b << i)), + None => Some(b << i), + }; + } + debug_assert_eq!(self.value, value); + value.get() + } +} + +impl UInt8 { + /// Construct a constant vector of `UInt8` from a vector of `u8` + /// + /// This *does not* create any new variables or constraints. + /// ``` + /// # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { + /// // We'll use the BLS12-381 scalar field for our constraints. + /// use ark_test_curves::bls12_381::Fr; + /// use ark_relations::r1cs::*; + /// use ark_r1cs_std::prelude::*; + /// + /// let cs = ConstraintSystem::::new_ref(); + /// let var = vec![UInt8::new_witness(cs.clone(), || Ok(2))?]; + /// + /// let constant = UInt8::constant_vec(&[2]); + /// var.enforce_equal(&constant)?; + /// assert!(cs.is_satisfied().unwrap()); + /// # Ok(()) + /// # } + /// ``` + pub fn constant_vec(values: &[u8]) -> Vec { + let mut result = Vec::new(); + for value in values { + result.push(UInt8::constant(*value)); + } + result + } + + /// Construct a constant `UInt8` from a `u8` + /// + /// This *does not* create new variables or constraints. + /// + /// ``` + /// # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { + /// // We'll use the BLS12-381 scalar field for our constraints. + /// use ark_test_curves::bls12_381::Fr; + /// use ark_relations::r1cs::*; + /// use ark_r1cs_std::prelude::*; + /// + /// let cs = ConstraintSystem::::new_ref(); + /// let var = UInt8::new_witness(cs.clone(), || Ok(2))?; + /// + /// let constant = UInt8::constant(2); + /// var.enforce_equal(&constant)?; + /// assert!(cs.is_satisfied().unwrap()); + /// # Ok(()) + /// # } + /// ``` + pub fn constant(value: u8) -> Self { + let mut bits = [Boolean::FALSE; 8]; + + let mut tmp = value; + for i in 0..8 { + // If last bit is one, push one. + bits[i] = Boolean::constant((tmp & 1) == 1); + tmp >>= 1; + } + + Self { + bits, + value: Some(value), + } + } + + /// Allocates a slice of `u8`'s as private witnesses. 
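Not part of the original patch: a minimal usage sketch for the witness allocator below, assuming the `ark_test_curves::bls12_381::Fr` field that this file's own doctests use.

```rust
use ark_r1cs_std::prelude::*;
use ark_relations::r1cs::{ConstraintSystem, SynthesisError};
use ark_test_curves::bls12_381::Fr;

fn main() -> Result<(), SynthesisError> {
    let cs = ConstraintSystem::<Fr>::new_ref();
    // Each byte is witnessed as 8 `Boolean`s; no field-element packing happens here.
    let bytes = UInt8::new_witness_vec(cs.clone(), &[0xdeu8, 0xad, 0xbe, 0xef])?;
    assert_eq!(bytes.len(), 4);
    assert_eq!(bytes[0].value()?, 0xde);
    assert!(cs.is_satisfied().unwrap());
    Ok(())
}
```

Contrast this with `new_input_vec` below, which first packs the bytes into field elements so that fewer public inputs are allocated.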
+ pub fn new_witness_vec( + cs: impl Into>, + values: &[impl Into> + Copy], + ) -> Result, SynthesisError> { + let ns = cs.into(); + let cs = ns.cs(); + let mut output_vec = Vec::with_capacity(values.len()); + for value in values { + let byte: Option = Into::into(*value); + output_vec.push(Self::new_witness(cs.clone(), || byte.get())?); + } + Ok(output_vec) + } + + /// Allocates a slice of `u8`'s as public inputs by first packing them into + /// elements of `F`, (thus reducing the number of input allocations), + /// allocating these elements as public inputs, and then converting + /// these field variables `FpVar` variables back into bytes. + /// + /// From a user perspective, this trade-off adds constraints, but improves + /// verifier time and verification key size. + /// + /// ``` + /// # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { + /// // We'll use the BLS12-381 scalar field for our constraints. + /// use ark_test_curves::bls12_381::Fr; + /// use ark_relations::r1cs::*; + /// use ark_r1cs_std::prelude::*; + /// + /// let cs = ConstraintSystem::::new_ref(); + /// let two = UInt8::new_witness(cs.clone(), || Ok(2))?; + /// let var = vec![two.clone(); 32]; + /// + /// let c = UInt8::new_input_vec(cs.clone(), &[2; 32])?; + /// var.enforce_equal(&c)?; + /// assert!(cs.is_satisfied().unwrap()); + /// # Ok(()) + /// # } + /// ``` + pub fn new_input_vec( + cs: impl Into>, + values: &[u8], + ) -> Result, SynthesisError> + where + F: PrimeField, + { + let ns = cs.into(); + let cs = ns.cs(); + let values_len = values.len(); + let field_elements: Vec = ToConstraintField::::to_field_elements(values).unwrap(); + + let max_size = 8 * (F::Params::CAPACITY / 8) as usize; + let mut allocated_bits = Vec::new(); + for field_element in field_elements.into_iter() { + let fe = AllocatedFp::new_input(cs.clone(), || Ok(field_element))?; + let fe_bits = fe.to_bits_le()?; + + // Remove the most significant bit, because we know it should be zero + // because `values.to_field_elements()` only + // packs field elements up to the penultimate bit. + // That is, the most significant bit (`ConstraintF::NUM_BITS`-th bit) is + // unset, so we can just pop it off. + allocated_bits.extend_from_slice(&fe_bits[0..max_size]); + } + + // Chunk up slices of 8 bit into bytes. + Ok(allocated_bits[0..(8 * values_len)] + .chunks(8) + .map(Self::from_bits_le) + .collect()) + } + + /// Converts a little-endian byte order representation of bits into a + /// `UInt8`. + /// + /// ``` + /// # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { + /// // We'll use the BLS12-381 scalar field for our constraints. 
+ /// use ark_test_curves::bls12_381::Fr; + /// use ark_relations::r1cs::*; + /// use ark_r1cs_std::prelude::*; + /// + /// let cs = ConstraintSystem::::new_ref(); + /// let var = UInt8::new_witness(cs.clone(), || Ok(128))?; + /// + /// let f = Boolean::FALSE; + /// let t = Boolean::TRUE; + /// + /// // Construct [0, 0, 0, 0, 0, 0, 0, 1] + /// let mut bits = vec![f.clone(); 7]; + /// bits.push(t); + /// + /// let mut c = UInt8::from_bits_le(&bits); + /// var.enforce_equal(&c)?; + /// assert!(cs.is_satisfied().unwrap()); + /// # Ok(()) + /// # } + /// ``` + #[tracing::instrument(target = "r1cs")] + pub fn from_bits_le(bits: &[Boolean]) -> Self { + assert_eq!(bits.len(), 8); + let bits = <&[Boolean; 8]>::try_from(bits).unwrap().clone(); + + let mut value = Some(0u8); + for (i, b) in bits.iter().enumerate() { + value = match b.value().ok() { + Some(b) => value.map(|v| v + (u8::from(b) << i)), + None => None, + } + } + + Self { value, bits } + } + + /// Outputs `self ^ other`. + /// + /// If at least one of `self` and `other` are constants, then this method + /// *does not* create any constraints or variables. + /// + /// ``` + /// # fn main() -> Result<(), ark_relations::r1cs::SynthesisError> { + /// // We'll use the BLS12-381 scalar field for our constraints. + /// use ark_test_curves::bls12_381::Fr; + /// use ark_relations::r1cs::*; + /// use ark_r1cs_std::prelude::*; + /// + /// let cs = ConstraintSystem::::new_ref(); + /// let a = UInt8::new_witness(cs.clone(), || Ok(16))?; + /// let b = UInt8::new_witness(cs.clone(), || Ok(17))?; + /// let c = UInt8::new_witness(cs.clone(), || Ok(1))?; + /// + /// a.xor(&b)?.enforce_equal(&c)?; + /// assert!(cs.is_satisfied().unwrap()); + /// # Ok(()) + /// # } + /// ``` + #[tracing::instrument(target = "r1cs")] + pub fn xor(&self, other: &Self) -> Result { + let mut result = self.clone(); + result.value = match (self.value, other.value) { + (Some(a), Some(b)) => Some(a ^ b), + _ => None, + }; + + let new_bits = self.bits.iter().zip(&other.bits).map(|(a, b)| a.xor(b)); + + for (res, new) in result.bits.iter_mut().zip(new_bits) { + *res = new?; + } + + Ok(result) + } +} + +impl EqGadget for UInt8 { + #[tracing::instrument(target = "r1cs")] + fn is_eq(&self, other: &Self) -> Result, SynthesisError> { + self.bits.as_ref().is_eq(&other.bits) + } + + #[tracing::instrument(target = "r1cs")] + fn conditional_enforce_equal( + &self, + other: &Self, + condition: &Boolean, + ) -> Result<(), SynthesisError> { + self.bits.conditional_enforce_equal(&other.bits, condition) + } + + #[tracing::instrument(target = "r1cs")] + fn conditional_enforce_not_equal( + &self, + other: &Self, + condition: &Boolean, + ) -> Result<(), SynthesisError> { + self.bits + .conditional_enforce_not_equal(&other.bits, condition) + } +} + +impl CondSelectGadget for UInt8 { + #[tracing::instrument(target = "r1cs", skip(cond, true_value, false_value))] + fn conditionally_select( + cond: &Boolean, + true_value: &Self, + false_value: &Self, + ) -> Result { + let selected_bits = true_value + .bits + .iter() + .zip(&false_value.bits) + .map(|(t, f)| cond.select(t, f)); + let mut bits = [Boolean::FALSE; 8]; + for (result, new) in bits.iter_mut().zip(selected_bits) { + *result = new?; + } + + let value = cond.value().ok().and_then(|cond| { + if cond { + true_value.value().ok() + } else { + false_value.value().ok() + } + }); + Ok(Self { bits, value }) + } +} + +impl AllocVar for UInt8 { + fn new_variable>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns 
= cs.into(); + let cs = ns.cs(); + let value = f().map(|f| *f.borrow()).ok(); + + let mut values = [None; 8]; + if let Some(val) = value { + values + .iter_mut() + .enumerate() + .for_each(|(i, v)| *v = Some((val >> i) & 1 == 1)); + } + + let mut bits = [Boolean::FALSE; 8]; + for (b, v) in bits.iter_mut().zip(&values) { + *b = Boolean::new_variable(cs.clone(), || v.get(), mode)?; + } + Ok(Self { bits, value }) + } +} + +/// Parses the `Vec>` in fixed-sized `ConstraintF::Params::CAPACITY` chunks and +/// converts each chunk, which is assumed to be little-endian, to its `FpVar` +/// representation. +/// This is the gadget counterpart to the `[u8]` implementation of +/// [ToConstraintField](ark_ff::ToConstraintField). +impl ToConstraintFieldGadget for [UInt8] { + #[tracing::instrument(target = "r1cs")] + fn to_constraint_field(&self) -> Result>, SynthesisError> { + let max_size = (ConstraintF::Params::CAPACITY / 8) as usize; + self.chunks(max_size) + .map(|chunk| Boolean::le_bits_to_fp_var(chunk.to_bits_le()?.as_slice())) + .collect::, SynthesisError>>() + } +} + +impl ToConstraintFieldGadget for Vec> { + #[tracing::instrument(target = "r1cs")] + fn to_constraint_field(&self) -> Result>, SynthesisError> { + self.as_slice().to_constraint_field() + } +} + +#[cfg(test)] +mod test { + use super::UInt8; + use crate::fields::fp::FpVar; + use crate::prelude::AllocationMode::{Constant, Input, Witness}; + use crate::{prelude::*, ToConstraintFieldGadget, Vec}; + use ark_ff::{FpParameters, PrimeField, ToConstraintField}; + use ark_relations::r1cs::{ConstraintSystem, SynthesisError}; + use ark_std::rand::distributions::Uniform; + use ark_std::rand::Rng; + use ark_test_curves::bls12_381::Fr; + + #[test] + fn test_uint8_from_bits_to_bits() -> Result<(), SynthesisError> { + let cs = ConstraintSystem::::new_ref(); + let byte_val = 0b01110001; + let byte = + UInt8::new_witness(ark_relations::ns!(cs, "alloc value"), || Ok(byte_val)).unwrap(); + let bits = byte.to_bits_le()?; + for (i, bit) in bits.iter().enumerate() { + assert_eq!(bit.value()?, (byte_val >> i) & 1 == 1) + } + Ok(()) + } + + #[test] + fn test_uint8_new_input_vec() -> Result<(), SynthesisError> { + let cs = ConstraintSystem::::new_ref(); + let byte_vals = (64u8..128u8).collect::>(); + let bytes = + UInt8::new_input_vec(ark_relations::ns!(cs, "alloc value"), &byte_vals).unwrap(); + dbg!(bytes.value())?; + for (native, variable) in byte_vals.into_iter().zip(bytes) { + let bits = variable.to_bits_le()?; + for (i, bit) in bits.iter().enumerate() { + assert_eq!( + bit.value()?, + (native >> i) & 1 == 1, + "native value {}: bit {:?}", + native, + i + ) + } + } + Ok(()) + } + + #[test] + fn test_uint8_from_bits() -> Result<(), SynthesisError> { + let mut rng = ark_std::test_rng(); + + for _ in 0..1000 { + let v = (0..8) + .map(|_| Boolean::::Constant(rng.gen())) + .collect::>(); + + let val = UInt8::from_bits_le(&v); + + for (i, bit) in val.bits.iter().enumerate() { + match bit { + Boolean::Constant(b) => assert!(*b == ((val.value()? 
>> i) & 1 == 1)), + _ => unreachable!(), + } + } + + let expected_to_be_same = val.to_bits_le()?; + + for x in v.iter().zip(expected_to_be_same.iter()) { + match x { + (&Boolean::Constant(true), &Boolean::Constant(true)) => {} + (&Boolean::Constant(false), &Boolean::Constant(false)) => {} + _ => unreachable!(), + } + } + } + Ok(()) + } + + #[test] + fn test_uint8_xor() -> Result<(), SynthesisError> { + let mut rng = ark_std::test_rng(); + + for _ in 0..1000 { + let cs = ConstraintSystem::::new_ref(); + + let a: u8 = rng.gen(); + let b: u8 = rng.gen(); + let c: u8 = rng.gen(); + + let mut expected = a ^ b ^ c; + + let a_bit = UInt8::new_witness(ark_relations::ns!(cs, "a_bit"), || Ok(a)).unwrap(); + let b_bit = UInt8::constant(b); + let c_bit = UInt8::new_witness(ark_relations::ns!(cs, "c_bit"), || Ok(c)).unwrap(); + + let r = a_bit.xor(&b_bit).unwrap(); + let r = r.xor(&c_bit).unwrap(); + + assert!(cs.is_satisfied().unwrap()); + + assert!(r.value == Some(expected)); + + for b in r.bits.iter() { + match b { + Boolean::Is(b) => assert!(b.value()? == (expected & 1 == 1)), + Boolean::Not(b) => assert!(!b.value()? == (expected & 1 == 1)), + Boolean::Constant(b) => assert!(*b == (expected & 1 == 1)), + } + + expected >>= 1; + } + } + Ok(()) + } + + #[test] + fn test_uint8_to_constraint_field() -> Result<(), SynthesisError> { + let mut rng = ark_std::test_rng(); + let max_size = (::Params::CAPACITY / 8) as usize; + + let modes = [Input, Witness, Constant]; + for mode in &modes { + for _ in 0..1000 { + let cs = ConstraintSystem::::new_ref(); + + let bytes: Vec = (&mut rng) + .sample_iter(&Uniform::new_inclusive(0, u8::max_value())) + .take(max_size * 3 + 5) + .collect(); + + let bytes_var = bytes + .iter() + .map(|byte| UInt8::new_variable(cs.clone(), || Ok(*byte), *mode)) + .collect::, SynthesisError>>()?; + + let f_vec: Vec = bytes.to_field_elements().unwrap(); + let f_var_vec: Vec> = bytes_var.to_constraint_field()?; + + assert!(cs.is_satisfied().unwrap()); + assert_eq!(f_vec, f_var_vec.value()?); + } + } + + Ok(()) + } + + #[test] + fn test_uint8_random_access() { + let mut rng = ark_std::test_rng(); + + for _ in 0..100 { + let cs = ConstraintSystem::::new_ref(); + + // value array + let values: Vec = (0..128).map(|_| rng.gen()).collect(); + let values_const: Vec> = values.iter().map(|x| UInt8::constant(*x)).collect(); + + // index array + let position: Vec = (0..7).map(|_| rng.gen()).collect(); + let position_var: Vec> = position + .iter() + .map(|b| { + Boolean::new_witness(ark_relations::ns!(cs, "index_arr_element"), || Ok(*b)) + .unwrap() + }) + .collect(); + + // index + let mut index = 0; + for x in position { + index *= 2; + index += if x { 1 } else { 0 }; + } + + assert_eq!( + UInt8::conditionally_select_power_of_two_vector(&position_var, &values_const) + .unwrap() + .value() + .unwrap(), + values[index] + ) + } + } +} diff --git a/arkworks/r1cs-std/src/eq.rs b/arkworks/r1cs-std/src/eq.rs new file mode 100644 index 00000000..f1184619 --- /dev/null +++ b/arkworks/r1cs-std/src/eq.rs @@ -0,0 +1,130 @@ +use crate::{prelude::*, Vec}; +use ark_ff::Field; +use ark_relations::r1cs::SynthesisError; + +/// Specifies how to generate constraints that check for equality for two +/// variables of type `Self`. +pub trait EqGadget { + /// Output a `Boolean` value representing whether `self.value() == + /// other.value()`. + fn is_eq(&self, other: &Self) -> Result, SynthesisError>; + + /// Output a `Boolean` value representing whether `self.value() != + /// other.value()`. 
+ /// + /// By default, this is defined as `self.is_eq(other)?.not()`. + fn is_neq(&self, other: &Self) -> Result, SynthesisError> { + Ok(self.is_eq(other)?.not()) + } + + /// If `should_enforce == true`, enforce that `self` and `other` are equal; + /// else, enforce a vacuously true statement. + /// + /// A safe default implementation is provided that generates the following + /// constraints: `self.is_eq(other)?.conditional_enforce_equal(&Boolean: + /// :TRUE, should_enforce)`. + /// + /// More efficient specialized implementation may be possible; implementors + /// are encouraged to carefully analyze the efficiency and safety of these. + #[tracing::instrument(target = "r1cs", skip(self, other))] + fn conditional_enforce_equal( + &self, + other: &Self, + should_enforce: &Boolean, + ) -> Result<(), SynthesisError> { + self.is_eq(&other)? + .conditional_enforce_equal(&Boolean::constant(true), should_enforce) + } + + /// Enforce that `self` and `other` are equal. + /// + /// A safe default implementation is provided that generates the following + /// constraints: `self.conditional_enforce_equal(other, + /// &Boolean::TRUE)`. + /// + /// More efficient specialized implementation may be possible; implementors + /// are encouraged to carefully analyze the efficiency and safety of these. + #[tracing::instrument(target = "r1cs", skip(self, other))] + fn enforce_equal(&self, other: &Self) -> Result<(), SynthesisError> { + self.conditional_enforce_equal(other, &Boolean::constant(true)) + } + + /// If `should_enforce == true`, enforce that `self` and `other` are *not* + /// equal; else, enforce a vacuously true statement. + /// + /// A safe default implementation is provided that generates the following + /// constraints: `self.is_neq(other)?.conditional_enforce_equal(& + /// Boolean::TRUE, should_enforce)`. + /// + /// More efficient specialized implementation may be possible; implementors + /// are encouraged to carefully analyze the efficiency and safety of these. + #[tracing::instrument(target = "r1cs", skip(self, other))] + fn conditional_enforce_not_equal( + &self, + other: &Self, + should_enforce: &Boolean, + ) -> Result<(), SynthesisError> { + self.is_neq(&other)? + .conditional_enforce_equal(&Boolean::constant(true), should_enforce) + } + + /// Enforce that `self` and `other` are *not* equal. + /// + /// A safe default implementation is provided that generates the following + /// constraints: `self.conditional_enforce_not_equal(other, + /// &Boolean::TRUE)`. + /// + /// More efficient specialized implementation may be possible; implementors + /// are encouraged to carefully analyze the efficiency and safety of these. 
+ #[tracing::instrument(target = "r1cs", skip(self, other))] + fn enforce_not_equal(&self, other: &Self) -> Result<(), SynthesisError> { + self.conditional_enforce_not_equal(other, &Boolean::constant(true)) + } +} + +impl + R1CSVar, F: Field> EqGadget for [T] { + #[tracing::instrument(target = "r1cs", skip(self, other))] + fn is_eq(&self, other: &Self) -> Result, SynthesisError> { + assert_eq!(self.len(), other.len()); + assert!(!self.is_empty()); + let mut results = Vec::with_capacity(self.len()); + for (a, b) in self.iter().zip(other) { + results.push(a.is_eq(b)?); + } + Boolean::kary_and(&results) + } + + #[tracing::instrument(target = "r1cs", skip(self, other))] + fn conditional_enforce_equal( + &self, + other: &Self, + condition: &Boolean, + ) -> Result<(), SynthesisError> { + assert_eq!(self.len(), other.len()); + for (a, b) in self.iter().zip(other) { + a.conditional_enforce_equal(b, condition)?; + } + Ok(()) + } + + #[tracing::instrument(target = "r1cs", skip(self, other))] + fn conditional_enforce_not_equal( + &self, + other: &Self, + should_enforce: &Boolean, + ) -> Result<(), SynthesisError> { + assert_eq!(self.len(), other.len()); + let some_are_different = self.is_neq(other)?; + if [&some_are_different, should_enforce].is_constant() { + assert!(some_are_different.value().unwrap()); + Ok(()) + } else { + let cs = [&some_are_different, should_enforce].cs(); + cs.enforce_constraint( + some_are_different.lc(), + should_enforce.lc(), + should_enforce.lc(), + ) + } + } +} diff --git a/arkworks/r1cs-std/src/fields/cubic_extension.rs b/arkworks/r1cs-std/src/fields/cubic_extension.rs new file mode 100644 index 00000000..5a5f5aff --- /dev/null +++ b/arkworks/r1cs-std/src/fields/cubic_extension.rs @@ -0,0 +1,579 @@ +use ark_ff::{ + fields::{CubicExtField, CubicExtParameters, Field}, + Zero, +}; +use ark_relations::r1cs::{ConstraintSystemRef, Namespace, SynthesisError}; +use core::{borrow::Borrow, marker::PhantomData}; + +use crate::{ + fields::{fp::FpVar, FieldOpsBounds, FieldVar}, + prelude::*, + ToConstraintFieldGadget, Vec, +}; + +/// This struct is the `R1CS` equivalent of the cubic extension field type +/// in `ark-ff`, i.e. `ark_ff::CubicExtField`. +#[derive(Derivative)] +#[derivative(Debug(bound = "BF: core::fmt::Debug"), Clone(bound = "BF: Clone"))] +#[must_use] +pub struct CubicExtVar, P: CubicExtVarParams> +where + for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>, +{ + /// The zero-th coefficient of this field element. + pub c0: BF, + /// The first coefficient of this field element. + pub c1: BF, + /// The second coefficient of this field element. + pub c2: BF, + #[derivative(Debug = "ignore")] + _params: PhantomData
<P>
, +} + +/// This trait describes parameters that are used to implement arithmetic for +/// `CubicExtVar`. +pub trait CubicExtVarParams>: + CubicExtParameters +where + for<'a> &'a BF: FieldOpsBounds<'a, Self::BaseField, BF>, +{ + /// Multiply the base field of the `CubicExtVar` by the appropriate + /// Frobenius coefficient. This is equivalent to + /// `Self::mul_base_field_by_frob_coeff(c1, c2, power)`. + fn mul_base_field_vars_by_frob_coeff(c1: &mut BF, c2: &mut BF, power: usize); +} + +impl, P: CubicExtVarParams> CubicExtVar +where + for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>, +{ + /// Constructs a `CubicExtVar` from the underlying coefficients. + #[inline] + pub fn new(c0: BF, c1: BF, c2: BF) -> Self { + let _params = PhantomData; + Self { + c0, + c1, + c2, + _params, + } + } + + /// Multiplies a variable of the base field by the cubic nonresidue + /// `P::NONRESIDUE` that is used to construct the extension field. + #[inline] + pub fn mul_base_field_by_nonresidue(fe: &BF) -> Result { + Ok(fe * P::NONRESIDUE) + } + + /// Multiplies `self` by a constant from the base field. + #[inline] + pub fn mul_by_base_field_constant(&self, fe: P::BaseField) -> Self { + let c0 = &self.c0 * fe; + let c1 = &self.c1 * fe; + let c2 = &self.c2 * fe; + Self::new(c0, c1, c2) + } + + /// Sets `self = self.mul_by_base_field_constant(fe)`. + #[inline] + pub fn mul_assign_by_base_field_constant(&mut self, fe: P::BaseField) { + *self = (&*self).mul_by_base_field_constant(fe); + } +} + +impl R1CSVar for CubicExtVar +where + BF: FieldVar, + for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>, + P: CubicExtVarParams, +{ + type Value = CubicExtField
<P>
; + + fn cs(&self) -> ConstraintSystemRef { + [&self.c0, &self.c1, &self.c2].cs() + } + + #[inline] + fn value(&self) -> Result { + match (self.c0.value(), self.c1.value(), self.c2.value()) { + (Ok(c0), Ok(c1), Ok(c2)) => Ok(CubicExtField::new(c0, c1, c2)), + (..) => Err(SynthesisError::AssignmentMissing), + } + } +} + +impl From> for CubicExtVar +where + BF: FieldVar, + for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>, + P: CubicExtVarParams, +{ + fn from(other: Boolean) -> Self { + let c0 = BF::from(other); + let c1 = BF::zero(); + let c2 = BF::zero(); + Self::new(c0, c1, c2) + } +} + +impl<'a, BF, P> FieldOpsBounds<'a, CubicExtField
<P>
, CubicExtVar> for CubicExtVar +where + BF: FieldVar, + for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>, + P: CubicExtVarParams, +{ +} +impl<'a, BF, P> FieldOpsBounds<'a, CubicExtField
<P>
, CubicExtVar> for &'a CubicExtVar +where + BF: FieldVar, + for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>, + P: CubicExtVarParams, +{ +} + +impl FieldVar, P::BasePrimeField> for CubicExtVar +where + BF: FieldVar, + for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>, + P: CubicExtVarParams, +{ + fn constant(other: CubicExtField
<P>
) -> Self { + let c0 = BF::constant(other.c0); + let c1 = BF::constant(other.c1); + let c2 = BF::constant(other.c2); + Self::new(c0, c1, c2) + } + + fn zero() -> Self { + let c0 = BF::zero(); + let c1 = BF::zero(); + let c2 = BF::zero(); + Self::new(c0, c1, c2) + } + + fn one() -> Self { + let c0 = BF::one(); + let c1 = BF::zero(); + let c2 = BF::zero(); + Self::new(c0, c1, c2) + } + + #[inline] + #[tracing::instrument(target = "r1cs")] + fn double(&self) -> Result { + let c0 = self.c0.double()?; + let c1 = self.c1.double()?; + let c2 = self.c2.double()?; + Ok(Self::new(c0, c1, c2)) + } + + #[inline] + #[tracing::instrument(target = "r1cs")] + fn negate(&self) -> Result { + let mut result = self.clone(); + result.c0.negate_in_place()?; + result.c1.negate_in_place()?; + result.c2.negate_in_place()?; + Ok(result) + } + + /// Use the Chung-Hasan asymmetric squaring formula. + /// + /// (Devegili OhEig Scott Dahab --- Multiplication and Squaring on + /// Abstract Pairing-Friendly + /// Fields.pdf; Section 4 (CH-SQR2)) + #[inline] + #[tracing::instrument(target = "r1cs")] + fn square(&self) -> Result { + let a = self.c0.clone(); + let b = self.c1.clone(); + let c = self.c2.clone(); + + let s0 = a.square()?; + let ab = &a * &b; + let s1 = ab.double()?; + let s2 = (&a - &b + &c).square()?; + let s3 = (&b * &c).double()?; + let s4 = c.square()?; + + let c0 = Self::mul_base_field_by_nonresidue(&s3)? + &s0; + let c1 = Self::mul_base_field_by_nonresidue(&s4)? + &s1; + let c2 = s1 + &s2 + &s3 - &s0 - &s4; + + Ok(Self::new(c0, c1, c2)) + } + + #[tracing::instrument(target = "r1cs")] + fn mul_equals(&self, other: &Self, result: &Self) -> Result<(), SynthesisError> { + // Karatsuba multiplication for cubic extensions: + // v0 = A.c0 * B.c0 + // v1 = A.c1 * B.c1 + // v2 = A.c2 * B.c2 + // result.c0 = v0 + β((a1 + a2)(b1 + b2) − v1 − v2) + // result.c1 = (a0 + a1)(b0 + b1) − v0 − v1 + βv2 + // result.c2 = (a0 + a2)(b0 + b2) − v0 + v1 − v2, + // We enforce this with six constraints: + // + // v0 = A.c0 * B.c0 + // v1 = A.c1 * B.c1 + // v2 = A.c2 * B.c2 + // + // result.c0 - v0 + \beta*(v1 + v2) = β(a1 + a2)(b1 + b2)) + // result.c1 + v0 + v1 - βv2 = (a0 + a1)(b0 + b1) + // result.c2 + v0 - v1 + v2 = (a0 + a2)(b0 + b2) + // Reference: + // "Multiplication and Squaring on Pairing-Friendly Fields" + // Devegili, OhEigeartaigh, Scott, Dahab + // + // This implementation adapted from + // https://github.com/ZencashOfficial/ginger-lib/blob/development/r1cs/gadgets/std/src/fields/fp3.rs + let v0 = &self.c0 * &other.c0; + let v1 = &self.c1 * &other.c1; + let v2 = &self.c2 * &other.c2; + + // Check c0 + let nr_a1_plus_a2 = (&self.c1 + &self.c2) * P::NONRESIDUE; + let b1_plus_b2 = &other.c1 + &other.c2; + let nr_v1 = &v1 * P::NONRESIDUE; + let nr_v2 = &v2 * P::NONRESIDUE; + let to_check = &result.c0 - &v0 + &nr_v1 + &nr_v2; + nr_a1_plus_a2.mul_equals(&b1_plus_b2, &to_check)?; + + // Check c1 + let a0_plus_a1 = &self.c0 + &self.c1; + let b0_plus_b1 = &other.c0 + &other.c1; + let to_check = &result.c1 - &nr_v2 + &v0 + &v1; + a0_plus_a1.mul_equals(&b0_plus_b1, &to_check)?; + + // Check c2 + let a0_plus_a2 = &self.c0 + &self.c2; + let b0_plus_b2 = &other.c0 + &other.c2; + let to_check = &result.c2 + &v0 - &v1 + &v2; + a0_plus_a2.mul_equals(&b0_plus_b2, &to_check)?; + Ok(()) + } + + #[tracing::instrument(target = "r1cs")] + fn frobenius_map(&self, power: usize) -> Result { + let mut result = self.clone(); + result.c0.frobenius_map_in_place(power)?; + result.c1.frobenius_map_in_place(power)?; + 
result.c2.frobenius_map_in_place(power)?; + + P::mul_base_field_vars_by_frob_coeff(&mut result.c1, &mut result.c2, power); + Ok(result) + } + + #[tracing::instrument(target = "r1cs")] + fn inverse(&self) -> Result { + let mode = if self.is_constant() { + AllocationMode::Constant + } else { + AllocationMode::Witness + }; + let inverse = Self::new_variable( + self.cs(), + || { + self.value() + .map(|f| f.inverse().unwrap_or_else(CubicExtField::zero)) + }, + mode, + )?; + self.mul_equals(&inverse, &Self::one())?; + Ok(inverse) + } +} + +impl_bounded_ops!( + CubicExtVar, + CubicExtField
<P>
, + Add, + add, + AddAssign, + add_assign, + |this: &'a CubicExtVar, other: &'a CubicExtVar| { + let c0 = &this.c0 + &other.c0; + let c1 = &this.c1 + &other.c1; + let c2 = &this.c2 + &other.c2; + CubicExtVar::new(c0, c1, c2) + }, + |this: &'a CubicExtVar, other: CubicExtField
<P>
| { + this + CubicExtVar::constant(other) + }, + (BF: FieldVar, P: CubicExtVarParams), + for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>, +); +impl_bounded_ops!( + CubicExtVar, + CubicExtField
<P>
, + Sub, + sub, + SubAssign, + sub_assign, + |this: &'a CubicExtVar, other: &'a CubicExtVar| { + let c0 = &this.c0 - &other.c0; + let c1 = &this.c1 - &other.c1; + let c2 = &this.c2 - &other.c2; + CubicExtVar::new(c0, c1, c2) + }, + |this: &'a CubicExtVar, other: CubicExtField
<P>
| { + this - CubicExtVar::constant(other) + }, + (BF: FieldVar, P: CubicExtVarParams), + for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>, +); +impl_bounded_ops!( + CubicExtVar, + CubicExtField
<P>
, + Mul, + mul, + MulAssign, + mul_assign, + |this: &'a CubicExtVar, other: &'a CubicExtVar| { + // Karatsuba multiplication for cubic extensions: + // v0 = A.c0 * B.c0 + // v1 = A.c1 * B.c1 + // v2 = A.c2 * B.c2 + // result.c0 = v0 + β((a1 + a2)(b1 + b2) − v1 − v2) + // result.c1 = (a0 + a1)(b0 + b1) − v0 − v1 + βv2 + // result.c2 = (a0 + a2)(b0 + b2) − v0 + v1 − v2, + // + // Reference: + // "Multiplication and Squaring on Pairing-Friendly Fields" + // Devegili, OhEigeartaigh, Scott, Dahab + let v0 = &this.c0 * &other.c0; + let v1 = &this.c1 * &other.c1; + let v2 = &this.c2 * &other.c2; + let c0 = + (((&this.c1 + &this.c2) * (&other.c1 + &other.c2) - &v1 - &v2) * P::NONRESIDUE) + &v0 ; + let c1 = + (&this.c0 + &this.c1) * (&other.c0 + &other.c1) - &v0 - &v1 + (&v2 * P::NONRESIDUE); + let c2 = + (&this.c0 + &this.c2) * (&other.c0 + &other.c2) - &v0 + &v1 - &v2; + + CubicExtVar::new(c0, c1, c2) + }, + |this: &'a CubicExtVar, other: CubicExtField
<P>
| { + this * CubicExtVar::constant(other) + }, + (BF: FieldVar, P: CubicExtVarParams), + for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>, +); + +impl EqGadget for CubicExtVar +where + BF: FieldVar, + for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>, + P: CubicExtVarParams, +{ + #[tracing::instrument(target = "r1cs")] + fn is_eq(&self, other: &Self) -> Result, SynthesisError> { + let b0 = self.c0.is_eq(&other.c0)?; + let b1 = self.c1.is_eq(&other.c1)?; + let b2 = self.c2.is_eq(&other.c2)?; + b0.and(&b1)?.and(&b2) + } + + #[inline] + #[tracing::instrument(target = "r1cs")] + fn conditional_enforce_equal( + &self, + other: &Self, + condition: &Boolean, + ) -> Result<(), SynthesisError> { + self.c0.conditional_enforce_equal(&other.c0, condition)?; + self.c1.conditional_enforce_equal(&other.c1, condition)?; + self.c2.conditional_enforce_equal(&other.c2, condition)?; + Ok(()) + } + + #[inline] + #[tracing::instrument(target = "r1cs")] + fn conditional_enforce_not_equal( + &self, + other: &Self, + condition: &Boolean, + ) -> Result<(), SynthesisError> { + let is_equal = self.is_eq(other)?; + is_equal + .and(condition)? + .enforce_equal(&Boolean::Constant(false)) + } +} + +impl ToBitsGadget for CubicExtVar +where + BF: FieldVar, + for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>, + P: CubicExtVarParams, +{ + #[tracing::instrument(target = "r1cs")] + fn to_bits_le(&self) -> Result>, SynthesisError> { + let mut c0 = self.c0.to_bits_le()?; + let mut c1 = self.c1.to_bits_le()?; + let mut c2 = self.c2.to_bits_le()?; + c0.append(&mut c1); + c0.append(&mut c2); + Ok(c0) + } + + #[tracing::instrument(target = "r1cs")] + fn to_non_unique_bits_le(&self) -> Result>, SynthesisError> { + let mut c0 = self.c0.to_non_unique_bits_le()?; + let mut c1 = self.c1.to_non_unique_bits_le()?; + let mut c2 = self.c2.to_non_unique_bits_le()?; + c0.append(&mut c1); + c0.append(&mut c2); + Ok(c0) + } +} + +impl ToBytesGadget for CubicExtVar +where + BF: FieldVar, + for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>, + P: CubicExtVarParams, +{ + #[tracing::instrument(target = "r1cs")] + fn to_bytes(&self) -> Result>, SynthesisError> { + let mut c0 = self.c0.to_bytes()?; + let mut c1 = self.c1.to_bytes()?; + let mut c2 = self.c2.to_bytes()?; + c0.append(&mut c1); + c0.append(&mut c2); + + Ok(c0) + } + + #[tracing::instrument(target = "r1cs")] + fn to_non_unique_bytes(&self) -> Result>, SynthesisError> { + let mut c0 = self.c0.to_non_unique_bytes()?; + let mut c1 = self.c1.to_non_unique_bytes()?; + let mut c2 = self.c2.to_non_unique_bytes()?; + + c0.append(&mut c1); + c0.append(&mut c2); + + Ok(c0) + } +} + +impl ToConstraintFieldGadget for CubicExtVar +where + BF: FieldVar, + for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>, + P: CubicExtVarParams, + BF: ToConstraintFieldGadget, +{ + #[tracing::instrument(target = "r1cs")] + fn to_constraint_field(&self) -> Result>, SynthesisError> { + let mut res = Vec::new(); + + res.extend_from_slice(&self.c0.to_constraint_field()?); + res.extend_from_slice(&self.c1.to_constraint_field()?); + res.extend_from_slice(&self.c2.to_constraint_field()?); + + Ok(res) + } +} + +impl CondSelectGadget for CubicExtVar +where + BF: FieldVar, + for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>, + P: CubicExtVarParams, +{ + #[inline] + #[tracing::instrument(target = "r1cs")] + fn conditionally_select( + cond: &Boolean, + true_value: &Self, + false_value: &Self, + ) -> Result { + let c0 = BF::conditionally_select(cond, &true_value.c0, &false_value.c0)?; + let c1 = 
BF::conditionally_select(cond, &true_value.c1, &false_value.c1)?; + let c2 = BF::conditionally_select(cond, &true_value.c2, &false_value.c2)?; + Ok(Self::new(c0, c1, c2)) + } +} + +impl TwoBitLookupGadget for CubicExtVar +where + BF: FieldVar + + TwoBitLookupGadget, + for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>, + P: CubicExtVarParams, +{ + type TableConstant = CubicExtField
<P>
; + + #[tracing::instrument(target = "r1cs")] + fn two_bit_lookup( + b: &[Boolean], + c: &[Self::TableConstant], + ) -> Result { + let c0s = c.iter().map(|f| f.c0).collect::>(); + let c1s = c.iter().map(|f| f.c1).collect::>(); + let c2s = c.iter().map(|f| f.c2).collect::>(); + let c0 = BF::two_bit_lookup(b, &c0s)?; + let c1 = BF::two_bit_lookup(b, &c1s)?; + let c2 = BF::two_bit_lookup(b, &c2s)?; + Ok(Self::new(c0, c1, c2)) + } +} + +impl ThreeBitCondNegLookupGadget for CubicExtVar +where + BF: FieldVar + + ThreeBitCondNegLookupGadget, + for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>, + P: CubicExtVarParams, +{ + type TableConstant = CubicExtField
<P>
; + + #[tracing::instrument(target = "r1cs")] + fn three_bit_cond_neg_lookup( + b: &[Boolean], + b0b1: &Boolean, + c: &[Self::TableConstant], + ) -> Result { + let c0s = c.iter().map(|f| f.c0).collect::>(); + let c1s = c.iter().map(|f| f.c1).collect::>(); + let c2s = c.iter().map(|f| f.c2).collect::>(); + let c0 = BF::three_bit_cond_neg_lookup(b, b0b1, &c0s)?; + let c1 = BF::three_bit_cond_neg_lookup(b, b0b1, &c1s)?; + let c2 = BF::three_bit_cond_neg_lookup(b, b0b1, &c2s)?; + Ok(Self::new(c0, c1, c2)) + } +} + +impl AllocVar, P::BasePrimeField> for CubicExtVar +where + BF: FieldVar, + for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>, + P: CubicExtVarParams, +{ + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + + use SynthesisError::*; + let (c0, c1, c2) = match f() { + Ok(fe) => (Ok(fe.borrow().c0), Ok(fe.borrow().c1), Ok(fe.borrow().c2)), + Err(_) => ( + Err(AssignmentMissing), + Err(AssignmentMissing), + Err(AssignmentMissing), + ), + }; + + let c0 = BF::new_variable(ark_relations::ns!(cs, "c0"), || c0, mode)?; + let c1 = BF::new_variable(ark_relations::ns!(cs, "c1"), || c1, mode)?; + let c2 = BF::new_variable(ark_relations::ns!(cs, "c2"), || c2, mode)?; + Ok(Self::new(c0, c1, c2)) + } +} diff --git a/arkworks/r1cs-std/src/fields/fp/cmp.rs b/arkworks/r1cs-std/src/fields/fp/cmp.rs new file mode 100644 index 00000000..5a826359 --- /dev/null +++ b/arkworks/r1cs-std/src/fields/fp/cmp.rs @@ -0,0 +1,243 @@ +use crate::{ + boolean::Boolean, + fields::{fp::FpVar, FieldVar}, + prelude::*, + ToBitsGadget, +}; +use ark_ff::PrimeField; +use ark_relations::r1cs::{SynthesisError, Variable}; +use core::cmp::Ordering; + +impl FpVar { + /// This function enforces the ordering between `self` and `other`. The + /// constraint system will not be satisfied otherwise. If `self` should + /// also be checked for equality, e.g. `self <= other` instead of `self < + /// other`, set `should_also_check_quality` to `true`. This variant + /// verifies `self` and `other` are `<= (p-1)/2`. + #[tracing::instrument(target = "r1cs")] + pub fn enforce_cmp( + &self, + other: &FpVar, + ordering: Ordering, + should_also_check_equality: bool, + ) -> Result<(), SynthesisError> { + let (left, right) = self.process_cmp_inputs(other, ordering, should_also_check_equality)?; + left.enforce_smaller_than(&right) + } + + /// This function enforces the ordering between `self` and `other`. The + /// constraint system will not be satisfied otherwise. If `self` should + /// also be checked for equality, e.g. `self <= other` instead of `self < + /// other`, set `should_also_check_quality` to `true`. This variant + /// assumes `self` and `other` are `<= (p-1)/2` and does not generate + /// constraints to verify that. + #[tracing::instrument(target = "r1cs")] + pub fn enforce_cmp_unchecked( + &self, + other: &FpVar, + ordering: Ordering, + should_also_check_equality: bool, + ) -> Result<(), SynthesisError> { + let (left, right) = self.process_cmp_inputs(other, ordering, should_also_check_equality)?; + left.enforce_smaller_than_unchecked(&right) + } + + /// This function checks the ordering between `self` and `other`. It outputs + /// self `Boolean` that contains the result - `1` if true, `0` + /// otherwise. The constraint system will be satisfied in any case. If + /// `self` should also be checked for equality, e.g. `self <= other` + /// instead of `self < other`, set `should_also_check_quality` to + /// `true`. 
This variant verifies `self` and `other` are `<= (p-1)/2`. + #[tracing::instrument(target = "r1cs")] + pub fn is_cmp( + &self, + other: &FpVar, + ordering: Ordering, + should_also_check_equality: bool, + ) -> Result, SynthesisError> { + let (left, right) = self.process_cmp_inputs(other, ordering, should_also_check_equality)?; + left.is_smaller_than(&right) + } + + /// This function checks the ordering between `self` and `other`. It outputs + /// a `Boolean` that contains the result - `1` if true, `0` otherwise. + /// The constraint system will be satisfied in any case. If `self` + /// should also be checked for equality, e.g. `self <= other` instead of + /// `self < other`, set `should_also_check_quality` to `true`. This + /// variant assumes `self` and `other` are `<= (p-1)/2` and does not + /// generate constraints to verify that. + #[tracing::instrument(target = "r1cs")] + pub fn is_cmp_unchecked( + &self, + other: &FpVar, + ordering: Ordering, + should_also_check_equality: bool, + ) -> Result, SynthesisError> { + let (left, right) = self.process_cmp_inputs(other, ordering, should_also_check_equality)?; + left.is_smaller_than_unchecked(&right) + } + + fn process_cmp_inputs( + &self, + other: &Self, + ordering: Ordering, + should_also_check_equality: bool, + ) -> Result<(Self, Self), SynthesisError> { + let (left, right) = match ordering { + Ordering::Less => (self, other), + Ordering::Greater => (other, self), + Ordering::Equal => return Err(SynthesisError::Unsatisfiable), + }; + let right_for_check = if should_also_check_equality { + right + F::one() + } else { + right.clone() + }; + + Ok((left.clone(), right_for_check)) + } + + /// Helper function to enforce that `self <= (p-1)/2`. + #[tracing::instrument(target = "r1cs")] + pub fn enforce_smaller_or_equal_than_mod_minus_one_div_two( + &self, + ) -> Result<(), SynthesisError> { + // It's okay to use `to_non_unique_bits` bits here because we're enforcing + // self <= (p-1)/2, which implies self < p. + let _ = Boolean::enforce_smaller_or_equal_than_le( + &self.to_non_unique_bits_le()?, + F::modulus_minus_one_div_two(), + )?; + Ok(()) + } + + /// Helper function to check `self < other` and output a result bit. This + /// function verifies `self` and `other` are `<= (p-1)/2`. + fn is_smaller_than(&self, other: &FpVar) -> Result, SynthesisError> { + self.enforce_smaller_or_equal_than_mod_minus_one_div_two()?; + other.enforce_smaller_or_equal_than_mod_minus_one_div_two()?; + self.is_smaller_than_unchecked(other) + } + + /// Helper function to check `self < other` and output a result bit. This + /// function assumes `self` and `other` are `<= (p-1)/2` and does not + /// generate constraints to verify that. + fn is_smaller_than_unchecked(&self, other: &FpVar) -> Result, SynthesisError> { + Ok((self - other) + .double()? + .to_bits_le()? + .first() + .unwrap() + .clone()) + } + + /// Helper function to enforce `self < other`. This function verifies `self` + /// and `other` are `<= (p-1)/2`. + fn enforce_smaller_than(&self, other: &FpVar) -> Result<(), SynthesisError> { + self.enforce_smaller_or_equal_than_mod_minus_one_div_two()?; + other.enforce_smaller_or_equal_than_mod_minus_one_div_two()?; + self.enforce_smaller_than_unchecked(other) + } + + /// Helper function to enforce `self < other`. This function assumes `self` + /// and `other` are `<= (p-1)/2` and does not generate constraints to + /// verify that. 
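The `unchecked` variants rely on the following fact: if `a, b <= (p-1)/2`, then `a - b` lies in the range `(-(p-1)/2, (p-1)/2)`; doubling maps the negative residues (which have the form `p - k` for small `k`) to odd field elements `p - 2k`, while non-negative differences stay even, so the low bit of `2 * (a - b)` is exactly the `a < b` indicator. A usage sketch (not part of the original patch), again over `ark_test_curves::bls12_381::Fr`:

```rust
use core::cmp::Ordering;
use ark_r1cs_std::{fields::fp::FpVar, prelude::*};
use ark_relations::r1cs::{ConstraintSystem, SynthesisError};
use ark_test_curves::bls12_381::Fr;

fn main() -> Result<(), SynthesisError> {
    let cs = ConstraintSystem::<Fr>::new_ref();
    let a = FpVar::new_witness(cs.clone(), || Ok(Fr::from(3u8)))?;
    let b = FpVar::new_witness(cs.clone(), || Ok(Fr::from(5u8)))?;
    // Enforce `a <= b`; both witnesses are tiny, so the `(p-1)/2` range
    // checks performed by the checked variant are satisfied.
    a.enforce_cmp(&b, Ordering::Less, true)?;
    assert!(cs.is_satisfied().unwrap());
    Ok(())
}
```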
+ fn enforce_smaller_than_unchecked(&self, other: &FpVar) -> Result<(), SynthesisError> { + let is_smaller_than = self.is_smaller_than_unchecked(other)?; + let lc_one = lc!() + Variable::One; + [self, other] + .cs() + .enforce_constraint(is_smaller_than.lc(), lc_one.clone(), lc_one) + } +} + +#[cfg(test)] +mod test { + use ark_std::cmp::Ordering; + use ark_std::rand::Rng; + + use crate::{alloc::AllocVar, fields::fp::FpVar}; + use ark_ff::{PrimeField, UniformRand}; + use ark_relations::r1cs::ConstraintSystem; + use ark_test_curves::bls12_381::Fr; + + #[test] + fn test_cmp() { + let mut rng = ark_std::test_rng(); + fn rand_in_range(rng: &mut R) -> Fr { + let pminusonedivtwo: Fr = Fr::modulus_minus_one_div_two().into(); + let mut r; + loop { + r = Fr::rand(rng); + if r <= pminusonedivtwo { + break; + } + } + r + } + for i in 0..10 { + let cs = ConstraintSystem::::new_ref(); + let a = rand_in_range(&mut rng); + let a_var = FpVar::::new_witness(cs.clone(), || Ok(a)).unwrap(); + let b = rand_in_range(&mut rng); + let b_var = FpVar::::new_witness(cs.clone(), || Ok(b)).unwrap(); + + match a.cmp(&b) { + Ordering::Less => { + a_var.enforce_cmp(&b_var, Ordering::Less, false).unwrap(); + a_var.enforce_cmp(&b_var, Ordering::Less, true).unwrap(); + } + Ordering::Greater => { + a_var.enforce_cmp(&b_var, Ordering::Greater, false).unwrap(); + a_var.enforce_cmp(&b_var, Ordering::Greater, true).unwrap(); + } + _ => {} + } + + if i == 0 { + println!("number of constraints: {}", cs.num_constraints()); + } + assert!(cs.is_satisfied().unwrap()); + } + println!("Finished with satisfaction tests"); + + for _i in 0..10 { + let cs = ConstraintSystem::::new_ref(); + let a = rand_in_range(&mut rng); + let a_var = FpVar::::new_witness(cs.clone(), || Ok(a)).unwrap(); + let b = rand_in_range(&mut rng); + let b_var = FpVar::::new_witness(cs.clone(), || Ok(b)).unwrap(); + + match b.cmp(&a) { + Ordering::Less => { + a_var.enforce_cmp(&b_var, Ordering::Less, false).unwrap(); + a_var.enforce_cmp(&b_var, Ordering::Less, true).unwrap(); + } + Ordering::Greater => { + a_var.enforce_cmp(&b_var, Ordering::Greater, false).unwrap(); + a_var.enforce_cmp(&b_var, Ordering::Greater, true).unwrap(); + } + _ => {} + } + + assert!(!cs.is_satisfied().unwrap()); + } + + for _i in 0..10 { + let cs = ConstraintSystem::::new_ref(); + let a = rand_in_range(&mut rng); + let a_var = FpVar::::new_witness(cs.clone(), || Ok(a)).unwrap(); + a_var.enforce_cmp(&a_var, Ordering::Less, false).unwrap(); + + assert!(!cs.is_satisfied().unwrap()); + } + + for _i in 0..10 { + let cs = ConstraintSystem::::new_ref(); + let a = rand_in_range(&mut rng); + let a_var = FpVar::::new_witness(cs.clone(), || Ok(a)).unwrap(); + a_var.enforce_cmp(&a_var, Ordering::Less, true).unwrap(); + assert!(cs.is_satisfied().unwrap()); + } + } +} diff --git a/arkworks/r1cs-std/src/fields/fp/mod.rs b/arkworks/r1cs-std/src/fields/fp/mod.rs new file mode 100644 index 00000000..e3535dfc --- /dev/null +++ b/arkworks/r1cs-std/src/fields/fp/mod.rs @@ -0,0 +1,1004 @@ +use ark_ff::{BigInteger, FpParameters, PrimeField}; +use ark_relations::r1cs::{ + ConstraintSystemRef, LinearCombination, Namespace, SynthesisError, Variable, +}; + +use core::borrow::Borrow; + +use crate::{ + fields::{FieldOpsBounds, FieldVar}, + prelude::*, + Assignment, ToConstraintFieldGadget, Vec, +}; + +mod cmp; + +/// Represents a variable in the constraint system whose +/// value can be an arbitrary field element. 
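Not part of the original patch: a short sketch of the constant/variable split that `FpVar` introduces below. Constants are stored inline and never touch the constraint system, so mixing them with witnesses costs nothing until a constraint is actually enforced.

```rust
use ark_r1cs_std::{fields::fp::FpVar, prelude::*};
use ark_relations::r1cs::{ConstraintSystem, SynthesisError};
use ark_test_curves::bls12_381::Fr;

fn main() -> Result<(), SynthesisError> {
    let cs = ConstraintSystem::<Fr>::new_ref();
    // A constant allocates nothing in the constraint system...
    let c = FpVar::constant(Fr::from(7u8));
    assert!(c.is_constant());
    // ...while a witness is backed by an `AllocatedFp` holding a `Variable`.
    let w = FpVar::new_witness(cs.clone(), || Ok(Fr::from(7u8)))?;
    w.enforce_equal(&c)?;
    assert!(cs.is_satisfied().unwrap());
    Ok(())
}
```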
+#[derive(Debug, Clone)] +#[must_use] +pub struct AllocatedFp { + pub(crate) value: Option, + /// The allocated variable corresponding to `self` in `self.cs`. + pub variable: Variable, + /// The constraint system that `self` was allocated in. + pub cs: ConstraintSystemRef, +} + +impl AllocatedFp { + /// Constructs a new `AllocatedFp` from a (optional) value, a low-level + /// Variable, and a `ConstraintSystemRef`. + pub fn new(value: Option, variable: Variable, cs: ConstraintSystemRef) -> Self { + Self { + value, + variable, + cs, + } + } +} + +/// Represent variables corresponding to a field element in `F`. +#[derive(Clone, Debug)] +#[must_use] +pub enum FpVar { + /// Represents a constant in the constraint system, which means that + /// it does not have a corresponding variable. + Constant(F), + /// Represents an allocated variable constant in the constraint system. + Var(AllocatedFp), +} + +impl R1CSVar for FpVar { + type Value = F; + + fn cs(&self) -> ConstraintSystemRef { + match self { + Self::Constant(_) => ConstraintSystemRef::None, + Self::Var(a) => a.cs.clone(), + } + } + + fn value(&self) -> Result { + match self { + Self::Constant(v) => Ok(*v), + Self::Var(v) => v.value(), + } + } +} + +impl From> for FpVar { + fn from(other: Boolean) -> Self { + if let Boolean::Constant(b) = other { + Self::Constant(F::from(b as u8)) + } else { + // `other` is a variable + let cs = other.cs(); + let variable = cs.new_lc(other.lc()).unwrap(); + Self::Var(AllocatedFp::new( + other.value().ok().map(|b| F::from(b as u8)), + variable, + cs, + )) + } + } +} + +impl From> for FpVar { + fn from(other: AllocatedFp) -> Self { + Self::Var(other) + } +} + +impl<'a, F: PrimeField> FieldOpsBounds<'a, F, Self> for FpVar {} +impl<'a, F: PrimeField> FieldOpsBounds<'a, F, FpVar> for &'a FpVar {} + +impl AllocatedFp { + /// Constructs `Self` from a `Boolean`: if `other` is false, this outputs + /// `zero`, else it outputs `one`. + pub fn from(other: Boolean) -> Self { + let cs = other.cs(); + let variable = cs.new_lc(other.lc()).unwrap(); + Self::new(other.value().ok().map(|b| F::from(b as u8)), variable, cs) + } + + /// Returns the value assigned to `self` in the underlying constraint system + /// (if a value was assigned). + pub fn value(&self) -> Result { + self.cs.assigned_value(self.variable).get() + } + + /// Outputs `self + other`. + /// + /// This does not create any constraints. + #[tracing::instrument(target = "r1cs")] + pub fn add(&self, other: &Self) -> Self { + let value = match (self.value, other.value) { + (Some(val1), Some(val2)) => Some(val1 + &val2), + (..) => None, + }; + + let variable = self + .cs + .new_lc(lc!() + self.variable + other.variable) + .unwrap(); + AllocatedFp::new(value, variable, self.cs.clone()) + } + + /// Outputs `self - other`. + /// + /// This does not create any constraints. + #[tracing::instrument(target = "r1cs")] + pub fn sub(&self, other: &Self) -> Self { + let value = match (self.value, other.value) { + (Some(val1), Some(val2)) => Some(val1 - &val2), + (..) => None, + }; + + let variable = self + .cs + .new_lc(lc!() + self.variable - other.variable) + .unwrap(); + AllocatedFp::new(value, variable, self.cs.clone()) + } + + /// Outputs `self * other`. + /// + /// This requires *one* constraint. + #[tracing::instrument(target = "r1cs")] + pub fn mul(&self, other: &Self) -> Self { + let product = AllocatedFp::new_witness(self.cs.clone(), || { + Ok(self.value.get()? * &other.value.get()?) 
+ }) + .unwrap(); + self.cs + .enforce_constraint( + lc!() + self.variable, + lc!() + other.variable, + lc!() + product.variable, + ) + .unwrap(); + product + } + + /// Output `self + other` + /// + /// This does not create any constraints. + #[tracing::instrument(target = "r1cs")] + pub fn add_constant(&self, other: F) -> Self { + if other.is_zero() { + self.clone() + } else { + let value = self.value.map(|val| val + other); + let variable = self + .cs + .new_lc(lc!() + self.variable + (other, Variable::One)) + .unwrap(); + AllocatedFp::new(value, variable, self.cs.clone()) + } + } + + /// Output `self - other` + /// + /// This does not create any constraints. + #[tracing::instrument(target = "r1cs")] + pub fn sub_constant(&self, other: F) -> Self { + self.add_constant(-other) + } + + /// Output `self * other` + /// + /// This does not create any constraints. + #[tracing::instrument(target = "r1cs")] + pub fn mul_constant(&self, other: F) -> Self { + if other.is_one() { + self.clone() + } else { + let value = self.value.map(|val| val * other); + let variable = self.cs.new_lc(lc!() + (other, self.variable)).unwrap(); + AllocatedFp::new(value, variable, self.cs.clone()) + } + } + + /// Output `self + self` + /// + /// This does not create any constraints. + #[tracing::instrument(target = "r1cs")] + pub fn double(&self) -> Result { + let value = self.value.map(|val| val.double()); + let variable = self.cs.new_lc(lc!() + self.variable + self.variable)?; + Ok(Self::new(value, variable, self.cs.clone())) + } + + /// Output `-self` + /// + /// This does not create any constraints. + #[tracing::instrument(target = "r1cs")] + pub fn negate(&self) -> Self { + let mut result = self.clone(); + result.negate_in_place(); + result + } + + /// Sets `self = -self` + /// + /// This does not create any constraints. + #[tracing::instrument(target = "r1cs")] + pub fn negate_in_place(&mut self) -> &mut Self { + if let Some(val) = self.value.as_mut() { + *val = -(*val); + } + self.variable = self.cs.new_lc(lc!() - self.variable).unwrap(); + self + } + + /// Outputs `self * self` + /// + /// This requires *one* constraint. + #[tracing::instrument(target = "r1cs")] + pub fn square(&self) -> Result { + Ok(self.mul(self)) + } + + /// Outputs `result` such that `result * self = 1`. + /// + /// This requires *one* constraint. + #[tracing::instrument(target = "r1cs")] + pub fn inverse(&self) -> Result { + let inverse = Self::new_witness(self.cs.clone(), || { + Ok(self.value.get()?.inverse().unwrap_or_else(F::zero)) + })?; + + self.cs.enforce_constraint( + lc!() + self.variable, + lc!() + inverse.variable, + lc!() + Variable::One, + )?; + Ok(inverse) + } + + /// This is a no-op for prime fields. + #[tracing::instrument(target = "r1cs")] + pub fn frobenius_map(&self, _: usize) -> Result { + Ok(self.clone()) + } + + /// Enforces that `self * other = result`. + /// + /// This requires *one* constraint. + #[tracing::instrument(target = "r1cs")] + pub fn mul_equals(&self, other: &Self, result: &Self) -> Result<(), SynthesisError> { + self.cs.enforce_constraint( + lc!() + self.variable, + lc!() + other.variable, + lc!() + result.variable, + ) + } + + /// Enforces that `self * self = result`. + /// + /// This requires *one* constraint. + #[tracing::instrument(target = "r1cs")] + pub fn square_equals(&self, result: &Self) -> Result<(), SynthesisError> { + self.cs.enforce_constraint( + lc!() + self.variable, + lc!() + self.variable, + lc!() + result.variable, + ) + } + + /// Outputs the bit `self == other`. 
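As a usage note (not part of the original patch): `is_eq` and `is_neq` return a `Boolean` witness rather than enforcing anything, which is what makes them composable with other gadgets.

```rust
use ark_r1cs_std::{fields::fp::FpVar, prelude::*};
use ark_relations::r1cs::{ConstraintSystem, SynthesisError};
use ark_test_curves::bls12_381::Fr;

fn main() -> Result<(), SynthesisError> {
    let cs = ConstraintSystem::<Fr>::new_ref();
    let a = FpVar::new_witness(cs.clone(), || Ok(Fr::from(1u8)))?;
    let b = FpVar::new_witness(cs.clone(), || Ok(Fr::from(2u8)))?;
    // Returns a witness bit; the system stays satisfiable either way.
    let eq = a.is_eq(&b)?;
    assert!(!eq.value()?);
    assert!(cs.is_satisfied().unwrap());
    Ok(())
}
```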
+ /// + /// This requires three constraints. + #[tracing::instrument(target = "r1cs")] + pub fn is_eq(&self, other: &Self) -> Result, SynthesisError> { + Ok(self.is_neq(other)?.not()) + } + + /// Outputs the bit `self != other`. + /// + /// This requires three constraints. + #[tracing::instrument(target = "r1cs")] + pub fn is_neq(&self, other: &Self) -> Result, SynthesisError> { + let is_not_equal = Boolean::new_witness(self.cs.clone(), || { + Ok(self.value.get()? != other.value.get()?) + })?; + let multiplier = self.cs.new_witness_variable(|| { + if is_not_equal.value()? { + (self.value.get()? - other.value.get()?).inverse().get() + } else { + Ok(F::one()) + } + })?; + + // Completeness: + // Case 1: self != other: + // ---------------------- + // constraint 1: + // (self - other) * multiplier = is_not_equal + // => (non_zero) * multiplier = 1 (satisfied, because multiplier = 1/(self - + // other) + // + // constraint 2: + // (self - other) * not(is_not_equal) = 0 + // => (non_zero) * not(1) = 0 + // => (non_zero) * 0 = 0 + // + // Case 2: self == other: + // ---------------------- + // constraint 1: + // (self - other) * multiplier = is_not_equal + // => 0 * multiplier = 0 (satisfied, because multiplier = 1 + // + // constraint 2: + // (self - other) * not(is_not_equal) = 0 + // => 0 * not(0) = 0 + // => 0 * 1 = 0 + // + // -------------------------------------------------------------------- + // + // Soundness: + // Case 1: self != other, but is_not_equal = 0. + // -------------------------------------------- + // constraint 1: + // (self - other) * multiplier = is_not_equal + // => non_zero * multiplier = 0 (only satisfiable if multiplier == 0) + // + // constraint 2: + // (self - other) * not(is_not_equal) = 0 + // => (non_zero) * 1 = 0 (impossible) + // + // Case 2: self == other, but is_not_equal = 1. + // -------------------------------------------- + // constraint 1: + // (self - other) * multiplier = is_not_equal + // 0 * multiplier = 1 (unsatisfiable) + self.cs.enforce_constraint( + lc!() + self.variable - other.variable, + lc!() + multiplier, + is_not_equal.lc(), + )?; + self.cs.enforce_constraint( + lc!() + self.variable - other.variable, + is_not_equal.not().lc(), + lc!(), + )?; + Ok(is_not_equal) + } + + /// Enforces that self == other if `should_enforce.is_eq(&Boolean::TRUE)`. + /// + /// This requires one constraint. + #[tracing::instrument(target = "r1cs")] + pub fn conditional_enforce_equal( + &self, + other: &Self, + should_enforce: &Boolean, + ) -> Result<(), SynthesisError> { + self.cs.enforce_constraint( + lc!() + self.variable - other.variable, + lc!() + should_enforce.lc(), + lc!(), + ) + } + + /// Enforces that self != other if `should_enforce.is_eq(&Boolean::TRUE)`. + /// + /// This requires one constraint. + #[tracing::instrument(target = "r1cs")] + pub fn conditional_enforce_not_equal( + &self, + other: &Self, + should_enforce: &Boolean, + ) -> Result<(), SynthesisError> { + let multiplier = Self::new_witness(self.cs.clone(), || { + if should_enforce.value()? { + (self.value.get()? 
- other.value.get()?).inverse().get() + } else { + Ok(F::zero()) + } + })?; + + self.cs.enforce_constraint( + lc!() + self.variable - other.variable, + lc!() + multiplier.variable, + should_enforce.lc(), + )?; + Ok(()) + } +} + +/// ************************************************************************* +/// ************************************************************************* + +impl ToBitsGadget for AllocatedFp { + /// Outputs the unique bit-wise decomposition of `self` in *little-endian* + /// form. + /// + /// This method enforces that the output is in the field, i.e. + /// it invokes `Boolean::enforce_in_field_le` on the bit decomposition. + #[tracing::instrument(target = "r1cs")] + fn to_bits_le(&self) -> Result>, SynthesisError> { + let bits = self.to_non_unique_bits_le()?; + Boolean::enforce_in_field_le(&bits)?; + Ok(bits) + } + + #[tracing::instrument(target = "r1cs")] + fn to_non_unique_bits_le(&self) -> Result>, SynthesisError> { + let cs = self.cs.clone(); + use ark_ff::BitIteratorBE; + let mut bits = if let Some(value) = self.value { + let field_char = BitIteratorBE::new(F::characteristic()); + let bits: Vec<_> = BitIteratorBE::new(value.into_repr()) + .zip(field_char) + .skip_while(|(_, c)| !c) + .map(|(b, _)| Some(b)) + .collect(); + assert_eq!(bits.len(), F::Params::MODULUS_BITS as usize); + bits + } else { + vec![None; F::Params::MODULUS_BITS as usize] + }; + + // Convert to little-endian + bits.reverse(); + + let bits: Vec<_> = bits + .into_iter() + .map(|b| Boolean::new_witness(cs.clone(), || b.get())) + .collect::>()?; + + let mut lc = LinearCombination::zero(); + let mut coeff = F::one(); + + for bit in bits.iter() { + lc = &lc + bit.lc() * coeff; + + coeff.double_in_place(); + } + + lc = lc - &self.variable; + + cs.enforce_constraint(lc!(), lc!(), lc)?; + + Ok(bits) + } +} + +impl ToBytesGadget for AllocatedFp { + /// Outputs the unique byte decomposition of `self` in *little-endian* + /// form. + /// + /// This method enforces that the decomposition represents + /// an integer that is less than `F::MODULUS`. 
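Not part of the original patch: a little-endian sanity check for the byte decomposition defined below, over `ark_test_curves::bls12_381::Fr`.

```rust
use ark_r1cs_std::{fields::fp::FpVar, prelude::*};
use ark_relations::r1cs::{ConstraintSystem, SynthesisError};
use ark_test_curves::bls12_381::Fr;

fn main() -> Result<(), SynthesisError> {
    let cs = ConstraintSystem::<Fr>::new_ref();
    let x = FpVar::new_witness(cs.clone(), || Ok(Fr::from(258u16)))?;
    // 258 = 0x0102, so the little-endian bytes start with 0x02, 0x01.
    let bytes = x.to_bytes()?;
    assert_eq!(bytes[0].value()?, 0x02);
    assert_eq!(bytes[1].value()?, 0x01);
    assert!(cs.is_satisfied().unwrap());
    Ok(())
}
```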
+ #[tracing::instrument(target = "r1cs")] + fn to_bytes(&self) -> Result>, SynthesisError> { + let num_bits = F::BigInt::NUM_LIMBS * 64; + let mut bits = self.to_bits_le()?; + let remainder = core::iter::repeat(Boolean::constant(false)).take(num_bits - bits.len()); + bits.extend(remainder); + let bytes = bits + .chunks(8) + .map(|chunk| UInt8::from_bits_le(chunk)) + .collect(); + Ok(bytes) + } + + #[tracing::instrument(target = "r1cs")] + fn to_non_unique_bytes(&self) -> Result>, SynthesisError> { + let num_bits = F::BigInt::NUM_LIMBS * 64; + let mut bits = self.to_non_unique_bits_le()?; + let remainder = core::iter::repeat(Boolean::constant(false)).take(num_bits - bits.len()); + bits.extend(remainder); + let bytes = bits + .chunks(8) + .map(|chunk| UInt8::from_bits_le(chunk)) + .collect(); + Ok(bytes) + } +} + +impl ToConstraintFieldGadget for AllocatedFp { + #[tracing::instrument(target = "r1cs")] + fn to_constraint_field(&self) -> Result>, SynthesisError> { + Ok(vec![self.clone().into()]) + } +} + +impl CondSelectGadget for AllocatedFp { + #[inline] + #[tracing::instrument(target = "r1cs")] + fn conditionally_select( + cond: &Boolean, + true_val: &Self, + false_val: &Self, + ) -> Result { + match cond { + Boolean::Constant(true) => Ok(true_val.clone()), + Boolean::Constant(false) => Ok(false_val.clone()), + _ => { + let cs = cond.cs(); + let result = Self::new_witness(cs.clone(), || { + cond.value() + .and_then(|c| if c { true_val } else { false_val }.value.get()) + })?; + // a = self; b = other; c = cond; + // + // r = c * a + (1 - c) * b + // r = b + c * (a - b) + // c * (a - b) = r - b + cs.enforce_constraint( + cond.lc(), + lc!() + true_val.variable - false_val.variable, + lc!() + result.variable - false_val.variable, + )?; + + Ok(result) + } + } + } +} + +/// Uses two bits to perform a lookup into a table +/// `b` is little-endian: `b[0]` is LSB. 
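// A native sketch of the bilinear interpolation that `two_bit_lookup` below
// enforces with a single constraint; plain i64 values stand in for field
// elements, since only the algebraic identity matters here.
fn two_bit_interpolate(b0: i64, b1: i64, c: &[i64; 4]) -> i64 {
    // result = c0 + b0*(c1 - c0) + b1*(c2 - c0) + b0*b1*(c3 - c2 - c1 + c0),
    // which the gadget rearranges into one (A, B, C) rank-1 product.
    c[0] + b0 * (c[1] - c[0]) + b1 * (c[2] - c[0]) + b0 * b1 * (c[3] - c[2] - c[1] + c[0])
}

fn main() {
    let c = [7, -3, 42, 11];
    for b1 in 0..2i64 {
        for b0 in 0..2i64 {
            let index = (b0 + (b1 << 1)) as usize; // b0 is the LSB
            assert_eq!(two_bit_interpolate(b0, b1, &c), c[index]);
        }
    }
}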
+impl TwoBitLookupGadget for AllocatedFp { + type TableConstant = F; + #[tracing::instrument(target = "r1cs")] + fn two_bit_lookup(b: &[Boolean], c: &[Self::TableConstant]) -> Result { + debug_assert_eq!(b.len(), 2); + debug_assert_eq!(c.len(), 4); + let result = Self::new_witness(b.cs(), || { + let lsb = usize::from(b[0].value()?); + let msb = usize::from(b[1].value()?); + let index = lsb + (msb << 1); + Ok(c[index]) + })?; + let one = Variable::One; + b.cs().enforce_constraint( + lc!() + b[1].lc() * (c[3] - &c[2] - &c[1] + &c[0]) + (c[1] - &c[0], one), + lc!() + b[0].lc(), + lc!() + result.variable - (c[0], one) + b[1].lc() * (c[0] - &c[2]), + )?; + + Ok(result) + } +} + +impl ThreeBitCondNegLookupGadget for AllocatedFp { + type TableConstant = F; + + #[tracing::instrument(target = "r1cs")] + fn three_bit_cond_neg_lookup( + b: &[Boolean], + b0b1: &Boolean, + c: &[Self::TableConstant], + ) -> Result { + debug_assert_eq!(b.len(), 3); + debug_assert_eq!(c.len(), 4); + let result = Self::new_witness(b.cs(), || { + let lsb = usize::from(b[0].value()?); + let msb = usize::from(b[1].value()?); + let index = lsb + (msb << 1); + let intermediate = c[index]; + + let is_negative = b[2].value()?; + let y = if is_negative { + -intermediate + } else { + intermediate + }; + Ok(y) + })?; + + let y_lc = b0b1.lc() * (c[3] - &c[2] - &c[1] + &c[0]) + + b[0].lc() * (c[1] - &c[0]) + + b[1].lc() * (c[2] - &c[0]) + + (c[0], Variable::One); + // enforce y * (1 - 2 * b_2) == res + b.cs().enforce_constraint( + y_lc.clone(), + b[2].lc() * F::from(2u64).neg() + (F::one(), Variable::One), + lc!() + result.variable, + )?; + + Ok(result) + } +} + +impl AllocVar for AllocatedFp { + fn new_variable>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + if mode == AllocationMode::Constant { + let v = *f()?.borrow(); + let lc = cs.new_lc(lc!() + (v, Variable::One))?; + Ok(Self::new(Some(v), lc, cs)) + } else { + let mut value = None; + let value_generator = || { + value = Some(*f()?.borrow()); + value.ok_or(SynthesisError::AssignmentMissing) + }; + let variable = if mode == AllocationMode::Input { + cs.new_input_variable(value_generator)? + } else { + cs.new_witness_variable(value_generator)? + }; + Ok(Self::new(value, variable, cs)) + } + } +} + +impl FieldVar for FpVar { + fn constant(f: F) -> Self { + Self::Constant(f) + } + + fn zero() -> Self { + Self::Constant(F::zero()) + } + + fn one() -> Self { + Self::Constant(F::one()) + } + + #[tracing::instrument(target = "r1cs")] + fn double(&self) -> Result { + match self { + Self::Constant(c) => Ok(Self::Constant(c.double())), + Self::Var(v) => Ok(Self::Var(v.double()?)), + } + } + + #[tracing::instrument(target = "r1cs")] + fn negate(&self) -> Result { + match self { + Self::Constant(c) => Ok(Self::Constant(-*c)), + Self::Var(v) => Ok(Self::Var(v.negate())), + } + } + + #[tracing::instrument(target = "r1cs")] + fn square(&self) -> Result { + match self { + Self::Constant(c) => Ok(Self::Constant(c.square())), + Self::Var(v) => Ok(Self::Var(v.square()?)), + } + } + + /// Enforce that `self * other == result`. 
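// A standalone sketch (hypothetical toy types, not the arkworks API) of the
// constant-folding dispatch that `mul_equals` below performs: when either
// operand is a constant, the product folds into a linear combination and
// costs no new R1CS constraint; only Var * Var needs a multiplication gate.
#[derive(Clone, Copy, Debug, PartialEq)]
enum ToyVar {
    Constant(i64),
    Var(i64), // stand-in for an allocated witness; we track only its value
}

fn mul_cost(a: ToyVar, b: ToyVar) -> (i64, usize) {
    use ToyVar::*;
    match (a, b) {
        (Constant(x), Constant(y)) => (x * y, 0), // folded at synthesis time
        (Constant(x), Var(y)) | (Var(y), Constant(x)) => (x * y, 0), // scaling an LC is free
        (Var(x), Var(y)) => (x * y, 1), // one rank-1 constraint: A * B = C
    }
}

fn main() {
    assert_eq!(mul_cost(ToyVar::Constant(3), ToyVar::Var(5)), (15, 0));
    assert_eq!(mul_cost(ToyVar::Var(3), ToyVar::Var(5)), (15, 1));
}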
+ #[tracing::instrument(target = "r1cs")] + fn mul_equals(&self, other: &Self, result: &Self) -> Result<(), SynthesisError> { + use FpVar::*; + match (self, other, result) { + (Constant(_), Constant(_), Constant(_)) => Ok(()), + (Constant(_), Constant(_), _) | (Constant(_), Var(_), _) | (Var(_), Constant(_), _) => { + result.enforce_equal(&(self * other)) + } // this multiplication should be free + (Var(v1), Var(v2), Var(v3)) => v1.mul_equals(v2, v3), + (Var(v1), Var(v2), Constant(f)) => { + let cs = v1.cs.clone(); + let v3 = AllocatedFp::new_constant(cs, f).unwrap(); + v1.mul_equals(v2, &v3) + } + } + } + + /// Enforce that `self * self == result`. + #[tracing::instrument(target = "r1cs")] + fn square_equals(&self, result: &Self) -> Result<(), SynthesisError> { + use FpVar::*; + match (self, result) { + (Constant(_), Constant(_)) => Ok(()), + (Constant(f), Var(r)) => { + let cs = r.cs.clone(); + let v = AllocatedFp::new_witness(cs, || Ok(f))?; + v.square_equals(&r) + } + (Var(v), Constant(f)) => { + let cs = v.cs.clone(); + let r = AllocatedFp::new_witness(cs, || Ok(f))?; + v.square_equals(&r) + } + (Var(v1), Var(v2)) => v1.square_equals(v2), + } + } + + #[tracing::instrument(target = "r1cs")] + fn inverse(&self) -> Result { + match self { + FpVar::Var(v) => v.inverse().map(FpVar::Var), + FpVar::Constant(f) => f.inverse().get().map(FpVar::Constant), + } + } + + #[tracing::instrument(target = "r1cs")] + fn frobenius_map(&self, power: usize) -> Result { + match self { + FpVar::Var(v) => v.frobenius_map(power).map(FpVar::Var), + FpVar::Constant(f) => { + let mut f = *f; + f.frobenius_map(power); + Ok(FpVar::Constant(f)) + } + } + } + + #[tracing::instrument(target = "r1cs")] + fn frobenius_map_in_place(&mut self, power: usize) -> Result<&mut Self, SynthesisError> { + *self = self.frobenius_map(power)?; + Ok(self) + } +} + +impl_ops!( + FpVar, + F, + Add, + add, + AddAssign, + add_assign, + |this: &'a FpVar, other: &'a FpVar| { + use FpVar::*; + match (this, other) { + (Constant(c1), Constant(c2)) => Constant(*c1 + *c2), + (Constant(c), Var(v)) | (Var(v), Constant(c)) => Var(v.add_constant(*c)), + (Var(v1), Var(v2)) => Var(v1.add(v2)), + } + }, + |this: &'a FpVar, other: F| { this + &FpVar::Constant(other) }, + F: PrimeField, +); + +impl_ops!( + FpVar, + F, + Sub, + sub, + SubAssign, + sub_assign, + |this: &'a FpVar, other: &'a FpVar| { + use FpVar::*; + match (this, other) { + (Constant(c1), Constant(c2)) => Constant(*c1 - *c2), + (Var(v), Constant(c)) => Var(v.sub_constant(*c)), + (Constant(c), Var(v)) => Var(v.sub_constant(*c).negate()), + (Var(v1), Var(v2)) => Var(v1.sub(v2)), + } + }, + |this: &'a FpVar, other: F| { this - &FpVar::Constant(other) }, + F: PrimeField +); + +impl_ops!( + FpVar, + F, + Mul, + mul, + MulAssign, + mul_assign, + |this: &'a FpVar, other: &'a FpVar| { + use FpVar::*; + match (this, other) { + (Constant(c1), Constant(c2)) => Constant(*c1 * *c2), + (Constant(c), Var(v)) | (Var(v), Constant(c)) => Var(v.mul_constant(*c)), + (Var(v1), Var(v2)) => Var(v1.mul(v2)), + } + }, + |this: &'a FpVar, other: F| { + if other.is_zero() { + FpVar::zero() + } else { + this * &FpVar::Constant(other) + } + }, + F: PrimeField +); + +/// ************************************************************************* +/// ************************************************************************* + +impl EqGadget for FpVar { + #[tracing::instrument(target = "r1cs")] + fn is_eq(&self, other: &Self) -> Result, SynthesisError> { + match (self, other) { + (Self::Constant(c1), Self::Constant(c2)) 
=> Ok(Boolean::Constant(c1 == c2)), + (Self::Constant(c), Self::Var(v)) | (Self::Var(v), Self::Constant(c)) => { + let cs = v.cs.clone(); + let c = AllocatedFp::new_constant(cs, c)?; + c.is_eq(v) + } + (Self::Var(v1), Self::Var(v2)) => v1.is_eq(v2), + } + } + + #[tracing::instrument(target = "r1cs")] + fn conditional_enforce_equal( + &self, + other: &Self, + should_enforce: &Boolean, + ) -> Result<(), SynthesisError> { + match (self, other) { + (Self::Constant(_), Self::Constant(_)) => Ok(()), + (Self::Constant(c), Self::Var(v)) | (Self::Var(v), Self::Constant(c)) => { + let cs = v.cs.clone(); + let c = AllocatedFp::new_constant(cs, c)?; + c.conditional_enforce_equal(v, should_enforce) + } + (Self::Var(v1), Self::Var(v2)) => v1.conditional_enforce_equal(v2, should_enforce), + } + } + + #[tracing::instrument(target = "r1cs")] + fn conditional_enforce_not_equal( + &self, + other: &Self, + should_enforce: &Boolean, + ) -> Result<(), SynthesisError> { + match (self, other) { + (Self::Constant(_), Self::Constant(_)) => Ok(()), + (Self::Constant(c), Self::Var(v)) | (Self::Var(v), Self::Constant(c)) => { + let cs = v.cs.clone(); + let c = AllocatedFp::new_constant(cs, c)?; + c.conditional_enforce_not_equal(v, should_enforce) + } + (Self::Var(v1), Self::Var(v2)) => v1.conditional_enforce_not_equal(v2, should_enforce), + } + } +} + +impl ToBitsGadget for FpVar { + #[tracing::instrument(target = "r1cs")] + fn to_bits_le(&self) -> Result>, SynthesisError> { + match self { + Self::Constant(_) => self.to_non_unique_bits_le(), + Self::Var(v) => v.to_bits_le(), + } + } + + #[tracing::instrument(target = "r1cs")] + fn to_non_unique_bits_le(&self) -> Result>, SynthesisError> { + use ark_ff::BitIteratorLE; + match self { + Self::Constant(c) => Ok(BitIteratorLE::new(&c.into_repr()) + .take((F::Params::MODULUS_BITS) as usize) + .map(Boolean::constant) + .collect::>()), + Self::Var(v) => v.to_non_unique_bits_le(), + } + } +} + +impl ToBytesGadget for FpVar { + /// Outputs the unique byte decomposition of `self` in *little-endian* + /// form. + #[tracing::instrument(target = "r1cs")] + fn to_bytes(&self) -> Result>, SynthesisError> { + match self { + Self::Constant(c) => Ok(UInt8::constant_vec(&ark_ff::to_bytes![c].unwrap())), + Self::Var(v) => v.to_bytes(), + } + } + + #[tracing::instrument(target = "r1cs")] + fn to_non_unique_bytes(&self) -> Result>, SynthesisError> { + match self { + Self::Constant(c) => Ok(UInt8::constant_vec(&ark_ff::to_bytes![c].unwrap())), + Self::Var(v) => v.to_non_unique_bytes(), + } + } +} + +impl ToConstraintFieldGadget for FpVar { + #[tracing::instrument(target = "r1cs")] + fn to_constraint_field(&self) -> Result>, SynthesisError> { + Ok(vec![self.clone()]) + } +} + +impl CondSelectGadget for FpVar { + #[tracing::instrument(target = "r1cs")] + fn conditionally_select( + cond: &Boolean, + true_value: &Self, + false_value: &Self, + ) -> Result { + match cond { + Boolean::Constant(true) => Ok(true_value.clone()), + Boolean::Constant(false) => Ok(false_value.clone()), + _ => { + match (true_value, false_value) { + (Self::Constant(t), Self::Constant(f)) => { + let is = AllocatedFp::from(cond.clone()); + let not = AllocatedFp::from(cond.not()); + // cond * t + (1 - cond) * f + Ok(is.mul_constant(*t).add(¬.mul_constant(*f)).into()) + } + (..) 
=> { + let cs = cond.cs(); + let true_value = match true_value { + Self::Constant(f) => AllocatedFp::new_constant(cs.clone(), f)?, + Self::Var(v) => v.clone(), + }; + let false_value = match false_value { + Self::Constant(f) => AllocatedFp::new_constant(cs, f)?, + Self::Var(v) => v.clone(), + }; + cond.select(&true_value, &false_value).map(Self::Var) + } + } + } + } + } +} + +/// Uses two bits to perform a lookup into a table +/// `b` is little-endian: `b[0]` is LSB. +impl TwoBitLookupGadget for FpVar { + type TableConstant = F; + + #[tracing::instrument(target = "r1cs")] + fn two_bit_lookup(b: &[Boolean], c: &[Self::TableConstant]) -> Result { + debug_assert_eq!(b.len(), 2); + debug_assert_eq!(c.len(), 4); + if b.is_constant() { + let lsb = usize::from(b[0].value()?); + let msb = usize::from(b[1].value()?); + let index = lsb + (msb << 1); + Ok(Self::Constant(c[index])) + } else { + AllocatedFp::two_bit_lookup(b, c).map(Self::Var) + } + } +} + +impl ThreeBitCondNegLookupGadget for FpVar { + type TableConstant = F; + + #[tracing::instrument(target = "r1cs")] + fn three_bit_cond_neg_lookup( + b: &[Boolean], + b0b1: &Boolean, + c: &[Self::TableConstant], + ) -> Result { + debug_assert_eq!(b.len(), 3); + debug_assert_eq!(c.len(), 4); + + if b.cs().or(b0b1.cs()).is_none() { + // We only have constants + + let lsb = usize::from(b[0].value()?); + let msb = usize::from(b[1].value()?); + let index = lsb + (msb << 1); + let intermediate = c[index]; + + let is_negative = b[2].value()?; + let y = if is_negative { + -intermediate + } else { + intermediate + }; + Ok(Self::Constant(y)) + } else { + AllocatedFp::three_bit_cond_neg_lookup(b, b0b1, c).map(Self::Var) + } + } +} + +impl AllocVar for FpVar { + fn new_variable>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + if mode == AllocationMode::Constant { + Ok(Self::Constant(*f()?.borrow())) + } else { + AllocatedFp::new_variable(cs, f, mode).map(Self::Var) + } + } +} diff --git a/arkworks/r1cs-std/src/fields/fp12.rs b/arkworks/r1cs-std/src/fields/fp12.rs new file mode 100644 index 00000000..3f60c202 --- /dev/null +++ b/arkworks/r1cs-std/src/fields/fp12.rs @@ -0,0 +1,171 @@ +use crate::fields::{fp2::Fp2Var, fp6_3over2::Fp6Var, quadratic_extension::*, FieldVar}; +use ark_ff::fields::{fp12_2over3over2::*, fp6_3over2::Fp6Parameters, Field, QuadExtParameters}; +use ark_relations::r1cs::SynthesisError; + +/// A degree-12 extension field constructed as the tower of a +/// quadratic extension over a cubic extension over a quadratic extension field. +/// This is the R1CS equivalent of `ark_ff::fp12_2over3over2::Fp12

<P>`.
+pub type Fp12Var<P> =
+    QuadExtVar<Fp6Var<<P as Fp12Parameters>::Fp6Params>, Fp12ParamsWrapper<P>>;
+
+type Fp2Params<P> = <<P as Fp12Parameters>::Fp6Params as Fp6Parameters>::Fp2Params;
+
+impl<P: Fp12Parameters> QuadExtVarParams<Fp6Var<P::Fp6Params>> for Fp12ParamsWrapper<P> {
+    fn mul_base_field_var_by_frob_coeff(fe: &mut Fp6Var<P::Fp6Params>, power: usize) {
+        fe.c0 *= Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
+        fe.c1 *= Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
+        fe.c2 *= Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
+    }
+}
+
+impl<P: Fp12Parameters> Fp12Var<P>

{ + /// Multiplies by a sparse element of the form `(c0 = (c0, c1, 0), c1 = (0, + /// d1, 0))`. + #[inline] + pub fn mul_by_014( + &self, + c0: &Fp2Var>, + c1: &Fp2Var>, + d1: &Fp2Var>, + ) -> Result { + let v0 = self.c0.mul_by_c0_c1_0(&c0, &c1)?; + let v1 = self.c1.mul_by_0_c1_0(&d1)?; + let new_c0 = Self::mul_base_field_by_nonresidue(&v1)? + &v0; + + let new_c1 = (&self.c0 + &self.c1).mul_by_c0_c1_0(&c0, &(c1 + d1))? - &v0 - &v1; + Ok(Self::new(new_c0, new_c1)) + } + + /// Multiplies by a sparse element of the form `(c0 = (c0, 0, 0), c1 = (d0, + /// d1, 0))`. + #[inline] + pub fn mul_by_034( + &self, + c0: &Fp2Var>, + d0: &Fp2Var>, + d1: &Fp2Var>, + ) -> Result { + let a0 = &self.c0.c0 * c0; + let a1 = &self.c0.c1 * c0; + let a2 = &self.c0.c2 * c0; + let a = Fp6Var::new(a0, a1, a2); + let b = self.c1.mul_by_c0_c1_0(&d0, &d1)?; + + let c0 = c0 + d0; + let c1 = d1; + let e = (&self.c0 + &self.c1).mul_by_c0_c1_0(&c0, &c1)?; + let new_c1 = e - (&a + &b); + let new_c0 = Self::mul_base_field_by_nonresidue(&b)? + &a; + + Ok(Self::new(new_c0, new_c1)) + } + + /// Squares `self` when `self` is in the cyclotomic subgroup. + pub fn cyclotomic_square(&self) -> Result { + if characteristic_square_mod_6_is_one(Fp12::

::characteristic()) { + let fp2_nr = ::NONRESIDUE; + + let z0 = &self.c0.c0; + let z4 = &self.c0.c1; + let z3 = &self.c0.c2; + let z2 = &self.c1.c0; + let z1 = &self.c1.c1; + let z5 = &self.c1.c2; + + // t0 + t1*y = (z0 + z1*y)^2 = a^2 + let tmp = z0 * z1; + let t0 = { + let tmp1 = z0 + z1; + let tmp2 = z1 * fp2_nr + z0; + let tmp4 = &tmp * fp2_nr + &tmp; + tmp1 * tmp2 - tmp4 + }; + let t1 = tmp.double()?; + + // t2 + t3*y = (z2 + z3*y)^2 = b^2 + let tmp = z2 * z3; + let t2 = { + // (z2 + &z3) * &(z2 + &(fp2_nr * &z3)) - &tmp - &(tmp * &fp2_nr); + let tmp1 = z2 + z3; + let tmp2 = z3 * fp2_nr + z2; + let tmp4 = &tmp * fp2_nr + &tmp; + tmp1 * tmp2 - tmp4 + }; + let t3 = tmp.double()?; + + // t4 + t5*y = (z4 + z5*y)^2 = c^2 + let tmp = z4 * z5; + let t4 = { + // (z4 + &z5) * &(z4 + &(fp2_nr * &z5)) - &tmp - &(tmp * &fp2_nr); + let tmp1 = z4 + z5; + let tmp2 = (z5 * fp2_nr) + z4; + let tmp4 = (&tmp * fp2_nr) + &tmp; + (tmp1 * tmp2) - tmp4 + }; + let t5 = tmp.double()?; + + // for A + + // z0 = 3 * t0 - 2 * z0 + let c0_c0 = (&t0 - z0).double()? + &t0; + + // z1 = 3 * t1 + 2 * z1 + let c1_c1 = (&t1 + z1).double()? + &t1; + + // for B + + // z2 = 3 * (xi * t5) + 2 * z2 + let c1_c0 = { + let tmp = &t5 * fp2_nr; + (z2 + &tmp).double()? + &tmp + }; + + // z3 = 3 * t4 - 2 * z3 + let c0_c2 = (&t4 - z3).double()? + &t4; + + // for C + + // z4 = 3 * t2 - 2 * z4 + let c0_c1 = (&t2 - z4).double()? + &t2; + + // z5 = 3 * t3 + 2 * z5 + let c1_c2 = (&t3 + z5).double()? + &t3; + let c0 = Fp6Var::new(c0_c0, c0_c1, c0_c2); + let c1 = Fp6Var::new(c1_c0, c1_c1, c1_c2); + + Ok(Self::new(c0, c1)) + } else { + self.square() + } + } + + /// Like `Self::cyclotomic_exp`, but additionally uses cyclotomic squaring. + pub fn optimized_cyclotomic_exp( + &self, + exponent: impl AsRef<[u64]>, + ) -> Result { + use ark_ff::biginteger::arithmetic::find_wnaf; + let mut res = Self::one(); + let self_inverse = self.unitary_inverse()?; + + let mut found_nonzero = false; + let naf = find_wnaf(exponent.as_ref()); + + for &value in naf.iter().rev() { + if found_nonzero { + res = res.cyclotomic_square()?; + } + + if value != 0 { + found_nonzero = true; + + if value > 0 { + res *= self; + } else { + res *= &self_inverse; + } + } + } + + Ok(res) + } +} diff --git a/arkworks/r1cs-std/src/fields/fp2.rs b/arkworks/r1cs-std/src/fields/fp2.rs new file mode 100644 index 00000000..f1183df3 --- /dev/null +++ b/arkworks/r1cs-std/src/fields/fp2.rs @@ -0,0 +1,12 @@ +use crate::fields::{fp::FpVar, quadratic_extension::*}; +use ark_ff::fields::{Fp2Parameters, Fp2ParamsWrapper, QuadExtParameters}; + +/// A quadratic extension field constructed over a prime field. +/// This is the R1CS equivalent of `ark_ff::Fp2

<P>`.
+pub type Fp2Var<P> = QuadExtVar<FpVar<<P as Fp2Parameters>::Fp>, Fp2ParamsWrapper<P>>;
+
+impl<P: Fp2Parameters> QuadExtVarParams<FpVar<P::Fp>> for Fp2ParamsWrapper<P> {
+    fn mul_base_field_var_by_frob_coeff(fe: &mut FpVar<P::Fp>, power: usize) {
+        *fe *= Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
+    }
+}
diff --git a/arkworks/r1cs-std/src/fields/fp3.rs b/arkworks/r1cs-std/src/fields/fp3.rs
new file mode 100644
index 00000000..b20b80ca
--- /dev/null
+++ b/arkworks/r1cs-std/src/fields/fp3.rs
@@ -0,0 +1,17 @@
+use crate::fields::{cubic_extension::*, fp::FpVar};
+use ark_ff::fields::{CubicExtParameters, Fp3Parameters, Fp3ParamsWrapper};
+
+/// A cubic extension field constructed over a prime field.
+/// This is the R1CS equivalent of `ark_ff::Fp3

<P>`.
+pub type Fp3Var<P> = CubicExtVar<FpVar<<P as Fp3Parameters>::Fp>, Fp3ParamsWrapper<P>>;
+
+impl<P: Fp3Parameters> CubicExtVarParams<FpVar<P::Fp>> for Fp3ParamsWrapper<P> {
+    fn mul_base_field_vars_by_frob_coeff(
+        c1: &mut FpVar<P::Fp>,
+        c2: &mut FpVar<P::Fp>,
+        power: usize,
+    ) {
+        *c1 *= Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
+        *c2 *= Self::FROBENIUS_COEFF_C2[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
+    }
+}
diff --git a/arkworks/r1cs-std/src/fields/fp4.rs b/arkworks/r1cs-std/src/fields/fp4.rs
new file mode 100644
index 00000000..10238730
--- /dev/null
+++ b/arkworks/r1cs-std/src/fields/fp4.rs
@@ -0,0 +1,14 @@
+use crate::fields::{fp2::Fp2Var, quadratic_extension::*};
+use ark_ff::fields::{Fp4Parameters, Fp4ParamsWrapper, QuadExtParameters};
+
+/// A quartic extension field constructed as the tower of a
+/// quadratic extension over a quadratic extension field.
+/// This is the R1CS equivalent of `ark_ff::Fp4

<P>`.
+pub type Fp4Var<P> = QuadExtVar<Fp2Var<<P as Fp4Parameters>::Fp2Params>, Fp4ParamsWrapper<P>>;
+
+impl<P: Fp4Parameters> QuadExtVarParams<Fp2Var<P::Fp2Params>> for Fp4ParamsWrapper<P> {
+    fn mul_base_field_var_by_frob_coeff(fe: &mut Fp2Var<P::Fp2Params>, power: usize) {
+        fe.c0 *= Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
+        fe.c1 *= Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
+    }
+}
diff --git a/arkworks/r1cs-std/src/fields/fp6_2over3.rs b/arkworks/r1cs-std/src/fields/fp6_2over3.rs
new file mode 100644
index 00000000..07b172b1
--- /dev/null
+++ b/arkworks/r1cs-std/src/fields/fp6_2over3.rs
@@ -0,0 +1,15 @@
+use crate::fields::{fp3::Fp3Var, quadratic_extension::*};
+use ark_ff::fields::{fp6_2over3::*, QuadExtParameters};
+
+/// A sextic extension field constructed as the tower of a
+/// quadratic extension over a cubic extension field.
+/// This is the R1CS equivalent of `ark_ff::fp6_2over3::Fp6

<P>`.
+pub type Fp6Var<P> = QuadExtVar<Fp3Var<<P as Fp6Parameters>::Fp3Params>, Fp6ParamsWrapper<P>>;
+
+impl<P: Fp6Parameters> QuadExtVarParams<Fp3Var<P::Fp3Params>> for Fp6ParamsWrapper<P> {
+    fn mul_base_field_var_by_frob_coeff(fe: &mut Fp3Var<P::Fp3Params>, power: usize) {
+        fe.c0 *= Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
+        fe.c1 *= Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
+        fe.c2 *= Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
+    }
+}
diff --git a/arkworks/r1cs-std/src/fields/fp6_3over2.rs b/arkworks/r1cs-std/src/fields/fp6_3over2.rs
new file mode 100644
index 00000000..fdef07dc
--- /dev/null
+++ b/arkworks/r1cs-std/src/fields/fp6_3over2.rs
@@ -0,0 +1,85 @@
+use crate::fields::{cubic_extension::*, fp2::*};
+use ark_ff::fields::{fp6_3over2::*, CubicExtParameters, Fp2};
+use ark_relations::r1cs::SynthesisError;
+use core::ops::MulAssign;
+
+/// A sextic extension field constructed as the tower of a
+/// cubic extension over a quadratic extension field.
+/// This is the R1CS equivalent of `ark_ff::fp6_3over2::Fp6

<P>`.
+pub type Fp6Var<P> = CubicExtVar<Fp2Var<<P as Fp6Parameters>::Fp2Params>, Fp6ParamsWrapper<P>>;
+
+impl<P: Fp6Parameters> CubicExtVarParams<Fp2Var<P::Fp2Params>> for Fp6ParamsWrapper<P> {
+    fn mul_base_field_vars_by_frob_coeff(
+        c1: &mut Fp2Var<P::Fp2Params>,
+        c2: &mut Fp2Var<P::Fp2Params>,
+        power: usize,
+    ) {
+        *c1 *= Self::FROBENIUS_COEFF_C1[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
+        *c2 *= Self::FROBENIUS_COEFF_C2[power % Self::DEGREE_OVER_BASE_PRIME_FIELD];
+    }
+}
+
+impl<P: Fp6Parameters> Fp6Var<P>

{ + /// Multiplies `self` by a sparse element which has `c0 == c2 == zero`. + pub fn mul_by_0_c1_0(&self, c1: &Fp2Var) -> Result { + // Karatsuba multiplication + // v0 = a0 * b0 = 0 + + // v1 = a1 * b1 + let v1 = &self.c1 * c1; + + // v2 = a2 * b2 = 0 + + let a1_plus_a2 = &self.c1 + &self.c2; + let b1_plus_b2 = c1.clone(); + + let a0_plus_a1 = &self.c0 + &self.c1; + + // c0 = (NONRESIDUE * ((a1 + a2)*(b1 + b2) - v1 - v2)) + v0 + // = NONRESIDUE * ((a1 + a2) * b1 - v1) + let c0 = &(a1_plus_a2 * &b1_plus_b2 - &v1) * P::NONRESIDUE; + + // c1 = (a0 + a1) * (b0 + b1) - v0 - v1 + NONRESIDUE * v2 + // = (a0 + a1) * b1 - v1 + let c1 = a0_plus_a1 * c1 - &v1; + // c2 = (a0 + a2) * (b0 + b2) - v0 - v2 + v1 + // = v1 + let c2 = v1; + Ok(Self::new(c0, c1, c2)) + } + + /// Multiplies `self` by a sparse element which has `c2 == zero`. + pub fn mul_by_c0_c1_0( + &self, + c0: &Fp2Var, + c1: &Fp2Var, + ) -> Result { + let v0 = &self.c0 * c0; + let v1 = &self.c1 * c1; + // v2 = 0. + + let a1_plus_a2 = &self.c1 + &self.c2; + let a0_plus_a1 = &self.c0 + &self.c1; + let a0_plus_a2 = &self.c0 + &self.c2; + + let b1_plus_b2 = c1.clone(); + let b0_plus_b1 = c0 + c1; + let b0_plus_b2 = c0.clone(); + + let c0 = (&a1_plus_a2 * &b1_plus_b2 - &v1) * P::NONRESIDUE + &v0; + + let c1 = a0_plus_a1 * &b0_plus_b1 - &v0 - &v1; + + let c2 = a0_plus_a2 * &b0_plus_b2 - &v0 + &v1; + + Ok(Self::new(c0, c1, c2)) + } +} + +impl MulAssign> for Fp6Var

{ + fn mul_assign(&mut self, other: Fp2) { + self.c0 *= other; + self.c1 *= other; + self.c2 *= other; + } +} diff --git a/arkworks/r1cs-std/src/fields/mod.rs b/arkworks/r1cs-std/src/fields/mod.rs new file mode 100644 index 00000000..d6294df7 --- /dev/null +++ b/arkworks/r1cs-std/src/fields/mod.rs @@ -0,0 +1,204 @@ +use ark_ff::{prelude::*, BitIteratorBE}; +use ark_relations::r1cs::SynthesisError; +use core::{ + fmt::Debug, + ops::{Add, AddAssign, Mul, MulAssign, Sub, SubAssign}, +}; + +use crate::prelude::*; + +/// This module contains a generic implementation of cubic extension field +/// variables. That is, it implements the R1CS equivalent of +/// `ark_ff::CubicExtField`. +pub mod cubic_extension; +/// This module contains a generic implementation of quadratic extension field +/// variables. That is, it implements the R1CS equivalent of +/// `ark_ff::QuadExtField`. +pub mod quadratic_extension; + +/// This module contains a generic implementation of prime field variables. +/// That is, it implements the R1CS equivalent of `ark_ff::Fp*`. +pub mod fp; + +/// This module contains a generic implementation of the degree-12 tower +/// extension field. That is, it implements the R1CS equivalent of +/// `ark_ff::Fp12` +pub mod fp12; +/// This module contains a generic implementation of the degree-2 tower +/// extension field. That is, it implements the R1CS equivalent of +/// `ark_ff::Fp2` +pub mod fp2; +/// This module contains a generic implementation of the degree-3 tower +/// extension field. That is, it implements the R1CS equivalent of +/// `ark_ff::Fp3` +pub mod fp3; +/// This module contains a generic implementation of the degree-4 tower +/// extension field. That is, it implements the R1CS equivalent of +/// `ark_ff::Fp4` +pub mod fp4; +/// This module contains a generic implementation of the degree-6 tower +/// extension field. That is, it implements the R1CS equivalent of +/// `ark_ff::fp6_2over3::Fp6` +pub mod fp6_2over3; +/// This module contains a generic implementation of the degree-6 tower +/// extension field. That is, it implements the R1CS equivalent of +/// `ark_ff::fp6_3over2::Fp6` +pub mod fp6_3over2; + +/// This trait is a hack used to work around the lack of implied bounds. +pub trait FieldOpsBounds<'a, F, T: 'a>: + Sized + + Add<&'a T, Output = T> + + Sub<&'a T, Output = T> + + Mul<&'a T, Output = T> + + Add + + Sub + + Mul + + Add + + Sub + + Mul +{ +} + +/// A variable representing a field. Corresponds to the native type `F`. +pub trait FieldVar: + 'static + + Clone + + From> + + R1CSVar + + EqGadget + + ToBitsGadget + + AllocVar + + ToBytesGadget + + CondSelectGadget + + for<'a> FieldOpsBounds<'a, F, Self> + + for<'a> AddAssign<&'a Self> + + for<'a> SubAssign<&'a Self> + + for<'a> MulAssign<&'a Self> + + AddAssign + + SubAssign + + MulAssign + + AddAssign + + SubAssign + + MulAssign + + Debug +{ + /// Returns the constant `F::zero()`. + fn zero() -> Self; + + /// Returns a `Boolean` representing whether `self == Self::zero()`. + fn is_zero(&self) -> Result, SynthesisError> { + self.is_eq(&Self::zero()) + } + + /// Returns the constant `F::one()`. + fn one() -> Self; + + /// Returns a `Boolean` representing whether `self == Self::one()`. + fn is_one(&self) -> Result, SynthesisError> { + self.is_eq(&Self::one()) + } + + /// Returns a constant with value `v`. + /// + /// This *should not* allocate any variables. + fn constant(v: F) -> Self; + + /// Computes `self + self`. + fn double(&self) -> Result { + Ok(self.clone() + self) + } + + /// Sets `self = self + self`. 
+    fn double_in_place(&mut self) -> Result<&mut Self, SynthesisError> {
+        *self = self.double()?;
+        Ok(self)
+    }
+
+    /// Computes `-self`.
+    fn negate(&self) -> Result<Self, SynthesisError>;
+
+    /// Sets `self = -self`.
+    #[inline]
+    fn negate_in_place(&mut self) -> Result<&mut Self, SynthesisError> {
+        *self = self.negate()?;
+        Ok(self)
+    }
+
+    /// Computes `self * self`.
+    ///
+    /// A default implementation is provided which just invokes the underlying
+    /// multiplication routine. However, this method should be specialized
+    /// for extension fields, where faster algorithms exist for squaring.
+    fn square(&self) -> Result<Self, SynthesisError> {
+        Ok(self.clone() * self)
+    }
+
+    /// Sets `self = self.square()`.
+    fn square_in_place(&mut self) -> Result<&mut Self, SynthesisError> {
+        *self = self.square()?;
+        Ok(self)
+    }
+
+    /// Enforces that `self * other == result`.
+    fn mul_equals(&self, other: &Self, result: &Self) -> Result<(), SynthesisError> {
+        let actual_result = self.clone() * other;
+        result.enforce_equal(&actual_result)
+    }
+
+    /// Enforces that `self * self == result`.
+    fn square_equals(&self, result: &Self) -> Result<(), SynthesisError> {
+        let actual_result = self.square()?;
+        result.enforce_equal(&actual_result)
+    }
+
+    /// Computes `result` such that `self * result == Self::one()`.
+    fn inverse(&self) -> Result<Self, SynthesisError>;
+
+    /// Returns `self / d`, but requires fewer constraints than computing
+    /// `self * d.inverse()`. It is up to the caller to ensure that `d` is
+    /// non-zero, since in that case the result is unconstrained.
+    fn mul_by_inverse(&self, d: &Self) -> Result<Self, SynthesisError> {
+        let d_inv = if self.is_constant() || d.is_constant() {
+            d.inverse()?
+        } else {
+            Self::new_witness(self.cs(), || Ok(d.value()?.inverse().unwrap_or(F::zero())))?
+        };
+        Ok(d_inv * self)
+    }
+
+    /// Computes the Frobenius map over `self`.
+    fn frobenius_map(&self, power: usize) -> Result<Self, SynthesisError>;
+
+    /// Sets `self = self.frobenius_map()`.
+    fn frobenius_map_in_place(&mut self, power: usize) -> Result<&mut Self, SynthesisError> {
+        *self = self.frobenius_map(power)?;
+        Ok(self)
+    }
+
+    /// Computes `self^bits`, where `bits` is a *little-endian* bit-wise
+    /// decomposition of the exponent.
+    fn pow_le(&self, bits: &[Boolean<ConstraintF>]) -> Result<Self, SynthesisError> {
+        let mut res = Self::one();
+        let mut power = self.clone();
+        for bit in bits {
+            let tmp = res.clone() * &power;
+            res = bit.select(&tmp, &res)?;
+            power.square_in_place()?;
+        }
+        Ok(res)
+    }
+
+    /// Computes `self^S`, where `S` is interpreted as a little-endian
+    /// u64-decomposition of an integer.
+    fn pow_by_constant<S: AsRef<[u64]>>(&self, exp: S) -> Result<Self, SynthesisError> {
+        let mut res = Self::one();
+        for i in BitIteratorBE::without_leading_zeros(exp) {
+            res.square_in_place()?;
+            if i {
+                res *= self;
+            }
+        }
+        Ok(res)
+    }
+}
diff --git a/arkworks/r1cs-std/src/fields/quadratic_extension.rs b/arkworks/r1cs-std/src/fields/quadratic_extension.rs
new file mode 100644
index 00000000..28b771dc
--- /dev/null
+++ b/arkworks/r1cs-std/src/fields/quadratic_extension.rs
@@ -0,0 +1,561 @@
+use ark_ff::{
+    fields::{Field, QuadExtField, QuadExtParameters},
+    Zero,
+};
+use ark_relations::r1cs::{ConstraintSystemRef, Namespace, SynthesisError};
+use core::{borrow::Borrow, marker::PhantomData};
+
+use crate::{
+    fields::{fp::FpVar, FieldOpsBounds, FieldVar},
+    prelude::*,
+    ToConstraintFieldGadget, Vec,
+};
+
+/// This struct is the `R1CS` equivalent of the quadratic extension field type
+/// in `ark-ff`, i.e. `ark_ff::QuadExtField`.
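// A native sketch of the two exponentiation strategies above: `pow_le` walks
// little-endian exponent bits (multiply-then-square), while `pow_by_constant`
// does classic big-endian square-and-multiply. The u64 arithmetic modulo a
// toy prime is an assumption standing in for the circuit's field operations.
const TOY_MOD: u64 = 1_000_000_007;

fn mulmod(a: u64, b: u64) -> u64 {
    ((a as u128 * b as u128) % TOY_MOD as u128) as u64
}

fn pow_le_native(base: u64, bits: &[bool]) -> u64 {
    let (mut res, mut power) = (1, base);
    for &bit in bits {
        if bit {
            res = mulmod(res, power); // mirrors `bit.select(&tmp, &res)`
        }
        power = mulmod(power, power); // mirrors `power.square_in_place()`
    }
    res
}

fn pow_be_native(base: u64, exp: u64) -> u64 {
    let mut res = 1;
    for bit in (0..64).rev().map(|i| (exp >> i) & 1 == 1) {
        res = mulmod(res, res); // square first,
        if bit {
            res = mulmod(res, base); // then multiply on a set bit
        }
    }
    res
}

fn main() {
    let e = 65_537u64;
    let bits: Vec<bool> = (0..64).map(|i| (e >> i) & 1 == 1).collect();
    assert_eq!(pow_le_native(3, &bits), pow_be_native(3, e));
}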
+#[derive(Derivative)]
+#[derivative(Debug(bound = "BF: core::fmt::Debug"), Clone(bound = "BF: Clone"))]
+#[must_use]
+pub struct QuadExtVar<BF: FieldVar<P::BaseField, P::BasePrimeField>, P: QuadExtVarParams<BF>>
+where
+    for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>,
+{
+    /// The zero-th coefficient of this field element.
+    pub c0: BF,
+    /// The first coefficient of this field element.
+    pub c1: BF,
+    #[derivative(Debug = "ignore")]
+    _params: PhantomData<P>

, +} + +/// This trait describes parameters that are used to implement arithmetic for +/// `QuadExtVar`. +pub trait QuadExtVarParams>: + QuadExtParameters +where + for<'a> &'a BF: FieldOpsBounds<'a, Self::BaseField, BF>, +{ + /// Multiply the base field of the `QuadExtVar` by the appropriate Frobenius + /// coefficient. This is equivalent to + /// `Self::mul_base_field_by_frob_coeff(power)`. + fn mul_base_field_var_by_frob_coeff(fe: &mut BF, power: usize); +} + +impl, P: QuadExtVarParams> QuadExtVar +where + for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>, +{ + /// Constructs a `QuadExtVar` from the underlying coefficients. + pub fn new(c0: BF, c1: BF) -> Self { + Self { + c0, + c1, + _params: PhantomData, + } + } + + /// Multiplies a variable of the base field by the quadratic nonresidue + /// `P::NONRESIDUE` that is used to construct the extension field. + #[inline] + pub fn mul_base_field_by_nonresidue(fe: &BF) -> Result { + Ok(fe * P::NONRESIDUE) + } + + /// Multiplies `self` by a constant from the base field. + #[inline] + pub fn mul_by_base_field_constant(&self, fe: P::BaseField) -> Self { + let c0 = self.c0.clone() * fe; + let c1 = self.c1.clone() * fe; + QuadExtVar::new(c0, c1) + } + + /// Sets `self = self.mul_by_base_field_constant(fe)`. + #[inline] + pub fn mul_assign_by_base_field_constant(&mut self, fe: P::BaseField) { + *self = (&*self).mul_by_base_field_constant(fe); + } + + /// This is only to be used when the element is *known* to be in the + /// cyclotomic subgroup. + #[inline] + pub fn unitary_inverse(&self) -> Result { + Ok(Self::new(self.c0.clone(), self.c1.negate()?)) + } + + /// This is only to be used when the element is *known* to be in the + /// cyclotomic subgroup. + #[inline] + #[tracing::instrument(target = "r1cs", skip(exponent))] + pub fn cyclotomic_exp(&self, exponent: impl AsRef<[u64]>) -> Result + where + Self: FieldVar, P::BasePrimeField>, + { + let mut res = Self::one(); + let self_inverse = self.unitary_inverse()?; + + let mut found_nonzero = false; + let naf = ark_ff::biginteger::arithmetic::find_wnaf(exponent.as_ref()); + + for &value in naf.iter().rev() { + if found_nonzero { + res.square_in_place()?; + } + + if value != 0 { + found_nonzero = true; + + if value > 0 { + res *= self; + } else { + res *= &self_inverse; + } + } + } + + Ok(res) + } +} + +impl R1CSVar for QuadExtVar +where + BF: FieldVar, + for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>, + P: QuadExtVarParams, +{ + type Value = QuadExtField

; + + fn cs(&self) -> ConstraintSystemRef { + [&self.c0, &self.c1].cs() + } + + #[inline] + fn value(&self) -> Result { + match (self.c0.value(), self.c1.value()) { + (Ok(c0), Ok(c1)) => Ok(QuadExtField::new(c0, c1)), + (..) => Err(SynthesisError::AssignmentMissing), + } + } +} + +impl From> for QuadExtVar +where + BF: FieldVar, + for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>, + P: QuadExtVarParams, +{ + fn from(other: Boolean) -> Self { + let c0 = BF::from(other); + let c1 = BF::zero(); + Self::new(c0, c1) + } +} + +impl<'a, BF, P> FieldOpsBounds<'a, QuadExtField

, QuadExtVar> for QuadExtVar +where + BF: FieldVar, + for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>, + P: QuadExtVarParams, +{ +} +impl<'a, BF, P> FieldOpsBounds<'a, QuadExtField

, QuadExtVar> for &'a QuadExtVar +where + BF: FieldVar, + for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>, + P: QuadExtVarParams, +{ +} + +impl FieldVar, P::BasePrimeField> for QuadExtVar +where + BF: FieldVar, + for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>, + P: QuadExtVarParams, +{ + fn constant(other: QuadExtField

) -> Self { + let c0 = BF::constant(other.c0); + let c1 = BF::constant(other.c1); + Self::new(c0, c1) + } + + fn zero() -> Self { + let c0 = BF::zero(); + let c1 = BF::zero(); + Self::new(c0, c1) + } + + fn one() -> Self { + let c0 = BF::one(); + let c1 = BF::zero(); + Self::new(c0, c1) + } + + #[inline] + #[tracing::instrument(target = "r1cs")] + fn double(&self) -> Result { + let c0 = self.c0.double()?; + let c1 = self.c1.double()?; + Ok(Self::new(c0, c1)) + } + + #[inline] + #[tracing::instrument(target = "r1cs")] + fn negate(&self) -> Result { + let mut result = self.clone(); + result.c0.negate_in_place()?; + result.c1.negate_in_place()?; + Ok(result) + } + + #[inline] + #[tracing::instrument(target = "r1cs")] + fn square(&self) -> Result { + // From Libsnark/fp2_gadget.tcc + // Complex multiplication for Fp2: + // "Multiplication and Squaring on Pairing-Friendly Fields" + // Devegili, OhEigeartaigh, Scott, Dahab + + // v0 = c0 - c1 + let mut v0 = &self.c0 - &self.c1; + // v3 = c0 - beta * c1 + let v3 = &self.c0 - &Self::mul_base_field_by_nonresidue(&self.c1)?; + // v2 = c0 * c1 + let v2 = &self.c0 * &self.c1; + + // v0 = (v0 * v3) + v2 + v0 *= &v3; + v0 += &v2; + + let c0 = &v0 + &Self::mul_base_field_by_nonresidue(&v2)?; + let c1 = v2.double()?; + + Ok(Self::new(c0, c1)) + } + + #[tracing::instrument(target = "r1cs")] + fn mul_equals(&self, other: &Self, result: &Self) -> Result<(), SynthesisError> { + // Karatsuba multiplication for Fp2: + // v0 = A.c0 * B.c0 + // v1 = A.c1 * B.c1 + // result.c0 = v0 + non_residue * v1 + // result.c1 = (A.c0 + A.c1) * (B.c0 + B.c1) - v0 - v1 + // Enforced with 3 constraints: + // A.c1 * B.c1 = v1 + // A.c0 * B.c0 = result.c0 - non_residue * v1 + // (A.c0+A.c1)*(B.c0+B.c1) = result.c1 + result.c0 + (1 - non_residue) * v1 + // Reference: + // "Multiplication and Squaring on Pairing-Friendly Fields" + // Devegili, OhEigeartaigh, Scott, Dahab + // Compute v1 + let v1 = &self.c1 * &other.c1; + + // Perform second check + let non_residue_times_v1 = Self::mul_base_field_by_nonresidue(&v1)?; + let rhs = &result.c0 - &non_residue_times_v1; + self.c0.mul_equals(&other.c0, &rhs)?; + + // Last check + let a0_plus_a1 = &self.c0 + &self.c1; + let b0_plus_b1 = &other.c0 + &other.c1; + let one_minus_non_residue_v1 = &v1 - &non_residue_times_v1; + + let tmp = &(&result.c1 + &result.c0) + &one_minus_non_residue_v1; + a0_plus_a1.mul_equals(&b0_plus_b1, &tmp)?; + + Ok(()) + } + + #[tracing::instrument(target = "r1cs")] + fn frobenius_map(&self, power: usize) -> Result { + let mut result = self.clone(); + result.c0.frobenius_map_in_place(power)?; + result.c1.frobenius_map_in_place(power)?; + P::mul_base_field_var_by_frob_coeff(&mut result.c1, power); + Ok(result) + } + + #[tracing::instrument(target = "r1cs")] + fn inverse(&self) -> Result { + let mode = if self.is_constant() { + AllocationMode::Constant + } else { + AllocationMode::Witness + }; + let inverse = Self::new_variable( + self.cs(), + || { + self.value() + .map(|f| f.inverse().unwrap_or_else(QuadExtField::zero)) + }, + mode, + )?; + self.mul_equals(&inverse, &Self::one())?; + Ok(inverse) + } +} + +impl_bounded_ops!( + QuadExtVar, + QuadExtField

, + Add, + add, + AddAssign, + add_assign, + |this: &'a QuadExtVar, other: &'a QuadExtVar| { + let c0 = &this.c0 + &other.c0; + let c1 = &this.c1 + &other.c1; + QuadExtVar::new(c0, c1) + }, + |this: &'a QuadExtVar, other: QuadExtField

| { + this + QuadExtVar::constant(other) + }, + (BF: FieldVar, P: QuadExtVarParams), + for <'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF> +); +impl_bounded_ops!( + QuadExtVar, + QuadExtField

, + Sub, + sub, + SubAssign, + sub_assign, + |this: &'a QuadExtVar, other: &'a QuadExtVar| { + let c0 = &this.c0 - &other.c0; + let c1 = &this.c1 - &other.c1; + QuadExtVar::new(c0, c1) + }, + |this: &'a QuadExtVar, other: QuadExtField

| { + this - QuadExtVar::constant(other) + }, + (BF: FieldVar, P: QuadExtVarParams), + for <'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF> +); +impl_bounded_ops!( + QuadExtVar, + QuadExtField

, + Mul, + mul, + MulAssign, + mul_assign, + |this: &'a QuadExtVar, other: &'a QuadExtVar| { + // Karatsuba multiplication for Fp2: + // v0 = A.c0 * B.c0 + // v1 = A.c1 * B.c1 + // result.c0 = v0 + non_residue * v1 + // result.c1 = (A.c0 + A.c1) * (B.c0 + B.c1) - v0 - v1 + // Enforced with 3 constraints: + // A.c1 * B.c1 = v1 + // A.c0 * B.c0 = result.c0 - non_residue * v1 + // (A.c0+A.c1)*(B.c0+B.c1) = result.c1 + result.c0 + (1 - non_residue) * v1 + // Reference: + // "Multiplication and Squaring on Pairing-Friendly Fields" + // Devegili, OhEigeartaigh, Scott, Dahab + let mut result = this.clone(); + let v0 = &this.c0 * &other.c0; + let v1 = &this.c1 * &other.c1; + + result.c1 += &this.c0; + result.c1 *= &other.c0 + &other.c1; + result.c1 -= &v0; + result.c1 -= &v1; + result.c0 = v0 + &QuadExtVar::::mul_base_field_by_nonresidue(&v1).unwrap(); + result + }, + |this: &'a QuadExtVar, other: QuadExtField

| { + this * QuadExtVar::constant(other) + }, + (BF: FieldVar, P: QuadExtVarParams), + for <'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF> +); + +impl EqGadget for QuadExtVar +where + BF: FieldVar, + for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>, + P: QuadExtVarParams, +{ + #[tracing::instrument(target = "r1cs")] + fn is_eq(&self, other: &Self) -> Result, SynthesisError> { + let b0 = self.c0.is_eq(&other.c0)?; + let b1 = self.c1.is_eq(&other.c1)?; + b0.and(&b1) + } + + #[inline] + #[tracing::instrument(target = "r1cs")] + fn conditional_enforce_equal( + &self, + other: &Self, + condition: &Boolean, + ) -> Result<(), SynthesisError> { + self.c0.conditional_enforce_equal(&other.c0, condition)?; + self.c1.conditional_enforce_equal(&other.c1, condition)?; + Ok(()) + } + + #[inline] + #[tracing::instrument(target = "r1cs")] + fn conditional_enforce_not_equal( + &self, + other: &Self, + condition: &Boolean, + ) -> Result<(), SynthesisError> { + let is_equal = self.is_eq(other)?; + is_equal + .and(condition)? + .enforce_equal(&Boolean::Constant(false)) + } +} + +impl ToBitsGadget for QuadExtVar +where + BF: FieldVar, + for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>, + P: QuadExtVarParams, +{ + #[tracing::instrument(target = "r1cs")] + fn to_bits_le(&self) -> Result>, SynthesisError> { + let mut c0 = self.c0.to_bits_le()?; + let mut c1 = self.c1.to_bits_le()?; + c0.append(&mut c1); + Ok(c0) + } + + #[tracing::instrument(target = "r1cs")] + fn to_non_unique_bits_le(&self) -> Result>, SynthesisError> { + let mut c0 = self.c0.to_non_unique_bits_le()?; + let mut c1 = self.c1.to_non_unique_bits_le()?; + c0.append(&mut c1); + Ok(c0) + } +} + +impl ToBytesGadget for QuadExtVar +where + BF: FieldVar, + for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>, + P: QuadExtVarParams, +{ + #[tracing::instrument(target = "r1cs")] + fn to_bytes(&self) -> Result>, SynthesisError> { + let mut c0 = self.c0.to_bytes()?; + let mut c1 = self.c1.to_bytes()?; + c0.append(&mut c1); + Ok(c0) + } + + #[tracing::instrument(target = "r1cs")] + fn to_non_unique_bytes(&self) -> Result>, SynthesisError> { + let mut c0 = self.c0.to_non_unique_bytes()?; + let mut c1 = self.c1.to_non_unique_bytes()?; + c0.append(&mut c1); + Ok(c0) + } +} + +impl ToConstraintFieldGadget for QuadExtVar +where + BF: FieldVar, + for<'a> &'a BF: FieldOpsBounds<'a, P::BaseField, BF>, + P: QuadExtVarParams, + BF: ToConstraintFieldGadget, +{ + #[tracing::instrument(target = "r1cs")] + fn to_constraint_field(&self) -> Result>, SynthesisError> { + let mut res = Vec::new(); + + res.extend_from_slice(&self.c0.to_constraint_field()?); + res.extend_from_slice(&self.c1.to_constraint_field()?); + + Ok(res) + } +} + +impl CondSelectGadget for QuadExtVar +where + BF: FieldVar, + for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>, + P: QuadExtVarParams, +{ + #[inline] + fn conditionally_select( + cond: &Boolean, + true_value: &Self, + false_value: &Self, + ) -> Result { + let c0 = BF::conditionally_select(cond, &true_value.c0, &false_value.c0)?; + let c1 = BF::conditionally_select(cond, &true_value.c1, &false_value.c1)?; + Ok(Self::new(c0, c1)) + } +} + +impl TwoBitLookupGadget for QuadExtVar +where + BF: FieldVar + + TwoBitLookupGadget, + for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>, + P: QuadExtVarParams, +{ + type TableConstant = QuadExtField

; + + #[tracing::instrument(target = "r1cs")] + fn two_bit_lookup( + b: &[Boolean], + c: &[Self::TableConstant], + ) -> Result { + let c0s = c.iter().map(|f| f.c0).collect::>(); + let c1s = c.iter().map(|f| f.c1).collect::>(); + let c0 = BF::two_bit_lookup(b, &c0s)?; + let c1 = BF::two_bit_lookup(b, &c1s)?; + Ok(Self::new(c0, c1)) + } +} + +impl ThreeBitCondNegLookupGadget for QuadExtVar +where + BF: FieldVar + + ThreeBitCondNegLookupGadget, + for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>, + P: QuadExtVarParams, +{ + type TableConstant = QuadExtField

; + + #[tracing::instrument(target = "r1cs")] + fn three_bit_cond_neg_lookup( + b: &[Boolean], + b0b1: &Boolean, + c: &[Self::TableConstant], + ) -> Result { + let c0s = c.iter().map(|f| f.c0).collect::>(); + let c1s = c.iter().map(|f| f.c1).collect::>(); + let c0 = BF::three_bit_cond_neg_lookup(b, b0b1, &c0s)?; + let c1 = BF::three_bit_cond_neg_lookup(b, b0b1, &c1s)?; + Ok(Self::new(c0, c1)) + } +} + +impl AllocVar, P::BasePrimeField> for QuadExtVar +where + BF: FieldVar, + for<'b> &'b BF: FieldOpsBounds<'b, P::BaseField, BF>, + P: QuadExtVarParams, +{ + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + let (c0, c1) = match f() { + Ok(fe) => (Ok(fe.borrow().c0), Ok(fe.borrow().c1)), + Err(_) => ( + Err(SynthesisError::AssignmentMissing), + Err(SynthesisError::AssignmentMissing), + ), + }; + + let c0 = BF::new_variable(ark_relations::ns!(cs, "c0"), || c0, mode)?; + let c1 = BF::new_variable(ark_relations::ns!(cs, "c1"), || c1, mode)?; + Ok(Self::new(c0, c1)) + } +} diff --git a/arkworks/r1cs-std/src/groups/curves/mod.rs b/arkworks/r1cs-std/src/groups/curves/mod.rs new file mode 100644 index 00000000..b0c12e97 --- /dev/null +++ b/arkworks/r1cs-std/src/groups/curves/mod.rs @@ -0,0 +1,9 @@ +/// This module generically implements arithmetic for Short +/// Weierstrass elliptic curves by following the complete formulae of +/// [[Renes, Costello, Batina 2015]](https://eprint.iacr.org/2015/1060). +pub mod short_weierstrass; + +/// This module generically implements arithmetic for Twisted +/// Edwards elliptic curves by following the complete formulae described in the +/// [EFD](https://www.hyperelliptic.org/EFD/g1p/auto-twisted.html). +pub mod twisted_edwards; diff --git a/arkworks/r1cs-std/src/groups/curves/short_weierstrass/bls12/mod.rs b/arkworks/r1cs-std/src/groups/curves/short_weierstrass/bls12/mod.rs new file mode 100644 index 00000000..a9384a20 --- /dev/null +++ b/arkworks/r1cs-std/src/groups/curves/short_weierstrass/bls12/mod.rs @@ -0,0 +1,247 @@ +use ark_ec::{ + bls12::{Bls12Parameters, G1Prepared, G2Prepared, TwistType}, + short_weierstrass_jacobian::GroupAffine, +}; +use ark_ff::{BitIteratorBE, Field, One}; +use ark_relations::r1cs::{Namespace, SynthesisError}; + +use crate::{ + fields::{fp::FpVar, fp2::Fp2Var, FieldVar}, + groups::curves::short_weierstrass::*, + Vec, +}; + +use core::fmt::Debug; + +/// Represents a projective point in G1. +pub type G1Var

<P> =
+    ProjectiveVar<<P as Bls12Parameters>::G1Parameters, FpVar<<P as Bls12Parameters>::Fp>>;
+
+/// Represents an affine point on G1. Should be used only for comparison and
+/// when a canonical representation of a point is required, and not for
+/// arithmetic.
+pub type G1AffineVar<P> =
+    AffineVar<<P as Bls12Parameters>::G1Parameters, FpVar<<P as Bls12Parameters>::Fp>>;
+
+/// Represents a projective point in G2.
+pub type G2Var<P> = ProjectiveVar<<P as Bls12Parameters>::G2Parameters, Fp2G<P>>;
+/// Represents an affine point on G2. Should be used only for comparison and
+/// when a canonical representation of a point is required, and not for
+/// arithmetic.
+pub type G2AffineVar<P> = AffineVar<<P as Bls12Parameters>::G2Parameters, Fp2G<P>>;
+
+/// Represents the cached precomputation that can be performed on a G1 element
+/// which enables speeding up pairing computation.
+#[derive(Derivative)]
+#[derivative(Clone(bound = "G1Var<P>: Clone"), Debug(bound = "G1Var<P>: Debug"))]
+pub struct G1PreparedVar<P: Bls12Parameters>(pub AffineVar<P::G1Parameters, FpVar<P::Fp>>);
+
+impl<P: Bls12Parameters> G1PreparedVar<P>

{ + /// Returns the value assigned to `self` in the underlying constraint + /// system. + pub fn value(&self) -> Result, SynthesisError> { + let x = self.0.x.value()?; + let y = self.0.y.value()?; + let infinity = self.0.infinity.value()?; + let g = GroupAffine::new(x, y, infinity); + Ok(g.into()) + } + + /// Constructs `Self` from a `G1Var`. + pub fn from_group_var(q: &G1Var

) -> Result { + let g = q.to_affine()?; + Ok(Self(g)) + } +} + +impl AllocVar, P::Fp> for G1PreparedVar

{ + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + let g1_prep = f().map(|b| b.borrow().0); + + let x = FpVar::new_variable(ark_relations::ns!(cs, "x"), || g1_prep.map(|g| g.x), mode)?; + let y = FpVar::new_variable(ark_relations::ns!(cs, "y"), || g1_prep.map(|g| g.y), mode)?; + let infinity = Boolean::new_variable( + ark_relations::ns!(cs, "inf"), + || g1_prep.map(|g| g.infinity), + mode, + )?; + let g = AffineVar::new(x, y, infinity); + Ok(Self(g)) + } +} + +impl ToBytesGadget for G1PreparedVar

{ + #[inline] + #[tracing::instrument(target = "r1cs")] + fn to_bytes(&self) -> Result>, SynthesisError> { + let mut bytes = self.0.x.to_bytes()?; + let y_bytes = self.0.y.to_bytes()?; + let inf_bytes = self.0.infinity.to_bytes()?; + bytes.extend_from_slice(&y_bytes); + bytes.extend_from_slice(&inf_bytes); + Ok(bytes) + } + + #[tracing::instrument(target = "r1cs")] + fn to_non_unique_bytes(&self) -> Result>, SynthesisError> { + let mut bytes = self.0.x.to_non_unique_bytes()?; + let y_bytes = self.0.y.to_non_unique_bytes()?; + let inf_bytes = self.0.infinity.to_non_unique_bytes()?; + bytes.extend_from_slice(&y_bytes); + bytes.extend_from_slice(&inf_bytes); + Ok(bytes) + } +} + +type Fp2G

<P> = Fp2Var<<P as Bls12Parameters>::Fp2Params>;
+type LCoeff<P> = (Fp2G<P>, Fp2G<P>);
+/// Represents the cached precomputation that can be performed on a G2 element
+/// which enables speeding up pairing computation.
+#[derive(Derivative)]
+#[derivative(
+    Clone(bound = "Fp2Var<P::Fp2Params>: Clone"),
+    Debug(bound = "Fp2Var<P::Fp2Params>: Debug")
+)]
+pub struct G2PreparedVar<P: Bls12Parameters> {
+    #[doc(hidden)]
+    pub ell_coeffs: Vec<LCoeff<P>>,
+}
+
+impl<P: Bls12Parameters> AllocVar<G2Prepared<P>, P::Fp> for G2PreparedVar<P>

{ + #[tracing::instrument(target = "r1cs", skip(cs, f, mode))] + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + let g2_prep = f().map(|b| { + let projective_coeffs = &b.borrow().ell_coeffs; + let mut z_s = projective_coeffs + .iter() + .map(|(_, _, z)| *z) + .collect::>(); + ark_ff::fields::batch_inversion(&mut z_s); + projective_coeffs + .iter() + .zip(z_s) + .map(|((x, y, _), z_inv)| (*x * &z_inv, *y * &z_inv)) + .collect::>() + }); + + let l = Vec::new_variable( + ark_relations::ns!(cs, "l"), + || { + g2_prep + .clone() + .map(|c| c.iter().map(|(l, _)| *l).collect::>()) + }, + mode, + )?; + let r = Vec::new_variable( + ark_relations::ns!(cs, "r"), + || g2_prep.map(|c| c.iter().map(|(_, r)| *r).collect::>()), + mode, + )?; + let ell_coeffs = l.into_iter().zip(r).collect(); + Ok(Self { ell_coeffs }) + } +} + +impl ToBytesGadget for G2PreparedVar

{ + #[inline] + #[tracing::instrument(target = "r1cs")] + fn to_bytes(&self) -> Result>, SynthesisError> { + let mut bytes = Vec::new(); + for coeffs in &self.ell_coeffs { + bytes.extend_from_slice(&coeffs.0.to_bytes()?); + bytes.extend_from_slice(&coeffs.1.to_bytes()?); + } + Ok(bytes) + } + + #[tracing::instrument(target = "r1cs")] + fn to_non_unique_bytes(&self) -> Result>, SynthesisError> { + let mut bytes = Vec::new(); + for coeffs in &self.ell_coeffs { + bytes.extend_from_slice(&coeffs.0.to_non_unique_bytes()?); + bytes.extend_from_slice(&coeffs.1.to_non_unique_bytes()?); + } + Ok(bytes) + } +} + +impl G2PreparedVar

{ + /// Constructs `Self` from a `G2Var`. + #[tracing::instrument(target = "r1cs")] + pub fn from_group_var(q: &G2Var

) -> Result { + let q = q.to_affine()?; + let two_inv = P::Fp::one().double().inverse().unwrap(); + // Enforce that `q` is not the point at infinity. + q.infinity.enforce_not_equal(&Boolean::Constant(true))?; + let mut ell_coeffs = vec![]; + let mut r = q.clone(); + + for i in BitIteratorBE::new(P::X).skip(1) { + ell_coeffs.push(Self::double(&mut r, &two_inv)?); + + if i { + ell_coeffs.push(Self::add(&mut r, &q)?); + } + } + + Ok(Self { ell_coeffs }) + } + + #[tracing::instrument(target = "r1cs")] + fn double(r: &mut G2AffineVar

, two_inv: &P::Fp) -> Result, SynthesisError> { + let a = r.y.inverse()?; + let mut b = r.x.square()?; + let b_tmp = b.clone(); + b.mul_assign_by_base_field_constant(*two_inv); + b += &b_tmp; + + let c = &a * &b; + let d = r.x.double()?; + let x3 = c.square()? - &d; + let e = &c * &r.x - &r.y; + let c_x3 = &c * &x3; + let y3 = &e - &c_x3; + let mut f = c; + f.negate_in_place()?; + r.x = x3; + r.y = y3; + match P::TWIST_TYPE { + TwistType::M => Ok((e, f)), + TwistType::D => Ok((f, e)), + } + } + + #[tracing::instrument(target = "r1cs")] + fn add(r: &mut G2AffineVar

, q: &G2AffineVar

) -> Result, SynthesisError> { + let a = (&q.x - &r.x).inverse()?; + let b = &q.y - &r.y; + let c = &a * &b; + let d = &r.x + &q.x; + let x3 = c.square()? - &d; + + let e = (&r.x - &x3) * &c; + let y3 = e - &r.y; + let g = &c * &r.x - &r.y; + let mut f = c; + f.negate_in_place()?; + r.x = x3; + r.y = y3; + match P::TWIST_TYPE { + TwistType::M => Ok((g, f)), + TwistType::D => Ok((f, g)), + } + } +} diff --git a/arkworks/r1cs-std/src/groups/curves/short_weierstrass/mnt4/mod.rs b/arkworks/r1cs-std/src/groups/curves/short_weierstrass/mnt4/mod.rs new file mode 100644 index 00000000..bd0bdc6a --- /dev/null +++ b/arkworks/r1cs-std/src/groups/curves/short_weierstrass/mnt4/mod.rs @@ -0,0 +1,495 @@ +use ark_ec::mnt4::{ + g2::{AteAdditionCoefficients, AteDoubleCoefficients}, + G1Prepared, G2Prepared, MNT4Parameters, +}; +use ark_ff::Field; +use ark_relations::r1cs::{Namespace, SynthesisError}; + +use crate::{ + fields::{fp::FpVar, fp2::Fp2Var, FieldVar}, + groups::curves::short_weierstrass::ProjectiveVar, + pairing::mnt4::PairingVar, + prelude::*, + Vec, +}; +use core::borrow::Borrow; + +/// Represents a projective point in G1. +pub type G1Var

<P> =
+    ProjectiveVar<<P as MNT4Parameters>::G1Parameters, FpVar<<P as MNT4Parameters>::Fp>>;
+
+/// Represents a projective point in G2.
+pub type G2Var<P> = ProjectiveVar<<P as MNT4Parameters>::G2Parameters, Fp2G<P>>;
+
+/// Represents the cached precomputation that can be performed on a G1 element
+/// which enables speeding up pairing computation.
+#[derive(Derivative)]
+#[derivative(Clone(bound = "P: MNT4Parameters"), Debug(bound = "P: MNT4Parameters"))]
+pub struct G1PreparedVar<P: MNT4Parameters> {
+    #[doc(hidden)]
+    pub x: FpVar<P::Fp>,
+    #[doc(hidden)]
+    pub y: FpVar<P::Fp>,
+    #[doc(hidden)]
+    pub x_twist: Fp2Var<P::Fp2Params>,
+    #[doc(hidden)]
+    pub y_twist: Fp2Var<P::Fp2Params>,
+}
+
+impl<P: MNT4Parameters> AllocVar<G1Prepared<P>, P::Fp> for G1PreparedVar<P>

{ + #[tracing::instrument(target = "r1cs", skip(cs, f))] + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + + let g1_prep = f().map(|b| *b.borrow()); + + let x = FpVar::new_variable(ark_relations::ns!(cs, "x"), || g1_prep.map(|g| g.x), mode)?; + let y = FpVar::new_variable(ark_relations::ns!(cs, "y"), || g1_prep.map(|g| g.y), mode)?; + let x_twist = Fp2Var::new_variable( + ark_relations::ns!(cs, "x_twist"), + || g1_prep.map(|g| g.x_twist), + mode, + )?; + let y_twist = Fp2Var::new_variable( + ark_relations::ns!(cs, "y_twist"), + || g1_prep.map(|g| g.y_twist), + mode, + )?; + Ok(Self { + x, + y, + x_twist, + y_twist, + }) + } +} + +impl G1PreparedVar

{ + /// Returns the value assigned to `self` in the underlying constraint + /// system. + pub fn value(&self) -> Result, SynthesisError> { + let (x, y, x_twist, y_twist) = ( + self.x.value()?, + self.y.value()?, + self.x_twist.value()?, + self.y_twist.value()?, + ); + Ok(G1Prepared { + x, + y, + x_twist, + y_twist, + }) + } + + /// Constructs `Self` from a `G1Var`. + #[tracing::instrument(target = "r1cs")] + pub fn from_group_var(q: &G1Var

) -> Result { + let q = q.to_affine()?; + let x_twist = Fp2Var::new(&q.x * P::TWIST.c0, &q.x * P::TWIST.c1); + let y_twist = Fp2Var::new(&q.y * P::TWIST.c0, &q.y * P::TWIST.c1); + Ok(G1PreparedVar { + x: q.x, + y: q.y, + x_twist, + y_twist, + }) + } +} + +impl ToBytesGadget for G1PreparedVar

{ + #[inline] + #[tracing::instrument(target = "r1cs")] + fn to_bytes(&self) -> Result>, SynthesisError> { + let mut x = self.x.to_bytes()?; + let mut y = self.y.to_bytes()?; + let mut x_twist = self.x_twist.to_bytes()?; + let mut y_twist = self.y_twist.to_bytes()?; + + x.append(&mut y); + x.append(&mut x_twist); + x.append(&mut y_twist); + Ok(x) + } + + #[tracing::instrument(target = "r1cs")] + fn to_non_unique_bytes(&self) -> Result>, SynthesisError> { + let mut x = self.x.to_non_unique_bytes()?; + let mut y = self.y.to_non_unique_bytes()?; + let mut x_twist = self.x_twist.to_non_unique_bytes()?; + let mut y_twist = self.y_twist.to_non_unique_bytes()?; + + x.append(&mut y); + x.append(&mut x_twist); + x.append(&mut y_twist); + Ok(x) + } +} + +type Fp2G

<P> = Fp2Var<<P as MNT4Parameters>::Fp2Params>;
+
+/// Represents the cached precomputation that can be performed on a G2 element
+/// which enables speeding up pairing computation.
+#[derive(Derivative)]
+#[derivative(Clone(bound = "P: MNT4Parameters"), Debug(bound = "P: MNT4Parameters"))]
+pub struct G2PreparedVar<P: MNT4Parameters> {
+    #[doc(hidden)]
+    pub x: Fp2Var<P::Fp2Params>,
+    #[doc(hidden)]
+    pub y: Fp2Var<P::Fp2Params>,
+    #[doc(hidden)]
+    pub x_over_twist: Fp2Var<P::Fp2Params>,
+    #[doc(hidden)]
+    pub y_over_twist: Fp2Var<P::Fp2Params>,
+    #[doc(hidden)]
+    pub double_coefficients: Vec<AteDoubleCoefficientsVar<P>>,
+    #[doc(hidden)]
+    pub addition_coefficients: Vec<AteAdditionCoefficientsVar<P>>,
+}
+
+impl<P: MNT4Parameters> AllocVar<G2Prepared<P>, P::Fp> for G2PreparedVar<P>
<P>
{ + #[tracing::instrument(target = "r1cs", skip(cs, f))] + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + + let g2_prep = f().map(|b| b.borrow().clone()); + let g2 = g2_prep.as_ref().map_err(|e| *e); + + let x = Fp2Var::new_variable(ark_relations::ns!(cs, "x"), || g2.map(|g| g.x), mode)?; + let y = Fp2Var::new_variable(ark_relations::ns!(cs, "y"), || g2.map(|g| g.y), mode)?; + let x_over_twist = Fp2Var::new_variable( + ark_relations::ns!(cs, "x_over_twist"), + || g2.map(|g| g.x_over_twist), + mode, + )?; + let y_over_twist = Fp2Var::new_variable( + ark_relations::ns!(cs, "y_over_twist"), + || g2.map(|g| g.y_over_twist), + mode, + )?; + let double_coefficients = Vec::new_variable( + ark_relations::ns!(cs, "double coeffs"), + || g2.map(|g| g.double_coefficients.clone()), + mode, + )?; + let addition_coefficients = Vec::new_variable( + ark_relations::ns!(cs, "add coeffs"), + || g2.map(|g| g.addition_coefficients.clone()), + mode, + )?; + Ok(Self { + x, + y, + x_over_twist, + y_over_twist, + double_coefficients, + addition_coefficients, + }) + } +} + +impl ToBytesGadget for G2PreparedVar
<P>
{ + #[inline] + #[tracing::instrument(target = "r1cs")] + fn to_bytes(&self) -> Result>, SynthesisError> { + let mut x = self.x.to_bytes()?; + let mut y = self.y.to_bytes()?; + let mut x_over_twist = self.x_over_twist.to_bytes()?; + let mut y_over_twist = self.y_over_twist.to_bytes()?; + + x.append(&mut y); + x.append(&mut x_over_twist); + x.append(&mut y_over_twist); + + for coeff in &self.double_coefficients { + x.extend_from_slice(&coeff.to_bytes()?); + } + for coeff in &self.addition_coefficients { + x.extend_from_slice(&coeff.to_bytes()?); + } + Ok(x) + } + + #[tracing::instrument(target = "r1cs")] + fn to_non_unique_bytes(&self) -> Result>, SynthesisError> { + let mut x = self.x.to_non_unique_bytes()?; + let mut y = self.y.to_non_unique_bytes()?; + let mut x_over_twist = self.x_over_twist.to_non_unique_bytes()?; + let mut y_over_twist = self.y_over_twist.to_non_unique_bytes()?; + + x.append(&mut y); + x.append(&mut x_over_twist); + x.append(&mut y_over_twist); + + for coeff in &self.double_coefficients { + x.extend_from_slice(&coeff.to_non_unique_bytes()?); + } + for coeff in &self.addition_coefficients { + x.extend_from_slice(&coeff.to_non_unique_bytes()?); + } + Ok(x) + } +} + +impl G2PreparedVar
<P>
{ + /// Returns the value assigned to `self` in the underlying constraint + /// system. + pub fn value(&self) -> Result, SynthesisError> { + let x = self.x.value()?; + let y = self.y.value()?; + let x_over_twist = self.x_over_twist.value()?; + let y_over_twist = self.y_over_twist.value()?; + let double_coefficients = self + .double_coefficients + .iter() + .map(|coeff| coeff.value()) + .collect::>, _>>()?; + let addition_coefficients = self + .addition_coefficients + .iter() + .map(|coeff| coeff.value()) + .collect::>, _>>()?; + Ok(G2Prepared { + x, + y, + x_over_twist, + y_over_twist, + double_coefficients, + addition_coefficients, + }) + } + + /// Constructs `Self` from a `G2Var`. + #[tracing::instrument(target = "r1cs")] + pub fn from_group_var(q: &G2Var
<P>
) -> Result { + let twist_inv = P::TWIST.inverse().unwrap(); + let q = q.to_affine()?; + + let mut g2p = G2PreparedVar { + x: q.x.clone(), + y: q.y.clone(), + x_over_twist: &q.x * twist_inv, + y_over_twist: &q.y * twist_inv, + double_coefficients: vec![], + addition_coefficients: vec![], + }; + + let mut r = G2ProjectiveExtendedVar { + x: q.x.clone(), + y: q.y.clone(), + z: Fp2G::
<P>
::one(), + t: Fp2G::
<P>
::one(), + }; + + for (idx, value) in P::ATE_LOOP_COUNT.iter().rev().enumerate() { + let mut tmp = *value; + let skip_extraneous_bits = 64 - value.leading_zeros(); + let mut v = Vec::with_capacity(16); + for i in 0..64 { + if idx == 0 && (i == 0 || i >= skip_extraneous_bits) { + continue; + } + v.push(tmp & 1 == 1); + tmp >>= 1; + } + + for bit in v.iter().rev() { + let (r2, coeff) = PairingVar::
<P>
::doubling_step_for_flipped_miller_loop(&r)?; + g2p.double_coefficients.push(coeff); + r = r2; + + if *bit { + let (r2, coeff) = PairingVar::
<P>
::mixed_addition_step_for_flipped_miller_loop( + &q.x, &q.y, &r, + )?; + g2p.addition_coefficients.push(coeff); + r = r2; + } + + tmp >>= 1; + } + } + + if P::ATE_IS_LOOP_COUNT_NEG { + let rz_inv = r.z.inverse()?; + let rz2_inv = rz_inv.square()?; + let rz3_inv = &rz_inv * &rz2_inv; + + let minus_r_affine_x = &r.x * &rz2_inv; + let minus_r_affine_y = r.y.negate()? * &rz3_inv; + + let add_result = PairingVar::
<P>
::mixed_addition_step_for_flipped_miller_loop( + &minus_r_affine_x, + &minus_r_affine_y, + &r, + )?; + g2p.addition_coefficients.push(add_result.1); + } + + Ok(g2p) + } +} + +#[doc(hidden)] +#[derive(Derivative)] +#[derivative(Clone(bound = "P: MNT4Parameters"), Debug(bound = "P: MNT4Parameters"))] +pub struct AteDoubleCoefficientsVar { + pub c_h: Fp2Var, + pub c_4c: Fp2Var, + pub c_j: Fp2Var, + pub c_l: Fp2Var, +} + +impl AllocVar, P::Fp> for AteDoubleCoefficientsVar
<P>
{ + #[tracing::instrument(target = "r1cs", skip(cs, f))] + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + + let c_prep = f().map(|c| c.borrow().clone()); + let c = c_prep.as_ref().map_err(|e| *e); + + let c_h = Fp2Var::new_variable(ark_relations::ns!(cs, "c_h"), || c.map(|c| c.c_h), mode)?; + let c_4c = + Fp2Var::new_variable(ark_relations::ns!(cs, "c_4c"), || c.map(|c| c.c_4c), mode)?; + let c_j = Fp2Var::new_variable(ark_relations::ns!(cs, "c_j"), || c.map(|c| c.c_j), mode)?; + let c_l = Fp2Var::new_variable(ark_relations::ns!(cs, "c_l"), || c.map(|c| c.c_l), mode)?; + Ok(Self { + c_h, + c_4c, + c_j, + c_l, + }) + } +} + +impl ToBytesGadget for AteDoubleCoefficientsVar
<P>
{ + #[inline] + #[tracing::instrument(target = "r1cs")] + fn to_bytes(&self) -> Result>, SynthesisError> { + let mut c_h = self.c_h.to_bytes()?; + let mut c_4c = self.c_4c.to_bytes()?; + let mut c_j = self.c_j.to_bytes()?; + let mut c_l = self.c_l.to_bytes()?; + + c_h.append(&mut c_4c); + c_h.append(&mut c_j); + c_h.append(&mut c_l); + Ok(c_h) + } + + #[tracing::instrument(target = "r1cs")] + fn to_non_unique_bytes(&self) -> Result>, SynthesisError> { + let mut c_h = self.c_h.to_non_unique_bytes()?; + let mut c_4c = self.c_4c.to_non_unique_bytes()?; + let mut c_j = self.c_j.to_non_unique_bytes()?; + let mut c_l = self.c_l.to_non_unique_bytes()?; + + c_h.append(&mut c_4c); + c_h.append(&mut c_j); + c_h.append(&mut c_l); + Ok(c_h) + } +} + +impl AteDoubleCoefficientsVar
<P>
{
+    /// Returns the value assigned to `self` in the underlying constraint
+    /// system.
+    pub fn value(&self) -> Result<AteDoubleCoefficients<P>, SynthesisError> {
+        let (c_h, c_4c, c_j, c_l) = (
+            self.c_h.value()?,
+            self.c_4c.value()?,
+            self.c_j.value()?,
+            self.c_l.value()?,
+        );
+        Ok(AteDoubleCoefficients {
+            c_h,
+            c_4c,
+            c_j,
+            c_l,
+        })
+    }
+}
+
+#[doc(hidden)]
+#[derive(Derivative)]
+#[derivative(Clone(bound = "P: MNT4Parameters"), Debug(bound = "P: MNT4Parameters"))]
+pub struct AteAdditionCoefficientsVar<P: MNT4Parameters> {
+    pub c_l1: Fp2Var<P::Fp2Params>,
+    pub c_rz: Fp2Var<P::Fp2Params>,
+}
+
+impl<P: MNT4Parameters> AllocVar<AteAdditionCoefficients<P>, P::Fp>
+    for AteAdditionCoefficientsVar
<P>
+{ + #[tracing::instrument(target = "r1cs", skip(cs, f))] + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + + let c_prep = f().map(|c| c.borrow().clone()); + let c = c_prep.as_ref().map_err(|e| *e); + + let c_l1 = + Fp2Var::new_variable(ark_relations::ns!(cs, "c_l1"), || c.map(|c| c.c_l1), mode)?; + let c_rz = + Fp2Var::new_variable(ark_relations::ns!(cs, "c_rz"), || c.map(|c| c.c_rz), mode)?; + Ok(Self { c_l1, c_rz }) + } +} + +impl ToBytesGadget for AteAdditionCoefficientsVar
<P>
{ + #[inline] + #[tracing::instrument(target = "r1cs")] + fn to_bytes(&self) -> Result>, SynthesisError> { + let mut c_l1 = self.c_l1.to_bytes()?; + let mut c_rz = self.c_rz.to_bytes()?; + + c_l1.append(&mut c_rz); + Ok(c_l1) + } + + #[tracing::instrument(target = "r1cs")] + fn to_non_unique_bytes(&self) -> Result>, SynthesisError> { + let mut c_l1 = self.c_l1.to_non_unique_bytes()?; + let mut c_rz = self.c_rz.to_non_unique_bytes()?; + + c_l1.append(&mut c_rz); + Ok(c_l1) + } +} + +impl AteAdditionCoefficientsVar
<P>
{ + /// Returns the value assigned to `self` in the underlying constraint + /// system. + pub fn value(&self) -> Result, SynthesisError> { + let (c_l1, c_rz) = (self.c_l1.value()?, self.c_rz.value()?); + Ok(AteAdditionCoefficients { c_l1, c_rz }) + } +} + +#[doc(hidden)] +pub struct G2ProjectiveExtendedVar { + pub x: Fp2Var, + pub y: Fp2Var, + pub z: Fp2Var, + pub t: Fp2Var, +} diff --git a/arkworks/r1cs-std/src/groups/curves/short_weierstrass/mnt6/mod.rs b/arkworks/r1cs-std/src/groups/curves/short_weierstrass/mnt6/mod.rs new file mode 100644 index 00000000..8234230a --- /dev/null +++ b/arkworks/r1cs-std/src/groups/curves/short_weierstrass/mnt6/mod.rs @@ -0,0 +1,494 @@ +use ark_ec::mnt6::{ + g2::{AteAdditionCoefficients, AteDoubleCoefficients}, + G1Prepared, G2Prepared, MNT6Parameters, +}; +use ark_ff::Field; +use ark_relations::r1cs::{Namespace, SynthesisError}; + +use crate::{ + fields::{fp::FpVar, fp3::Fp3Var, FieldVar}, + groups::curves::short_weierstrass::ProjectiveVar, + pairing::mnt6::PairingVar, + prelude::*, + Vec, +}; +use core::borrow::Borrow; + +/// Represents a projective point in G1. +pub type G1Var
<P> =
+    ProjectiveVar<<P as MNT6Parameters>::G1Parameters, FpVar<<P as MNT6Parameters>::Fp>>;
+
+/// Represents a projective point in G2.
+pub type G2Var<P> = ProjectiveVar<<P as MNT6Parameters>::G2Parameters, Fp3G<P>
>; + +/// Represents the cached precomputation that can be performed on a G1 element +/// which enables speeding up pairing computation. +#[derive(Derivative)] +#[derivative(Clone(bound = "P: MNT6Parameters"), Debug(bound = "P: MNT6Parameters"))] +pub struct G1PreparedVar { + #[doc(hidden)] + pub x: FpVar, + #[doc(hidden)] + pub y: FpVar, + #[doc(hidden)] + pub x_twist: Fp3Var, + #[doc(hidden)] + pub y_twist: Fp3Var, +} + +impl G1PreparedVar
<P>
{ + /// Returns the value assigned to `self` in the underlying constraint + /// system. + pub fn value(&self) -> Result, SynthesisError> { + let x = self.x.value()?; + let y = self.y.value()?; + let x_twist = self.x_twist.value()?; + let y_twist = self.y_twist.value()?; + Ok(G1Prepared { + x, + y, + x_twist, + y_twist, + }) + } + + /// Constructs `Self` from a `G1Var`. + #[tracing::instrument(target = "r1cs")] + pub fn from_group_var(q: &G1Var
<P>
) -> Result { + let q = q.to_affine()?; + let zero = FpVar::::zero(); + let x_twist = Fp3Var::new(q.x.clone(), zero.clone(), zero.clone()) * P::TWIST; + let y_twist = Fp3Var::new(q.y.clone(), zero.clone(), zero) * P::TWIST; + let result = G1PreparedVar { + x: q.x, + y: q.y, + x_twist, + y_twist, + }; + Ok(result) + } +} + +impl AllocVar, P::Fp> for G1PreparedVar
<P>
{ + #[tracing::instrument(target = "r1cs", skip(cs, f))] + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + + let g1_prep = f().map(|b| *b.borrow()); + + let x = FpVar::new_variable(ark_relations::ns!(cs, "x"), || g1_prep.map(|g| g.x), mode)?; + let y = FpVar::new_variable(ark_relations::ns!(cs, "y"), || g1_prep.map(|g| g.y), mode)?; + let x_twist = Fp3Var::new_variable( + ark_relations::ns!(cs, "x_twist"), + || g1_prep.map(|g| g.x_twist), + mode, + )?; + let y_twist = Fp3Var::new_variable( + ark_relations::ns!(cs, "y_twist"), + || g1_prep.map(|g| g.y_twist), + mode, + )?; + Ok(Self { + x, + y, + x_twist, + y_twist, + }) + } +} + +impl ToBytesGadget for G1PreparedVar
<P>
{ + #[inline] + #[tracing::instrument(target = "r1cs")] + fn to_bytes(&self) -> Result>, SynthesisError> { + let mut x = self.x.to_bytes()?; + let mut y = self.y.to_bytes()?; + let mut x_twist = self.x_twist.to_bytes()?; + let mut y_twist = self.y_twist.to_bytes()?; + + x.append(&mut y); + x.append(&mut x_twist); + x.append(&mut y_twist); + Ok(x) + } + + #[tracing::instrument(target = "r1cs")] + fn to_non_unique_bytes(&self) -> Result>, SynthesisError> { + let mut x = self.x.to_non_unique_bytes()?; + let mut y = self.y.to_non_unique_bytes()?; + let mut x_twist = self.x_twist.to_non_unique_bytes()?; + let mut y_twist = self.y_twist.to_non_unique_bytes()?; + + x.append(&mut y); + x.append(&mut x_twist); + x.append(&mut y_twist); + Ok(x) + } +} + +type Fp3G
<P> = Fp3Var<<P as MNT6Parameters>
::Fp3Params>; + +/// Represents the cached precomputation that can be performed on a G2 element +/// which enables speeding up pairing computation. +#[derive(Derivative)] +#[derivative(Clone(bound = "P: MNT6Parameters"), Debug(bound = "P: MNT6Parameters"))] +pub struct G2PreparedVar { + #[doc(hidden)] + pub x: Fp3Var, + #[doc(hidden)] + pub y: Fp3Var, + #[doc(hidden)] + pub x_over_twist: Fp3Var, + #[doc(hidden)] + pub y_over_twist: Fp3Var, + #[doc(hidden)] + pub double_coefficients: Vec>, + #[doc(hidden)] + pub addition_coefficients: Vec>, +} + +impl AllocVar, P::Fp> for G2PreparedVar
<P>
{ + #[tracing::instrument(target = "r1cs", skip(cs, f))] + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + + let g2_prep = f().map(|b| b.borrow().clone()); + let g2 = g2_prep.as_ref().map_err(|e| *e); + + let x = Fp3Var::new_variable(ark_relations::ns!(cs, "x"), || g2.map(|g| g.x), mode)?; + let y = Fp3Var::new_variable(ark_relations::ns!(cs, "y"), || g2.map(|g| g.y), mode)?; + let x_over_twist = Fp3Var::new_variable( + ark_relations::ns!(cs, "x_over_twist"), + || g2.map(|g| g.x_over_twist), + mode, + )?; + let y_over_twist = Fp3Var::new_variable( + ark_relations::ns!(cs, "y_over_twist"), + || g2.map(|g| g.y_over_twist), + mode, + )?; + let double_coefficients = Vec::new_variable( + ark_relations::ns!(cs, "double coeffs"), + || g2.map(|g| g.double_coefficients.clone()), + mode, + )?; + let addition_coefficients = Vec::new_variable( + ark_relations::ns!(cs, "add coeffs"), + || g2.map(|g| g.addition_coefficients.clone()), + mode, + )?; + Ok(Self { + x, + y, + x_over_twist, + y_over_twist, + double_coefficients, + addition_coefficients, + }) + } +} + +impl ToBytesGadget for G2PreparedVar
<P>
{ + #[inline] + #[tracing::instrument(target = "r1cs")] + fn to_bytes(&self) -> Result>, SynthesisError> { + let mut x = self.x.to_bytes()?; + let mut y = self.y.to_bytes()?; + let mut x_over_twist = self.x_over_twist.to_bytes()?; + let mut y_over_twist = self.y_over_twist.to_bytes()?; + + x.append(&mut y); + x.append(&mut x_over_twist); + x.append(&mut y_over_twist); + + for coeff in self.double_coefficients.iter() { + x.extend_from_slice(&coeff.to_bytes()?); + } + for coeff in self.addition_coefficients.iter() { + x.extend_from_slice(&coeff.to_bytes()?); + } + Ok(x) + } + + #[tracing::instrument(target = "r1cs")] + fn to_non_unique_bytes(&self) -> Result>, SynthesisError> { + let mut x = self.x.to_non_unique_bytes()?; + let mut y = self.y.to_non_unique_bytes()?; + let mut x_over_twist = self.x_over_twist.to_non_unique_bytes()?; + let mut y_over_twist = self.y_over_twist.to_non_unique_bytes()?; + + x.append(&mut y); + x.append(&mut x_over_twist); + x.append(&mut y_over_twist); + + for coeff in self.double_coefficients.iter() { + x.extend_from_slice(&coeff.to_non_unique_bytes()?); + } + for coeff in self.addition_coefficients.iter() { + x.extend_from_slice(&coeff.to_non_unique_bytes()?); + } + Ok(x) + } +} + +impl G2PreparedVar
<P>
{ + /// Returns the value assigned to `self` in the underlying constraint + /// system. + pub fn value(&self) -> Result, SynthesisError> { + let x = self.x.value()?; + let y = self.y.value()?; + let x_over_twist = self.x_over_twist.value()?; + let y_over_twist = self.y_over_twist.value()?; + let double_coefficients = self + .double_coefficients + .iter() + .map(|coeff| coeff.value()) + .collect::, SynthesisError>>()?; + let addition_coefficients = self + .addition_coefficients + .iter() + .map(|coeff| coeff.value()) + .collect::, SynthesisError>>()?; + Ok(G2Prepared { + x, + y, + x_over_twist, + y_over_twist, + double_coefficients, + addition_coefficients, + }) + } + + /// Constructs `Self` from a `G2Var`. + #[tracing::instrument(target = "r1cs")] + pub fn from_group_var(q: &G2Var
<P>
) -> Result { + let q = q.to_affine()?; + let twist_inv = P::TWIST.inverse().unwrap(); + + let mut g2p = G2PreparedVar { + x: q.x.clone(), + y: q.y.clone(), + x_over_twist: &q.x * twist_inv, + y_over_twist: &q.y * twist_inv, + double_coefficients: vec![], + addition_coefficients: vec![], + }; + + let mut r = G2ProjectiveExtendedVar { + x: q.x.clone(), + y: q.y.clone(), + z: Fp3G::
<P>
::one(), + t: Fp3G::
<P>
::one(), + }; + + for (idx, value) in P::ATE_LOOP_COUNT.iter().rev().enumerate() { + let mut tmp = *value; + let skip_extraneous_bits = 64 - value.leading_zeros(); + let mut v = Vec::with_capacity(16); + for i in 0..64 { + if idx == 0 && (i == 0 || i >= skip_extraneous_bits) { + continue; + } + v.push(tmp & 1 == 1); + tmp >>= 1; + } + + for bit in v.iter().rev() { + let (r2, coeff) = PairingVar::
<P>
::doubling_step_for_flipped_miller_loop(&r)?; + g2p.double_coefficients.push(coeff); + r = r2; + + if *bit { + let (r2, coeff) = PairingVar::
<P>
::mixed_addition_step_for_flipped_miller_loop( + &q.x, &q.y, &r, + )?; + g2p.addition_coefficients.push(coeff); + r = r2; + } + + tmp >>= 1; + } + } + + if P::ATE_IS_LOOP_COUNT_NEG { + let rz_inv = r.z.inverse()?; + let rz2_inv = rz_inv.square()?; + let rz3_inv = &rz_inv * &rz2_inv; + + let minus_r_affine_x = &r.x * &rz2_inv; + let minus_r_affine_y = r.y.negate()? * &rz3_inv; + + let add_result = PairingVar::
<P>
::mixed_addition_step_for_flipped_miller_loop( + &minus_r_affine_x, + &minus_r_affine_y, + &r, + )?; + g2p.addition_coefficients.push(add_result.1); + } + + Ok(g2p) + } +} + +#[doc(hidden)] +#[derive(Derivative)] +#[derivative(Clone(bound = "P: MNT6Parameters"), Debug(bound = "P: MNT6Parameters"))] +pub struct AteDoubleCoefficientsVar { + pub c_h: Fp3Var, + pub c_4c: Fp3Var, + pub c_j: Fp3Var, + pub c_l: Fp3Var, +} + +impl AllocVar, P::Fp> for AteDoubleCoefficientsVar
<P>
{ + #[tracing::instrument(target = "r1cs", skip(cs, f))] + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + + let c_prep = f().map(|c| c.borrow().clone()); + let c = c_prep.as_ref().map_err(|e| *e); + + let c_h = Fp3Var::new_variable(ark_relations::ns!(cs, "c_h"), || c.map(|c| c.c_h), mode)?; + let c_4c = + Fp3Var::new_variable(ark_relations::ns!(cs, "c_4c"), || c.map(|c| c.c_4c), mode)?; + let c_j = Fp3Var::new_variable(ark_relations::ns!(cs, "c_j"), || c.map(|c| c.c_j), mode)?; + let c_l = Fp3Var::new_variable(ark_relations::ns!(cs, "c_l"), || c.map(|c| c.c_l), mode)?; + Ok(Self { + c_h, + c_4c, + c_j, + c_l, + }) + } +} + +impl ToBytesGadget for AteDoubleCoefficientsVar
<P>
{ + #[inline] + #[tracing::instrument(target = "r1cs")] + fn to_bytes(&self) -> Result>, SynthesisError> { + let mut c_h = self.c_h.to_bytes()?; + let mut c_4c = self.c_4c.to_bytes()?; + let mut c_j = self.c_j.to_bytes()?; + let mut c_l = self.c_l.to_bytes()?; + + c_h.append(&mut c_4c); + c_h.append(&mut c_j); + c_h.append(&mut c_l); + Ok(c_h) + } + + #[tracing::instrument(target = "r1cs")] + fn to_non_unique_bytes(&self) -> Result>, SynthesisError> { + let mut c_h = self.c_h.to_non_unique_bytes()?; + let mut c_4c = self.c_4c.to_non_unique_bytes()?; + let mut c_j = self.c_j.to_non_unique_bytes()?; + let mut c_l = self.c_l.to_non_unique_bytes()?; + + c_h.append(&mut c_4c); + c_h.append(&mut c_j); + c_h.append(&mut c_l); + Ok(c_h) + } +} + +impl AteDoubleCoefficientsVar
<P>
{ + /// Returns the value assigned to `self` in the underlying constraint + /// system. + pub fn value(&self) -> Result, SynthesisError> { + let c_h = self.c_h.value()?; + let c_4c = self.c_4c.value()?; + let c_j = self.c_j.value()?; + let c_l = self.c_l.value()?; + Ok(AteDoubleCoefficients { + c_h, + c_4c, + c_j, + c_l, + }) + } +} + +#[doc(hidden)] +#[derive(Derivative)] +#[derivative(Clone(bound = "P: MNT6Parameters"), Debug(bound = "P: MNT6Parameters"))] +pub struct AteAdditionCoefficientsVar { + pub c_l1: Fp3Var, + pub c_rz: Fp3Var, +} + +impl AllocVar, P::Fp> + for AteAdditionCoefficientsVar
<P>
+{ + #[tracing::instrument(target = "r1cs", skip(cs, f))] + fn new_variable>>( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + + let c_prep = f().map(|c| c.borrow().clone()); + let c = c_prep.as_ref().map_err(|e| *e); + + let c_l1 = + Fp3Var::new_variable(ark_relations::ns!(cs, "c_l1"), || c.map(|c| c.c_l1), mode)?; + let c_rz = + Fp3Var::new_variable(ark_relations::ns!(cs, "c_rz"), || c.map(|c| c.c_rz), mode)?; + Ok(Self { c_l1, c_rz }) + } +} + +impl ToBytesGadget for AteAdditionCoefficientsVar
<P>
{ + #[inline] + #[tracing::instrument(target = "r1cs")] + fn to_bytes(&self) -> Result>, SynthesisError> { + let mut c_l1 = self.c_l1.to_bytes()?; + let mut c_rz = self.c_rz.to_bytes()?; + + c_l1.append(&mut c_rz); + Ok(c_l1) + } + + #[tracing::instrument(target = "r1cs")] + fn to_non_unique_bytes(&self) -> Result>, SynthesisError> { + let mut c_l1 = self.c_l1.to_non_unique_bytes()?; + let mut c_rz = self.c_rz.to_non_unique_bytes()?; + + c_l1.append(&mut c_rz); + Ok(c_l1) + } +} + +impl AteAdditionCoefficientsVar
<P>
{ + /// Returns the value assigned to `self` in the underlying constraint + /// system. + pub fn value(&self) -> Result, SynthesisError> { + let c_l1 = self.c_l1.value()?; + let c_rz = self.c_rz.value()?; + Ok(AteAdditionCoefficients { c_l1, c_rz }) + } +} + +#[doc(hidden)] +pub struct G2ProjectiveExtendedVar { + pub x: Fp3Var, + pub y: Fp3Var, + pub z: Fp3Var, + pub t: Fp3Var, +} diff --git a/arkworks/r1cs-std/src/groups/curves/short_weierstrass/mod.rs b/arkworks/r1cs-std/src/groups/curves/short_weierstrass/mod.rs new file mode 100644 index 00000000..cf023c44 --- /dev/null +++ b/arkworks/r1cs-std/src/groups/curves/short_weierstrass/mod.rs @@ -0,0 +1,947 @@ +use ark_ec::{ + short_weierstrass_jacobian::{GroupAffine as SWAffine, GroupProjective as SWProjective}, + AffineCurve, ProjectiveCurve, SWModelParameters, +}; +use ark_ff::{BigInteger, BitIteratorBE, Field, One, PrimeField, Zero}; +use ark_relations::r1cs::{ConstraintSystemRef, Namespace, SynthesisError}; +use core::{borrow::Borrow, marker::PhantomData}; +use non_zero_affine::NonZeroAffineVar; + +use crate::{fields::fp::FpVar, prelude::*, ToConstraintFieldGadget, Vec}; + +/// This module provides a generic implementation of G1 and G2 for +/// the [\[BLS12]\]() family of bilinear groups. +pub mod bls12; + +/// This module provides a generic implementation of G1 and G2 for +/// the [\[MNT4]\]() +/// family of bilinear groups. +pub mod mnt4; +/// This module provides a generic implementation of G1 and G2 for +/// the [\[MNT6]\]() +/// family of bilinear groups. +pub mod mnt6; + +mod non_zero_affine; +/// An implementation of arithmetic for Short Weierstrass curves that relies on +/// the complete formulae derived in the paper of +/// [[Renes, Costello, Batina 2015]](). +#[derive(Derivative)] +#[derivative(Debug, Clone)] +#[must_use] +pub struct ProjectiveVar< + P: SWModelParameters, + F: FieldVar::BasePrimeField>, +> where + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + /// The x-coordinate. + pub x: F, + /// The y-coordinate. + pub y: F, + /// The z-coordinate. + pub z: F, + #[derivative(Debug = "ignore")] + _params: PhantomData
<P>
, +} + +/// An affine representation of a curve point. +#[derive(Derivative)] +#[derivative(Debug, Clone)] +#[must_use] +pub struct AffineVar< + P: SWModelParameters, + F: FieldVar::BasePrimeField>, +> where + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + /// The x-coordinate. + pub x: F, + /// The y-coordinate. + pub y: F, + /// Is `self` the point at infinity. + pub infinity: Boolean<::BasePrimeField>, + #[derivative(Debug = "ignore")] + _params: PhantomData
<P>
, +} + +impl AffineVar +where + P: SWModelParameters, + F: FieldVar::BasePrimeField>, + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + fn new(x: F, y: F, infinity: Boolean<::BasePrimeField>) -> Self { + Self { + x, + y, + infinity, + _params: PhantomData, + } + } + + /// Returns the value assigned to `self` in the underlying + /// constraint system. + pub fn value(&self) -> Result, SynthesisError> { + Ok(SWAffine::new( + self.x.value()?, + self.y.value()?, + self.infinity.value()?, + )) + } +} + +impl ToConstraintFieldGadget<::BasePrimeField> for AffineVar +where + P: SWModelParameters, + F: FieldVar::BasePrimeField>, + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, + F: ToConstraintFieldGadget<::BasePrimeField>, +{ + fn to_constraint_field( + &self, + ) -> Result::BasePrimeField>>, SynthesisError> { + let mut res = Vec::::BasePrimeField>>::new(); + + res.extend_from_slice(&self.x.to_constraint_field()?); + res.extend_from_slice(&self.y.to_constraint_field()?); + res.extend_from_slice(&self.infinity.to_constraint_field()?); + + Ok(res) + } +} + +impl R1CSVar<::BasePrimeField> for ProjectiveVar +where + P: SWModelParameters, + F: FieldVar::BasePrimeField>, + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + type Value = SWProjective
<P>
; + + fn cs(&self) -> ConstraintSystemRef<::BasePrimeField> { + self.x.cs().or(self.y.cs()).or(self.z.cs()) + } + + fn value(&self) -> Result { + let (x, y, z) = (self.x.value()?, self.y.value()?, self.z.value()?); + let result = if let Some(z_inv) = z.inverse() { + SWAffine::new(x * &z_inv, y * &z_inv, false) + } else { + SWAffine::zero() + }; + Ok(result.into()) + } +} + +impl::BasePrimeField>> + ProjectiveVar +where + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + /// Constructs `Self` from an `(x, y, z)` coordinate triple. + pub fn new(x: F, y: F, z: F) -> Self { + Self { + x, + y, + z, + _params: PhantomData, + } + } + + /// Convert this point into affine form. + #[tracing::instrument(target = "r1cs")] + pub fn to_affine(&self) -> Result, SynthesisError> { + if self.is_constant() { + let point = self.value()?.into_affine(); + let x = F::new_constant(ConstraintSystemRef::None, point.x)?; + let y = F::new_constant(ConstraintSystemRef::None, point.y)?; + let infinity = Boolean::constant(point.infinity); + Ok(AffineVar::new(x, y, infinity)) + } else { + let cs = self.cs(); + let infinity = self.is_zero()?; + let zero_x = F::zero(); + let zero_y = F::one(); + // Allocate a variable whose value is either `self.z.inverse()` if the inverse exists, + // and is zero otherwise. + let z_inv = F::new_witness(ark_relations::ns!(cs, "z_inverse"), || { + Ok(self.z.value()?.inverse().unwrap_or_else(P::BaseField::zero)) + })?; + // The inverse exists if `!self.is_zero()`. + // This means that `z_inv * self.z = 1` if `self.is_not_zero()`, and + // `z_inv * self.z = 0` if `self.is_zero()`. + // + // Thus, `z_inv * self.z = !self.is_zero()`. + z_inv.mul_equals(&self.z, &F::from(infinity.not()))?; + + let non_zero_x = &self.x * &z_inv; + let non_zero_y = &self.y * &z_inv; + + let x = infinity.select(&zero_x, &non_zero_x)?; + let y = infinity.select(&zero_y, &non_zero_y)?; + + Ok(AffineVar::new(x, y, infinity)) + } + } + + /// Allocates a new variable without performing an on-curve check, which is + /// useful if the variable is known to be on the curve (eg., if the point + /// is a constant or is a public input). + #[tracing::instrument(target = "r1cs", skip(cs, f))] + pub fn new_variable_omit_on_curve_check( + cs: impl Into::BasePrimeField>>, + f: impl FnOnce() -> Result, SynthesisError>, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + + let (x, y, z) = match f() { + Ok(ge) => { + let ge = ge.into_affine(); + if ge.is_zero() { + ( + Ok(P::BaseField::zero()), + Ok(P::BaseField::one()), + Ok(P::BaseField::zero()), + ) + } else { + (Ok(ge.x), Ok(ge.y), Ok(P::BaseField::one())) + } + } + _ => ( + Err(SynthesisError::AssignmentMissing), + Err(SynthesisError::AssignmentMissing), + Err(SynthesisError::AssignmentMissing), + ), + }; + + let x = F::new_variable(ark_relations::ns!(cs, "x"), || x, mode)?; + let y = F::new_variable(ark_relations::ns!(cs, "y"), || y, mode)?; + let z = F::new_variable(ark_relations::ns!(cs, "z"), || z, mode)?; + + Ok(Self::new(x, y, z)) + } + + /// Mixed addition, which is useful when `other = (x2, y2)` is known to have z = 1. + #[tracing::instrument(target = "r1cs", skip(self, other))] + pub(crate) fn add_mixed(&self, other: &NonZeroAffineVar) -> Result { + // Complete mixed addition formula from Renes-Costello-Batina 2015 + // Algorithm 2 + // (https://eprint.iacr.org/2015/1060). 
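+        // Why complete formulae: a circuit cannot branch on exceptional
+        // inputs (the identity, or equal points), so a single set of
+        // constraints must cover every case; the RCB15 formulae give such
+        // a total addition law at the cost of a few extra multiplications.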
+ // Below, comments at the end of a line denote the corresponding + // step(s) of the algorithm + // + // Adapted from code in + // https://github.com/RustCrypto/elliptic-curves/blob/master/p256/src/arithmetic/projective.rs + let three_b = P::COEFF_B.double() + &P::COEFF_B; + let (x1, y1, z1) = (&self.x, &self.y, &self.z); + let (x2, y2) = (&other.x, &other.y); + + let xx = x1 * x2; // 1 + let yy = y1 * y2; // 2 + let xy_pairs = ((x1 + y1) * &(x2 + y2)) - (&xx + &yy); // 4, 5, 6, 7, 8 + let xz_pairs = (x2 * z1) + x1; // 8, 9 + let yz_pairs = (y2 * z1) + y1; // 10, 11 + + let axz = mul_by_coeff_a::(&xz_pairs); // 12 + + let bz3_part = &axz + z1 * three_b; // 13, 14 + + let yy_m_bz3 = &yy - &bz3_part; // 15 + let yy_p_bz3 = &yy + &bz3_part; // 16 + + let azz = mul_by_coeff_a::(z1); // 20 + let xx3_p_azz = xx.double().unwrap() + &xx + &azz; // 18, 19, 22 + + let bxz3 = &xz_pairs * three_b; // 21 + let b3_xz_pairs = mul_by_coeff_a::(&(&xx - &azz)) + &bxz3; // 23, 24, 25 + + let x = (&yy_m_bz3 * &xy_pairs) - &yz_pairs * &b3_xz_pairs; // 28,29, 30 + let y = (&yy_p_bz3 * &yy_m_bz3) + &xx3_p_azz * b3_xz_pairs; // 17, 26, 27 + let z = (&yy_p_bz3 * &yz_pairs) + xy_pairs * xx3_p_azz; // 31, 32, 33 + + Ok(ProjectiveVar::new(x, y, z)) + } + + /// Computes a scalar multiplication with a little-endian scalar of size `P::ScalarField::MODULUS_BITS`. + #[tracing::instrument( + target = "r1cs", + skip(self, mul_result, multiple_of_power_of_two, bits) + )] + fn fixed_scalar_mul_le( + &self, + mul_result: &mut Self, + multiple_of_power_of_two: &mut NonZeroAffineVar, + bits: &[&Boolean<::BasePrimeField>], + ) -> Result<(), SynthesisError> { + let scalar_modulus_bits = ::size_in_bits(); + + assert!(scalar_modulus_bits >= bits.len()); + let split_len = ark_std::cmp::min(scalar_modulus_bits - 2, bits.len()); + let (affine_bits, proj_bits) = bits.split_at(split_len); + // Computes the standard little-endian double-and-add algorithm + // (Algorithm 3.26, Guide to Elliptic Curve Cryptography) + // + // We rely on *incomplete* affine formulae for partially computing this. + // However, we avoid exceptional edge cases because we partition the scalar + // into two chunks: one guaranteed to be less than p - 2, and the rest. + // We only use incomplete formulae for the first chunk, which means we avoid exceptions: + // + // `add_unchecked(a, b)` is incomplete when either `b.is_zero()`, or when + // `b = ±a`. During scalar multiplication, we don't hit either case: + // * `b = ±a`: `b = accumulator = k * a`, where `2 <= k < p - 1`. + // This implies that `k != p ± 1`, and so `b != (p ± 1) * a`. + // Because the group is finite, this in turn means that `b != ±a`, as required. + // * `a` or `b` is zero: for `a`, we handle the zero case after the loop; for `b`, notice + // that it is monotonically increasing, and furthermore, equals `k * a`, where + // `k != p = 0 mod p`. + + // Unlike normal double-and-add, here we start off with a non-zero `accumulator`, + // because `NonZeroAffineVar::add_unchecked` doesn't support addition with `zero`. + // In more detail, we initialize `accumulator` to be the initial value of + // `multiple_of_power_of_two`. This ensures that all unchecked additions of `accumulator` + // with later values of `multiple_of_power_of_two` are safe. + // However, to do this correctly, we need to perform two steps: + // * We must skip the LSB, and instead proceed assuming that it was 1. 
Later, we will + // conditionally subtract the initial value of `accumulator`: + // if LSB == 0: subtract initial_acc_value; else, subtract 0. + // * Because we are assuming the first bit, we must double `multiple_of_power_of_two`. + + let mut accumulator = multiple_of_power_of_two.clone(); + let initial_acc_value = accumulator.into_projective(); + + // The powers start at 2 (instead of 1) because we're skipping the first bit. + multiple_of_power_of_two.double_in_place()?; + + // As mentioned, we will skip the LSB, and will later handle it via a conditional subtraction. + for bit in affine_bits.iter().skip(1) { + if bit.is_constant() { + if *bit == &Boolean::TRUE { + accumulator = accumulator.add_unchecked(&multiple_of_power_of_two)?; + } + } else { + let temp = accumulator.add_unchecked(&multiple_of_power_of_two)?; + accumulator = bit.select(&temp, &accumulator)?; + } + multiple_of_power_of_two.double_in_place()?; + } + // Perform conditional subtraction: + + // We can convert to projective safely because the result is guaranteed to be non-zero + // by the condition on `affine_bits.len()`, and by the fact that `accumulator` is non-zero + let result = accumulator.into_projective(); + // If bits[0] is 0, then we have to subtract `self`; else, we subtract zero. + let subtrahend = bits[0].select(&Self::zero(), &initial_acc_value)?; + *mul_result += result - subtrahend; + + // Now, let's finish off the rest of the bits using our complete formulae + for bit in proj_bits { + if bit.is_constant() { + if *bit == &Boolean::TRUE { + *mul_result += &multiple_of_power_of_two.into_projective(); + } + } else { + let temp = &*mul_result + &multiple_of_power_of_two.into_projective(); + *mul_result = bit.select(&temp, &mul_result)?; + } + multiple_of_power_of_two.double_in_place()?; + } + Ok(()) + } +} + +impl CurveVar, ::BasePrimeField> + for ProjectiveVar +where + P: SWModelParameters, + F: FieldVar::BasePrimeField>, + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + fn constant(g: SWProjective
<P>
) -> Self { + let cs = ConstraintSystemRef::None; + Self::new_variable_omit_on_curve_check(cs, || Ok(g), AllocationMode::Constant).unwrap() + } + + fn zero() -> Self { + Self::new(F::zero(), F::one(), F::zero()) + } + + fn is_zero(&self) -> Result::BasePrimeField>, SynthesisError> { + self.z.is_zero() + } + + #[tracing::instrument(target = "r1cs", skip(cs, f))] + fn new_variable_omit_prime_order_check( + cs: impl Into::BasePrimeField>>, + f: impl FnOnce() -> Result, SynthesisError>, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + // Curve equation in projective form: + // E: Y² * Z = X³ + aX * Z² + bZ³ + // + // This can be re-written as + // E: Y² * Z - bZ³ = X³ + aX * Z² + // E: Z * (Y² - bZ²) = X * (X² + aZ²) + // so, compute X², Y², Z², + // compute temp = X * (X² + aZ²) + // check Z.mul_equals((Y² - bZ²), temp) + // + // A total of 5 multiplications + + let g = Self::new_variable_omit_on_curve_check(cs, f, mode)?; + + if mode != AllocationMode::Constant { + // Perform on-curve check. + let b = P::COEFF_B; + let a = P::COEFF_A; + + let x2 = g.x.square()?; + let y2 = g.y.square()?; + let z2 = g.z.square()?; + let t = &g.x * (x2 + &z2 * a); + + g.z.mul_equals(&(y2 - z2 * b), &t)?; + } + Ok(g) + } + + /// Enforce that `self` is in the prime-order subgroup. + /// + /// Does so by multiplying by the prime order, and checking that the result + /// is unchanged. + // TODO: at the moment this doesn't work, because the addition and doubling + // formulae are incomplete for even-order points. + #[tracing::instrument(target = "r1cs")] + fn enforce_prime_order(&self) -> Result<(), SynthesisError> { + unimplemented!("cannot enforce prime order"); + // let r_minus_1 = (-P::ScalarField::one()).into_repr(); + + // let mut result = Self::zero(); + // for b in BitIteratorBE::without_leading_zeros(r_minus_1) { + // result.double_in_place()?; + + // if b { + // result += self; + // } + // } + // self.negate()?.enforce_equal(&result)?; + // Ok(()) + } + + #[inline] + #[tracing::instrument(target = "r1cs")] + fn double_in_place(&mut self) -> Result<(), SynthesisError> { + // Complete doubling formula from Renes-Costello-Batina 2015 + // Algorithm 3 + // (https://eprint.iacr.org/2015/1060). + // Below, comments at the end of a line denote the corresponding + // step(s) of the algorithm + // + // Adapted from code in + // https://github.com/RustCrypto/elliptic-curves/blob/master/p256/src/arithmetic/projective.rs + let three_b = P::COEFF_B.double() + &P::COEFF_B; + + let xx = self.x.square()?; // 1 + let yy = self.y.square()?; // 2 + let zz = self.z.square()?; // 3 + let xy2 = (&self.x * &self.y).double()?; // 4, 5 + let xz2 = (&self.x * &self.z).double()?; // 6, 7 + + let axz2 = mul_by_coeff_a::(&xz2); // 8 + + let bzz3_part = &axz2 + &zz * three_b; // 9, 10 + let yy_m_bzz3 = &yy - &bzz3_part; // 11 + let yy_p_bzz3 = &yy + &bzz3_part; // 12 + let y_frag = yy_p_bzz3 * &yy_m_bzz3; // 13 + let x_frag = yy_m_bzz3 * &xy2; // 14 + + let bxz3 = xz2 * three_b; // 15 + let azz = mul_by_coeff_a::(&zz); // 16 + let b3_xz_pairs = mul_by_coeff_a::(&(&xx - &azz)) + &bxz3; // 15, 16, 17, 18, 19 + let xx3_p_azz = (xx.double()? 
+ &xx + &azz) * &b3_xz_pairs; // 23, 24, 25 + + let y = y_frag + &xx3_p_azz; // 26, 27 + let yz2 = (&self.y * &self.z).double()?; // 28, 29 + let x = x_frag - &(b3_xz_pairs * &yz2); // 30, 31 + let z = (yz2 * &yy).double()?.double()?; // 32, 33, 34 + self.x = x; + self.y = y; + self.z = z; + Ok(()) + } + + #[tracing::instrument(target = "r1cs")] + fn negate(&self) -> Result { + Ok(Self::new(self.x.clone(), self.y.negate()?, self.z.clone())) + } + + /// Computes `bits * self`, where `bits` is a little-endian + /// `Boolean` representation of a scalar. + #[tracing::instrument(target = "r1cs", skip(bits))] + fn scalar_mul_le<'a>( + &self, + bits: impl Iterator::BasePrimeField>>, + ) -> Result { + if self.is_constant() { + if self.value().unwrap().is_zero() { + return Ok(self.clone()); + } + } + let self_affine = self.to_affine()?; + let (x, y, infinity) = (self_affine.x, self_affine.y, self_affine.infinity); + // We first handle the non-zero case, and then later + // will conditionally select zero if `self` was zero. + let non_zero_self = NonZeroAffineVar::new(x, y); + + let mut bits = bits.collect::>(); + if bits.len() == 0 { + return Ok(Self::zero()); + } + // Remove unnecessary constant zeros in the most-significant positions. + bits = bits + .into_iter() + // We iterate from the MSB down. + .rev() + // Skip leading zeros, if they are constants. + .skip_while(|b| b.is_constant() && (b.value().unwrap() == false)) + .collect(); + // After collecting we are in big-endian form; we have to reverse to get back to + // little-endian. + bits.reverse(); + + let scalar_modulus_bits = ::size_in_bits(); + let mut mul_result = Self::zero(); + let mut power_of_two_times_self = non_zero_self; + // We chunk up `bits` into `p`-sized chunks. + for bits in bits.chunks(scalar_modulus_bits) { + self.fixed_scalar_mul_le(&mut mul_result, &mut power_of_two_times_self, bits)?; + } + + // The foregoing algorithm relies on incomplete addition, and so does not + // work when the input (`self`) is zero. We hence have to perform + // a check to ensure that if the input is zero, then so is the output. + // The cost of this check should be less than the benefit of using + // mixed addition in almost all cases. + infinity.select(&Self::zero(), &mul_result) + } + + #[tracing::instrument(target = "r1cs", skip(scalar_bits_with_bases))] + fn precomputed_base_scalar_mul_le<'a, I, B>( + &mut self, + scalar_bits_with_bases: I, + ) -> Result<(), SynthesisError> + where + I: Iterator)>, + B: Borrow::BasePrimeField>>, + { + // We just ignore the provided bases and use the faster scalar multiplication. + let (bits, bases): (Vec<_>, Vec<_>) = scalar_bits_with_bases + .map(|(b, c)| (b.borrow().clone(), *c)) + .unzip(); + let base = bases[0]; + *self = Self::constant(base).scalar_mul_le(bits.iter())?; + Ok(()) + } +} + +impl ToConstraintFieldGadget<::BasePrimeField> for ProjectiveVar +where + P: SWModelParameters, + F: FieldVar::BasePrimeField>, + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, + F: ToConstraintFieldGadget<::BasePrimeField>, +{ + fn to_constraint_field( + &self, + ) -> Result::BasePrimeField>>, SynthesisError> { + self.to_affine()?.to_constraint_field() + } +} + +fn mul_by_coeff_a< + P: SWModelParameters, + F: FieldVar::BasePrimeField>, +>( + f: &F, +) -> F +where + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + if !P::COEFF_A.is_zero() { + f * P::COEFF_A + } else { + F::zero() + } +} + +impl_bounded_ops!( + ProjectiveVar, + SWProjective
<P>
, + Add, + add, + AddAssign, + add_assign, + |mut this: &'a ProjectiveVar, mut other: &'a ProjectiveVar| { + // Implement complete addition for Short Weierstrass curves, following + // the complete addition formula from Renes-Costello-Batina 2015 + // (https://eprint.iacr.org/2015/1060). + // + // We special case handling of constants to get better constraint weight. + if this.is_constant() { + // we'll just act like `other` is constant. + core::mem::swap(&mut this, &mut other); + } + + if other.is_constant() { + // The value should exist because `other` is a constant. + let other = other.value().unwrap(); + if other.is_zero() { + // this + 0 = this + this.clone() + } else { + // We'll use mixed addition to add non-zero constants. + let x = F::constant(other.x); + let y = F::constant(other.y); + this.add_mixed(&NonZeroAffineVar::new(x, y)).unwrap() + } + } else { + // Complete addition formula from Renes-Costello-Batina 2015 + // Algorithm 1 + // (https://eprint.iacr.org/2015/1060). + // Below, comments at the end of a line denote the corresponding + // step(s) of the algorithm + // + // Adapted from code in + // https://github.com/RustCrypto/elliptic-curves/blob/master/p256/src/arithmetic/projective.rs + let three_b = P::COEFF_B.double() + &P::COEFF_B; + let (x1, y1, z1) = (&this.x, &this.y, &this.z); + let (x2, y2, z2) = (&other.x, &other.y, &other.z); + + let xx = x1 * x2; // 1 + let yy = y1 * y2; // 2 + let zz = z1 * z2; // 3 + let xy_pairs = ((x1 + y1) * &(x2 + y2)) - (&xx + &yy); // 4, 5, 6, 7, 8 + let xz_pairs = ((x1 + z1) * &(x2 + z2)) - (&xx + &zz); // 9, 10, 11, 12, 13 + let yz_pairs = ((y1 + z1) * &(y2 + z2)) - (&yy + &zz); // 14, 15, 16, 17, 18 + + let axz = mul_by_coeff_a::(&xz_pairs); // 19 + + let bzz3_part = &axz + &zz * three_b; // 20, 21 + + let yy_m_bzz3 = &yy - &bzz3_part; // 22 + let yy_p_bzz3 = &yy + &bzz3_part; // 23 + + let azz = mul_by_coeff_a::(&zz); + let xx3_p_azz = xx.double().unwrap() + &xx + &azz; // 25, 26, 27, 29 + + let bxz3 = &xz_pairs * three_b; // 28 + let b3_xz_pairs = mul_by_coeff_a::(&(&xx - &azz)) + &bxz3; // 30, 31, 32 + + let x = (&yy_m_bzz3 * &xy_pairs) - &yz_pairs * &b3_xz_pairs; // 35, 39, 40 + let y = (&yy_p_bzz3 * &yy_m_bzz3) + &xx3_p_azz * b3_xz_pairs; // 24, 36, 37, 38 + let z = (&yy_p_bzz3 * &yz_pairs) + xy_pairs * xx3_p_azz; // 41, 42, 43 + + ProjectiveVar::new(x, y, z) + } + + }, + |this: &'a ProjectiveVar, other: SWProjective
<P>
| { + this + ProjectiveVar::constant(other) + }, + (F: FieldVar::BasePrimeField>, P: SWModelParameters), + for <'b> &'b F: FieldOpsBounds<'b, P::BaseField, F>, +); + +impl_bounded_ops!( + ProjectiveVar, + SWProjective
<P>
, + Sub, + sub, + SubAssign, + sub_assign, + |this: &'a ProjectiveVar, other: &'a ProjectiveVar| this + other.negate().unwrap(), + |this: &'a ProjectiveVar, other: SWProjective
<P>
| this - ProjectiveVar::constant(other), + (F: FieldVar::BasePrimeField>, P: SWModelParameters), + for <'b> &'b F: FieldOpsBounds<'b, P::BaseField, F> +); + +impl<'a, P, F> GroupOpsBounds<'a, SWProjective
<P>
, ProjectiveVar> for ProjectiveVar +where + P: SWModelParameters, + F: FieldVar::BasePrimeField>, + for<'b> &'b F: FieldOpsBounds<'b, P::BaseField, F>, +{ +} + +impl<'a, P, F> GroupOpsBounds<'a, SWProjective
<P>
, ProjectiveVar> for &'a ProjectiveVar +where + P: SWModelParameters, + F: FieldVar::BasePrimeField>, + for<'b> &'b F: FieldOpsBounds<'b, P::BaseField, F>, +{ +} + +impl CondSelectGadget<::BasePrimeField> for ProjectiveVar +where + P: SWModelParameters, + F: FieldVar::BasePrimeField>, + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + #[inline] + #[tracing::instrument(target = "r1cs")] + fn conditionally_select( + cond: &Boolean<::BasePrimeField>, + true_value: &Self, + false_value: &Self, + ) -> Result { + let x = cond.select(&true_value.x, &false_value.x)?; + let y = cond.select(&true_value.y, &false_value.y)?; + let z = cond.select(&true_value.z, &false_value.z)?; + + Ok(Self::new(x, y, z)) + } +} + +impl EqGadget<::BasePrimeField> for ProjectiveVar +where + P: SWModelParameters, + F: FieldVar::BasePrimeField>, + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + #[tracing::instrument(target = "r1cs")] + fn is_eq( + &self, + other: &Self, + ) -> Result::BasePrimeField>, SynthesisError> { + let x_equal = (&self.x * &other.z).is_eq(&(&other.x * &self.z))?; + let y_equal = (&self.y * &other.z).is_eq(&(&other.y * &self.z))?; + let coordinates_equal = x_equal.and(&y_equal)?; + let both_are_zero = self.is_zero()?.and(&other.is_zero()?)?; + both_are_zero.or(&coordinates_equal) + } + + #[inline] + #[tracing::instrument(target = "r1cs")] + fn conditional_enforce_equal( + &self, + other: &Self, + condition: &Boolean<::BasePrimeField>, + ) -> Result<(), SynthesisError> { + let x_equal = (&self.x * &other.z).is_eq(&(&other.x * &self.z))?; + let y_equal = (&self.y * &other.z).is_eq(&(&other.y * &self.z))?; + let coordinates_equal = x_equal.and(&y_equal)?; + let both_are_zero = self.is_zero()?.and(&other.is_zero()?)?; + both_are_zero + .or(&coordinates_equal)? + .conditional_enforce_equal(&Boolean::Constant(true), condition)?; + Ok(()) + } + + #[inline] + #[tracing::instrument(target = "r1cs")] + fn conditional_enforce_not_equal( + &self, + other: &Self, + condition: &Boolean<::BasePrimeField>, + ) -> Result<(), SynthesisError> { + let is_equal = self.is_eq(other)?; + is_equal + .and(condition)? + .enforce_equal(&Boolean::Constant(false)) + } +} + +impl AllocVar, ::BasePrimeField> for ProjectiveVar +where + P: SWModelParameters, + F: FieldVar::BasePrimeField>, + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + fn new_variable>>( + cs: impl Into::BasePrimeField>>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + Self::new_variable(cs, || f().map(|b| b.borrow().into_projective()), mode) + } +} + +impl AllocVar, ::BasePrimeField> + for ProjectiveVar +where + P: SWModelParameters, + F: FieldVar::BasePrimeField>, + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + fn new_variable>>( + cs: impl Into::BasePrimeField>>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + let f = || Ok(*f()?.borrow()); + match mode { + AllocationMode::Constant => Self::new_variable_omit_prime_order_check(cs, f, mode), + AllocationMode::Input => Self::new_variable_omit_prime_order_check(cs, f, mode), + AllocationMode::Witness => { + // if cofactor.is_even(): + // divide until you've removed all even factors + // else: + // just directly use double and add. 
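+                // Preprocessing sketch: write the cofactor as h = 2^s * h_odd.
+                // Only the odd part takes part in the Hamming-weight
+                // comparison below; the 2^s factor is cleared afterwards by
+                // `s` doublings ("Remove the even part of the cofactor").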
+ let mut power_of_2: u32 = 0; + let mut cofactor = P::COFACTOR.to_vec(); + while cofactor[0] % 2 == 0 { + div2(&mut cofactor); + power_of_2 += 1; + } + + let cofactor_weight = BitIteratorBE::new(cofactor.as_slice()) + .filter(|b| *b) + .count(); + let modulus_minus_1 = (-P::ScalarField::one()).into_repr(); // r - 1 + let modulus_minus_1_weight = + BitIteratorBE::new(modulus_minus_1).filter(|b| *b).count(); + + // We pick the most efficient method of performing the prime order check: + // If the cofactor has lower hamming weight than the scalar field's modulus, + // we first multiply by the inverse of the cofactor, and then, after allocating, + // multiply by the cofactor. This ensures the resulting point has no cofactors + // + // Else, we multiply by the scalar field's modulus and ensure that the result + // equals the identity. + + let (mut ge, iter) = if cofactor_weight < modulus_minus_1_weight { + let ge = Self::new_variable_omit_prime_order_check( + ark_relations::ns!(cs, "Witness without subgroup check with cofactor mul"), + || f().map(|g| g.borrow().into_affine().mul_by_cofactor_inv().into()), + mode, + )?; + ( + ge, + BitIteratorBE::without_leading_zeros(cofactor.as_slice()), + ) + } else { + let ge = Self::new_variable_omit_prime_order_check( + ark_relations::ns!(cs, "Witness without subgroup check with `r` check"), + || { + f().map(|g| { + let g = g.into_affine(); + let mut power_of_two = P::ScalarField::one().into_repr(); + power_of_two.muln(power_of_2); + let power_of_two_inv = P::ScalarField::from_repr(power_of_two) + .and_then(|n| n.inverse()) + .unwrap(); + g.mul(power_of_two_inv) + }) + }, + mode, + )?; + + ( + ge, + BitIteratorBE::without_leading_zeros(modulus_minus_1.as_ref()), + ) + }; + // Remove the even part of the cofactor + for _ in 0..power_of_2 { + ge.double_in_place()?; + } + + let mut result = Self::zero(); + for b in iter { + result.double_in_place()?; + + if b { + result += &ge + } + } + if cofactor_weight < modulus_minus_1_weight { + Ok(result) + } else { + ge.enforce_equal(&ge)?; + Ok(ge) + } + } + } + } +} + +#[inline] +fn div2(limbs: &mut [u64]) { + let mut t = 0; + for i in limbs.iter_mut().rev() { + let t2 = *i << 63; + *i >>= 1; + *i |= t; + t = t2; + } +} + +impl ToBitsGadget<::BasePrimeField> for ProjectiveVar +where + P: SWModelParameters, + F: FieldVar::BasePrimeField>, + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + #[tracing::instrument(target = "r1cs")] + fn to_bits_le( + &self, + ) -> Result::BasePrimeField>>, SynthesisError> { + let g = self.to_affine()?; + let mut bits = g.x.to_bits_le()?; + let y_bits = g.y.to_bits_le()?; + bits.extend_from_slice(&y_bits); + bits.push(g.infinity); + Ok(bits) + } + + #[tracing::instrument(target = "r1cs")] + fn to_non_unique_bits_le( + &self, + ) -> Result::BasePrimeField>>, SynthesisError> { + let g = self.to_affine()?; + let mut bits = g.x.to_non_unique_bits_le()?; + let y_bits = g.y.to_non_unique_bits_le()?; + bits.extend_from_slice(&y_bits); + bits.push(g.infinity); + Ok(bits) + } +} + +impl ToBytesGadget<::BasePrimeField> for ProjectiveVar +where + P: SWModelParameters, + F: FieldVar::BasePrimeField>, + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + #[tracing::instrument(target = "r1cs")] + fn to_bytes( + &self, + ) -> Result::BasePrimeField>>, SynthesisError> { + let g = self.to_affine()?; + let mut bytes = g.x.to_bytes()?; + let y_bytes = g.y.to_bytes()?; + let inf_bytes = g.infinity.to_bytes()?; + bytes.extend_from_slice(&y_bytes); + bytes.extend_from_slice(&inf_bytes); + 
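+        // Byte layout of the result: x-bytes || y-bytes || infinity-flag byte.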
Ok(bytes) + } + + #[tracing::instrument(target = "r1cs")] + fn to_non_unique_bytes( + &self, + ) -> Result::BasePrimeField>>, SynthesisError> { + let g = self.to_affine()?; + let mut bytes = g.x.to_non_unique_bytes()?; + let y_bytes = g.y.to_non_unique_bytes()?; + let inf_bytes = g.infinity.to_non_unique_bytes()?; + bytes.extend_from_slice(&y_bytes); + bytes.extend_from_slice(&inf_bytes); + Ok(bytes) + } +} diff --git a/arkworks/r1cs-std/src/groups/curves/short_weierstrass/non_zero_affine.rs b/arkworks/r1cs-std/src/groups/curves/short_weierstrass/non_zero_affine.rs new file mode 100644 index 00000000..a598bdd0 --- /dev/null +++ b/arkworks/r1cs-std/src/groups/curves/short_weierstrass/non_zero_affine.rs @@ -0,0 +1,163 @@ +use super::*; +/// An affine representation of a prime order curve point that is guaranteed +/// to *not* be the point at infinity. +#[derive(Derivative)] +#[derivative(Debug, Clone)] +#[must_use] +pub struct NonZeroAffineVar< + P: SWModelParameters, + F: FieldVar::BasePrimeField>, +> where + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + /// The x-coordinate. + pub x: F, + /// The y-coordinate. + pub y: F, + #[derivative(Debug = "ignore")] + _params: PhantomData
<P>
, +} + +impl NonZeroAffineVar +where + P: SWModelParameters, + F: FieldVar::BasePrimeField>, + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + pub(crate) fn new(x: F, y: F) -> Self { + Self { + x, + y, + _params: PhantomData, + } + } + + /// Converts self into a non-zero projective point. + #[tracing::instrument(target = "r1cs", skip(self))] + pub(crate) fn into_projective(&self) -> ProjectiveVar { + ProjectiveVar::new(self.x.clone(), self.y.clone(), F::one()) + } + + /// Performs an addition without checking that other != ±self. + #[tracing::instrument(target = "r1cs", skip(self, other))] + pub(crate) fn add_unchecked(&self, other: &Self) -> Result { + if [self, other].is_constant() { + let result = + (self.value()?.into_projective() + other.value()?.into_projective()).into_affine(); + Ok(Self::new(F::constant(result.x), F::constant(result.y))) + } else { + let (x1, y1) = (&self.x, &self.y); + let (x2, y2) = (&other.x, &other.y); + // Then, + // slope lambda := (y2 - y1)/(x2 - x1); + // x3 = lambda^2 - x1 - x2; + // y3 = lambda * (x1 - x3) - y1 + let numerator = y2 - y1; + let denominator = x2 - x1; + let lambda = numerator.mul_by_inverse(&denominator)?; + let x3 = lambda.square()? - x1 - x2; + let y3 = lambda * &(x1 - &x3) - y1; + Ok(Self::new(x3, y3)) + } + } + + /// Doubles `self`. As this is a prime order curve point, + /// the output is guaranteed to not be the point at infinity. + #[tracing::instrument(target = "r1cs", skip(self))] + pub(crate) fn double(&self) -> Result { + if [self].is_constant() { + let result = self.value()?.into_projective().double().into_affine(); + // Panic if the result is zero. + assert!(!result.is_zero()); + Ok(Self::new(F::constant(result.x), F::constant(result.y))) + } else { + let (x1, y1) = (&self.x, &self.y); + let x1_sqr = x1.square()?; + // Then, + // tangent lambda := (3 * x1^2 + a) / (2 * y1); + // x3 = lambda^2 - 2x1 + // y3 = lambda * (x1 - x3) - y1 + let numerator = x1_sqr.double()? + &x1_sqr + P::COEFF_A; + let denominator = y1.double()?; + let lambda = numerator.mul_by_inverse(&denominator)?; + let x3 = lambda.square()? - x1.double()?; + let y3 = lambda * &(x1 - &x3) - y1; + Ok(Self::new(x3, y3)) + } + } + + /// Computes `(self + other) + self`. This method requires only 5 constraints, + /// less than the 7 required when computing via `self.double() + other`. + /// + /// This follows the formulae from [\[ELM03\]](https://arxiv.org/abs/math/0208038). + #[tracing::instrument(target = "r1cs", skip(self))] + pub(crate) fn double_and_add(&self, other: &Self) -> Result { + if [self].is_constant() || other.is_constant() { + self.double()?.add_unchecked(other) + } else { + let (x1, y1) = (&self.x, &self.y); + let (x2, y2) = (&other.x, &other.y); + + // Calculate self + other: + // slope lambda := (y2 - y1)/(x2 - x1); + // x3 = lambda^2 - x1 - x2; + // y3 = lambda * (x1 - x3) - y1 + let numerator = y2 - y1; + let denominator = x2 - x1; + let lambda_1 = numerator.mul_by_inverse(&denominator)?; + + let x3 = lambda_1.square()? - x1 - x2; + + // Calculate final addition slope: + let lambda_2 = (lambda_1 + y1.double()?.mul_by_inverse(&(&x3 - x1))?).negate()?; + + let x4 = lambda_2.square()? - x1 - x3; + let y4 = lambda_2 * &(x1 - &x4) - y1; + Ok(Self::new(x4, y4)) + } + } + + /// Doubles `self` in place. 
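+    /// A minimal usage sketch (hypothetical `acc: NonZeroAffineVar<P, F>`):
+    ///
+    /// ```ignore
+    /// acc.double_in_place()?; // `acc` now holds twice its former value
+    /// ```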
+ #[tracing::instrument(target = "r1cs", skip(self))] + pub(crate) fn double_in_place(&mut self) -> Result<(), SynthesisError> { + *self = self.double()?; + Ok(()) + } +} + +impl R1CSVar<::BasePrimeField> for NonZeroAffineVar +where + P: SWModelParameters, + F: FieldVar::BasePrimeField>, + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + type Value = SWAffine
<P>
; + + fn cs(&self) -> ConstraintSystemRef<::BasePrimeField> { + self.x.cs().or(self.y.cs()) + } + + fn value(&self) -> Result, SynthesisError> { + Ok(SWAffine::new(self.x.value()?, self.y.value()?, false)) + } +} + +impl CondSelectGadget<::BasePrimeField> for NonZeroAffineVar +where + P: SWModelParameters, + F: FieldVar::BasePrimeField>, + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + #[inline] + #[tracing::instrument(target = "r1cs")] + fn conditionally_select( + cond: &Boolean<::BasePrimeField>, + true_value: &Self, + false_value: &Self, + ) -> Result { + let x = cond.select(&true_value.x, &false_value.x)?; + let y = cond.select(&true_value.y, &false_value.y)?; + + Ok(Self::new(x, y)) + } +} diff --git a/arkworks/r1cs-std/src/groups/curves/twisted_edwards/mod.rs b/arkworks/r1cs-std/src/groups/curves/twisted_edwards/mod.rs new file mode 100644 index 00000000..aed8f825 --- /dev/null +++ b/arkworks/r1cs-std/src/groups/curves/twisted_edwards/mod.rs @@ -0,0 +1,927 @@ +use ark_ec::{ + twisted_edwards_extended::{GroupAffine as TEAffine, GroupProjective as TEProjective}, + AffineCurve, MontgomeryModelParameters, ProjectiveCurve, TEModelParameters, +}; +use ark_ff::{BigInteger, BitIteratorBE, Field, One, PrimeField, Zero}; + +use ark_relations::r1cs::{ConstraintSystemRef, Namespace, SynthesisError}; + +use crate::{prelude::*, ToConstraintFieldGadget, Vec}; + +use crate::fields::fp::FpVar; +use core::{borrow::Borrow, marker::PhantomData}; + +/// An implementation of arithmetic for Montgomery curves that relies on +/// incomplete addition formulae for the affine model, as outlined in the +/// [EFD](https://www.hyperelliptic.org/EFD/g1p/auto-montgom.html). +/// +/// This is intended for use primarily for implementing efficient +/// multi-scalar-multiplication in the Bowe-Hopwood-Pedersen hash. +#[derive(Derivative)] +#[derivative(Debug, Clone)] +#[must_use] +pub struct MontgomeryAffineVar< + P: TEModelParameters, + F: FieldVar::BasePrimeField>, +> where + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + /// The x-coordinate. + pub x: F, + /// The y-coordinate. + pub y: F, + #[derivative(Debug = "ignore")] + _params: PhantomData
<P>
, +} + +mod montgomery_affine_impl { + use super::*; + use ark_ec::twisted_edwards_extended::GroupAffine; + use ark_ff::Field; + use core::ops::Add; + + impl R1CSVar<::BasePrimeField> for MontgomeryAffineVar + where + P: TEModelParameters, + F: FieldVar::BasePrimeField>, + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, + { + type Value = (P::BaseField, P::BaseField); + + fn cs(&self) -> ConstraintSystemRef<::BasePrimeField> { + self.x.cs().or(self.y.cs()) + } + + fn value(&self) -> Result { + let x = self.x.value()?; + let y = self.y.value()?; + Ok((x, y)) + } + } + + impl< + P: TEModelParameters, + F: FieldVar::BasePrimeField>, + > MontgomeryAffineVar + where + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, + { + /// Constructs `Self` from an `(x, y)` coordinate pair. + pub fn new(x: F, y: F) -> Self { + Self { + x, + y, + _params: PhantomData, + } + } + + /// Converts a Twisted Edwards curve point to coordinates for the + /// corresponding affine Montgomery curve point. + #[tracing::instrument(target = "r1cs")] + pub fn from_edwards_to_coords( + p: &TEAffine
<P>
, + ) -> Result<(P::BaseField, P::BaseField), SynthesisError> { + let montgomery_point: GroupAffine
<P>
= if p.y == P::BaseField::one() { + GroupAffine::zero() + } else if p.x == P::BaseField::zero() { + GroupAffine::new(P::BaseField::zero(), P::BaseField::zero()) + } else { + let u = + (P::BaseField::one() + &p.y) * &(P::BaseField::one() - &p.y).inverse().unwrap(); + let v = u * &p.x.inverse().unwrap(); + GroupAffine::new(u, v) + }; + + Ok((montgomery_point.x, montgomery_point.y)) + } + + /// Converts a Twisted Edwards curve point to coordinates for the + /// corresponding affine Montgomery curve point. + #[tracing::instrument(target = "r1cs")] + pub fn new_witness_from_edwards( + cs: ConstraintSystemRef<::BasePrimeField>, + p: &TEAffine
<P>
, + ) -> Result { + let montgomery_coords = Self::from_edwards_to_coords(p)?; + let u = F::new_witness(ark_relations::ns!(cs, "u"), || Ok(montgomery_coords.0))?; + let v = F::new_witness(ark_relations::ns!(cs, "v"), || Ok(montgomery_coords.1))?; + Ok(Self::new(u, v)) + } + + /// Converts `self` into a Twisted Edwards curve point variable. + #[tracing::instrument(target = "r1cs")] + pub fn into_edwards(&self) -> Result, SynthesisError> { + let cs = self.cs(); + + let mode = if cs.is_none() { + AllocationMode::Constant + } else { + AllocationMode::Witness + }; + + // Compute u = x / y + let u = F::new_variable( + ark_relations::ns!(cs, "u"), + || { + let y_inv = self + .y + .value()? + .inverse() + .ok_or(SynthesisError::DivisionByZero)?; + Ok(self.x.value()? * &y_inv) + }, + mode, + )?; + + u.mul_equals(&self.y, &self.x)?; + + let v = F::new_variable( + ark_relations::ns!(cs, "v"), + || { + let mut t0 = self.x.value()?; + let mut t1 = t0; + t0 -= &P::BaseField::one(); + t1 += &P::BaseField::one(); + + Ok(t0 * &t1.inverse().ok_or(SynthesisError::DivisionByZero)?) + }, + mode, + )?; + + let xplusone = &self.x + P::BaseField::one(); + let xminusone = &self.x - P::BaseField::one(); + v.mul_equals(&xplusone, &xminusone)?; + + Ok(AffineVar::new(u, v)) + } + } + + impl<'a, P, F> Add<&'a MontgomeryAffineVar> for MontgomeryAffineVar + where + P: TEModelParameters, + F: FieldVar::BasePrimeField>, + for<'b> &'b F: FieldOpsBounds<'b, P::BaseField, F>, + { + type Output = MontgomeryAffineVar; + + #[tracing::instrument(target = "r1cs")] + fn add(self, other: &'a Self) -> Self::Output { + let cs = [&self, other].cs(); + let mode = if cs.is_none() { + AllocationMode::Constant + } else { + AllocationMode::Witness + }; + + let coeff_b = P::MontgomeryModelParameters::COEFF_B; + let coeff_a = P::MontgomeryModelParameters::COEFF_A; + + let lambda = F::new_variable( + ark_relations::ns!(cs, "lambda"), + || { + let n = other.y.value()? - &self.y.value()?; + let d = other.x.value()? - &self.x.value()?; + Ok(n * &d.inverse().ok_or(SynthesisError::DivisionByZero)?) + }, + mode, + ) + .unwrap(); + let lambda_n = &other.y - &self.y; + let lambda_d = &other.x - &self.x; + lambda_d.mul_equals(&lambda, &lambda_n).unwrap(); + + // Compute x'' = B*lambda^2 - A - x - x' + let xprime = F::new_variable( + ark_relations::ns!(cs, "xprime"), + || { + Ok(lambda.value()?.square() * &coeff_b + - &coeff_a + - &self.x.value()? + - &other.x.value()?) + }, + mode, + ) + .unwrap(); + + let xprime_lc = &self.x + &other.x + &xprime + coeff_a; + // (lambda) * (lambda) = (A + x + x' + x'') + let lambda_b = &lambda * coeff_b; + lambda_b.mul_equals(&lambda, &xprime_lc).unwrap(); + + let yprime = F::new_variable( + ark_relations::ns!(cs, "yprime"), + || { + Ok(-(self.y.value()? + + &(lambda.value()? * &(xprime.value()? - &self.x.value()?)))) + }, + mode, + ) + .unwrap(); + + let xres = &self.x - &xprime; + let yres = &self.y + &yprime; + lambda.mul_equals(&xres, &yres).unwrap(); + MontgomeryAffineVar::new(xprime, yprime) + } + } +} + +/// An implementation of arithmetic for Twisted Edwards curves that relies on +/// the complete formulae for the affine model, as outlined in the +/// [EFD](https://www.hyperelliptic.org/EFD/g1p/auto-twisted.html). +#[derive(Derivative)] +#[derivative(Debug, Clone)] +#[must_use] +pub struct AffineVar< + P: TEModelParameters, + F: FieldVar::BasePrimeField>, +> where + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + /// The x-coordinate. + pub x: F, + /// The y-coordinate. 
+    pub y: F,
+    #[derivative(Debug = "ignore")]
+    _params: PhantomData<P>
, +} + +impl::BasePrimeField>> + AffineVar +where + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + /// Constructs `Self` from an `(x, y)` coordinate triple. + pub fn new(x: F, y: F) -> Self { + Self { + x, + y, + _params: PhantomData, + } + } + + /// Allocates a new variable without performing an on-curve check, which is + /// useful if the variable is known to be on the curve (eg., if the point + /// is a constant or is a public input). + #[tracing::instrument(target = "r1cs", skip(cs, f))] + pub fn new_variable_omit_on_curve_check>>( + cs: impl Into::BasePrimeField>>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + + let (x, y) = match f() { + Ok(ge) => { + let ge: TEAffine
<P>
= ge.into(); + (Ok(ge.x), Ok(ge.y)) + } + _ => ( + Err(SynthesisError::AssignmentMissing), + Err(SynthesisError::AssignmentMissing), + ), + }; + + let x = F::new_variable(ark_relations::ns!(cs, "x"), || x, mode)?; + let y = F::new_variable(ark_relations::ns!(cs, "y"), || y, mode)?; + + Ok(Self::new(x, y)) + } +} + +impl::BasePrimeField>> + AffineVar +where + P: TEModelParameters, + F: FieldVar::BasePrimeField> + + TwoBitLookupGadget<::BasePrimeField, TableConstant = P::BaseField> + + ThreeBitCondNegLookupGadget< + ::BasePrimeField, + TableConstant = P::BaseField, + >, + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + /// Compute a scalar multiplication of `bases` with respect to `scalars`, + /// where the elements of `scalars` are length-three slices of bits, and + /// which such that the first two bits are use to select one of the + /// bases, while the third bit is used to conditionally negate the + /// selection. + #[tracing::instrument(target = "r1cs", skip(bases, scalars))] + pub fn precomputed_base_3_bit_signed_digit_scalar_mul( + bases: &[impl Borrow<[TEProjective
<P>
]>], + scalars: &[impl Borrow<[J]>], + ) -> Result + where + J: Borrow<[Boolean<::BasePrimeField>]>, + { + const CHUNK_SIZE: usize = 3; + let mut ed_result: Option> = None; + let mut result: Option> = None; + + let mut process_segment_result = |result: &MontgomeryAffineVar| { + let sgmt_result = result.into_edwards()?; + ed_result = match ed_result.as_ref() { + None => Some(sgmt_result), + Some(r) => Some(sgmt_result + r), + }; + Ok::<(), SynthesisError>(()) + }; + + // Compute ∏(h_i^{m_i}) for all i. + for (segment_bits_chunks, segment_powers) in scalars.iter().zip(bases) { + for (bits, base_power) in segment_bits_chunks + .borrow() + .iter() + .zip(segment_powers.borrow()) + { + let base_power = base_power.borrow(); + let mut acc_power = *base_power; + let mut coords = vec![]; + for _ in 0..4 { + coords.push(acc_power); + acc_power += base_power; + } + + let bits = bits.borrow().to_bits_le()?; + if bits.len() != CHUNK_SIZE { + return Err(SynthesisError::Unsatisfiable); + } + + let coords = coords + .iter() + .map(|p| MontgomeryAffineVar::from_edwards_to_coords(&p.into_affine())) + .collect::, _>>()?; + + let x_coeffs = coords.iter().map(|p| p.0).collect::>(); + let y_coeffs = coords.iter().map(|p| p.1).collect::>(); + + let precomp = bits[0].and(&bits[1])?; + + let x = F::zero() + + x_coeffs[0] + + F::from(bits[0].clone()) * (x_coeffs[1] - &x_coeffs[0]) + + F::from(bits[1].clone()) * (x_coeffs[2] - &x_coeffs[0]) + + F::from(precomp.clone()) + * (x_coeffs[3] - &x_coeffs[2] - &x_coeffs[1] + &x_coeffs[0]); + + let y = F::three_bit_cond_neg_lookup(&bits, &precomp, &y_coeffs)?; + + let tmp = MontgomeryAffineVar::new(x, y); + result = match result.as_ref() { + None => Some(tmp), + Some(r) => Some(tmp + r), + }; + } + + process_segment_result(&result.unwrap())?; + result = None; + } + if result.is_some() { + process_segment_result(&result.unwrap())?; + } + Ok(ed_result.unwrap()) + } +} + +impl R1CSVar<::BasePrimeField> for AffineVar +where + P: TEModelParameters, + F: FieldVar::BasePrimeField>, + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + type Value = TEProjective
<P>
; + + fn cs(&self) -> ConstraintSystemRef<::BasePrimeField> { + self.x.cs().or(self.y.cs()) + } + + #[inline] + fn value(&self) -> Result, SynthesisError> { + let (x, y) = (self.x.value()?, self.y.value()?); + let result = TEAffine::new(x, y); + Ok(result.into()) + } +} + +impl CurveVar, ::BasePrimeField> for AffineVar +where + P: TEModelParameters, + F: FieldVar::BasePrimeField> + + TwoBitLookupGadget<::BasePrimeField, TableConstant = P::BaseField>, + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + fn constant(g: TEProjective
<P>
) -> Self { + let cs = ConstraintSystemRef::None; + Self::new_variable_omit_on_curve_check(cs, || Ok(g), AllocationMode::Constant).unwrap() + } + + fn zero() -> Self { + Self::new(F::zero(), F::one()) + } + + fn is_zero(&self) -> Result::BasePrimeField>, SynthesisError> { + self.x.is_zero()?.and(&self.x.is_one()?) + } + + #[tracing::instrument(target = "r1cs", skip(cs, f))] + fn new_variable_omit_prime_order_check( + cs: impl Into::BasePrimeField>>, + f: impl FnOnce() -> Result, SynthesisError>, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + + let g = Self::new_variable_omit_on_curve_check(cs, f, mode)?; + + if mode != AllocationMode::Constant { + let d = P::COEFF_D; + let a = P::COEFF_A; + // Check that ax^2 + y^2 = 1 + dx^2y^2 + // We do this by checking that ax^2 - 1 = y^2 * (dx^2 - 1) + let x2 = g.x.square()?; + let y2 = g.y.square()?; + + let one = P::BaseField::one(); + let d_x2_minus_one = &x2 * d - one; + let a_x2_minus_one = &x2 * a - one; + + d_x2_minus_one.mul_equals(&y2, &a_x2_minus_one)?; + } + Ok(g) + } + + /// Enforce that `self` is in the prime-order subgroup. + /// + /// Does so by multiplying by the prime order, and checking that the result + /// is unchanged. + #[tracing::instrument(target = "r1cs")] + fn enforce_prime_order(&self) -> Result<(), SynthesisError> { + let r_minus_1 = (-P::ScalarField::one()).into_repr(); + + let mut result = Self::zero(); + for b in BitIteratorBE::without_leading_zeros(r_minus_1) { + result.double_in_place()?; + + if b { + result += self; + } + } + self.negate()?.enforce_equal(&result)?; + Ok(()) + } + + #[inline] + #[tracing::instrument(target = "r1cs")] + fn double_in_place(&mut self) -> Result<(), SynthesisError> { + if self.is_constant() { + let value = self.value()?; + *self = Self::constant(value.double()); + } else { + let cs = self.cs(); + let a = P::COEFF_A; + + // xy + let xy = &self.x * &self.y; + let x2 = self.x.square()?; + let y2 = self.y.square()?; + + let a_x2 = &x2 * a; + + // Compute x3 = (2xy) / (ax^2 + y^2) + let x3 = F::new_witness(ark_relations::ns!(cs, "x3"), || { + let t0 = xy.value()?.double(); + let t1 = a * &x2.value()? + &y2.value()?; + Ok(t0 * &t1.inverse().ok_or(SynthesisError::DivisionByZero)?) + })?; + + let a_x2_plus_y2 = &a_x2 + &y2; + let two_xy = xy.double()?; + x3.mul_equals(&a_x2_plus_y2, &two_xy)?; + + // Compute y3 = (y^2 - ax^2) / (2 - ax^2 - y^2) + let two = P::BaseField::one().double(); + let y3 = F::new_witness(ark_relations::ns!(cs, "y3"), || { + let a_x2 = a * &x2.value()?; + let t0 = y2.value()? - &a_x2; + let t1 = two - &a_x2 - &y2.value()?; + Ok(t0 * &t1.inverse().ok_or(SynthesisError::DivisionByZero)?) + })?; + let y2_minus_a_x2 = &y2 - &a_x2; + let two_minus_ax2_minus_y2 = (&a_x2 + &y2).negate()? + two; + + y3.mul_equals(&two_minus_ax2_minus_y2, &y2_minus_a_x2)?; + self.x = x3; + self.y = y3; + } + Ok(()) + } + + #[tracing::instrument(target = "r1cs")] + fn negate(&self) -> Result { + Ok(Self::new(self.x.negate()?, self.y.clone())) + } + + #[tracing::instrument(target = "r1cs", skip(scalar_bits_with_base_multiples))] + fn precomputed_base_scalar_mul_le<'a, I, B>( + &mut self, + scalar_bits_with_base_multiples: I, + ) -> Result<(), SynthesisError> + where + I: Iterator)>, + B: Borrow::BasePrimeField>>, + { + let (bits, multiples): (Vec<_>, Vec<_>) = scalar_bits_with_base_multiples + .map(|(bit, base)| (bit.borrow().clone(), *base)) + .unzip(); + let zero: TEAffine
<P>
= TEProjective::zero().into_affine(); + for (bits, multiples) in bits.chunks(2).zip(multiples.chunks(2)) { + if bits.len() == 2 { + let mut table = [multiples[0], multiples[1], multiples[0] + multiples[1]]; + + TEProjective::batch_normalization(&mut table); + let x_s = [zero.x, table[0].x, table[1].x, table[2].x]; + let y_s = [zero.y, table[0].y, table[1].y, table[2].y]; + + let x = F::two_bit_lookup(&bits, &x_s)?; + let y = F::two_bit_lookup(&bits, &y_s)?; + *self += Self::new(x, y); + } else if bits.len() == 1 { + let bit = &bits[0]; + let tmp = &*self + multiples[0]; + *self = bit.select(&tmp, &*self)?; + } + } + + Ok(()) + } +} + +impl AllocVar, ::BasePrimeField> for AffineVar +where + P: TEModelParameters, + F: FieldVar::BasePrimeField> + + TwoBitLookupGadget<::BasePrimeField, TableConstant = P::BaseField>, + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + #[tracing::instrument(target = "r1cs", skip(cs, f))] + fn new_variable>>( + cs: impl Into::BasePrimeField>>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + let ns = cs.into(); + let cs = ns.cs(); + let f = || Ok(*f()?.borrow()); + match mode { + AllocationMode::Constant => Self::new_variable_omit_prime_order_check(cs, f, mode), + AllocationMode::Input => Self::new_variable_omit_prime_order_check(cs, f, mode), + AllocationMode::Witness => { + // if cofactor.is_even(): + // divide until you've removed all even factors + // else: + // just directly use double and add. + let mut power_of_2: u32 = 0; + let mut cofactor = P::COFACTOR.to_vec(); + while cofactor[0] % 2 == 0 { + div2(&mut cofactor); + power_of_2 += 1; + } + + let cofactor_weight = BitIteratorBE::new(cofactor.as_slice()) + .filter(|b| *b) + .count(); + let modulus_minus_1 = (-P::ScalarField::one()).into_repr(); // r - 1 + let modulus_minus_1_weight = + BitIteratorBE::new(modulus_minus_1).filter(|b| *b).count(); + + // We pick the most efficient method of performing the prime order check: + // If the cofactor has lower hamming weight than the scalar field's modulus, + // we first multiply by the inverse of the cofactor, and then, after allocating, + // multiply by the cofactor. This ensures the resulting point has no cofactors + // + // Else, we multiply by the scalar field's modulus and ensure that the result + // equals the identity. 
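+                    // Illustrative cost comparison (numbers are schematic, not tied
+                    // to a specific curve): after the even part is stripped, a
+                    // cofactor of Hamming weight w costs roughly w conditional
+                    // additions, while multiplying by `r - 1` costs one addition per
+                    // set bit of the modulus, i.e. about half its bit-length on
+                    // average. Hence the weight comparison below.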
+ + let (mut ge, iter) = if cofactor_weight < modulus_minus_1_weight { + let ge = Self::new_variable_omit_prime_order_check( + ark_relations::ns!(cs, "Witness without subgroup check with cofactor mul"), + || f().map(|g| g.borrow().into_affine().mul_by_cofactor_inv().into()), + mode, + )?; + ( + ge, + BitIteratorBE::without_leading_zeros(cofactor.as_slice()), + ) + } else { + let ge = Self::new_variable_omit_prime_order_check( + ark_relations::ns!(cs, "Witness without subgroup check with `r` check"), + || { + f().map(|g| { + let g = g.into_affine(); + let mut power_of_two = P::ScalarField::one().into_repr(); + power_of_two.muln(power_of_2); + let power_of_two_inv = P::ScalarField::from_repr(power_of_two) + .and_then(|n| n.inverse()) + .unwrap(); + g.mul(power_of_two_inv) + }) + }, + mode, + )?; + + ( + ge, + BitIteratorBE::without_leading_zeros(modulus_minus_1.as_ref()), + ) + }; + // Remove the even part of the cofactor + for _ in 0..power_of_2 { + ge.double_in_place()?; + } + + let mut result = Self::zero(); + for b in iter { + result.double_in_place()?; + if b { + result += ≥ + } + } + if cofactor_weight < modulus_minus_1_weight { + Ok(result) + } else { + ge.enforce_equal(&ge)?; + Ok(ge) + } + } + } + } +} + +impl AllocVar, ::BasePrimeField> for AffineVar +where + P: TEModelParameters, + F: FieldVar::BasePrimeField> + + TwoBitLookupGadget<::BasePrimeField, TableConstant = P::BaseField>, + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, +{ + #[tracing::instrument(target = "r1cs", skip(cs, f))] + fn new_variable>>( + cs: impl Into::BasePrimeField>>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result { + Self::new_variable(cs, || f().map(|b| b.borrow().into_projective()), mode) + } +} + +impl ToConstraintFieldGadget<::BasePrimeField> for AffineVar +where + P: TEModelParameters, + F: FieldVar::BasePrimeField>, + for<'a> &'a F: FieldOpsBounds<'a, P::BaseField, F>, + F: ToConstraintFieldGadget<::BasePrimeField>, +{ + fn to_constraint_field( + &self, + ) -> Result::BasePrimeField>>, SynthesisError> { + let mut res = Vec::new(); + + res.extend_from_slice(&self.x.to_constraint_field()?); + res.extend_from_slice(&self.y.to_constraint_field()?); + + Ok(res) + } +} + +#[inline] +fn div2(limbs: &mut [u64]) { + let mut t = 0; + for i in limbs.iter_mut().rev() { + let t2 = *i << 63; + *i >>= 1; + *i |= t; + t = t2; + } +} + +impl_bounded_ops!( + AffineVar, + TEProjective
<P>
, + Add, + add, + AddAssign, + add_assign, + |this: &'a AffineVar, other: &'a AffineVar| { + + if [this, other].is_constant() { + assert!(this.is_constant() && other.is_constant()); + AffineVar::constant(this.value().unwrap() + &other.value().unwrap()) + } else { + let cs = [this, other].cs(); + let a = P::COEFF_A; + let d = P::COEFF_D; + + // Compute U = (x1 + y1) * (x2 + y2) + let u1 = (&this.x * -a) + &this.y; + let u2 = &other.x + &other.y; + + let u = u1 * &u2; + + // Compute v0 = x1 * y2 + let v0 = &other.y * &this.x; + + // Compute v1 = x2 * y1 + let v1 = &other.x * &this.y; + + // Compute C = d*v0*v1 + let v2 = &v0 * &v1 * d; + + // Compute x3 = (v0 + v1) / (1 + v2) + let x3 = F::new_witness(ark_relations::ns!(cs, "x3"), || { + let t0 = v0.value()? + &v1.value()?; + let t1 = P::BaseField::one() + &v2.value()?; + Ok(t0 * &t1.inverse().ok_or(SynthesisError::DivisionByZero)?) + }).unwrap(); + + let v2_plus_one = &v2 + P::BaseField::one(); + let v0_plus_v1 = &v0 + &v1; + x3.mul_equals(&v2_plus_one, &v0_plus_v1).unwrap(); + + // Compute y3 = (U + a * v0 - v1) / (1 - v2) + let y3 = F::new_witness(ark_relations::ns!(cs, "y3"), || { + let t0 = u.value()? + &(a * &v0.value()?) - &v1.value()?; + let t1 = P::BaseField::one() - &v2.value()?; + Ok(t0 * &t1.inverse().ok_or(SynthesisError::DivisionByZero)?) + }).unwrap(); + + let one_minus_v2 = (&v2 - P::BaseField::one()).negate().unwrap(); + let a_v0 = &v0 * a; + let u_plus_a_v0_minus_v1 = &u + &a_v0 - &v1; + + y3.mul_equals(&one_minus_v2, &u_plus_a_v0_minus_v1).unwrap(); + + AffineVar::new(x3, y3) + } + }, + |this: &'a AffineVar, other: TEProjective
<P>
| this + AffineVar::constant(other), + ( + F :FieldVar::BasePrimeField> + + TwoBitLookupGadget<::BasePrimeField, TableConstant = P::BaseField>, + P: TEModelParameters, + ), + for <'b> &'b F: FieldOpsBounds<'b, P::BaseField, F>, +); + +impl_bounded_ops!( + AffineVar, + TEProjective
<P>
, + Sub, + sub, + SubAssign, + sub_assign, + |this: &'a AffineVar, other: &'a AffineVar| this + other.negate().unwrap(), + |this: &'a AffineVar, other: TEProjective
<P>
| this - AffineVar::constant(other), + ( + F :FieldVar::BasePrimeField> + + TwoBitLookupGadget<::BasePrimeField, TableConstant = P::BaseField>, + P: TEModelParameters, + ), + for <'b> &'b F: FieldOpsBounds<'b, P::BaseField, F> +); + +impl<'a, P, F> GroupOpsBounds<'a, TEProjective
<P>
, AffineVar> for AffineVar +where + P: TEModelParameters, + F: FieldVar::BasePrimeField> + + TwoBitLookupGadget<::BasePrimeField, TableConstant = P::BaseField>, + for<'b> &'b F: FieldOpsBounds<'b, P::BaseField, F>, +{ +} + +impl<'a, P, F> GroupOpsBounds<'a, TEProjective
<P>
, AffineVar> for &'a AffineVar +where + P: TEModelParameters, + F: FieldVar::BasePrimeField> + + TwoBitLookupGadget<::BasePrimeField, TableConstant = P::BaseField>, + for<'b> &'b F: FieldOpsBounds<'b, P::BaseField, F>, +{ +} + +impl CondSelectGadget<::BasePrimeField> for AffineVar +where + P: TEModelParameters, + F: FieldVar::BasePrimeField>, + for<'b> &'b F: FieldOpsBounds<'b, P::BaseField, F>, +{ + #[inline] + #[tracing::instrument(target = "r1cs")] + fn conditionally_select( + cond: &Boolean<::BasePrimeField>, + true_value: &Self, + false_value: &Self, + ) -> Result { + let x = cond.select(&true_value.x, &false_value.x)?; + let y = cond.select(&true_value.y, &false_value.y)?; + + Ok(Self::new(x, y)) + } +} + +impl EqGadget<::BasePrimeField> for AffineVar +where + P: TEModelParameters, + F: FieldVar::BasePrimeField>, + for<'b> &'b F: FieldOpsBounds<'b, P::BaseField, F>, +{ + #[tracing::instrument(target = "r1cs")] + fn is_eq( + &self, + other: &Self, + ) -> Result::BasePrimeField>, SynthesisError> { + let x_equal = self.x.is_eq(&other.x)?; + let y_equal = self.y.is_eq(&other.y)?; + x_equal.and(&y_equal) + } + + #[inline] + #[tracing::instrument(target = "r1cs")] + fn conditional_enforce_equal( + &self, + other: &Self, + condition: &Boolean<::BasePrimeField>, + ) -> Result<(), SynthesisError> { + self.x.conditional_enforce_equal(&other.x, condition)?; + self.y.conditional_enforce_equal(&other.y, condition)?; + Ok(()) + } + + #[inline] + #[tracing::instrument(target = "r1cs")] + fn conditional_enforce_not_equal( + &self, + other: &Self, + condition: &Boolean<::BasePrimeField>, + ) -> Result<(), SynthesisError> { + self.is_eq(other)? + .and(condition)? + .enforce_equal(&Boolean::Constant(false)) + } +} + +impl ToBitsGadget<::BasePrimeField> for AffineVar +where + P: TEModelParameters, + F: FieldVar::BasePrimeField>, + for<'b> &'b F: FieldOpsBounds<'b, P::BaseField, F>, +{ + #[tracing::instrument(target = "r1cs")] + fn to_bits_le( + &self, + ) -> Result::BasePrimeField>>, SynthesisError> { + let mut x_bits = self.x.to_bits_le()?; + let y_bits = self.y.to_bits_le()?; + x_bits.extend_from_slice(&y_bits); + Ok(x_bits) + } + + #[tracing::instrument(target = "r1cs")] + fn to_non_unique_bits_le( + &self, + ) -> Result::BasePrimeField>>, SynthesisError> { + let mut x_bits = self.x.to_non_unique_bits_le()?; + let y_bits = self.y.to_non_unique_bits_le()?; + x_bits.extend_from_slice(&y_bits); + + Ok(x_bits) + } +} + +impl ToBytesGadget<::BasePrimeField> for AffineVar +where + P: TEModelParameters, + F: FieldVar::BasePrimeField>, + for<'b> &'b F: FieldOpsBounds<'b, P::BaseField, F>, +{ + #[tracing::instrument(target = "r1cs")] + fn to_bytes( + &self, + ) -> Result::BasePrimeField>>, SynthesisError> { + let mut x_bytes = self.x.to_bytes()?; + let y_bytes = self.y.to_bytes()?; + x_bytes.extend_from_slice(&y_bytes); + Ok(x_bytes) + } + + #[tracing::instrument(target = "r1cs")] + fn to_non_unique_bytes( + &self, + ) -> Result::BasePrimeField>>, SynthesisError> { + let mut x_bytes = self.x.to_non_unique_bytes()?; + let y_bytes = self.y.to_non_unique_bytes()?; + x_bytes.extend_from_slice(&y_bytes); + + Ok(x_bytes) + } +} diff --git a/arkworks/r1cs-std/src/groups/mod.rs b/arkworks/r1cs-std/src/groups/mod.rs new file mode 100644 index 00000000..881c5663 --- /dev/null +++ b/arkworks/r1cs-std/src/groups/mod.rs @@ -0,0 +1,163 @@ +use crate::prelude::*; +use ark_ec::ProjectiveCurve; +use ark_ff::Field; +use ark_relations::r1cs::{Namespace, SynthesisError}; +use core::ops::{Add, AddAssign, Sub, SubAssign}; + +use 
core::{borrow::Borrow, fmt::Debug}; + +/// This module contains implementations of arithmetic for various curve models. +pub mod curves; + +pub use self::curves::short_weierstrass::{bls12, mnt4, mnt6}; + +/// A hack used to work around the lack of implied bounds. +pub trait GroupOpsBounds<'a, F, T: 'a>: + Sized + + Add<&'a T, Output = T> + + Sub<&'a T, Output = T> + + Add + + Sub + + Add + + Sub +{ +} + +/// A variable that represents a curve point for +/// the curve `C`. +pub trait CurveVar: + 'static + + Sized + + Clone + + Debug + + R1CSVar + + ToBitsGadget + + ToBytesGadget + + EqGadget + + CondSelectGadget + + AllocVar + + AllocVar + + for<'a> GroupOpsBounds<'a, C, Self> + + for<'a> AddAssign<&'a Self> + + for<'a> SubAssign<&'a Self> + + AddAssign + + SubAssign + + AddAssign + + SubAssign +{ + /// Returns the constant `F::zero()`. This is the identity + /// of the group. + fn zero() -> Self; + + /// Returns a `Boolean` representing whether `self == Self::zero()`. + #[tracing::instrument(target = "r1cs")] + fn is_zero(&self) -> Result, SynthesisError> { + self.is_eq(&Self::zero()) + } + + /// Returns a constant with value `v`. + /// + /// This *should not* allocate any variables. + fn constant(other: C) -> Self; + + /// Allocates a variable in the subgroup without checking if it's in the + /// prime-order subgroup. + fn new_variable_omit_prime_order_check( + cs: impl Into>, + f: impl FnOnce() -> Result, + mode: AllocationMode, + ) -> Result; + + /// Enforce that `self` is in the prime-order subgroup. + fn enforce_prime_order(&self) -> Result<(), SynthesisError>; + + /// Computes `self + self`. + #[tracing::instrument(target = "r1cs")] + fn double(&self) -> Result { + let mut result = self.clone(); + result.double_in_place()?; + Ok(result) + } + + /// Sets `self = self + self`. + fn double_in_place(&mut self) -> Result<(), SynthesisError>; + + /// Coputes `-self`. + fn negate(&self) -> Result; + + /// Computes `bits * self`, where `bits` is a little-endian + /// `Boolean` representation of a scalar. + #[tracing::instrument(target = "r1cs", skip(bits))] + fn scalar_mul_le<'a>( + &self, + bits: impl Iterator>, + ) -> Result { + // TODO: in the constant case we should call precomputed_scalar_mul_le, + // but rn there's a bug when doing this with TE curves. + + // Computes the standard little-endian double-and-add algorithm + // (Algorithm 3.26, Guide to Elliptic Curve Cryptography) + let mut res = Self::zero(); + let mut multiple = self.clone(); + for bit in bits { + let tmp = res.clone() + &multiple; + res = bit.select(&tmp, &res)?; + multiple.double_in_place()?; + } + Ok(res) + } + + /// Computes a `I * self` in place, where `I` is a `Boolean` *little-endian* + /// representation of the scalar. + /// + /// The bases are precomputed power-of-two multiples of a single + /// base. + #[tracing::instrument(target = "r1cs", skip(scalar_bits_with_bases))] + fn precomputed_base_scalar_mul_le<'a, I, B>( + &mut self, + scalar_bits_with_bases: I, + ) -> Result<(), SynthesisError> + where + I: Iterator, + B: Borrow>, + C: 'a, + { + // Computes the standard little-endian double-and-add algorithm + // (Algorithm 3.26, Guide to Elliptic Curve Cryptography) + + // Let `original` be the initial value of `self`. 
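+        // Invariant sketch: after processing bit i, `result` holds
+        // Σ_{j ≤ i} bit_j · (2^j · original), since the iterator supplies the
+        // precomputed multiples 2^j · original. E.g. for little-endian bits
+        // (1, 0, 1) the loop adds `original` and 4·original, leaving 5·original.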
+ let mut result = Self::zero(); + for (bit, base) in scalar_bits_with_bases { + // Compute `self + 2^i * original` + let self_plus_base = result.clone() + *base; + // If `bit == 1`, set self = self + 2^i * original; + // else, set self = self; + result = bit.borrow().select(&self_plus_base, &result)?; + } + *self = result; + Ok(()) + } + + /// Computes `Σⱼ(scalarⱼ * baseⱼ)` for all j, + /// where `scalarⱼ` is a `Boolean` *little-endian* + /// representation of the j-th scalar. + #[tracing::instrument(target = "r1cs", skip(bases, scalars))] + fn precomputed_base_multiscalar_mul_le<'a, T, I, B>( + bases: &[B], + scalars: I, + ) -> Result + where + T: 'a + ToBitsGadget + ?Sized, + I: Iterator, + B: Borrow<[C]>, + { + let mut result = Self::zero(); + // Compute Σᵢ(bitᵢ * baseᵢ) for all i. + for (bits, bases) in scalars.zip(bases) { + let bases = bases.borrow(); + let bits = bits.to_bits_le()?; + result.precomputed_base_scalar_mul_le(bits.iter().zip(bases))?; + } + Ok(result) + } +} diff --git a/arkworks/r1cs-std/src/lib.rs b/arkworks/r1cs-std/src/lib.rs new file mode 100644 index 00000000..8ff44b0d --- /dev/null +++ b/arkworks/r1cs-std/src/lib.rs @@ -0,0 +1,146 @@ +#![cfg_attr(not(feature = "std"), no_std)] +//! This crate implements common "gadgets" that make +//! programming rank-1 constraint systems easier. +#![deny( + warnings, + unused, + future_incompatible, + nonstandard_style, + rust_2018_idioms +)] +#![allow(clippy::op_ref)] + +#[macro_use] +extern crate ark_std; + +#[macro_use] +extern crate ark_relations; + +#[doc(hidden)] +#[macro_use] +extern crate derivative; + +/// Some utility macros for making downstream impls easier. +#[macro_use] +pub mod macros; + +pub(crate) use ark_std::vec::Vec; + +use ark_ff::Field; + +/// This module implements gadgets related to bit manipulation, such as +/// `Boolean` and `UInt`s. +pub mod bits; +pub use self::bits::*; + +/// This module implements gadgets related to field arithmetic. +pub mod fields; + +/// This module implements gadgets related to group arithmetic, and specifically +/// elliptic curve arithmetic. +pub mod groups; + +/// This module implements gadgets related to computing pairings in bilinear +/// groups. +pub mod pairing; + +/// This module describes a trait for allocating new variables in a constraint +/// system. +pub mod alloc; +/// This module describes a trait for checking equality of variables. +pub mod eq; +/// This module implements functions for manipulating polynomial variables over finite fields. +pub mod poly; +/// This module describes traits for conditionally selecting a variable from a +/// list of variables. +pub mod select; + +#[allow(missing_docs)] +pub mod prelude { + pub use crate::{ + alloc::*, + bits::{boolean::Boolean, uint32::UInt32, uint8::UInt8, ToBitsGadget, ToBytesGadget}, + eq::*, + fields::{FieldOpsBounds, FieldVar}, + groups::{CurveVar, GroupOpsBounds}, + pairing::PairingVar, + select::*, + R1CSVar, + }; +} + +/// This trait describes some core functionality that is common to high-level +/// variables, such as `Boolean`s, `FieldVar`s, `GroupVar`s, etc. +pub trait R1CSVar { + /// The type of the "native" value that `Self` represents in the constraint + /// system. + type Value: core::fmt::Debug + Eq + Clone; + + /// Returns the underlying `ConstraintSystemRef`. + /// + /// If `self` is a constant value, then this *must* return + /// `ark_relations::r1cs::ConstraintSystemRef::None`. 
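+    ///
+    /// A hedged doc-sketch of this contract (`FpVar` is this crate's field
+    /// variable; the concrete field `F` is illustrative):
+    ///
+    /// ```ignore
+    /// let c = FpVar::<F>::Constant(F::one());
+    /// assert!(c.cs().is_none()); // constants belong to no constraint system
+    /// assert!(c.is_constant());
+    /// ```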
+ fn cs(&self) -> ark_relations::r1cs::ConstraintSystemRef; + + /// Returns `true` if `self` is a circuit-generation-time constant. + fn is_constant(&self) -> bool { + self.cs().is_none() + } + + /// Returns the value that is assigned to `self` in the underlying + /// `ConstraintSystem`. + fn value(&self) -> Result; +} + +impl> R1CSVar for [T] { + type Value = Vec; + + fn cs(&self) -> ark_relations::r1cs::ConstraintSystemRef { + let mut result = ark_relations::r1cs::ConstraintSystemRef::None; + for var in self { + result = var.cs().or(result); + } + result + } + + fn value(&self) -> Result { + let mut result = Vec::new(); + for var in self { + result.push(var.value()?); + } + Ok(result) + } +} + +impl<'a, F: Field, T: 'a + R1CSVar> R1CSVar for &'a T { + type Value = T::Value; + + fn cs(&self) -> ark_relations::r1cs::ConstraintSystemRef { + (*self).cs() + } + + fn value(&self) -> Result { + (*self).value() + } +} + +/// A utility trait to convert `Self` to `Result +pub trait Assignment { + /// Converts `self` to `Result`. + fn get(self) -> Result; +} + +impl Assignment for Option { + fn get(self) -> Result { + self.ok_or(ark_relations::r1cs::SynthesisError::AssignmentMissing) + } +} + +/// Specifies how to convert a variable of type `Self` to variables of +/// type `FpVar` +pub trait ToConstraintFieldGadget { + /// Converts `self` to `FpVar` variables. + fn to_constraint_field( + &self, + ) -> Result>, ark_relations::r1cs::SynthesisError>; +} diff --git a/arkworks/r1cs-std/src/macros.rs b/arkworks/r1cs-std/src/macros.rs new file mode 100644 index 00000000..2770a7e3 --- /dev/null +++ b/arkworks/r1cs-std/src/macros.rs @@ -0,0 +1,167 @@ +#[allow(unused_braces)] +/// Implements arithmetic traits (eg: `Add`, `Sub`, `Mul`) for the given type +/// using the impl in `$impl`. +/// +/// Used primarily for implementing these traits for `FieldVar`s and +/// `GroupVar`s. +#[macro_export] +macro_rules! impl_ops { + ( + $type: ty, + $native: ty, + $trait: ident, + $fn: ident, + $assign_trait: ident, + $assign_fn: ident, + $impl: expr, + $constant_impl: expr, + $($args:tt)* + ) => { + impl_bounded_ops!($type, $native, $trait, $fn, $assign_trait, $assign_fn, $impl, $constant_impl, ($($args)+), ); + }; +} + +/// Implements arithmetic traits (eg: `Add`, `Sub`, `Mul`) for the given type +/// using the impl in `$impl`. +/// +/// Used primarily for implementing these traits for `FieldVar`s and +/// `GroupVar`s. +/// +/// When compared to `impl_ops`, this macro allows specifying additional trait +/// bounds. +#[macro_export] +macro_rules! 
impl_bounded_ops { + ( + $type: ty, + $native: ty, + $trait: ident, + $fn: ident, + $assign_trait: ident, + $assign_fn: ident, + $impl: expr, + $constant_impl: expr, + ($($params:tt)+), + $($bounds:tt)* + ) => { + impl<'a, $($params)+> core::ops::$trait<&'a $type> for &'a $type + where + $($bounds)* + { + type Output = $type; + + #[tracing::instrument(target = "r1cs", skip(self))] + #[allow(unused_braces, clippy::redundant_closure_call)] + fn $fn(self, other: Self) -> Self::Output { + ($impl)(self, other) + } + } + + impl<'a, $($params)+> core::ops::$trait<$type> for &'a $type + where + $($bounds)* + { + type Output = $type; + + #[tracing::instrument(target = "r1cs", skip(self))] + #[allow(unused_braces)] + fn $fn(self, other: $type) -> Self::Output { + core::ops::$trait::$fn(self, &other) + } + } + + impl<'a, $($params)+> core::ops::$trait<&'a $type> for $type + where + $($bounds)* + { + type Output = $type; + + #[tracing::instrument(target = "r1cs", skip(self))] + #[allow(unused_braces)] + fn $fn(self, other: &'a $type) -> Self::Output { + core::ops::$trait::$fn(&self, other) + } + } + + impl<$($params)+> core::ops::$trait<$type> for $type + where + + $($bounds)* + { + type Output = $type; + + #[tracing::instrument(target = "r1cs", skip(self))] + #[allow(unused_braces)] + fn $fn(self, other: $type) -> Self::Output { + core::ops::$trait::$fn(&self, &other) + } + } + + impl<$($params)+> core::ops::$assign_trait<$type> for $type + where + + $($bounds)* + { + #[tracing::instrument(target = "r1cs", skip(self))] + #[allow(unused_braces)] + fn $assign_fn(&mut self, other: $type) { + let result = core::ops::$trait::$fn(&*self, &other); + *self = result + } + } + + impl<'a, $($params)+> core::ops::$assign_trait<&'a $type> for $type + where + + $($bounds)* + { + #[tracing::instrument(target = "r1cs", skip(self))] + #[allow(unused_braces)] + fn $assign_fn(&mut self, other: &'a $type) { + let result = core::ops::$trait::$fn(&*self, other); + *self = result + } + } + + impl<'a, $($params)+> core::ops::$trait<$native> for &'a $type + where + + $($bounds)* + { + type Output = $type; + + #[tracing::instrument(target = "r1cs", skip(self))] + #[allow(unused_braces, clippy::redundant_closure_call)] + fn $fn(self, other: $native) -> Self::Output { + ($constant_impl)(self, other) + } + } + + impl<$($params)+> core::ops::$trait<$native> for $type + where + + $($bounds)* + { + type Output = $type; + + #[tracing::instrument(target = "r1cs", skip(self))] + #[allow(unused_braces)] + fn $fn(self, other: $native) -> Self::Output { + core::ops::$trait::$fn(&self, other) + } + } + + impl<$($params)+> core::ops::$assign_trait<$native> for $type + where + + $($bounds)* + { + + #[tracing::instrument(target = "r1cs", skip(self))] + #[allow(unused_braces)] + fn $assign_fn(&mut self, other: $native) { + let result = core::ops::$trait::$fn(&*self, other); + *self = result + } + } + } +} diff --git a/arkworks/r1cs-std/src/pairing/bls12/mod.rs b/arkworks/r1cs-std/src/pairing/bls12/mod.rs new file mode 100644 index 00000000..33454c41 --- /dev/null +++ b/arkworks/r1cs-std/src/pairing/bls12/mod.rs @@ -0,0 +1,167 @@ +use ark_relations::r1cs::SynthesisError; + +use super::PairingVar as PG; + +use crate::{ + fields::{fp::FpVar, fp12::Fp12Var, fp2::Fp2Var, FieldVar}, + groups::bls12::{G1AffineVar, G1PreparedVar, G1Var, G2PreparedVar, G2Var}, +}; +use ark_ec::bls12::{Bls12, Bls12Parameters, TwistType}; +use ark_ff::fields::BitIteratorBE; +use core::marker::PhantomData; + +/// Specifies the constraints for computing a pairing in a 
BLS12 bilinear group.
+pub struct PairingVar<P: Bls12Parameters>(PhantomData<P>);
+
+type Fp2V<P> = Fp2Var<<P as Bls12Parameters>::Fp2Params>;
+
+impl<P: Bls12Parameters> PairingVar<P> {
+    // Evaluate the line function at point p.
+    #[tracing::instrument(target = "r1cs")]
+    fn ell(
+        f: &mut Fp12Var<P::Fp12Params>,
+        coeffs: &(Fp2V<P>, Fp2V<P>),
+        p: &G1AffineVar<P>,
+    ) -> Result<(), SynthesisError> {
+        let zero = FpVar::<P::Fp>::zero();
+
+        match P::TWIST_TYPE {
+            TwistType::M => {
+                let c0 = coeffs.0.clone();
+                let mut c1 = coeffs.1.clone();
+                let c2 = Fp2V::<P>::new(p.y.clone(), zero);
+
+                c1.c0 *= &p.x;
+                c1.c1 *= &p.x;
+                *f = f.mul_by_014(&c0, &c1, &c2)?;
+                Ok(())
+            }
+            TwistType::D => {
+                let c0 = Fp2V::<P>::new(p.y.clone(), zero);
+                let mut c1 = coeffs.0.clone();
+                let c2 = coeffs.1.clone();
+
+                c1.c0 *= &p.x;
+                c1.c1 *= &p.x;
+                *f = f.mul_by_034(&c0, &c1, &c2)?;
+                Ok(())
+            }
+        }
+    }
+
+    #[tracing::instrument(target = "r1cs")]
+    fn exp_by_x(f: &Fp12Var<P::Fp12Params>) -> Result<Fp12Var<P::Fp12Params>, SynthesisError> {
+        let mut result = f.optimized_cyclotomic_exp(P::X)?;
+        if P::X_IS_NEGATIVE {
+            result = result.unitary_inverse()?;
+        }
+        Ok(result)
+    }
+}
+
+impl<P: Bls12Parameters> PG<Bls12<P>, P::Fp> for PairingVar<P> {
+    type G1Var = G1Var<P>;
+    type G2Var = G2Var<P>;
+    type G1PreparedVar = G1PreparedVar<P>;
+    type G2PreparedVar = G2PreparedVar<P>
; + type GTVar = Fp12Var; + + #[tracing::instrument(target = "r1cs")] + fn miller_loop( + ps: &[Self::G1PreparedVar], + qs: &[Self::G2PreparedVar], + ) -> Result { + let mut pairs = vec![]; + for (p, q) in ps.iter().zip(qs.iter()) { + pairs.push((p, q.ell_coeffs.iter())); + } + let mut f = Self::GTVar::one(); + + for i in BitIteratorBE::new(P::X).skip(1) { + f.square_in_place()?; + + for &mut (p, ref mut coeffs) in pairs.iter_mut() { + Self::ell(&mut f, coeffs.next().unwrap(), &p.0)?; + } + + if i { + for &mut (p, ref mut coeffs) in pairs.iter_mut() { + Self::ell(&mut f, &coeffs.next().unwrap(), &p.0)?; + } + } + } + + if P::X_IS_NEGATIVE { + f = f.unitary_inverse()?; + } + + Ok(f) + } + + #[tracing::instrument(target = "r1cs")] + fn final_exponentiation(f: &Self::GTVar) -> Result { + // Computing the final exponentation following + // https://eprint.iacr.org/2016/130.pdf. + // We don't use their "faster" formula because it is difficult to make + // it work for curves with odd `P::X`. + // Hence we implement the slower algorithm from Table 1 below. + + let f1 = f.unitary_inverse()?; + + f.inverse().and_then(|mut f2| { + // f2 = f^(-1); + // r = f^(p^6 - 1) + let mut r = f1; + r *= &f2; + + // f2 = f^(p^6 - 1) + f2 = r.clone(); + // r = f^((p^6 - 1)(p^2)) + r.frobenius_map_in_place(2)?; + + // r = f^((p^6 - 1)(p^2) + (p^6 - 1)) + // r = f^((p^6 - 1)(p^2 + 1)) + r *= &f2; + + // Hard part of the final exponentation is below: + // From https://eprint.iacr.org/2016/130.pdf, Table 1 + let mut y0 = r.cyclotomic_square()?; + y0 = y0.unitary_inverse()?; + + let mut y5 = Self::exp_by_x(&r)?; + + let mut y1 = y5.cyclotomic_square()?; + let mut y3 = y0 * &y5; + y0 = Self::exp_by_x(&y3)?; + let y2 = Self::exp_by_x(&y0)?; + let mut y4 = Self::exp_by_x(&y2)?; + y4 *= &y1; + y1 = Self::exp_by_x(&y4)?; + y3 = y3.unitary_inverse()?; + y1 *= &y3; + y1 *= &r; + y3 = r.clone(); + y3 = y3.unitary_inverse()?; + y0 *= &r; + y0.frobenius_map_in_place(3)?; + y4 *= &y3; + y4.frobenius_map_in_place(1)?; + y5 *= &y2; + y5.frobenius_map_in_place(2)?; + y5 *= &y0; + y5 *= &y4; + y5 *= &y1; + Ok(y5) + }) + } + + #[tracing::instrument(target = "r1cs")] + fn prepare_g1(p: &Self::G1Var) -> Result { + Self::G1PreparedVar::from_group_var(p) + } + + #[tracing::instrument(target = "r1cs")] + fn prepare_g2(q: &Self::G2Var) -> Result { + Self::G2PreparedVar::from_group_var(q) + } +} diff --git a/arkworks/r1cs-std/src/pairing/mnt4/mod.rs b/arkworks/r1cs-std/src/pairing/mnt4/mod.rs new file mode 100644 index 00000000..0ddbec59 --- /dev/null +++ b/arkworks/r1cs-std/src/pairing/mnt4/mod.rs @@ -0,0 +1,223 @@ +use ark_relations::r1cs::SynthesisError; + +use super::PairingVar as PG; + +use crate::{ + fields::{fp::FpVar, fp2::Fp2Var, fp4::Fp4Var, FieldVar}, + groups::mnt4::{ + AteAdditionCoefficientsVar, AteDoubleCoefficientsVar, G1PreparedVar, G1Var, G2PreparedVar, + G2ProjectiveExtendedVar, G2Var, + }, +}; +use ark_ec::mnt4::{MNT4Parameters, MNT4}; +use ark_ff::BitIteratorBE; + +use core::marker::PhantomData; + +/// Specifies the constraints for computing a pairing in a MNT4 bilinear group. +pub struct PairingVar(PhantomData
<P>);
+
+type Fp2G<P> = Fp2Var<<P as MNT4Parameters>::Fp2Params>;
+type Fp4G<P> = Fp4Var<<P as MNT4Parameters>::Fp4Params>;
+/// A variable corresponding to `ark_ec::mnt4::GT`.
+pub type GTVar<P> = Fp4G<P>;
+
+impl<P: MNT4Parameters> PairingVar<P> {
+    #[tracing::instrument(target = "r1cs", skip(r))]
+    pub(crate) fn doubling_step_for_flipped_miller_loop(
+        r: &G2ProjectiveExtendedVar<P>,
+    ) -> Result<(G2ProjectiveExtendedVar<P>, AteDoubleCoefficientsVar<P>
), SynthesisError> { + let a = r.t.square()?; + let b = r.x.square()?; + let c = r.y.square()?; + let d = c.square()?; + let e = (&r.x + &c).square()? - &b - &d; + let f = (b.double()? + &b) + &a * P::TWIST_COEFF_A; + let g = f.square()?; + + let d_eight = d.double()?.double()?.double()?; + + let e2 = e.double()?; + let x = &g - &e2.double()?; + + let y = &f * (&e2 - &x) - &d_eight; + let z = (&r.y + &r.z).square()? - &c - &r.z.square()?; + let t = z.square()?; + + let r2 = G2ProjectiveExtendedVar { x, y, z, t }; + let c_h = (&r2.z + &r.t).square()? - &r2.t - &a; + let c_4c = c.double()?.double()?; + let c_j = (&f + &r.t).square()? - &g - &a; + let c_l = (&f + &r.x).square()? - &g - &b; + let coeff = AteDoubleCoefficientsVar { + c_h, + c_4c, + c_j, + c_l, + }; + + Ok((r2, coeff)) + } + + #[tracing::instrument(target = "r1cs", skip(r))] + pub(crate) fn mixed_addition_step_for_flipped_miller_loop( + x: &Fp2G
<P>,
+        y: &Fp2G<P>,
+        r: &G2ProjectiveExtendedVar<P>,
+    ) -> Result<(G2ProjectiveExtendedVar<P>, AteAdditionCoefficientsVar<P>
), SynthesisError> { + let a = y.square()?; + let b = &r.t * x; + let d = ((&r.z + y).square()? - &a - &r.t) * &r.t; + let h = &b - &r.x; + let i = h.square()?; + let e = i.double()?.double()?; + let j = &h * &e; + let v = &r.x * &e; + let ry2 = r.y.double()?; + let l1 = &d - &ry2; + + let x = l1.square()? - &j - &v.double()?; + let y = &l1 * &(&v - &x) - j * &ry2; + let z = (&r.z + &h).square()? - &r.t - &i; + let t = z.square()?; + + let r2 = G2ProjectiveExtendedVar { + x, + y, + z: z.clone(), + t, + }; + let coeff = AteAdditionCoefficientsVar { c_l1: l1, c_rz: z }; + + Ok((r2, coeff)) + } + + #[tracing::instrument(target = "r1cs", skip(p, q))] + pub(crate) fn ate_miller_loop( + p: &G1PreparedVar
<P>,
+        q: &G2PreparedVar<P>,
+    ) -> Result<Fp4G<P>, SynthesisError> {
+        let l1_coeff = Fp2G::<P>::new(p.x.clone(), FpVar::<P::Fp>::zero()) - &q.x_over_twist;
+
+        let mut f = Fp4G::<P>
::one(); + + let mut add_idx: usize = 0; + + // code below gets executed for all bits (EXCEPT the MSB itself) of + // mnt6_param_p (skipping leading zeros) in MSB to LSB order + for (dbl_idx, bit) in BitIteratorBE::without_leading_zeros(P::ATE_LOOP_COUNT) + .skip(1) + .enumerate() + { + let dc = &q.double_coefficients[dbl_idx]; + + let g_rr_at_p = Fp4G::
<P>
::new( + &dc.c_l - &dc.c_4c - &dc.c_j * &p.x_twist, + &dc.c_h * &p.y_twist, + ); + + f = f.square()? * &g_rr_at_p; + + if bit { + let ac = &q.addition_coefficients[add_idx]; + add_idx += 1; + + let g_rq_at_p = Fp4G::
<P>
::new( + &ac.c_rz * &p.y_twist, + (&q.y_over_twist * &ac.c_rz + &l1_coeff * &ac.c_l1).negate()?, + ); + f *= &g_rq_at_p; + } + } + + if P::ATE_IS_LOOP_COUNT_NEG { + let ac = &q.addition_coefficients[add_idx]; + + let g_rnegr_at_p = Fp4G::
<P>
::new( + &ac.c_rz * &p.y_twist, + (&q.y_over_twist * &ac.c_rz + &l1_coeff * &ac.c_l1).negate()?, + ); + f = (&f * &g_rnegr_at_p).inverse()?; + } + + Ok(f) + } + + #[tracing::instrument(target = "r1cs", skip(value))] + pub(crate) fn final_exponentiation(value: &Fp4G
<P>) -> Result<Fp4G<P>, SynthesisError> {
+        let value_inv = value.inverse()?;
+        let value_to_first_chunk = Self::final_exponentiation_first_chunk(value, &value_inv)?;
+        let value_inv_to_first_chunk = Self::final_exponentiation_first_chunk(&value_inv, value)?;
+        Self::final_exponentiation_last_chunk(&value_to_first_chunk, &value_inv_to_first_chunk)
+    }
+
+    #[tracing::instrument(target = "r1cs", skip(elt, elt_inv))]
+    fn final_exponentiation_first_chunk(
+        elt: &Fp4G<P>,
+        elt_inv: &Fp4G<P>,
+    ) -> Result<Fp4G<P>, SynthesisError> {
+        // (q^2-1)
+
+        // elt_q2 = elt^(q^2)
+        let elt_q2 = elt.unitary_inverse()?;
+        // elt_q2_over_elt = elt^(q^2-1)
+        Ok(elt_q2 * elt_inv)
+    }
+
+    #[tracing::instrument(target = "r1cs", skip(elt, elt_inv))]
+    fn final_exponentiation_last_chunk(
+        elt: &Fp4G<P>,
+        elt_inv: &Fp4G<P>,
+    ) -> Result<Fp4G<P>, SynthesisError> {
+        let elt_clone = elt.clone();
+        let elt_inv_clone = elt_inv.clone();
+
+        let mut elt_q = elt.clone();
+        elt_q.frobenius_map_in_place(1)?;
+
+        let w1_part = elt_q.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_1)?;
+        let w0_part = if P::FINAL_EXPONENT_LAST_CHUNK_W0_IS_NEG {
+            elt_inv_clone.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0)?
+        } else {
+            elt_clone.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0)?
+        };
+
+        Ok(w1_part * &w0_part)
+    }
+}
+
+impl<P: MNT4Parameters> PG<MNT4<P>, P::Fp> for PairingVar<P> {
+    type G1Var = G1Var<P>;
+    type G2Var = G2Var<P>;
+    type G1PreparedVar = G1PreparedVar<P>;
+    type G2PreparedVar = G2PreparedVar<P>;
+    type GTVar = GTVar<P>;
+
+    #[tracing::instrument(target = "r1cs")]
+    fn miller_loop(
+        ps: &[Self::G1PreparedVar],
+        qs: &[Self::G2PreparedVar],
+    ) -> Result<Self::GTVar, SynthesisError> {
+        let mut result = Fp4G::<P>
::one(); + for (p, q) in ps.iter().zip(qs) { + result *= Self::ate_miller_loop(p, q)?; + } + + Ok(result) + } + + #[tracing::instrument(target = "r1cs")] + fn final_exponentiation(r: &Self::GTVar) -> Result { + Self::final_exponentiation(r) + } + + #[tracing::instrument(target = "r1cs")] + fn prepare_g1(p: &Self::G1Var) -> Result { + Self::G1PreparedVar::from_group_var(p) + } + + #[tracing::instrument(target = "r1cs")] + fn prepare_g2(q: &Self::G2Var) -> Result { + Self::G2PreparedVar::from_group_var(q) + } +} diff --git a/arkworks/r1cs-std/src/pairing/mnt6/mod.rs b/arkworks/r1cs-std/src/pairing/mnt6/mod.rs new file mode 100644 index 00000000..bb849b29 --- /dev/null +++ b/arkworks/r1cs-std/src/pairing/mnt6/mod.rs @@ -0,0 +1,218 @@ +use ark_relations::r1cs::SynthesisError; + +use super::PairingVar as PG; + +use crate::{ + fields::{fp::FpVar, fp3::Fp3Var, fp6_2over3::Fp6Var, FieldVar}, + groups::mnt6::{ + AteAdditionCoefficientsVar, AteDoubleCoefficientsVar, G1PreparedVar, G1Var, G2PreparedVar, + G2ProjectiveExtendedVar, G2Var, + }, +}; +use ark_ec::mnt6::{MNT6Parameters, MNT6}; +use ark_ff::fields::BitIteratorBE; +use core::marker::PhantomData; + +/// Specifies the constraints for computing a pairing in a MNT6 bilinear group. +pub struct PairingVar(PhantomData
<P>);
+
+type Fp3G<P> = Fp3Var<<P as MNT6Parameters>::Fp3Params>;
+type Fp6G<P> = Fp6Var<<P as MNT6Parameters>::Fp6Params>;
+/// A variable corresponding to `ark_ec::mnt6::GT`.
+pub type GTVar<P> = Fp6G<P>;
+
+impl<P: MNT6Parameters> PairingVar<P> {
+    #[tracing::instrument(target = "r1cs", skip(r))]
+    pub(crate) fn doubling_step_for_flipped_miller_loop(
+        r: &G2ProjectiveExtendedVar<P>,
+    ) -> Result<(G2ProjectiveExtendedVar<P>, AteDoubleCoefficientsVar<P>
), SynthesisError> { + let a = r.t.square()?; + let b = r.x.square()?; + let c = r.y.square()?; + let d = c.square()?; + let e = (&r.x + &c).square()? - &b - &d; + let f = b.double()? + &b + &(&a * P::TWIST_COEFF_A); + let g = f.square()?; + + let d_eight = d.double()?.double()?.double()?; + + let e2 = e.double()?; + let x = &g - e2.double()?; + let y = &f * (e2 - &x) - d_eight; + let z = (&r.y + &r.z).square()? - &c - &r.z.square()?; + let t = z.square()?; + + let r2 = G2ProjectiveExtendedVar { x, y, z, t }; + let coeff = AteDoubleCoefficientsVar { + c_h: (&r2.z + &r.t).square()? - &r2.t - &a, + c_4c: c.double()?.double()?, + c_j: (&f + &r.t).square()? - &g - &a, + c_l: (&f + &r.x).square()? - &g - &b, + }; + + Ok((r2, coeff)) + } + + #[tracing::instrument(target = "r1cs", skip(r))] + pub(crate) fn mixed_addition_step_for_flipped_miller_loop( + x: &Fp3G
<P>,
+        y: &Fp3G<P>,
+        r: &G2ProjectiveExtendedVar<P>,
+    ) -> Result<(G2ProjectiveExtendedVar<P>, AteAdditionCoefficientsVar<P>
), SynthesisError> { + let a = y.square()?; + let b = &r.t * x; + let d = ((&r.z + y).square()? - &a - &r.t) * &r.t; + let h = &b - &r.x; + let i = h.square()?; + let e = i.double()?.double()?; + let j = &h * &e; + let v = &r.x * &e; + let ry2 = r.y.double()?; + let l1 = &d - &ry2; + + let x = l1.square()? - &j - &v.double()?; + let y = &l1 * &(&v - &x) - &j * ry2; + let z = (&r.z + &h).square()? - &r.t - &i; + let t = z.square()?; + + let r2 = G2ProjectiveExtendedVar { + x, + y, + z: z.clone(), + t, + }; + let coeff = AteAdditionCoefficientsVar { c_l1: l1, c_rz: z }; + + Ok((r2, coeff)) + } + + #[tracing::instrument(target = "r1cs", skip(p, q))] + pub(crate) fn ate_miller_loop( + p: &G1PreparedVar
<P>,
+        q: &G2PreparedVar<P>,
+    ) -> Result<Fp6G<P>, SynthesisError> {
+        let zero = FpVar::<P::Fp>::zero();
+        let l1_coeff = Fp3Var::new(p.x.clone(), zero.clone(), zero) - &q.x_over_twist;
+
+        let mut f = Fp6G::<P>
::one(); + + let mut add_idx: usize = 0; + + // code below gets executed for all bits (EXCEPT the MSB itself) of + // mnt6_param_p (skipping leading zeros) in MSB to LSB order + for (dbl_idx, bit) in BitIteratorBE::without_leading_zeros(P::ATE_LOOP_COUNT) + .skip(1) + .enumerate() + { + let dc = &q.double_coefficients[dbl_idx]; + + let g_rr_at_p = Fp6Var::new( + &dc.c_l - &dc.c_4c - &dc.c_j * &p.x_twist, + &dc.c_h * &p.y_twist, + ); + + f = f.square()? * &g_rr_at_p; + + if bit { + let ac = &q.addition_coefficients[add_idx]; + add_idx += 1; + + let g_rq_at_p = Fp6Var::new( + &ac.c_rz * &p.y_twist, + (&q.y_over_twist * &ac.c_rz + &(&l1_coeff * &ac.c_l1)).negate()?, + ); + f *= &g_rq_at_p; + } + } + + if P::ATE_IS_LOOP_COUNT_NEG { + let ac = &q.addition_coefficients[add_idx]; + + let g_rnegr_at_p = Fp6Var::new( + &ac.c_rz * &p.y_twist, + (&q.y_over_twist * &ac.c_rz + &(l1_coeff * &ac.c_l1)).negate()?, + ); + f = (f * &g_rnegr_at_p).inverse()?; + } + + Ok(f) + } + + #[tracing::instrument(target = "r1cs")] + pub(crate) fn final_exponentiation(value: &Fp6G
<P>) -> Result<Fp6G<P>, SynthesisError> {
+        let value_inv = value.inverse()?;
+        let value_to_first_chunk = Self::final_exponentiation_first_chunk(value, &value_inv)?;
+        let value_inv_to_first_chunk = Self::final_exponentiation_first_chunk(&value_inv, value)?;
+        Self::final_exponentiation_last_chunk(&value_to_first_chunk, &value_inv_to_first_chunk)
+    }
+
+    #[tracing::instrument(target = "r1cs", skip(elt, elt_inv))]
+    fn final_exponentiation_first_chunk(
+        elt: &Fp6G<P>,
+        elt_inv: &Fp6G<P>,
+    ) -> Result<Fp6G<P>, SynthesisError> {
+        // (q^3-1)*(q+1)
+
+        // elt_q3 = elt^(q^3)
+        let elt_q3 = elt.unitary_inverse()?;
+        // elt_q3_over_elt = elt^(q^3-1)
+        let elt_q3_over_elt = elt_q3 * elt_inv;
+        // alpha = elt^((q^3-1) * q)
+        let alpha = elt_q3_over_elt.frobenius_map(1)?;
+        // beta = elt^((q^3-1)*(q+1))
+        Ok(alpha * &elt_q3_over_elt)
+    }
+
+    #[tracing::instrument(target = "r1cs", skip(elt, elt_inv))]
+    fn final_exponentiation_last_chunk(
+        elt: &Fp6G<P>,
+        elt_inv: &Fp6G<P>,
+    ) -> Result<Fp6G<P>, SynthesisError> {
+        let elt_q = elt.frobenius_map(1)?;
+
+        let w1_part = elt_q.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_1)?;
+        let w0_part = if P::FINAL_EXPONENT_LAST_CHUNK_W0_IS_NEG {
+            elt_inv.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0)?
+        } else {
+            elt.cyclotomic_exp(&P::FINAL_EXPONENT_LAST_CHUNK_ABS_OF_W0)?
+        };
+
+        Ok(w1_part * &w0_part)
+    }
+}
+
+impl<P: MNT6Parameters> PG<MNT6<P>, P::Fp> for PairingVar<P> {
+    type G1Var = G1Var<P>;
+    type G2Var = G2Var<P>;
+    type G1PreparedVar = G1PreparedVar<P>;
+    type G2PreparedVar = G2PreparedVar<P>;
+    type GTVar = GTVar<P>;
+
+    #[tracing::instrument(target = "r1cs")]
+    fn miller_loop(
+        ps: &[Self::G1PreparedVar],
+        qs: &[Self::G2PreparedVar],
+    ) -> Result<Self::GTVar, SynthesisError> {
+        let mut result = Fp6G::<P>
::one(); + for (p, q) in ps.iter().zip(qs) { + result *= Self::ate_miller_loop(p, q)?; + } + + Ok(result) + } + + #[tracing::instrument(target = "r1cs")] + fn final_exponentiation(r: &Self::GTVar) -> Result { + Self::final_exponentiation(r) + } + + #[tracing::instrument(target = "r1cs")] + fn prepare_g1(p: &Self::G1Var) -> Result { + Self::G1PreparedVar::from_group_var(p) + } + + #[tracing::instrument(target = "r1cs")] + fn prepare_g2(q: &Self::G2Var) -> Result { + Self::G2PreparedVar::from_group_var(q) + } +} diff --git a/arkworks/r1cs-std/src/pairing/mod.rs b/arkworks/r1cs-std/src/pairing/mod.rs new file mode 100644 index 00000000..157f26d8 --- /dev/null +++ b/arkworks/r1cs-std/src/pairing/mod.rs @@ -0,0 +1,84 @@ +use crate::prelude::*; +use ark_ec::PairingEngine; +use ark_ff::Field; +use ark_relations::r1cs::SynthesisError; +use core::fmt::Debug; + +/// This module implements pairings for BLS12 bilinear groups. +pub mod bls12; +/// This module implements pairings for MNT4 bilinear groups. +pub mod mnt4; +/// This module implements pairings for MNT6 bilinear groups. +pub mod mnt6; + +/// Specifies the constraints for computing a pairing in the yybilinear group +/// `E`. +pub trait PairingVar::Fq> { + /// An variable representing an element of `G1`. + /// This is the R1CS equivalent of `E::G1Projective`. + type G1Var: CurveVar + + AllocVar + + AllocVar; + + /// An variable representing an element of `G2`. + /// This is the R1CS equivalent of `E::G2Projective`. + type G2Var: CurveVar + + AllocVar + + AllocVar; + + /// An variable representing an element of `GT`. + /// This is the R1CS equivalent of `E::GT`. + type GTVar: FieldVar; + + /// An variable representing cached precomputation that can speed up + /// pairings computations. This is the R1CS equivalent of + /// `E::G1Prepared`. + type G1PreparedVar: ToBytesGadget + + AllocVar + + Clone + + Debug; + /// An variable representing cached precomputation that can speed up + /// pairings computations. This is the R1CS equivalent of + /// `E::G2Prepared`. + type G2PreparedVar: ToBytesGadget + + AllocVar + + Clone + + Debug; + + /// Computes a multi-miller loop between elements + /// of `p` and `q`. + fn miller_loop( + p: &[Self::G1PreparedVar], + q: &[Self::G2PreparedVar], + ) -> Result; + + /// Computes a final exponentiation over `p`. + fn final_exponentiation(p: &Self::GTVar) -> Result; + + /// Computes a pairing over `p` and `q`. + #[tracing::instrument(target = "r1cs")] + fn pairing( + p: Self::G1PreparedVar, + q: Self::G2PreparedVar, + ) -> Result { + let tmp = Self::miller_loop(&[p], &[q])?; + Self::final_exponentiation(&tmp) + } + + /// Computes a product of pairings over the elements in `p` and `q`. + #[must_use] + #[tracing::instrument(target = "r1cs")] + fn product_of_pairings( + p: &[Self::G1PreparedVar], + q: &[Self::G2PreparedVar], + ) -> Result { + let miller_result = Self::miller_loop(p, q)?; + Self::final_exponentiation(&miller_result) + } + + /// Performs the precomputation to generate `Self::G1PreparedVar`. + fn prepare_g1(q: &Self::G1Var) -> Result; + + /// Performs the precomputation to generate `Self::G2PreparedVar`. 
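+    ///
+    /// A hedged usage sketch (variable names are illustrative; `PV` is any
+    /// implementor of this trait): both prepared forms feed directly into
+    /// `pairing`:
+    ///
+    /// ```ignore
+    /// let gt = PV::pairing(PV::prepare_g1(&p)?, PV::prepare_g2(&q)?)?;
+    /// ```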
+ fn prepare_g2(q: &Self::G2Var) -> Result; +} diff --git a/arkworks/r1cs-std/src/poly/domain/mod.rs b/arkworks/r1cs-std/src/poly/domain/mod.rs new file mode 100644 index 00000000..4959682a --- /dev/null +++ b/arkworks/r1cs-std/src/poly/domain/mod.rs @@ -0,0 +1,93 @@ +use crate::boolean::Boolean; +use crate::eq::EqGadget; +use crate::fields::fp::FpVar; +use crate::fields::FieldVar; +use ark_ff::PrimeField; +use ark_relations::r1cs::SynthesisError; +use ark_std::vec::Vec; + +pub mod vanishing_poly; + +#[derive(Clone, Debug)] +/// Defines an evaluation domain over a prime field. The domain is a coset of size `1< { + /// generator of subgroup g + pub gen: F, + /// index of the quotient group (i.e. the `offset`) + pub offset: FpVar, + /// dimension of evaluation domain + pub dim: u64, +} + +impl EqGadget for Radix2DomainVar { + fn is_eq(&self, other: &Self) -> Result, SynthesisError> { + if self.gen != other.gen || self.dim != other.dim { + Ok(Boolean::constant(false)) + } else { + self.offset.is_eq(&other.offset) + } + } +} + +impl Radix2DomainVar { + /// order of the domain + pub fn order(&self) -> usize { + 1 << self.dim + } + + /// Returns g, g^2, ..., g^{dim} + fn powers_of_gen(&self, dim: usize) -> Vec { + let mut result = Vec::new(); + let mut cur = self.gen; + for _ in 0..dim { + result.push(cur); + cur = cur * cur; + } + result + } + + /// Size of the domain + pub fn size(&self) -> u64 { + 1 << self.dim + } + + /// For domain `h` with dimension `n`, `position` represented by `query_pos` in big endian form, + /// returns `h*g^{position}` + pub fn query_position_to_coset( + &self, + query_pos: &[Boolean], + coset_dim: u64, + ) -> Result>, SynthesisError> { + let mut coset_index = query_pos; + assert!( + query_pos.len() == self.dim as usize + || query_pos.len() == (self.dim - coset_dim) as usize + ); + if query_pos.len() == self.dim as usize { + coset_index = &coset_index[0..(coset_index.len() - coset_dim as usize)]; + } + let mut coset = Vec::new(); + let powers_of_g = &self.powers_of_gen(self.dim as usize)[(coset_dim as usize)..]; + + let mut first_point_in_coset: FpVar = FpVar::zero(); + for i in 0..coset_index.len() { + let term = coset_index[i].select(&FpVar::constant(powers_of_g[i]), &FpVar::zero())?; + first_point_in_coset += &term; + } + + first_point_in_coset *= &self.offset; + + coset.push(first_point_in_coset); + for i in 1..(1 << (coset_dim as usize)) { + let new_elem = &coset[i - 1] * &FpVar::Constant(self.gen); + coset.push(new_elem); + } + + Ok(coset) + } +} diff --git a/arkworks/r1cs-std/src/poly/domain/vanishing_poly.rs b/arkworks/r1cs-std/src/poly/domain/vanishing_poly.rs new file mode 100644 index 00000000..8adce831 --- /dev/null +++ b/arkworks/r1cs-std/src/poly/domain/vanishing_poly.rs @@ -0,0 +1,79 @@ +use crate::fields::fp::FpVar; +use crate::fields::FieldVar; +use ark_ff::{Field, PrimeField}; +use ark_relations::r1cs::SynthesisError; +use ark_std::ops::Sub; + +/// Struct describing vanishing polynomial for a multiplicative coset H where |H| is a power of 2. +/// As H is a coset, every element can be described as h*g^i and therefore +/// has vanishing polynomial Z_H(x) = x^|H| - h^|H| +#[derive(Clone)] +pub struct VanishingPolynomial { + /// h^|H| + pub constant_term: F, + /// log_2(|H|) + pub dim_h: u64, + /// |H| + pub order_h: u64, +} + +impl VanishingPolynomial { + /// returns a VanishingPolynomial of coset `H = h`. 
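+    /// (i.e. the multiplicative coset `h·<g>` of size `2^dim_h`). For example,
+    /// with `dim_h = 2` the vanishing polynomial is `Z_H(x) = x^4 - h^4`.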
+    pub fn new(offset: F, dim_h: u64) -> Self {
+        let order_h = 1 << dim_h;
+        let vp = VanishingPolynomial {
+            constant_term: offset.pow([order_h]),
+            dim_h,
+            order_h,
+        };
+        vp
+    }
+
+    /// Evaluates the vanishing polynomial without generating the constraints.
+    pub fn evaluate(&self, x: &F) -> F {
+        let mut result = x.pow([self.order_h]);
+        result -= &self.constant_term;
+        result
+    }
+
+    /// Evaluates the constraints and just gives you the gadget for the result.
+    /// Caution for use in holographic lincheck: The output has 2 entries in one matrix
+    pub fn evaluate_constraints(&self, x: &FpVar<F>) -> Result<FpVar<F>, SynthesisError> {
+        // Z_H(x) = x^{2^dim_h} - h^{2^dim_h}: square `dim_h` times, then
+        // subtract the constant term. This handles every `dim_h >= 1`,
+        // including `dim_h == 1`, where Z_H(x) = x^2 - h^2.
+        let mut cur = x.square()?;
+        for _ in 1..self.dim_h {
+            cur.square_in_place()?;
+        }
+        cur -= &FpVar::Constant(self.constant_term);
+        Ok(cur)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::alloc::AllocVar;
+    use crate::fields::fp::FpVar;
+    use crate::poly::domain::vanishing_poly::VanishingPolynomial;
+    use crate::R1CSVar;
+    use ark_relations::{ns, r1cs::ConstraintSystem};
+    use ark_std::{test_rng, UniformRand};
+    use ark_test_curves::bls12_381::Fr;
+
+    #[test]
+    fn constraints_test() {
+        let mut rng = test_rng();
+        let offset = Fr::rand(&mut rng);
+        let cs = ConstraintSystem::new_ref();
+        let x = Fr::rand(&mut rng);
+        let x_var = FpVar::new_witness(ns!(cs, "x_var"), || Ok(x)).unwrap();
+        let vp = VanishingPolynomial::new(offset, 12);
+        let native = vp.evaluate(&x);
+        let result_var = vp.evaluate_constraints(&x_var).unwrap();
+        assert!(cs.is_satisfied().unwrap());
+        assert_eq!(result_var.value().unwrap(), native);
+    }
+}
diff --git a/arkworks/r1cs-std/src/poly/evaluations/mod.rs b/arkworks/r1cs-std/src/poly/evaluations/mod.rs
new file mode 100644
index 00000000..becd6c46
--- /dev/null
+++ b/arkworks/r1cs-std/src/poly/evaluations/mod.rs
@@ -0,0 +1 @@
+pub mod univariate;
diff --git a/arkworks/r1cs-std/src/poly/evaluations/univariate/lagrange_interpolator.rs b/arkworks/r1cs-std/src/poly/evaluations/univariate/lagrange_interpolator.rs
new file mode 100644
index 00000000..8acd68cf
--- /dev/null
+++ b/arkworks/r1cs-std/src/poly/evaluations/univariate/lagrange_interpolator.rs
@@ -0,0 +1,146 @@
+use crate::poly::domain::vanishing_poly::VanishingPolynomial;
+use ark_ff::{batch_inversion_and_mul, PrimeField};
+use ark_std::vec::Vec;
+
+/// Struct describing Lagrange interpolation for a multiplicative coset I,
+/// with |I| a power of 2.
+/// TODO: Pull in lagrange poly explanation from libiop
+#[derive(Clone)]
+pub struct LagrangeInterpolator<F: PrimeField> {
+    pub(crate) domain_order: usize,
+    pub(crate) all_domain_elems: Vec<F>,
+    pub(crate) v_inv_elems: Vec<F>,
+    pub(crate) domain_vp: VanishingPolynomial<F>,
+    poly_evaluations: Vec<F>,
+}
+
+impl<F: PrimeField> LagrangeInterpolator<F> {
+    /// Returns a Lagrange interpolator, given the domain specification.
+    pub fn new(
+        domain_offset: F,
+        domain_generator: F,
+        domain_dim: u64,
+        poly_evaluations: Vec<F>,
+    ) -> Self {
+        let domain_order = 1 << domain_dim;
+        assert_eq!(poly_evaluations.len(), domain_order);
+        let mut cur_elem = domain_offset;
+        let mut all_domain_elems = vec![domain_offset];
+        let mut v_inv_elems: Vec<F> = Vec::new();
+        // Cache all elements in the domain
+        for _ in 1..domain_order {
+            cur_elem *= domain_generator;
+            all_domain_elems.push(cur_elem);
+        }
+        /*
+        By computing the following elements as constants,
+        we can further reduce the interpolation costs.
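+
+        (The two facts below follow from x^m - 1 = prod_j (x - g^j): dividing
+        by (x - 1) and evaluating at x = 1 gives prod_{j != 0} (1 - g^j) = m,
+        which yields v_inv[0] = m * h^{m-1}; replacing g^i by g^{i+1} rescales
+        prod_{j != i} (g^i - g^j) by g^{-1}, which yields the recurrence.)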
+ + m = order of the interpolation domain + v_inv[i] = prod_{j != i} h(g^i - g^j) + We use the following facts to compute this: + v_inv[0] = m*h^{m-1} + v_inv[i] = g^{-1} * v_inv[i-1] + */ + // TODO: Include proof of the above two points + let g_inv = domain_generator.inverse().unwrap(); + let m = F::from((1 << domain_dim) as u128); + let mut v_inv_i = m * domain_offset.pow([(domain_order - 1) as u64]); + for _ in 0..domain_order { + v_inv_elems.push(v_inv_i); + v_inv_i *= g_inv; + } + + // TODO: Cache the intermediate terms with Z_H(x) evaluations. + let vp = VanishingPolynomial::new(domain_offset, domain_dim); + + let lagrange_interpolation: LagrangeInterpolator = LagrangeInterpolator { + domain_order, + all_domain_elems, + v_inv_elems, + domain_vp: vp, + poly_evaluations, + }; + lagrange_interpolation + } + + pub(crate) fn compute_lagrange_coefficients(&self, interpolation_point: F) -> Vec { + /* + * Let t be the interpolation point, H be the multiplicative coset, with elements of the form h*g^i. + Compute each L_{i,H}(t) as Z_{H}(t) * v_i / (t- h g^i) + where: + - Z_{H}(t) = \prod_{j} (t-h*g^j) = (t^m-h^m), and + - v_{i} = 1 / \prod_{j \neq i} h(g^i-g^j). + Below we use the fact that v_{0} = 1/(m * h^(m-1)) and v_{i+1} = g * v_{i}. + We first compute the inverse of each coefficient, except for the Z_H(t) term. + We then batch invert the entire result, and multiply by Z_H(t). + */ + let mut inverted_lagrange_coeffs: Vec = Vec::with_capacity(self.all_domain_elems.len()); + for i in 0..self.domain_order { + let l = self.v_inv_elems[i]; + let r = self.all_domain_elems[i]; + inverted_lagrange_coeffs.push(l * (interpolation_point - r)); + } + let vp_t = self.domain_vp.evaluate(&interpolation_point); + let lagrange_coeffs = inverted_lagrange_coeffs.as_mut_slice(); + batch_inversion_and_mul::(lagrange_coeffs, &vp_t); + lagrange_coeffs.iter().cloned().collect() + } + + pub fn interpolate(&self, interpolation_point: F) -> F { + let lagrange_coeffs = self.compute_lagrange_coefficients(interpolation_point); + let mut interpolation = F::zero(); + for i in 0..self.domain_order { + interpolation += lagrange_coeffs[i] * self.poly_evaluations[i]; + } + interpolation + } +} + +#[cfg(test)] +mod tests { + use crate::fields::fp::FpVar; + use crate::fields::FieldVar; + use crate::poly::domain::Radix2DomainVar; + use crate::poly::evaluations::univariate::lagrange_interpolator::LagrangeInterpolator; + use crate::R1CSVar; + use ark_ff::{FftField, Field, One}; + use ark_poly::univariate::DensePolynomial; + use ark_poly::{Polynomial, UVPolynomial}; + use ark_std::{test_rng, UniformRand}; + use ark_test_curves::bls12_381::Fr; + + #[test] + pub fn test_native_interpolate() { + let mut rng = test_rng(); + let poly = DensePolynomial::rand(15, &mut rng); + let gen = Fr::get_root_of_unity(1 << 4).unwrap(); + assert_eq!(gen.pow(&[1 << 4]), Fr::one()); + let domain = Radix2DomainVar { + gen, + offset: FpVar::constant(Fr::multiplicative_generator()), + dim: 4, // 2^4 = 16 + }; + // generate evaluations of `poly` on this domain + let mut coset_point = domain.offset.value().unwrap(); + let mut oracle_evals = Vec::new(); + for _ in 0..(1 << 4) { + oracle_evals.push(poly.evaluate(&coset_point)); + coset_point *= gen; + } + + let interpolator = LagrangeInterpolator::new( + domain.offset.value().unwrap(), + domain.gen, + domain.dim, + oracle_evals, + ); + + // the point to evaluate at + let interpolate_point = Fr::rand(&mut rng); + + let expected = poly.evaluate(&interpolate_point); + let actual = 
interpolator.interpolate(interpolate_point); + + assert_eq!(actual, expected) + } +} diff --git a/arkworks/r1cs-std/src/poly/evaluations/univariate/mod.rs b/arkworks/r1cs-std/src/poly/evaluations/univariate/mod.rs new file mode 100644 index 00000000..295adac8 --- /dev/null +++ b/arkworks/r1cs-std/src/poly/evaluations/univariate/mod.rs @@ -0,0 +1,464 @@ +pub mod lagrange_interpolator; + +use crate::alloc::AllocVar; +use crate::fields::fp::FpVar; +use crate::fields::FieldVar; +use crate::poly::domain::Radix2DomainVar; +use crate::poly::evaluations::univariate::lagrange_interpolator::LagrangeInterpolator; +use crate::R1CSVar; +use ark_ff::{batch_inversion, PrimeField}; +use ark_relations::r1cs::SynthesisError; +use ark_std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Sub, SubAssign}; +use ark_std::vec::Vec; + +#[derive(Clone)] +/// Stores a UV polynomial in evaluation form. +pub struct EvaluationsVar { + /// Evaluations of univariate polynomial over domain + pub evals: Vec>, + /// Optional Lagrange Interpolator. Useful for lagrange interpolation. + pub lagrange_interpolator: Option>, + domain: Radix2DomainVar, + /// Contains all domain elements of `domain.base_domain`. + /// + /// This is a cache for lagrange interpolation when offset is non-constant. Will be `None` if offset is constant + /// or `interpolate` is set to `false`. + subgroup_points: Option>, +} + +impl EvaluationsVar { + /// Construct `Self` from evaluations and a domain. + /// `interpolate` indicates if user wants to interpolate this polynomial + /// using lagrange interpolation. + pub fn from_vec_and_domain( + evaluations: Vec>, + domain: Radix2DomainVar, + interpolate: bool, + ) -> Self { + assert_eq!( + evaluations.len(), + 1 << domain.dim, + "evaluations and domain has different dimensions" + ); + + let mut ev = Self { + evals: evaluations, + lagrange_interpolator: None, + domain, + subgroup_points: None, + }; + if interpolate { + ev.generate_interpolation_cache(); + } + ev + } + + /// Precompute necessary calculation for lagrange interpolation and mark it ready to interpolate + pub fn generate_interpolation_cache(&mut self) { + if self.domain.offset.is_constant() { + let poly_evaluations_val: Vec<_> = + self.evals.iter().map(|v| v.value().unwrap()).collect(); + let domain = &self.domain; + let lagrange_interpolator = if let FpVar::Constant(x) = domain.offset { + LagrangeInterpolator::new(x, domain.gen, domain.dim, poly_evaluations_val) + } else { + panic!("Domain offset needs to be constant.") + }; + self.lagrange_interpolator = Some(lagrange_interpolator) + } else { + // calculate all elements of base subgroup so that in later part we don't need to calculate the exponents again + let mut subgroup_points = Vec::with_capacity(self.domain.size() as usize); + subgroup_points.push(F::one()); + for i in 1..self.domain.size() as usize { + subgroup_points.push(subgroup_points[i - 1] * self.domain.gen) + } + self.subgroup_points = Some(subgroup_points) + } + } + + /// Compute lagrange coefficients for each evaluation, given `interpolation_point`. + /// Only valid if the domain offset is constant. 
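+    /// Each coefficient is witnessed and then constrained to satisfy
+    /// `v_inv[i] * (t - h*g^i) * L_i(t) = Z_H(t)`, i.e.
+    /// `L_i(t) = Z_H(t) / (v_inv[i] * (t - h*g^i))`.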
+ fn compute_lagrange_coefficients( + &self, + interpolation_point: &FpVar, + ) -> Result>, SynthesisError> { + // ref: https://github.com/alexchmit/perfect-constraints/blob/79692f2652a95a57f2c7187f5b5276345e680230/fractal/src/algebra/lagrange_interpolation.rs#L159 + let cs = interpolation_point.cs(); + let t = interpolation_point; + let lagrange_interpolator = self + .lagrange_interpolator + .as_ref() + .expect("lagrange interpolator has not been initialized. \ + Call `self.generate_interpolation_cache` first or set `interpolate` to true in constructor. "); + let lagrange_coeffs = + lagrange_interpolator.compute_lagrange_coefficients(t.value().unwrap()); + let mut lagrange_coeffs_fg = Vec::new(); + // Now we convert these lagrange coefficients to gadgets, and then constrain them. + // The i-th lagrange coefficients constraint is: + // (v_inv[i] * t - v_inv[i] * domain_elem[i]) * (coeff) = 1/Z_I(t) + let vp_t = lagrange_interpolator.domain_vp.evaluate_constraints(t)?; + // let inv_vp_t = vp_t.inverse()?; + for i in 0..lagrange_interpolator.domain_order { + let constant: F = + (-lagrange_interpolator.all_domain_elems[i]) * lagrange_interpolator.v_inv_elems[i]; + let mut a_element: FpVar = + t * &FpVar::constant(lagrange_interpolator.v_inv_elems[i]); + a_element += FpVar::constant(constant); + + let lag_coeff: FpVar = + FpVar::new_witness(ns!(cs, "generate lagrange coefficient"), || { + Ok(lagrange_coeffs[i]) + })?; + // Enforce the actual constraint (A_element) * (lagrange_coeff) = 1/Z_I(t) + assert_eq!( + (lagrange_interpolator.v_inv_elems[i] * t.value().unwrap() + - lagrange_interpolator.v_inv_elems[i] + * lagrange_interpolator.all_domain_elems[i]) + * lagrange_coeffs[i], + vp_t.value().unwrap() + ); + a_element.mul_equals(&lag_coeff, &vp_t)?; + lagrange_coeffs_fg.push(lag_coeff); + } + Ok(lagrange_coeffs_fg) + } + + /// Returns constraints for Interpolating and evaluating at `interpolation_point` + pub fn interpolate_and_evaluate( + &self, + interpolation_point: &FpVar, + ) -> Result, SynthesisError> { + // specialize: if domain offset is constant, we can optimize to have fewer constraints + if self.domain.offset.is_constant() { + self.lagrange_interpolate_with_constant_offset(interpolation_point) + } else { + // if domain offset is not constant, then we use standard lagrange interpolation code + self.lagrange_interpolate_with_non_constant_offset(interpolation_point) + } + } + + fn lagrange_interpolate_with_constant_offset( + &self, + interpolation_point: &FpVar, + ) -> Result, SynthesisError> { + let lagrange_interpolator = self + .lagrange_interpolator + .as_ref() + .expect("lagrange interpolator has not been initialized. "); + let lagrange_coeffs = self.compute_lagrange_coefficients(interpolation_point)?; + let mut interpolation: FpVar = FpVar::zero(); + for i in 0..lagrange_interpolator.domain_order { + let intermediate = &lagrange_coeffs[i] * &self.evals[i]; + interpolation += &intermediate + } + + Ok(interpolation) + } + + /// Generate interpolation constraints. We assume at compile time we know the base coset (i.e. `gen`) but not know `offset`. + fn lagrange_interpolate_with_non_constant_offset( + &self, + interpolation_point: &FpVar, + ) -> Result, SynthesisError> { + // first, make sure `subgroup_points` is made + let subgroup_points = self.subgroup_points.as_ref() + .expect("lagrange interpolator has not been initialized. \ + Call `self.generate_interpolation_cache` first or set `interpolate` to true in constructor. "); + // Let denote interpolation_point as alpha. 
+ // Lagrange polynomial for coset element `a` is + // \frac{1}{size * offset ^ size} * \frac{alpha^size - offset^size}{alpha * a^{-1} - 1} + // Notice that a = (offset * a') where a' is the corresponding element of base coset + + // let `lhs` become \frac{alpha^size - offset^size}{size * offset ^ size}. This part is shared by all lagrange polynomials + let coset_offset_to_size = self.domain.offset.pow_by_constant(&[self.domain.size()])?; // offset^size + let alpha_to_s = interpolation_point.pow_by_constant(&[self.domain.size()])?; + let lhs_numerator = &alpha_to_s - &coset_offset_to_size; + let lhs_denominator = &coset_offset_to_size * FpVar::constant(F::from(self.domain.size())); + + let lhs = lhs_numerator.mul_by_inverse(&lhs_denominator)?; + + // `rhs` for coset element `a` is \frac{1}{alpha * a^{-1} - 1} = \frac{1}{alpha * offset^{-1} * a'^{-1} - 1} + let alpha_coset_offset_inv = interpolation_point.mul_by_inverse(&self.domain.offset)?; + + // `res` stores the sum of all lagrange polynomials evaluated at alpha + let mut res = FpVar::::zero(); + + let domain_size = self.domain.size() as usize; + for i in 0..domain_size { + // a'^{-1} where a is the base coset element + let subgroup_point_inv = subgroup_points[(domain_size - i) % domain_size]; + debug_assert_eq!(subgroup_points[i] * subgroup_point_inv, F::one()); + // alpha * offset^{-1} * a'^{-1} - 1 + let lag_donom = &alpha_coset_offset_inv * subgroup_point_inv - F::one(); + let lag_coeff = lhs.mul_by_inverse(&lag_donom)?; + + let lag_interpoland = &self.evals[i] * lag_coeff; + res += lag_interpoland + } + + Ok(res) + } +} + +impl<'a, 'b, F: PrimeField> Add<&'a EvaluationsVar> for &'b EvaluationsVar { + type Output = EvaluationsVar; + + fn add(self, rhs: &'a EvaluationsVar) -> Self::Output { + let mut result = self.clone(); + result += rhs; + result + } +} + +impl<'a, F: PrimeField> AddAssign<&'a EvaluationsVar> for EvaluationsVar { + /// Performs the `+=` operations, assuming `domain.offset` is equal. + fn add_assign(&mut self, other: &'a EvaluationsVar) { + // offset might be unknown at compile time, so we assume offset is equal + assert!( + self.domain.gen == other.domain.gen && self.domain.dim == other.domain.dim, + "domains are unequal" + ); + + self.lagrange_interpolator = None; + self.evals + .iter_mut() + .zip(&other.evals) + .for_each(|(a, b)| *a = &*a + b) + } +} + +impl<'a, 'b, F: PrimeField> Sub<&'a EvaluationsVar> for &'b EvaluationsVar { + type Output = EvaluationsVar; + + fn sub(self, rhs: &'a EvaluationsVar) -> Self::Output { + let mut result = self.clone(); + result -= rhs; + result + } +} + +impl<'a, F: PrimeField> SubAssign<&'a EvaluationsVar> for EvaluationsVar { + /// Performs the `-=` operations, assuming `domain.offset` is equal. + fn sub_assign(&mut self, other: &'a EvaluationsVar) { + // offset might be unknown at compile time, so we assume offset is equal + assert!( + self.domain.gen == other.domain.gen && self.domain.dim == other.domain.dim, + "domains are unequal" + ); + + self.lagrange_interpolator = None; + self.evals + .iter_mut() + .zip(&other.evals) + .for_each(|(a, b)| *a = &*a - b) + } +} + +impl<'a, 'b, F: PrimeField> Mul<&'a EvaluationsVar> for &'b EvaluationsVar { + type Output = EvaluationsVar; + + /// Performs the `*` operations, assuming `domain.offset` is equal. 
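+    ///
+    /// Note that pointwise multiplication of evaluations corresponds to
+    /// polynomial multiplication reduced modulo the vanishing polynomial
+    /// `Z_H(x) = x^|H| - offset^|H|`, since every point `a` of the coset
+    /// satisfies `a^|H| = offset^|H|`.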
+ fn mul(self, rhs: &'a EvaluationsVar) -> Self::Output { + let mut result = self.clone(); + result *= rhs; + result + } +} + +impl<'a, F: PrimeField> MulAssign<&'a EvaluationsVar> for EvaluationsVar { + /// Performs the `*=` operations, assuming `domain.offset` is equal. + fn mul_assign(&mut self, other: &'a EvaluationsVar) { + // offset might be unknown at compile time, so we assume offset is equal + assert!( + self.domain.gen == other.domain.gen && self.domain.dim == other.domain.dim, + "domains are unequal" + ); + + self.lagrange_interpolator = None; + self.evals + .iter_mut() + .zip(&other.evals) + .for_each(|(a, b)| *a = &*a * b) + } +} + +impl<'a, 'b, F: PrimeField> Div<&'a EvaluationsVar> for &'b EvaluationsVar { + type Output = EvaluationsVar; + + fn div(self, rhs: &'a EvaluationsVar) -> Self::Output { + let mut result = self.clone(); + result /= rhs; + result + } +} + +impl<'a, F: PrimeField> DivAssign<&'a EvaluationsVar> for EvaluationsVar { + /// Performs the `/=` operations, assuming `domain.offset` is equal. + fn div_assign(&mut self, other: &'a EvaluationsVar) { + // offset might be unknown at compile time, so we assume offset is equal + assert!( + self.domain.gen == other.domain.gen && self.domain.dim == other.domain.dim, + "domains are unequal" + ); + let cs = self.evals[0].cs(); + // the prover can generate result = (1 / other) * self offline + let mut result_val: Vec<_> = other.evals.iter().map(|x| x.value().unwrap()).collect(); + batch_inversion(&mut result_val); + result_val + .iter_mut() + .zip(&self.evals) + .for_each(|(a, self_var)| *a *= self_var.value().unwrap()); + let result_var: Vec<_> = result_val + .iter() + .map(|x| FpVar::new_witness(ns!(cs, "div result"), || Ok(*x)).unwrap()) + .collect(); + // enforce constraint + for i in 0..result_var.len() { + result_var[i] + .mul_equals(&other.evals[i], &self.evals[i]) + .unwrap(); + } + + self.lagrange_interpolator = None; + self.evals = result_var + } +} + +#[cfg(test)] +mod tests { + use crate::alloc::AllocVar; + use crate::fields::fp::FpVar; + use crate::fields::FieldVar; + use crate::poly::domain::Radix2DomainVar; + use crate::poly::evaluations::univariate::EvaluationsVar; + use crate::R1CSVar; + use ark_ff::{FftField, Field, One, UniformRand}; + use ark_poly::polynomial::univariate::DensePolynomial; + use ark_poly::{Polynomial, UVPolynomial}; + use ark_relations::r1cs::ConstraintSystem; + use ark_std::test_rng; + use ark_test_curves::bls12_381::Fr; + + #[test] + fn test_interpolate_constant_offset() { + let mut rng = test_rng(); + let poly = DensePolynomial::rand(15, &mut rng); + let gen = Fr::get_root_of_unity(1 << 4).unwrap(); + assert_eq!(gen.pow(&[1 << 4]), Fr::one()); + let domain = Radix2DomainVar { + gen, + offset: FpVar::constant(Fr::rand(&mut rng)), + dim: 4, // 2^4 = 16 + }; + let mut coset_point = domain.offset.value().unwrap(); + let mut oracle_evals = Vec::new(); + for _ in 0..(1 << 4) { + oracle_evals.push(poly.evaluate(&coset_point)); + coset_point *= gen; + } + let cs = ConstraintSystem::new_ref(); + let evaluations_fp: Vec<_> = oracle_evals + .iter() + .map(|x| FpVar::new_input(ns!(cs, "evaluations"), || Ok(x)).unwrap()) + .collect(); + let evaluations_var = EvaluationsVar::from_vec_and_domain(evaluations_fp, domain, true); + + let interpolate_point = Fr::rand(&mut rng); + let interpolate_point_fp = + FpVar::new_input(ns!(cs, "interpolate point"), || Ok(interpolate_point)).unwrap(); + + let expected = poly.evaluate(&interpolate_point); + + let actual = evaluations_var + 
.interpolate_and_evaluate(&interpolate_point_fp) + .unwrap() + .value() + .unwrap(); + + assert_eq!(actual, expected); + assert!(cs.is_satisfied().unwrap()); + println!("number of constraints: {}", cs.num_constraints()) + } + + #[test] + fn test_interpolate_non_constant_offset() { + let mut rng = test_rng(); + let poly = DensePolynomial::rand(15, &mut rng); + let gen = Fr::get_root_of_unity(1 << 4).unwrap(); + assert_eq!(gen.pow(&[1 << 4]), Fr::one()); + let cs = ConstraintSystem::new_ref(); + let domain = Radix2DomainVar { + gen, + offset: FpVar::new_witness(ns!(cs, "offset"), || Ok(Fr::rand(&mut rng))).unwrap(), + dim: 4, // 2^4 = 16 + }; + let mut coset_point = domain.offset.value().unwrap(); + let mut oracle_evals = Vec::new(); + for _ in 0..(1 << 4) { + oracle_evals.push(poly.evaluate(&coset_point)); + coset_point *= gen; + } + + let evaluations_fp: Vec<_> = oracle_evals + .iter() + .map(|x| FpVar::new_input(ns!(cs, "evaluations"), || Ok(x)).unwrap()) + .collect(); + let evaluations_var = EvaluationsVar::from_vec_and_domain(evaluations_fp, domain, true); + + let interpolate_point = Fr::rand(&mut rng); + let interpolate_point_fp = + FpVar::new_input(ns!(cs, "interpolate point"), || Ok(interpolate_point)).unwrap(); + + let expected = poly.evaluate(&interpolate_point); + + let actual = evaluations_var + .interpolate_and_evaluate(&interpolate_point_fp) + .unwrap() + .value() + .unwrap(); + + assert_eq!(actual, expected); + assert!(cs.is_satisfied().unwrap()); + println!("number of constraints: {}", cs.num_constraints()) + } + + #[test] + fn test_division() { + let mut rng = test_rng(); + let gen = Fr::get_root_of_unity(1 << 4).unwrap(); + assert_eq!(gen.pow(&[1 << 4]), Fr::one()); + let domain = Radix2DomainVar { + gen, + offset: FpVar::constant(Fr::multiplicative_generator()), + dim: 4, // 2^4 = 16 + }; + + let cs = ConstraintSystem::new_ref(); + + let ev_a = EvaluationsVar::from_vec_and_domain( + (0..16) + .map(|_| FpVar::new_input(ns!(cs, "poly_a"), || Ok(Fr::rand(&mut rng))).unwrap()) + .collect(), + domain.clone(), + false, + ); + let ev_b = EvaluationsVar::from_vec_and_domain( + (0..16) + .map(|_| FpVar::new_input(ns!(cs, "poly_a"), || Ok(Fr::rand(&mut rng))).unwrap()) + .collect(), + domain.clone(), + false, + ); + + let a_div_b = (&ev_a) / (&ev_b); + assert!(cs.is_satisfied().unwrap()); + let b_div_a = (&ev_b) / (&ev_a); + + let one = &a_div_b * &b_div_a; + for ev in one.evals.iter() { + assert!(Fr::is_one(&ev.value().unwrap())) + } + + assert!(cs.is_satisfied().unwrap()); + } +} diff --git a/arkworks/r1cs-std/src/poly/mod.rs b/arkworks/r1cs-std/src/poly/mod.rs new file mode 100644 index 00000000..f17cb273 --- /dev/null +++ b/arkworks/r1cs-std/src/poly/mod.rs @@ -0,0 +1,4 @@ +pub mod domain; +pub mod evaluations; +/// Modules for working with polynomials in coefficient forms. +pub mod polynomial; diff --git a/arkworks/r1cs-std/src/poly/polynomial/mod.rs b/arkworks/r1cs-std/src/poly/polynomial/mod.rs new file mode 100644 index 00000000..f5a0037d --- /dev/null +++ b/arkworks/r1cs-std/src/poly/polynomial/mod.rs @@ -0,0 +1,2 @@ +/// Module defining data structures for univariate polynomials. 
+pub mod univariate;
diff --git a/arkworks/r1cs-std/src/poly/polynomial/univariate/dense.rs b/arkworks/r1cs-std/src/poly/polynomial/univariate/dense.rs
new file mode 100644
index 00000000..04e17fda
--- /dev/null
+++ b/arkworks/r1cs-std/src/poly/polynomial/univariate/dense.rs
@@ -0,0 +1,78 @@
+use ark_ff::PrimeField;
+use ark_relations::r1cs::SynthesisError;
+
+use crate::fields::fp::FpVar;
+use crate::fields::FieldVar;
+use ark_std::vec::Vec;
+
+/// Stores a polynomial in coefficient form, where each coefficient is
+/// represented by an `FpVar`.
+pub struct DensePolynomialVar<F: PrimeField> {
+    /// The coefficient of `x^i` is stored at location `i` in `self.coeffs`.
+    pub coeffs: Vec<FpVar<F>>,
+}
+
+impl<F: PrimeField> DensePolynomialVar<F> {
+    /// Constructs a new polynomial from a list of coefficients.
+    pub fn from_coefficients_slice(coeffs: &[FpVar<F>]) -> Self {
+        Self::from_coefficients_vec(coeffs.to_vec())
+    }
+
+    /// Constructs a new polynomial from a list of coefficients.
+    pub fn from_coefficients_vec(coeffs: Vec<FpVar<F>>) -> Self {
+        Self { coeffs }
+    }
+
+    /// Evaluates `self` at the given `point` and just gives you the gadget for the result.
+    /// Caution for use in holographic lincheck: The output has 2 entries in one matrix
+    pub fn evaluate(&self, point: &FpVar<F>) -> Result<FpVar<F>, SynthesisError> {
+        let mut result: FpVar<F> = FpVar::zero();
+        // current power of point
+        let mut curr_pow_x: FpVar<F> = FpVar::one();
+        for i in 0..self.coeffs.len() {
+            let term = &curr_pow_x * &self.coeffs[i];
+            result += &term;
+            curr_pow_x *= point;
+        }
+
+        Ok(result)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::alloc::AllocVar;
+    use crate::fields::fp::FpVar;
+    use crate::poly::polynomial::univariate::dense::DensePolynomialVar;
+    use crate::R1CSVar;
+    use ark_poly::polynomial::univariate::DensePolynomial;
+    use ark_poly::{Polynomial, UVPolynomial};
+    use ark_relations::{ns, r1cs::ConstraintSystem};
+    use ark_std::vec::Vec;
+    use ark_std::{test_rng, UniformRand};
+    use ark_test_curves::bls12_381::Fr;
+
+    #[test]
+    fn test_evaluate() {
+        let mut rng = test_rng();
+        for _ in 0..100 {
+            let cs = ConstraintSystem::new_ref();
+            let poly: DensePolynomial<Fr> = DensePolynomial::rand(10, &mut rng);
+            let poly_var = {
+                let coeff: Vec<_> = poly
+                    .coeffs
+                    .iter()
+                    .map(|&x| FpVar::new_witness(ns!(cs, "coeff"), || Ok(x)).unwrap())
+                    .collect();
+                DensePolynomialVar::from_coefficients_vec(coeff)
+            };
+            let point = Fr::rand(&mut rng);
+            let point_var = FpVar::new_witness(ns!(cs, "point"), || Ok(point)).unwrap();
+
+            let expected = poly.evaluate(&point);
+            let actual = poly_var.evaluate(&point_var).unwrap();
+
+            assert_eq!(actual.value().unwrap(), expected);
+            assert!(cs.is_satisfied().unwrap());
+        }
+    }
+}
diff --git a/arkworks/r1cs-std/src/poly/polynomial/univariate/mod.rs b/arkworks/r1cs-std/src/poly/polynomial/univariate/mod.rs
new file mode 100644
index 00000000..746c1cd0
--- /dev/null
+++ b/arkworks/r1cs-std/src/poly/polynomial/univariate/mod.rs
@@ -0,0 +1,2 @@
+/// A dense univariate polynomial represented in coefficient form.
+pub mod dense;
diff --git a/arkworks/r1cs-std/src/select.rs b/arkworks/r1cs-std/src/select.rs
new file mode 100644
index 00000000..bbc2c3c9
--- /dev/null
+++ b/arkworks/r1cs-std/src/select.rs
@@ -0,0 +1,117 @@
+use crate::prelude::*;
+use ark_ff::Field;
+use ark_relations::r1cs::SynthesisError;
+use ark_std::vec::Vec;
+
+/// Generates constraints for selecting between one of two values.
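+///
+/// For `FpVar`, `conditionally_select` costs a single constraint when `cond`
+/// is not a constant: it enforces
+/// `result - false_value = cond * (true_value - false_value)`.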
+pub trait CondSelectGadget<ConstraintF: Field>
+where
+    Self: Sized,
+    Self: Clone,
+{
+    /// If `cond == &Boolean::TRUE`, then this returns `true_value`; else,
+    /// returns `false_value`.
+    ///
+    /// # Note
+    /// `Self::conditionally_select(cond, true_value, false_value)?` can be more
+    /// succinctly written as `cond.select(&true_value, &false_value)?`.
+    fn conditionally_select(
+        cond: &Boolean<ConstraintF>,
+        true_value: &Self,
+        false_value: &Self,
+    ) -> Result<Self, SynthesisError>;
+
+    /// Returns the element of `values` whose index is represented by `position`.
+    /// `position` is an array of booleans that represents an unsigned integer
+    /// in big endian order.
+    ///
+    /// # Example
+    /// To get the element of `values` at index 6, convert unsigned integer 6 (`0b110`) to `position = [True, True, False]`,
+    /// and call `conditionally_select_power_of_two_vector(position, values)`.
+    fn conditionally_select_power_of_two_vector(
+        position: &[Boolean<ConstraintF>],
+        values: &[Self],
+    ) -> Result<Self, SynthesisError> {
+        let m = values.len();
+        let n = position.len();
+
+        // Assert m is a power of 2, and n = log(m)
+        assert!(m.is_power_of_two());
+        assert_eq!(1 << n, m);
+
+        let mut cur_mux_values = values.to_vec();
+
+        // Traverse the evaluation tree from bottom to top in level order traversal.
+        // This is method 5.1 from https://github.com/mir-protocol/r1cs-workshop/blob/master/workshop.pdf
+        // TODO: Add method 5.2/5.3
+        for i in 0..n {
+            // Size of current layer.
+            let cur_size = 1 << (n - i);
+            assert_eq!(cur_mux_values.len(), cur_size);
+
+            let mut next_mux_values = Vec::new();
+            for j in (0..cur_size).step_by(2) {
+                let cur = Self::conditionally_select(
+                    &position[n - 1 - i],
+                    // true case
+                    &cur_mux_values[j + 1],
+                    // false case
+                    &cur_mux_values[j],
+                )?;
+                next_mux_values.push(cur);
+            }
+            cur_mux_values = next_mux_values;
+        }
+
+        Ok(cur_mux_values[0].clone())
+    }
+}
+
+/// Performs a lookup in a 4-element table using two bits.
+pub trait TwoBitLookupGadget<ConstraintF: Field>
+where
+    Self: Sized,
+{
+    /// The type of values being looked up.
+    type TableConstant;
+
+    /// Interprets the slice `bits` as a two-bit integer `b = bits[0] + (bits[1]
+    /// << 1)`, and then outputs `constants[b]`.
+    ///
+    /// For example, if `bits == [0, 1]`, and `constants == [0, 1, 2, 3]`, this
+    /// method should output a variable corresponding to `2`.
+    ///
+    /// # Panics
+    ///
+    /// This method panics if `bits.len() != 2` or `constants.len() != 4`.
+    fn two_bit_lookup(
+        bits: &[Boolean<ConstraintF>],
+        constants: &[Self::TableConstant],
+    ) -> Result<Self, SynthesisError>;
+}
+
+/// Uses three bits to perform a lookup into a table, where the last bit
+/// conditionally negates the looked-up value.
+pub trait ThreeBitCondNegLookupGadget<ConstraintF: Field>
+where
+    Self: Sized,
+{
+    /// The type of values being looked up.
+    type TableConstant;
+
+    /// Interprets the slice `bits` as a two-bit integer `b = bits[0] + (bits[1]
+    /// << 1)`, and then outputs `constants[b] * c`, where `c = if bits[2] {
+    /// -1 } else { 1 };`.
+    ///
+    /// That is, `bits[2]` conditionally negates the looked-up value.
+    ///
+    /// For example, if `bits == [1, 0, 1]`, and `constants == [0, 1, 2, 3]`,
+    /// this method should output a variable corresponding to `-1`.
+    ///
+    /// # Panics
+    ///
+    /// This method panics if `bits.len() != 3` or `constants.len() != 4`.
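+    ///
+    /// (`b0b1` is expected to be the precomputed AND of `bits[0]` and
+    /// `bits[1]`, so that callers performing many lookups with the same two
+    /// low bits can share that product.)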
+    fn three_bit_cond_neg_lookup(
+        bits: &[Boolean<ConstraintF>],
+        b0b1: &Boolean<ConstraintF>,
+        constants: &[Self::TableConstant],
+    ) -> Result<Self, SynthesisError>;
+}
diff --git a/arkworks/snark/.github/ISSUE_TEMPLATE/bug_report.md b/arkworks/snark/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 00000000..e01ca941
--- /dev/null
+++ b/arkworks/snark/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,25 @@
+---
+name: Bug Report
+about: Create a report to help us squash bugs!
+
+---
+
+## Summary of Bug
+
+
+
+## Version
+
+
+
+## Steps to Reproduce
+
+
+
+
diff --git a/arkworks/snark/.github/ISSUE_TEMPLATE/feature_request.md b/arkworks/snark/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 00000000..7d5ed5df
--- /dev/null
+++ b/arkworks/snark/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,35 @@
+---
+name: Feature Request
+about: Create a proposal to request a feature
+
+---
+
+
+
+## Summary
+
+
+
+## Problem Definition
+
+
+
+## Proposal
+
+
+
+____
+
+#### For Admin Use
+
+- [ ] Not duplicate issue
+- [ ] Appropriate labels applied
+- [ ] Appropriate contributors tagged
+- [ ] Contributor assigned/self-assigned
diff --git a/arkworks/snark/.github/PULL_REQUEST_TEMPLATE.md b/arkworks/snark/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 00000000..37f2f6c1
--- /dev/null
+++ b/arkworks/snark/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,26 @@
+
+
+## Description
+
+
+
+closes: #XXXX
+
+---
+
+Before we can merge this PR, please make sure that all the following items have been
+checked off. If any of the checklist items are not applicable, please leave them but
+write a little note why.
+
+- [ ] Targeted PR against correct branch (master)
+- [ ] Linked to Github issue with discussion and accepted design OR have an explanation in the PR that describes this work.
+- [ ] Wrote unit tests +- [ ] Updated relevant documentation in the code +- [ ] Added a relevant changelog entry to the `Pending` section in `CHANGELOG.md` +- [ ] Re-reviewed `Files changed` in the Github PR explorer diff --git a/arkworks/snark/.github/workflows/ci.yml b/arkworks/snark/.github/workflows/ci.yml new file mode 100644 index 00000000..d916ccc0 --- /dev/null +++ b/arkworks/snark/.github/workflows/ci.yml @@ -0,0 +1,112 @@ +name: CI +on: + pull_request: + push: + branches: + - master +env: + RUST_BACKTRACE: 1 + +jobs: + style: + name: Check Style + runs-on: ubuntu-latest + steps: + + - name: Checkout + uses: actions/checkout@v1 + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + components: rustfmt + + - name: cargo fmt --check + uses: actions-rs/cargo@v1 + with: + command: fmt + args: --all -- --check + + test: + name: Test + runs-on: ubuntu-latest + env: + RUSTFLAGS: -Dwarnings + strategy: + matrix: + rust: + - stable + - nightly + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Install Rust (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: ${{ matrix.rust }} + override: true + + - uses: actions/cache@v2 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: Check examples + uses: actions-rs/cargo@v1 + with: + command: check + args: --examples --workspace + + - name: Check examples with all features on stable + uses: actions-rs/cargo@v1 + with: + command: check + args: --examples --all-features --workspace + if: matrix.rust == 'stable' + + - name: Check benchmarks on nightly + uses: actions-rs/cargo@v1 + with: + command: check + args: --all-features --examples --workspace --benches + if: matrix.rust == 'nightly' + + - name: Test + uses: actions-rs/cargo@v1 + with: + command: test + args: "--workspace \ + --all-features" + + check_no_std: + name: Check no_std + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Install Rust (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + target: aarch64-unknown-none + override: true + + - name: Check + uses: actions-rs/cargo@v1 + with: + command: check + args: --examples --workspace --target aarch64-unknown-none + + - name: Build + uses: actions-rs/cargo@v1 + with: + command: build + args: --workspace --target aarch64-unknown-none diff --git a/arkworks/snark/.github/workflows/linkify_changelog.yml b/arkworks/snark/.github/workflows/linkify_changelog.yml new file mode 100644 index 00000000..8f3086e0 --- /dev/null +++ b/arkworks/snark/.github/workflows/linkify_changelog.yml @@ -0,0 +1,20 @@ +name: Linkify Changelog + +on: + workflow_dispatch + +jobs: + linkify: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Add links + run: python3 scripts/linkify_changelog.py CHANGELOG.md + - name: Commit + run: | + git config user.name github-actions + git config user.email github-actions@github.com + git add . 
+ git commit -m "Linkify Changelog" + git push diff --git a/arkworks/snark/.gitignore b/arkworks/snark/.gitignore new file mode 100644 index 00000000..9b5e101e --- /dev/null +++ b/arkworks/snark/.gitignore @@ -0,0 +1,11 @@ +target +Cargo.lock +.DS_Store +.idea +*.iml +*.ipynb_checkpoints +*.pyc +*.sage.py +params +*.swp +*.swo diff --git a/arkworks/snark/.hooks/pre-commit b/arkworks/snark/.hooks/pre-commit new file mode 100755 index 00000000..8d4d19fe --- /dev/null +++ b/arkworks/snark/.hooks/pre-commit @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +rustfmt --version &>/dev/null +if [ $? != 0 ]; then + printf "[pre_commit] \033[0;31merror\033[0m: \"rustfmt\" not available. \n" + printf "[pre_commit] \033[0;31merror\033[0m: rustfmt can be installed via - \n" + printf "[pre_commit] $ rustup component add rustfmt \n" + exit 1 +fi + +problem_files=() + +# collect ill-formatted files +for file in $(git diff --name-only --cached); do + if [ ${file: -3} == ".rs" ]; then + rustfmt +stable --check $file &>/dev/null + if [ $? != 0 ]; then + problem_files+=($file) + fi + fi +done + +if [ ${#problem_files[@]} == 0 ]; then + # done + printf "[pre_commit] rustfmt \033[0;32mok\033[0m \n" +else + # reformat the files that need it and re-stage them. + printf "[pre_commit] the following files were rustfmt'd before commit: \n" + for file in ${problem_files[@]}; do + rustfmt +stable $file + git add $file + printf "\033[0;32m $file\033[0m \n" + done +fi + +exit 0 diff --git a/arkworks/snark/CHANGELOG.md b/arkworks/snark/CHANGELOG.md new file mode 100644 index 00000000..bd897f81 --- /dev/null +++ b/arkworks/snark/CHANGELOG.md @@ -0,0 +1,42 @@ +# CHANGELOG + +## Pending + +### Breaking changes + +### Features + +### Improvements + +### Bug fixes + +## v0.3.0 + +### Breaking changes + +### Features +- [\#347](https://github.com/arkworks-rs/snark/pull/347) Add `into_inner` function for `ConstraintSystemRef`. + +### Improvements + +### Bug fixes + +## v0.2.0 + +### Breaking changes +- [\#334](https://github.com/arkworks-rs/snark/pull/334) Outlining linear combinations is now specified via the optimization goal interface. + +### Features + +### Improvements +- [\#325](https://github.com/arkworks-rs/snark/pull/325) Reduce memory consumption during inlining + +### Bug fixes +- [\#340](https://github.com/arkworks-rs/snark/pull/340) Compile with `panic='abort'` in release mode, for safety of the library across FFI boundaries. + +## v0.1.0 + +This tag corresponds to the old `zexe` codebase. +After this release, all of the code has been split up into +more modular repositories in the github organization `arkworks-rs`. +See #320 for guides in migration of old codebases. diff --git a/arkworks/snark/CONTRIBUTING.md b/arkworks/snark/CONTRIBUTING.md new file mode 100644 index 00000000..0fdd6f38 --- /dev/null +++ b/arkworks/snark/CONTRIBUTING.md @@ -0,0 +1,65 @@ +# Contributing + +Thank you for considering making contributions to `arkworks-rs/snark`! + +Contributing to this repo can be done in several forms, such as participating in discussion or proposing code changes. +To ensure a smooth workflow for all contributors, the following general procedure for contributing has been established: + +1) Either open or find an issue you'd like to help with +2) Participate in thoughtful discussion on that issue +3) If you would like to contribute: + * If the issue is a feature proposal, ensure that the proposal has been accepted + * Ensure that nobody else has already begun working on this issue. 
If they have, please try to contact them to collaborate
+ * If nobody has been assigned the issue and you would like to work on it, make a comment on the issue to inform the community of your intention to begin work (so we can avoid duplication of effort)
+ * We suggest using standard Github best practices for contributing: fork the repo, branch from the HEAD of `master`, make some commits on your branch, and submit a PR from the branch to `master`.
+ More detail on this is below
+ * Be sure to include a relevant change log entry in the Pending section of CHANGELOG.md (see file for log format)
+ * If the change is breaking, we may add migration instructions.
+
+Note that for very small or clear problems (such as typos), or well-isolated improvements, it is not required to open an issue before submitting a PR.
+But be aware that for more complex problems/features touching multiple parts of the codebase, if a PR is opened before an adequate design discussion has taken place in a github issue, that PR runs a higher risk of being rejected.
+
+Looking for a good place to start contributing? How about checking out some good first issues
+
+## Branch Structure
+
+`snark` has its default branch as `master`, which is where PRs are merged into. Releases will be periodically made, on no set schedule.
+All other branches should be assumed to be miscellaneous feature development branches.
+
+All downstream users of the library should be using tagged versions of the library pulled from cargo.
+
+## How to work on a fork
+
+Please skip this section if you're familiar with contributing to open-source GitHub projects.
+
+First fork the repo from the github UI, and clone it locally.
+Then in the repo, you want to add the repo you forked from as a new remote. You do this as:
+```bash
+git remote add upstream git@github.com:arkworks-rs/snark.git
+```
+
+Then the way you make code contributions is to first think of a branch name that describes your change.
+Then do the following:
+```bash
+git checkout master
+git pull upstream master
+git checkout -b $NEW_BRANCH_NAME
+```
+and then work as normal on that branch, and pull request to upstream master when you're done =)
+
+## Updating documentation
+
+All PRs should aim to leave the code more documented than it started with.
+Please don't assume that it's easy to infer what the code is doing,
+as that is usually not the case for these complex protocols.
+(Even when you understand the paper!)
+
+It's often very useful to describe the high-level view of what a code block is doing,
+and either refer to the relevant section of a paper or include a short proof/argument for why it makes sense before the actual logic.
+
+## Performance improvements
+
+All performance improvements should be accompanied by benchmarks demonstrating the improvement, or it should otherwise be clear that things have improved.
+For some areas of the codebase, performance roughly follows the number of field multiplications, but there are also many areas where
+hard-to-predict low-level system effects such as cache locality and superscalar operations become important for performance.
+Thus performance can often become very non-intuitive / diverge from minimizing the number of arithmetic operations.
\ No newline at end of file diff --git a/arkworks/snark/Cargo.toml b/arkworks/snark/Cargo.toml new file mode 100644 index 00000000..8a32f6b0 --- /dev/null +++ b/arkworks/snark/Cargo.toml @@ -0,0 +1,31 @@ +[workspace] + +members = [ + "relations", + "snark", +] + +[profile.release] +opt-level = 3 +lto = "thin" +incremental = true +panic = 'abort' + +[profile.bench] +opt-level = 3 +debug = false +rpath = false +lto = "thin" +incremental = true +debug-assertions = false + +[profile.dev] +opt-level = 0 +panic = 'abort' + +[profile.test] +opt-level = 3 +lto = "thin" +incremental = true +debug-assertions = true +debug = true diff --git a/arkworks/snark/LICENSE-APACHE b/arkworks/snark/LICENSE-APACHE new file mode 100644 index 00000000..16fe87b0 --- /dev/null +++ b/arkworks/snark/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/arkworks/snark/LICENSE-MIT b/arkworks/snark/LICENSE-MIT new file mode 100644 index 00000000..72dc60d8 --- /dev/null +++ b/arkworks/snark/LICENSE-MIT @@ -0,0 +1,19 @@ +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/arkworks/snark/README.md b/arkworks/snark/README.md new file mode 100644 index 00000000..d1dd23c8 --- /dev/null +++ b/arkworks/snark/README.md @@ -0,0 +1,66 @@ +

+<h1 align="center">SNARK and Relation Traits</h1>
+
+<p align="center"><!-- CI status and license badges appeared here --></p>
+
+The arkworks ecosystem consists of Rust libraries for designing and working with __zero knowledge succinct non-interactive arguments (zkSNARKs)__. This repository contains efficient libraries that describe interfaces for zkSNARKs, as well as interfaces for programming them.
+
+This library is released under the MIT License and the Apache v2 License (see [License](#license)).
+
+**WARNING:** This is an academic proof-of-concept prototype, and in particular has not received careful code review. This implementation is NOT ready for production use.
+
+## Directory structure
+
+This repository contains two Rust crates:
+
+* [`ark-snark`](snark): Provides generic traits for zkSNARKs
+* [`ark-relations`](relations): Provides generic traits for NP relations used in programming zkSNARKs, such as R1CS
+
+## Overview
+
+This repository provides the core infrastructure for using the succinct argument systems that arkworks provides. Users who want to produce arguments about various problems of interest will first reduce those problems to an NP relation, various examples of which are defined in the `ark-relations` crate. Then a SNARK system defined over that relation is used to produce a succinct argument. The `ark-snark` crate defines a `SNARK` trait that encapsulates the general functionality, as well as specific traits for various types of SNARK (those with transparent and universal setup, for instance). Different repositories within the arkworks ecosystem implement this trait for various specific SNARK constructions, such as [Groth16](https://github.com/arkworks-rs/groth16), [GM17](https://github.com/arkworks-rs/gm17), and [Marlin](https://github.com/arkworks-rs/marlin).
+
+## Build guide
+
+The library compiles on the `stable` toolchain of the Rust compiler. To install the latest version of Rust, first install `rustup` by following the instructions [here](https://rustup.rs/), or via your platform's package manager. Once `rustup` is installed, install the Rust toolchain by invoking:
+```bash
+rustup install stable
+```
+
+After that, use `cargo`, the standard Rust build tool, to build the libraries:
+```bash
+git clone https://github.com/arkworks-rs/snark.git
+cd snark
+cargo build --release
+```
+
+## Tests
+
+This library comes with comprehensive unit and integration tests for each of the provided crates. Run the tests with:
+```bash
+cargo test --all
+```
+
+## License
+
+The crates in this repo are licensed under either of the following licenses, at your discretion.
+
+ * Apache License Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
+ * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
+
+Unless you explicitly state otherwise, any contribution submitted for inclusion in this library by you shall be dual licensed as above (as defined in the Apache v2 License), without any additional terms or conditions.
+
+[zexe]: https://ia.cr/2018/962
+
+## Acknowledgements
+
+This work was supported by:
+a Google Faculty Award;
+the National Science Foundation;
+the UC Berkeley Center for Long-Term Cybersecurity;
+and donations from the Ethereum Foundation, the Interchain Foundation, and Qtum.
+
+An earlier version of this library was developed as part of the paper *"[ZEXE: Enabling Decentralized Private Computation][zexe]"*.
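To make the `SNARK` trait workflow described above concrete, here is a minimal sketch of how a downstream user might drive a concrete implementation. It is not part of this repository; it assumes the `ark-groth16` and `ark-bls12-377` crates, and a caller-supplied circuit type implementing `ConstraintSynthesizer<Fr>`:

```rust
use ark_bls12_377::{Bls12_377, Fr};
use ark_groth16::Groth16;
use ark_relations::r1cs::ConstraintSynthesizer;
use ark_snark::SNARK;

// `C` is any caller-supplied circuit type; `public_input` is its instance
// part, in the order the circuit allocated its input variables.
fn prove_and_verify<C: ConstraintSynthesizer<Fr> + Clone>(circuit: C, public_input: &[Fr]) {
    let mut rng = ark_std::test_rng();
    // Circuit-specific setup: produces the proving and verifying keys.
    let (pk, vk) =
        Groth16::<Bls12_377>::circuit_specific_setup(circuit.clone(), &mut rng).unwrap();
    // Produce a succinct argument that the circuit is satisfiable.
    let proof = Groth16::<Bls12_377>::prove(&pk, circuit, &mut rng).unwrap();
    // Verify it against the public input.
    assert!(Groth16::<Bls12_377>::verify(&vk, public_input, &proof).unwrap());
}
```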
diff --git a/arkworks/snark/relations/Cargo.toml b/arkworks/snark/relations/Cargo.toml
new file mode 100644
index 00000000..047ae848
--- /dev/null
+++ b/arkworks/snark/relations/Cargo.toml
@@ -0,0 +1,26 @@
+[package]
+name = "ark-relations"
+version = "0.3.0"
+authors = [ "arkworks contributors" ]
+description = "A library for rank-one constraint systems"
+homepage = "https://arkworks.rs"
+repository = "https://github.com/arkworks-rs/snark"
+documentation = "https://docs.rs/ark-relations/"
+keywords = ["zero-knowledge", "cryptography", "zkSNARK", "SNARK", "constraint-systems"]
+categories = ["cryptography"]
+include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"]
+license = "MIT/Apache-2.0"
+edition = "2018"
+
+[dependencies]
+ark-ff = { path = "../../algebra/ff", version = "^0.3.0", default-features = false }
+ark-std = { path = "../../std", version = "^0.3.0", default-features = false }
+tracing = { version = "0.1", default-features = false }
+tracing-subscriber = { version = "0.2", default-features = false, optional = true }
+
+[dev-dependencies]
+ark-test-curves = { version = "^0.3.0", default-features = false, features = [ "bls12_381_scalar_field" ] }
+
+[features]
+default = []
+std = [ "ark-std/std", "ark-ff/std", "tracing-subscriber", "tracing/std" ]
diff --git a/arkworks/snark/relations/LICENSE-APACHE b/arkworks/snark/relations/LICENSE-APACHE
new file mode 120000
index 00000000..965b606f
--- /dev/null
+++ b/arkworks/snark/relations/LICENSE-APACHE
@@ -0,0 +1 @@
+../LICENSE-APACHE
\ No newline at end of file
diff --git a/arkworks/snark/relations/LICENSE-MIT b/arkworks/snark/relations/LICENSE-MIT
new file mode 120000
index 00000000..76219eb7
--- /dev/null
+++ b/arkworks/snark/relations/LICENSE-MIT
@@ -0,0 +1 @@
+../LICENSE-MIT
\ No newline at end of file
diff --git a/arkworks/snark/relations/src/lib.rs b/arkworks/snark/relations/src/lib.rs
new file mode 100644
index 00000000..cf249f77
--- /dev/null
+++ b/arkworks/snark/relations/src/lib.rs
@@ -0,0 +1,18 @@
+//! Core interface for working with various relations that are useful in
+//! zkSNARKs. At the moment, we only implement APIs for working with Rank-1
+//! Constraint Systems (R1CS).
+
+#![cfg_attr(not(feature = "std"), no_std)]
+#![warn(
+    unused,
+    future_incompatible,
+    nonstandard_style,
+    rust_2018_idioms,
+    missing_docs
+)]
+#![deny(unsafe_code)]
+
+#[macro_use]
+extern crate ark_std;
+
+pub mod r1cs;
diff --git a/arkworks/snark/relations/src/r1cs/constraint_system.rs b/arkworks/snark/relations/src/r1cs/constraint_system.rs
new file mode 100644
index 00000000..f3e7bd5e
--- /dev/null
+++ b/arkworks/snark/relations/src/r1cs/constraint_system.rs
@@ -0,0 +1,1056 @@
+#[cfg(feature = "std")]
+use crate::r1cs::ConstraintTrace;
+use crate::r1cs::{LcIndex, LinearCombination, Matrix, SynthesisError, Variable};
+use ark_ff::Field;
+use ark_std::{
+    any::{Any, TypeId},
+    boxed::Box,
+    cell::{Ref, RefCell, RefMut},
+    collections::BTreeMap,
+    format,
+    rc::Rc,
+    string::String,
+    vec,
+    vec::Vec,
+};
+
+/// Computations are expressed in terms of rank-1 constraint systems (R1CS).
+/// The `generate_constraints` method is called to generate constraints for
+/// both CRS generation and for proving.
+// TODO: Think: should we replace this with just a closure?
+pub trait ConstraintSynthesizer<F: Field> {
+    /// Drives generation of new constraints inside `cs`.
+    fn generate_constraints(self, cs: ConstraintSystemRef<F>) -> crate::r1cs::Result<()>;
+}
+
+/// A Rank-One `ConstraintSystem`. Enforces constraints of the form
+/// `⟨a_i, z⟩ ⋅ ⟨b_i, z⟩ = ⟨c_i, z⟩`, where `a_i`, `b_i`, and `c_i` are linear
+/// combinations over variables, and `z` is the concrete assignment to these
+/// variables.
+#[derive(Debug, Clone)]
+pub struct ConstraintSystem<F: Field> {
+    /// The mode in which the constraint system is operating. `self` can either
+    /// be in setup mode (i.e., `self.mode == SynthesisMode::Setup`) or in
+    /// proving mode (i.e., `self.mode == SynthesisMode::Prove`). If we are
+    /// in proving mode, then we have the additional option of whether or
+    /// not to construct the A, B, and C matrices of the constraint system
+    /// (see below).
+    pub mode: SynthesisMode,
+    /// The number of variables that are "public inputs" to the constraint
+    /// system.
+    pub num_instance_variables: usize,
+    /// The number of variables that are "private inputs" to the constraint
+    /// system.
+    pub num_witness_variables: usize,
+    /// The number of constraints in the constraint system.
+    pub num_constraints: usize,
+    /// The number of linear combinations.
+    pub num_linear_combinations: usize,
+
+    /// The parameter we aim to minimize in this constraint system (either the
+    /// number of constraints or their total weight).
+    pub optimization_goal: OptimizationGoal,
+
+    /// Assignments to the public input variables. This is empty if `self.mode
+    /// == SynthesisMode::Setup`.
+    pub instance_assignment: Vec<F>,
+    /// Assignments to the private input variables. This is empty if `self.mode
+    /// == SynthesisMode::Setup`.
+    pub witness_assignment: Vec<F>,
+
+    /// Map for gadgets to cache computation results.
+    pub cache_map: Rc<RefCell<BTreeMap<TypeId, Box<dyn Any>>>>,
+
+    lc_map: BTreeMap<LcIndex, LinearCombination<F>>,
+
+    #[cfg(feature = "std")]
+    constraint_traces: Vec<Option<ConstraintTrace>>,
+
+    a_constraints: Vec<LcIndex>,
+    b_constraints: Vec<LcIndex>,
+    c_constraints: Vec<LcIndex>,
+
+    lc_assignment_cache: Rc<RefCell<BTreeMap<LcIndex, F>>>,
+}
+
+impl<F: Field> Default for ConstraintSystem<F> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+/// Defines the mode of operation of a `ConstraintSystem`.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
+pub enum SynthesisMode {
+    /// Indicate to the `ConstraintSystem` that it should only generate
+    /// constraint matrices and not populate the variable assignments.
+    Setup,
+    /// Indicate to the `ConstraintSystem` that it should populate the variable
+    /// assignments. If additionally `construct_matrices == true`, then generate
+    /// the matrices as in the `Setup` case.
+    Prove {
+        /// If `construct_matrices == true`, then generate
+        /// the matrices as in the `Setup` case.
+        construct_matrices: bool,
+    },
+}
+
+/// Defines the parameter to optimize for a `ConstraintSystem`.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
+pub enum OptimizationGoal {
+    /// Make no attempt to optimize.
+    None,
+    /// Minimize the number of constraints.
+    Constraints,
+    /// Minimize the total weight of the constraints (the number of nonzero
+    /// entries across all constraints).
+    Weight,
+}
+
+impl<F: Field> ConstraintSystem<F> {
+    #[inline]
+    fn make_row(&self, l: &LinearCombination<F>) -> Vec<(F, usize)> {
+        let num_input = self.num_instance_variables;
+        l.0.iter()
+            .filter_map(|(coeff, var)| {
+                if coeff.is_zero() {
+                    None
+                } else {
+                    Some((
+                        *coeff,
+                        var.get_index_unchecked(num_input).expect("no symbolic LCs"),
+                    ))
+                }
+            })
+            .collect()
+    }
+
+    /// Construct an empty `ConstraintSystem`.
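+    ///
+    /// A minimal usage sketch (test-style; `ark_test_curves` is the
+    /// dev-dependency declared above, but any concrete field works):
+    /// ```ignore
+    /// use ark_relations::r1cs::ConstraintSystem;
+    /// use ark_test_curves::bls12_381::Fr;
+    ///
+    /// let cs = ConstraintSystem::<Fr>::new();
+    /// // The instance assignment always starts with the constant `one`.
+    /// assert_eq!(cs.num_instance_variables, 1);
+    /// ```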
+ pub fn new() -> Self { + Self { + num_instance_variables: 1, + num_witness_variables: 0, + num_constraints: 0, + num_linear_combinations: 0, + a_constraints: Vec::new(), + b_constraints: Vec::new(), + c_constraints: Vec::new(), + instance_assignment: vec![F::one()], + witness_assignment: Vec::new(), + cache_map: Rc::new(RefCell::new(BTreeMap::new())), + #[cfg(feature = "std")] + constraint_traces: Vec::new(), + + lc_map: BTreeMap::new(), + lc_assignment_cache: Rc::new(RefCell::new(BTreeMap::new())), + + mode: SynthesisMode::Prove { + construct_matrices: true, + }, + + optimization_goal: OptimizationGoal::Constraints, + } + } + + /// Create a new `ConstraintSystemRef`. + pub fn new_ref() -> ConstraintSystemRef { + ConstraintSystemRef::new(Self::new()) + } + + /// Set `self.mode` to `mode`. + pub fn set_mode(&mut self, mode: SynthesisMode) { + self.mode = mode; + } + + /// Check whether `self.mode == SynthesisMode::Setup`. + pub fn is_in_setup_mode(&self) -> bool { + self.mode == SynthesisMode::Setup + } + + /// Check whether this constraint system aims to optimize weight, + /// number of constraints, or neither. + pub fn optimization_goal(&self) -> OptimizationGoal { + self.optimization_goal + } + + /// Specify whether this constraint system should aim to optimize weight, + /// number of constraints, or neither. + pub fn set_optimization_goal(&mut self, goal: OptimizationGoal) { + // `set_optimization_goal` should only be executed before any constraint or value is created. + assert_eq!(self.num_instance_variables, 1); + assert_eq!(self.num_witness_variables, 0); + assert_eq!(self.num_constraints, 0); + assert_eq!(self.num_linear_combinations, 0); + + self.optimization_goal = goal; + } + + /// Check whether or not `self` will construct matrices. + pub fn should_construct_matrices(&self) -> bool { + match self.mode { + SynthesisMode::Setup => true, + SynthesisMode::Prove { construct_matrices } => construct_matrices, + } + } + + /// Return a variable representing the constant "zero" inside the constraint + /// system. + #[inline] + pub fn zero() -> Variable { + Variable::Zero + } + + /// Return a variable representing the constant "one" inside the constraint + /// system. + #[inline] + pub fn one() -> Variable { + Variable::One + } + + /// Obtain a variable representing a new public instance input. + #[inline] + pub fn new_input_variable(&mut self, f: Func) -> crate::r1cs::Result + where + Func: FnOnce() -> crate::r1cs::Result, + { + let index = self.num_instance_variables; + self.num_instance_variables += 1; + + if !self.is_in_setup_mode() { + self.instance_assignment.push(f()?); + } + Ok(Variable::Instance(index)) + } + + /// Obtain a variable representing a new private witness input. + #[inline] + pub fn new_witness_variable(&mut self, f: Func) -> crate::r1cs::Result + where + Func: FnOnce() -> crate::r1cs::Result, + { + let index = self.num_witness_variables; + self.num_witness_variables += 1; + + if !self.is_in_setup_mode() { + self.witness_assignment.push(f()?); + } + Ok(Variable::Witness(index)) + } + + /// Obtain a variable representing a linear combination. + #[inline] + pub fn new_lc(&mut self, lc: LinearCombination) -> crate::r1cs::Result { + let index = LcIndex(self.num_linear_combinations); + let var = Variable::SymbolicLc(index); + + self.lc_map.insert(index, lc); + + self.num_linear_combinations += 1; + Ok(var) + } + + /// Enforce a R1CS constraint with the name `name`. 
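+    ///
+    /// For example, to enforce `a * b = c` for previously allocated variables
+    /// `a`, `b`, and `c` (a sketch; these variables are hypothetical):
+    /// ```ignore
+    /// cs.enforce_constraint(lc!() + a, lc!() + b, lc!() + c)?;
+    /// ```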
+ #[inline] + pub fn enforce_constraint( + &mut self, + a: LinearCombination, + b: LinearCombination, + c: LinearCombination, + ) -> crate::r1cs::Result<()> { + if self.should_construct_matrices() { + let a_index = self.new_lc(a)?.get_lc_index().unwrap(); + let b_index = self.new_lc(b)?.get_lc_index().unwrap(); + let c_index = self.new_lc(c)?.get_lc_index().unwrap(); + self.a_constraints.push(a_index); + self.b_constraints.push(b_index); + self.c_constraints.push(c_index); + } + self.num_constraints += 1; + #[cfg(feature = "std")] + { + let trace = ConstraintTrace::capture(); + self.constraint_traces.push(trace); + } + Ok(()) + } + + /// Count the number of times each LC is used within other LCs in the + /// constraint system + fn lc_num_times_used(&self, count_sinks: bool) -> Vec { + let mut num_times_used = vec![0; self.lc_map.len()]; + + // Iterate over every lc in constraint system + for (index, lc) in self.lc_map.iter() { + num_times_used[index.0] += count_sinks as usize; + + // Increment the counter for each lc that this lc has a direct dependency on. + for &(_, var) in lc.iter() { + if var.is_lc() { + let lc_index = var.get_lc_index().expect("should be lc"); + num_times_used[lc_index.0] += 1; + } + } + } + num_times_used + } + + /// Transform the map of linear combinations. + /// Specifically, allow the creation of additional witness assignments. + /// + /// This method is used as a subroutine of `inline_all_lcs` and `outline_lcs`. + /// + /// The transformer function is given a references of this constraint system (&self), + /// number of times used, and a mutable reference of the linear combination to be transformed. + /// (&ConstraintSystem, usize, &mut LinearCombination) + /// + /// The transformer function returns the number of new witness variables needed + /// and a vector of new witness assignments (if not in the setup mode). + /// (usize, Option>) + pub fn transform_lc_map( + &mut self, + transformer: &mut dyn FnMut( + &ConstraintSystem, + usize, + &mut LinearCombination, + ) -> (usize, Option>), + ) { + // `transformed_lc_map` stores the transformed linear combinations. + let mut transformed_lc_map = BTreeMap::new(); + let mut num_times_used = self.lc_num_times_used(false); + + // This loop goes through all the LCs in the map, starting from + // the early ones. The transformer function is applied to the + // inlined LC, where new witness variables can be created. + for (&index, lc) in &self.lc_map { + let mut transformed_lc = LinearCombination::new(); + + // Inline the LC, unwrapping symbolic LCs that may constitute it, + // and updating them according to transformations in prior iterations. + for &(coeff, var) in lc.iter() { + if var.is_lc() { + let lc_index = var.get_lc_index().expect("should be lc"); + + // If `var` is a `SymbolicLc`, fetch the corresponding + // inlined LC, and substitute it in. + // + // We have the guarantee that `lc_index` must exist in + // `new_lc_map` since a LC can only depend on other + // LCs with lower indices, which we have transformed. + // + let lc = transformed_lc_map + .get(&lc_index) + .expect("should be inlined"); + transformed_lc.extend((lc * coeff).0.into_iter()); + + // Delete linear combinations that are no longer used. + // + // Deletion is safe for both outlining and inlining: + // * Inlining: the LC is substituted directly into all use sites, and so once it + // is fully inlined, it is redundant. 
+ // + // * Outlining: the LC is associated with a new variable `w`, and a new + // constraint of the form `lc_data * 1 = w`, where `lc_data` is the actual + // data in the linear combination. Furthermore, we replace its entry in + // `new_lc_map` with `(1, w)`. Once `w` is fully inlined, then we can delete + // the entry from `new_lc_map` + // + num_times_used[lc_index.0] -= 1; + if num_times_used[lc_index.0] == 0 { + // This lc is not used any more, so remove it. + transformed_lc_map.remove(&lc_index); + } + } else { + // Otherwise, it's a concrete variable and so we + // substitute it in directly. + transformed_lc.push((coeff, var)); + } + } + transformed_lc.compactify(); + + // Call the transformer function. + let (num_new_witness_variables, new_witness_assignments) = + transformer(&self, num_times_used[index.0], &mut transformed_lc); + + // Insert the transformed LC. + transformed_lc_map.insert(index, transformed_lc); + + // Update the witness counter. + self.num_witness_variables += num_new_witness_variables; + + // Supply additional witness assignments if not in the + // setup mode and if new witness variables are created. + if !self.is_in_setup_mode() && num_new_witness_variables > 0 { + assert!(new_witness_assignments.is_some()); + if let Some(new_witness_assignments) = new_witness_assignments { + assert_eq!(new_witness_assignments.len(), num_new_witness_variables); + self.witness_assignment + .extend_from_slice(&new_witness_assignments); + } + } + } + // Replace the LC map. + self.lc_map = transformed_lc_map; + } + + /// Naively inlines symbolic linear combinations into the linear + /// combinations that use them. + /// + /// Useful for standard pairing-based SNARKs where addition gates are cheap. + /// For example, in the SNARKs such as [\[Groth16\]](https://eprint.iacr.org/2016/260) and + /// [\[Groth-Maller17\]](https://eprint.iacr.org/2017/540), addition gates + /// do not contribute to the size of the multi-scalar multiplication, which + /// is the dominating cost. + pub fn inline_all_lcs(&mut self) { + // Only inline when a matrix representing R1CS is needed. + if !self.should_construct_matrices() { + return; + } + + // A dummy closure is used, which means that + // - it does not modify the inlined LC. + // - it does not add new witness variables. + self.transform_lc_map(&mut |_, _, _| (0, None)); + } + + /// If a `SymbolicLc` is used in more than one location and has sufficient + /// length, this method makes a new variable for that `SymbolicLc`, adds + /// a constraint ensuring the equality of the variable and the linear + /// combination, and then uses that variable in every location the + /// `SymbolicLc` is used. + /// + /// Useful for SNARKs like [\[Marlin\]](https://eprint.iacr.org/2019/1047) or + /// [\[Fractal\]](https://eprint.iacr.org/2019/1076), where addition gates + /// are not cheap. + fn outline_lcs(&mut self) { + // Only inline when a matrix representing R1CS is needed. + if !self.should_construct_matrices() { + return; + } + + // Store information about new witness variables created + // for outlining. New constraints will be added after the + // transformation of the LC map. + let mut new_witness_linear_combinations = Vec::new(); + let mut new_witness_indices = Vec::new(); + + // It goes through all the LCs in the map, starting from + // the early ones, and decides whether or not to dedicate a witness + // variable for this LC. + // + // If true, the LC is replaced with 1 * this witness variable. + // Otherwise, the LC is inlined. 
+ // + // Each iteration first updates the LC according to outlinings in prior + // iterations, and then sees if it should be outlined, and if so adds + // the outlining to the map. + // + self.transform_lc_map(&mut |cs, num_times_used, inlined_lc| { + let mut should_dedicate_a_witness_variable = false; + let mut new_witness_index = None; + let mut new_witness_assignment = Vec::new(); + + // Check if it is worthwhile to dedicate a witness variable. + let this_used_times = num_times_used + 1; + let this_len = inlined_lc.len(); + + // Cost with no outlining = `lc_len * number of usages` + // Cost with outlining is one constraint for `(lc_len) * 1 = {new variable}` and + // using that single new variable in each of the prior usages. + // This has total cost `number_of_usages + lc_len + 2` + if this_used_times * this_len > this_used_times + 2 + this_len { + should_dedicate_a_witness_variable = true; + } + + // If it is worthwhile to dedicate a witness variable, + if should_dedicate_a_witness_variable { + // Add a new witness (the value of the linear combination). + // This part follows the same logic of `new_witness_variable`. + let witness_index = cs.num_witness_variables; + new_witness_index = Some(witness_index); + + // Compute the witness assignment. + if !cs.is_in_setup_mode() { + let mut acc = F::zero(); + for (coeff, var) in inlined_lc.iter() { + acc += *coeff * &cs.assigned_value(*var).unwrap(); + } + new_witness_assignment.push(acc); + } + + // Add a new constraint for this new witness. + new_witness_linear_combinations.push(inlined_lc.clone()); + new_witness_indices.push(witness_index); + + // Replace the linear combination with (1 * this new witness). + *inlined_lc = LinearCombination::from(Variable::Witness(witness_index)); + } + // Otherwise, the LC remains unchanged. + + // Return information about new witness variables. + if new_witness_index.is_some() { + (1, Some(new_witness_assignment)) + } else { + (0, None) + } + }); + + // Add the constraints for the newly added witness variables. + for (new_witness_linear_combination, new_witness_variable) in + new_witness_linear_combinations + .iter() + .zip(new_witness_indices.iter()) + { + // Add a new constraint + self.enforce_constraint( + new_witness_linear_combination.clone(), + LinearCombination::from(Self::one()), + LinearCombination::from(Variable::Witness(*new_witness_variable)), + ) + .unwrap(); + } + } + + /// Finalize the constraint system (either by outlining or inlining, + /// if an optimization goal is set). + pub fn finalize(&mut self) { + match self.optimization_goal { + OptimizationGoal::None => self.inline_all_lcs(), + OptimizationGoal::Constraints => self.inline_all_lcs(), + OptimizationGoal::Weight => self.outline_lcs(), + }; + } + + /// This step must be called after constraint generation has completed, and + /// after all symbolic LCs have been inlined into the places that they + /// are used. 
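+    ///
+    /// A sketch of the expected call order (assuming a fully synthesized
+    /// system `cs`):
+    /// ```ignore
+    /// cs.finalize(); // inlines or outlines all symbolic LCs
+    /// let matrices = cs.to_matrices().unwrap();
+    /// assert_eq!(matrices.num_constraints, cs.num_constraints);
+    /// ```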
+ pub fn to_matrices(&self) -> Option> { + if let SynthesisMode::Prove { + construct_matrices: false, + } = self.mode + { + None + } else { + let a: Vec<_> = self + .a_constraints + .iter() + .map(|index| self.make_row(self.lc_map.get(index).unwrap())) + .collect(); + let b: Vec<_> = self + .b_constraints + .iter() + .map(|index| self.make_row(self.lc_map.get(index).unwrap())) + .collect(); + let c: Vec<_> = self + .c_constraints + .iter() + .map(|index| self.make_row(self.lc_map.get(index).unwrap())) + .collect(); + + let a_num_non_zero: usize = a.iter().map(|lc| lc.len()).sum(); + let b_num_non_zero: usize = b.iter().map(|lc| lc.len()).sum(); + let c_num_non_zero: usize = c.iter().map(|lc| lc.len()).sum(); + let matrices = ConstraintMatrices { + num_instance_variables: self.num_instance_variables, + num_witness_variables: self.num_witness_variables, + num_constraints: self.num_constraints, + + a_num_non_zero, + b_num_non_zero, + c_num_non_zero, + + a, + b, + c, + }; + Some(matrices) + } + } + + fn eval_lc(&self, lc: LcIndex) -> Option { + let lc = self.lc_map.get(&lc)?; + let mut acc = F::zero(); + for (coeff, var) in lc.iter() { + acc += *coeff * self.assigned_value(*var)?; + } + Some(acc) + } + + /// If `self` is satisfied, outputs `Ok(true)`. + /// If `self` is unsatisfied, outputs `Ok(false)`. + /// If `self.is_in_setup_mode()`, outputs `Err(())`. + pub fn is_satisfied(&self) -> crate::r1cs::Result { + self.which_is_unsatisfied().map(|s| s.is_none()) + } + + /// If `self` is satisfied, outputs `Ok(None)`. + /// If `self` is unsatisfied, outputs `Some(i)`, where `i` is the index of + /// the first unsatisfied constraint. If `self.is_in_setup_mode()`, outputs + /// `Err(())`. + pub fn which_is_unsatisfied(&self) -> crate::r1cs::Result> { + if self.is_in_setup_mode() { + Err(SynthesisError::AssignmentMissing) + } else { + for i in 0..self.num_constraints { + let a = self + .eval_lc(self.a_constraints[i]) + .ok_or(SynthesisError::AssignmentMissing)?; + let b = self + .eval_lc(self.b_constraints[i]) + .ok_or(SynthesisError::AssignmentMissing)?; + let c = self + .eval_lc(self.c_constraints[i]) + .ok_or(SynthesisError::AssignmentMissing)?; + if a * b != c { + let trace; + #[cfg(feature = "std")] + { + trace = self.constraint_traces[i].as_ref().map_or_else( + || { + eprintln!("Constraint trace requires enabling `ConstraintLayer`"); + format!("{}", i) + }, + |t| format!("{}", t), + ); + } + #[cfg(not(feature = "std"))] + { + trace = format!("{}", i); + } + return Ok(Some(trace)); + } + } + Ok(None) + } + } + + /// Obtain the assignment corresponding to the `Variable` `v`. + pub fn assigned_value(&self, v: Variable) -> Option { + match v { + Variable::One => Some(F::one()), + Variable::Zero => Some(F::zero()), + Variable::Witness(idx) => self.witness_assignment.get(idx).copied(), + Variable::Instance(idx) => self.instance_assignment.get(idx).copied(), + Variable::SymbolicLc(idx) => { + let value = self.lc_assignment_cache.borrow().get(&idx).copied(); + if value.is_some() { + value + } else { + let value = self.eval_lc(idx)?; + self.lc_assignment_cache.borrow_mut().insert(idx, value); + Some(value) + } + } + } + } +} +/// The A, B and C matrices of a Rank-One `ConstraintSystem`. +/// Also contains metadata on the structure of the constraint system +/// and the matrices. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct ConstraintMatrices { + /// The number of variables that are "public instances" to the constraint + /// system. 
+ pub num_instance_variables: usize, + /// The number of variables that are "private witnesses" to the constraint + /// system. + pub num_witness_variables: usize, + /// The number of constraints in the constraint system. + pub num_constraints: usize, + /// The number of non_zero entries in the A matrix. + pub a_num_non_zero: usize, + /// The number of non_zero entries in the B matrix. + pub b_num_non_zero: usize, + /// The number of non_zero entries in the C matrix. + pub c_num_non_zero: usize, + + /// The A constraint matrix. This is empty when + /// `self.mode == SynthesisMode::Prove { construct_matrices = false }`. + pub a: Matrix, + /// The B constraint matrix. This is empty when + /// `self.mode == SynthesisMode::Prove { construct_matrices = false }`. + pub b: Matrix, + /// The C constraint matrix. This is empty when + /// `self.mode == SynthesisMode::Prove { construct_matrices = false }`. + pub c: Matrix, +} + +/// A shared reference to a constraint system that can be stored in high level +/// variables. +#[derive(Debug, Clone)] +pub enum ConstraintSystemRef { + /// Represents the case where we *don't* need to allocate variables or + /// enforce constraints. Encountered when operating over constant + /// values. + None, + /// Represents the case where we *do* allocate variables or enforce + /// constraints. + CS(Rc>>), +} + +impl PartialEq for ConstraintSystemRef { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (Self::None, Self::None) => true, + (..) => false, + } + } +} + +impl Eq for ConstraintSystemRef {} + +/// A namespaced `ConstraintSystemRef`. +#[derive(Debug, Clone)] +pub struct Namespace { + inner: ConstraintSystemRef, + id: Option, +} + +impl From> for Namespace { + fn from(other: ConstraintSystemRef) -> Self { + Self { + inner: other, + id: None, + } + } +} + +impl Namespace { + /// Construct a new `Namespace`. + pub fn new(inner: ConstraintSystemRef, id: Option) -> Self { + Self { inner, id } + } + + /// Obtain the inner `ConstraintSystemRef`. + pub fn cs(&self) -> ConstraintSystemRef { + self.inner.clone() + } + + /// Manually leave the namespace. + pub fn leave_namespace(self) { + drop(self) + } +} + +impl Drop for Namespace { + fn drop(&mut self) { + if let Some(id) = self.id.as_ref() { + tracing::dispatcher::get_default(|dispatch| dispatch.exit(id)) + } + drop(&mut self.inner) + } +} + +impl ConstraintSystemRef { + /// Returns `self` if `!self.is_none()`, otherwise returns `other`. + pub fn or(self, other: Self) -> Self { + match self { + ConstraintSystemRef::None => other, + _ => self, + } + } + + /// Returns `true` is `self == ConstraintSystemRef::None`. + pub fn is_none(&self) -> bool { + matches!(self, ConstraintSystemRef::None) + } + + /// Construct a `ConstraintSystemRef` from a `ConstraintSystem`. + #[inline] + pub fn new(inner: ConstraintSystem) -> Self { + Self::CS(Rc::new(RefCell::new(inner))) + } + + fn inner(&self) -> Option<&Rc>>> { + match self { + Self::CS(a) => Some(a), + Self::None => None, + } + } + + /// Consumes self to return the inner `ConstraintSystem`. Returns + /// `None` if `Self::CS` is `None` or if any other references to + /// `Self::CS` exist. + pub fn into_inner(self) -> Option> { + match self { + Self::CS(a) => Rc::try_unwrap(a).ok().map(|s| s.into_inner()), + Self::None => None, + } + } + + /// Obtain an immutable reference to the underlying `ConstraintSystem`. + /// + /// # Panics + /// This method panics if `self` is already mutably borrowed. 
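+    ///
+    /// Sketch of read-only inspection (returns `None` for
+    /// `ConstraintSystemRef::None`):
+    /// ```ignore
+    /// if let Some(cs) = cs_ref.borrow() {
+    ///     println!("constraints so far: {}", cs.num_constraints);
+    /// }
+    /// ```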
+ #[inline] + pub fn borrow(&self) -> Option>> { + self.inner().map(|cs| cs.borrow()) + } + + /// Obtain a mutable reference to the underlying `ConstraintSystem`. + /// + /// # Panics + /// This method panics if `self` is already mutably borrowed. + #[inline] + pub fn borrow_mut(&self) -> Option>> { + self.inner().map(|cs| cs.borrow_mut()) + } + + /// Set `self.mode` to `mode`. + pub fn set_mode(&self, mode: SynthesisMode) { + self.inner().map_or((), |cs| cs.borrow_mut().set_mode(mode)) + } + + /// Check whether `self.mode == SynthesisMode::Setup`. + #[inline] + pub fn is_in_setup_mode(&self) -> bool { + self.inner() + .map_or(false, |cs| cs.borrow().is_in_setup_mode()) + } + + /// Returns the number of constraints. + #[inline] + pub fn num_constraints(&self) -> usize { + self.inner().map_or(0, |cs| cs.borrow().num_constraints) + } + + /// Returns the number of instance variables. + #[inline] + pub fn num_instance_variables(&self) -> usize { + self.inner() + .map_or(0, |cs| cs.borrow().num_instance_variables) + } + + /// Returns the number of witness variables. + #[inline] + pub fn num_witness_variables(&self) -> usize { + self.inner() + .map_or(0, |cs| cs.borrow().num_witness_variables) + } + + /// Check whether this constraint system aims to optimize weight, + /// number of constraints, or neither. + #[inline] + pub fn optimization_goal(&self) -> OptimizationGoal { + self.inner().map_or(OptimizationGoal::Constraints, |cs| { + cs.borrow().optimization_goal() + }) + } + + /// Specify whether this constraint system should aim to optimize weight, + /// number of constraints, or neither. + #[inline] + pub fn set_optimization_goal(&self, goal: OptimizationGoal) { + self.inner() + .map_or((), |cs| cs.borrow_mut().set_optimization_goal(goal)) + } + + /// Check whether or not `self` will construct matrices. + #[inline] + pub fn should_construct_matrices(&self) -> bool { + self.inner() + .map_or(false, |cs| cs.borrow().should_construct_matrices()) + } + + /// Obtain a variable representing a new public instance input. + #[inline] + pub fn new_input_variable(&self, f: Func) -> crate::r1cs::Result + where + Func: FnOnce() -> crate::r1cs::Result, + { + self.inner() + .ok_or(SynthesisError::MissingCS) + .and_then(|cs| { + if !self.is_in_setup_mode() { + // This is needed to avoid double-borrows, because `f` + // might itself mutably borrow `cs` (eg: `f = || g.value()`). + let value = f(); + cs.borrow_mut().new_input_variable(|| value) + } else { + cs.borrow_mut().new_input_variable(f) + } + }) + } + + /// Obtain a variable representing a new private witness input. + #[inline] + pub fn new_witness_variable(&self, f: Func) -> crate::r1cs::Result + where + Func: FnOnce() -> crate::r1cs::Result, + { + self.inner() + .ok_or(SynthesisError::MissingCS) + .and_then(|cs| { + if !self.is_in_setup_mode() { + // This is needed to avoid double-borrows, because `f` + // might itself mutably borrow `cs` (eg: `f = || g.value()`). + let value = f(); + cs.borrow_mut().new_witness_variable(|| value) + } else { + cs.borrow_mut().new_witness_variable(f) + } + }) + } + + /// Obtain a variable representing a linear combination. + #[inline] + pub fn new_lc(&self, lc: LinearCombination) -> crate::r1cs::Result { + self.inner() + .ok_or(SynthesisError::MissingCS) + .and_then(|cs| cs.borrow_mut().new_lc(lc)) + } + + /// Enforce a R1CS constraint with the name `name`. 
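+    ///
+    /// Sketch: allocating witnesses and constraining their product, with `Fr`
+    /// standing in for any concrete field:
+    /// ```ignore
+    /// let a = cs.new_witness_variable(|| Ok(Fr::from(3u8)))?;
+    /// let b = cs.new_witness_variable(|| Ok(Fr::from(5u8)))?;
+    /// let c = cs.new_witness_variable(|| Ok(Fr::from(15u8)))?;
+    /// cs.enforce_constraint(lc!() + a, lc!() + b, lc!() + c)?;
+    /// ```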
+ #[inline] + pub fn enforce_constraint( + &self, + a: LinearCombination, + b: LinearCombination, + c: LinearCombination, + ) -> crate::r1cs::Result<()> { + self.inner() + .ok_or(SynthesisError::MissingCS) + .and_then(|cs| cs.borrow_mut().enforce_constraint(a, b, c)) + } + + /// Naively inlines symbolic linear combinations into the linear + /// combinations that use them. + /// + /// Useful for standard pairing-based SNARKs where addition gates are cheap. + /// For example, in the SNARKs such as [\[Groth16\]](https://eprint.iacr.org/2016/260) and + /// [\[Groth-Maller17\]](https://eprint.iacr.org/2017/540), addition gates + /// do not contribute to the size of the multi-scalar multiplication, which + /// is the dominating cost. + pub fn inline_all_lcs(&self) { + if let Some(cs) = self.inner() { + cs.borrow_mut().inline_all_lcs() + } + } + + /// Finalize the constraint system (either by outlining or inlining, + /// if an optimization goal is set). + pub fn finalize(&self) { + if let Some(cs) = self.inner() { + cs.borrow_mut().finalize() + } + } + + /// This step must be called after constraint generation has completed, and + /// after all symbolic LCs have been inlined into the places that they + /// are used. + #[inline] + pub fn to_matrices(&self) -> Option> { + self.inner().and_then(|cs| cs.borrow().to_matrices()) + } + + /// If `self` is satisfied, outputs `Ok(true)`. + /// If `self` is unsatisfied, outputs `Ok(false)`. + /// If `self.is_in_setup_mode()` or if `self == None`, outputs `Err(())`. + pub fn is_satisfied(&self) -> crate::r1cs::Result { + self.inner() + .map_or(Err(SynthesisError::AssignmentMissing), |cs| { + cs.borrow().is_satisfied() + }) + } + + /// If `self` is satisfied, outputs `Ok(None)`. + /// If `self` is unsatisfied, outputs `Some(i)`, where `i` is the index of + /// the first unsatisfied constraint. + /// If `self.is_in_setup_mode()` or `self == None`, outputs `Err(())`. + pub fn which_is_unsatisfied(&self) -> crate::r1cs::Result> { + self.inner() + .map_or(Err(SynthesisError::AssignmentMissing), |cs| { + cs.borrow().which_is_unsatisfied() + }) + } + + /// Obtain the assignment corresponding to the `Variable` `v`. 
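+    ///
+    /// Returns `None` if no assignment is available, e.g. for a witness
+    /// variable while in setup mode, or when `self == ConstraintSystemRef::None`.
+    /// Sketch (with `a` a previously allocated witness):
+    /// ```ignore
+    /// let assignment = cs.assigned_value(a); // `Some(value)` only when proving
+    /// ```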
+ pub fn assigned_value(&self, v: Variable) -> Option { + self.inner().and_then(|cs| cs.borrow().assigned_value(v)) + } + + /// Get trace information about all constraints in the system + pub fn constraint_names(&self) -> Option> { + #[cfg(feature = "std")] + { + self.inner().and_then(|cs| { + cs.borrow() + .constraint_traces + .iter() + .map(|trace| { + let mut constraint_path = String::new(); + let mut prev_module_path = ""; + let mut prefixes = ark_std::collections::BTreeSet::new(); + for step in trace.as_ref()?.path() { + let module_path = if prev_module_path == step.module_path { + prefixes.insert(step.module_path.to_string()); + String::new() + } else { + let mut parts = step + .module_path + .split("::") + .filter(|&part| part != "r1cs_std" && part != "constraints"); + let mut path_so_far = String::new(); + for part in parts.by_ref() { + if path_so_far.is_empty() { + path_so_far += part; + } else { + path_so_far += &["::", part].join(""); + } + if prefixes.contains(&path_so_far) { + continue; + } else { + prefixes.insert(path_so_far.clone()); + break; + } + } + parts.collect::>().join("::") + "::" + }; + prev_module_path = step.module_path; + constraint_path += &["/", &module_path, step.name].join(""); + } + Some(constraint_path) + }) + .collect::>>() + }) + } + #[cfg(not(feature = "std"))] + { + None + } + } +} + +#[cfg(test)] +mod tests { + use crate::r1cs::*; + use ark_ff::One; + use ark_test_curves::bls12_381::Fr; + + #[test] + fn matrix_generation() -> crate::r1cs::Result<()> { + let cs = ConstraintSystem::::new_ref(); + let two = Fr::one() + Fr::one(); + let a = cs.new_input_variable(|| Ok(Fr::one()))?; + let b = cs.new_witness_variable(|| Ok(Fr::one()))?; + let c = cs.new_witness_variable(|| Ok(two))?; + cs.enforce_constraint(lc!() + a, lc!() + (two, b), lc!() + c)?; + let d = cs.new_lc(lc!() + a + b)?; + cs.enforce_constraint(lc!() + a, lc!() + d, lc!() + d)?; + let e = cs.new_lc(lc!() + d + d)?; + cs.enforce_constraint(lc!() + Variable::One, lc!() + e, lc!() + e)?; + cs.inline_all_lcs(); + let matrices = cs.to_matrices().unwrap(); + assert_eq!(matrices.a[0], vec![(Fr::one(), 1)]); + assert_eq!(matrices.b[0], vec![(two, 2)]); + assert_eq!(matrices.c[0], vec![(Fr::one(), 3)]); + + assert_eq!(matrices.a[1], vec![(Fr::one(), 1)]); + assert_eq!(matrices.b[1], vec![(Fr::one(), 1), (Fr::one(), 2)]); + assert_eq!(matrices.c[1], vec![(Fr::one(), 1), (Fr::one(), 2)]); + + assert_eq!(matrices.a[2], vec![(Fr::one(), 0)]); + assert_eq!(matrices.b[2], vec![(two, 1), (two, 2)]); + assert_eq!(matrices.c[2], vec![(two, 1), (two, 2)]); + Ok(()) + } +} diff --git a/arkworks/snark/relations/src/r1cs/error.rs b/arkworks/snark/relations/src/r1cs/error.rs new file mode 100644 index 00000000..162b2b73 --- /dev/null +++ b/arkworks/snark/relations/src/r1cs/error.rs @@ -0,0 +1,47 @@ +use core::fmt; + +/// This is an error that could occur during circuit synthesis contexts, +/// such as CRS generation, proving or verification. +#[derive(PartialEq, Eq, Clone, Copy, Debug)] +pub enum SynthesisError { + /// During synthesis, we tried to allocate a variable when + /// `ConstraintSystemRef` was `None`. + MissingCS, + /// During synthesis, we lacked knowledge of a variable assignment. + AssignmentMissing, + /// During synthesis, we divided by zero. + DivisionByZero, + /// During synthesis, we constructed an unsatisfiable constraint system. 
+ Unsatisfiable, + /// During synthesis, our polynomials ended up being too high of degree + PolynomialDegreeTooLarge, + /// During proof generation, we encountered an identity in the CRS + UnexpectedIdentity, + /// During verification, our verifying key was malformed. + MalformedVerifyingKey, + /// During CRS generation, we observed an unconstrained auxiliary variable + UnconstrainedVariable, +} + +impl ark_std::error::Error for SynthesisError {} + +impl fmt::Display for SynthesisError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + match self { + SynthesisError::MissingCS => write!(f, "the constraint system was `None`"), + SynthesisError::AssignmentMissing => { + write!(f, "an assignment for a variable could not be computed") + } + SynthesisError::DivisionByZero => write!(f, "division by zero"), + SynthesisError::Unsatisfiable => write!(f, "unsatisfiable constraint system"), + SynthesisError::PolynomialDegreeTooLarge => write!(f, "polynomial degree is too large"), + SynthesisError::UnexpectedIdentity => { + write!(f, "encountered an identity element in the CRS") + } + SynthesisError::MalformedVerifyingKey => write!(f, "malformed verifying key"), + SynthesisError::UnconstrainedVariable => { + write!(f, "auxiliary variable was unconstrained") + } + } + } +} diff --git a/arkworks/snark/relations/src/r1cs/impl_lc.rs b/arkworks/snark/relations/src/r1cs/impl_lc.rs new file mode 100644 index 00000000..1af38964 --- /dev/null +++ b/arkworks/snark/relations/src/r1cs/impl_lc.rs @@ -0,0 +1,513 @@ +#![allow(clippy::suspicious_arithmetic_impl)] + +use crate::r1cs::{LinearCombination, Variable}; +use ark_ff::Field; +use ark_std::{ + ops::{Add, AddAssign, Deref, DerefMut, Mul, MulAssign, Neg, Sub}, + vec, + vec::Vec, +}; + +/// Generate a `LinearCombination` from arithmetic expressions involving +/// `Variable`s. +#[macro_export] +macro_rules! lc { + () => { + $crate::r1cs::LinearCombination::zero() + }; +} + +impl LinearCombination { + /// Create a new empty linear combination. + pub fn new() -> Self { + Default::default() + } + + /// Create a new empty linear combination. + pub fn zero() -> Self { + Self::new() + } + + /// Deduplicate entries in `self`. + pub fn compactify(&mut self) { + self.0.sort_by_key(|e| e.1); + let mut current_var = None; + let mut current_var_first_index = 0; + for i in 0..self.0.len() { + let (f, v) = self.0[i]; + if Some(v) == current_var { + self.0[current_var_first_index].0 += &f; + } else { + current_var = Some(v); + current_var_first_index = i; + } + } + self.0.dedup_by_key(|e| e.1); + } +} + +impl<'a, F: Field> Deref for LinearCombination { + type Target = Vec<(F, Variable)>; + + #[inline] + fn deref(&self) -> &Vec<(F, Variable)> { + &self.0 + } +} + +impl DerefMut for LinearCombination { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl From<(F, Variable)> for LinearCombination { + #[inline] + fn from(input: (F, Variable)) -> Self { + LinearCombination(vec![input]) + } +} + +impl From for LinearCombination { + #[inline] + fn from(var: Variable) -> Self { + LinearCombination(vec![(F::one(), var)]) + } +} + +impl LinearCombination { + /// Negate the coefficients of all variables in `self`. + #[inline] + pub fn negate_in_place(&mut self) { + self.0.iter_mut().for_each(|(coeff, _)| *coeff = -(*coeff)); + } + + /// Get the location of a variable in `self`. 
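+    ///
+    /// For short combinations (fewer than six terms) this performs a linear
+    /// scan and always returns `Err(i)`, where `i` is the insertion point that
+    /// keeps the terms sorted; for longer ones it behaves like
+    /// `slice::binary_search_by_key`, returning `Ok(i)` on an exact match.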
+ #[inline] + pub fn get_var_loc(&self, search_var: &Variable) -> Result { + if self.0.len() < 6 { + let mut found_index = 0; + for (i, (_, var)) in self.iter().enumerate() { + if var >= search_var { + found_index = i; + break; + } else { + found_index += 1; + } + } + Err(found_index) + } else { + self.0 + .binary_search_by_key(search_var, |&(_, cur_var)| cur_var) + } + } +} + +impl Add<(F, Variable)> for LinearCombination { + type Output = Self; + + #[inline] + fn add(mut self, coeff_var: (F, Variable)) -> Self { + self += coeff_var; + self + } +} + +impl AddAssign<(F, Variable)> for LinearCombination { + #[inline] + fn add_assign(&mut self, (coeff, var): (F, Variable)) { + match self.get_var_loc(&var) { + Ok(found) => self.0[found].0 += &coeff, + Err(not_found) => self.0.insert(not_found, (coeff, var)), + } + } +} + +impl Sub<(F, Variable)> for LinearCombination { + type Output = Self; + + #[inline] + fn sub(self, (coeff, var): (F, Variable)) -> Self { + self + (-coeff, var) + } +} + +impl Neg for LinearCombination { + type Output = Self; + + #[inline] + fn neg(mut self) -> Self { + self.negate_in_place(); + self + } +} + +impl Mul for LinearCombination { + type Output = Self; + + #[inline] + fn mul(mut self, scalar: F) -> Self { + self *= scalar; + self + } +} + +impl<'a, F: Field> Mul for &'a LinearCombination { + type Output = LinearCombination; + + #[inline] + fn mul(self, scalar: F) -> LinearCombination { + let mut cur = self.clone(); + cur *= scalar; + cur + } +} + +impl MulAssign for LinearCombination { + #[inline] + fn mul_assign(&mut self, scalar: F) { + self.0.iter_mut().for_each(|(coeff, _)| *coeff *= &scalar); + } +} + +impl Add for LinearCombination { + type Output = Self; + + #[inline] + fn add(self, other: Variable) -> LinearCombination { + self + (F::one(), other) + } +} + +impl<'a, F: Field> Add<&'a Variable> for LinearCombination { + type Output = Self; + + #[inline] + fn add(self, other: &'a Variable) -> LinearCombination { + self + *other + } +} + +impl<'a, F: Field> Sub<&'a Variable> for LinearCombination { + type Output = Self; + + #[inline] + fn sub(self, other: &'a Variable) -> LinearCombination { + self - *other + } +} + +impl Sub for LinearCombination { + type Output = LinearCombination; + + #[inline] + fn sub(self, other: Variable) -> LinearCombination { + self - (F::one(), other) + } +} + +fn op_impl( + cur: &LinearCombination, + other: &LinearCombination, + push_fn: F1, + combine_fn: F2, +) -> LinearCombination +where + F1: Fn(F) -> F, + F2: Fn(F, F) -> F, +{ + let mut new_vec = Vec::new(); + let mut i = 0; + let mut j = 0; + while i < cur.len() && j < other.len() { + let self_cur = &cur[i]; + let other_cur = &other[j]; + use core::cmp::Ordering; + match self_cur.1.cmp(&other_cur.1) { + Ordering::Greater => { + new_vec.push((push_fn(other[j].0), other[j].1)); + j += 1; + } + Ordering::Less => { + new_vec.push(*self_cur); + i += 1; + } + Ordering::Equal => { + new_vec.push((combine_fn(self_cur.0, other_cur.0), self_cur.1)); + i += 1; + j += 1; + } + }; + } + new_vec.extend_from_slice(&cur[i..]); + while j < other.0.len() { + new_vec.push((push_fn(other[j].0), other[j].1)); + j += 1; + } + LinearCombination(new_vec) +} + +impl Add<&LinearCombination> for &LinearCombination { + type Output = LinearCombination; + + fn add(self, other: &LinearCombination) -> LinearCombination { + if other.0.is_empty() { + return self.clone(); + } else if self.0.is_empty() { + return other.clone(); + } + op_impl( + self, + other, + |coeff| coeff, + |cur_coeff, other_coeff| 
cur_coeff + other_coeff, + ) + } +} + +impl Add> for &LinearCombination { + type Output = LinearCombination; + + fn add(self, other: LinearCombination) -> LinearCombination { + if self.0.is_empty() { + return other; + } else if other.0.is_empty() { + return self.clone(); + } + op_impl( + self, + &other, + |coeff| coeff, + |cur_coeff, other_coeff| cur_coeff + other_coeff, + ) + } +} + +impl<'a, F: Field> Add<&'a LinearCombination> for LinearCombination { + type Output = LinearCombination; + + fn add(self, other: &'a LinearCombination) -> LinearCombination { + if other.0.is_empty() { + return self; + } else if self.0.is_empty() { + return other.clone(); + } + op_impl( + &self, + other, + |coeff| coeff, + |cur_coeff, other_coeff| cur_coeff + other_coeff, + ) + } +} + +impl Add> for LinearCombination { + type Output = Self; + + fn add(self, other: Self) -> Self { + if other.0.is_empty() { + return self; + } else if self.0.is_empty() { + return other; + } + op_impl( + &self, + &other, + |coeff| coeff, + |cur_coeff, other_coeff| cur_coeff + other_coeff, + ) + } +} + +impl Sub<&LinearCombination> for &LinearCombination { + type Output = LinearCombination; + + fn sub(self, other: &LinearCombination) -> LinearCombination { + if other.0.is_empty() { + let cur = self.clone(); + return cur; + } else if self.0.is_empty() { + let mut other = other.clone(); + other.negate_in_place(); + return other; + } + + op_impl( + self, + other, + |coeff| -coeff, + |cur_coeff, other_coeff| cur_coeff - other_coeff, + ) + } +} + +impl<'a, F: Field> Sub<&'a LinearCombination> for LinearCombination { + type Output = LinearCombination; + + fn sub(self, other: &'a LinearCombination) -> LinearCombination { + if other.0.is_empty() { + return self; + } else if self.0.is_empty() { + let mut other = other.clone(); + other.negate_in_place(); + return other; + } + op_impl( + &self, + other, + |coeff| -coeff, + |cur_coeff, other_coeff| cur_coeff - other_coeff, + ) + } +} + +impl Sub> for &LinearCombination { + type Output = LinearCombination; + + fn sub(self, mut other: LinearCombination) -> LinearCombination { + if self.0.is_empty() { + other.negate_in_place(); + return other; + } else if other.0.is_empty() { + return self.clone(); + } + + op_impl( + self, + &other, + |coeff| -coeff, + |cur_coeff, other_coeff| cur_coeff - other_coeff, + ) + } +} + +impl Sub> for LinearCombination { + type Output = LinearCombination; + + fn sub(self, mut other: LinearCombination) -> LinearCombination { + if other.0.is_empty() { + return self; + } else if self.0.is_empty() { + other.negate_in_place(); + return other; + } + op_impl( + &self, + &other, + |coeff| -coeff, + |cur_coeff, other_coeff| cur_coeff - other_coeff, + ) + } +} + +impl Add<(F, &LinearCombination)> for &LinearCombination { + type Output = LinearCombination; + + fn add(self, (mul_coeff, other): (F, &LinearCombination)) -> LinearCombination { + if other.0.is_empty() { + return self.clone(); + } else if self.0.is_empty() { + let mut other = other.clone(); + other.mul_assign(mul_coeff); + return other; + } + op_impl( + self, + other, + |coeff| mul_coeff * coeff, + |cur_coeff, other_coeff| cur_coeff + mul_coeff * other_coeff, + ) + } +} + +impl<'a, F: Field> Add<(F, &'a LinearCombination)> for LinearCombination { + type Output = LinearCombination; + + fn add(self, (mul_coeff, other): (F, &'a LinearCombination)) -> LinearCombination { + if other.0.is_empty() { + return self; + } else if self.0.is_empty() { + let mut other = other.clone(); + other.mul_assign(mul_coeff); + return other; + 
} + op_impl( + &self, + other, + |coeff| mul_coeff * coeff, + |cur_coeff, other_coeff| cur_coeff + mul_coeff * other_coeff, + ) + } +} + +impl Add<(F, LinearCombination)> for &LinearCombination { + type Output = LinearCombination; + + fn add(self, (mul_coeff, mut other): (F, LinearCombination)) -> LinearCombination { + if other.0.is_empty() { + return self.clone(); + } else if self.0.is_empty() { + other.mul_assign(mul_coeff); + return other; + } + op_impl( + self, + &other, + |coeff| mul_coeff * coeff, + |cur_coeff, other_coeff| cur_coeff + mul_coeff * other_coeff, + ) + } +} + +impl Add<(F, Self)> for LinearCombination { + type Output = Self; + + fn add(self, (mul_coeff, other): (F, Self)) -> Self { + if other.0.is_empty() { + return self; + } else if self.0.is_empty() { + let mut other = other; + other.mul_assign(mul_coeff); + return other; + } + op_impl( + &self, + &other, + |coeff| mul_coeff * coeff, + |cur_coeff, other_coeff| cur_coeff + mul_coeff * other_coeff, + ) + } +} + +impl Sub<(F, &LinearCombination)> for &LinearCombination { + type Output = LinearCombination; + + fn sub(self, (coeff, other): (F, &LinearCombination)) -> LinearCombination { + self + (-coeff, other) + } +} + +impl<'a, F: Field> Sub<(F, &'a LinearCombination)> for LinearCombination { + type Output = LinearCombination; + + fn sub(self, (coeff, other): (F, &'a LinearCombination)) -> LinearCombination { + self + (-coeff, other) + } +} + +impl Sub<(F, LinearCombination)> for &LinearCombination { + type Output = LinearCombination; + + fn sub(self, (coeff, other): (F, LinearCombination)) -> LinearCombination { + self + (-coeff, other) + } +} + +impl<'a, F: Field> Sub<(F, LinearCombination)> for LinearCombination { + type Output = LinearCombination; + + fn sub(self, (coeff, other): (F, LinearCombination)) -> LinearCombination { + self + (-coeff, other) + } +} diff --git a/arkworks/snark/relations/src/r1cs/mod.rs b/arkworks/snark/relations/src/r1cs/mod.rs new file mode 100644 index 00000000..69adfa3f --- /dev/null +++ b/arkworks/snark/relations/src/r1cs/mod.rs @@ -0,0 +1,148 @@ +//! Core interface for working with Rank-1 Constraint Systems (R1CS). + +use ark_std::vec::Vec; + +/// A result type specialized to `SynthesisError`. +pub type Result = core::result::Result; + +#[macro_use] +mod impl_lc; +mod constraint_system; +mod error; +#[cfg(feature = "std")] +mod trace; + +#[cfg(feature = "std")] +pub use crate::r1cs::trace::{ConstraintLayer, ConstraintTrace, TraceStep, TracingMode}; + +pub use tracing::info_span; + +pub use ark_ff::{Field, ToConstraintField}; +pub use constraint_system::{ + ConstraintMatrices, ConstraintSynthesizer, ConstraintSystem, ConstraintSystemRef, Namespace, + OptimizationGoal, SynthesisMode, +}; +pub use error::SynthesisError; + +use core::cmp::Ordering; + +/// A sparse representation of constraint matrices. +pub type Matrix = Vec>; + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)] +/// An opaque counter for symbolic linear combinations. +pub struct LcIndex(usize); + +/// Represents the different kinds of variables present in a constraint system. +#[derive(Copy, Clone, PartialEq, Debug, Eq)] +pub enum Variable { + /// Represents the "zero" constant. + Zero, + /// Represents of the "one" constant. + One, + /// Represents a public instance variable. + Instance(usize), + /// Represents a private witness variable. + Witness(usize), + /// Represents of a linear combination. + SymbolicLc(LcIndex), +} + +/// A linear combination of variables according to associated coefficients. 
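+///
+/// For example, `2·x + y` can be built from the `lc!` macro and the operator
+/// impls in `impl_lc` (a sketch; `x` and `y` are previously allocated
+/// variables over a field `F`):
+/// ```ignore
+/// let two_x_plus_y = lc!() + (F::from(2u8), x) + y;
+/// ```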
+#[derive(Debug, Clone, PartialEq, Eq, Default)] +pub struct LinearCombination(pub Vec<(F, Variable)>); + +/// Generate a `Namespace` with name `name` from `ConstraintSystem` `cs`. +/// `name` must be a `&'static str`. +#[macro_export] +macro_rules! ns { + ($cs:expr, $name:expr) => {{ + let span = $crate::r1cs::info_span!(target: "r1cs", $name); + let id = span.id(); + let _enter_guard = span.enter(); + core::mem::forget(_enter_guard); + core::mem::forget(span); + $crate::r1cs::Namespace::new($cs.clone(), id) + }}; +} + +impl Variable { + /// Is `self` the zero variable? + #[inline] + pub fn is_zero(&self) -> bool { + matches!(self, Variable::Zero) + } + + /// Is `self` the one variable? + #[inline] + pub fn is_one(&self) -> bool { + matches!(self, Variable::One) + } + + /// Is `self` an instance variable? + #[inline] + pub fn is_instance(&self) -> bool { + matches!(self, Variable::Instance(_)) + } + + /// Is `self` a witness variable? + #[inline] + pub fn is_witness(&self) -> bool { + matches!(self, Variable::Witness(_)) + } + + /// Is `self` a linear combination? + #[inline] + pub fn is_lc(&self) -> bool { + matches!(self, Variable::SymbolicLc(_)) + } + + /// Get the `LcIndex` in `self` if `self.is_lc()`. + #[inline] + pub fn get_lc_index(&self) -> Option { + match self { + Variable::SymbolicLc(index) => Some(*index), + _ => None, + } + } + + /// Returns `Some(usize)` if `!self.is_lc()`, and `None` otherwise. + #[inline] + pub fn get_index_unchecked(&self, witness_offset: usize) -> Option { + match self { + // The one variable always has index 0 + Variable::One => Some(0), + Variable::Instance(i) => Some(*i), + Variable::Witness(i) => Some(witness_offset + *i), + _ => None, + } + } +} + +impl PartialOrd for Variable { + fn partial_cmp(&self, other: &Self) -> Option { + use Variable::*; + match (self, other) { + (Zero, Zero) => Some(Ordering::Equal), + (One, One) => Some(Ordering::Equal), + (Zero, _) => Some(Ordering::Less), + (One, _) => Some(Ordering::Less), + (_, Zero) => Some(Ordering::Greater), + (_, One) => Some(Ordering::Greater), + + (Instance(i), Instance(j)) | (Witness(i), Witness(j)) => i.partial_cmp(j), + (Instance(_), Witness(_)) => Some(Ordering::Less), + (Witness(_), Instance(_)) => Some(Ordering::Greater), + + (SymbolicLc(i), SymbolicLc(j)) => i.partial_cmp(j), + (_, SymbolicLc(_)) => Some(Ordering::Less), + (SymbolicLc(_), _) => Some(Ordering::Greater), + } + } +} + +impl Ord for Variable { + fn cmp(&self, other: &Self) -> Ordering { + self.partial_cmp(other).unwrap() + } +} diff --git a/arkworks/snark/relations/src/r1cs/trace.rs b/arkworks/snark/relations/src/r1cs/trace.rs new file mode 100644 index 00000000..fe957058 --- /dev/null +++ b/arkworks/snark/relations/src/r1cs/trace.rs @@ -0,0 +1,333 @@ +// adapted from `tracing_error::{SpanTrace, ErrorLayer}`. + +use core::{ + any::{type_name, TypeId}, + fmt, + marker::PhantomData, +}; +use tracing::{span, Dispatch, Metadata, Subscriber}; +use tracing_subscriber::{ + layer::{self, Layer}, + registry::LookupSpan, +}; + +/// A subscriber [`Layer`] that enables capturing a trace of R1CS constraint +/// generation. +/// +/// [`Layer`]: https://docs.rs/tracing-subscriber/0.2.10/tracing_subscriber/layer/trait.Layer.html +/// [field formatter]: https://docs.rs/tracing-subscriber/0.2.10/tracing_subscriber/fmt/trait.FormatFields.html +/// [default format]: https://docs.rs/tracing-subscriber/0.2.10/tracing_subscriber/fmt/format/struct.DefaultFields.html +pub struct ConstraintLayer { + /// Mode of filtering. 
+ pub mode: TracingMode, + + get_context: WithContext, + _subscriber: PhantomData, +} + +/// Instructs `ConstraintLayer` to conditionally filter out spans. +#[derive(PartialEq, Eq, Ord, PartialOrd, Hash, Debug)] +pub enum TracingMode { + /// Instructs `ConstraintLayer` to filter out any spans that *do not* have + /// `target == "r1cs"`. + OnlyConstraints, + /// Instructs `ConstraintLayer` to filter out any spans that *do* have + /// `target == "r1cs"`. + NoConstraints, + /// Instructs `ConstraintLayer` to not filter out any spans. + All, +} + +// this function "remembers" the types of the subscriber and the formatter, +// so that we can downcast to something aware of them without knowing those +// types at the callsite. +pub(crate) struct WithContext( + fn(&Dispatch, &span::Id, f: &mut dyn FnMut(&'static Metadata<'static>, &str) -> bool), +); + +impl Layer for ConstraintLayer +where + S: Subscriber + for<'span> LookupSpan<'span>, +{ + fn enabled(&self, metadata: &Metadata<'_>, _ctx: layer::Context<'_, S>) -> bool { + match self.mode { + TracingMode::OnlyConstraints => metadata.target() == "r1cs", + TracingMode::NoConstraints => metadata.target() != "r1cs", + TracingMode::All => true, + } + } + + /// Notifies this layer that a new span was constructed with the given + /// `Attributes` and `Id`. + fn new_span(&self, _attrs: &span::Attributes<'_>, _id: &span::Id, _ctx: layer::Context<'_, S>) { + } + + #[allow(unsafe_code, trivial_casts)] + unsafe fn downcast_raw(&self, id: TypeId) -> Option<*const ()> { + match id { + id if id == TypeId::of::() => Some(self as *const _ as *const ()), + id if id == TypeId::of::() => { + Some(&self.get_context as *const _ as *const ()) + } + _ => None, + } + } +} + +impl ConstraintLayer +where + S: Subscriber + for<'span> LookupSpan<'span>, +{ + /// Returns a new `ConstraintLayer`. + /// + /// If `mode == TracingMode::OnlyConstraints`, the resulting layer will + /// filter out any spans whose `target != "r1cs"`. + /// + /// If `mode == TracingMode::NoConstraints`, the resulting layer will + /// filter out any spans whose `target == "r1cs"`. + /// + /// Finally, if `mode == TracingMode::All`, the resulting layer will + /// not filter out any spans. + pub fn new(mode: TracingMode) -> Self { + Self { + mode, + get_context: WithContext(Self::get_context), + _subscriber: PhantomData, + } + } + + fn get_context( + dispatch: &Dispatch, + id: &span::Id, + f: &mut dyn FnMut(&'static Metadata<'static>, &str) -> bool, + ) { + let subscriber = dispatch + .downcast_ref::() + .expect("subscriber should downcast to expected type; this is a bug!"); + let span = subscriber + .span(id) + .expect("registry should have a span for the current ID"); + let parents = span.parents(); + for span in std::iter::once(span).chain(parents) { + let cont = f(span.metadata(), ""); + if !cont { + break; + } + } + } +} + +impl WithContext { + pub(crate) fn with_context<'a>( + &self, + dispatch: &'a Dispatch, + id: &span::Id, + mut f: impl FnMut(&'static Metadata<'static>, &str) -> bool, + ) { + (self.0)(dispatch, id, &mut f) + } +} + +impl Default for ConstraintLayer +where + S: Subscriber + for<'span> LookupSpan<'span>, +{ + fn default() -> Self { + Self::new(TracingMode::All) + } +} + +impl fmt::Debug for ConstraintLayer { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ConstraintLayer") + .field("subscriber", &format_args!("{}", type_name::())) + .finish() + } +} + +macro_rules! 
try_bool { + ($e:expr, $dest:ident) => {{ + let ret = $e.unwrap_or_else(|e| $dest = Err(e)); + + if $dest.is_err() { + return false; + } + + ret + }}; +} + +/// A captured trace of [`tracing`] spans that have `target = "r1cs"`. +/// +/// This type can be thought of as a relative of +/// [`std::backtrace::Backtrace`][`Backtrace`]. +/// However, rather than capturing the current call stack when it is +/// constructed, a `ConstraintTrace` instead captures the current [span] and its +/// [parents]. It allows inspection of the constraints that are left unsatisfied +/// by a particular witness assignment to an R1CS instance. +/// +/// # Formatting +/// +/// The `ConstraintTrace` type implements `fmt::Display`, formatting the span +/// trace similarly to how Rust formats panics. For example: +/// +/// ```text +/// 0: r1cs-std::bits::something +/// at r1cs-std/src/bits/test.rs:42 +/// 1: r1cs-std::bits::another_thing +/// at r1cs-std/src/bits/test.rs:15 +/// ``` +/// +/// [`tracing`]: https://docs.rs/tracing +/// [`Backtrace`]: https://doc.rust-lang.org/std/backtrace/struct.Backtrace.html +/// [span]: https://docs.rs/tracing/latest/tracing/span/index.html +/// [parents]: https://docs.rs/tracing/latest/tracing/span/index.html#span-relationships +#[derive(Clone, Debug)] +pub struct ConstraintTrace { + span: span::Span, +} + +// === impl ConstraintTrace === + +impl ConstraintTrace { + /// Capture the current span trace. + /// + /// # Examples + /// ```rust + /// use ark_relations::r1cs::ConstraintTrace; + /// + /// pub struct MyError { + /// trace: Option, + /// // ... + /// } + /// + /// # fn some_error_condition() -> bool { true } + /// + /// pub fn my_function(arg: &str) -> Result<(), MyError> { + /// let _span = tracing::info_span!(target: "r1cs", "In my_function"); + /// let _guard = _span.enter(); + /// if some_error_condition() { + /// return Err(MyError { + /// trace: ConstraintTrace::capture(), + /// // ... + /// }); + /// } + /// + /// // ... + /// # Ok(()) + /// } + /// ``` + pub fn capture() -> Option { + let span = span::Span::current(); + + if span.is_none() { + None + } else { + let trace = Self { span }; + Some(trace) + } + } + + /// Apply a function to all captured spans in the trace until it returns + /// `false`. + /// + /// This will call the provided function with a reference to the + /// [`Metadata`] and a formatted representation of the [fields] of each span + /// captured in the trace, starting with the span that was current when the + /// trace was captured. The function may return `true` or `false` to + /// indicate whether to continue iterating over spans; if it returns + /// `false`, no additional spans will be visited. + /// + /// [fields]: https://docs.rs/tracing/latest/tracing/field/index.html + /// [`Metadata`]: https://docs.rs/tracing/latest/tracing/struct.Metadata.html + fn with_spans(&self, f: impl FnMut(&'static Metadata<'static>, &str) -> bool) { + self.span.with_subscriber(|(id, s)| { + if let Some(getcx) = s.downcast_ref::() { + getcx.with_context(s, id, f); + } + }); + } + + /// Compute a `Vec` of `TraceStep`s, one for each `Span` on the path from + /// the root `Span`. + /// + /// The output starts from the root of the span tree. 
+ pub fn path(&self) -> Vec { + let mut path = Vec::new(); + self.with_spans(|metadata, _| { + if metadata.target() == "r1cs" { + let n = metadata.name(); + let step = metadata + .module_path() + .map(|m| (n, m)) + .and_then(|(n, m)| metadata.file().map(|f| (n, m, f))) + .and_then(|(n, m, f)| metadata.line().map(|l| (n, m, f, l))); + if let Some((name, module_path, file, line)) = step { + let step = TraceStep { + name, + module_path, + file, + line, + }; + path.push(step); + } else { + return false; + } + } + true + }); + path.reverse(); // root first + path + } +} + +impl fmt::Display for ConstraintTrace { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut err = Ok(()); + let mut span = 0; + + self.with_spans(|metadata, _| { + if metadata.target() != "r1cs" { + return true; + } + if span > 0 { + try_bool!(write!(f, "\n",), err); + } + + try_bool!( + write!( + f, + "{:>4}: {}::{}", + span, + metadata.module_path().unwrap(), + metadata.name() + ), + err + ); + + if let Some((file, line)) = metadata + .file() + .and_then(|file| metadata.line().map(|line| (file, line))) + { + try_bool!(write!(f, "\n at {}:{}", file, line), err); + } + + span += 1; + true + }); + + err + } +} +/// A step in the trace of a constraint generation step. +#[derive(Debug, Clone, Copy)] +pub struct TraceStep { + /// Name of the constraint generating span. + pub name: &'static str, + /// Name of the module containing the constraint generating span. + pub module_path: &'static str, + /// Name of the file containing the constraint generating span. + pub file: &'static str, + /// Line number of the constraint generating span. + pub line: u32, +} diff --git a/arkworks/snark/rustfmt.toml b/arkworks/snark/rustfmt.toml new file mode 100644 index 00000000..71712138 --- /dev/null +++ b/arkworks/snark/rustfmt.toml @@ -0,0 +1,9 @@ +reorder_imports = true +wrap_comments = true +normalize_comments = true +use_try_shorthand = true +match_block_trailing_comma = true +use_field_init_shorthand = true +edition = "2018" +condense_wildcard_suffixes = true +merge_imports = true diff --git a/arkworks/snark/scripts/install-hook.sh b/arkworks/snark/scripts/install-hook.sh new file mode 100755 index 00000000..eafcf818 --- /dev/null +++ b/arkworks/snark/scripts/install-hook.sh @@ -0,0 +1,9 @@ +#!/bin/env bash +# This script will install the provided directory ../.hooks as the hook +# directory for the present repo. See there for hooks, including a pre-commit +# hook that runs rustfmt on files before a commit. + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +HOOKS_DIR="${DIR}/../.hooks" + +git config core.hooksPath "$HOOKS_DIR" diff --git a/arkworks/snark/scripts/linkify_changelog.py b/arkworks/snark/scripts/linkify_changelog.py new file mode 100644 index 00000000..867ae14d --- /dev/null +++ b/arkworks/snark/scripts/linkify_changelog.py @@ -0,0 +1,31 @@ +import re +import sys +import fileinput +import os + +# Set this to the name of the repo, if you don't want it to be read from the filesystem. +# It assumes the changelog file is in the root of the repo. +repo_name = "" + +# This script goes through the provided file, and replaces any " \#", +# with the valid mark down formatted link to it. e.g. +# " [\#number](https://github.com/arkworks-rs/template/pull/) +# Note that if the number is for a an issue, github will auto-redirect you when you click the link. +# It is safe to run the script multiple times in succession. 
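+# (Already-linkified entries are left unchanged: the substitution below only
+# matches the bare "- #123" form.)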
+#
+# Example usage: $ python3 linkify_changelog.py ../CHANGELOG.md
+if len(sys.argv) < 2:
+    print("Must include path to changelog as the first argument to the script")
+    print("Example Usage: python3 linkify_changelog.py ../CHANGELOG.md")
+    exit()
+
+changelog_path = sys.argv[1]
+if repo_name == "":
+    path = os.path.abspath(changelog_path)
+    components = path.split(os.path.sep)
+    repo_name = components[-2]
+
+for line in fileinput.input(inplace=True):
+    line = re.sub(r"\- #([0-9]*)", r"- [\\#\1](https://github.com/arkworks-rs/" + repo_name + r"/pull/\1)", line.rstrip())
+    # edits the current file
+    print(line)
\ No newline at end of file
diff --git a/arkworks/snark/snark/Cargo.toml b/arkworks/snark/snark/Cargo.toml
new file mode 100644
index 00000000..7da4a472
--- /dev/null
+++ b/arkworks/snark/snark/Cargo.toml
@@ -0,0 +1,18 @@
+[package]
+name = "ark-snark"
+version = "0.3.0"
+authors = [ "arkworks contributors" ]
+description = "A library for SNARK traits"
+homepage = "https://arkworks.rs"
+repository = "https://github.com/arkworks-rs/snark"
+documentation = "https://docs.rs/ark-snark/"
+keywords = ["zero-knowledge", "cryptography", "zkSNARK", "SNARK"]
+categories = ["cryptography"]
+include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"]
+license = "MIT/Apache-2.0"
+edition = "2018"
+
+[dependencies]
+ark-ff = { path = "../../algebra/ff", version = "^0.3.0", default-features = false }
+ark-std = { path = "../../std", version = "^0.3.0", default-features = false }
+ark-relations = { version = "^0.3.0", path = "../relations", default-features = false }
diff --git a/arkworks/snark/snark/src/lib.rs b/arkworks/snark/snark/src/lib.rs
new file mode 100644
index 00000000..7832f3e8
--- /dev/null
+++ b/arkworks/snark/snark/src/lib.rs
@@ -0,0 +1,130 @@
+//! This crate contains traits that define the basic behaviour of SNARKs.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+#![warn(
+    unused,
+    future_incompatible,
+    nonstandard_style,
+    rust_2018_idioms,
+    missing_docs
+)]
+#![forbid(unsafe_code)]
+
+use ark_ff::{PrimeField, ToBytes};
+use ark_relations::r1cs::ConstraintSynthesizer;
+use ark_std::rand::{CryptoRng, RngCore};
+use core::fmt::Debug;
+
+/// The basic functionality for a SNARK.
+pub trait SNARK<F: PrimeField> {
+    /// The information required by the prover to produce a proof for a specific
+    /// circuit *C*.
+    type ProvingKey: Clone;
+
+    /// The information required by the verifier to check a proof for a specific
+    /// circuit *C*.
+    type VerifyingKey: Clone + ToBytes;
+
+    /// The proof output by the prover.
+    type Proof: Clone;
+
+    /// This contains the verification key, but preprocessed to enable faster
+    /// verification.
+    type ProcessedVerifyingKey: Clone;
+
+    /// Errors encountered during setup, proving, or verification.
+    type Error: 'static + ark_std::error::Error;
+
+    /// Takes in a description of a computation (specified in R1CS constraints),
+    /// and samples proving and verification keys for that circuit.
+    fn circuit_specific_setup<C: ConstraintSynthesizer<F>, R: RngCore + CryptoRng>(
+        circuit: C,
+        rng: &mut R,
+    ) -> Result<(Self::ProvingKey, Self::VerifyingKey), Self::Error>;
+
+    /// Generates a proof of satisfaction of the arithmetic circuit C (specified
+    /// as R1CS constraints).
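+    ///
+    /// Illustrative call shape (hypothetical implementor `S`, circuit value
+    /// `circuit`, and RNG `rng`; not compile-tested):
+    ///
+    /// ```ignore
+    /// let proof = S::prove(&pk, circuit, &mut rng)?;
+    /// ```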
+    fn prove<C: ConstraintSynthesizer<F>, R: RngCore + CryptoRng>(
+        circuit_pk: &Self::ProvingKey,
+        circuit: C,
+        rng: &mut R,
+    ) -> Result<Self::Proof, Self::Error>;
+
+    /// Checks that `proof` is a valid proof of the satisfaction of circuit
+    /// encoded in `circuit_vk`, with respect to the public input `public_input`,
+    /// specified as R1CS constraints.
+    fn verify(
+        circuit_vk: &Self::VerifyingKey,
+        public_input: &[F],
+        proof: &Self::Proof,
+    ) -> Result<bool, Self::Error> {
+        let pvk = Self::process_vk(circuit_vk)?;
+        Self::verify_with_processed_vk(&pvk, public_input, proof)
+    }
+
+    /// Preprocesses `circuit_vk` to enable faster verification.
+    fn process_vk(
+        circuit_vk: &Self::VerifyingKey,
+    ) -> Result<Self::ProcessedVerifyingKey, Self::Error>;
+
+    /// Checks that `proof` is a valid proof of the satisfaction of circuit
+    /// encoded in `circuit_pvk`, with respect to the public input `public_input`,
+    /// specified as R1CS constraints.
+    fn verify_with_processed_vk(
+        circuit_pvk: &Self::ProcessedVerifyingKey,
+        public_input: &[F],
+        proof: &Self::Proof,
+    ) -> Result<bool, Self::Error>;
+}
+
+/// A SNARK with (only) circuit-specific setup.
+pub trait CircuitSpecificSetupSNARK<F: PrimeField>: SNARK<F> {
+    /// The setup algorithm for circuit-specific SNARKs. By default, this
+    /// just invokes `<Self as SNARK<F>>::circuit_specific_setup(...)`.
+    fn setup<C: ConstraintSynthesizer<F>, R: RngCore + CryptoRng>(
+        circuit: C,
+        rng: &mut R,
+    ) -> Result<(Self::ProvingKey, Self::VerifyingKey), Self::Error> {
+        <Self as SNARK<F>>::circuit_specific_setup(circuit, rng)
+    }
+}
+
+/// A helper type for universal-setup SNARKs, which must infer their computation
+/// size bounds.
+pub enum UniversalSetupIndexError<Bound, E> {
+    /// The provided universal public parameters were insufficient to encode
+    /// the given circuit.
+    NeedLargerBound(Bound),
+    /// Other errors occurred during indexing.
+    Other(E),
+}
+
+/// A SNARK with universal setup. That is, a SNARK where the trusted setup is
+/// circuit-independent.
+pub trait UniversalSetupSNARK<F: PrimeField>: SNARK<F> {
+    /// Specifies how to bound the size of public parameters required to
+    /// generate the index proving and verification keys for a given
+    /// circuit.
+    type ComputationBound: Clone + Default + Debug;
+    /// Specifies the type of universal public parameters.
+    type PublicParameters: Clone + Debug;
+
+    /// Specifies how to bound the size of public parameters required to
+    /// generate the index proving and verification keys for a given
+    /// circuit.
+    fn universal_setup<R: RngCore + CryptoRng>(
+        compute_bound: &Self::ComputationBound,
+        rng: &mut R,
+    ) -> Result<Self::PublicParameters, Self::Error>;
+
+    /// Indexes the public parameters according to the circuit `circuit`, and
+    /// outputs circuit-specific proving and verification keys.
+    fn index<C: ConstraintSynthesizer<F>, R: RngCore + CryptoRng>(
+        pp: &Self::PublicParameters,
+        circuit: C,
+        rng: &mut R,
+    ) -> Result<
+        (Self::ProvingKey, Self::VerifyingKey),
+        UniversalSetupIndexError<Self::ComputationBound, Self::Error>,
+    >;
+}
diff --git a/arkworks/std/.github/ISSUE_TEMPLATE/bug_report.md b/arkworks/std/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 00000000..e01ca941
--- /dev/null
+++ b/arkworks/std/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,25 @@
+---
+name: Bug Report
+about: Create a report to help us squash bugs!
+ +--- + +∂ + +## Summary of Bug + + + +## Version + + + +## Steps to Reproduce + + + + diff --git a/arkworks/std/.github/ISSUE_TEMPLATE/feature_request.md b/arkworks/std/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 00000000..7d5ed5df --- /dev/null +++ b/arkworks/std/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,35 @@ +--- +name: Feature Request +about: Create a proposal to request a feature + +--- + + + +## Summary + + + +## Problem Definition + + + +## Proposal + + + +____ + +#### For Admin Use + +- [ ] Not duplicate issue +- [ ] Appropriate labels applied +- [ ] Appropriate contributors tagged +- [ ] Contributor assigned/self-assigned diff --git a/arkworks/std/.github/PULL_REQUEST_TEMPLATE.md b/arkworks/std/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..37f2f6c1 --- /dev/null +++ b/arkworks/std/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,26 @@ + + +## Description + + + +closes: #XXXX + +--- + +Before we can merge this PR, please make sure that all the following items have been +checked off. If any of the checklist items are not applicable, please leave them but +write a little note why. + +- [ ] Targeted PR against correct branch (master) +- [ ] Linked to Github issue with discussion and accepted design OR have an explanation in the PR that describes this work. +- [ ] Wrote unit tests +- [ ] Updated relevant documentation in the code +- [ ] Added a relevant changelog entry to the `Pending` section in `CHANGELOG.md` +- [ ] Re-reviewed `Files changed` in the Github PR explorer diff --git a/arkworks/std/.github/dependabot.yml b/arkworks/std/.github/dependabot.yml new file mode 100644 index 00000000..5cde1657 --- /dev/null +++ b/arkworks/std/.github/dependabot.yml @@ -0,0 +1,7 @@ +version: 2 +updates: +- package-ecosystem: cargo + directory: "/" + schedule: + interval: daily + open-pull-requests-limit: 10 diff --git a/arkworks/std/.github/workflows/ci.yml b/arkworks/std/.github/workflows/ci.yml new file mode 100644 index 00000000..81d33852 --- /dev/null +++ b/arkworks/std/.github/workflows/ci.yml @@ -0,0 +1,120 @@ +name: CI +on: + pull_request: + push: + branches: + - master +env: + RUST_BACKTRACE: 1 + +jobs: + style: + name: Check Style + runs-on: ubuntu-latest + steps: + + - name: Checkout + uses: actions/checkout@v1 + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + components: rustfmt + + - name: cargo fmt --check + uses: actions-rs/cargo@v1 + with: + command: fmt + args: --all -- --check + + test: + name: Test + runs-on: ubuntu-latest + env: + RUSTFLAGS: -Dwarnings + strategy: + matrix: + rust: + - stable + - nightly + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Install Rust (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: ${{ matrix.rust }} + override: true + + - uses: actions/cache@v2 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: Check examples + uses: actions-rs/cargo@v1 + with: + command: check + args: --examples --all + + - name: Check examples with all features on stable + uses: actions-rs/cargo@v1 + with: + command: check + args: --examples --all-features --all + if: matrix.rust == 'stable' + + - name: Check benchmarks on nightly + uses: actions-rs/cargo@v1 + with: + command: check + args: --all-features --examples --all --benches + if: matrix.rust == 'nightly' + + - name: Test + uses: 
actions-rs/cargo@v1 + with: + command: test + args: "--all \ + --all-features" + + check_no_std: + name: Check no_std + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Install Rust (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + target: thumbv6m-none-eabi + override: true + + - name: Install Rust ARM64 (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + target: aarch64-unknown-none + override: true + + - uses: actions/cache@v2 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: ark-std + run: | + cargo build -p ark-std --no-default-features --target thumbv6m-none-eabi + cargo check --examples -p ark-std --no-default-features --target thumbv6m-none-eabi diff --git a/arkworks/std/.gitignore b/arkworks/std/.gitignore new file mode 100644 index 00000000..9b5e101e --- /dev/null +++ b/arkworks/std/.gitignore @@ -0,0 +1,11 @@ +target +Cargo.lock +.DS_Store +.idea +*.iml +*.ipynb_checkpoints +*.pyc +*.sage.py +params +*.swp +*.swo diff --git a/arkworks/std/.hooks/pre-commit b/arkworks/std/.hooks/pre-commit new file mode 100755 index 00000000..8d4d19fe --- /dev/null +++ b/arkworks/std/.hooks/pre-commit @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +rustfmt --version &>/dev/null +if [ $? != 0 ]; then + printf "[pre_commit] \033[0;31merror\033[0m: \"rustfmt\" not available. \n" + printf "[pre_commit] \033[0;31merror\033[0m: rustfmt can be installed via - \n" + printf "[pre_commit] $ rustup component add rustfmt \n" + exit 1 +fi + +problem_files=() + +# collect ill-formatted files +for file in $(git diff --name-only --cached); do + if [ ${file: -3} == ".rs" ]; then + rustfmt +stable --check $file &>/dev/null + if [ $? != 0 ]; then + problem_files+=($file) + fi + fi +done + +if [ ${#problem_files[@]} == 0 ]; then + # done + printf "[pre_commit] rustfmt \033[0;32mok\033[0m \n" +else + # reformat the files that need it and re-stage them. + printf "[pre_commit] the following files were rustfmt'd before commit: \n" + for file in ${problem_files[@]}; do + rustfmt +stable $file + git add $file + printf "\033[0;32m $file\033[0m \n" + done +fi + +exit 0 diff --git a/arkworks/std/AUTHORS b/arkworks/std/AUTHORS new file mode 100644 index 00000000..39f2ca64 --- /dev/null +++ b/arkworks/std/AUTHORS @@ -0,0 +1,6 @@ +Sean Bowe +Alessandro Chiesa +Matthew Green +Ian Miers +Pratyush Mishra +Howard Wu diff --git a/arkworks/std/CHANGELOG.md b/arkworks/std/CHANGELOG.md new file mode 100644 index 00000000..3da30136 --- /dev/null +++ b/arkworks/std/CHANGELOG.md @@ -0,0 +1,23 @@ +## Pending + +### Breaking changes + +### Features + +### Improvements + +### Bug fixes + +## v0.3.0 + +### Breaking changes + +- [\#32](https://github.com/arkworks-rs/utils/pull/32) Bump `rand` to 0.8 and remove the use of `rand_xorshift`. + +### Features + +- [\#34](https://github.com/arkworks-rs/utils/pull/34) Re-export `num_traits::{One, Zero}` from `ark-std`. 
+ +### Improvements + +### Bug fixes diff --git a/arkworks/std/Cargo.toml b/arkworks/std/Cargo.toml new file mode 100644 index 00000000..c5e19520 --- /dev/null +++ b/arkworks/std/Cargo.toml @@ -0,0 +1,48 @@ +[package] +name = "ark-std" +version = "0.3.0" +authors = [ "arkworks contributors" ] +description = "A library for no_std compatibility" +homepage = "https://arkworks.rs" +repository = "https://github.com/arkworks-rs/utils" +documentation = "https://docs.rs/ark-std/" +keywords = [ "no_std" ] +categories = ["cryptography"] +include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +license = "MIT/Apache-2.0" +edition = "2018" + +[dependencies] +rand = { version = "0.8", default-features = false, features = ["std_rng"] } +rayon = { version = "1", optional = true } +colored = { version = "2", optional = true } +num-traits = { version = "0.2", default-features = false } + +[features] +default = [ "std" ] +std = [] +parallel = [ "rayon", "std" ] +print-trace = [ "std", "colored" ] + +[profile.release] +opt-level = 3 +lto = "thin" +incremental = true + +[profile.bench] +opt-level = 3 +debug = false +rpath = false +lto = "thin" +incremental = true +debug-assertions = false + +[profile.dev] +opt-level = 0 + +[profile.test] +opt-level = 3 +lto = "thin" +incremental = true +debug-assertions = true +debug = true diff --git a/arkworks/std/LICENSE-APACHE b/arkworks/std/LICENSE-APACHE new file mode 100644 index 00000000..16fe87b0 --- /dev/null +++ b/arkworks/std/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/arkworks/std/LICENSE-MIT b/arkworks/std/LICENSE-MIT new file mode 100644 index 00000000..72dc60d8 --- /dev/null +++ b/arkworks/std/LICENSE-MIT @@ -0,0 +1,19 @@ +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/arkworks/std/README.md b/arkworks/std/README.md new file mode 100644 index 00000000..4a38fb73 --- /dev/null +++ b/arkworks/std/README.md @@ -0,0 +1,62 @@ +

+<h1 align="center">arkworks::utils</h1>
+
+<p align="center"><!-- project banner and CI/license badges: image markup lost in extraction --></p>
+ +The arkworks ecosystem consists of Rust libraries for designing and working with __zero knowledge succinct non-interactive arguments (zkSNARKs)__. This repository contains `ark-std`, a library that serves as a compatibility layer for `no_std` use cases, and also contains useful methods and types used by the rest of the arkworks ecosystem. + +This library is released under the MIT License and the Apache v2 License (see [License](#license)). + +**WARNING:** This is an academic proof-of-concept prototype, and in particular has not received careful code review. This implementation is NOT ready for production use. + +## Build guide + +The library compiles on the `stable` toolchain of the Rust compiler. To install the latest version of Rust, first install `rustup` by following the instructions [here](https://rustup.rs/), or via your platform's package manager. Once `rustup` is installed, install the Rust toolchain by invoking: +```bash +rustup install stable +``` + +After that, use `cargo`, the standard Rust build tool, to build the libraries: +```bash +git clone https://github.com/arkworks-rs/utils.git +cd utils +cargo build --release +``` + +## Tests +This library comes with comprehensive unit and integration tests for each of the provided crates. Run the tests with: +```bash +cargo test --all +``` + +## Benchmarks + +To run the benchmarks, install the nightly Rust toolchain, via `rustup install nightly`, and then run the following command: +```bash +cargo +nightly bench +``` + +## License + +The crates in this repository are licensed under either of the following licenses, at your discretion. + + * Apache License Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +Unless you explicitly state otherwise, any contribution submitted for inclusion in this library by you shall be dual licensed as above (as defined in the Apache v2 License), without any additional terms or conditions. + +[zexe]: https://ia.cr/2018/962 + +## Acknowledgements + +This work was supported by: +a Google Faculty Award; +the National Science Foundation; +the UC Berkeley Center for Long-Term Cybersecurity; +and donations from the Ethereum Foundation, the Interchain Foundation, and Qtum. + +An earlier version of this library was developed as part of the paper *"[ZEXE: Enabling Decentralized Private Computation][zexe]"*. diff --git a/arkworks/std/rustfmt.toml b/arkworks/std/rustfmt.toml new file mode 100644 index 00000000..71712138 --- /dev/null +++ b/arkworks/std/rustfmt.toml @@ -0,0 +1,9 @@ +reorder_imports = true +wrap_comments = true +normalize_comments = true +use_try_shorthand = true +match_block_trailing_comma = true +use_field_init_shorthand = true +edition = "2018" +condense_wildcard_suffixes = true +merge_imports = true diff --git a/arkworks/std/scripts/install-hook.sh b/arkworks/std/scripts/install-hook.sh new file mode 100755 index 00000000..eafcf818 --- /dev/null +++ b/arkworks/std/scripts/install-hook.sh @@ -0,0 +1,9 @@ +#!/bin/env bash +# This script will install the provided directory ../.hooks as the hook +# directory for the present repo. See there for hooks, including a pre-commit +# hook that runs rustfmt on files before a commit. 
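+#
+# Usage (illustrative): run from the repository root:
+#   ./scripts/install-hook.sh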
+ +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +HOOKS_DIR="${DIR}/../.hooks" + +git config core.hooksPath "$HOOKS_DIR" diff --git a/arkworks/std/scripts/linkify_changelog.yml b/arkworks/std/scripts/linkify_changelog.yml new file mode 100644 index 00000000..0cbe85f1 --- /dev/null +++ b/arkworks/std/scripts/linkify_changelog.yml @@ -0,0 +1,20 @@ +name: Linkify Changelog + +on: + workflow_dispatch + +jobs: + linkify: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Add links + run: python3 scripts/linkify_changelog.py CHANGELOG.md + - name: Commit + run: | + git config user.name github-actions + git config user.email github-actions@github.com + git add . + git commit -m "Linkify Changelog" + git push \ No newline at end of file diff --git a/arkworks/std/src/error.rs b/arkworks/std/src/error.rs new file mode 100644 index 00000000..49419e38 --- /dev/null +++ b/arkworks/std/src/error.rs @@ -0,0 +1,54 @@ +use crate::boxed::Box; +use crate::fmt::{self, Debug, Display}; +use crate::string::String; + +pub trait Error: core::fmt::Debug + core::fmt::Display { + fn source(&self) -> Option<&(dyn Error + 'static)> { + None + } +} + +impl<'a, E: Error + 'a> From for Box { + fn from(err: E) -> Self { + Box::new(err) + } +} + +impl<'a, E: Error + Send + Sync + 'a> From for Box { + fn from(err: E) -> Box { + Box::new(err) + } +} + +impl Error for Box {} + +impl From for Box { + #[inline] + fn from(err: String) -> Box { + struct StringError(String); + + impl Error for StringError {} + + impl Display for StringError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + Display::fmt(&self.0, f) + } + } + + // Purposefully skip printing "StringError(..)" + impl Debug for StringError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + Debug::fmt(&self.0, f) + } + } + + Box::new(StringError(err)) + } +} + +impl<'a> From<&'a str> for Box { + #[inline] + fn from(err: &'a str) -> Box { + From::from(String::from(err)) + } +} diff --git a/arkworks/std/src/io/error.rs b/arkworks/std/src/io/error.rs new file mode 100644 index 00000000..5c71ca8e --- /dev/null +++ b/arkworks/std/src/io/error.rs @@ -0,0 +1,269 @@ +use crate::boxed::Box; +use crate::convert::From; +use crate::error; +use crate::fmt; + +/// The error type for I/O operations of the [`Read`], [`Write`], [`Seek`], and +/// associated traits. +/// +/// Errors mostly originate from the underlying OS, but custom instances of +/// `Error` can be created with crafted error messages and a particular value of +/// [`ErrorKind`]. +/// +/// [`Read`]: crate::io::Read +/// [`Write`]: crate::io::Write +/// [`Seek`]: crate::io::Seek +pub struct Error { + repr: Repr, +} + +pub type Result = core::result::Result; + +impl fmt::Debug for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&self.repr, f) + } +} + +enum Repr { + Simple(ErrorKind), + Custom(Box), +} + +#[derive(Debug)] +struct Custom { + kind: ErrorKind, + error: Box, +} + +/// A list specifying general categories of I/O error. +/// +/// This list is intended to grow over time and it is not recommended to +/// exhaustively match against it. +/// +/// It is used with the [`io::Error`] type. +/// +/// [`io::Error`]: Error +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[allow(deprecated)] +#[non_exhaustive] +pub enum ErrorKind { + /// An entity was not found, often a file. + NotFound, + /// The operation lacked the necessary privileges to complete. 
+ PermissionDenied, + /// The connection was refused by the remote server. + ConnectionRefused, + /// The connection was reset by the remote server. + ConnectionReset, + /// The connection was aborted (terminated) by the remote server. + ConnectionAborted, + /// The network operation failed because it was not connected yet. + NotConnected, + /// A socket address could not be bound because the address is already in + /// use elsewhere. + AddrInUse, + /// A nonexistent interface was requested or the requested address was not + /// local. + AddrNotAvailable, + /// The operation failed because a pipe was closed. + BrokenPipe, + /// An entity already exists, often a file. + AlreadyExists, + /// The operation needs to block to complete, but the blocking operation was + /// requested to not occur. + WouldBlock, + /// A parameter was incorrect. + InvalidInput, + /// Data not valid for the operation were encountered. + /// + /// Unlike [`InvalidInput`], this typically means that the operation + /// parameters were valid, however the error was caused by malformed + /// input data. + /// + /// For example, a function that reads a file into a string will error with + /// `InvalidData` if the file's contents are not valid UTF-8. + /// + /// [`InvalidInput`]: ErrorKind::InvalidInput + InvalidData, + /// The I/O operation's timeout expired, causing it to be canceled. + TimedOut, + /// An error returned when an operation could not be completed because a + /// call to [`write`] returned [`Ok(0)`]. + /// + /// This typically means that an operation could only succeed if it wrote a + /// particular number of bytes but only a smaller number of bytes could be + /// written. + /// + /// [`write`]: crate::io::Write::write + /// [`Ok(0)`]: Ok + WriteZero, + /// This operation was interrupted. + /// + /// Interrupted operations can typically be retried. + Interrupted, + /// Any I/O error not part of this list. + /// + /// Errors that are `Other` now may move to a different or a new + /// [`ErrorKind`] variant in the future. It is not recommended to match + /// an error against `Other` and to expect any additional characteristics, + /// e.g., a specific [`Error::raw_os_error`] return value. + Other, + + /// An error returned when an operation could not be completed because an + /// "end of file" was reached prematurely. + /// + /// This typically means that an operation could only succeed if it read a + /// particular number of bytes but only a smaller number of bytes could be + /// read. 
+ UnexpectedEof, +} + +impl ErrorKind { + pub(crate) fn as_str(&self) -> &'static str { + match *self { + ErrorKind::NotFound => "entity not found", + ErrorKind::PermissionDenied => "permission denied", + ErrorKind::ConnectionRefused => "connection refused", + ErrorKind::ConnectionReset => "connection reset", + ErrorKind::ConnectionAborted => "connection aborted", + ErrorKind::NotConnected => "not connected", + ErrorKind::AddrInUse => "address in use", + ErrorKind::AddrNotAvailable => "address not available", + ErrorKind::BrokenPipe => "broken pipe", + ErrorKind::AlreadyExists => "entity already exists", + ErrorKind::WouldBlock => "operation would block", + ErrorKind::InvalidInput => "invalid input parameter", + ErrorKind::InvalidData => "invalid data", + ErrorKind::TimedOut => "timed out", + ErrorKind::WriteZero => "write zero", + ErrorKind::Interrupted => "operation interrupted", + ErrorKind::Other => "other os error", + ErrorKind::UnexpectedEof => "unexpected end of file", + } + } +} + +/// Intended for use for errors not exposed to the user, where allocating onto +/// the heap (for normal construction via Error::new) is too costly. +impl From for Error { + /// Converts an [`ErrorKind`] into an [`Error`]. + /// + /// This conversion allocates a new error with a simple representation of error kind. + /// + /// # Examples + /// + /// ``` + /// use ark_std::io::{Error, ErrorKind}; + /// + /// let not_found = ErrorKind::NotFound; + /// let error = Error::from(not_found); + /// assert_eq!("entity not found", format!("{}", error)); + /// ``` + #[inline] + fn from(kind: ErrorKind) -> Error { + Error { + repr: Repr::Simple(kind), + } + } +} + +impl Error { + /// Creates a new I/O error from a known kind of error as well as an + /// arbitrary error payload. + /// + /// This function is used to generically create I/O errors which do not + /// originate from the OS itself. The `error` argument is an arbitrary + /// payload which will be contained in this [`Error`]. + /// + /// # Examples + /// + /// ``` + /// use ark_std::io::{Error, ErrorKind}; + /// + /// // errors can be created from strings + /// let custom_error = Error::new(ErrorKind::Other, "oh no!"); + /// + /// // errors can also be created from other errors + /// let custom_error2 = Error::new(ErrorKind::Interrupted, custom_error); + /// ``` + pub fn new(kind: ErrorKind, error: E) -> Error + where + E: Into>, + { + Self::_new(kind, error.into()) + } + + fn _new(kind: ErrorKind, error: Box) -> Error { + Error { + repr: Repr::Custom(Box::new(Custom { kind, error })), + } + } + + pub fn get_ref(&self) -> Option<&(dyn error::Error + Send + Sync + 'static)> { + match self.repr { + Repr::Simple(..) => None, + Repr::Custom(ref c) => Some(&*c.error), + } + } + + pub fn get_mut(&mut self) -> Option<&mut (dyn error::Error + Send + Sync + 'static)> { + match self.repr { + Repr::Simple(..) => None, + Repr::Custom(ref mut c) => Some(&mut *c.error), + } + } + + /// Consumes the `Error`, returning its inner error (if any). + /// + /// If this [`Error`] was constructed via [`new`] then this function will + /// return [`Some`], otherwise it will return [`None`]. + /// + /// [`new`]: Error::new + pub fn into_inner(self) -> Option> { + match self.repr { + Repr::Simple(..) => None, + Repr::Custom(c) => Some(c.error), + } + } + + /// Returns the corresponding [`ErrorKind`] for this error. 
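+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use ark_std::io::{Error, ErrorKind};
+    ///
+    /// let err = Error::new(ErrorKind::Other, "oh no!");
+    /// assert_eq!(err.kind(), ErrorKind::Other);
+    /// ```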
+ pub fn kind(&self) -> ErrorKind { + match self.repr { + Repr::Custom(ref c) => c.kind, + Repr::Simple(kind) => kind, + } + } +} + +impl fmt::Debug for Repr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + Repr::Custom(ref c) => fmt::Debug::fmt(&c, fmt), + Repr::Simple(kind) => fmt.debug_tuple("Kind").field(&kind).finish(), + } + } +} + +impl fmt::Display for Error { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.repr { + Repr::Custom(ref c) => c.error.fmt(fmt), + Repr::Simple(kind) => write!(fmt, "{}", kind.as_str()), + } + } +} + +impl error::Error for Error { + fn source(&self) -> Option<&(dyn error::Error + 'static)> { + match self.repr { + Repr::Simple(..) => None, + Repr::Custom(ref c) => c.error.source(), + } + } +} + +fn _assert_error_is_sync_send() { + fn _is_sync_send() {} + _is_sync_send::(); +} diff --git a/arkworks/std/src/io/mod.rs b/arkworks/std/src/io/mod.rs new file mode 100644 index 00000000..9d8c22b3 --- /dev/null +++ b/arkworks/std/src/io/mod.rs @@ -0,0 +1,544 @@ +//! no-std io replacement +use crate::{cmp, convert::TryInto, mem, vec::Vec}; + +mod error; +pub use error::*; + +pub mod prelude { + pub use super::{Read, Result, Write}; +} + +/// The `Read` trait allows for reading bytes from a source. +/// +/// Implementors of the `Read` trait are called 'readers'. +/// +/// Readers are defined by one required method, [`read()`]. Each call to [`read()`] +/// will attempt to pull bytes from this source into a provided buffer. A +/// number of other methods are implemented in terms of [`read()`], giving +/// implementors a number of ways to read bytes while only needing to implement +/// a single method. +/// +/// Readers are intended to be composable with one another. Many implementors +/// throughout [`ark_std::io`] take and provide types which implement the `Read` +/// trait. +/// +/// Please note that each call to [`read()`] may involve a system call, and +/// therefore, using something that implements [`BufRead`], such as +/// [`BufReader`], will be more efficient. +/// +/// +/// Read from [`&str`] because [`&[u8]`][slice] implements `Read`: +/// +/// ```no_run +/// # use ark_std::io; +/// use ark_std::io::prelude::*; +/// +/// fn main() -> Result<()> { +/// let mut b = "This string will be read".as_bytes(); +/// let mut buffer = [0; 10]; +/// +/// // read up to 10 bytes +/// b.read(&mut buffer)?; +/// +/// Ok(()) +/// } +/// ``` +/// +/// [`read()`]: trait.Read.html#tymethod.read +/// [`ark_std::io`]: ../../std/io/index.html +/// [`BufRead`]: trait.BufRead.html +/// [`BufReader`]: struct.BufReader.html +/// [`&str`]: ../../std/primitive.str.html +/// [slice]: ../../std/primitive.slice.html +pub trait Read { + /// Pull some bytes from this source into the specified buffer, returning + /// how many bytes were read. + /// + /// This function does not provide any guarantees about whether it blocks + /// waiting for data, but if an object needs to block for a read but cannot + /// it will typically signal this via an [`Err`] return value. + /// + /// If the return value of this method is [`Ok(n)`], then it must be + /// guaranteed that `0 <= n <= buf.len()`. A nonzero `n` value indicates + /// that the buffer `buf` has been filled in with `n` bytes of data from this + /// source. If `n` is `0`, then it can indicate that the the buffer + /// specified was 0 bytes in length. 
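+    /// It can also indicate that the reader has reached its "end of file" and
+    /// will likely no longer be able to produce bytes.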
+ /// + /// No guarantees are provided about the contents of `buf` when this + /// function is called, implementations cannot rely on any property of the + /// contents of `buf` being true. It is recommended that implementations + /// only write data to `buf` instead of reading its contents. + /// + /// # Errors + /// + /// If this function encounters any form of I/O or other error, an error + /// variant will be returned. If an error is returned then it must be + /// guaranteed that no bytes were read. + /// + /// An error of the [`ErrorKind::Interrupted`] kind is non-fatal and the read + /// operation should be retried if there is nothing else to do. + /// + fn read(&mut self, buf: &mut [u8]) -> Result; + + /// Read the exact number of bytes required to fill `buf`. + /// + /// This function reads as many bytes as necessary to completely fill the + /// specified buffer `buf`. + /// + /// No guarantees are provided about the contents of `buf` when this + /// function is called, implementations cannot rely on any property of the + /// contents of `buf` being true. It is recommended that implementations + /// only write data to `buf` instead of reading its contents. + /// + /// # Errors + /// + /// If this function encounters an error of the kind + /// [`ErrorKind::Interrupted`] then the error is ignored and the operation + /// will continue. + /// + /// If any other read error is encountered then this function immediately + /// returns. The contents of `buf` are unspecified in this case. + /// + /// If this function returns an error, it is unspecified how many bytes it + /// has read, but it will never read more than would be necessary to + /// completely fill the buffer. + fn read_exact(&mut self, mut buf: &mut [u8]) -> Result<()> { + while !buf.is_empty() { + match self.read(buf) { + Ok(0) => break, + Ok(n) => { + let tmp = buf; + buf = &mut tmp[n..]; + } + Err(ref e) if e.kind() == ErrorKind::Interrupted => {} + Err(e) => return Err(e), + } + } + if !buf.is_empty() { + Err(Error::new( + ErrorKind::UnexpectedEof, + "failed to fill whole buffer", + )) + } else { + Ok(()) + } + } + + /// Creates a "by reference" adaptor for this instance of `Read`. + /// + /// The returned adaptor also implements `Read` and will simply borrow this + /// current reader. + fn by_ref(&mut self) -> &mut Self + where + Self: Sized, + { + self + } +} + +pub trait Write { + /// Write a buffer into this writer, returning how many bytes were written. + /// + /// This function will attempt to write the entire contents of `buf`, but + /// the entire write may not succeed, or the write may also generate an + /// error. A call to `write` represents *at most one* attempt to write to + /// any wrapped object. + /// + /// Calls to `write` are not guaranteed to block waiting for data to be + /// written, and a write which would otherwise block can be indicated through + /// an [`Err`] variant. + /// + /// If the return value is [`Ok(n)`] then it must be guaranteed that + /// `0 <= n <= buf.len()`. A return value of `0` typically means that the + /// underlying object is no longer able to accept bytes and will likely not + /// be able to in the future as well, or that the buffer provided is empty. + /// + /// # Errors + /// + /// Each call to `write` may generate an I/O error indicating that the + /// operation could not be completed. If an error is returned then no bytes + /// in the buffer were written to this writer. 
+ /// + /// It is **not** considered an error if the entire buffer could not be + /// written to this writer. + /// + /// An error of the [`ErrorKind::Interrupted`] kind is non-fatal and the + /// write operation should be retried if there is nothing else to do. + /// + /// [`Err`]: ../../std/result/enum.Result.html#variant.Err + /// [`Ok(n)`]: ../../std/result/enum.Result.html#variant.Ok + /// [`ErrorKind::Interrupted`]: ../../std/io/enum.ErrorKind.html#variant.Interrupted + fn write(&mut self, buf: &[u8]) -> Result; + + /// Flush this output stream, ensuring that all intermediately buffered + /// contents reach their destination. + /// + /// # Errors + /// + /// It is considered an error if not all bytes could be written due to + /// I/O errors or EOF being reached. + /// + fn flush(&mut self) -> Result<()>; + + /// Attempts to write an entire buffer into this writer. + /// + /// This method will continuously call [`write`] until there is no more data + /// to be written or an error of non-[`ErrorKind::Interrupted`] kind is + /// returned. This method will not return until the entire buffer has been + /// successfully written or such an error occurs. The first error that is + /// not of [`ErrorKind::Interrupted`] kind generated from this method will be + /// returned. + /// + /// # Errors + /// + /// This function will return the first error of + /// non-[`ErrorKind::Interrupted`] kind that [`write`] returns. + /// + /// [`ErrorKind::Interrupted`]: ../../std/io/enum.ErrorKind.html#variant.Interrupted + /// [`write`]: #tymethod.write + fn write_all(&mut self, mut buf: &[u8]) -> Result<()> { + while !buf.is_empty() { + match self.write(buf) { + Ok(0) => { + return Err(Error::new( + ErrorKind::WriteZero, + "failed to write whole buffer", + )) + } + Ok(n) => buf = &buf[n..], + Err(ref e) if e.kind() == ErrorKind::Interrupted => {} + Err(e) => return Err(e), + } + } + Ok(()) + } + + /// Creates a "by reference" adaptor for this instance of `Write`. + /// + /// The returned adaptor also implements `Write` and will simply borrow this + /// current writer. + fn by_ref(&mut self) -> &mut Self + where + Self: Sized, + { + self + } +} + +impl Read for &mut R { + #[inline] + fn read(&mut self, buf: &mut [u8]) -> Result { + (**self).read(buf) + } + + #[inline] + fn read_exact(&mut self, buf: &mut [u8]) -> Result<()> { + (**self).read_exact(buf) + } +} + +impl Read for &[u8] { + #[inline] + fn read(&mut self, buf: &mut [u8]) -> Result { + let amt = cmp::min(buf.len(), self.len()); + let (a, b) = self.split_at(amt); + + // First check if the amount of bytes we want to read is small: + // `copy_from_slice` will generally expand to a call to `memcpy`, and + // for a single byte the overhead is significant. + if amt == 1 { + buf[0] = a[0]; + } else { + buf[..amt].copy_from_slice(a); + } + + *self = b; + Ok(amt) + } + + #[inline] + fn read_exact(&mut self, buf: &mut [u8]) -> Result<()> { + if buf.len() > self.len() { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "failed to fill whole buffer", + )); + } + let (a, b) = self.split_at(buf.len()); + + // First check if the amount of bytes we want to read is small: + // `copy_from_slice` will generally expand to a call to `memcpy`, and + // for a single byte the overhead is significant. 
+ if buf.len() == 1 { + buf[0] = a[0]; + } else { + buf.copy_from_slice(a); + } + + *self = b; + Ok(()) + } +} + +impl Write for &mut W { + #[inline] + fn write(&mut self, buf: &[u8]) -> Result { + (**self).write(buf) + } + + #[inline] + fn flush(&mut self) -> Result<()> { + (**self).flush() + } + + #[inline] + fn write_all(&mut self, buf: &[u8]) -> Result<()> { + (**self).write_all(buf) + } +} + +impl Write for &mut [u8] { + fn write(&mut self, data: &[u8]) -> Result { + let amt = cmp::min(data.len(), self.len()); + let (a, b) = mem::replace(self, &mut []).split_at_mut(amt); + a.copy_from_slice(&data[..amt]); + *self = b; + Ok(amt) + } + + #[inline] + fn write_all(&mut self, data: &[u8]) -> Result<()> { + if self.write(data)? == data.len() { + Ok(()) + } else { + Err(Error::new( + ErrorKind::WriteZero, + "failed to write whole buffer", + )) + } + } + + #[inline] + fn flush(&mut self) -> Result<()> { + Ok(()) + } +} + +impl Write for Vec { + #[inline] + fn write(&mut self, buf: &[u8]) -> Result { + self.extend_from_slice(buf); + Ok(buf.len()) + } + + #[inline] + fn write_all(&mut self, buf: &[u8]) -> Result<()> { + self.extend_from_slice(buf); + Ok(()) + } + + #[inline] + fn flush(&mut self) -> Result<()> { + Ok(()) + } +} + +///////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////////////////////////////////////////// + +/// This data structure is used as a workaround for current design of `ToBytes` +/// which does not allow multiple writes to `&mut [u8]`. +pub struct Cursor { + inner: T, + pos: u64, +} + +impl Cursor { + /// Creates a new cursor wrapping the provided underlying in-memory buffer. + /// + /// Cursor initial position is `0` even if underlying buffer (e.g., `Vec`) + /// is not empty. So writing to cursor starts with overwriting `Vec` + /// content, not with appending to it. + /// + /// # Examples + /// + /// ``` + /// use ark_std::io::Cursor; + /// + /// let buff = Cursor::new(Vec::new()); + /// # fn force_inference(_: &Cursor>) {} + /// # force_inference(&buff); + /// ``` + pub fn new(inner: T) -> Self { + Cursor { inner, pos: 0 } + } + + /// Consumes this cursor, returning the underlying value. + /// + /// # Examples + /// + /// ``` + /// use ark_std::io::Cursor; + /// + /// let buff = Cursor::new(Vec::new()); + /// # fn force_inference(_: &Cursor>) {} + /// # force_inference(&buff); + /// + /// let vec = buff.into_inner(); + /// ``` + pub fn into_inner(self) -> T { + self.inner + } + + /// Gets a reference to the underlying value in this cursor. + /// + /// # Examples + /// + /// ``` + /// use ark_std::io::Cursor; + /// + /// let buff = Cursor::new(Vec::new()); + /// # fn force_inference(_: &Cursor>) {} + /// # force_inference(&buff); + /// + /// let reference = buff.get_ref(); + /// ``` + pub fn get_ref(&self) -> &T { + &self.inner + } + + /// Gets a mutable reference to the underlying value in this cursor. + /// + /// Care should be taken to avoid modifying the internal I/O state of the + /// underlying value as it may corrupt this cursor's position. + /// + /// # Examples + /// + /// ``` + /// use ark_std::io::Cursor; + /// + /// let mut buff = Cursor::new(Vec::new()); + /// # fn force_inference(_: &Cursor>) {} + /// # force_inference(&buff); + /// + /// let reference = buff.get_mut(); + /// ``` + pub fn get_mut(&mut self) -> &mut T { + &mut self.inner + } + + /// Returns the current position of this cursor. 
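+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use ark_std::io::Cursor;
+    ///
+    /// let buff = Cursor::new(vec![1, 2, 3, 4, 5]);
+    /// assert_eq!(buff.position(), 0);
+    /// ```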
+    pub fn position(&self) -> u64 {
+        self.pos
+    }
+
+    /// Sets the position of this cursor.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use ark_std::io::Cursor;
+    ///
+    /// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
+    ///
+    /// assert_eq!(buff.position(), 0);
+    ///
+    /// buff.set_position(2);
+    /// assert_eq!(buff.position(), 2);
+    ///
+    /// buff.set_position(4);
+    /// assert_eq!(buff.position(), 4);
+    /// ```
+    pub fn set_position(&mut self, pos: u64) {
+        self.pos = pos;
+    }
+}
+
+impl<T> Read for Cursor<T>
+where
+    T: AsRef<[u8]>,
+{
+    fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
+        let n = Read::read(&mut self.get_buf()?, buf)?;
+        self.pos += n as u64;
+        Ok(n)
+    }
+
+    fn read_exact(&mut self, buf: &mut [u8]) -> Result<()> {
+        let n = buf.len();
+        Read::read_exact(&mut self.get_buf()?, buf)?;
+        self.pos += n as u64;
+        Ok(())
+    }
+}
+
+impl<T> Cursor<T>
+where
+    T: AsRef<[u8]>,
+{
+    fn get_buf(&mut self) -> Result<&[u8]> {
+        let amt = cmp::min(self.pos, self.inner.as_ref().len() as u64);
+        Ok(&self.inner.as_ref()[(amt as usize)..])
+    }
+}
+
+impl Write for Cursor<&mut [u8]> {
+    #[inline]
+    fn write(&mut self, buf: &[u8]) -> Result<usize> {
+        slice_write(&mut self.pos, self.inner, buf)
+    }
+
+    #[inline]
+    fn flush(&mut self) -> Result<()> {
+        Ok(())
+    }
+}
+
+impl Write for Cursor<Vec<u8>> {
+    fn write(&mut self, buf: &[u8]) -> Result<usize> {
+        vec_write(&mut self.pos, &mut self.inner, buf)
+    }
+
+    #[inline]
+    fn flush(&mut self) -> Result<()> {
+        Ok(())
+    }
+}
+
+// Non-resizing write implementation
+#[inline]
+fn slice_write(pos_mut: &mut u64, slice: &mut [u8], buf: &[u8]) -> Result<usize> {
+    let pos = cmp::min(*pos_mut, slice.len() as u64);
+    let amt = (&mut slice[(pos as usize)..]).write(buf)?;
+    *pos_mut += amt as u64;
+    Ok(amt)
+}
+
+fn vec_write(pos_mut: &mut u64, vec: &mut Vec<u8>, buf: &[u8]) -> Result<usize> {
+    let pos: usize = (*pos_mut).try_into().map_err(|_| {
+        Error::new(
+            ErrorKind::InvalidInput,
+            "cursor position exceeds maximum possible vector length",
+        )
+    })?;
+    // Make sure the internal buffer is at least as big as where we
+    // currently are
+    let len = vec.len();
+    if len < pos {
+        // use `resize` so that the zero filling is as efficient as possible
+        vec.resize(pos, 0);
+    }
+    // Figure out what bytes will be used to overwrite what's currently
+    // there (left), and what will be appended on the end (right)
+    {
+        let space = vec.len() - pos;
+        let (left, right) = buf.split_at(cmp::min(space, buf.len()));
+        vec[pos..pos + left.len()].copy_from_slice(left);
+        vec.extend_from_slice(right);
+    }
+
+    // Bump us forward
+    *pos_mut = (pos + buf.len()) as u64;
+    Ok(buf.len())
+}
diff --git a/arkworks/std/src/lib.rs b/arkworks/std/src/lib.rs
new file mode 100644
index 00000000..921d57ed
--- /dev/null
+++ b/arkworks/std/src/lib.rs
@@ -0,0 +1,212 @@
+#![cfg_attr(not(feature = "std"), no_std)]
+
+#[cfg(not(feature = "std"))]
+#[doc(hidden)]
+extern crate alloc;
+
+#[cfg(not(feature = "std"))]
+pub use alloc::*;
+
+#[cfg(not(feature = "std"))]
+pub use core::*;
+
+#[cfg(not(feature = "std"))]
+pub mod fmt {
+    pub use alloc::fmt::*;
+    pub use core::fmt::*;
+}
+
+#[cfg(not(feature = "std"))]
+pub mod borrow {
+    pub use alloc::borrow::*;
+    pub use core::borrow::*;
+}
+
+#[cfg(not(feature = "std"))]
+pub mod slice {
+    pub use alloc::slice::*;
+    pub use core::slice::*;
+}
+
+#[cfg(not(feature = "std"))]
+pub mod str {
+    pub use alloc::str::*;
+    pub use core::str::*;
+}
+
+#[cfg(not(feature = "std"))]
+pub mod io;
+
+#[cfg(not(feature = "std"))]
+pub mod error;
+
+#[cfg(feature = "std")]
+#[doc(hidden)]
+pub use std::*;
+
+mod rand_helper;
+pub use rand_helper::*;
+
+pub mod perf_trace;
+
+pub use num_traits::{One, Zero};
+
+/// Returns the ceiling of the base-2 logarithm of `x`.
+///
+/// ```
+/// use ark_std::log2;
+///
+/// assert_eq!(log2(16), 4);
+/// assert_eq!(log2(17), 5);
+/// assert_eq!(log2(1), 0);
+/// assert_eq!(log2(0), 0);
+/// assert_eq!(log2(usize::MAX), (core::mem::size_of::<usize>() * 8) as u32);
+/// assert_eq!(log2(1 << 15), 15);
+/// assert_eq!(log2(2usize.pow(18)), 18);
+/// ```
+pub fn log2(x: usize) -> u32 {
+    if x == 0 {
+        0
+    } else if x.is_power_of_two() {
+        1usize.leading_zeros() - x.leading_zeros()
+    } else {
+        0usize.leading_zeros() - x.leading_zeros()
+    }
+}
+
+/// Creates parallel iterator over refs if `parallel` feature is enabled.
+/// Additionally, if the object being iterated implements `IndexedParallelIterator`,
+/// then one can specify a minimum size for iteration.
+#[macro_export]
+macro_rules! cfg_iter {
+    ($e: expr, $min_len: expr) => {{
+        #[cfg(feature = "parallel")]
+        let result = $e.par_iter().with_min_len($min_len);
+
+        #[cfg(not(feature = "parallel"))]
+        let result = $e.iter();
+
+        result
+    }};
+    ($e: expr) => {{
+        #[cfg(feature = "parallel")]
+        let result = $e.par_iter();
+
+        #[cfg(not(feature = "parallel"))]
+        let result = $e.iter();
+
+        result
+    }};
+}
+
+/// Creates parallel iterator over mut refs if `parallel` feature is enabled.
+/// Additionally, if the object being iterated implements `IndexedParallelIterator`,
+/// then one can specify a minimum size for iteration.
+#[macro_export]
+macro_rules! cfg_iter_mut {
+    ($e: expr, $min_len: expr) => {{
+        #[cfg(feature = "parallel")]
+        let result = $e.par_iter_mut().with_min_len($min_len);
+
+        #[cfg(not(feature = "parallel"))]
+        let result = $e.iter_mut();
+
+        result
+    }};
+    ($e: expr) => {{
+        #[cfg(feature = "parallel")]
+        let result = $e.par_iter_mut();
+
+        #[cfg(not(feature = "parallel"))]
+        let result = $e.iter_mut();
+
+        result
+    }};
+}
+
+/// Creates parallel iterator if `parallel` feature is enabled.
+/// Additionally, if the object being iterated implements `IndexedParallelIterator`,
+/// then one can specify a minimum size for iteration.
+#[macro_export]
+macro_rules! cfg_into_iter {
+    ($e: expr, $min_len: expr) => {{
+        #[cfg(feature = "parallel")]
+        let result = $e.into_par_iter().with_min_len($min_len);
+
+        #[cfg(not(feature = "parallel"))]
+        let result = $e.into_iter();
+
+        result
+    }};
+    ($e: expr) => {{
+        #[cfg(feature = "parallel")]
+        let result = $e.into_par_iter();
+
+        #[cfg(not(feature = "parallel"))]
+        let result = $e.into_iter();
+
+        result
+    }};
+}
+
+/// Returns an iterator over `chunk_size` elements of the slice at a
+/// time.
+#[macro_export]
+macro_rules! cfg_chunks {
+    ($e: expr, $size: expr) => {{
+        #[cfg(feature = "parallel")]
+        let result = $e.par_chunks($size);
+
+        #[cfg(not(feature = "parallel"))]
+        let result = $e.chunks($size);
+
+        result
+    }};
+}
+
+/// Returns an iterator over `chunk_size` mutable elements of the slice at a
+/// time.
+#[macro_export]
+macro_rules! cfg_chunks_mut {
+    ($e: expr, $size: expr) => {{
+        #[cfg(feature = "parallel")]
+        let result = $e.par_chunks_mut($size);
+
+        #[cfg(not(feature = "parallel"))]
+        let result = $e.chunks_mut($size);
+
+        result
+    }};
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    #[cfg(feature = "parallel")]
+    use rayon::prelude::*;
+
+    #[test]
+    fn test_cfg_macros() {
+        #[cfg(feature = "parallel")]
+        println!("In parallel mode");
+
+        let mut thing = crate::vec![1, 2, 3, 4, 5u64];
+        println!("Iterating");
+        cfg_iter!(&thing).for_each(|i| println!("{:?}", i));
+        println!("Iterating Mut");
+        cfg_iter_mut!(&mut thing).for_each(|i| *i += 1);
+        println!("Iterating By Value");
+        cfg_into_iter!(thing.clone()).for_each(|i| println!("{:?}", i));
+        println!("Chunks");
+        cfg_chunks!(&thing, 2).for_each(|chunk| println!("{:?}", chunk));
+        println!("Chunks Mut");
+        cfg_chunks_mut!(&mut thing, 2).for_each(|chunk| println!("{:?}", chunk));
+
+        println!("Iterating");
+        cfg_iter!(&thing, 3).for_each(|i| println!("{:?}", i));
+        println!("Iterating Mut");
+        cfg_iter_mut!(&mut thing, 3).for_each(|i| *i += 1);
+        println!("Iterating By Value");
+        cfg_into_iter!(thing, 3).for_each(|i| println!("{:?}", i));
+    }
+}
diff --git a/arkworks/std/src/perf_trace.rs b/arkworks/std/src/perf_trace.rs
new file mode 100644
index 00000000..cfda3f03
--- /dev/null
+++ b/arkworks/std/src/perf_trace.rs
@@ -0,0 +1,224 @@
+#![allow(unused_imports)]
+//! This module contains macros for logging to stdout a trace of wall-clock time required
+//! to execute annotated code. One can use this code as follows:
+//! ```
+//! use ark_std::{start_timer, end_timer};
+//! let start = start_timer!(|| "Addition of two integers");
+//! let c = 5 + 7;
+//! end_timer!(start);
+//! ```
+//! The foregoing code should log the following to stdout.
+//! ```text
+//! Start: Addition of two integers
+//! End: Addition of two integers... 1ns
+//! ```
+//!
+//! These macros can be arbitrarily nested, and the nested nature is made apparent
+//! in the output. For example, the following snippet:
+//! ```
+//! use ark_std::{start_timer, end_timer};
+//! let start = start_timer!(|| "Addition of two integers");
+//! let start2 = start_timer!(|| "Inner");
+//! let c = 5 + 7;
+//! end_timer!(start2);
+//! end_timer!(start);
+//! ```
+//! should print out the following:
+//! ```text
+//! Start: Addition of two integers
+//! Start: Inner
+//! End: Inner ... 1ns
+//! End: Addition of two integers... 1ns
+//! ```
+//!
+//! Additionally, one can use the `add_to_trace` macro to log additional context
+//! in the output.
+pub use self::inner::*;
+
+#[macro_use]
+#[cfg(feature = "print-trace")]
+pub mod inner {
+    pub use colored::Colorize;
+
+    // print-trace requires std, so these imports are well-defined
+    pub use std::{
+        format, println,
+        string::{String, ToString},
+        sync::atomic::{AtomicUsize, Ordering},
+        time::Instant,
+    };
+
+    pub static NUM_INDENT: AtomicUsize = AtomicUsize::new(0);
+    pub const PAD_CHAR: &str = "·";
+
+    pub struct TimerInfo {
+        pub msg: String,
+        pub time: Instant,
+    }
+
+    #[macro_export]
+    macro_rules! start_timer {
+        ($msg:expr) => {{
+            use $crate::perf_trace::inner::{
+                compute_indent, AtomicUsize, Colorize, Instant, Ordering, ToString, NUM_INDENT,
+                PAD_CHAR,
+            };
+
+            let msg = $msg();
+            let start_info = "Start:".yellow().bold();
+            let indent_amount = 2 * NUM_INDENT.fetch_add(0, Ordering::Relaxed);
+            let indent = compute_indent(indent_amount);
+
+            $crate::perf_trace::println!("{}{:8} {}", indent, start_info, msg);
+            NUM_INDENT.fetch_add(1, Ordering::Relaxed);
+            $crate::perf_trace::TimerInfo {
+                msg: msg.to_string(),
+                time: Instant::now(),
+            }
+        }};
+    }
+
+    #[macro_export]
+    macro_rules! end_timer {
+        ($time:expr) => {{
+            end_timer!($time, || "");
+        }};
+        ($time:expr, $msg:expr) => {{
+            use $crate::perf_trace::inner::{
+                compute_indent, format, AtomicUsize, Colorize, Instant, Ordering, ToString,
+                NUM_INDENT, PAD_CHAR,
+            };
+
+            let time = $time.time;
+            let final_time = time.elapsed();
+            let final_time = {
+                let secs = final_time.as_secs();
+                let millis = final_time.subsec_millis();
+                let micros = final_time.subsec_micros() % 1000;
+                let nanos = final_time.subsec_nanos() % 1000;
+                if secs != 0 {
+                    format!("{}.{:03}s", secs, millis).bold()
+                } else if millis > 0 {
+                    format!("{}.{:03}ms", millis, micros).bold()
+                } else if micros > 0 {
+                    format!("{}.{:03}µs", micros, nanos).bold()
+                } else {
+                    format!("{}ns", final_time.subsec_nanos()).bold()
+                }
+            };
+
+            let end_info = "End:".green().bold();
+            let message = format!("{} {}", $time.msg, $msg());
+
+            NUM_INDENT.fetch_sub(1, Ordering::Relaxed);
+            let indent_amount = 2 * NUM_INDENT.fetch_add(0, Ordering::Relaxed);
+            let indent = compute_indent(indent_amount);
+
+            // Todo: Recursively ensure that *entire* string is of appropriate
+            // width (not just message).
+            $crate::perf_trace::println!(
+                "{}{:8} {:.<pad$}{}",
+                indent,
+                end_info,
+                message,
+                final_time,
+                pad = 75 - indent_amount
+            );
+        }};
+    }
+
+    #[macro_export]
+    macro_rules! add_to_trace {
+        ($title:expr, $msg:expr) => {{
+            use $crate::perf_trace::{
+                compute_indent, compute_indent_whitespace, format, AtomicUsize, Colorize, Instant,
+                Ordering, ToString, NUM_INDENT, PAD_CHAR,
+            };
+
+            let start_msg = "StartMsg".yellow().bold();
+            let end_msg = "EndMsg".green().bold();
+            let title = $title();
+            let start_msg = format!("{}: {}", start_msg, title);
+            let end_msg = format!("{}: {}", end_msg, title);
+
+            let start_indent_amount = 2 * NUM_INDENT.fetch_add(0, Ordering::Relaxed);
+            let start_indent = compute_indent(start_indent_amount);
+
+            let msg_indent_amount = 2 * NUM_INDENT.fetch_add(0, Ordering::Relaxed) + 2;
+            let msg_indent = compute_indent_whitespace(msg_indent_amount);
+            let mut final_message = "\n".to_string();
+            for line in $msg().lines() {
+                final_message += &format!("{}{}\n", msg_indent, line,);
+            }
+
+            // Todo: Recursively ensure that *entire* string is of appropriate
+            // width (not just message).
+            $crate::perf_trace::println!("{}{}", start_indent, start_msg);
+            $crate::perf_trace::println!("{}{}", msg_indent, final_message,);
+            $crate::perf_trace::println!("{}{}", start_indent, end_msg);
+        }};
+    }
+
+    pub fn compute_indent_whitespace(indent_amount: usize) -> String {
+        let mut indent = String::new();
+        for _ in 0..indent_amount {
+            indent.push_str(" ");
+        }
+        indent
+    }
+
+    pub fn compute_indent(indent_amount: usize) -> String {
+        let mut indent = String::new();
+        for _ in 0..indent_amount {
+            indent.push_str(&PAD_CHAR.white());
+        }
+        indent
+    }
+}
+
+#[macro_use]
+#[cfg(not(feature = "print-trace"))]
+mod inner {
+    pub struct TimerInfo;
+
+    #[macro_export]
+    macro_rules! start_timer {
+        ($msg:expr) => {
+            $crate::perf_trace::TimerInfo
+        };
+    }
+    #[macro_export]
+    macro_rules! add_to_trace {
+        ($title:expr, $msg:expr) => {
+            let _ = $msg;
+        };
+    }
+
+    #[macro_export]
+    macro_rules! end_timer {
+        ($time:expr, $msg:expr) => {
+            let _ = $msg;
+            let _ = $time;
+        };
+        ($time:expr) => {
+            let _ = $time;
+        };
+    }
+}
+
+mod tests {
+    use super::*;
+
+    #[test]
+    fn print_start_end() {
+        let start = start_timer!(|| "Hello");
+        end_timer!(start);
+    }
+
+    #[test]
+    fn print_add() {
+        let start = start_timer!(|| "Hello");
+        add_to_trace!(|| "HelloMsg", || "Hello, I\nAm\nA\nMessage");
+        end_timer!(start);
+    }
+}
diff --git a/arkworks/std/src/rand_helper.rs b/arkworks/std/src/rand_helper.rs
new file mode 100644
index 00000000..59ee3852
--- /dev/null
+++ b/arkworks/std/src/rand_helper.rs
@@ -0,0 +1,32 @@
+use rand::{
+    distributions::{Distribution, Standard},
+    rngs::StdRng,
+    Rng,
+};
+
+pub use rand;
+
+pub trait UniformRand: Sized {
+    fn rand<R: Rng + ?Sized>(rng: &mut R) -> Self;
+}
+
+impl<T> UniformRand for T
+where
+    Standard: Distribution<T>,
+{
+    #[inline]
+    fn rand<R: Rng + ?Sized>(rng: &mut R) -> Self {
+        rng.sample(Standard)
+    }
+}
+
+/// Should be used only for tests, not for any real world usage.
+pub fn test_rng() -> StdRng {
+    use rand::SeedableRng;
+    // arbitrary seed
+    let seed = [
+        1, 0, 0, 0, 23, 0, 0, 0, 200, 1, 0, 0, 210, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0,
+    ];
+    rand::rngs::StdRng::from_seed(seed)
+}
diff --git a/src/algebra.rs b/src/algebra.rs
index 4eb01546..e5daf270 100644
--- a/src/algebra.rs
+++ b/src/algebra.rs
@@ -18,12 +18,12 @@ pub struct BracketShare {
     mac: Vec<(Plaintext, Vec)>,
 }
 
-pub enum MpcField<F: Field, S: FieldShare<F>> {
-    // Angle(AngleShare),
-    // Bracket(BracketShare),
-    Public(F),
-    Shared(S),
-}
+// pub enum MpcField<F: Field, S: FieldShare<F>> {
+//     // Angle(AngleShare),
+//     // Bracket(BracketShare),
+//     Public(F),
+//     Shared(S),
+// }
 
 impl AngleShare {
     pub fn rand<T: Rng>(rng: &mut T) -> AngleShare {
diff --git a/src/groth16.rs b/src/groth16.rs
index 899fc0f4..fe254ab9 100644
--- a/src/groth16.rs
+++ b/src/groth16.rs
@@ -38,40 +38,40 @@ mod tests {
         assert!(!Groth16::<Bls12_377>::verify(&circuit_vk, &[a], &proof).unwrap());
     }
 
-    #[test]
-    fn test_mpc() {
-        let mut rng = rand::thread_rng();
+    // #[test]
+    // fn test_mpc() {
+    //     let mut rng = rand::thread_rng();
 
-        // let a = Fr::rand(&mut rng);
-        // let b = Fr::rand(&mut rng);
+    //     // let a = Fr::rand(&mut rng);
+    //     // let b = Fr::rand(&mut rng);
 
-        let a = AngleShare::rand(&mut rng);
-        let b = AngleShare::rand(&mut rng);
+    //     let a = AngleShare::rand(&mut rng);
+    //     let b = AngleShare::rand(&mut rng);
 
-        let mut c = a;
-        c = c * b;
+    //     let mut c = a;
+    //     c = c * b;
 
-        let circuit = MyCircuit::<Fr> {
-            a: Some(a),
-            b: Some(b),
-        };
+    //     let circuit = MyCircuit::<Fr> {
+    //         a: Some(a),
+    //         b: Some(b),
+    //     };
 
-        // let params = generate_random_parameters(circuit, &mut rng);
-        let (circuit_pk, circuit_vk) =
-            Groth16::<Bls12_377>::circuit_specific_setup(circuit.clone(), &mut rng).unwrap();
+    //     // let params = generate_random_parameters(circuit, &mut rng);
+    //     let (circuit_pk, circuit_vk) =
+    //         Groth16::<Bls12_377>::circuit_specific_setup(circuit.clone(), &mut rng).unwrap();
 
-        // let pvk = prepare_verifying_key::<E>(&params.vk);
+    //     // let pvk = prepare_verifying_key::<E>(&params.vk);
 
-        // let mpc_proof = prover::create_random_proof(circuit, &circuit_pk, &mut rng);
+    //     // let mpc_proof = prover::create_random_proof(circuit, &circuit_pk, &mut rng);
 
-        // let proof = mpc_proof.reveal();
+    //     // let proof = mpc_proof.reveal();
 
-        // TODO: implement reveal
-        // let pub_a = a.reveal();
-        // let pub_c = c.reveal();
+    //     // TODO: implement reveal
+    //     // let pub_a = a.reveal();
+    //     // let pub_c = c.reveal();
 
-        // assert!(verify_proof(&pvk, &proof, &[pub_c]).unwrap());
-        // assert!(Groth16::<Bls12_377>::verify(&circuit_vk, &[pub_c], &proof).unwrap());
-        // assert!(!Groth16::<Bls12_377>::verify(&circuit_vk, &[pub_a], &proof).unwrap());
-    }
+    //     // assert!(verify_proof(&pvk, &proof, &[pub_c]).unwrap());
+    //     // assert!(Groth16::<Bls12_377>::verify(&circuit_vk, &[pub_c], &proof).unwrap());
+    //     // assert!(!Groth16::<Bls12_377>::verify(&circuit_vk, &[pub_a], &proof).unwrap());
+    // }
 }
diff --git a/src/main.rs b/src/main.rs
index cc5a9777..9cf226d0 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,15 +1,15 @@
 #![allow(dead_code)]
 
-// mod algebra;
+mod algebra;
 mod circuit;
-// mod groth16;
+mod groth16;
 mod input_circuit;
-mod marlin;
+// mod marlin;
 mod preprocessing;
 mod reveal;
-mod share;
+// mod share;
 mod she;
-mod wire;
+// mod wire;
 
 use ark_bls12_377::{Bls12_377, Fr, FrParameters};
 use ark_crypto_primitives::CommitmentScheme;
diff --git a/src/wire/field.rs b/src/wire/field.rs
index d1285ae4..c12931cf 100644
--- a/src/wire/field.rs
+++ b/src/wire/field.rs
@@ -372,7 +372,11 @@ impl<F: PrimeField, S: FieldShare<F>> FftField for MpcField<F, S> {
     }
 }
 
-impl<F: PrimeField, S: FieldShare<F>> PrimeField for MpcField<F, S> {
+impl<F: PrimeField, S: FieldShare<F>> PrimeField for MpcField<F, S>
+where
+    Self: From<<F as PrimeField>::BigInt>,
+    Self: Into<<F as PrimeField>::BigInt>,
+{
     type Params = F::Params;
     type BigInt = F::BigInt;
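
The `Cursor` added in `arkworks/std/src/io/mod.rs` above is the piece downstream serialization code touches most. As a usage sketch (not part of this diff; it assumes the vendored crate is consumed as the `ark_std` dependency declared in Cargo.toml, and it compiles equally against `std::io` when the `std` feature is on):

use ark_std::io::{Cursor, Read, Write};

fn main() {
    // Writing through `Write for Cursor<Vec<u8>>` advances `pos`...
    let mut cur = Cursor::new(Vec::new());
    cur.write_all(&[1, 2, 3, 4]).unwrap();
    assert_eq!(cur.position(), 4);

    // ...so rewinding lets the same bytes be read back through
    // `Read for Cursor<T: AsRef<[u8]>>`, which slices from `pos`.
    cur.set_position(0);
    let mut buf = [0u8; 4];
    cur.read_exact(&mut buf).unwrap();
    assert_eq!(buf, [1, 2, 3, 4]);
}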
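
`vec_write` has one behavior worth calling out: a cursor position past the end of the `Vec` zero-fills the gap (via `resize`) before the payload is appended. A sketch under the same `ark_std` assumption:

use ark_std::io::{Cursor, Write};

fn main() {
    let mut cur = Cursor::new(vec![9u8, 9, 9]);
    // Position 5 is past the end of the 3-byte Vec; `vec_write`
    // resizes with zeros up to `pos` before copying the payload.
    cur.set_position(5);
    cur.write_all(&[7, 7]).unwrap();
    assert_eq!(cur.into_inner(), vec![9, 9, 9, 0, 0, 7, 7]);
}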
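
By contrast, `Write for &mut [u8]` is non-resizing: its `write_all` override turns an undersized slice into `ErrorKind::WriteZero` rather than growing the buffer. A sketch of that failure mode (same assumptions as above; `Error::kind` is the accessor the vendored `write_all` itself calls):

use ark_std::io::{ErrorKind, Write};

fn main() {
    let mut backing = [0u8; 2];
    let mut buf = &mut backing[..];
    buf.write_all(&[1, 2]).unwrap(); // fills the slice; `buf` is now empty
    // `write` now returns Ok(0), which `write_all` reports as WriteZero.
    let err = buf.write_all(&[3]).unwrap_err();
    assert_eq!(err.kind(), ErrorKind::WriteZero);
    assert_eq!(backing, [1, 2]);
}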
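
The `cfg_*` macros in `lib.rs` are how arkworks code toggles rayon without scattering `#[cfg]` blocks. A sketch of a downstream call site, assuming the calling crate forwards a `parallel` feature to `ark-std` and depends on `rayon` (the function name here is illustrative):

use ark_std::cfg_into_iter;
#[cfg(feature = "parallel")]
use rayon::prelude::*;

fn sum_of_squares(v: Vec<u64>) -> u64 {
    // Serial build: expands to `v.into_iter()`. Parallel build:
    // `v.into_par_iter().with_min_len(4)`; the hint is otherwise ignored.
    cfg_into_iter!(v, 4).map(|x| x * x).sum()
}

fn main() {
    assert_eq!(sum_of_squares(vec![1, 2, 3]), 14);
}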
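
Finally, `rand_helper.rs` is small but load-bearing: any `T` that `rand`'s `Standard` distribution can sample gets `UniformRand` for free via the blanket impl, and `test_rng` supplies deterministic randomness for tests. A sketch (again assuming the vendored crate is the `ark_std` dependency; `u64` works because `Standard: Distribution<u64>` holds in `rand` 0.8):

use ark_std::{test_rng, UniformRand};

fn main() {
    // Deterministic: the fixed seed in `test_rng` makes runs reproducible,
    // which is why its doc comment warns against real-world use.
    let mut rng = test_rng();
    let x = u64::rand(&mut rng); // via the blanket impl over Distribution<T>
    let y = u64::rand(&mut rng);
    println!("{} {}", x, y);
}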