From 01ccd2ad73825fde107b8b8e09ee07faccff68de Mon Sep 17 00:00:00 2001 From: Florin F <156660445+florin5f@users.noreply.github.com> Date: Wed, 12 Feb 2025 15:32:03 +0100 Subject: [PATCH] feat: plain zk prover and zk verifier working --- co-noir/co-builder/Cargo.toml | 2 +- co-noir/co-builder/src/honk_curve.rs | 13 + .../co-builder/src/polynomials/polynomial.rs | 102 +++++++- co-noir/co-builder/src/prelude.rs | 2 +- co-noir/co-noir/examples/co_noir_party0.rs | 4 +- co-noir/co-noir/examples/co_noir_party1.rs | 4 +- co-noir/co-noir/examples/co_noir_party2.rs | 4 +- co-noir/co-noir/src/bin/co-noir.rs | 13 +- co-noir/co-noir/src/bin/plaindriver.rs | 9 +- co-noir/co-ultrahonk/tests/plaindriver.rs | 3 +- co-noir/ultrahonk/Cargo.toml | 2 + co-noir/ultrahonk/Readme.md | 2 +- co-noir/ultrahonk/src/decider/mod.rs | 1 + co-noir/ultrahonk/src/decider/prover.rs | 83 +++++- .../ultrahonk/src/decider/shplemini/mod.rs | 3 +- .../ultrahonk/src/decider/shplemini/prover.rs | 198 ++++++++++++-- .../src/decider/shplemini/verifier.rs | 142 +++++++++- .../src/decider/small_subgroup_ipa.rs | 247 ++++++++++++++++++ co-noir/ultrahonk/src/decider/sumcheck/mod.rs | 3 + .../ultrahonk/src/decider/sumcheck/prover.rs | 153 ++++++++++- .../src/decider/sumcheck/round_prover.rs | 150 ++++++++++- .../src/decider/sumcheck/round_verifier.rs | 16 +- .../src/decider/sumcheck/verifier.rs | 63 ++++- .../ultrahonk/src/decider/sumcheck/zk_data.rs | 222 ++++++++++++++++ co-noir/ultrahonk/src/decider/types.rs | 3 + co-noir/ultrahonk/src/decider/univariate.rs | 9 + co-noir/ultrahonk/src/decider/verifier.rs | 37 ++- co-noir/ultrahonk/src/lib.rs | 3 + co-noir/ultrahonk/src/prover.rs | 21 +- co-noir/ultrahonk/src/sponge_hasher.rs | 2 +- co-noir/ultrahonk/src/transcript.rs | 7 +- co-noir/ultrahonk/src/types.rs | 2 +- co-noir/ultrahonk/src/verifier.rs | 20 +- co-noir/ultrahonk/tests/plain.rs | 33 ++- tests/tests/noir/proof_tests/plain.rs | 6 +- tests/tests/noir/proof_tests/rep3.rs | 6 +- tests/tests/noir/proof_tests/shamir.rs | 3 +- 37 files changed, 1482 insertions(+), 111 deletions(-) create mode 100644 co-noir/ultrahonk/src/decider/small_subgroup_ipa.rs create mode 100644 co-noir/ultrahonk/src/decider/sumcheck/zk_data.rs diff --git a/co-noir/co-builder/Cargo.toml b/co-noir/co-builder/Cargo.toml index e31645522..e499fa05e 100644 --- a/co-noir/co-builder/Cargo.toml +++ b/co-noir/co-builder/Cargo.toml @@ -25,6 +25,6 @@ num-traits.workspace = true serde.workspace = true thiserror.workspace = true tracing.workspace = true +rand.workspace = true [dev-dependencies] -rand.workspace = true diff --git a/co-noir/co-builder/src/honk_curve.rs b/co-noir/co-builder/src/honk_curve.rs index 43d6202b7..0ca6aa215 100644 --- a/co-noir/co-builder/src/honk_curve.rs +++ b/co-noir/co-builder/src/honk_curve.rs @@ -1,11 +1,13 @@ use ark_ec::pairing::Pairing; use ark_ff::{One, PrimeField}; use num_bigint::BigUint; +use std::str::FromStr; // Des describes the PrimeField used for the Transcript pub trait HonkCurve: Pairing { const NUM_BASEFIELD_ELEMENTS: usize; const NUM_SCALARFIELD_ELEMENTS: usize; + const SUBGROUP_SIZE: usize; fn g1_affine_from_xy(x: Self::BaseField, y: Self::BaseField) -> Self::G1Affine; fn g1_affine_to_xy(p: &Self::G1Affine) -> (Self::BaseField, Self::BaseField); @@ -21,11 +23,14 @@ pub trait HonkCurve: Pairing { // For the elliptic curve relation fn get_curve_b() -> Self::ScalarField; + + fn get_subgroup_generator() -> Self::ScalarField; } impl HonkCurve for ark_bn254::Bn254 { const NUM_BASEFIELD_ELEMENTS: usize = 2; const 
NUM_SCALARFIELD_ELEMENTS: usize = 1; + const SUBGROUP_SIZE: usize = 256; fn g1_affine_from_xy(x: ark_bn254::Fq, y: ark_bn254::Fq) -> ark_bn254::G1Affine { ark_bn254::G1Affine::new(x, y) @@ -62,6 +67,14 @@ impl HonkCurve for ark_bn254::Bn254 { // We are getting grumpkin::b, which is -17 -ark_bn254::Fr::from(17) } + + fn get_subgroup_generator() -> Self::ScalarField { + ark_bn254::Fr::from_str( + "3478517300119284901893091970156912948790432420133812234316178878452092729974", + ) + .map_err(|_| eyre::eyre!("Failed to parse subgroup generator")) + .unwrap() + } } const NUM_LIMB_BITS: u32 = 68; diff --git a/co-noir/co-builder/src/polynomials/polynomial.rs b/co-noir/co-builder/src/polynomials/polynomial.rs index 68ffb28fb..c198189ae 100644 --- a/co-noir/co-builder/src/polynomials/polynomial.rs +++ b/co-noir/co-builder/src/polynomials/polynomial.rs @@ -2,8 +2,9 @@ use ark_ff::PrimeField; use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial as _}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use num_traits::Zero; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use std::ops::{AddAssign, Index, IndexMut, SubAssign}; +use std::ops::{AddAssign, Index, IndexMut, MulAssign, SubAssign}; #[derive(Clone, Debug, Default)] pub struct Polynomial { @@ -203,6 +204,63 @@ impl Polynomial { let poly = DensePolynomial::from_coefficients_slice(&self.coefficients); poly.evaluate(&point) } + pub fn random(size: usize) -> Self { + let mut rng = rand::thread_rng(); + let coefficients = (0..size).map(|_| F::rand(&mut rng)).collect(); + Self { coefficients } + } + + pub fn evaluate_mle(&self, evaluation_points: &[F]) -> F { + if self.coefficients.is_empty() { + return F::zero(); + } + + let n = evaluation_points.len(); + let dim = (self.coefficients.len() - 1) + .next_power_of_two() + .trailing_zeros() as usize; // Round up to next power of 2 + + // To simplify handling of edge cases, we assume that the index space is always a power of 2 + assert_eq!(self.coefficients.len(), 1 << n); + + // We first fold over dim rounds l = 0,...,dim-1. + // in round l, n_l is the size of the buffer containing the Polynomial partially evaluated + // at u₀,..., u_l. + // In round 0, this is half the size of dim + let mut n_l = 1 << (dim - 1); + let mut tmp = vec![F::zero(); n_l]; + + // Note below: i * 2 + 1 + offset might equal virtual_size. This used to subtlely be handled by extra capacity + // padding (and there used to be no assert time checks, which this constant helps with). + for (i, val) in tmp.iter_mut().enumerate().take(n_l) { + *val = self.coefficients[i * 2] + + evaluation_points[0] * (self.coefficients[i * 2 + 1] - self.coefficients[i * 2]); + } + + // partially evaluate the dim-1 remaining points + for (l, val) in evaluation_points.iter().enumerate().take(dim).skip(1) { + n_l = 1 << (dim - l - 1); + + for i in 0..n_l { + tmp[i] = tmp[i * 2] + *val * (tmp[i * 2 + 1] - tmp[i * 2]); + } + } + // for l in 1..dim { + // n_l = 1 << (dim - l - 1); + // u_l = evaluation_points[l]; + // for i in 0..n_l { + // tmp[i] = tmp[i * 2] + u_l * (tmp[i * 2 + 1] - tmp[i * 2]); + // } + // } + let mut result = tmp[0]; + + // We handle the "trivial" dimensions which are full of zeros. 
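+        // Each trailing variable k >= dim sees an all-zero upper half of the
+        // (implicitly zero-padded) coefficient table, so the usual fold
+        // tmp + u_k * (0 - tmp) collapses to (1 - u_k) * tmp: every remaining
+        // point just scales the accumulated result by (1 - u_k).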
+ for &point in &evaluation_points[dim..n] { + result *= F::one() - point; + } + + result + } } impl Index for Polynomial { @@ -242,3 +300,45 @@ impl SubAssign<&[F]> for Polynomial { } } } + +impl MulAssign for Polynomial { + fn mul_assign(&mut self, rhs: F) { + for l in self.coefficients.iter_mut() { + *l *= rhs; + } + } +} + +pub struct RowDisablingPolynomial { + pub eval_at_0: F, + pub eval_at_1: F, +} + +impl Default for RowDisablingPolynomial { + fn default() -> Self { + Self { + eval_at_0: F::one(), + eval_at_1: F::one(), + } + } +} +impl RowDisablingPolynomial { + pub fn update_evaluations(&mut self, round_challenge: F, round_idx: usize) { + if round_idx == 1 { + self.eval_at_0 = F::zero(); + } + if round_idx >= 2 { + self.eval_at_1 *= round_challenge; + } + } + + pub fn evaluate_at_challenge(multivariate_challenge: &[F], log_circuit_size: usize) -> F { + let mut evaluation_at_multivariate_challenge = F::one(); + + for val in multivariate_challenge.iter().take(log_circuit_size).skip(2) { + evaluation_at_multivariate_challenge *= val; + } + + F::one() - evaluation_at_multivariate_challenge + } +} diff --git a/co-noir/co-builder/src/prelude.rs b/co-noir/co-builder/src/prelude.rs index b81b58edc..1d2369916 100644 --- a/co-noir/co-builder/src/prelude.rs +++ b/co-noir/co-builder/src/prelude.rs @@ -7,7 +7,7 @@ pub use crate::honk_curve::HonkCurve; pub use crate::keys::proving_key::ProvingKey; pub use crate::keys::verification_key::VerifyingKey; pub use crate::keys::verification_key::VerifyingKeyBarretenberg; -pub use crate::polynomials::polynomial::Polynomial; +pub use crate::polynomials::polynomial::{Polynomial, RowDisablingPolynomial}; pub use crate::polynomials::polynomial_types::Polynomials; pub use crate::polynomials::polynomial_types::{ PrecomputedEntities, ProverWitnessEntities, PRECOMPUTED_ENTITIES_SIZE, diff --git a/co-noir/co-noir/examples/co_noir_party0.rs b/co-noir/co-noir/examples/co_noir_party0.rs index 78ac39e73..e6ebb37fd 100644 --- a/co-noir/co-noir/examples/co_noir_party0.rs +++ b/co-noir/co-noir/examples/co_noir_party0.rs @@ -67,6 +67,7 @@ fn main() -> Result<()> { let inputs = co_noir::parse_input(dir.join("poseidon/Prover.toml"), &program_artifact)?; let recursive = true; + let has_zk = false; // parse crs let crs_size = co_noir::compute_circuit_size::(&constraint_system, recursive)?; @@ -95,7 +96,8 @@ fn main() -> Result<()> { let (proof, _) = Rep3CoUltraHonk::<_, _, Poseidon2Sponge>::prove(net, pk, &prover_crs)?; // verify proof - assert!(UltraHonk::<_, Poseidon2Sponge>::verify(proof, vk).context("while verifying proof")?); + assert!(UltraHonk::<_, Poseidon2Sponge>::verify(proof, vk, has_zk) + .context("while verifying proof")?); Ok(()) } diff --git a/co-noir/co-noir/examples/co_noir_party1.rs b/co-noir/co-noir/examples/co_noir_party1.rs index 2b5394a1d..727073529 100644 --- a/co-noir/co-noir/examples/co_noir_party1.rs +++ b/co-noir/co-noir/examples/co_noir_party1.rs @@ -64,6 +64,7 @@ fn main() -> Result<()> { let constraint_system = Utils::get_constraint_system_from_artifact(&program_artifact, true); let recursive = true; + let has_zk = false; // parse crs let crs_size = co_noir::compute_circuit_size::(&constraint_system, recursive)?; @@ -87,7 +88,8 @@ fn main() -> Result<()> { let (proof, _) = Rep3CoUltraHonk::<_, _, Poseidon2Sponge>::prove(net, pk, &prover_crs)?; // verify proof - assert!(UltraHonk::<_, Poseidon2Sponge>::verify(proof, vk).context("while verifying proof")?); + assert!(UltraHonk::<_, Poseidon2Sponge>::verify(proof, vk, has_zk) + .context("while 
verifying proof")?); Ok(()) } diff --git a/co-noir/co-noir/examples/co_noir_party2.rs b/co-noir/co-noir/examples/co_noir_party2.rs index 6083ce617..dd7b6ca5f 100644 --- a/co-noir/co-noir/examples/co_noir_party2.rs +++ b/co-noir/co-noir/examples/co_noir_party2.rs @@ -64,6 +64,7 @@ fn main() -> Result<()> { let constraint_system = Utils::get_constraint_system_from_artifact(&program_artifact, true); let recursive = true; + let has_zk = false; // parse crs let crs_size = co_noir::compute_circuit_size::(&constraint_system, recursive)?; @@ -87,7 +88,8 @@ fn main() -> Result<()> { let (proof, _) = Rep3CoUltraHonk::<_, _, Poseidon2Sponge>::prove(net, pk, &prover_crs)?; // verify proof - assert!(UltraHonk::<_, Poseidon2Sponge>::verify(proof, vk).context("while verifying proof")?); + assert!(UltraHonk::<_, Poseidon2Sponge>::verify(proof, vk, has_zk) + .context("while verifying proof")?); Ok(()) } diff --git a/co-noir/co-noir/src/bin/co-noir.rs b/co-noir/co-noir/src/bin/co-noir.rs index d6c74651a..cc192166c 100644 --- a/co-noir/co-noir/src/bin/co-noir.rs +++ b/co-noir/co-noir/src/bin/co-noir.rs @@ -609,6 +609,9 @@ pub struct VerifyCli { #[arg(long)] #[serde(skip_serializing_if = "::std::option::Option::is_none")] pub crs: Option, + /// Verify a ZK proof + #[arg(long)] + pub has_zk: bool, } /// Config for `verify` @@ -622,6 +625,8 @@ pub struct VerifyConfig { pub vk: PathBuf, /// The path to the verifier crs file pub crs: PathBuf, + /// Verify a ZK proof + pub has_zk: bool, } /// Cli arguments for `verify` @@ -1683,6 +1688,7 @@ fn run_verify(config: VerifyConfig) -> color_eyre::Result { let vk_path: PathBuf = config.vk; let crs_path = config.crs; let hasher = config.hasher; + let has_zk = config.has_zk; // parse proof file let proof_u8 = std::fs::read(&proof).context("while reading proof file")?; @@ -1701,11 +1707,10 @@ fn run_verify(config: VerifyConfig) -> color_eyre::Result { tracing::info!("Starting proof verification..."); let start = Instant::now(); let res = match hasher { - TranscriptHash::POSEIDON => { - UltraHonk::<_, Poseidon2Sponge>::verify(proof, vk).context("while verifying proof")? - } + TranscriptHash::POSEIDON => UltraHonk::<_, Poseidon2Sponge>::verify(proof, vk, has_zk) + .context("while verifying proof")?, TranscriptHash::KECCAK => { - UltraHonk::<_, Keccak256>::verify(proof, vk).context("while verifying proof")? + UltraHonk::<_, Keccak256>::verify(proof, vk, has_zk).context("while verifying proof")? } }; let duration_ms = start.elapsed().as_micros() as f64 / 1000.; diff --git a/co-noir/co-noir/src/bin/plaindriver.rs b/co-noir/co-noir/src/bin/plaindriver.rs index e5c6f2eda..4e671e715 100644 --- a/co-noir/co-noir/src/bin/plaindriver.rs +++ b/co-noir/co-noir/src/bin/plaindriver.rs @@ -165,6 +165,7 @@ fn main() -> color_eyre::Result { let circuit_path = config.circuit; let hasher = config.hasher; let out_dir = config.out_dir; + let has_zk = false; // Read circuit let program_artifact = Utils::get_program_artifact_from_file(&circuit_path) @@ -241,9 +242,11 @@ fn main() -> color_eyre::Result { // Verify the proof let is_valid = match hasher { - TranscriptHash::POSEIDON => UltraHonk::<_, Poseidon2Sponge>::verify(proof, verifying_key) - .context("While verifying proof")?, - TranscriptHash::KECCAK => UltraHonk::<_, Keccak256>::verify(proof, verifying_key) + TranscriptHash::POSEIDON => { + UltraHonk::<_, Poseidon2Sponge>::verify(proof, verifying_key, has_zk) + .context("While verifying proof")? 
+ } + TranscriptHash::KECCAK => UltraHonk::<_, Keccak256>::verify(proof, verifying_key, has_zk) .context("While verifying proof")?, }; diff --git a/co-noir/co-ultrahonk/tests/plaindriver.rs b/co-noir/co-ultrahonk/tests/plaindriver.rs index 59406e05a..e8114448f 100644 --- a/co-noir/co-ultrahonk/tests/plaindriver.rs +++ b/co-noir/co-ultrahonk/tests/plaindriver.rs @@ -22,6 +22,7 @@ fn plaindriver_test>( ) { const CRS_PATH_G1: &str = "../co-builder/src/crs/bn254_g1.dat"; const CRS_PATH_G2: &str = "../co-builder/src/crs/bn254_g2.dat"; + let has_zk = false; let constraint_system = Utils::get_constraint_system_from_file(circuit_file, true).unwrap(); let witness = Utils::get_witness_from_file(witness_file).unwrap(); @@ -54,7 +55,7 @@ fn plaindriver_test>( let read_proof = HonkProof::from_buffer(&read_proof_u8).unwrap(); assert_eq!(proof, read_proof); - let is_valid = UltraHonk::<_, H>::verify(proof, verifying_key).unwrap(); + let is_valid = UltraHonk::<_, H>::verify(proof, verifying_key, has_zk).unwrap(); assert!(is_valid); } diff --git a/co-noir/ultrahonk/Cargo.toml b/co-noir/ultrahonk/Cargo.toml index 8a3ccea76..ea1a1df25 100644 --- a/co-noir/ultrahonk/Cargo.toml +++ b/co-noir/ultrahonk/Cargo.toml @@ -14,6 +14,7 @@ acir.workspace = true ark-bn254.workspace = true ark-ec.workspace = true ark-ff.workspace = true +ark-poly.workspace = true co-builder = { version = "0.2.0", path = "../co-builder" } mpc-core = { version = "0.7.0", path = "../../mpc-core" } eyre.workspace = true @@ -23,6 +24,7 @@ num-bigint.workspace = true serde_json.workspace = true sha3 = { workspace = true } tracing.workspace = true +rand.workspace = true [dev-dependencies] rand.workspace = true diff --git a/co-noir/ultrahonk/Readme.md b/co-noir/ultrahonk/Readme.md index 2801939d3..ce9d11ae0 100644 --- a/co-noir/ultrahonk/Readme.md +++ b/co-noir/ultrahonk/Readme.md @@ -19,7 +19,7 @@ cmake --preset clang16 -DCMAKE_BUILD_TYPE=RelWithDebInfo .. cmake --build . ``` -The prover in this repository, i.e., ``UltraHonk::prove`` in `src/prover.rs`, is compatible with `UltraProver_` in Barretenberg. Similar, the ``Ultrahnok::verify`` verifier in `src/verifier.rs` is compatible with `UltraVerifier_` in Barretenberg. +The prover in this repository, i.e., ``UltraHonk::prove`` in `src/prover.rs`, is compatible with `UltraProver_` in Barretenberg. Similar, the ``UltraHonk::verify`` verifier in `src/verifier.rs` is compatible with `UltraVerifier_` in Barretenberg. Currently, the circuit builder related code in `src/parse/` is only compatible with basic field arithmetic gates from Noir, stay tuned for more features. 
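For orientation, a minimal sketch (not part of the patch) of how the new `has_zk` flag is threaded through the public verifier API, mirroring the updated call sites in the example binaries above; `proof` and `vk` are assumed to be obtained as in `co_noir_party0.rs`:

```rust
// Hedged sketch: `has_zk` must match the mode the proof was produced with.
// The ZK prover writes additional elements to the transcript (e.g.
// "Gemini:masking_poly_comm", "Libra:claimed_evaluation"), which the
// verifier only expects to read when `has_zk` is set.
let has_zk = false;
let is_valid = UltraHonk::<_, Poseidon2Sponge>::verify(proof, vk, has_zk)
    .context("while verifying proof")?;
assert!(is_valid);
```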
diff --git a/co-noir/ultrahonk/src/decider/mod.rs b/co-noir/ultrahonk/src/decider/mod.rs index bfe3826ba..5cb994702 100644 --- a/co-noir/ultrahonk/src/decider/mod.rs +++ b/co-noir/ultrahonk/src/decider/mod.rs @@ -2,6 +2,7 @@ pub(crate) mod barycentric; pub(crate) mod prover; pub(crate) mod relations; pub(crate) mod shplemini; +pub(crate) mod small_subgroup_ipa; pub(crate) mod sumcheck; pub(crate) mod types; pub(crate) mod univariate; diff --git a/co-noir/ultrahonk/src/decider/prover.rs b/co-noir/ultrahonk/src/decider/prover.rs index 7070bb3f2..fc2c9e329 100644 --- a/co-noir/ultrahonk/src/decider/prover.rs +++ b/co-noir/ultrahonk/src/decider/prover.rs @@ -1,6 +1,11 @@ -use super::{shplemini::ShpleminiOpeningClaim, sumcheck::SumcheckOutput, types::ProverMemory}; +use super::{ + shplemini::ShpleminiOpeningClaim, + sumcheck::{zk_data::ZKSumcheckData, SumcheckOutput}, + types::ProverMemory, +}; use crate::{ + decider::small_subgroup_ipa::SmallSubgroupIPAProver, transcript::{Transcript, TranscriptFieldType, TranscriptHasher}, types::HonkProof, Utils, @@ -14,13 +19,20 @@ use std::marker::PhantomData; pub(crate) struct Decider< P: HonkCurve, H: TranscriptHasher, + const SIZE: usize, > { pub(super) memory: ProverMemory
<P>
, phantom_data: PhantomData
<P>
, phantom_hasher: PhantomData<H>, } -impl<P: HonkCurve<TranscriptFieldType>, H: TranscriptHasher<TranscriptFieldType>> Decider<P, H> { +impl< + P: HonkCurve<TranscriptFieldType>, + H: TranscriptHasher<TranscriptFieldType>, + const SIZE: usize, + > Decider<P, H, SIZE> +{ + pub const SUBGROUP_SIZE: usize = 256; pub(crate) fn new(memory: ProverMemory
<P>
) -> Self { Self { memory, @@ -40,7 +52,7 @@ impl, H: TranscriptHasher // Computes the coefficients for the quotient polynomial q(X) = (p(X) - v) / (X - r) through an FFT quotient.factor_roots(&pair.challenge); let quotient_commitment = Utils::commit(&quotient.coefficients, crs)?; - // AZTEC TODO(#479): for now we compute the KZG commitment directly to unify the KZG and IPA interfaces but in the + // AZTEC TODO(#479): compute_opening_proof // future we might need to adjust this to use the incoming alternative to work queue (i.e. variation of // pthreads) or even the work queue itself transcript.send_point_to_verifier::
<P>
("KZG:W".to_string(), quotient_commitment.into()); @@ -55,10 +67,26 @@ impl, H: TranscriptHasher fn execute_relation_check_rounds( &self, transcript: &mut Transcript, + crs: &ProverCrs
<P>
, circuit_size: u32, - ) -> SumcheckOutput<P::ScalarField> { - // This is just Sumcheck.prove - self.sumcheck_prove(transcript, circuit_size) + has_zk: bool, + ) -> (SumcheckOutput<P::ScalarField>, Option<ZKSumcheckData<P>>) { + if has_zk { + let log_subgroup_size = Utils::get_msb64(Self::SUBGROUP_SIZE as u64); + let commitment_key = crs.monomials[..1 << (log_subgroup_size + 1)].to_vec(); + let mut zk_sumcheck_data: ZKSumcheckData
<P>
= ZKSumcheckData::
<P>
::new::<H>( + Utils::get_msb64(circuit_size as u64) as usize, + transcript, + &commitment_key, + ); + ( + self.sumcheck_prove_zk(transcript, circuit_size, &mut zk_sumcheck_data), + Some(zk_sumcheck_data), + ) + } else { + // This is just Sumcheck.prove + (self.sumcheck_prove(transcript, circuit_size), None) + } } /** @@ -73,10 +101,34 @@ impl, H: TranscriptHasher circuit_size: u32, crs: &ProverCrs
<P>
, sumcheck_output: SumcheckOutput<P::ScalarField>, + has_zk: bool, + zk_sumcheck_data: Option<&mut ZKSumcheckData
<P>
>, ) -> HonkProofResult<()> { - let prover_opening_claim = - self.shplemini_prove(transcript, circuit_size, crs, sumcheck_output)?; - Self::compute_opening_proof(prover_opening_claim, transcript, crs) + if !has_zk { + let prover_opening_claim = + self.shplemini_prove(transcript, circuit_size, crs, sumcheck_output, None)?; + Self::compute_opening_proof(prover_opening_claim, transcript, crs) + } else { + // SmallSubgroupIPA small_subgroup_ipa_prover( + // zk_sumcheck_data, sumcheck_output.challenge, sumcheck_output.claimed_libra_evaluation, transcript, ck); + + let small_subgroup_ipa_prover = SmallSubgroupIPAProver::<_>::new::( + zk_sumcheck_data.unwrap(), + &sumcheck_output.challenges, + sumcheck_output.claimed_libra_evaluation.unwrap(), + transcript, + crs, + ); + let witness_polynomials = small_subgroup_ipa_prover.get_witness_polynomials(); + let prover_opening_claim = self.shplemini_prove( + transcript, + circuit_size, + crs, + sumcheck_output, + Some(witness_polynomials), + )?; + Self::compute_opening_proof(prover_opening_claim, transcript, crs) + } } pub(crate) fn prove( @@ -84,15 +136,24 @@ impl, H: TranscriptHasher circuit_size: u32, crs: &ProverCrs
<P>
, mut transcript: Transcript, + has_zk: bool, ) -> HonkProofResult> { tracing::trace!("Decider prove"); // Run sumcheck subprotocol. - let sumcheck_output = self.execute_relation_check_rounds(&mut transcript, circuit_size); + let (sumcheck_output, mut zk_sumcheck_data) = + self.execute_relation_check_rounds(&mut transcript, crs, circuit_size, has_zk); // Fiat-Shamir: rho, y, x, z // Execute Zeromorph multilinear PCS - self.execute_pcs_rounds(&mut transcript, circuit_size, crs, sumcheck_output)?; + self.execute_pcs_rounds( + &mut transcript, + circuit_size, + crs, + sumcheck_output, + has_zk, + zk_sumcheck_data.as_mut(), + )?; Ok(transcript.get_proof()) } } diff --git a/co-noir/ultrahonk/src/decider/shplemini/mod.rs b/co-noir/ultrahonk/src/decider/shplemini/mod.rs index 8b72fb72d..d147966cf 100644 --- a/co-noir/ultrahonk/src/decider/shplemini/mod.rs +++ b/co-noir/ultrahonk/src/decider/shplemini/mod.rs @@ -11,11 +11,12 @@ pub(crate) struct ShpleminiVerifierOpeningClaim { pub(crate) scalars: Vec, pub(crate) commitments: Vec, } +#[derive(Clone)] pub(crate) struct ShpleminiOpeningClaim { pub(crate) polynomial: Polynomial, pub(crate) opening_pair: OpeningPair, } - +#[derive(Clone)] pub(crate) struct OpeningPair { pub(crate) challenge: F, pub(crate) evaluation: F, diff --git a/co-noir/ultrahonk/src/decider/shplemini/prover.rs b/co-noir/ultrahonk/src/decider/shplemini/prover.rs index b0680586e..a6108a1a6 100644 --- a/co-noir/ultrahonk/src/decider/shplemini/prover.rs +++ b/co-noir/ultrahonk/src/decider/shplemini/prover.rs @@ -7,7 +7,7 @@ use crate::{ decider::{shplemini::OpeningPair, verifier::DeciderVerifier}, transcript::{Transcript, TranscriptFieldType, TranscriptHasher}, types::AllEntities, - Utils, CONST_PROOF_SIZE_LOG_N, + Utils, CONST_PROOF_SIZE_LOG_N, NUM_LIBRA_EVALUATIONS, }; use ark_ec::AffineRepr; use ark_ff::{Field, One, Zero}; @@ -16,7 +16,12 @@ use co_builder::{ HonkProofResult, }; -impl, H: TranscriptHasher> Decider { +impl< + P: HonkCurve, + H: TranscriptHasher, + const SIZE: usize, + > Decider +{ fn get_f_polynomials(polys: &AllEntities>) -> PolyF> { PolyF { precomputed: &polys.precomputed, @@ -30,13 +35,37 @@ impl, H: TranscriptHasher } } + #[expect(clippy::type_complexity)] fn compute_batched_polys( &self, transcript: &mut Transcript, - n: usize, - ) -> (Polynomial, Polynomial) { + multilinear_challenge: &[P::ScalarField], + log_n: usize, + commitment_key: &ProverCrs
<P>
, + has_zk: bool, + ) -> HonkProofResult<(Polynomial<P::ScalarField>, Polynomial<P::ScalarField>)> { let f_polynomials = Self::get_f_polynomials(&self.memory.polys); let g_polynomials = Self::get_g_polynomials(&self.memory.polys); + let n = 1 << log_n; + let mut batched_unshifted = Polynomial::new_zero(n); // batched unshifted polynomials + + // To achieve ZK, we mask the batched polynomial by a random polynomial of the same size + if has_zk { + batched_unshifted = Polynomial::<P::ScalarField>::random(n); + let masking_poly_comm = Utils::commit(&batched_unshifted.coefficients, commitment_key)?; + transcript.send_point_to_verifier::
<P>
( + "Gemini:masking_poly_comm".to_string(), + masking_poly_comm.into(), + ); + // In the provers, the size of multilinear_challenge is CONST_PROOF_SIZE_LOG_N, but we need to evaluate the + // hiding polynomial as multilinear in log_n variables + let masking_poly_eval = + batched_unshifted.evaluate_mle(&multilinear_challenge[0..log_n]); + transcript.send_fr_to_verifier::
<P>
( + "Gemini:masking_poly_eval".to_string(), + masking_poly_eval, + ); + } // Generate batching challenge \rho and powers 1,...,\rho^{m-1} let rho = transcript.get_challenge::
<P>
("rho".to_string()); @@ -49,7 +78,11 @@ impl, H: TranscriptHasher // evaluations produced by sumcheck of h_i = g_i_shifted. let mut rho_challenge = P::ScalarField::ONE; - let mut batched_unshifted = Polynomial::new_zero(n); // batched unshifted polynomials + + if has_zk { + // ρ⁰ is used to batch the hiding polynomial + rho_challenge *= rho; + } for f_poly in f_polynomials.iter() { batched_unshifted.add_scaled_slice(f_poly, &rho_challenge); @@ -63,7 +96,7 @@ impl, H: TranscriptHasher rho_challenge *= rho; } - (batched_unshifted, batched_to_be_shifted) + Ok((batched_unshifted, batched_to_be_shifted)) } // /** @@ -107,13 +140,19 @@ impl, H: TranscriptHasher multilinear_challenge: Vec, log_n: usize, commitment_key: &ProverCrs
<P>
, + has_zk: bool, transcript: &mut Transcript, ) -> HonkProofResult>> { tracing::trace!("Gemini prove"); - let n = 1 << log_n; // Compute batched polynomials - let (batched_unshifted, batched_to_be_shifted) = self.compute_batched_polys(transcript, n); + let (batched_unshifted, batched_to_be_shifted) = self.compute_batched_polys( + transcript, + &multilinear_challenge, + log_n, + commitment_key, + has_zk, + )?; // Construct the batched polynomial A₀(X) = F(X) + G↺(X) = F(X) + G(X)/X let mut a_0 = batched_unshifted.to_owned(); @@ -128,15 +167,23 @@ impl, H: TranscriptHasher .enumerate() { let res = Utils::commit(&f_poly.coefficients, commitment_key)?; - transcript.send_point_to_verifier::
<P>
(format!("Gemini:a_{}", l + 1), res.into()); + transcript.send_point_to_verifier::
<P>
(format!("Gemini:FOLD_{}", l + 1), res.into()); } let res = P::G1Affine::generator(); for l in fold_polynomials.len()..CONST_PROOF_SIZE_LOG_N - 1 { - transcript.send_point_to_verifier::
<P>
(format!("Gemini:a_{}", l + 1), res); + transcript.send_point_to_verifier::
<P>
(format!("Gemini:FOLD_{}", l + 1), res); } let r_challenge: P::ScalarField = transcript.get_challenge::
<P>
("Gemini:r".to_string()); + let gemini_challenge_in_small_subgroup: bool = + has_zk && (r_challenge.pow([P::SUBGROUP_SIZE as u64]) == P::ScalarField::one()); + + if gemini_challenge_in_small_subgroup { + //TODO + panic!("Gemini evaluation challenge is in the SmallSubgroup."); + } + let (a_0_pos, a_0_neg) = Self::compute_partially_evaluated_batch_polynomials( batched_unshifted, batched_to_be_shifted, @@ -283,7 +330,8 @@ impl, H: TranscriptHasher }); // Compute univariate opening queries rₗ = r^{2ˡ} for l = 0, 1, ..., m-1 - let r_squares = DeciderVerifier::::powers_of_evaluation_challenge(r_challenge, log_n); + let r_squares = + DeciderVerifier::::powers_of_evaluation_challenge(r_challenge, log_n); // Compute the remaining m opening pairs {−r^{2ˡ}, Aₗ(−r^{2ˡ})}, l = 1, ..., m-1. @@ -315,10 +363,13 @@ impl, H: TranscriptHasher opening_claims: Vec>, commitment_key: &ProverCrs
<P>
, transcript: &mut Transcript<TranscriptFieldType, H>, + libra_opening_claims: Option<Vec<ShpleminiOpeningClaim<P::ScalarField>>>, ) -> HonkProofResult<ShpleminiOpeningClaim<P::ScalarField>> { tracing::trace!("Shplonk prove"); let nu = transcript.get_challenge::
<P>
("Shplonk:nu".to_string()); - let batched_quotient = Self::compute_batched_quotient(&opening_claims, nu); + + let batched_quotient = + Self::compute_batched_quotient(&opening_claims, nu, libra_opening_claims.clone()); let batched_quotient_commitment = Utils::commit(&batched_quotient.coefficients, commitment_key)?; transcript.send_point_to_verifier::
<P>
( @@ -333,6 +384,7 @@ impl, H: TranscriptHasher batched_quotient, nu, z, + libra_opening_claims, )) } @@ -342,17 +394,34 @@ impl, H: TranscriptHasher circuit_size: u32, crs: &ProverCrs
<P>
, sumcheck_output: SumcheckOutput, + libra_polynomials: Option<[Polynomial; NUM_LIBRA_EVALUATIONS]>, ) -> HonkProofResult> { + let has_zk = libra_polynomials.is_some(); + tracing::trace!("Shplemini prove"); let log_circuit_size = Utils::get_msb32(circuit_size); let opening_claims = self.gemini_prove( sumcheck_output.challenges, log_circuit_size as usize, crs, + has_zk, transcript, )?; - let batched_claim = self.shplonk_prove(opening_claims, crs, transcript)?; - Ok(batched_claim) + + if has_zk { + let gemini_r = opening_claims[0].opening_pair.challenge; + let libra_opening_claims = Self::compute_libra_opening_claims( + gemini_r, + libra_polynomials.expect("we have ZK"), + transcript, + ); + let batched_claim = + self.shplonk_prove(opening_claims, crs, transcript, Some(libra_opening_claims))?; + Ok(batched_claim) + } else { + let batched_claim = self.shplonk_prove(opening_claims, crs, transcript, None)?; + Ok(batched_claim) + } } /** @@ -370,8 +439,10 @@ impl, H: TranscriptHasher batched_quotient_q: Polynomial, nu_challenge: P::ScalarField, z_challenge: P::ScalarField, + libra_opening_claims: Option>>, ) -> ShpleminiOpeningClaim { tracing::trace!("Compute partially evaluated batched quotient"); + let has_zk = libra_opening_claims.is_some(); let num_opening_claims = opening_claims.len(); let mut inverse_vanishing_evals: Vec = @@ -379,14 +450,21 @@ impl, H: TranscriptHasher for claim in &opening_claims { inverse_vanishing_evals.push(z_challenge - claim.opening_pair.challenge); } + if has_zk { + // Add the terms (z - uₖ) for k = 0, …, d−1 where d is the number of rounds in Sumcheck + for claim in libra_opening_claims.clone().unwrap() { + inverse_vanishing_evals.push(z_challenge - claim.opening_pair.challenge); + } + } inverse_vanishing_evals.iter_mut().for_each(|x| { x.inverse_in_place(); }); let mut g = batched_quotient_q; - + let len = opening_claims.len(); let mut current_nu = P::ScalarField::one(); - for (idx, claim) in opening_claims.into_iter().enumerate() { + let mut idx = 0; + for claim in opening_claims.into_iter() { let mut tmp = claim.polynomial; tmp[0] -= claim.opening_pair.evaluation; let scaling_factor = current_nu * inverse_vanishing_evals[idx]; @@ -394,6 +472,26 @@ impl, H: TranscriptHasher g.add_scaled(&tmp, &-scaling_factor); current_nu *= nu_challenge; + idx += 1; + } + + // Take into account the constant proof size in Gemini + for _ in len..CONST_PROOF_SIZE_LOG_N + 2 { + current_nu *= nu_challenge; + } + + if has_zk { + for claim in libra_opening_claims.unwrap().into_iter() { + // Compute individual claim quotient tmp = ( fⱼ(X) − vⱼ) / ( X − xⱼ ) + let mut tmp = claim.polynomial; + tmp[0] -= claim.opening_pair.evaluation; + let scaling_factor = current_nu * inverse_vanishing_evals[idx]; // = νʲ / (z − xⱼ ) + + // Add the claim quotient to the batched quotient polynomial + g.add_scaled(&tmp, &-scaling_factor); + current_nu *= nu_challenge; + idx += 1; + } } ShpleminiOpeningClaim { @@ -415,10 +513,20 @@ impl, H: TranscriptHasher pub(crate) fn compute_batched_quotient( opening_claims: &Vec>, nu_challenge: P::ScalarField, + libra_opening_claims: Option>>, ) -> Polynomial { + let has_zk = libra_opening_claims.is_some(); tracing::trace!("Compute batched quotient"); // Find n, the maximum size of all polynomials fⱼ(X) let mut max_poly_size: usize = 0; + + if has_zk { + // Max size of the polynomials in Libra opening claims is Curve::SUBGROUP_SIZE*2 + 2; we round it up to the + // next power of 2 + let log_subgroup_size = Utils::get_msb32(P::SUBGROUP_SIZE as u32); + max_poly_size = 
1 << (log_subgroup_size + 1); + } + for claim in opening_claims { max_poly_size = max_poly_size.max(claim.polynomial.len()); } @@ -426,6 +534,7 @@ impl, H: TranscriptHasher // Q(X) = ∑ⱼ νʲ ⋅ ( fⱼ(X) − vⱼ) / ( X − xⱼ ) let mut q = Polynomial::new_zero(max_poly_size); let mut current_nu = P::ScalarField::one(); + for claim in opening_claims { // Compute individual claim quotient tmp = ( fⱼ(X) − vⱼ) / ( X − xⱼ ) let mut tmp = claim.polynomial.clone(); @@ -437,7 +546,64 @@ impl, H: TranscriptHasher current_nu *= nu_challenge; } + // We use the same batching challenge for Gemini and Libra opening claims. The number of the claims + // batched before adding Libra commitments and evaluations is bounded by CONST_PROOF_SIZE_LOG_N+2 + if has_zk { + for _ in opening_claims.len()..CONST_PROOF_SIZE_LOG_N + 2 { + current_nu *= nu_challenge; + } + let libra_claims = libra_opening_claims.unwrap().clone(); + for claim in libra_claims { + // Compute individual claim quotient tmp = ( fⱼ(X) − vⱼ) / ( X − xⱼ ) + let mut tmp = claim.polynomial.clone(); + tmp[0] -= claim.opening_pair.evaluation; + tmp.factor_roots(&claim.opening_pair.challenge); + + // Add the claim quotient to the batched quotient polynomial + q.add_scaled(&tmp, ¤t_nu); + current_nu *= nu_challenge; + } + } + // Return batched quotient polynomial Q(X) q } + + /** + * @brief For ZK Flavors: Evaluate the polynomials used in SmallSubgroupIPA argument, send the evaluations to the + * verifier, and populate a vector of the opening claims. + * + */ + fn compute_libra_opening_claims( + gemini_r: P::ScalarField, + libra_polynomials: [Polynomial; NUM_LIBRA_EVALUATIONS], + transcript: &mut Transcript, + ) -> Vec> { + let mut libra_opening_claims = Vec::new(); + + let subgroup_generator = P::get_subgroup_generator(); + + let libra_eval_labels = [ + "Libra:concatenation_eval", + "Libra:shifted_big_sum_eval", + "Libra:big_sum_eval", + "Libra:quotient_eval", + ]; + let evaluation_points = [gemini_r, gemini_r * subgroup_generator, gemini_r, gemini_r]; + + for (idx, &label) in libra_eval_labels.iter().enumerate() { + let new_claim = ShpleminiOpeningClaim { + polynomial: libra_polynomials[idx].clone(), + opening_pair: OpeningPair { + challenge: evaluation_points[idx], + evaluation: libra_polynomials[idx].eval_poly(evaluation_points[idx]), + }, + }; + transcript + .send_fr_to_verifier::
<P>
(label.to_string(), new_claim.opening_pair.evaluation); + libra_opening_claims.push(new_claim); + } + + libra_opening_claims + } } diff --git a/co-noir/ultrahonk/src/decider/shplemini/verifier.rs b/co-noir/ultrahonk/src/decider/shplemini/verifier.rs index 4e220a6bf..da4d98828 100644 --- a/co-noir/ultrahonk/src/decider/shplemini/verifier.rs +++ b/co-noir/ultrahonk/src/decider/shplemini/verifier.rs @@ -10,14 +10,17 @@ use crate::{ prelude::TranscriptFieldType, transcript::{Transcript, TranscriptHasher}, verifier::HonkVerifyResult, - Utils, CONST_PROOF_SIZE_LOG_N, + Utils, CONST_PROOF_SIZE_LOG_N, NUM_LIBRA_COMMITMENTS, NUM_LIBRA_EVALUATIONS, }; use ark_ec::AffineRepr; use ark_ff::{Field, One, Zero}; use co_builder::prelude::HonkCurve; -impl, H: TranscriptHasher> - DeciderVerifier +impl< + P: HonkCurve, + H: TranscriptHasher, + const SIZE: usize, + > DeciderVerifier { pub fn get_g_shift_evaluations( evaluations: &ClaimedEvaluations, @@ -109,13 +112,26 @@ impl, H: TranscriptHasher circuit_size: u32, multivariate_challenge: Vec, transcript: &mut Transcript, - // const std::vector>& concatenation_group_commitments = {}, - // RefSpan concatenated_evaluations = {} + libra_commitments: Option>, + _libra_univariate_evaluation: Option, // this is for consistency_checker, not yet implemented + // const std::vector>& concatenation_group_commitments = {}, + // RefSpan concatenated_evaluations = {} ) -> HonkVerifyResult> { tracing::trace!("Compute batch opening claim"); // Extract log_circuit_size let log_circuit_size = Utils::get_msb32(circuit_size); + let has_zk = libra_commitments.is_some(); + + let mut hiding_polynomial_commitment = P::G1Affine::default(); + let mut batched_evaluation = P::ScalarField::zero(); + if has_zk { + hiding_polynomial_commitment = transcript + .receive_point_from_prover::
<P>
("Gemini:masking_poly_comm".to_string())?; + batched_evaluation = + transcript.receive_fr_from_prover::
<P>
("Gemini:masking_poly_eval".to_string())?; + } + // Get the challenge ρ to batch commitments to multilinear polynomials and their shifts let multivariate_batching_challenge = transcript.get_challenge::
<P>
("rho".to_string()); @@ -135,6 +151,20 @@ impl, H: TranscriptHasher CONST_PROOF_SIZE_LOG_N, ); + let mut libra_evaluations: Vec<_> = Vec::with_capacity(NUM_LIBRA_EVALUATIONS); + if has_zk { + libra_evaluations.push( + transcript.receive_fr_from_prover::
<P>
("Libra:concatenation_eval".to_string())?, + ); + libra_evaluations.push( + transcript.receive_fr_from_prover::
<P>
("Libra:shifted_big_sum_eval".to_string())?, + ); + libra_evaluations + .push(transcript.receive_fr_from_prover::
<P>
("Libra:big_sum_eval".to_string())?); + libra_evaluations + .push(transcript.receive_fr_from_prover::
<P>
("Libra:quotient_eval".to_string())?); + } + // Process Shplonk transcript data: // - Get Shplonk batching challenge let shplonk_batching_challenge = transcript.get_challenge::
<P>
("Shplonk:nu".to_string()); @@ -199,15 +229,20 @@ impl, H: TranscriptHasher // } // } + if has_zk { + opening_claim.commitments.push(hiding_polynomial_commitment); + opening_claim.scalars.push(-unshifted_scalar); + } + // Place the commitments to prover polynomials in the commitments vector. Compute the evaluation of the // batched multilinear polynomial. Populate the vector of scalars for the final batch mul - let mut batched_evaluation = P::ScalarField::zero(); self.batch_multivariate_opening_claims( &multivariate_batching_challenge, &unshifted_scalar, &shifted_scalar, &mut opening_claim, &mut batched_evaluation, + has_zk, ); // Place the commitments to Gemini Aᵢ to the vector of commitments, compute the contributions from @@ -237,6 +272,27 @@ impl, H: TranscriptHasher constant_term_accumulator += gemini_evaluations[0] * shplonk_batching_challenge * inverse_vanishing_evals[1]; + // TACEO TODO: BB removes repeated commitments here to reduce the number of scalar muls + // remove_repeated_commitments(commitments, scalars, repeated_commitments, has_zk); + + // For ZK flavors, the sumcheck output contains the evaluations of Libra univariates that submitted to the + // ShpleminiVerifier, otherwise this argument is set to be empty + if has_zk { + Self::add_zk_data( + &mut opening_claim.commitments, + &mut opening_claim.scalars, + &mut constant_term_accumulator, + &libra_commitments.unwrap().as_slice().try_into().unwrap(), + &libra_evaluations.as_slice().try_into().unwrap(), + &gemini_evaluation_challenge, + &shplonk_batching_challenge, + &shplonk_evaluation_challenge, + )?; + + // *consistency_checked = SmallSubgroupIPAVerifier::check_evaluations_consistency( + // libra_evaluations, gemini_evaluation_challenge, multivariate_challenge, libra_univariate_evaluation); + } + // Finalize the batch opening claim opening_claim.commitments.push(P::G1Affine::generator()); opening_claim.scalars.push(constant_term_accumulator); @@ -356,12 +412,18 @@ impl, H: TranscriptHasher shifted_scalar: &P::ScalarField, opening_claim: &mut ShpleminiVerifierOpeningClaim
<P>
, batched_evaluation: &mut P::ScalarField, + has_zk: bool, // concatenated_scalars: Vec, // concatenation_group_commitments: &[Vec], // concatenated_evaluations: &[P::ScalarField], ) { tracing::trace!("Batch multivariate opening claims"); + let mut current_batching_challenge = P::ScalarField::one(); + if has_zk { + // ρ⁰ is used to batch the hiding polynomial which has already been added to the commitments vector + current_batching_challenge *= multivariate_batching_challenge; + } let unshifted_evaluations = Self::get_f_evaluations(&self.memory.claimed_evaluations); let shifted_evaluations = Self::get_g_shift_evaluations(&self.memory.claimed_evaluations); let unshifted_commitments = Self::get_f_comms(&self.memory.verifier_commitments); @@ -487,4 +549,72 @@ impl, H: TranscriptHasher opening_claim.commitments.push(fold_commitments[j]); } } + + /** + * @brief Add the opening data corresponding to Libra masking univariates to the batched opening claim + * + * @details After verifying ZK Sumcheck, the verifier has to validate the claims about the evaluations of Libra + * univariates used to mask Sumcheck round univariates. To minimize the overhead of such openings, we continue + * the Shplonk batching started in Gemini, i.e. we add new claims multiplied by a suitable power of the Shplonk + * batching challenge and re-use the evaluation challenge sampled to prove the evaluations of Gemini + * polynomials. + * + * @param commitments + * @param scalars + * @param libra_commitments + * @param libra_univariate_evaluations + * @param multivariate_challenge + * @param shplonk_batching_challenge + * @param shplonk_evaluation_challenge + */ + #[expect(clippy::too_many_arguments)] + fn add_zk_data( + commitments: &mut Vec, + scalars: &mut Vec, + constant_term_accumulator: &mut P::ScalarField, + libra_commitments: &[P::G1Affine; NUM_LIBRA_COMMITMENTS], + libra_evaluations: &[P::ScalarField; NUM_LIBRA_EVALUATIONS], + gemini_evaluation_challenge: &P::ScalarField, + shplonk_batching_challenge: &P::ScalarField, + shplonk_evaluation_challenge: &P::ScalarField, + ) -> HonkVerifyResult<()> { + // Compute current power of Shplonk batching challenge taking into account the const proof size + let mut shplonk_challenge_power = P::ScalarField::one(); + for _ in 0..(CONST_PROOF_SIZE_LOG_N + 2) { + shplonk_challenge_power *= *shplonk_batching_challenge; + } + + // Add Libra commitments to the vector of commitments + for &commitment in libra_commitments.iter() { + commitments.push(commitment); + } + + // Compute corresponding scalars and the correction to the constant term + let mut denominators = [P::ScalarField::zero(); NUM_LIBRA_EVALUATIONS]; + let mut batching_scalars = [P::ScalarField::zero(); NUM_LIBRA_EVALUATIONS]; + let subgroup_generator = P::get_subgroup_generator(); + + // Compute Shplonk denominators and invert them + denominators[0] = + P::ScalarField::one() / (*shplonk_evaluation_challenge - *gemini_evaluation_challenge); + denominators[1] = P::ScalarField::one() + / (*shplonk_evaluation_challenge - subgroup_generator * *gemini_evaluation_challenge); + denominators[2] = denominators[0]; + denominators[3] = denominators[0]; + + // Compute the scalars to be multiplied against the commitments [libra_concatenated], [big_sum], [big_sum], and + // [libra_quotient] + for idx in 0..NUM_LIBRA_EVALUATIONS { + let scaling_factor = denominators[idx] * shplonk_challenge_power; + batching_scalars[idx] = -scaling_factor; + shplonk_challenge_power *= *shplonk_batching_challenge; + *constant_term_accumulator += 
scaling_factor * libra_evaluations[idx]; + } + + // To save a scalar mul, add the sum of the batching scalars corresponding to the big sum evaluations + scalars.push(batching_scalars[0]); + scalars.push(batching_scalars[1] + batching_scalars[2]); + scalars.push(batching_scalars[3]); + Ok(()) + } } diff --git a/co-noir/ultrahonk/src/decider/small_subgroup_ipa.rs b/co-noir/ultrahonk/src/decider/small_subgroup_ipa.rs new file mode 100644 index 000000000..891ea0d0b --- /dev/null +++ b/co-noir/ultrahonk/src/decider/small_subgroup_ipa.rs @@ -0,0 +1,247 @@ +use ark_ec::pairing::Pairing; +use ark_ff::One; +use ark_ff::Zero; +use ark_poly::{EvaluationDomain, GeneralEvaluationDomain}; +use co_builder::prelude::{HonkCurve, Polynomial, ProverCrs}; + +use crate::prelude::TranscriptHasher; +use crate::prelude::Univariate; +use crate::Utils; +use crate::CONST_PROOF_SIZE_LOG_N; +use crate::{prelude::Transcript, transcript::TranscriptFieldType}; + +use super::sumcheck::zk_data::ZKSumcheckData; +use super::sumcheck::zk_data::LIBRA_UNIVARIATES_LENGTH; + +pub(crate) struct SmallSubgroupIPAProver { + interpolation_domain: Vec, + concatenated_polynomial: Polynomial, + libra_concatenated_lagrange_form: Polynomial, + challenge_polynomial: Polynomial, + challenge_polynomial_lagrange: Polynomial, + big_sum_polynomial_unmasked: Polynomial, + big_sum_polynomial: Polynomial, + big_sum_lagrange_coeffs: Vec, + batched_polynomial: Polynomial, + batched_quotient: Polynomial, +} + +impl> SmallSubgroupIPAProver
<P>
{ const SUBGROUP_SIZE: usize = P::SUBGROUP_SIZE; const BATCHED_POLYNOMIAL_LENGTH: usize = 2 * P::SUBGROUP_SIZE + 2; const QUOTIENT_LENGTH: usize = Self::SUBGROUP_SIZE + 2; pub(crate) fn new<H: TranscriptHasher<TranscriptFieldType>>( + zk_sumcheck_data: &ZKSumcheckData
<P>
, + multivariate_challenge: &[P::ScalarField], + claimed_ipa_eval: P::ScalarField, + transcript: &mut Transcript, + commitment_key: &ProverCrs
<P>
, + ) -> Self { + let mut prover = SmallSubgroupIPAProver { + interpolation_domain: zk_sumcheck_data.interpolation_domain.to_vec(), + + concatenated_polynomial: zk_sumcheck_data.libra_concatenated_monomial_form.clone(), + libra_concatenated_lagrange_form: zk_sumcheck_data + .libra_concatenated_lagrange_form + .clone(), + challenge_polynomial: Polynomial::new_zero(Self::SUBGROUP_SIZE), + challenge_polynomial_lagrange: Polynomial::new_zero(Self::SUBGROUP_SIZE), + big_sum_polynomial_unmasked: Polynomial::new_zero(Self::SUBGROUP_SIZE), + big_sum_polynomial: Polynomial::new_zero(Self::SUBGROUP_SIZE + 3), + big_sum_lagrange_coeffs: vec![P::ScalarField::zero(); Self::SUBGROUP_SIZE], + batched_polynomial: Polynomial::new_zero(Self::BATCHED_POLYNOMIAL_LENGTH), + batched_quotient: Polynomial::new_zero(Self::QUOTIENT_LENGTH), + }; + + // Reallocate the commitment key if necessary. This is an edge case with SmallSubgroupIPA since it has + // polynomials that may exceed the circuit size. + // if (commitment_key->dyadic_size < SUBGROUP_SIZE + 3) { + // commitment_key = std::make_shared(Self::SUBGROUP_SIZE + 3); + // } + + // if P::is_bn254() { + // prover.bn_evaluation_domain = zk_sumcheck_data.bn_evaluation_domain.clone(); + // } + + prover.compute_challenge_polynomial(multivariate_challenge); + prover.compute_big_sum_polynomial(); + let libra_big_sum_commitment = + Utils::commit(&prover.big_sum_polynomial.coefficients, commitment_key).unwrap(); + transcript.send_point_to_verifier::
<P>
( + "Libra:big_sum_commitment".to_string(), + libra_big_sum_commitment.into(), + ); + + prover.compute_batched_polynomial(claimed_ipa_eval); + prover.compute_batched_quotient(); + + let libra_quotient_commitment = + Utils::commit(&prover.batched_quotient.coefficients, commitment_key).unwrap(); + transcript.send_point_to_verifier::
<P>
( + "Libra:quotient_commitment".to_string(), + libra_quotient_commitment.into(), + ); + + prover + } + + fn compute_challenge_polynomial(&mut self, multivariate_challenge: &[P::ScalarField]) { + let mut coeffs_lagrange_basis = vec![P::ScalarField::zero(); Self::SUBGROUP_SIZE]; + coeffs_lagrange_basis[0] = P::ScalarField::one(); + + for (challenge_idx, &challenge) in multivariate_challenge + .iter() + .enumerate() + .take(CONST_PROOF_SIZE_LOG_N) + { + let poly_to_concatenate_start = 1 + LIBRA_UNIVARIATES_LENGTH * challenge_idx; + coeffs_lagrange_basis[poly_to_concatenate_start] = P::ScalarField::one(); + for idx in (poly_to_concatenate_start + 1) + ..(poly_to_concatenate_start + LIBRA_UNIVARIATES_LENGTH) + { + coeffs_lagrange_basis[idx] = coeffs_lagrange_basis[idx - 1] * challenge; + } + } + + self.challenge_polynomial_lagrange = Polynomial { + coefficients: coeffs_lagrange_basis.clone(), + }; + + let domain = GeneralEvaluationDomain::::new(Self::SUBGROUP_SIZE) + .ok_or(eyre::eyre!("Polynomial Degree too large")) + .unwrap(); + let challenge_polynomial_ifft = domain.ifft(&coeffs_lagrange_basis); + self.challenge_polynomial = Polynomial { + coefficients: challenge_polynomial_ifft, + }; + } + + fn compute_big_sum_polynomial(&mut self) { + self.big_sum_lagrange_coeffs[0] = P::ScalarField::zero(); + + for idx in 1..Self::SUBGROUP_SIZE { + let prev_idx = idx - 1; + self.big_sum_lagrange_coeffs[idx] = self.big_sum_lagrange_coeffs[prev_idx] + + self.challenge_polynomial_lagrange.coefficients[prev_idx] + * self.libra_concatenated_lagrange_form.coefficients[prev_idx]; + } + let domain = GeneralEvaluationDomain::::new(Self::SUBGROUP_SIZE) + .ok_or(eyre::eyre!("Polynomial Degree too large")) + .unwrap(); + let big_sum_ifft = domain.ifft(&self.big_sum_lagrange_coeffs); + self.big_sum_polynomial_unmasked = Polynomial { + coefficients: big_sum_ifft, + }; + + let masking_term = Univariate::::get_random(); + self.big_sum_polynomial += &self.big_sum_polynomial_unmasked.clone().coefficients; + + for idx in 0..masking_term.evaluations.len() { + self.big_sum_polynomial.coefficients[idx] -= masking_term.evaluations[idx]; + self.big_sum_polynomial.coefficients[idx + Self::SUBGROUP_SIZE] += + masking_term.evaluations[idx]; + } + } + + fn compute_batched_polynomial(&mut self, claimed_evaluation: P::ScalarField) { + let mut shifted_big_sum = Polynomial::new_zero(Self::SUBGROUP_SIZE + 3); + + for idx in 0..(Self::SUBGROUP_SIZE + 3) { + shifted_big_sum.coefficients[idx] = self.big_sum_polynomial.coefficients[idx] + * self.interpolation_domain[idx % Self::SUBGROUP_SIZE]; + } + + let (lagrange_first, lagrange_last) = Self::compute_lagrange_polynomials(); + + for i in 0..self.concatenated_polynomial.coefficients.len() { + for j in 0..self.challenge_polynomial.coefficients.len() { + self.batched_polynomial.coefficients[i + j] -= + self.concatenated_polynomial.coefficients[i] + * self.challenge_polynomial.coefficients[j]; + } + } + + for idx in 0..shifted_big_sum.coefficients.len() { + self.batched_polynomial.coefficients[idx] += + shifted_big_sum.coefficients[idx] - self.big_sum_polynomial.coefficients[idx]; + } + + for idx in (1..self.batched_polynomial.coefficients.len()).rev() { + self.batched_polynomial.coefficients[idx] = + self.batched_polynomial.coefficients[idx - 1]; + } + self.batched_polynomial.coefficients[0] = P::ScalarField::zero(); + // 2. 
Subtract 1/g(A(gX) - A(X) - F(X) * G(X)) + for idx in 0..self.batched_polynomial.coefficients.len() - 1 { + let tmp = self.batched_polynomial.coefficients[idx + 1]; + self.batched_polynomial.coefficients[idx] -= + tmp * self.interpolation_domain[Self::SUBGROUP_SIZE - 1]; + } + + for i in 0..self.big_sum_polynomial.coefficients.len() { + for j in 0..Self::SUBGROUP_SIZE { + self.batched_polynomial.coefficients[i + j] += self.big_sum_polynomial.coefficients + [i] + * (lagrange_first.coefficients[j] + lagrange_last.coefficients[j]); + } + } + + for idx in 0..Self::SUBGROUP_SIZE { + self.batched_polynomial.coefficients[idx] -= + lagrange_last.coefficients[idx] * claimed_evaluation; + } + } + + fn compute_batched_quotient(&mut self) { + let mut remainder = self.batched_polynomial.clone(); + for idx in (Self::SUBGROUP_SIZE..Self::BATCHED_POLYNOMIAL_LENGTH).rev() { + self.batched_quotient.coefficients[idx - Self::SUBGROUP_SIZE] = + remainder.coefficients[idx]; + let tmp = remainder.coefficients[idx]; + remainder.coefficients[idx - Self::SUBGROUP_SIZE] += tmp; + } + self.batched_polynomial = remainder; + } + + fn compute_lagrange_polynomials() -> (Polynomial, Polynomial) { + let mut lagrange_coeffs = vec![P::ScalarField::zero(); Self::SUBGROUP_SIZE]; + lagrange_coeffs[0] = P::ScalarField::one(); + + let domain = GeneralEvaluationDomain::::new(Self::SUBGROUP_SIZE) + .ok_or(eyre::eyre!("Polynomial Degree too large")) + .unwrap(); + let lagrange_first_ifft = domain.ifft(&lagrange_coeffs); + + let lagrange_first_monomial = Polynomial { + coefficients: lagrange_first_ifft, + }; + + lagrange_coeffs[0] = P::ScalarField::zero(); + lagrange_coeffs[Self::SUBGROUP_SIZE - 1] = P::ScalarField::one(); + + let lagrange_last_ifft = domain.ifft(&lagrange_coeffs); + + let lagrange_last_monomial = Polynomial { + coefficients: lagrange_last_ifft, + }; + + (lagrange_first_monomial, lagrange_last_monomial) + } + + pub(crate) fn get_witness_polynomials(&self) -> [Polynomial; 4] { + [ + self.concatenated_polynomial.clone(), + self.big_sum_polynomial.clone(), + self.big_sum_polynomial.clone(), + self.batched_quotient.clone(), + ] + } + + // fn get_batched_polynomial(&self) -> &Polynomial { + // &self.batched_polynomial + // } + + // fn get_challenge_polynomial(&self) -> &Polynomial { + // &self.challenge_polynomial + // } +} diff --git a/co-noir/ultrahonk/src/decider/sumcheck/mod.rs b/co-noir/ultrahonk/src/decider/sumcheck/mod.rs index 21c7125df..598c4c913 100644 --- a/co-noir/ultrahonk/src/decider/sumcheck/mod.rs +++ b/co-noir/ultrahonk/src/decider/sumcheck/mod.rs @@ -2,6 +2,7 @@ pub(crate) mod prover; pub(crate) mod round_prover; pub(crate) mod round_verifier; pub(crate) mod verifier; +pub(crate) mod zk_data; use super::types::ClaimedEvaluations; use ark_ff::PrimeField; @@ -9,9 +10,11 @@ use ark_ff::PrimeField; pub(crate) struct SumcheckOutput { pub(crate) _claimed_evaluations: ClaimedEvaluations, pub(crate) challenges: Vec, + pub(crate) claimed_libra_evaluation: Option, } pub struct SumcheckVerifierOutput { pub multivariate_challenge: Vec, pub verified: bool, + pub claimed_libra_evaluation: Option, } diff --git a/co-noir/ultrahonk/src/decider/sumcheck/prover.rs b/co-noir/ultrahonk/src/decider/sumcheck/prover.rs index cc41f58bd..a2f471bee 100644 --- a/co-noir/ultrahonk/src/decider/sumcheck/prover.rs +++ b/co-noir/ultrahonk/src/decider/sumcheck/prover.rs @@ -1,14 +1,24 @@ use crate::decider::prover::Decider; use crate::decider::sumcheck::round_prover::{SumcheckProverRound, SumcheckRoundOutput}; use 
crate::decider::sumcheck::SumcheckOutput; -use crate::decider::types::{ClaimedEvaluations, GateSeparatorPolynomial, PartiallyEvaluatePolys}; +use crate::decider::types::{ + ClaimedEvaluations, GateSeparatorPolynomial, PartiallyEvaluatePolys, + BATCHED_RELATION_PARTIAL_LENGTH, BATCHED_RELATION_PARTIAL_LENGTH_ZK, +}; use crate::transcript::{Transcript, TranscriptFieldType, TranscriptHasher}; use crate::types::AllEntities; use crate::{Utils, CONST_PROOF_SIZE_LOG_N}; -use co_builder::prelude::HonkCurve; +use co_builder::prelude::{HonkCurve, RowDisablingPolynomial}; + +use super::zk_data::ZKSumcheckData; // Keep in mind, the UltraHonk protocol (UltraFlavor) does not per default have ZK -impl, H: TranscriptHasher> Decider { +impl< + P: HonkCurve, + H: TranscriptHasher, + const SIZE: usize, + > Decider +{ pub(crate) fn partially_evaluate_init( partially_evaluated_poly: &mut PartiallyEvaluatePolys, polys: &AllEntities>, @@ -151,7 +161,132 @@ impl, H: TranscriptHasher } // Zero univariates are used to pad the proof to the fixed size CONST_PROOF_SIZE_LOG_N. - let zero_univariate = SumcheckRoundOutput::::default(); + let zero_univariate = + SumcheckRoundOutput::::default(); + for idx in multivariate_d as usize..CONST_PROOF_SIZE_LOG_N { + transcript.send_fr_iter_to_verifier::( + format!("Sumcheck:univariate_{}", idx), + &zero_univariate.evaluations, + ); + let round_challenge = transcript.get_challenge::
<P>
(format!("Sumcheck:u_{}", idx)); + multivariate_challenge.push(round_challenge); + } + + // Claimed evaluations of Prover polynomials are extracted and added to the transcript. When Flavor has ZK, the + // evaluations of all witnesses are masked. + let multivariate_evaluations = Self::extract_claimed_evaluations(partially_evaluated_polys); + Self::add_evals_to_transcript(transcript, &multivariate_evaluations); + + SumcheckOutput { + _claimed_evaluations: multivariate_evaluations, + challenges: multivariate_challenge, + claimed_libra_evaluation: None, + } + } + + pub(crate) fn sumcheck_prove_zk( + &self, + transcript: &mut Transcript, + circuit_size: u32, + zk_sumcheck_data: &mut ZKSumcheckData
<P>
, + ) -> SumcheckOutput { + tracing::trace!("Sumcheck prove"); + + //TODO // Ensure that the length of Sumcheck Round Univariates does not exceed the length of Libra masking + // polynomials. + // ASSERT(BATCHED_RELATION_PARTIAL_LENGTH <= Flavor::Curve::LIBRA_UNIVARIATES_LENGTH); + + let multivariate_n = circuit_size; + let multivariate_d = Utils::get_msb64(multivariate_n as u64); + + let mut sum_check_round = SumcheckProverRound::new(multivariate_n as usize); + let mut row_disabling_polynomial = RowDisablingPolynomial::::default(); + let mut gate_separators = GateSeparatorPolynomial::new( + self.memory.relation_parameters.gate_challenges.to_owned(), + multivariate_d as usize, + ); + + let mut multivariate_challenge = Vec::with_capacity(multivariate_d as usize); + let round_idx = 0; + + tracing::trace!("Sumcheck prove round {}", round_idx); + + // In the first round, we compute the first univariate polynomial and populate the book-keeping table of + // #partially_evaluated_polynomials, which has \f$ n/2 \f$ rows and \f$ N \f$ columns. When the Flavor has ZK, + // compute_univariate also takes into account the zk_sumcheck_data. + let round_univariate = sum_check_round.compute_univariate_zk::
<P>
( + round_idx, + &self.memory.relation_parameters, + &gate_separators, + &self.memory.polys, + zk_sumcheck_data, + &mut row_disabling_polynomial, + ); + + // Place the evaluations of the round univariate into transcript. + transcript.send_fr_iter_to_verifier::( + "Sumcheck:univariate_0".to_string(), + &round_univariate.evaluations, + ); + let round_challenge = transcript.get_challenge::
<P>
("Sumcheck:u_0".to_string()); + multivariate_challenge.push(round_challenge); + + // Prepare sumcheck book-keeping table for the next round + let mut partially_evaluated_polys = + PartiallyEvaluatePolys::new(multivariate_n as usize >> 1); + Self::partially_evaluate_init( + &mut partially_evaluated_polys, + &self.memory.polys, + multivariate_n as usize, + &round_challenge, + ); + zk_sumcheck_data.update_zk_sumcheck_data(round_challenge, round_idx); + + row_disabling_polynomial.update_evaluations(round_challenge, round_idx); + + gate_separators.partially_evaluate(round_challenge); + sum_check_round.round_size >>= 1; // AZTEC TODO(#224)(Cody): Maybe partially_evaluate should do this and + // release memory? // All but final round + // We operate on partially_evaluated_polynomials in place. + for round_idx in 1..multivariate_d as usize { + tracing::trace!("Sumcheck prove round {}", round_idx); + // Write the round univariate to the transcript + + let round_univariate = sum_check_round.compute_univariate_zk::
<P>
( + round_idx, + &self.memory.relation_parameters, + &gate_separators, + &partially_evaluated_polys, + zk_sumcheck_data, + &mut row_disabling_polynomial, + ); + + // Place the evaluations of the round univariate into transcript. + transcript.send_fr_iter_to_verifier::( + format!("Sumcheck:univariate_{}", round_idx), + &round_univariate.evaluations, + ); + let round_challenge = + transcript.get_challenge::
<P>
(format!("Sumcheck:u_{}", round_idx));
+            multivariate_challenge.push(round_challenge);
+            // Prepare sumcheck book-keeping table for the next round
+            Self::partially_evaluate_inplace(
+                &mut partially_evaluated_polys,
+                sum_check_round.round_size,
+                &round_challenge,
+            );
+            // Prepare evaluation masking and libra structures for the next round (for ZK Flavors)
+            zk_sumcheck_data.update_zk_sumcheck_data(round_challenge, round_idx);
+            row_disabling_polynomial.update_evaluations(round_challenge, round_idx);
+
+            gate_separators.partially_evaluate(round_challenge);
+            sum_check_round.round_size >>= 1;
+        }
+        tracing::trace!("Completed {multivariate_d} rounds of sumcheck");
+
+        // Zero univariates are used to pad the proof to the fixed size CONST_PROOF_SIZE_LOG_N.
+        let zero_univariate =
+            SumcheckRoundOutput::<P::ScalarField, SIZE>::default();
         for idx in multivariate_d as usize..CONST_PROOF_SIZE_LOG_N {
             transcript.send_fr_iter_to_verifier::(
                 format!("Sumcheck:univariate_{}", idx),
                 &zero_univariate.evaluations,
             );
             let round_challenge =
                 transcript.get_challenge::<P>(format!("Sumcheck:u_{}", idx));
             multivariate_challenge.push(round_challenge);
         }
@@ -166,9 +301,19 @@ impl<P: HonkCurve<TranscriptFieldType>, H: TranscriptHasher
         let multivariate_evaluations = Self::extract_claimed_evaluations(partially_evaluated_polys);
         Self::add_evals_to_transcript(transcript, &multivariate_evaluations);
+        // The evaluations \f$ g_0(u_0), \ldots, g_{d-1}(u_{d-1}) \f$ of the Libra univariates are added to the
+        // transcript.
+        let mut libra_evaluation = zk_sumcheck_data.constant_term;
+        for libra_eval in zk_sumcheck_data.libra_evaluations.clone() {
+            libra_evaluation += libra_eval;
+        }
+        transcript
+            .send_fr_to_verifier::
<P>
("Libra:claimed_evaluation".to_string(), libra_evaluation); + SumcheckOutput { _claimed_evaluations: multivariate_evaluations, challenges: multivariate_challenge, + claimed_libra_evaluation: Some(libra_evaluation), } } } diff --git a/co-noir/ultrahonk/src/decider/sumcheck/round_prover.rs b/co-noir/ultrahonk/src/decider/sumcheck/round_prover.rs index da4816876..b23a1e716 100644 --- a/co-noir/ultrahonk/src/decider/sumcheck/round_prover.rs +++ b/co-noir/ultrahonk/src/decider/sumcheck/round_prover.rs @@ -1,6 +1,13 @@ -use super::super::{ - types::{GateSeparatorPolynomial, RelationParameters, MAX_PARTIAL_RELATION_LENGTH}, - univariate::Univariate, +use super::{ + super::{ + types::{GateSeparatorPolynomial, RelationParameters, MAX_PARTIAL_RELATION_LENGTH}, + univariate::Univariate, + }, + zk_data::ZKSumcheckData, +}; +use crate::decider::{ + sumcheck::zk_data::LIBRA_UNIVARIATES_LENGTH, + types::{BATCHED_RELATION_PARTIAL_LENGTH, BATCHED_RELATION_PARTIAL_LENGTH_ZK}, }; use crate::{ decider::{ @@ -21,9 +28,9 @@ use crate::{ types::AllEntities, }; use ark_ff::PrimeField; -use co_builder::prelude::HonkCurve; +use co_builder::prelude::{HonkCurve, RowDisablingPolynomial}; -pub(crate) type SumcheckRoundOutput = Univariate; +pub(crate) type SumcheckRoundOutput = Univariate; pub(crate) struct SumcheckProverRound { pub(crate) round_size: usize, @@ -61,8 +68,8 @@ impl SumcheckProverRound { * @param result Round univariate \f$ \tilde{S}^i\f$ represented by its evaluations over \f$ \{0,\ldots, D\} \f$. * @param gate_sparators Round \f$pow_{\beta}\f$-factor \f$ ( (1−X_i) + X_i\cdot \beta_i )\f$. */ - fn extend_and_batch_univariates( - result: &mut SumcheckRoundOutput, + fn extend_and_batch_univariates( + result: &mut SumcheckRoundOutput, univariate_accumulators: AllRelationAcc, gate_sparators: &GateSeparatorPolynomial, ) { @@ -94,11 +101,11 @@ impl SumcheckProverRound { * @param challenge Challenge \f$\alpha\f$. * @param gate_sparators Round \f$pow_{\beta}\f$-factor given by \f$ ( (1−u_i) + u_i\cdot \beta_i )\f$. */ - fn batch_over_relations_univariates( + fn batch_over_relations_univariates( mut univariate_accumulators: AllRelationAcc, alphas: &[F; crate::NUM_ALPHAS], gate_sparators: &GateSeparatorPolynomial, - ) -> SumcheckRoundOutput { + ) -> SumcheckRoundOutput { tracing::trace!("batch over relations"); let running_challenge = F::one(); @@ -209,7 +216,7 @@ impl SumcheckProverRound { relation_parameters: &RelationParameters, gate_sparators: &GateSeparatorPolynomial, polynomials: &AllEntities>, - ) -> SumcheckRoundOutput { + ) -> SumcheckRoundOutput { tracing::trace!("Sumcheck round {}", round_index); // Barretenberg uses multithreading here @@ -240,4 +247,127 @@ impl SumcheckProverRound { gate_sparators, ) } + + pub(crate) fn compute_univariate_zk>( + &self, + round_index: usize, + relation_parameters: &RelationParameters, + gate_sparators: &GateSeparatorPolynomial, + polynomials: &AllEntities>, + zk_sumcheck_data: &ZKSumcheckData
<P>
,
+        row_disabling_polynomial: &mut RowDisablingPolynomial<P::ScalarField>,
+    ) -> SumcheckRoundOutput<P::ScalarField, BATCHED_RELATION_PARTIAL_LENGTH_ZK> {
+        tracing::trace!("Sumcheck round {}", round_index);
+
+        // Barretenberg uses multithreading here
+
+        // Construct extended edge containers
+        let mut extended_edge = ProverUnivariates::<P::ScalarField>::default();
+
+        let mut univariate_accumulators = AllRelationAcc::<P::ScalarField>::default();
+
+        // Accumulate the contribution from each sub-relation across each edge of the hyper-cube
+        for edge_idx in (0..self.round_size).step_by(2) {
+            Self::extend_edges(&mut extended_edge, polynomials, edge_idx);
+            // Compute the \f$ \ell \f$-th edge's univariate contribution,
+            // scale it by the corresponding \f$ pow_{\beta} \f$ contribution and add it to the accumulators for \f$
+            // \tilde{S}^i(X_i) \f$. If \f$ \ell \f$'s binary representation is given by \f$ (\ell_{i+1},\ldots,
+            // \ell_{d-1})\f$, the \f$ pow_{\beta}\f$-contribution is \f$\beta_{i+1}^{\ell_{i+1}} \cdot \ldots \cdot
+            // \beta_{d-1}^{\ell_{d-1}}\f$.
+            Self::accumulate_relation_univariates::
<P>
( + &mut univariate_accumulators, + &extended_edge, + relation_parameters, + &gate_sparators.beta_products[(edge_idx >> 1) * gate_sparators.periodicity], + ); + } + + let contribution_from_disabled_rows = Self::compute_disabled_contribution::
<P>
( + polynomials, + relation_parameters, + gate_sparators, + &self.round_size, + round_index, + row_disabling_polynomial, + ); + + let libra_round_univariate = + Self::compute_libra_round_univariate(zk_sumcheck_data, round_index); + + let round_univariate = + Self::batch_over_relations_univariates::<_, BATCHED_RELATION_PARTIAL_LENGTH_ZK>( + univariate_accumulators, + &relation_parameters.alphas, + gate_sparators, + ); + round_univariate + libra_round_univariate - contribution_from_disabled_rows + } + + fn compute_libra_round_univariate>( + zk_sumcheck_data: &ZKSumcheckData
<P>
, + round_idx: usize, + ) -> SumcheckRoundOutput { + let mut libra_round_univariate = + Univariate::::default(); + + // select the i'th column of Libra book-keeping table + let current_column = &zk_sumcheck_data.libra_univariates[round_idx]; + // the evaluation of Libra round univariate at k=0...D are equal to \f$\texttt{libra_univariates}_{i}(k)\f$ + // corrected by the Libra running sum + for idx in 0..LIBRA_UNIVARIATES_LENGTH { + libra_round_univariate.evaluations[idx] = current_column + .eval_poly(P::ScalarField::from(idx as u64)) + + zk_sumcheck_data.libra_running_sum; + } + if MAX_PARTIAL_RELATION_LENGTH + 2 == LIBRA_UNIVARIATES_LENGTH { + libra_round_univariate + } else { + todo!("case not implemented yet") + // libra_round_univariate + // .extend_to::<{ SumcheckRoundOutput::{MAX_PARTIAL_RELATION_LENGTH + 2} }>() + } + } + + fn compute_disabled_contribution>( + polynomials: &AllEntities>, + relation_parameters: &RelationParameters, + gate_sparators: &GateSeparatorPolynomial, + round_size: &usize, + round_idx: usize, + row_disabling_polynomial: &RowDisablingPolynomial, + ) -> SumcheckRoundOutput { + let mut univariate_accumulators = AllRelationAcc::::default(); + let mut extended_edges = ProverUnivariates::::default(); + // In Round 0, we have to compute the contribution from 2 edges: n - 1 = (1,1,...,1) and n-4 = (0,1,...,1). + let start_edge_idx = if round_idx == 0 { + round_size - 4 + } else { + round_size - 2 + }; + + for edge_idx in (start_edge_idx..*round_size).step_by(2) { + Self::extend_edges(&mut extended_edges, polynomials, edge_idx); + Self::accumulate_relation_univariates::
<P>
( + &mut univariate_accumulators, + &extended_edges, + relation_parameters, + &gate_sparators.beta_products[(edge_idx >> 1) * gate_sparators.periodicity], + ); + } + let mut result = Self::batch_over_relations_univariates( + univariate_accumulators, + &relation_parameters.alphas, + gate_sparators, + ); + + let mut row_disabling_factor = + Univariate::::default(); + row_disabling_factor.extend_from(&[ + row_disabling_polynomial.eval_at_0, + row_disabling_polynomial.eval_at_1, + ]); + result *= row_disabling_factor; + + result + } } diff --git a/co-noir/ultrahonk/src/decider/sumcheck/round_verifier.rs b/co-noir/ultrahonk/src/decider/sumcheck/round_verifier.rs index 2ef1545b5..bb71d6f76 100644 --- a/co-noir/ultrahonk/src/decider/sumcheck/round_verifier.rs +++ b/co-noir/ultrahonk/src/decider/sumcheck/round_verifier.rs @@ -19,18 +19,19 @@ use crate::{ use ark_ff::{One, Zero}; use co_builder::prelude::HonkCurve; -pub(crate) struct SumcheckVerifierRound> { +pub(crate) struct SumcheckVerifierRound, const SIZE: usize> { pub(crate) target_total_sum: P::ScalarField, pub(crate) round_failed: bool, } -impl> Default for SumcheckVerifierRound
<P>
{ +impl, const SIZE: usize> Default + for SumcheckVerifierRound +{ fn default() -> Self { Self::new() } } - -impl> SumcheckVerifierRound
<P>
{ +impl, const SIZE: usize> SumcheckVerifierRound { pub(crate) fn new() -> Self { Self { target_total_sum: P::ScalarField::zero(), @@ -40,14 +41,17 @@ impl> SumcheckVerifierRound
<P>
{ pub(crate) fn compute_next_target_sum( &mut self, - univariate: &SumcheckRoundOutput, + univariate: &SumcheckRoundOutput, round_challenge: P::ScalarField, ) { tracing::trace!("Compute target sum"); self.target_total_sum = univariate.evaluate(round_challenge); } - pub(crate) fn check_sum(&mut self, univariate: &SumcheckRoundOutput) -> bool { + pub(crate) fn check_sum( + &mut self, + univariate: &SumcheckRoundOutput, + ) -> bool { tracing::trace!("Check sum"); let total_sum = univariate.evaluations[0] + univariate.evaluations[1]; let sumcheck_round_failed = self.target_total_sum != total_sum; diff --git a/co-noir/ultrahonk/src/decider/sumcheck/verifier.rs b/co-noir/ultrahonk/src/decider/sumcheck/verifier.rs index 077515257..f4a2488f4 100644 --- a/co-noir/ultrahonk/src/decider/sumcheck/verifier.rs +++ b/co-noir/ultrahonk/src/decider/sumcheck/verifier.rs @@ -2,7 +2,6 @@ use super::SumcheckVerifierOutput; use crate::{ decider::{ sumcheck::{round_prover::SumcheckRoundOutput, round_verifier::SumcheckVerifierRound}, - types::MAX_PARTIAL_RELATION_LENGTH, verifier::DeciderVerifier, }, prelude::{GateSeparatorPolynomial, TranscriptFieldType}, @@ -11,16 +10,22 @@ use crate::{ verifier::HonkVerifyResult, Utils, CONST_PROOF_SIZE_LOG_N, }; +use ark_ff::One; +use ark_ff::PrimeField; use co_builder::prelude::HonkCurve; // Keep in mind, the UltraHonk protocol (UltraFlavor) does not per default have ZK -impl, H: TranscriptHasher> - DeciderVerifier +impl< + P: HonkCurve, + H: TranscriptHasher, + const SIZE: usize, + > DeciderVerifier { pub(crate) fn sumcheck_verify( &mut self, transcript: &mut Transcript, circuit_size: u32, + has_zk: bool, ) -> HonkVerifyResult> { tracing::trace!("Sumcheck verify"); @@ -37,7 +42,17 @@ impl, H: TranscriptHasher return Err(eyre::eyre!("Number of variables in multivariate is 0")); } - let mut sum_check_round = SumcheckVerifierRound::
<P>
::default(); + let mut sum_check_round = SumcheckVerifierRound::::default(); + let mut libra_challenge = P::ScalarField::one(); + if has_zk { + // If running zero-knowledge sumcheck the target total sum is corrected by the claimed sum of libra masking + // multivariate over the hypercube + + let libra_total_sum = + transcript.receive_fr_from_prover::
<P>
("Libra:Sum".to_string())?; + libra_challenge = transcript.get_challenge::
<P>
("Libra:Challenge".to_string()); + sum_check_round.target_total_sum += libra_total_sum * libra_challenge; + } let mut multivariate_challenge = Vec::with_capacity(multivariate_d as usize); @@ -45,10 +60,8 @@ impl, H: TranscriptHasher tracing::trace!("Sumcheck verify round {}", round_idx); let round_univariate_label = format!("Sumcheck:univariate_{}", round_idx); - let evaluations = transcript - .receive_fr_array_from_verifier::( - round_univariate_label, - )?; + let evaluations = + transcript.receive_fr_array_from_verifier::(round_univariate_label)?; let round_univariate = SumcheckRoundOutput { evaluations }; let round_challenge = @@ -85,19 +98,49 @@ impl, H: TranscriptHasher // Evaluate the Honk relation at the point (u_0, ..., u_{d-1}) using claimed evaluations of prover polynomials. - let full_honk_purported_value = - SumcheckVerifierRound::
<P>
::compute_full_relation_purported_value( + let mut full_honk_purported_value = + SumcheckVerifierRound::::compute_full_relation_purported_value( &self.memory.claimed_evaluations, &self.memory.relation_parameters, gate_separators, ); + let mut libra_evaluation = P::ScalarField::one(); + // For ZK Flavors: the evaluation of the Row Disabling Polynomial at the sumcheck challenge + if has_zk { + libra_evaluation = + transcript.receive_fr_from_prover::
<P>
("Libra:claimed_evaluation".to_string())?; + // No recursive flavor, otherwise we need to make some modifications to the following + + let correcting_factor = evaluate_at_challenge::( + &multivariate_challenge, + multivariate_d as usize, + ); + + full_honk_purported_value = + full_honk_purported_value * correcting_factor + libra_evaluation * libra_challenge; + } + let checked = full_honk_purported_value == sum_check_round.target_total_sum; verified = verified && checked; Ok(SumcheckVerifierOutput { multivariate_challenge, verified, + claimed_libra_evaluation: if has_zk { Some(libra_evaluation) } else { None }, }) } } + +fn evaluate_at_challenge( + multivariate_challenge: &[F], + log_circuit_size: usize, +) -> F { + let mut evaluation_at_multivariate_challenge = F::one(); + + for &challenge in &multivariate_challenge[2..log_circuit_size] { + evaluation_at_multivariate_challenge *= challenge; + } + + F::one() - evaluation_at_multivariate_challenge +} diff --git a/co-noir/ultrahonk/src/decider/sumcheck/zk_data.rs b/co-noir/ultrahonk/src/decider/sumcheck/zk_data.rs new file mode 100644 index 000000000..3e88fb168 --- /dev/null +++ b/co-noir/ultrahonk/src/decider/sumcheck/zk_data.rs @@ -0,0 +1,222 @@ +use crate::prelude::Transcript; +use crate::prelude::TranscriptHasher; +use crate::prelude::Univariate; +use crate::transcript::TranscriptFieldType; +use crate::Utils; +use ark_ec::pairing::Pairing; +use ark_ff::One; +use ark_ff::UniformRand; +use ark_ff::Zero; +use ark_poly::{EvaluationDomain, GeneralEvaluationDomain}; +use co_builder::prelude::HonkCurve; +use co_builder::prelude::Polynomial; +use co_builder::prelude::ProverCrs; + +const SUBGROUP_SIZE: usize = 256; +pub const LIBRA_UNIVARIATES_LENGTH: usize = 9; + +pub(crate) struct ZKSumcheckData { + pub(crate) constant_term: P::ScalarField, + pub(crate) interpolation_domain: [P::ScalarField; SUBGROUP_SIZE], + pub(crate) libra_concatenated_lagrange_form: Polynomial, + pub(crate) libra_concatenated_monomial_form: Polynomial, + pub(crate) libra_univariates: Vec>, + pub(crate) log_circuit_size: usize, + pub(crate) libra_scaling_factor: P::ScalarField, + pub(crate) libra_challenge: P::ScalarField, + pub(crate) libra_total_sum: P::ScalarField, + pub(crate) libra_running_sum: P::ScalarField, + pub(crate) libra_evaluations: Vec, + pub(crate) _univariate_length: usize, +} + +impl> ZKSumcheckData
<P>
{ + pub(crate) fn new>( + multivariate_d: usize, + transcript: &mut Transcript, + commitment_key: &[P::G1Affine], + ) -> Self { + let mut rng = rand::thread_rng(); + let constant_term = P::ScalarField::rand(&mut rng); + let libra_challenge = P::ScalarField::rand(&mut rng); + let libra_univariates = + Self::generate_libra_univariates(multivariate_d, LIBRA_UNIVARIATES_LENGTH); + let log_circuit_size = multivariate_d; + let univariate_length = LIBRA_UNIVARIATES_LENGTH; + + let mut data = ZKSumcheckData { + constant_term, + interpolation_domain: [P::ScalarField::zero(); SUBGROUP_SIZE], + libra_concatenated_lagrange_form: Polynomial::new_zero(SUBGROUP_SIZE), + libra_concatenated_monomial_form: Polynomial::new_zero(SUBGROUP_SIZE + 2), + libra_univariates, + log_circuit_size, + libra_scaling_factor: P::ScalarField::one(), + libra_challenge, + libra_total_sum: P::ScalarField::zero(), + libra_running_sum: P::ScalarField::zero(), + libra_evaluations: Vec::new(), + _univariate_length: univariate_length, + }; + + data.create_interpolation_domain(); + data.compute_concatenated_libra_polynomial(); + // If proving_key is provided, commit to the concatenated and masked libra polynomial + if !commitment_key.is_empty() { + let libra_commitment = Utils::commit( + &data.libra_concatenated_monomial_form.coefficients, + &ProverCrs::
<P>
{ + monomials: commitment_key.to_vec(), + }, + ) + .unwrap(); + transcript.send_point_to_verifier::
<P>
( + "Libra:concatenation_commitment".to_string(), + libra_commitment.into(), + ); + } + + // Compute the total sum of the Libra polynomials + data.libra_total_sum = Self::compute_libra_total_sum( + &data.libra_univariates, + &mut data.libra_scaling_factor, + data.constant_term, + ); + // Send the Libra total sum to the transcript + transcript.send_fr_to_verifier::
<P>
("Libra:Sum".to_string(), data.libra_total_sum); + data.libra_challenge = transcript.get_challenge::
<P>
("Libra:Challenge".to_string()); + data.libra_running_sum = data.libra_total_sum * data.libra_challenge; + data.setup_auxiliary_data(); + + data + } + + fn generate_libra_univariates( + number_of_polynomials: usize, + univariate_length: usize, + ) -> Vec> { + (0..number_of_polynomials) + .map(|_| Polynomial::random(univariate_length)) + .collect() + } + + fn compute_libra_total_sum( + libra_univariates: &[Polynomial], + scaling_factor: &mut P::ScalarField, + constant_term: P::ScalarField, + ) -> P::ScalarField { + let mut total_sum = P::ScalarField::zero(); + let two_inv: P::ScalarField = P::ScalarField::one() / P::ScalarField::from(2); + *scaling_factor *= two_inv; + + for univariate in libra_univariates { + total_sum += univariate.coefficients[0] + univariate.eval_poly(P::ScalarField::one()); + *scaling_factor *= P::ScalarField::from(2); + } + total_sum *= *scaling_factor; + + total_sum + constant_term * P::ScalarField::from(1 << libra_univariates.len()) + } + + fn setup_auxiliary_data(&mut self) { + let two_inv: P::ScalarField = P::ScalarField::one() / P::ScalarField::from(2); + self.libra_scaling_factor *= self.libra_challenge; + for univariate in &mut self.libra_univariates { + *univariate *= self.libra_scaling_factor; + } + self.libra_running_sum += -self.libra_univariates[0].coefficients[0] + - self.libra_univariates[0].eval_poly(P::ScalarField::one()); + self.libra_running_sum *= two_inv; + } + + fn create_interpolation_domain(&mut self) { + self.interpolation_domain[0] = P::ScalarField::one(); + // TACEO TODO remove unwrap + let subgroup_generator = P::get_subgroup_generator(); + for idx in 1..SUBGROUP_SIZE { + self.interpolation_domain[idx] = + self.interpolation_domain[idx - 1] * subgroup_generator; + } + } + + fn compute_concatenated_libra_polynomial(&mut self) { + let mut coeffs_lagrange_subgroup = [P::ScalarField::zero(); SUBGROUP_SIZE]; + coeffs_lagrange_subgroup[0] = self.constant_term; + + for poly_idx in 0..self.log_circuit_size { + for idx in 0..LIBRA_UNIVARIATES_LENGTH { + let idx_to_populate = 1 + poly_idx * LIBRA_UNIVARIATES_LENGTH + idx; + coeffs_lagrange_subgroup[idx_to_populate] = + self.libra_univariates[poly_idx].coefficients[idx]; + } + } + + self.libra_concatenated_lagrange_form = Polynomial:: { + coefficients: coeffs_lagrange_subgroup.to_vec(), + }; + + let masking_scalars = Univariate::::get_random(); + + // if !P::is_bn254() { + // libra_concatenated_monomial_form_unmasked = Polynomial:: { + // coefficients: coeffs_lagrange_subgroup.to_vec(), + // }; + // } else { + // TACEO TODO remove unwrap + let domain = GeneralEvaluationDomain::::new(SUBGROUP_SIZE) + .ok_or(eyre::eyre!("Polynomial Degree too large")) + .unwrap(); + + let coeffs_lagrange_subgroup_ifft = domain.ifft(&coeffs_lagrange_subgroup); + let libra_concatenated_monomial_form_unmasked = Polynomial:: { + coefficients: coeffs_lagrange_subgroup_ifft, + }; + // } + + for idx in 0..SUBGROUP_SIZE { + self.libra_concatenated_monomial_form.coefficients[idx] = + libra_concatenated_monomial_form_unmasked.coefficients[idx]; + } + + for idx in 0..masking_scalars.evaluations.len() { + self.libra_concatenated_monomial_form.coefficients[idx] -= + masking_scalars.evaluations[idx]; + self.libra_concatenated_monomial_form.coefficients[SUBGROUP_SIZE + idx] += + masking_scalars.evaluations[idx]; + } + } + + pub(crate) fn update_zk_sumcheck_data( + &mut self, + round_challenge: P::ScalarField, + round_idx: usize, + ) { + let two_inv: P::ScalarField = P::ScalarField::one() / P::ScalarField::from(2); + + if round_idx < 
self.log_circuit_size - 1 { + for univariate in &mut self.libra_univariates { + *univariate *= two_inv; + } + + let libra_evaluation = self.libra_univariates[round_idx].eval_poly(round_challenge); + let next_libra_univariate = &self.libra_univariates[round_idx + 1]; + + self.libra_running_sum += -next_libra_univariate.coefficients[0] + - next_libra_univariate.eval_poly(P::ScalarField::one()); + self.libra_running_sum *= two_inv; + + self.libra_running_sum += libra_evaluation; + self.libra_scaling_factor *= two_inv; + + self.libra_evaluations + .push(libra_evaluation / self.libra_scaling_factor); + } else { + let libra_evaluation = self.libra_univariates[round_idx].eval_poly(round_challenge) + / self.libra_scaling_factor; + self.libra_evaluations.push(libra_evaluation); + for univariate in &mut self.libra_univariates { + *univariate *= P::ScalarField::one() / self.libra_challenge; + } + } + } +} diff --git a/co-noir/ultrahonk/src/decider/types.rs b/co-noir/ultrahonk/src/decider/types.rs index 5116f1e9e..a97d53b6a 100644 --- a/co-noir/ultrahonk/src/decider/types.rs +++ b/co-noir/ultrahonk/src/decider/types.rs @@ -18,6 +18,9 @@ pub(crate) struct VerifierMemory { } pub(crate) const MAX_PARTIAL_RELATION_LENGTH: usize = 7; +pub(crate) const BATCHED_RELATION_PARTIAL_LENGTH: usize = MAX_PARTIAL_RELATION_LENGTH + 1; +pub(crate) const BATCHED_RELATION_PARTIAL_LENGTH_ZK: usize = MAX_PARTIAL_RELATION_LENGTH + 2; + pub(crate) type ProverUnivariates = AllEntities>; pub(crate) type PartiallyEvaluatePolys = AllEntities>; pub(crate) type ClaimedEvaluations = AllEntities; diff --git a/co-noir/ultrahonk/src/decider/univariate.rs b/co-noir/ultrahonk/src/decider/univariate.rs index df3b456c5..ac1aa96f5 100644 --- a/co-noir/ultrahonk/src/decider/univariate.rs +++ b/co-noir/ultrahonk/src/decider/univariate.rs @@ -224,6 +224,15 @@ impl Univariate { *result += extended; } } + + pub(crate) fn get_random() -> Self { + let mut rng = rand::thread_rng(); + let mut evaluations = [F::one(); SIZE]; + for eval in evaluations.iter_mut() { + *eval = F::rand(&mut rng); + } + Self { evaluations } + } } impl Default for Univariate { diff --git a/co-noir/ultrahonk/src/decider/verifier.rs b/co-noir/ultrahonk/src/decider/verifier.rs index 2bd706d92..bc80a190a 100644 --- a/co-noir/ultrahonk/src/decider/verifier.rs +++ b/co-noir/ultrahonk/src/decider/verifier.rs @@ -6,7 +6,7 @@ use crate::{ prelude::TranscriptFieldType, transcript::{Transcript, TranscriptHasher}, verifier::HonkVerifyResult, - Utils, + Utils, NUM_LIBRA_COMMITMENTS, }; use ark_ec::AffineRepr; use ark_ff::One; @@ -16,14 +16,18 @@ use std::marker::PhantomData; pub(crate) struct DeciderVerifier< P: HonkCurve, H: TranscriptHasher, + const SIZE: usize, > { pub(super) memory: VerifierMemory
<P>
, phantom_data: PhantomData
<P>
,
     phantom_hasher: PhantomData<H>,
 }

-impl<P: HonkCurve<TranscriptFieldType>, H: TranscriptHasher<TranscriptFieldType>>
-    DeciderVerifier<P, H>
+impl<
+        P: HonkCurve<TranscriptFieldType>,
+        H: TranscriptHasher<TranscriptFieldType>,
+        const SIZE: usize,
+    > DeciderVerifier<P, H, SIZE>
 {
     pub(crate) fn new(memory: VerifierMemory
<P>
) -> Self { Self { @@ -90,19 +94,44 @@ impl, H: TranscriptHasher circuit_size: u32, crs: &P::G2Affine, mut transcript: Transcript, + has_zk: bool, ) -> HonkVerifyResult { tracing::trace!("Decider verification"); + let mut libra_commitments = Vec::with_capacity(NUM_LIBRA_COMMITMENTS); + if has_zk { + libra_commitments + .push(transcript.receive_point_from_prover::
<P>
( + "Libra:concatenation_commitment".to_string(), + )?); + } - let sumcheck_output = self.sumcheck_verify(&mut transcript, circuit_size)?; + let sumcheck_output = self.sumcheck_verify(&mut transcript, circuit_size, has_zk)?; if !sumcheck_output.verified { tracing::trace!("Sumcheck failed"); return Ok(false); } + if has_zk { + libra_commitments.push( + transcript + .receive_point_from_prover::
<P>
("Libra:big_sum_commitment".to_string())?, + ); + libra_commitments.push( + transcript + .receive_point_from_prover::
<P>
("Libra:quotient_commitment".to_string())?, + ); + } + let libra_commitments = if has_zk { + Some(libra_commitments) + } else { + None + }; let mut opening_claim = self.compute_batch_opening_claim( circuit_size, sumcheck_output.multivariate_challenge, &mut transcript, + libra_commitments, + sumcheck_output.claimed_libra_evaluation, )?; let pairing_points = Self::reduce_verify_shplemini(&mut opening_claim, transcript)?; diff --git a/co-noir/ultrahonk/src/lib.rs b/co-noir/ultrahonk/src/lib.rs index 37b44cf15..727bd2528 100644 --- a/co-noir/ultrahonk/src/lib.rs +++ b/co-noir/ultrahonk/src/lib.rs @@ -24,6 +24,9 @@ pub const NUM_ALPHAS: usize = decider::relations::NUM_SUBRELATIONS - 1; /// The log of the max circuit size assumed in order to achieve constant sized Honk proofs /// AZTEC TODO(): Remove the need for const sized proofs pub const CONST_PROOF_SIZE_LOG_N: usize = 28; +// For ZK Flavors: the number of the commitments required by Libra and SmallSubgroupIPA. +pub const NUM_LIBRA_COMMITMENTS: usize = 3; +pub const NUM_LIBRA_EVALUATIONS: usize = 4; pub struct Utils {} diff --git a/co-noir/ultrahonk/src/prover.rs b/co-noir/ultrahonk/src/prover.rs index 3df7fdda7..7fc26c2b4 100644 --- a/co-noir/ultrahonk/src/prover.rs +++ b/co-noir/ultrahonk/src/prover.rs @@ -1,5 +1,10 @@ use crate::{ - decider::{prover::Decider, types::ProverMemory}, + decider::{ + prover::Decider, + types::{ + ProverMemory, BATCHED_RELATION_PARTIAL_LENGTH, BATCHED_RELATION_PARTIAL_LENGTH_ZK, + }, + }, oink::prover::Oink, transcript::{Transcript, TranscriptFieldType, TranscriptHasher}, types::HonkProof, @@ -33,7 +38,10 @@ impl, H: TranscriptHasher gate_challenges } - pub fn prove(proving_key: ProvingKey
<P>
) -> HonkProofResult<HonkProof<TranscriptFieldType>> {
+    pub fn prove(
+        proving_key: ProvingKey
<P>
, + has_zk: bool, + ) -> HonkProofResult> { tracing::trace!("UltraHonk prove"); let mut transcript = Transcript::::new(); @@ -49,7 +57,12 @@ impl, H: TranscriptHasher memory.relation_parameters.gate_challenges = Self::generate_gate_challenges(&mut transcript); - let decider = Decider::new(memory); - decider.prove(cicruit_size, &crs, transcript) + if !has_zk { + let decider = Decider::<_, _, BATCHED_RELATION_PARTIAL_LENGTH>::new(memory); + decider.prove(cicruit_size, &crs, transcript, has_zk) + } else { + let decider = Decider::<_, _, BATCHED_RELATION_PARTIAL_LENGTH_ZK>::new(memory); + decider.prove(cicruit_size, &crs, transcript, has_zk) + } } } diff --git a/co-noir/ultrahonk/src/sponge_hasher.rs b/co-noir/ultrahonk/src/sponge_hasher.rs index ff5579a68..99a1e080c 100644 --- a/co-noir/ultrahonk/src/sponge_hasher.rs +++ b/co-noir/ultrahonk/src/sponge_hasher.rs @@ -142,7 +142,7 @@ where res } - pub(crate) fn hash_fixed_lenth(input: &[F]) -> [F; OUT_LEN] { + pub(crate) fn hash_fixed_length(input: &[F]) -> [F; OUT_LEN] { Self::hash_internal::(input) } diff --git a/co-noir/ultrahonk/src/transcript.rs b/co-noir/ultrahonk/src/transcript.rs index 0c9310cf8..d9152b68a 100644 --- a/co-noir/ultrahonk/src/transcript.rs +++ b/co-noir/ultrahonk/src/transcript.rs @@ -21,7 +21,7 @@ impl + Default TranscriptHasher for FieldSponge { fn hash(buffer: Vec) -> F { - Self::hash_fixed_lenth::<1>(&buffer)[0] + Self::hash_fixed_length::<1>(&buffer)[0] } } @@ -121,6 +121,10 @@ where } fn send_to_verifier(&mut self, label: String, elements: &[F]) { + // println!("label: {label}"); + // elements + // .iter() + // .for_each(|y| println!("{:?}", y.to_string())); self.proof_data.extend(elements); self.consume_prover_elements(label, elements); } @@ -157,6 +161,7 @@ where } fn receive_n_from_prover(&mut self, label: String, n: usize) -> HonkProofResult> { + // println!("label: {label}"); if self.num_frs_read + n > self.proof_data.len() { return Err(HonkProofError::ProofTooSmall); } diff --git a/co-noir/ultrahonk/src/types.rs b/co-noir/ultrahonk/src/types.rs index dff74ebda..27b36c85f 100644 --- a/co-noir/ultrahonk/src/types.rs +++ b/co-noir/ultrahonk/src/types.rs @@ -6,7 +6,7 @@ use co_builder::{ #[derive(Clone, Debug, PartialEq, Eq)] pub struct HonkProof { - proof: Vec, + pub proof: Vec, } impl HonkProof { diff --git a/co-noir/ultrahonk/src/verifier.rs b/co-noir/ultrahonk/src/verifier.rs index fac63a501..1dac0e2fa 100644 --- a/co-noir/ultrahonk/src/verifier.rs +++ b/co-noir/ultrahonk/src/verifier.rs @@ -1,5 +1,10 @@ use crate::{ - decider::{types::VerifierMemory, verifier::DeciderVerifier}, + decider::{ + types::{ + VerifierMemory, BATCHED_RELATION_PARTIAL_LENGTH, BATCHED_RELATION_PARTIAL_LENGTH_ZK, + }, + verifier::DeciderVerifier, + }, oink::verifier::OinkVerifier, prelude::TranscriptFieldType, prover::UltraHonk, @@ -14,6 +19,7 @@ impl, H: TranscriptHasher pub fn verify( honk_proof: HonkProof, verifying_key: VerifyingKey
<P>
, + has_zk: bool, ) -> HonkVerifyResult { tracing::trace!("UltraHonk verification"); @@ -28,8 +34,14 @@ impl, H: TranscriptHasher let mut memory = VerifierMemory::from_memory_and_key(oink_result, verifying_key); memory.relation_parameters.gate_challenges = Self::generate_gate_challenges(&mut transcript); - - let decider_verifier = DeciderVerifier::new(memory); - decider_verifier.verify(cicruit_size, &crs, transcript) + if !has_zk { + let decider_verifier = + DeciderVerifier::<_, _, BATCHED_RELATION_PARTIAL_LENGTH>::new(memory); + decider_verifier.verify(cicruit_size, &crs, transcript, has_zk) + } else { + let decider_verifier = + DeciderVerifier::<_, _, BATCHED_RELATION_PARTIAL_LENGTH_ZK>::new(memory); + decider_verifier.verify(cicruit_size, &crs, transcript, has_zk) + } } } diff --git a/co-noir/ultrahonk/tests/plain.rs b/co-noir/ultrahonk/tests/plain.rs index 0092b1af4..8ac543544 100644 --- a/co-noir/ultrahonk/tests/plain.rs +++ b/co-noir/ultrahonk/tests/plain.rs @@ -14,11 +14,13 @@ fn plain_test>( proof_file: &str, circuit_file: &str, witness_file: &str, + has_zk: bool, ) { const CRS_PATH_G1: &str = "../co-builder/src/crs/bn254_g1.dat"; const CRS_PATH_G2: &str = "../co-builder/src/crs/bn254_g2.dat"; let constraint_system = Utils::get_constraint_system_from_file(circuit_file, true).unwrap(); + let witness = Utils::get_witness_from_file(witness_file).unwrap(); let mut driver = PlainAcvmSolver::new(); let builder = UltraCircuitBuilder::::create_circuit( @@ -39,16 +41,17 @@ fn plain_test>( .create_keys(prover_crs.into(), verififer_crs, &mut driver) .unwrap(); - let proof = UltraHonk::<_, H>::prove(proving_key).unwrap(); - - let proof_u8 = proof.to_buffer(); - let read_proof_u8 = std::fs::read(proof_file).unwrap(); - assert_eq!(proof_u8, read_proof_u8); + let proof = UltraHonk::<_, H>::prove(proving_key, has_zk).unwrap(); + if !has_zk { + let proof_u8 = proof.to_buffer(); + let read_proof_u8 = std::fs::read(proof_file).unwrap(); + assert_eq!(proof_u8, read_proof_u8); - let read_proof = HonkProof::from_buffer(&read_proof_u8).unwrap(); - assert_eq!(proof, read_proof); + let read_proof = HonkProof::from_buffer(&read_proof_u8).unwrap(); + assert_eq!(proof, read_proof); + } - let is_valid = UltraHonk::<_, H>::verify(proof, verifying_key).unwrap(); + let is_valid = UltraHonk::<_, H>::verify(proof, verifying_key, has_zk).unwrap(); assert!(is_valid); } @@ -58,7 +61,8 @@ fn poseidon_test_poseidon2sponge() { const CIRCUIT_FILE: &str = "../../test_vectors/noir/poseidon/kat/poseidon.json"; const WITNESS_FILE: &str = "../../test_vectors/noir/poseidon/kat/poseidon.gz"; - plain_test::(PROOF_FILE, CIRCUIT_FILE, WITNESS_FILE); + plain_test::(PROOF_FILE, CIRCUIT_FILE, WITNESS_FILE, false); + plain_test::(PROOF_FILE, CIRCUIT_FILE, WITNESS_FILE, true); } #[test] @@ -67,7 +71,8 @@ fn poseidon_test_keccak256() { const CIRCUIT_FILE: &str = "../../test_vectors/noir/poseidon/kat/poseidon.json"; const WITNESS_FILE: &str = "../../test_vectors/noir/poseidon/kat/poseidon.gz"; - plain_test::(PROOF_FILE, CIRCUIT_FILE, WITNESS_FILE); + plain_test::(PROOF_FILE, CIRCUIT_FILE, WITNESS_FILE, false); + plain_test::(PROOF_FILE, CIRCUIT_FILE, WITNESS_FILE, true); } #[test] @@ -75,7 +80,9 @@ fn add3_test_keccak256() { const PROOF_FILE: &str = "../../test_vectors/noir/add3u64/kat/add3u64_proof_with_kec"; const CIRCUIT_FILE: &str = "../../test_vectors/noir/add3u64/kat/add3u64.json"; const WITNESS_FILE: &str = "../../test_vectors/noir/add3u64/kat/add3u64.gz"; - plain_test::(PROOF_FILE, CIRCUIT_FILE, WITNESS_FILE); + + 
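+    // Both flavors are exercised below: with `has_zk = false` the proof is deterministic and
+    // is compared byte-for-byte against the recorded proof file (the `if !has_zk` guard in
+    // `plain_test` above), while with `has_zk = true` the Libra masking polynomials are
+    // sampled fresh on every run, so only successful verification is asserted.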
plain_test::(PROOF_FILE, CIRCUIT_FILE, WITNESS_FILE, false); + plain_test::(PROOF_FILE, CIRCUIT_FILE, WITNESS_FILE, true); } #[test] @@ -83,5 +90,7 @@ fn add3_test_poseidon2sponge() { const PROOF_FILE: &str = "../../test_vectors/noir/add3u64/kat/add3u64_proof_with_pos"; const CIRCUIT_FILE: &str = "../../test_vectors/noir/add3u64/kat/add3u64.json"; const WITNESS_FILE: &str = "../../test_vectors/noir/add3u64/kat/add3u64.gz"; - plain_test::(PROOF_FILE, CIRCUIT_FILE, WITNESS_FILE); + + plain_test::(PROOF_FILE, CIRCUIT_FILE, WITNESS_FILE, false); + plain_test::(PROOF_FILE, CIRCUIT_FILE, WITNESS_FILE, true); } diff --git a/tests/tests/noir/proof_tests/plain.rs b/tests/tests/noir/proof_tests/plain.rs index 69bcf2d6a..896ac8c98 100644 --- a/tests/tests/noir/proof_tests/plain.rs +++ b/tests/tests/noir/proof_tests/plain.rs @@ -42,6 +42,7 @@ fn proof_test>(name: &str) { let constraint_system = Utils::get_constraint_system_from_file(&circuit_file, true) .expect("failed to parse program artifact"); let witness = Utils::get_witness_from_file(&witness_file).expect("failed to parse witness"); + let has_zk = false; let mut driver = PlainAcvmSolver::new(); let builder = PlainCoBuilder::::create_circuit( @@ -63,7 +64,7 @@ fn proof_test>(name: &str) { let proof = CoUltraHonk::::prove(proving_key, &prover_crs).unwrap(); - let is_valid = UltraHonk::<_, H>::verify(proof, verifying_key).unwrap(); + let is_valid = UltraHonk::<_, H>::verify(proof, verifying_key, has_zk).unwrap(); assert!(is_valid); } @@ -74,6 +75,7 @@ fn witness_and_proof_test>(name: &str) let program_artifact = Utils::get_program_artifact_from_file(&circuit_file) .expect("failed to parse program artifact"); let constraint_system = Utils::get_constraint_system_from_artifact(&program_artifact, true); + let has_zk = false; let solver = PlainCoSolver::init_plain_driver(program_artifact, prover_toml).unwrap(); let witness = solver.solve().unwrap().0; @@ -99,7 +101,7 @@ fn witness_and_proof_test>(name: &str) let proof = CoUltraHonk::::prove(proving_key, &prover_crs).unwrap(); - let is_valid = UltraHonk::<_, H>::verify(proof, verifying_key).unwrap(); + let is_valid = UltraHonk::<_, H>::verify(proof, verifying_key, has_zk).unwrap(); assert!(is_valid); } diff --git a/tests/tests/noir/proof_tests/rep3.rs b/tests/tests/noir/proof_tests/rep3.rs index d91b441dd..9a2c4f863 100644 --- a/tests/tests/noir/proof_tests/rep3.rs +++ b/tests/tests/noir/proof_tests/rep3.rs @@ -44,6 +44,7 @@ fn convert_witness_rep3( fn proof_test>(name: &str) { let circuit_file = format!("../test_vectors/noir/{}/kat/{}.json", name, name); let witness_file = format!("../test_vectors/noir/{}/kat/{}.gz", name, name); + let has_zk = false; let program_artifact = Utils::get_program_artifact_from_file(&circuit_file) .expect("failed to parse program artifact"); @@ -87,13 +88,14 @@ fn proof_test>(name: &str) { let verifier_crs = CrsParser::::get_crs_g2(CRS_PATH_G2).unwrap(); let vk = co_noir::generate_vk(&constraint_system, prover_crs, verifier_crs, false).unwrap(); - let is_valid = UltraHonk::<_, H>::verify(proof, vk).unwrap(); + let is_valid = UltraHonk::<_, H>::verify(proof, vk, has_zk).unwrap(); assert!(is_valid); } fn witness_and_proof_test>(name: &str) { let circuit_file = format!("../test_vectors/noir/{}/kat/{}.json", name, name); let prover_toml = format!("../test_vectors/noir/{}/Prover.toml", name); + let has_zk = false; let program_artifact = Utils::get_program_artifact_from_file(&circuit_file) .expect("failed to parse program artifact"); @@ -138,7 +140,7 @@ fn 
witness_and_proof_test>(name: &str) let verifier_crs = CrsParser::::get_crs_g2(CRS_PATH_G2).unwrap(); let vk = co_noir::generate_vk(&constraint_system, prover_crs, verifier_crs, false).unwrap(); - let is_valid = UltraHonk::<_, H>::verify(proof, vk).unwrap(); + let is_valid = UltraHonk::<_, H>::verify(proof, vk, has_zk).unwrap(); assert!(is_valid); } diff --git a/tests/tests/noir/proof_tests/shamir.rs b/tests/tests/noir/proof_tests/shamir.rs index 4ddcb6de2..68c1ec18a 100644 --- a/tests/tests/noir/proof_tests/shamir.rs +++ b/tests/tests/noir/proof_tests/shamir.rs @@ -16,6 +16,7 @@ fn proof_test>( ) { let circuit_file = format!("../test_vectors/noir/{}/kat/{}.json", name, name); let witness_file = format!("../test_vectors/noir/{}/kat/{}.gz", name, name); + let has_zk = false; let program_artifact = Utils::get_program_artifact_from_file(&circuit_file) .expect("failed to parse program artifact"); @@ -65,7 +66,7 @@ fn proof_test>( let verifier_crs = CrsParser::::get_crs_g2(CRS_PATH_G2).unwrap(); let vk = co_noir::generate_vk(&constraint_system, prover_crs, verifier_crs, false).unwrap(); - let is_valid = UltraHonk::<_, H>::verify(proof, vk).unwrap(); + let is_valid = UltraHonk::<_, H>::verify(proof, vk, has_zk).unwrap(); assert!(is_valid); }
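For reference, the value sent as "Libra:Sum" in `ZKSumcheckData::new` is the sum over the Boolean hypercube of the masking multivariate G(X_0, ..., X_{d-1}) = constant_term + sum_i g_i(X_i), which `compute_libra_total_sum` evaluates in closed form as 2^d * constant_term + 2^(d-1) * sum_i (g_i(0) + g_i(1)). The sketch below brute-forces that identity on a small hypercube; it is illustrative only, not part of this patch, and assumes the `ark-bn254`, `ark-ff`, and `rand` crates already used in this workspace:

use ark_bn254::Fr;
use ark_ff::{UniformRand, Zero};

fn main() {
    let mut rng = rand::thread_rng();
    let d = 3; // number of Libra univariates (one per sumcheck round)
    let libra_len = 9; // LIBRA_UNIVARIATES_LENGTH

    // Random Libra univariates g_i, as monomial coefficient vectors, plus the constant term.
    let gs: Vec<Vec<Fr>> = (0..d)
        .map(|_| (0..libra_len).map(|_| Fr::rand(&mut rng)).collect())
        .collect();
    let constant_term = Fr::rand(&mut rng);

    // Horner evaluation of a univariate at x.
    let eval = |coeffs: &[Fr], x: Fr| coeffs.iter().rev().fold(Fr::zero(), |acc, c| acc * x + c);

    // Brute-force sum of G(x) = constant_term + sum_i g_i(x_i) over the hypercube {0,1}^d.
    let mut lhs = Fr::zero();
    for mask in 0u64..(1 << d) {
        lhs += constant_term;
        for (i, g) in gs.iter().enumerate() {
            lhs += eval(g, Fr::from((mask >> i) & 1));
        }
    }

    // Closed form used by compute_libra_total_sum.
    let mut rhs = constant_term * Fr::from(1u64 << d);
    for g in &gs {
        rhs += Fr::from(1u64 << (d - 1)) * (eval(g, Fr::from(0u64)) + eval(g, Fr::from(1u64)));
    }

    assert_eq!(lhs, rhs);
}

The verifier consumes this same value in `sumcheck_verify` and folds it into its starting target as `target_total_sum += libra_total_sum * libra_challenge`, which is exactly the correction applied in the `has_zk` branch above.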