diff --git a/benches/affine.rs b/benches/affine.rs
index 0623bf8f3..108e523b6 100644
--- a/benches/affine.rs
+++ b/benches/affine.rs
@@ -33,10 +33,10 @@ impl Circuit for MyCircuit {
     fn configure(cs: &mut ConstraintSystem) -> Self::Config {
         let len = unsafe { LEN };
-        let input = VarTensor::new_advice(cs, K, len, vec![len], true);
-        let kernel = VarTensor::new_advice(cs, K, len * len, vec![len, len], true);
-        let bias = VarTensor::new_advice(cs, K, len, vec![len], true);
-        let output = VarTensor::new_advice(cs, K, len, vec![len], true);
+        let input = VarTensor::new_advice(cs, K, len, vec![len], true, 512);
+        let kernel = VarTensor::new_advice(cs, K, len * len, vec![len, len], true, 512);
+        let bias = VarTensor::new_advice(cs, K, len, vec![len], true, 512);
+        let output = VarTensor::new_advice(cs, K, len, vec![len], true, 512);
         // tells the config layer to add an affine op to a circuit gate
         let affine_node = FusedNode {
             op: FusedOp::Affine,
diff --git a/benches/cnvrl.rs b/benches/cnvrl.rs
index 3c8e2db07..04d73b023 100644
--- a/benches/cnvrl.rs
+++ b/benches/cnvrl.rs
@@ -53,6 +53,7 @@ where
             IN_CHANNELS * IMAGE_HEIGHT * IMAGE_WIDTH,
             vec![IN_CHANNELS, IMAGE_HEIGHT, IMAGE_WIDTH],
             true,
+            512,
         );
         let kernel = VarTensor::new_advice(
             cs,
@@ -60,15 +61,17 @@ where
             OUT_CHANNELS * IN_CHANNELS * KERNEL_HEIGHT * KERNEL_WIDTH,
             vec![OUT_CHANNELS, IN_CHANNELS, KERNEL_HEIGHT, KERNEL_WIDTH],
             true,
+            512
         );
-        let bias = VarTensor::new_advice(cs, K, OUT_CHANNELS, vec![OUT_CHANNELS], true);
+        let bias = VarTensor::new_advice(cs, K, OUT_CHANNELS, vec![OUT_CHANNELS], true, 512);
         let output = VarTensor::new_advice(
             cs,
             K,
             OUT_CHANNELS * output_height * output_width,
             vec![OUT_CHANNELS, output_height, output_width],
             true,
+            512,
         );

         // tells the config layer to add a conv op to a circuit gate
diff --git a/benches/range.rs b/benches/range.rs
index 123c6a1b5..592e1f3a5 100644
--- a/benches/range.rs
+++ b/benches/range.rs
@@ -33,7 +33,7 @@ impl Circuit for MyCircuit {
     fn configure(cs: &mut ConstraintSystem) -> Self::Config {
         let len = unsafe { LEN };
         let advices = (0..2)
-            .map(|_| VarTensor::new_advice(cs, K, len, vec![len], true))
+            .map(|_| VarTensor::new_advice(cs, K, len, vec![len], true, 512))
             .collect_vec();

         RangeCheckConfig::configure(cs, &advices[0], &advices[1], RANGE)
diff --git a/benches/relu.rs b/benches/relu.rs
index 26a164744..432ea20f9 100644
--- a/benches/relu.rs
+++ b/benches/relu.rs
@@ -34,7 +34,7 @@ impl + Clone> Circuit
     fn configure(cs: &mut ConstraintSystem) -> Self::Config {
         unsafe {
             let advices = (0..2)
-                .map(|_| VarTensor::new_advice(cs, K, LEN, vec![LEN], true))
+                .map(|_| VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512))
                 .collect::<Vec<_>>();

            Self::Config::configure(cs, &advices[0], &advices[1], Some(&[BITS, 128]))
diff --git a/examples/conv2d_mnist/main.rs b/examples/conv2d_mnist/main.rs
index fc3739d3c..1fb8056f3 100644
--- a/examples/conv2d_mnist/main.rs
+++ b/examples/conv2d_mnist/main.rs
@@ -149,6 +149,7 @@ where
             max(IN_CHANNELS * IMAGE_HEIGHT * IMAGE_WIDTH, LEN),
             vec![IN_CHANNELS, IMAGE_HEIGHT, IMAGE_WIDTH],
             true,
+            512,
         );
         let kernel = VarTensor::new_advice(
             cs,
@@ -159,16 +160,18 @@ where
             ),
             vec![OUT_CHANNELS, IN_CHANNELS, KERNEL_HEIGHT, KERNEL_WIDTH],
             true,
+            512,
         );
         let bias =
-            VarTensor::new_advice(cs, K, max(OUT_CHANNELS, CLASSES), vec![OUT_CHANNELS], true);
+            VarTensor::new_advice(cs, K, max(OUT_CHANNELS, CLASSES), vec![OUT_CHANNELS], true, 512);
         let output = VarTensor::new_advice(
             cs,
             K,
             max(OUT_CHANNELS * output_height * output_width, LEN),
             vec![OUT_CHANNELS, output_height, output_width],
             true,
+            512,
         );

         // tells the config layer to add a conv op to a circuit gate
diff --git a/examples/mlp_4d.rs b/examples/mlp_4d.rs
index dca06dc17..7fde20598 100644
--- a/examples/mlp_4d.rs
+++ b/examples/mlp_4d.rs
@@ -50,10 +50,10 @@ impl Circuit
     // Here we wire together the layers by using the output advice in each layer as input advice in the next (not with copying / equality).
     // This can be automated but we will sometimes want skip connections, etc. so we need the flexibility.
     fn configure(cs: &mut ConstraintSystem) -> Self::Config {
-        let input = VarTensor::new_advice(cs, K, LEN, vec![LEN], true);
-        let kernel = VarTensor::new_advice(cs, K, LEN * LEN, vec![LEN, LEN], true);
-        let bias = VarTensor::new_advice(cs, K, LEN, vec![LEN], true);
-        let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true);
+        let input = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512);
+        let kernel = VarTensor::new_advice(cs, K, LEN * LEN, vec![LEN, LEN], true, 512);
+        let bias = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512);
+        let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512);
         // tells the config layer to add an affine op to the circuit gate
         let affine_node = FusedNode {
             op: FusedOp::Affine,
diff --git a/src/circuit/eltwise.rs b/src/circuit/eltwise.rs
index 5f566c7de..c31fcede4 100644
--- a/src/circuit/eltwise.rs
+++ b/src/circuit/eltwise.rs
@@ -336,7 +336,7 @@ mod tests {
         fn configure(cs: &mut ConstraintSystem) -> Self::Config {
             let advices = (0..2)
-                .map(|_| VarTensor::new_advice(cs, 4, 1, vec![1], true))
+                .map(|_| VarTensor::new_advice(cs, 4, 1, vec![1], true, 512))
                 .collect::<Vec<_>>();

             Self::Config::configure(cs, &advices[0], &advices[1], Some(&[2, 1]))
diff --git a/src/circuit/fused.rs b/src/circuit/fused.rs
index b8238f8b9..d66c17381 100644
--- a/src/circuit/fused.rs
+++ b/src/circuit/fused.rs
@@ -335,10 +335,10 @@ mod tests {
         }

         fn configure(cs: &mut ConstraintSystem) -> Self::Config {
-            let input = VarTensor::new_advice(cs, K, LEN, vec![LEN], true);
-            let kernel = VarTensor::new_advice(cs, K, LEN * LEN, vec![LEN, LEN], true);
-            let bias = VarTensor::new_advice(cs, K, LEN, vec![LEN], true);
-            let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true);
+            let input = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512);
+            let kernel = VarTensor::new_advice(cs, K, LEN * LEN, vec![LEN, LEN], true, 512);
+            let bias = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512);
+            let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512);
             // tells the config layer to add an affine op to a circuit gate
             let affine_node = FusedNode {
                 op: FusedOp::Affine,
diff --git a/src/circuit/range.rs b/src/circuit/range.rs
index 5ed41b183..225d1b356 100644
--- a/src/circuit/range.rs
+++ b/src/circuit/range.rs
@@ -156,7 +156,7 @@ mod tests {
         fn configure(cs: &mut ConstraintSystem) -> Self::Config {
             let advices = (0..2)
-                .map(|_| VarTensor::new_advice(cs, 4, 1, vec![1], true))
+                .map(|_| VarTensor::new_advice(cs, 4, 1, vec![1], true, 512))
                 .collect_vec();
             let input = &advices[0];
             let expected = &advices[1];
diff --git a/src/commands.rs b/src/commands.rs
index c221f5e7d..803dda97b 100644
--- a/src/commands.rs
+++ b/src/commands.rs
@@ -30,6 +30,9 @@ pub struct Cli {
     /// Flags whether params are public
     #[arg(long, default_value = "false")]
     pub public_params: bool,
+    /// Flags to set maximum rotations
+    #[arg(short = 'M', long, default_value = "512")]
+    pub max_rotations: usize,
 }

 #[derive(ValueEnum, Copy, Clone, Debug, PartialEq, Eq)]
diff --git a/src/graph/vars.rs b/src/graph/vars.rs
index 2c39dd9e6..35d0dfb2f 100644
--- a/src/graph/vars.rs
+++ b/src/graph/vars.rs
@@ -122,6 +122,8 @@ impl ModelVars {
         fixed_dims: (usize, usize),
         instance_dims: (usize, Vec<Vec<usize>>),
     ) -> Self {
+        let tensor_max = Cli::parse().max_rotations;
+
         let advices = (0..advice_dims.0)
             .map(|_| {
                 VarTensor::new_advice(
@@ -130,12 +132,20 @@ impl ModelVars {
                     advice_dims.1,
                     vec![advice_dims.1],
                     true,
+                    tensor_max,
                 )
             })
             .collect_vec();
         let fixed = (0..fixed_dims.0)
             .map(|_| {
-                VarTensor::new_fixed(cs, logrows as usize, fixed_dims.1, vec![fixed_dims.1], true)
+                VarTensor::new_fixed(
+                    cs,
+                    logrows as usize,
+                    fixed_dims.1,
+                    vec![fixed_dims.1],
+                    true,
+                    tensor_max,
+                )
             })
             .collect_vec();
         let instances = (0..instance_dims.0)
diff --git a/src/tensor/var.rs b/src/tensor/var.rs
index 4dd63e385..88db9ac21 100644
--- a/src/tensor/var.rs
+++ b/src/tensor/var.rs
@@ -33,9 +33,10 @@ impl VarTensor {
         capacity: usize,
         dims: Vec<usize>,
         equality: bool,
+        v1: usize,
     ) -> Self {
         let base = 2u32;
-        let max_rows = min(512, base.pow(k as u32) as usize - cs.blinding_factors() - 1);
+        let max_rows = min(v1, base.pow(k as u32) as usize - cs.blinding_factors() - 1);
         let modulo = (capacity / max_rows) + 1;
         let mut advices = vec![];
         for _ in 0..modulo {
@@ -60,9 +61,10 @@ impl VarTensor {
         capacity: usize,
         dims: Vec<usize>,
         equality: bool,
+        v1: usize,
     ) -> Self {
         let base = 2u32;
-        let max_rows = min(512, base.pow(k as u32) as usize - cs.blinding_factors() - 1);
+        let max_rows = min(v1, base.pow(k as u32) as usize - cs.blinding_factors() - 1);
         let modulo = (capacity / max_rows) + 1;
         let mut fixed = vec![];
         for _ in 0..modulo {
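
Note, not part of the patch: a minimal sketch of the column-allocation arithmetic that the new argument feeds in VarTensor::new_advice / new_fixed, as changed in src/tensor/var.rs above. Here blinding_factors stands in for halo2's cs.blinding_factors(), and the concrete numbers (k = 17, blinding of 5, capacity 4096) are illustrative assumptions, not values taken from the codebase; 512 mirrors the default of the new --max-rotations flag.

fn columns_needed(k: u32, capacity: usize, blinding_factors: usize, max_rotations: usize) -> usize {
    // Cap the usable rows per column at `max_rotations` (previously a hard-coded 512),
    // never exceeding the 2^k rows left after the blinding rows.
    let max_rows = std::cmp::min(max_rotations, 2usize.pow(k) - blinding_factors - 1);
    // `modulo` in the patch: the tensor's capacity is spread over this many columns.
    (capacity / max_rows) + 1
}

fn main() {
    // Under the default cap of 512 rotations, a 4096-element tensor spans 9 columns;
    // raising --max-rotations to 4096 packs it into 2.
    assert_eq!(columns_needed(17, 4096, 5, 512), 9);
    assert_eq!(columns_needed(17, 4096, 5, 4096), 2);
}

In short, the flag trades circuit width for depth: a larger max_rotations uses fewer, taller columns, while the old behaviour is recovered by leaving it at the 512 default.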