diff --git a/.noir-sync-commit b/.noir-sync-commit index 90fcf3f8242..f099a88382c 100644 --- a/.noir-sync-commit +++ b/.noir-sync-commit @@ -1 +1 @@ -89846cfbc4961c5258d91b5973f027be80885a20 +e4eb5f539f377fd3c2e1a874707ffce62a5bc10a diff --git a/noir/noir-repo/.github/workflows/gates_report.yml b/noir/noir-repo/.github/workflows/gates_report.yml index 3d4bef1940e..0cc94a1a04d 100644 --- a/noir/noir-repo/.github/workflows/gates_report.yml +++ b/noir/noir-repo/.github/workflows/gates_report.yml @@ -1,88 +1,94 @@ -# name: Report gates diff - -# on: -# push: -# branches: -# - master -# pull_request: - -# jobs: -# build-nargo: -# runs-on: ubuntu-latest -# strategy: -# matrix: -# target: [x86_64-unknown-linux-gnu] - -# steps: -# - name: Checkout Noir repo -# uses: actions/checkout@v4 - -# - name: Setup toolchain -# uses: dtolnay/rust-toolchain@1.74.1 - -# - uses: Swatinem/rust-cache@v2 -# with: -# key: ${{ matrix.target }} -# cache-on-failure: true -# save-if: ${{ github.event_name != 'merge_group' }} - -# - name: Build Nargo -# run: cargo build --package nargo_cli --release - -# - name: Package artifacts -# run: | -# mkdir dist -# cp ./target/release/nargo ./dist/nargo -# 7z a -ttar -so -an ./dist/* | 7z a -si ./nargo-x86_64-unknown-linux-gnu.tar.gz - -# - name: Upload artifact -# uses: actions/upload-artifact@v4 -# with: -# name: nargo -# path: ./dist/* -# retention-days: 3 - - -# compare_gas_reports: -# needs: [build-nargo] -# runs-on: ubuntu-latest -# permissions: -# pull-requests: write - -# steps: -# - uses: actions/checkout@v4 - -# - name: Download nargo binary -# uses: actions/download-artifact@v4 -# with: -# name: nargo -# path: ./nargo - -# - name: Set nargo on PATH -# run: | -# nargo_binary="${{ github.workspace }}/nargo/nargo" -# chmod +x $nargo_binary -# echo "$(dirname $nargo_binary)" >> $GITHUB_PATH -# export PATH="$PATH:$(dirname $nargo_binary)" -# nargo -V - -# - name: Generate gates report -# working-directory: ./test_programs -# run: | -# ./gates_report.sh -# mv gates_report.json ../gates_report.json +name: Report gates diff + +on: + push: + branches: + - master + pull_request: + +jobs: + build-nargo: + runs-on: ubuntu-latest + strategy: + matrix: + target: [x86_64-unknown-linux-gnu] + + steps: + - name: Checkout Noir repo + uses: actions/checkout@v4 + + - name: Setup toolchain + uses: dtolnay/rust-toolchain@1.74.1 + + - uses: Swatinem/rust-cache@v2 + with: + key: ${{ matrix.target }} + cache-on-failure: true + save-if: ${{ github.event_name != 'merge_group' }} + + - name: Build Nargo + run: cargo build --package nargo_cli --release + + - name: Package artifacts + run: | + mkdir dist + cp ./target/release/nargo ./dist/nargo + 7z a -ttar -so -an ./dist/* | 7z a -si ./nargo-x86_64-unknown-linux-gnu.tar.gz + + - name: Upload artifact + uses: actions/upload-artifact@v4 + with: + name: nargo + path: ./dist/* + retention-days: 3 + + + compare_gates_reports: + needs: [build-nargo] + runs-on: ubuntu-latest + permissions: + pull-requests: write + + steps: + - uses: actions/checkout@v4 + + - name: Install `bb` + run: | + ./scripts/install_bb.sh + echo "$HOME/.bb/" >> $GITHUB_PATH + + - name: Download nargo binary + uses: actions/download-artifact@v4 + with: + name: nargo + path: ./nargo + + - name: Set nargo on PATH + run: | + nargo_binary="${{ github.workspace }}/nargo/nargo" + chmod +x $nargo_binary + echo "$(dirname $nargo_binary)" >> $GITHUB_PATH + export PATH="$PATH:$(dirname $nargo_binary)" + nargo -V + + - name: Generate gates report + working-directory: ./test_programs + run: | 
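+ # Rebuild the test program artifacts first so the gates report measures circuits compiled with the nargo binary built above.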
+ ./rebuild.sh + ./gates_report.sh + mv gates_report.json ../gates_report.json -# - name: Compare gates reports -# id: gates_diff -# uses: vezenovm/noir-gates-diff@acf12797860f237117e15c0d6e08d64253af52b6 -# with: -# report: gates_report.json -# summaryQuantile: 0.9 # only display the 10% most significant circuit size diffs in the summary (defaults to 20%) - -# - name: Add gates diff to sticky comment -# if: github.event_name == 'pull_request' || github.event_name == 'pull_request_target' -# uses: marocchino/sticky-pull-request-comment@v2 -# with: -# # delete the comment in case changes no longer impact circuit sizes -# delete: ${{ !steps.gates_diff.outputs.markdown }} -# message: ${{ steps.gates_diff.outputs.markdown }} + - name: Compare gates reports + id: gates_diff + uses: vezenovm/noir-gates-diff@acf12797860f237117e15c0d6e08d64253af52b6 + with: + report: gates_report.json + summaryQuantile: 0.9 # only display the 10% most significant circuit size diffs in the summary (defaults to 20%) + + - name: Add gates diff to sticky comment + if: github.event_name == 'pull_request' || github.event_name == 'pull_request_target' + uses: marocchino/sticky-pull-request-comment@v2 + with: + # delete the comment in case changes no longer impact circuit sizes + delete: ${{ !steps.gates_diff.outputs.markdown }} + message: ${{ steps.gates_diff.outputs.markdown }} diff --git a/noir/noir-repo/.github/workflows/test-js-packages.yml b/noir/noir-repo/.github/workflows/test-js-packages.yml index c8a8be998e6..db162e21269 100644 --- a/noir/noir-repo/.github/workflows/test-js-packages.yml +++ b/noir/noir-repo/.github/workflows/test-js-packages.yml @@ -399,10 +399,10 @@ jobs: - name: Checkout uses: actions/checkout@v4 - - name: Download bb binary + - name: Install `bb` run: | - # Adds `bb` to PATH ./scripts/install_bb.sh + echo "$HOME/.bb/" >> $GITHUB_PATH - name: Download nargo binary uses: actions/download-artifact@v4 @@ -453,7 +453,7 @@ jobs: test-integration-browser: name: Integration Tests (Browser) runs-on: ubuntu-latest - needs: [build-acvm-js, build-noir-wasm, build-nargo, build-noirc-abi] + needs: [build-acvm-js, build-noir-wasm, build-noirc-abi] timeout-minutes: 30 steps: @@ -495,6 +495,46 @@ jobs: run: | yarn test:browser + test-examples: + name: Example scripts + runs-on: ubuntu-latest + needs: [build-nargo] + timeout-minutes: 30 + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1.2.0 + + - name: Install `bb` + run: | + ./scripts/install_bb.sh + echo "$HOME/.bb/" >> $GITHUB_PATH + + - name: Download nargo binary + uses: actions/download-artifact@v4 + with: + name: nargo + path: ./nargo + + - name: Set nargo on PATH + run: | + nargo_binary="${{ github.workspace }}/nargo/nargo" + chmod +x $nargo_binary + echo "$(dirname $nargo_binary)" >> $GITHUB_PATH + export PATH="$PATH:$(dirname $nargo_binary)" + nargo -V + + - name: Run `prove_and_verify` + working-directory: ./examples/prove_and_verify + run: ./test.sh + + - name: Run `codegen_verifier` + working-directory: ./examples/codegen_verifier + run: ./test.sh + # This is a job which depends on all test jobs and reports the overall status. # This allows us to add/remove test jobs without having to update the required workflows. 
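# For example, the new `test-examples` job above only needs to be appended to the `needs` list below to become required.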
tests-end: @@ -512,6 +552,7 @@ jobs: - test-noir-codegen - test-integration-node - test-integration-browser + - test-examples steps: - name: Report overall success diff --git a/noir/noir-repo/acvm-repo/acir/src/circuit/opcodes.rs b/noir/noir-repo/acvm-repo/acir/src/circuit/opcodes.rs index e6dc11dac78..6043196dfff 100644 --- a/noir/noir-repo/acvm-repo/acir/src/circuit/opcodes.rs +++ b/noir/noir-repo/acvm-repo/acir/src/circuit/opcodes.rs @@ -18,6 +18,12 @@ pub enum BlockType { ReturnData, } +impl BlockType { + pub fn is_databus(&self) -> bool { + matches!(self, BlockType::CallData | BlockType::ReturnData) + } +} + #[allow(clippy::large_enum_variant)] #[derive(Clone, PartialEq, Eq, Serialize, Deserialize)] pub enum Opcode { diff --git a/noir/noir-repo/acvm-repo/acvm/src/compiler/optimizers/unused_memory.rs b/noir/noir-repo/acvm-repo/acvm/src/compiler/optimizers/unused_memory.rs index 18eefa79ac2..5fdcf54a492 100644 --- a/noir/noir-repo/acvm-repo/acvm/src/compiler/optimizers/unused_memory.rs +++ b/noir/noir-repo/acvm-repo/acvm/src/compiler/optimizers/unused_memory.rs @@ -44,8 +44,9 @@ impl UnusedMemoryOptimizer { let mut optimized_opcodes = Vec::with_capacity(self.circuit.opcodes.len()); for (idx, opcode) in self.circuit.opcodes.into_iter().enumerate() { match opcode { - Opcode::MemoryInit { block_id, .. } - if self.unused_memory_initializations.contains(&block_id) => + Opcode::MemoryInit { block_id, block_type, .. } + if !block_type.is_databus() + && self.unused_memory_initializations.contains(&block_id) => { // Drop opcode } diff --git a/noir/noir-repo/acvm-repo/acvm/src/compiler/transformers/csat.rs b/noir/noir-repo/acvm-repo/acvm/src/compiler/transformers/csat.rs index 9e2e3091c74..12a37e3e37a 100644 --- a/noir/noir-repo/acvm-repo/acvm/src/compiler/transformers/csat.rs +++ b/noir/noir-repo/acvm-repo/acvm/src/compiler/transformers/csat.rs @@ -210,16 +210,16 @@ impl CSatTransformer { } } else { // No more usable elements left in the old opcode - opcode.linear_combinations = remaining_linear_terms; break; } } + opcode.linear_combinations.extend(remaining_linear_terms); + // Constraint this intermediate_opcode to be equal to the temp variable by adding it into the IndexMap // We need a unique name for our intermediate variable // XXX: Another optimization, which could be applied in another algorithm // If two opcodes have a large fan-in/out and they share a few common terms, then we should create intermediate variables for them // Do some sort of subset matching algorithm for this on the terms of the polynomial - let inter_var = Self::get_or_create_intermediate_vars( intermediate_variables, intermediate_opcode, diff --git a/noir/noir-repo/acvm-repo/acvm_js/src/execute.rs b/noir/noir-repo/acvm-repo/acvm_js/src/execute.rs index 338511874c9..9f2b07b31fc 100644 --- a/noir/noir-repo/acvm-repo/acvm_js/src/execute.rs +++ b/noir/noir-repo/acvm-repo/acvm_js/src/execute.rs @@ -20,11 +20,11 @@ use crate::{ }; #[wasm_bindgen] -pub struct WasmBlackBoxFunctionSolver(Bn254BlackBoxSolver); +pub struct WasmBlackBoxFunctionSolver; impl WasmBlackBoxFunctionSolver { async fn initialize() -> WasmBlackBoxFunctionSolver { - WasmBlackBoxFunctionSolver(Bn254BlackBoxSolver::initialize().await) + WasmBlackBoxFunctionSolver } } @@ -47,15 +47,9 @@ pub async fn execute_circuit( ) -> Result { console_error_panic_hook::set_once(); - let solver = WasmBlackBoxFunctionSolver::initialize().await; - - let mut witness_stack = execute_program_with_native_type_return( - &solver, - program, - initial_witness, - 
&foreign_call_handler, - ) - .await?; + let mut witness_stack = + execute_program_with_native_type_return(program, initial_witness, &foreign_call_handler) + .await?; let witness_map = witness_stack.pop().expect("Should have at least one witness on the stack").witness; Ok(witness_map.into()) @@ -71,7 +65,7 @@ pub async fn execute_circuit( /// @returns {SolvedAndReturnWitness} The solved witness calculated by executing the circuit on the provided inputs, as well as the return witness indices as specified by the circuit. #[wasm_bindgen(js_name = executeCircuitWithReturnWitness, skip_jsdoc)] pub async fn execute_circuit_with_return_witness( - solver: &WasmBlackBoxFunctionSolver, + _solver: &WasmBlackBoxFunctionSolver, program: Vec, initial_witness: JsWitnessMap, foreign_call_handler: ForeignCallHandler, @@ -82,7 +76,6 @@ pub async fn execute_circuit_with_return_witness( .map_err(|_| JsExecutionError::new("Failed to deserialize circuit. This is likely due to differing serialization formats between ACVM_JS and your compiler".to_string(), None, None))?; let mut witness_stack = execute_program_with_native_program_and_return( - solver, &program, initial_witness, &foreign_call_handler, @@ -108,20 +101,16 @@ pub async fn execute_circuit_with_return_witness( /// @returns {WitnessMap} The solved witness calculated by executing the circuit on the provided inputs. #[wasm_bindgen(js_name = executeCircuitWithBlackBoxSolver, skip_jsdoc)] pub async fn execute_circuit_with_black_box_solver( - solver: &WasmBlackBoxFunctionSolver, + _solver: &WasmBlackBoxFunctionSolver, program: Vec, initial_witness: JsWitnessMap, foreign_call_handler: ForeignCallHandler, ) -> Result { console_error_panic_hook::set_once(); - let mut witness_stack = execute_program_with_native_type_return( - solver, - program, - initial_witness, - &foreign_call_handler, - ) - .await?; + let mut witness_stack = + execute_program_with_native_type_return(program, initial_witness, &foreign_call_handler) + .await?; let witness_map = witness_stack.pop().expect("Should have at least one witness on the stack").witness; Ok(witness_map.into()) @@ -143,24 +132,19 @@ pub async fn execute_program( #[wasm_bindgen(js_name = executeProgramWithBlackBoxSolver, skip_jsdoc)] pub async fn execute_program_with_black_box_solver( - solver: &WasmBlackBoxFunctionSolver, + _solver: &WasmBlackBoxFunctionSolver, program: Vec, initial_witness: JsWitnessMap, foreign_call_executor: &ForeignCallHandler, ) -> Result { - let witness_stack = execute_program_with_native_type_return( - solver, - program, - initial_witness, - foreign_call_executor, - ) - .await?; + let witness_stack = + execute_program_with_native_type_return(program, initial_witness, foreign_call_executor) + .await?; Ok(witness_stack.into()) } async fn execute_program_with_native_type_return( - solver: &WasmBlackBoxFunctionSolver, program: Vec, initial_witness: JsWitnessMap, foreign_call_executor: &ForeignCallHandler, @@ -171,25 +155,20 @@ async fn execute_program_with_native_type_return( None, None))?; - execute_program_with_native_program_and_return( - solver, - &program, - initial_witness, - foreign_call_executor, - ) - .await + execute_program_with_native_program_and_return(&program, initial_witness, foreign_call_executor) + .await } async fn execute_program_with_native_program_and_return( - solver: &WasmBlackBoxFunctionSolver, program: &Program, initial_witness: JsWitnessMap, foreign_call_executor: &ForeignCallHandler, ) -> Result { + let blackbox_solver = Bn254BlackBoxSolver; let executor = 
ProgramExecutor::new( &program.functions, &program.unconstrained_functions, - &solver.0, + &blackbox_solver, foreign_call_executor, ); let witness_stack = executor.execute(initial_witness.into()).await?; diff --git a/noir/noir-repo/acvm-repo/bn254_blackbox_solver/benches/criterion.rs b/noir/noir-repo/acvm-repo/bn254_blackbox_solver/benches/criterion.rs index a8fa7d8aae4..b86414423cf 100644 --- a/noir/noir-repo/acvm-repo/bn254_blackbox_solver/benches/criterion.rs +++ b/noir/noir-repo/acvm-repo/bn254_blackbox_solver/benches/criterion.rs @@ -15,23 +15,21 @@ fn bench_poseidon2(c: &mut Criterion) { fn bench_pedersen_commitment(c: &mut Criterion) { let inputs = [FieldElement::one(); 2]; - let solver = Bn254BlackBoxSolver::new(); c.bench_function("pedersen_commitment", |b| { - b.iter(|| solver.pedersen_commitment(black_box(&inputs), 0)) + b.iter(|| Bn254BlackBoxSolver.pedersen_commitment(black_box(&inputs), 0)) }); } fn bench_pedersen_hash(c: &mut Criterion) { let inputs = [FieldElement::one(); 2]; - let solver = Bn254BlackBoxSolver::new(); - c.bench_function("pedersen_hash", |b| b.iter(|| solver.pedersen_hash(black_box(&inputs), 0))); + c.bench_function("pedersen_hash", |b| { + b.iter(|| Bn254BlackBoxSolver.pedersen_hash(black_box(&inputs), 0)) + }); } fn bench_schnorr_verify(c: &mut Criterion) { - let solver = Bn254BlackBoxSolver::new(); - let pub_key_x = FieldElement::from_hex( "0x04b260954662e97f00cab9adb773a259097f7a274b83b113532bce27fa3fb96a", ) @@ -51,7 +49,7 @@ fn bench_schnorr_verify(c: &mut Criterion) { c.bench_function("schnorr_verify", |b| { b.iter(|| { - solver.schnorr_verify( + Bn254BlackBoxSolver.schnorr_verify( black_box(&pub_key_x), black_box(&pub_key_y), black_box(&sig_bytes), diff --git a/noir/noir-repo/acvm-repo/bn254_blackbox_solver/src/lib.rs b/noir/noir-repo/acvm-repo/bn254_blackbox_solver/src/lib.rs index 43b86e083d5..ae5a1c3db6d 100644 --- a/noir/noir-repo/acvm-repo/bn254_blackbox_solver/src/lib.rs +++ b/noir/noir-repo/acvm-repo/bn254_blackbox_solver/src/lib.rs @@ -15,26 +15,9 @@ use ark_ec::AffineRepr; pub use embedded_curve_ops::{embedded_curve_add, multi_scalar_mul}; pub use poseidon2::poseidon2_permutation; +#[derive(Default)] pub struct Bn254BlackBoxSolver; -impl Bn254BlackBoxSolver { - pub async fn initialize() -> Bn254BlackBoxSolver { - Bn254BlackBoxSolver - } - - #[cfg(not(target_arch = "wasm32"))] - pub fn new() -> Bn254BlackBoxSolver { - Bn254BlackBoxSolver - } -} - -#[cfg(not(target_arch = "wasm32"))] -impl Default for Bn254BlackBoxSolver { - fn default() -> Self { - Self::new() - } -} - impl BlackBoxFunctionSolver for Bn254BlackBoxSolver { fn schnorr_verify( &self, diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen.rs b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen.rs index 22814a22889..5576d673f1d 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen.rs @@ -14,7 +14,7 @@ use crate::ssa::ir::function::Function; pub(crate) fn convert_ssa_function(func: &Function, enable_debug_trace: bool) -> BrilligArtifact { let mut brillig_context = BrilligContext::new(enable_debug_trace); - let mut function_context = FunctionContext::new(func, &mut brillig_context); + let mut function_context = FunctionContext::new(func); brillig_context.enter_context(FunctionContext::function_id_to_function_label(func.id())); diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs 
b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs index 6a4f9f5cc0e..0dbab80dace 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs @@ -7,7 +7,7 @@ use crate::brillig::brillig_ir::{ use crate::ssa::ir::dfg::CallStack; use crate::ssa::ir::instruction::ConstrainError; use crate::ssa::ir::{ - basic_block::{BasicBlock, BasicBlockId}, + basic_block::BasicBlockId, dfg::DataFlowGraph, function::FunctionId, instruction::{ @@ -49,8 +49,7 @@ impl<'block> BrilligBlock<'block> { dfg: &DataFlowGraph, ) { let live_in = function_context.liveness.get_live_in(&block_id); - let variables = - BlockVariables::new(live_in.clone(), function_context.all_block_parameters()); + let variables = BlockVariables::new(live_in.clone()); brillig_context.set_allocated_registers( variables @@ -72,9 +71,9 @@ impl<'block> BrilligBlock<'block> { let block_label = self.create_block_label_for_current_function(self.block_id); self.brillig_context.enter_context(block_label); - // Convert the block parameters + self.convert_block_params(dfg); + let block = &dfg[self.block_id]; - self.convert_block_params(block, dfg); // Convert all of the instructions into the block for instruction_id in block.instructions() { @@ -134,12 +133,8 @@ impl<'block> BrilligBlock<'block> { let target_block = &dfg[*destination_block]; for (src, dest) in arguments.iter().zip(target_block.parameters()) { // Destinations are block parameters so they should have been allocated previously. - let destination = self.variables.get_block_param( - self.function_context, - *destination_block, - *dest, - dfg, - ); + let destination = + self.variables.get_allocation(self.function_context, *dest, dfg); let source = self.convert_ssa_value(*src, dfg); self.pass_variable(source, destination); } @@ -206,10 +201,14 @@ impl<'block> BrilligBlock<'block> { } } - /// Converts SSA Block parameters into Brillig Registers. - fn convert_block_params(&mut self, block: &BasicBlock, dfg: &DataFlowGraph) { - for param_id in block.parameters() { - let value = &dfg[*param_id]; + /// Allocates the block parameters that the given block is defining + fn convert_block_params(&mut self, dfg: &DataFlowGraph) { + // We don't allocate this block's own parameters here; we allocate the parameters that this block defines. + // Since a block's predecessors must know where its parameters are allocated in order to pass data to it, + // block parameters have to be allocated before the block that owns them is processed. Variable liveness tells us in which block each parameter is defined. + // For the entry block, the defined block params are the function's parameters plus the params of any blocks it immediately dominates. + for param_id in self.function_context.liveness.defined_block_params(&self.block_id) { + let value = &dfg[param_id]; let param_type = match value { Value::Param { typ, .. } => typ, _ => unreachable!("ICE: Only Param type values should appear in block parameters"), }; @@ -220,10 +219,10 @@ impl<'block> BrilligBlock<'block> { // Be a valid pointer to the array. // For slices, two registers are passed, the pointer to the data and a register holding the size of the slice. Type::Numeric(_) | Type::Array(..) | Type::Slice(..)
| Type::Reference(_) => { - self.variables.get_block_param( + self.variables.define_variable( self.function_context, - self.block_id, - *param_id, + self.brillig_context, + param_id, dfg, ); } diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block_variables.rs b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block_variables.rs index 892e82d771a..fb9a8577d94 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block_variables.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block_variables.rs @@ -9,7 +9,6 @@ use crate::{ BrilligContext, }, ssa::ir::{ - basic_block::BasicBlockId, dfg::DataFlowGraph, types::{CompositeType, Type}, value::ValueId, @@ -21,18 +20,13 @@ use super::brillig_fn::FunctionContext; #[derive(Debug, Default)] pub(crate) struct BlockVariables { available_variables: HashSet, - block_parameters: HashSet, available_constants: HashMap, } impl BlockVariables { /// Creates a BlockVariables instance. It uses the variables that are live in to the block and the global available variables (block parameters) - pub(crate) fn new(live_in: HashSet, all_block_parameters: HashSet) -> Self { - BlockVariables { - available_variables: live_in.into_iter().chain(all_block_parameters.clone()).collect(), - block_parameters: all_block_parameters, - ..Default::default() - } + pub(crate) fn new(live_in: HashSet) -> Self { + BlockVariables { available_variables: live_in, ..Default::default() } } /// Returns all non-constant variables that have not been removed at this point. @@ -92,16 +86,13 @@ impl BlockVariables { brillig_context: &mut BrilligContext, ) { assert!(self.available_variables.remove(value_id), "ICE: Variable is not available"); - // Block parameters should not be deallocated - if !self.block_parameters.contains(value_id) { - let variable = function_context - .ssa_value_allocations - .get(value_id) - .expect("ICE: Variable allocation not found"); - variable.extract_registers().iter().for_each(|register| { - brillig_context.deallocate_register(*register); - }); - } + let variable = function_context + .ssa_value_allocations + .get(value_id) + .expect("ICE: Variable allocation not found"); + variable.extract_registers().iter().for_each(|register| { + brillig_context.deallocate_register(*register); + }); } /// For a given SSA value id, return the corresponding cached allocation. @@ -155,27 +146,6 @@ impl BlockVariables { pub(crate) fn dump_constants(&mut self) { self.available_constants.clear(); } - - /// For a given block parameter, return the allocation that was done globally to the function. - pub(crate) fn get_block_param( - &mut self, - function_context: &FunctionContext, - block_id: BasicBlockId, - value_id: ValueId, - dfg: &DataFlowGraph, - ) -> BrilligVariable { - let value_id = dfg.resolve(value_id); - assert!( - function_context - .block_parameters - .get(&block_id) - .expect("Block not found") - .contains(&value_id), - "Value is not a block parameter" - ); - - *function_context.ssa_value_allocations.get(&value_id).expect("Block param not found") - } } /// Computes the length of an array. 
This will match with the indexes that SSA will issue diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_fn.rs b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_fn.rs index 617e400b92f..000d1230ece 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_fn.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_fn.rs @@ -4,7 +4,6 @@ use crate::{ brillig::brillig_ir::{ artifact::{BrilligParameter, Label}, brillig_variable::{get_bit_size_from_ssa_type, BrilligVariable}, - BrilligContext, }, ssa::ir::{ basic_block::BasicBlockId, @@ -14,16 +13,14 @@ use crate::{ value::ValueId, }, }; -use fxhash::{FxHashMap as HashMap, FxHashSet as HashSet}; +use fxhash::FxHashMap as HashMap; -use super::{brillig_block_variables::allocate_value, variable_liveness::VariableLiveness}; +use super::variable_liveness::VariableLiveness; pub(crate) struct FunctionContext { pub(crate) function_id: FunctionId, /// Map from SSA values its allocation. Since values can be only defined once in SSA form, we insert them here on when we allocate them at their definition. pub(crate) ssa_value_allocations: HashMap, - /// Block parameters are pre allocated at the function level. - pub(crate) block_parameters: HashMap>, /// The block ids of the function in reverse post order. pub(crate) blocks: Vec, /// Liveness information for each variable in the function. @@ -31,40 +28,22 @@ pub(crate) struct FunctionContext { } impl FunctionContext { - /// Creates a new function context. It will allocate parameters for all blocks and compute the liveness of every variable. - pub(crate) fn new(function: &Function, brillig_context: &mut BrilligContext) -> Self { + /// Creates a new function context. It will compute the liveness of every variable. + pub(crate) fn new(function: &Function) -> Self { let id = function.id(); let mut reverse_post_order = Vec::new(); reverse_post_order.extend_from_slice(PostOrder::with_function(function).as_slice()); reverse_post_order.reverse(); - let mut block_parameters = HashMap::default(); - let mut ssa_variable_to_register_or_memory = HashMap::default(); - - for &block_id in &reverse_post_order { - let block = &function.dfg[block_id]; - let parameters = block.parameters().to_vec(); - parameters.iter().for_each(|&value_id| { - let variable = allocate_value(value_id, brillig_context, &function.dfg); - ssa_variable_to_register_or_memory.insert(value_id, variable); - }); - block_parameters.insert(block_id, parameters); - } - Self { function_id: id, - ssa_value_allocations: ssa_variable_to_register_or_memory, - block_parameters, + ssa_value_allocations: HashMap::default(), blocks: reverse_post_order, liveness: VariableLiveness::from_function(function), } } - pub(crate) fn all_block_parameters(&self) -> HashSet { - self.block_parameters.values().flat_map(|parameters| parameters.iter()).cloned().collect() - } - /// Creates a function label from a given SSA function id. 
pub(crate) fn function_id_to_function_label(function_id: FunctionId) -> Label { function_id.to_string() diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_slice_ops.rs b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_slice_ops.rs index 3e1515b1eed..491086e8c0f 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_slice_ops.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_slice_ops.rs @@ -377,9 +377,9 @@ mod tests { builder.set_runtime(RuntimeType::Brillig); let ssa = builder.finish(); - let mut brillig_context = create_context(); + let brillig_context = create_context(); - let function_context = FunctionContext::new(ssa.main(), &mut brillig_context); + let function_context = FunctionContext::new(ssa.main()); (ssa, function_context, brillig_context) } diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/variable_liveness.rs b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/variable_liveness.rs index c9f1cd1e247..52eded81919 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/variable_liveness.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/brillig/brillig_gen/variable_liveness.rs @@ -93,6 +93,9 @@ fn variables_used_in_block(block: &BasicBlock, dfg: &DataFlowGraph) -> Vec Vec; -fn compute_defined_variables(block: &BasicBlock, dfg: &DataFlowGraph) -> Variables { - let mut defined_vars = HashSet::default(); - - for parameter in block.parameters() { - defined_vars.insert(dfg.resolve(*parameter)); - } - - for instruction_id in block.instructions() { - let result_values = dfg.instruction_results(*instruction_id); - for result_value in result_values { - defined_vars.insert(dfg.resolve(*result_value)); - } - } - - defined_vars -} - fn compute_used_before_def( block: &BasicBlock, dfg: &DataFlowGraph, @@ -142,6 +128,8 @@ pub(crate) struct VariableLiveness { live_in: HashMap, /// The variables that stop being alive after each specific instruction last_uses: HashMap, + /// The list of block params the given block is defining. The order matters for the entry block, so it's a vec. + param_definitions: HashMap>, } impl VariableLiveness { @@ -150,8 +138,15 @@ impl VariableLiveness { let cfg = ControlFlowGraph::with_function(func); let post_order = PostOrder::with_function(func); - let mut instance = - Self { cfg, post_order, live_in: HashMap::default(), last_uses: HashMap::default() }; + let mut instance = Self { + cfg, + post_order, + live_in: HashMap::default(), + last_uses: HashMap::default(), + param_definitions: HashMap::default(), + }; + + instance.compute_block_param_definitions(func); instance.compute_live_in_of_blocks(func); @@ -179,6 +174,30 @@ impl VariableLiveness { self.last_uses.get(block_id).expect("Last uses should have been calculated") } + /// Retrieves the list of block params the given block is defining. + /// Block params are defined before the block that owns them (since they are used by the predecessor blocks). They must be defined in the immediate dominator. + /// This is the last point where the block param can be allocated without it being allocated in different places in different branches. 
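+ /// For example, in the `block_params` test below, `b0` branches to `b1` and `b2`, which both jump to `b3(v1, v2)`.
+ /// `b0` is the immediate dominator of `b3`, so `v1` and `v2` are defined in `b0` and both branches pass data through the same allocations.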
+ pub(crate) fn defined_block_params(&self, block_id: &BasicBlockId) -> Vec { + self.param_definitions.get(block_id).cloned().unwrap_or_default() + } + + fn compute_block_param_definitions(&mut self, func: &Function) { + let tree = DominatorTree::with_cfg_and_post_order(&self.cfg, &self.post_order); + + // Going in reverse post order to process the entry block first + let mut reverse_post_order = Vec::new(); + reverse_post_order.extend_from_slice(self.post_order.as_slice()); + reverse_post_order.reverse(); + for block in reverse_post_order { + let params = func.dfg[block].parameters(); + // If it has no dominator, it's the entry block + let dominator_block = tree.immediate_dominator(block).unwrap_or(func.entry_block()); + let definitions_for_the_dominator = + self.param_definitions.entry(dominator_block).or_default(); + definitions_for_the_dominator.extend(params.iter()); + } + } + fn compute_live_in_of_blocks(&mut self, func: &Function) { let back_edges = find_back_edges(func, &self.cfg, &self.post_order); @@ -197,9 +216,10 @@ impl VariableLiveness { block_id: BasicBlockId, back_edges: &HashSet, ) { - let block = &func.dfg[block_id]; + let defined = self.compute_defined_variables(block_id, &func.dfg); + + let block: &BasicBlock = &func.dfg[block_id]; - let defined = compute_defined_variables(block, &func.dfg); let used_before_def = compute_used_before_def(block, &func.dfg, &defined); let mut live_out = HashSet::default(); @@ -222,6 +242,24 @@ impl VariableLiveness { self.live_in.insert(block_id, used_before_def.union(&passthrough_vars).cloned().collect()); } + fn compute_defined_variables(&self, block_id: BasicBlockId, dfg: &DataFlowGraph) -> Variables { + let block: &BasicBlock = &dfg[block_id]; + let mut defined_vars = HashSet::default(); + + for parameter in self.defined_block_params(&block_id) { + defined_vars.insert(dfg.resolve(parameter)); + } + + for instruction_id in block.instructions() { + let result_values = dfg.instruction_results(*instruction_id); + for result_value in result_values { + defined_vars.insert(dfg.resolve(*result_value)); + } + } + + defined_vars + } + fn update_live_ins_within_loop(&mut self, back_edge: BackEdge) { let header_live_ins = self .live_in @@ -513,12 +551,12 @@ mod test { let liveness = VariableLiveness::from_function(func); assert!(liveness.get_live_in(&func.entry_block()).is_empty()); - assert_eq!(liveness.get_live_in(&b1), &FxHashSet::from_iter([v0, v1, v3].into_iter())); + assert_eq!(liveness.get_live_in(&b1), &FxHashSet::from_iter([v0, v1, v3, v4].into_iter())); assert_eq!(liveness.get_live_in(&b3), &FxHashSet::from_iter([v3].into_iter())); assert_eq!(liveness.get_live_in(&b2), &FxHashSet::from_iter([v0, v1, v3, v4].into_iter())); assert_eq!( liveness.get_live_in(&b4), - &FxHashSet::from_iter([v0, v1, v3, v4, v6].into_iter()) + &FxHashSet::from_iter([v0, v1, v3, v4, v6, v7].into_iter()) ); assert_eq!(liveness.get_live_in(&b6), &FxHashSet::from_iter([v0, v1, v3, v4].into_iter())); assert_eq!( @@ -540,4 +578,58 @@ mod test { Some(&FxHashSet::from_iter([v3].into_iter())) ); } + + #[test] + fn block_params() { + // brillig fn main f0 { + // b0(v0: u1): + // jmpif v0 then: b1, else: b2 + // b1(): + // jmp b3(Field 27, Field 29) + // b3(v1: Field, v2: Field): + // return v1 + // b2(): + // jmp b3(Field 28, Field 40) + // } + + let main_id = Id::test_new(1); + let mut builder = FunctionBuilder::new("main".into(), main_id); + builder.set_runtime(RuntimeType::Brillig); + + let v0 = builder.add_parameter(Type::bool()); + + let b1 = builder.insert_block(); 
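+ // b1 and b2 are the two jmpif targets; b3 (created next) is the join block whose params are v1 and v2.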
+ let b2 = builder.insert_block(); + let b3 = builder.insert_block(); + + builder.terminate_with_jmpif(v0, b1, b2); + + builder.switch_to_block(b1); + let twenty_seven = builder.field_constant(27_u128); + let twenty_nine = builder.field_constant(29_u128); + builder.terminate_with_jmp(b3, vec![twenty_seven, twenty_nine]); + + builder.switch_to_block(b3); + let v1 = builder.add_block_parameter(b3, Type::field()); + let v2 = builder.add_block_parameter(b3, Type::field()); + builder.terminate_with_return(vec![v1]); + + builder.switch_to_block(b2); + let twenty_eight = builder.field_constant(28_u128); + let forty = builder.field_constant(40_u128); + builder.terminate_with_jmp(b3, vec![twenty_eight, forty]); + + let ssa = builder.finish(); + let func = ssa.main(); + let liveness = VariableLiveness::from_function(func); + + // Entry point defines its own params and also b3's params. + assert_eq!(liveness.defined_block_params(&func.entry_block()), vec![v0, v1, v2]); + assert_eq!(liveness.defined_block_params(&b1), vec![]); + assert_eq!(liveness.defined_block_params(&b2), vec![]); + assert_eq!(liveness.defined_block_params(&b3), vec![]); + + assert_eq!(liveness.get_live_in(&b1), &FxHashSet::from_iter([v1, v2].into_iter())); + assert_eq!(liveness.get_live_in(&b2), &FxHashSet::from_iter([v1, v2].into_iter())); + } } diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa.rs index 69e5f6ddfcc..c2fe7878bf8 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa.rs @@ -56,6 +56,7 @@ pub(crate) fn optimize_into_acir( .run_pass(Ssa::defunctionalize, "After Defunctionalization:") .run_pass(Ssa::remove_paired_rc, "After Removing Paired rc_inc & rc_decs:") .run_pass(Ssa::inline_functions, "After Inlining:") + .run_pass(Ssa::resolve_is_unconstrained, "After Resolving IsUnconstrained:") // Run mem2reg with the CFG separated into blocks .run_pass(Ssa::mem2reg, "After Mem2Reg:") .run_pass(Ssa::as_slice_optimization, "After `as_slice` optimization") diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs index 90cdbb650c2..b6e88a8d4b0 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs @@ -614,13 +614,44 @@ impl AcirContext { expr.push_multiplication_term(FieldElement::one(), lhs_witness, rhs_witness); self.add_data(AcirVarData::Expr(expr)) } - ( - AcirVarData::Expr(_) | AcirVarData::Witness(_), - AcirVarData::Expr(_) | AcirVarData::Witness(_), - ) => { + (AcirVarData::Expr(expression), AcirVarData::Witness(witness)) + | (AcirVarData::Witness(witness), AcirVarData::Expr(expression)) + if expression.is_linear() => + { + let mut expr = Expression::default(); + for term in expression.linear_combinations.iter() { + expr.push_multiplication_term(term.0, term.1, witness); + } + expr.push_addition_term(expression.q_c, witness); + self.add_data(AcirVarData::Expr(expr)) + } + (AcirVarData::Expr(lhs_expr), AcirVarData::Expr(rhs_expr)) => { + let degree_one = if lhs_expr.is_linear() && rhs_expr.is_degree_one_univariate() { + Some((lhs_expr, rhs_expr)) + } else if rhs_expr.is_linear() && lhs_expr.is_degree_one_univariate() { + Some((rhs_expr, lhs_expr)) + } else { + None + }; + if let Some((lin, univariate)) = degree_one { + let mut expr = 
Expression::default(); + let rhs_term = univariate.linear_combinations[0]; + for term in lin.linear_combinations.iter() { + expr.push_multiplication_term(term.0 * rhs_term.0, term.1, rhs_term.1); + } + expr.push_addition_term(lin.q_c * rhs_term.0, rhs_term.1); + expr.sort(); + expr = expr.add_mul(univariate.q_c, &lin); + self.add_data(AcirVarData::Expr(expr)) + } else { + let lhs = self.get_or_create_witness_var(lhs)?; + let rhs = self.get_or_create_witness_var(rhs)?; + self.mul_var(lhs, rhs)? + } + } + _ => { let lhs = self.get_or_create_witness_var(lhs)?; let rhs = self.get_or_create_witness_var(rhs)?; - self.mul_var(lhs, rhs)? } }; diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs index fefe5f6f8e6..30fd83d7704 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs @@ -1728,7 +1728,7 @@ impl<'a> Context<'a> { &mut self, terminator: &TerminatorInstruction, dfg: &DataFlowGraph, - ) -> Result, InternalError> { + ) -> Result, RuntimeError> { let (return_values, call_stack) = match terminator { TerminatorInstruction::Return { return_values, call_stack } => { (return_values, call_stack) @@ -1750,6 +1750,8 @@ impl<'a> Context<'a> { if !is_databus { // We do not return value for the data bus. self.acir_context.return_var(acir_var)?; + } else { + self.check_array_is_initialized(self.data_bus.return_data.unwrap(), dfg)?; } } Ok(warnings) diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/instruction.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/instruction.rs index f136d3c5fb2..93ea703721f 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/instruction.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/instruction.rs @@ -65,6 +65,7 @@ pub(crate) enum Intrinsic { FromField, AsField, AsWitness, + IsUnconstrained, } impl std::fmt::Display for Intrinsic { @@ -89,6 +90,7 @@ impl std::fmt::Display for Intrinsic { Intrinsic::FromField => write!(f, "from_field"), Intrinsic::AsField => write!(f, "as_field"), Intrinsic::AsWitness => write!(f, "as_witness"), + Intrinsic::IsUnconstrained => write!(f, "is_unconstrained"), } } } @@ -116,7 +118,8 @@ impl Intrinsic { | Intrinsic::SliceRemove | Intrinsic::StrAsBytes | Intrinsic::FromField - | Intrinsic::AsField => false, + | Intrinsic::AsField + | Intrinsic::IsUnconstrained => false, // Some black box functions have side-effects Intrinsic::BlackBox(func) => matches!(func, BlackBoxFunc::RecursiveAggregation), @@ -145,6 +148,7 @@ impl Intrinsic { "from_field" => Some(Intrinsic::FromField), "as_field" => Some(Intrinsic::AsField), "as_witness" => Some(Intrinsic::AsWitness), + "is_unconstrained" => Some(Intrinsic::IsUnconstrained), other => BlackBoxFunc::lookup(other).map(Intrinsic::BlackBox), } } diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs index 8e320de9337..8f57d9de368 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs @@ -294,6 +294,7 @@ pub(super) fn simplify_call( SimplifyResult::SimplifiedToInstruction(instruction) } Intrinsic::AsWitness => SimplifyResult::None, + Intrinsic::IsUnconstrained => SimplifyResult::None, } } diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/mod.rs 
b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/mod.rs index 27536d59ea5..f6c3f022bfc 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/mod.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/mod.rs @@ -17,5 +17,6 @@ mod rc; mod remove_bit_shifts; mod remove_enable_side_effects; mod remove_if_else; +mod resolve_is_unconstrained; mod simplify_cfg; mod unrolling; diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/remove_enable_side_effects.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/remove_enable_side_effects.rs index 9309652d508..464faa57323 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/remove_enable_side_effects.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/remove_enable_side_effects.rs @@ -158,7 +158,8 @@ impl Context { | Intrinsic::FromField | Intrinsic::AsField | Intrinsic::AsSlice - | Intrinsic::AsWitness => false, + | Intrinsic::AsWitness + | Intrinsic::IsUnconstrained => false, }, // We must assume that functions contain a side effect as we cannot inspect more deeply. diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/remove_if_else.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/remove_if_else.rs index 3d2b5142219..91b455dbf29 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/remove_if_else.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/remove_if_else.rs @@ -232,6 +232,7 @@ fn slice_capacity_change( | Intrinsic::BlackBox(_) | Intrinsic::FromField | Intrinsic::AsField - | Intrinsic::AsWitness => SizeChange::None, + | Intrinsic::AsWitness + | Intrinsic::IsUnconstrained => SizeChange::None, } } diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/resolve_is_unconstrained.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/resolve_is_unconstrained.rs new file mode 100644 index 00000000000..2c9e33ae528 --- /dev/null +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/resolve_is_unconstrained.rs @@ -0,0 +1,56 @@ +use crate::ssa::{ + ir::{ + function::{Function, RuntimeType}, + instruction::{Instruction, Intrinsic}, + types::Type, + value::Value, + }, + ssa_gen::Ssa, +}; +use acvm::FieldElement; +use fxhash::FxHashSet as HashSet; + +impl Ssa { + /// An SSA pass that finds any calls to `Intrinsic::IsUnconstrained` and replaces all uses of the intrinsic's result + /// with the resolved boolean value. + /// Note that this pass must run after the pass that does runtime separation, since in SSA generation an ACIR function can end up targeting brillig. + #[tracing::instrument(level = "trace", skip(self))] + pub(crate) fn resolve_is_unconstrained(mut self) -> Self { + for func in self.functions.values_mut() { + replace_is_unconstrained_result(func); + } + self + } +} + +fn replace_is_unconstrained_result(func: &mut Function) { + let mut is_unconstrained_calls = HashSet::default(); + // Collect all calls to is_unconstrained + for block_id in func.reachable_blocks() { + for &instruction_id in func.dfg[block_id].instructions() { + let target_func = match &func.dfg[instruction_id] { + Instruction::Call { func, .. } => *func, + _ => continue, + }; + + if let Value::Intrinsic(Intrinsic::IsUnconstrained) = &func.dfg[target_func] { + is_unconstrained_calls.insert(instruction_id); + } + } + } + + for instruction_id in is_unconstrained_calls { + let call_returns = func.dfg.instruction_results(instruction_id); + let original_return_id = call_returns[0]; + + // We replace the result with a fresh id.
This will be unused, so the DIE pass will remove the leftover intrinsic call. + func.dfg.replace_result(instruction_id, original_return_id); + + let is_within_unconstrained = func.dfg.make_constant( + FieldElement::from(matches!(func.runtime(), RuntimeType::Brillig)), + Type::bool(), + ); + // Replace all uses of the original return value with the constant + func.dfg.set_value_from_id(original_return_id, is_within_unconstrained); + } +} diff --git a/noir/noir-repo/compiler/noirc_frontend/src/elaborator/expressions.rs b/noir/noir-repo/compiler/noirc_frontend/src/elaborator/expressions.rs index 83b607ca976..f1631286336 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/elaborator/expressions.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/elaborator/expressions.rs @@ -194,7 +194,7 @@ impl<'context> Elaborator<'context> { let expr_id = self.interner.push_expr(ident); self.interner.push_expr_location(expr_id, call_expr_span, self.file); let ident = old_value.ident.clone(); - let typ = self.type_check_variable(ident, expr_id); + let typ = self.type_check_variable(ident, expr_id, None); self.interner.push_expr_type(expr_id, typ.clone()); capture_types.push(typ); fmt_str_idents.push(expr_id); @@ -291,16 +291,25 @@ impl<'context> Elaborator<'context> { Some(method_ref) => { // Automatically add `&mut` if the method expects a mutable reference and // the object is not already one. - if let HirMethodReference::FuncId(func_id) = &method_ref { - if *func_id != FuncId::dummy_id() { - let function_type = self.interner.function_meta(func_id).typ.clone(); - - self.try_add_mutable_reference_to_object( - &function_type, - &mut object_type, - &mut object, - ); + let func_id = match &method_ref { + HirMethodReference::FuncId(func_id) => *func_id, + HirMethodReference::TraitMethodId(method_id, _) => { + let id = self.interner.trait_method_id(*method_id); + let definition = self.interner.definition(id); + let DefinitionKind::Function(func_id) = definition.kind else { + unreachable!("Expected trait function to be a DefinitionKind::Function") + }; + func_id } + }; + + if func_id != FuncId::dummy_id() { + let function_type = self.interner.function_meta(&func_id).typ.clone(); + self.try_add_mutable_reference_to_object( + &function_type, + &mut object_type, + &mut object, + ); } // These arguments will be given to the desugared function call. @@ -322,8 +331,13 @@ impl<'context> Elaborator<'context> { let generics = method_call.generics.map(|option_inner| { option_inner.into_iter().map(|generic| self.resolve_type(generic)).collect() }); + let turbofish_generics = generics.clone(); let method_call = HirMethodCallExpression { method, object, arguments, location, generics }; + + // Desugar the method call into a normal, resolved function call + // so that the backend doesn't need to worry about methods + // TODO: update object_type here? let ((function_id, function_name), function_call) = method_call.into_function_call( &method_ref, object_type, @@ -331,7 +345,8 @@ impl<'context> Elaborator<'context> { self.interner, ); - let func_type = self.type_check_variable(function_name, function_id); + let func_type = + self.type_check_variable(function_name, function_id, turbofish_generics); // Type check the new call now that it has been changed from a method call // to a function call. This way we avoid duplicating code. 
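For context, a minimal sketch (in Noir) of the source pattern this change validates; the struct `Foo`, method `bar`, and call site below are illustrative assumptions, not code from this diff:

    struct Foo { inner: Field }

    impl Foo {
        fn bar<T>(self, x: T) -> T { x }
    }

    fn main() {
        let foo = Foo { inner: 1 };
        // `bar` declares one generic and the turbofish supplies one type,
        // so the new generic-count check passes.
        let y = foo.bar::<Field>(2);
        assert(y == 2);
        // `foo.bar::<Field, u64>(2)` would instead report
        // IncorrectTurbofishGenericCount (expected 1 generic, found 2).
    }

Because the method call is desugared into an ordinary function call, the turbofish generics are forwarded to `type_check_variable`, so the same count check covers both plain calls and method calls.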
diff --git a/noir/noir-repo/compiler/noirc_frontend/src/elaborator/mod.rs b/noir/noir-repo/compiler/noirc_frontend/src/elaborator/mod.rs index 0f9d22ca9b5..0581e7900f8 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/elaborator/mod.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/elaborator/mod.rs @@ -10,7 +10,13 @@ use crate::{ UnresolvedTraitConstraint, UnresolvedTypeExpression, }, hir::{ - def_collector::{dc_crate::CompilationError, errors::DuplicateType}, + def_collector::{ + dc_crate::{ + filter_literal_globals, CompilationError, UnresolvedGlobal, UnresolvedStruct, + UnresolvedTrait, UnresolvedTypeAlias, + }, + errors::DuplicateType, + }, resolution::{errors::ResolverError, path_resolver::PathResolver, resolver::LambdaContext}, scope::ScopeForest as GenericScopeForest, type_check::TypeCheckError, @@ -22,15 +28,16 @@ use crate::{ HirInfixExpression, HirLambda, HirMemberAccess, HirMethodCallExpression, HirMethodReference, HirPrefixExpression, }, + stmt::HirLetStatement, traits::TraitConstraint, }, macros_api::{ BlockExpression, CallExpression, CastExpression, Expression, ExpressionKind, HirExpression, HirLiteral, HirStatement, Ident, IndexExpression, Literal, MemberAccessExpression, - MethodCallExpression, NodeInterner, NoirFunction, PrefixExpression, Statement, - StatementKind, StructId, + MethodCallExpression, NodeInterner, NoirFunction, NoirStruct, Pattern, PrefixExpression, + SecondaryAttribute, Statement, StatementKind, StructId, }, - node_interner::{DefinitionKind, DependencyId, ExprId, FuncId, StmtId, TraitId}, + node_interner::{DefinitionKind, DependencyId, ExprId, FuncId, StmtId, TraitId, TypeAliasId}, Shared, StructType, Type, TypeVariable, }; use crate::{ @@ -68,6 +75,7 @@ mod expressions; mod patterns; mod scope; mod statements; +mod traits; mod types; use fm::FileId; @@ -203,24 +211,48 @@ impl<'context> Elaborator<'context> { ) -> Vec<(CompilationError, FileId)> { let mut this = Self::new(context, crate_id); - // the resolver filters literal globals first - for global in items.globals {} + // We must first resolve and intern the globals before we can resolve any stmts inside each function. + // Each function uses its own resolver with a newly created ScopeForest, so each global must be resolved again to bring it into the function's scope. + // + // Additionally, we must resolve integer globals before structs since structs may refer to + // the values of integer globals as numeric generics. + let (literal_globals, non_literal_globals) = filter_literal_globals(items.globals); - for alias in items.type_aliases {} + for global in literal_globals { + this.elaborate_global(global); + } + + for (alias_id, alias) in items.type_aliases { + this.define_type_alias(alias_id, alias); + } - for trait_ in items.traits {} + this.collect_traits(items.traits); - for struct_ in items.types {} + // Must resolve structs before we resolve the remaining non-literal globals. + this.collect_struct_definitions(items.types); + // Bind trait impls to their trait. Collect the trait functions that have a + // default implementation which hasn't been overridden. for trait_impl in &mut items.trait_impls { this.collect_trait_impl(trait_impl); } + // Before we resolve any function symbols we must go through our impls and + // re-collect the methods within into their proper module. This cannot be + // done during def collection since we need to be able to resolve the type of + // the impl, which determines the module we should collect into.
+ // + // These are resolved after trait impls so that struct methods are chosen + // over trait methods if there are name conflicts. for ((typ, module), impls) in &items.impls { this.collect_impls(typ, *module, impls); } - // resolver resolves non-literal globals here + // We must wait to resolve non-literal globals until after we resolve structs since struct + // globals will need to reference the struct type they're initialized to in order to ensure they are valid. + for global in non_literal_globals { + this.elaborate_global(global); + } for functions in items.functions { this.elaborate_functions(functions); @@ -1100,4 +1132,101 @@ impl<'context> Elaborator<'context> { }); } } + + fn define_type_alias(&mut self, alias_id: TypeAliasId, alias: UnresolvedTypeAlias) { + self.file = alias.file_id; + self.local_module = alias.module_id; + + let generics = self.add_generics(&alias.type_alias_def.generics); + self.resolve_local_globals(); + self.current_item = Some(DependencyId::Alias(alias_id)); + let typ = self.resolve_type(alias.type_alias_def.typ); + self.interner.set_type_alias(alias_id, typ, generics); + } + + fn collect_struct_definitions(&mut self, structs: BTreeMap<StructId, UnresolvedStruct>) { + // This is necessary to avoid cloning the entire struct map + // when adding checks after each struct field is resolved. + let struct_ids = structs.keys().copied().collect::<Vec<_>>(); + + // Resolve each field in each struct. + // Each struct should already be present in the NodeInterner after def collection. + for (type_id, typ) in structs { + self.file = typ.file_id; + self.local_module = typ.module_id; + let (generics, fields) = self.resolve_struct_fields(typ.struct_def, type_id); + + self.interner.update_struct(type_id, |struct_def| { + struct_def.set_fields(fields); + struct_def.generics = generics; + }); + } + + // Check whether the struct fields have nested slices. + // We need to check after all structs are resolved to + // make sure every struct's fields are accurately set.
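+ // For example, a field like `a: [[Field]; 2]` (a slice nested inside an array) is rejected here with `ResolverError::NestedSlices`.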
+ for id in struct_ids { + let struct_type = self.interner.get_struct(id); + // Only handle structs without generics as any generic args will be checked + // after monomorphization when performing SSA codegen + if struct_type.borrow().generics.is_empty() { + let fields = struct_type.borrow().get_fields(&[]); + for (_, field_type) in fields.iter() { + if field_type.is_nested_slice() { + let location = struct_type.borrow().location; + self.file = location.file; + self.push_err(ResolverError::NestedSlices { span: location.span }); + } + } + } + } + } + + pub fn resolve_struct_fields( + &mut self, + unresolved: NoirStruct, + struct_id: StructId, + ) -> (Generics, Vec<(Ident, Type)>) { + let generics = self.add_generics(&unresolved.generics); + + // Check whether the struct definition has globals in the local module and add them to the scope + self.resolve_local_globals(); + + self.current_item = Some(DependencyId::Struct(struct_id)); + + self.resolving_ids.insert(struct_id); + let fields = vecmap(unresolved.fields, |(ident, typ)| (ident, self.resolve_type(typ))); + self.resolving_ids.remove(&struct_id); + + (generics, fields) + } + + fn elaborate_global(&mut self, global: UnresolvedGlobal) { + self.local_module = global.module_id; + self.file = global.file_id; + + let global_id = global.global_id; + self.current_item = Some(DependencyId::Global(global_id)); + + let definition_kind = DefinitionKind::Global(global_id); + let let_stmt = global.stmt_def; + + if !self.in_contract + && let_stmt.attributes.iter().any(|attr| matches!(attr, SecondaryAttribute::Abi(_))) + { + let span = let_stmt.pattern.span(); + self.push_err(ResolverError::AbiAttributeOutsideContract { span }); + } + + if !let_stmt.comptime && matches!(let_stmt.pattern, Pattern::Mutable(..)) { + let span = let_stmt.pattern.span(); + self.push_err(ResolverError::MutableGlobal { span }); + } + + let (let_statement, _typ) = self.elaborate_let(let_stmt); + + let statement_id = self.interner.get_global(global_id).let_statement; + self.interner.get_global_definition_mut(global_id).kind = definition_kind; + self.interner.replace_statement(statement_id, let_statement); + } } diff --git a/noir/noir-repo/compiler/noirc_frontend/src/elaborator/patterns.rs b/noir/noir-repo/compiler/noirc_frontend/src/elaborator/patterns.rs index 810e1b90743..17c11b88f4a 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/elaborator/patterns.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/elaborator/patterns.rs @@ -5,6 +5,7 @@ use rustc_hash::FxHashSet as HashSet; use crate::{ ast::ERROR_IDENT, hir::{ + def_collector::dc_crate::CompilationError, resolution::errors::ResolverError, type_check::{Source, TypeCheckError}, }, @@ -330,9 +331,9 @@ impl<'context> Elaborator<'context> { ) -> (ExprId, Type) { let span = variable.span; let expr = self.resolve_variable(variable); - let id = self.interner.push_expr(HirExpression::Ident(expr.clone(), generics)); + let id = self.interner.push_expr(HirExpression::Ident(expr.clone(), generics.clone())); self.interner.push_expr_location(id, span, self.file); - let typ = self.type_check_variable(expr, id); + let typ = self.type_check_variable(expr, id, generics); self.interner.push_expr_type(id, typ.clone()); (id, typ) } @@ -384,14 +385,19 @@ impl<'context> Elaborator<'context> { } } - pub(super) fn type_check_variable(&mut self, ident: HirIdent, expr_id: ExprId) -> Type { + pub(super) fn type_check_variable( + &mut self, + ident: HirIdent, + expr_id: ExprId, + generics: Option<Vec<Type>>, + ) -> Type { let mut bindings =
TypeBindings::new(); // Add type bindings from any constraints that were used. // We need to do this first since otherwise instantiating the type below // will replace each trait generic with a fresh type variable, rather than // the type used in the trait constraint (if it exists). See #4088. - if let ImplKind::TraitMethod(_, constraint, _) = &ident.impl_kind { + if let ImplKind::TraitMethod(_, constraint, assumed) = &ident.impl_kind { let the_trait = self.interner.get_trait(constraint.trait_id); assert_eq!(the_trait.generics.len(), constraint.trait_generics.len()); @@ -401,6 +407,16 @@ impl<'context> Elaborator<'context> { bindings.insert(param.id(), (param.clone(), arg.clone())); } } + + // If the trait impl is already assumed to exist we should add any type bindings for `Self`. + // Otherwise `self` will be replaced with a fresh type variable, which will require the user + // to specify a redundant type annotation. + if *assumed { + bindings.insert( + the_trait.self_type_typevar_id, + (the_trait.self_type_typevar.clone(), constraint.typ.clone()), + ); + } } // An identifiers type may be forall-quantified in the case of generic functions. @@ -409,10 +425,21 @@ impl<'context> Elaborator<'context> { // variable to handle generic functions. let t = self.interner.id_type_substitute_trait_as_type(ident.id); + let definition = self.interner.try_definition(ident.id); + let function_generic_count = definition.map_or(0, |definition| match &definition.kind { + DefinitionKind::Function(function) => { + self.interner.function_modifiers(function).generic_count + } + _ => 0, + }); + + let span = self.interner.expr_span(&expr_id); + let location = self.interner.expr_location(&expr_id); // This instantiates a trait's generics as well which need to be set // when the constraint below is later solved for when the function is // finished. How to link the two? - let (typ, bindings) = t.instantiate_with_bindings(bindings, self.interner); + let (typ, bindings) = + self.instantiate(t, bindings, generics, function_generic_count, span, location); // Push any trait constraints required by this definition to the context // to be checked later when the type of this variable is further constrained. @@ -447,6 +474,39 @@ impl<'context> Elaborator<'context> { typ } + fn instantiate( + &mut self, + typ: Type, + bindings: TypeBindings, + turbofish_generics: Option<Vec<Type>>, + function_generic_count: usize, + span: Span, + location: Location, + ) -> (Type, TypeBindings) { + match turbofish_generics { + Some(turbofish_generics) => { + if turbofish_generics.len() != function_generic_count { + let type_check_err = TypeCheckError::IncorrectTurbofishGenericCount { + expected_count: function_generic_count, + actual_count: turbofish_generics.len(), + span, + }; + self.errors.push((CompilationError::TypeError(type_check_err), location.file)); + typ.instantiate_with_bindings(bindings, self.interner) + } else { + // Fetch the count of any implicit generics on the function, such as + // for a method within a generic impl.
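+ // e.g. for `impl<T> Foo<T> { fn bar<U>(self) {} }` the method's type is quantified over both `T` and `U`,
+ // but only `U` counts toward `function_generic_count`, leaving one implicit generic.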
+ let implicit_generic_count = match &typ { + Type::Forall(generics, _) => generics.len() - function_generic_count, + _ => 0, + }; + typ.instantiate_with(turbofish_generics, self.interner, implicit_generic_count) + } + } + None => typ.instantiate_with_bindings(bindings, self.interner), + } + } + fn get_ident_from_path(&mut self, path: Path) -> (HirIdent, usize) { let location = Location::new(path.span(), self.file); diff --git a/noir/noir-repo/compiler/noirc_frontend/src/elaborator/traits.rs b/noir/noir-repo/compiler/noirc_frontend/src/elaborator/traits.rs new file mode 100644 index 00000000000..e7018d900d8 --- /dev/null +++ b/noir/noir-repo/compiler/noirc_frontend/src/elaborator/traits.rs @@ -0,0 +1,196 @@ +use std::collections::BTreeMap; + +use iter_extended::vecmap; +use noirc_errors::Location; + +use crate::{ + ast::{FunctionKind, TraitItem, UnresolvedGenerics, UnresolvedTraitConstraint}, + hir::{ + def_collector::dc_crate::UnresolvedTrait, def_map::ModuleId, + resolution::path_resolver::StandardPathResolver, + }, + hir_def::{ + function::{FuncMeta, HirFunction}, + traits::{TraitConstant, TraitFunction, TraitType}, + }, + macros_api::{ + BlockExpression, FunctionDefinition, FunctionReturnType, Ident, ItemVisibility, + NoirFunction, Param, Pattern, UnresolvedType, Visibility, + }, + node_interner::{FuncId, TraitId}, + token::Attributes, + Generics, Type, TypeVariable, TypeVariableKind, +}; + +use super::Elaborator; + +impl<'context> Elaborator<'context> { + pub fn collect_traits(&mut self, traits: BTreeMap<TraitId, UnresolvedTrait>) { + for (trait_id, unresolved_trait) in &traits { + self.interner.push_empty_trait(*trait_id, unresolved_trait); + } + + for (trait_id, unresolved_trait) in traits { + let generics = vecmap(&unresolved_trait.trait_def.generics, |_| { + TypeVariable::unbound(self.interner.next_type_variable_id()) + }); + + // Resolve order + // 1. Trait Types ( Trait constants can have a trait type, therefore types before constants) + let _ = self.resolve_trait_types(&unresolved_trait); + // 2. Trait Constants ( Trait's methods can use trait types & constants, therefore they should be after) + let _ = self.resolve_trait_constants(&unresolved_trait); + // 3. Trait Methods + let methods = self.resolve_trait_methods(trait_id, &unresolved_trait, &generics); + + self.interner.update_trait(trait_id, |trait_def| { + trait_def.set_methods(methods); + trait_def.generics = generics; + }); + + // This check needs to be after the trait's methods are set since + // the interner may set `interner.ordering_type` based on the result type + // of the Cmp trait, if this is it. 
+ if self.crate_id.is_stdlib() { + self.interner.try_add_operator_trait(trait_id); + } + } + } + + fn resolve_trait_types(&mut self, _unresolved_trait: &UnresolvedTrait) -> Vec<TraitType> { + // TODO + vec![] + } + + fn resolve_trait_constants( + &mut self, + _unresolved_trait: &UnresolvedTrait, + ) -> Vec<TraitConstant> { + // TODO + vec![] + } + + fn resolve_trait_methods( + &mut self, + trait_id: TraitId, + unresolved_trait: &UnresolvedTrait, + trait_generics: &Generics, + ) -> Vec<TraitFunction> { + self.local_module = unresolved_trait.module_id; + self.file = self.def_maps[&self.crate_id].file_id(unresolved_trait.module_id); + + let mut functions = vec![]; + + for item in &unresolved_trait.trait_def.items { + if let TraitItem::Function { + name, + generics, + parameters, + return_type, + where_clause, + body: _, + } = item + { + let old_generic_count = self.generics.len(); + + let the_trait = self.interner.get_trait(trait_id); + let self_typevar = the_trait.self_type_typevar.clone(); + let self_type = Type::TypeVariable(self_typevar.clone(), TypeVariableKind::Normal); + let name_span = the_trait.name.span(); + + self.add_generics(generics); + self.add_existing_generics(&unresolved_trait.trait_def.generics, trait_generics); + self.add_existing_generic("Self", name_span, self_typevar); + self.self_type = Some(self_type.clone()); + + let func_id = unresolved_trait.method_ids[&name.0.contents]; + self.resolve_trait_function( + name, + generics, + parameters, + return_type, + where_clause, + func_id, + ); + + let arguments = vecmap(parameters, |param| self.resolve_type(param.1.clone())); + let return_type = self.resolve_type(return_type.get_type().into_owned()); + + let generics = vecmap(&self.generics, |(_, type_var, _)| type_var.clone()); + + let default_impl_list: Vec<_> = unresolved_trait + .fns_with_default_impl + .functions + .iter() + .filter(|(_, _, q)| q.name() == name.0.contents) + .collect(); + + let default_impl = if default_impl_list.len() == 1 { + Some(Box::new(default_impl_list[0].2.clone())) + } else { + None + }; + + let no_environment = Box::new(Type::Unit); + let function_type = + Type::Function(arguments, Box::new(return_type), no_environment); + + functions.push(TraitFunction { + name: name.clone(), + typ: Type::Forall(generics, Box::new(function_type)), + location: Location::new(name.span(), unresolved_trait.file_id), + default_impl, + default_impl_module_id: unresolved_trait.module_id, + }); + + self.generics.truncate(old_generic_count); + } + } + functions + } + + pub fn resolve_trait_function( + &mut self, + name: &Ident, + generics: &UnresolvedGenerics, + parameters: &[(Ident, UnresolvedType)], + return_type: &FunctionReturnType, + where_clause: &[UnresolvedTraitConstraint], + func_id: FuncId, + ) { + let old_generic_count = self.generics.len(); + self.scopes.start_function(); + + // Check whether the function has globals in the local module and add them to the scope + self.resolve_local_globals(); + + self.trait_bounds = where_clause.to_vec(); + + let kind = FunctionKind::Normal; + let def = FunctionDefinition { + name: name.clone(), + attributes: Attributes::empty(), + is_unconstrained: false, + is_comptime: false, + visibility: ItemVisibility::Public, // Trait functions are always public + generics: generics.clone(), + parameters: vecmap(parameters, |(name, typ)| Param { + visibility: Visibility::Private, + pattern: Pattern::Identifier(name.clone()), + typ: typ.clone(), + span: name.span(), + }), + body: BlockExpression { statements: Vec::new() }, + span: name.span(), + where_clause: 
where_clause.to_vec(), + return_type: return_type.clone(), + return_visibility: Visibility::Private, + }; + + self.elaborate_function(NoirFunction { kind, def }, func_id); + let _ = self.scopes.end_function(); + // Don't check the scope tree for unused variables, they can't be used in a declaration anyway. + self.trait_bounds.clear(); + self.generics.truncate(old_generic_count); + } +} diff --git a/noir/noir-repo/compiler/noirc_frontend/src/elaborator/types.rs b/noir/noir-repo/compiler/noirc_frontend/src/elaborator/types.rs index 54920d5738b..3c8d805d802 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/elaborator/types.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/elaborator/types.rs @@ -4,7 +4,10 @@ use iter_extended::vecmap; use noirc_errors::{Location, Span}; use crate::{ - ast::{BinaryOpKind, IntegerBitSize, UnresolvedTraitConstraint, UnresolvedTypeExpression}, + ast::{ + BinaryOpKind, IntegerBitSize, NoirTypeAlias, UnresolvedGenerics, UnresolvedTraitConstraint, + UnresolvedTypeExpression, + }, hir::{ def_map::ModuleDefId, resolution::{ @@ -26,7 +29,10 @@ use crate::{ HirExpression, HirLiteral, HirStatement, Path, PathKind, SecondaryAttribute, Signedness, UnaryOp, UnresolvedType, UnresolvedTypeData, }, - node_interner::{DefinitionKind, ExprId, GlobalId, TraitId, TraitImplKind, TraitMethodId}, + node_interner::{ + DefinitionKind, DependencyId, ExprId, GlobalId, TraitId, TraitImplKind, TraitMethodId, + TypeAliasId, + }, Generics, Shared, StructType, Type, TypeAlias, TypeBinding, TypeVariable, TypeVariableKind, }; @@ -1435,4 +1441,27 @@ impl<'context> Elaborator<'context> { } } } + + pub fn add_existing_generics(&mut self, names: &UnresolvedGenerics, generics: &Generics) { + assert_eq!(names.len(), generics.len()); + + for (name, typevar) in names.iter().zip(generics) { + self.add_existing_generic(&name.0.contents, name.0.span(), typevar.clone()); + } + } + + pub fn add_existing_generic(&mut self, name: &str, span: Span, typevar: TypeVariable) { + // Check for name collisions of this generic + let rc_name = Rc::new(name.to_owned()); + + if let Some((_, _, first_span)) = self.find_generic(&rc_name) { + self.push_err(ResolverError::DuplicateDefinition { + name: name.to_owned(), + first_span: *first_span, + second_span: span, + }); + } else { + self.generics.push((rc_name, typevar, span)); + } + } } diff --git a/noir/noir-repo/compiler/noirc_frontend/src/hir/def_collector/dc_crate.rs b/noir/noir-repo/compiler/noirc_frontend/src/hir/def_collector/dc_crate.rs index d2eaf79b0f0..afec3839599 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/hir/def_collector/dc_crate.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/hir/def_collector/dc_crate.rs @@ -486,7 +486,7 @@ fn inject_prelude( /// Separate the globals Vec into two. The first element in the tuple will be the /// literal globals, except for arrays, and the second will be all other globals. 
/// We exclude array literals as they can contain complex types -fn filter_literal_globals( +pub fn filter_literal_globals( globals: Vec<UnresolvedGlobal>, ) -> (Vec<UnresolvedGlobal>, Vec<UnresolvedGlobal>) { globals.into_iter().partition(|global| match &global.stmt_def.expression.kind { diff --git a/noir/noir-repo/compiler/noirc_frontend/src/hir/type_check/expr.rs b/noir/noir-repo/compiler/noirc_frontend/src/hir/type_check/expr.rs index abff466e1d5..336c1cedbb6 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/hir/type_check/expr.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/hir/type_check/expr.rs @@ -371,7 +371,7 @@ impl<'interner> TypeChecker<'interner> { // We need to do this first since otherwise instantiating the type below // will replace each trait generic with a fresh type variable, rather than // the type used in the trait constraint (if it exists). See #4088. - if let ImplKind::TraitMethod(_, constraint, _) = &ident.impl_kind { + if let ImplKind::TraitMethod(_, constraint, assumed) = &ident.impl_kind { let the_trait = self.interner.get_trait(constraint.trait_id); assert_eq!(the_trait.generics.len(), constraint.trait_generics.len()); @@ -381,6 +381,16 @@ impl<'interner> TypeChecker<'interner> { bindings.insert(param.id(), (param.clone(), arg.clone())); } } + + // If the trait impl is already assumed to exist we should add any type bindings for `Self`. + // Otherwise `self` will be replaced with a fresh type variable, which will require the user + // to specify a redundant type annotation. + if *assumed { + bindings.insert( + the_trait.self_type_typevar_id, + (the_trait.self_type_typevar.clone(), constraint.typ.clone()), + ); + } } // An identifier's type may be forall-quantified in the case of generic functions. @@ -389,8 +399,6 @@ impl<'interner> TypeChecker<'interner> { // variable to handle generic functions. let t = self.interner.id_type_substitute_trait_as_type(ident.id); - let span = self.interner.expr_span(expr_id); - let definition = self.interner.try_definition(ident.id); let function_generic_count = definition.map_or(0, |definition| match &definition.kind { DefinitionKind::Function(function) => { @@ -399,6 +407,7 @@ impl<'interner> TypeChecker<'interner> { _ => 0, }); + let span = self.interner.expr_span(expr_id); // This instantiates a trait's generics as well which need to be set // when the constraint below is later solved for when the function is // finished. How to link the two? diff --git a/noir/noir-repo/compiler/noirc_frontend/src/monomorphization/mod.rs b/noir/noir-repo/compiler/noirc_frontend/src/monomorphization/mod.rs index 9a20d0dd537..54a6af97744 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/monomorphization/mod.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/monomorphization/mod.rs @@ -55,9 +55,12 @@ struct LambdaContext { struct Monomorphizer<'interner> { /// Functions are keyed by their unique ID and expected type so that we can monomorphize /// a new version of the function for each type. + /// We also key by any turbofish generics that are specified. + /// This is necessary for a case where we may have a trait generic that can be instantiated + /// outside of a function parameter or return value. /// /// Using nested HashMaps here lets us avoid cloning HirTypes when calling .get() - functions: HashMap<node_interner::FuncId, HashMap<HirType, FuncId>>, + functions: HashMap<node_interner::FuncId, HashMap<(HirType, Vec<HirType>), FuncId>>, /// Unlike functions, locals are only keyed by their unique ID because they are never /// duplicated during monomorphization. 
Doing so would allow them to be used polymorphically @@ -198,10 +201,15 @@ impl<'interner> Monomorphizer<'interner> { id: node_interner::FuncId, expr_id: node_interner::ExprId, typ: &HirType, + turbofish_generics: Vec<HirType>, trait_method: Option<TraitMethodId>, ) -> Definition { let typ = typ.follow_bindings(); - match self.functions.get(&id).and_then(|inner_map| inner_map.get(&typ)) { + match self + .functions + .get(&id) + .and_then(|inner_map| inner_map.get(&(typ.clone(), turbofish_generics.clone()))) + { Some(id) => Definition::Function(*id), None => { // Function has not been monomorphized yet @@ -222,7 +230,8 @@ impl<'interner> Monomorphizer<'interner> { Definition::Builtin(opcode) } FunctionKind::Normal => { - let id = self.queue_function(id, expr_id, typ, trait_method); + let id = + self.queue_function(id, expr_id, typ, turbofish_generics, trait_method); Definition::Function(id) } FunctionKind::Oracle => { @@ -249,8 +258,14 @@ impl<'interner> Monomorphizer<'interner> { } /// Prerequisite: typ = typ.follow_bindings() - fn define_function(&mut self, id: node_interner::FuncId, typ: HirType, new_id: FuncId) { - self.functions.entry(id).or_default().insert(typ, new_id); + fn define_function( + &mut self, + id: node_interner::FuncId, + typ: HirType, + turbofish_generics: Vec<HirType>, + new_id: FuncId, + ) { + self.functions.entry(id).or_default().insert((typ, turbofish_generics), new_id); } fn compile_main( @@ -393,7 +408,7 @@ impl<'interner> Monomorphizer<'interner> { use ast::Literal::*; let expr = match self.interner.expression(&expr) { - HirExpression::Ident(ident, _) => self.ident(ident, expr)?, + HirExpression::Ident(ident, generics) => self.ident(ident, expr, generics)?, HirExpression::Literal(HirLiteral::Str(contents)) => Literal(Str(contents)), HirExpression::Literal(HirLiteral::FmtStr(contents, idents)) => { let fields = try_vecmap(idents, |ident| self.expr(ident))?; @@ -825,6 +840,7 @@ impl<'interner> Monomorphizer<'interner> { &mut self, ident: HirIdent, expr_id: node_interner::ExprId, + generics: Option<Vec<HirType>>, ) -> Result<ast::Expression, MonomorphizationError> { let typ = self.interner.id_type(expr_id); @@ -838,7 +854,13 @@ impl<'interner> Monomorphizer<'interner> { let mutable = definition.mutable; let location = Some(ident.location); let name = definition.name.clone(); - let definition = self.lookup_function(*func_id, expr_id, &typ, None); + let definition = self.lookup_function( + *func_id, + expr_id, + &typ, + generics.unwrap_or_default(), + None, + ); let typ = Self::convert_type(&typ, ident.location)?; let ident = ast::Ident { location, mutable, definition, name, typ: typ.clone() }; let ident_expression = ast::Expression::Ident(ident); @@ -1063,10 +1085,11 @@ impl<'interner> Monomorphizer<'interner> { } }; - let func_id = match self.lookup_function(func_id, expr_id, &function_type, Some(method)) { - Definition::Function(func_id) => func_id, - _ => unreachable!(), - }; + let func_id = + match self.lookup_function(func_id, expr_id, &function_type, vec![], Some(method)) { + Definition::Function(func_id) => func_id, + _ => unreachable!(), + }; let the_trait = self.interner.get_trait(method.trait_id); let location = self.interner.expr_location(&expr_id); @@ -1292,10 +1315,11 @@ impl<'interner> Monomorphizer<'interner> { id: node_interner::FuncId, expr_id: node_interner::ExprId, function_type: HirType, + turbofish_generics: Vec<HirType>, trait_method: Option<TraitMethodId>, ) -> FuncId { let new_id = self.next_function_id(); - self.define_function(id, function_type.clone(), new_id); + self.define_function(id, function_type.clone(), turbofish_generics, new_id); + let 
bindings = self.interner.get_instantiation_bindings(expr_id); let bindings = self.follow_bindings(bindings); diff --git a/noir/noir-repo/compiler/noirc_frontend/src/node_interner.rs b/noir/noir-repo/compiler/noirc_frontend/src/node_interner.rs index 7f1b67abfbd..d4145ef6a1d 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/node_interner.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/node_interner.rs @@ -916,6 +916,13 @@ impl NodeInterner { &self.definitions[id.0] } + /// Retrieves the definition where the given id was defined. + /// This will panic if given DefinitionId::dummy_id. Use try_definition for + /// any call with a possibly undefined variable. + pub fn definition_mut(&mut self, id: DefinitionId) -> &mut DefinitionInfo { + &mut self.definitions[id.0] + } + /// Tries to retrieve the given id's definition. /// This function should be used during name resolution or type checking when we cannot be sure /// all variables have corresponding definitions (in case of an error in the user's code). @@ -997,6 +1004,11 @@ impl NodeInterner { self.definition(global.definition_id) } + pub fn get_global_definition_mut(&mut self, global_id: GlobalId) -> &mut DefinitionInfo { + let global = self.get_global(global_id); + self.definition_mut(global.definition_id) + } + pub fn get_all_globals(&self) -> &[GlobalInfo] { &self.globals } diff --git a/noir/noir-repo/compiler/noirc_frontend/src/tests.rs b/noir/noir-repo/compiler/noirc_frontend/src/tests.rs index 7bf5655486b..bd81752c046 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/tests.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/tests.rs @@ -1430,14 +1430,14 @@ fn specify_method_types_with_turbofish() { } impl Foo { - fn generic_method<U>(_self: Self) where U: Default { + fn generic_method<U>(_self: Self) -> U where U: Default { U::default() } } fn main() { let foo: Foo = Foo { inner: 1 }; - foo.generic_method::<Field>(); + let _ = foo.generic_method::<Field>(); } "#; let errors = get_program_errors(src); diff --git a/noir/noir-repo/cspell.json b/noir/noir-repo/cspell.json index b4f214c2f27..4497d0bf9da 100644 --- a/noir/noir-repo/cspell.json +++ b/noir/noir-repo/cspell.json @@ -19,6 +19,7 @@ "barebones", "barretenberg", "barustenberg", + "bbup", "bincode", "bindgen", "bitand", diff --git a/noir/noir-repo/docs/docs/getting_started/barretenberg/index.md b/noir/noir-repo/docs/docs/getting_started/barretenberg/index.md index f435ae151fe..0102c86770b 100644 --- a/noir/noir-repo/docs/docs/getting_started/barretenberg/index.md +++ b/noir/noir-repo/docs/docs/getting_started/barretenberg/index.md @@ -1,7 +1,6 @@ --- title: Barretenberg Installation -description: - `bb` is a command line tool for interacting with Aztec's proving backend Barretenberg. This page is a quick guide on how to install `bb` +description: bb is a command line tool for interacting with Aztec's proving backend Barretenberg. 
This page is a quick guide on how to install `bb` keywords: [ Barretenberg bb @@ -24,31 +23,25 @@ Open a terminal on your machine, and write: ##### macOS (Apple Silicon) ```bash -mkdir -p $HOME/.barretenberg && \ -curl -o ./barretenberg-aarch64-apple-darwin.tar.gz -L https://github.com/AztecProtocol/aztec-packages/releases/download/aztec-packages-v0.38.0/barretenberg-aarch64-apple-darwin.tar.gz && \ -tar -xvf ./barretenberg-aarch64-apple-darwin.tar.gz -C $HOME/.barretenberg/ && \ -echo 'export PATH=$PATH:$HOME/.barretenberg/' >> ~/.zshrc && \ +curl -L https://raw.githubusercontent.com/AztecProtocol/aztec-packages/master/barretenberg/cpp/installation/install | bash source ~/.zshrc +bbup -v 0.41.0 ``` ##### macOS (Intel) ```bash -mkdir -p $HOME/.barretenberg && \ -curl -o ./barretenberg-x86_64-apple-darwin.tar.gz -L https://github.com/AztecProtocol/aztec-packages/releases/download/aztec-packages-v0.38.0/barretenberg-x86_64-apple-darwin.tar.gz && \ -tar -xvf ./barretenberg-x86_64-apple-darwin.tar.gz -C $HOME/.barretenberg/ && \ -echo 'export PATH=$PATH:$HOME/.barretenberg/' >> ~/.zshrc && \ +curl -L https://raw.githubusercontent.com/AztecProtocol/aztec-packages/master/barretenberg/cpp/installation/install | bash source ~/.zshrc +bbup -v 0.41.0 ``` ##### Linux (Bash) ```bash -mkdir -p $HOME/.barretenberg && \ -curl -o ./barretenberg-x86_64-linux-gnu.tar.gz -L https://github.com/AztecProtocol/aztec-packages/releases/download/aztec-packages-v0.38.0/barretenberg-x86_64-linux-gnu.tar.gz && \ -tar -xvf ./barretenberg-x86_64-linux-gnu.tar.gz -C $HOME/.barretenberg/ && \ -echo -e 'export PATH=$PATH:$HOME/.barretenberg/' >> ~/.bashrc && \ +curl -L https://raw.githubusercontent.com/AztecProtocol/aztec-packages/master/barretenberg/cpp/installation/install | bash source ~/.bashrc +bbup -v 0.41.0 ``` Now we're ready to start working on [our first Noir program!](../hello_noir/index.md) diff --git a/noir/noir-repo/docs/docs/how_to/how-to-solidity-verifier.md b/noir/noir-repo/docs/docs/how_to/how-to-solidity-verifier.md index 7c96e22b8d5..e6ed9abaec6 100644 --- a/noir/noir-repo/docs/docs/how_to/how-to-solidity-verifier.md +++ b/noir/noir-repo/docs/docs/how_to/how-to-solidity-verifier.md @@ -230,7 +230,7 @@ It would be incorrect to say that a Noir proof verification costs any gas at all ## A Note on EVM chains -ZK-SNARK verification depends on some precompiled cryptographic primitives such as Elliptic Curve Pairings (if you like complex math, you can read about EC Pairings [here](https://medium.com/@VitalikButerin/exploring-elliptic-curve-pairings-c73c1864e627)). Not all EVM chains support EC Pairings, notably some of the ZK-EVMs. This means that you won't be able to use the verifier contract in all of them. +Noir proof verification requires the ecMul, ecAdd and ecPairing precompiles. Not all EVM chains support EC Pairings, notably some of the ZK-EVMs. This means that you won't be able to use the verifier contract in all of them. You can find an incomplete list of which EVM chains support these precompiles [here](https://www.evmdiff.com/features?feature=precompiles). For example, chains like `zkSync ERA` and `Polygon zkEVM` do not currently support these precompiles, so proof verification via Solidity verifier contracts won't work. 
Here's a quick list of EVM chains that have been tested and are known to work: diff --git a/noir/noir-repo/docs/docs/noir/standard_library/cryptographic_primitives/eddsa.mdx b/noir/noir-repo/docs/docs/noir/standard_library/cryptographic_primitives/eddsa.mdx index c2c0624dfad..789d26ce426 100644 --- a/noir/noir-repo/docs/docs/noir/standard_library/cryptographic_primitives/eddsa.mdx +++ b/noir/noir-repo/docs/docs/noir/standard_library/cryptographic_primitives/eddsa.mdx @@ -15,12 +15,12 @@ Verifier for EdDSA signatures fn eddsa_poseidon_verify(public_key_x : Field, public_key_y : Field, signature_s: Field, signature_r8_x: Field, signature_r8_y: Field, message: Field) -> bool ``` -It is also possible to specify the hash algorithm used for the signature by using the `eddsa_verify_with_hasher` function with a parameter implementing the Hasher trait. For instance, if you want to use Poseidon2 instead, you can do the following: +It is also possible to specify the hash algorithm used for the signature by using the `eddsa_verify` function, passing a type that implements the Hasher trait via the turbofish operator. +For instance, if you want to use Poseidon2 instead, you can do the following: ```rust use dep::std::hash::poseidon2::Poseidon2Hasher; -let mut hasher = Poseidon2Hasher::default(); -eddsa_verify_with_hasher(pub_key_a.x, pub_key_a.y, s_a, r8_a.x, r8_a.y, msg, &mut hasher); +eddsa_verify::<Poseidon2Hasher>(pub_key_a.x, pub_key_a.y, s_a, r8_a.x, r8_a.y, msg); ``` diff --git a/noir/noir-repo/docs/docs/noir/standard_library/is_unconstrained.md b/noir/noir-repo/docs/docs/noir/standard_library/is_unconstrained.md new file mode 100644 index 00000000000..bb157e719dc --- /dev/null +++ b/noir/noir-repo/docs/docs/noir/standard_library/is_unconstrained.md @@ -0,0 +1,59 @@ +--- +title: Is Unconstrained Function +description: + The is_unconstrained function returns whether the context at that point of the program is unconstrained or not. +keywords: + [ + unconstrained + ] +--- + +It's very common for functions in circuits to take an unconstrained hint of an expensive computation and then verify it. This is done by running the hint in an unconstrained context and then verifying the result in a constrained context. + +When a function is marked as unconstrained, any subsequent functions that it calls will also be run in an unconstrained context. However, if we are implementing a library function, other users might call it within an unconstrained context or a constrained one. Generally, in an unconstrained context we prefer just computing the result instead of taking a hint of it and verifying it, since that'd mean doing the same computation twice: + +```rust + +fn my_expensive_computation(){ + ... +} + +unconstrained fn my_expensive_computation_hint(){ + my_expensive_computation() +} + +pub fn external_interface(){ + my_expensive_computation_hint(); + // verify my_expensive_computation: If external_interface is called from unconstrained, this is redundant + ... +} + +``` + +In order to improve the performance in an unconstrained context you can use the function at `std::runtime::is_unconstrained() -> bool`: + + +```rust +use dep::std::runtime::is_unconstrained; + +fn my_expensive_computation(){ + ... +} + +unconstrained fn my_expensive_computation_hint(){ + my_expensive_computation() +} + +pub fn external_interface(){ + if is_unconstrained() { + my_expensive_computation(); + } else { + my_expensive_computation_hint(); + // verify my_expensive_computation + ... 
+ } +} + +``` + +The `is_unconstrained` result is resolved at compile time, so in unconstrained contexts the compiler removes the `else` branch, and in constrained contexts the compiler removes the `if` branch, reducing the amount of compute necessary to run `external_interface`. \ No newline at end of file diff --git a/noir/noir-repo/docs/versioned_docs/version-v0.29.0/how_to/how-to-solidity-verifier.md b/noir/noir-repo/docs/versioned_docs/version-v0.29.0/how_to/how-to-solidity-verifier.md index e3c7c1065da..e5ce3b7bc22 100644 --- a/noir/noir-repo/docs/versioned_docs/version-v0.29.0/how_to/how-to-solidity-verifier.md +++ b/noir/noir-repo/docs/versioned_docs/version-v0.29.0/how_to/how-to-solidity-verifier.md @@ -212,13 +212,14 @@ It would be incorrect to say that a Noir proof verification costs any gas at all ZK-SNARK verification depends on some precompiled cryptographic primitives such as Elliptic Curve Pairings (if you like complex math, you can read about EC Pairings [here](https://medium.com/@VitalikButerin/exploring-elliptic-curve-pairings-c73c1864e627)). Not all EVM chains support EC Pairings, notably some of the ZK-EVMs. This means that you won't be able to use the verifier contract in all of them. -For example, chains like `zkSync ERA` and `Polygon zkEVM` do not currently support these precompiles, so proof verification via Solidity verifier contracts won't work. Here's a quick list of EVM chains that have been tested and are known to work: +For example, `Polygon zkEVM` does not currently support these precompiles, so proof verification via Solidity verifier contracts won't work. Here's a quick list of EVM chains that have been tested and are known to work: - Optimism - Arbitrum - Polygon PoS - Scroll - Celo +- zkSync If you test any other chains, please open a PR on this page to update the list. See [this doc](https://github.com/noir-lang/noir-starter/tree/main/with-foundry#testing-on-chain) for more info about testing verifier contracts on different EVM chains. 
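As an aside on the `is_unconstrained` documentation added above: its examples use `...` placeholders, so here is a minimal, self-contained Noir sketch of the same hint-and-verify pattern. The `divmod`, `divmod_hint`, and `compute_divmod` names are invented for illustration; only the `std::runtime::is_unconstrained` builtin comes from this diff.

```rust
use dep::std::runtime::is_unconstrained;

// Hypothetical example, not part of the stdlib: stands in for any computation
// that is cheaper to verify than to re-derive in constraints.
fn compute_divmod(a: u32, b: u32) -> (u32, u32) {
    (a / b, a % b)
}

// Running the computation behind an unconstrained function keeps it out of the circuit.
unconstrained fn divmod_hint(a: u32, b: u32) -> (u32, u32) {
    compute_divmod(a, b)
}

pub fn divmod(a: u32, b: u32) -> (u32, u32) {
    if is_unconstrained() {
        // Already unconstrained: compute directly instead of doing the work twice.
        compute_divmod(a, b)
    } else {
        // Constrained: take the hint, then verify it with two cheap checks.
        let (q, r) = divmod_hint(a, b);
        assert(r < b);
        assert(q * b + r == a);
        (q, r)
    }
}
```

In a constrained caller the two asserts are the only constraints generated for the division, while an unconstrained caller skips the hint round-trip entirely; this is exactly the compile-time branch pruning described at the end of the `is_unconstrained` docs.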
diff --git a/noir/noir-repo/examples/codegen-verifier/.gitignore b/noir/noir-repo/examples/codegen_verifier/.gitignore similarity index 100% rename from noir/noir-repo/examples/codegen-verifier/.gitignore rename to noir/noir-repo/examples/codegen_verifier/.gitignore diff --git a/noir/noir-repo/examples/codegen-verifier/Nargo.toml b/noir/noir-repo/examples/codegen_verifier/Nargo.toml similarity index 100% rename from noir/noir-repo/examples/codegen-verifier/Nargo.toml rename to noir/noir-repo/examples/codegen_verifier/Nargo.toml diff --git a/noir/noir-repo/examples/codegen-verifier/Prover.toml b/noir/noir-repo/examples/codegen_verifier/Prover.toml similarity index 100% rename from noir/noir-repo/examples/codegen-verifier/Prover.toml rename to noir/noir-repo/examples/codegen_verifier/Prover.toml diff --git a/noir/noir-repo/examples/codegen-verifier/codegen_verifier.sh b/noir/noir-repo/examples/codegen_verifier/codegen_verifier.sh similarity index 100% rename from noir/noir-repo/examples/codegen-verifier/codegen_verifier.sh rename to noir/noir-repo/examples/codegen_verifier/codegen_verifier.sh diff --git a/noir/noir-repo/examples/codegen-verifier/foundry.toml b/noir/noir-repo/examples/codegen_verifier/foundry.toml similarity index 100% rename from noir/noir-repo/examples/codegen-verifier/foundry.toml rename to noir/noir-repo/examples/codegen_verifier/foundry.toml diff --git a/noir/noir-repo/examples/codegen-verifier/src/main.nr b/noir/noir-repo/examples/codegen_verifier/src/main.nr similarity index 100% rename from noir/noir-repo/examples/codegen-verifier/src/main.nr rename to noir/noir-repo/examples/codegen_verifier/src/main.nr diff --git a/noir/noir-repo/examples/codegen-verifier/test.sh b/noir/noir-repo/examples/codegen_verifier/test.sh similarity index 100% rename from noir/noir-repo/examples/codegen-verifier/test.sh rename to noir/noir-repo/examples/codegen_verifier/test.sh diff --git a/noir/noir-repo/noir_stdlib/src/eddsa.nr b/noir/noir-repo/noir_stdlib/src/eddsa.nr index 3aff6043ffd..337969be90e 100644 --- a/noir/noir-repo/noir_stdlib/src/eddsa.nr +++ b/noir/noir-repo/noir_stdlib/src/eddsa.nr @@ -3,6 +3,7 @@ use crate::ec::consts::te::baby_jubjub; use crate::ec::tecurve::affine::Point as TEPoint; use crate::hash::{Hash, Hasher, BuildHasher, BuildHasherDefault}; use crate::hash::poseidon::PoseidonHasher; +use crate::default::Default; // Returns true if signature is valid pub fn eddsa_poseidon_verify( @@ -13,28 +14,25 @@ pub fn eddsa_poseidon_verify( signature_r8_y: Field, message: Field ) -> bool { - let mut hasher = PoseidonHasher::default(); - eddsa_verify_with_hasher( + eddsa_verify::<PoseidonHasher>( pub_key_x, pub_key_y, signature_s, signature_r8_x, signature_r8_y, - message, - &mut hasher + message ) } -pub fn eddsa_verify_with_hasher<H>( +pub fn eddsa_verify<H>( pub_key_x: Field, pub_key_y: Field, signature_s: Field, signature_r8_x: Field, signature_r8_y: Field, - message: Field, - hasher: &mut H + message: Field ) -> bool -where H: Hasher { +where H: Hasher + Default { // Verifies by testing: // S * B8 = R8 + H(R8, A, m) * A8 let bjj = baby_jubjub(); @@ -47,12 +45,13 @@ where H: Hasher { // Ensure S < Subgroup Order assert(signature_s.lt(bjj.suborder)); // Calculate the h = H(R, A, msg) - signature_r8_x.hash(hasher); - signature_r8_y.hash(hasher); - pub_key_x.hash(hasher); - pub_key_y.hash(hasher); - message.hash(hasher); - let hash: Field = (*hasher).finish(); + let mut hasher = H::default(); + hasher.write(signature_r8_x); + hasher.write(signature_r8_y); + hasher.write(pub_key_x); + 
hasher.write(pub_key_y); + hasher.write(message); + let hash: Field = hasher.finish(); // Calculate second part of the right side: right2 = h*8*A // Multiply by 8 by doubling 3 times. This also ensures that the result is in the subgroup. let pub_key_mul_2 = bjj.curve.add(pub_key, pub_key); diff --git a/noir/noir-repo/noir_stdlib/src/field/bn254.nr b/noir/noir-repo/noir_stdlib/src/field/bn254.nr index 2e82d9e7c23..bcdc23f80dc 100644 --- a/noir/noir-repo/noir_stdlib/src/field/bn254.nr +++ b/noir/noir-repo/noir_stdlib/src/field/bn254.nr @@ -1,11 +1,13 @@ +use crate::runtime::is_unconstrained; + // The low and high decomposition of the field modulus global PLO: Field = 53438638232309528389504892708671455233; global PHI: Field = 64323764613183177041862057485226039389; global TWO_POW_128: Field = 0x100000000000000000000000000000000; -/// A hint for decomposing a single field into two 16 byte fields. -unconstrained fn decompose_unsafe(x: Field) -> (Field, Field) { +// Decomposes a single field into two 16 byte fields. +fn compute_decomposition(x: Field) -> (Field, Field) { let x_bytes = x.to_le_bytes(32); let mut low: Field = 0; @@ -21,37 +23,11 @@ unconstrained fn decompose_unsafe(x: Field) -> (Field, Field) { (low, high) } -// Assert that (alo > blo && ahi >= bhi) || (alo <= blo && ahi > bhi) -fn assert_gt_limbs(a: (Field, Field), b: (Field, Field)) { - let (alo, ahi) = a; - let (blo, bhi) = b; - let borrow = lte_unsafe_16(alo, blo); - - let rlo = alo - blo - 1 + (borrow as Field) * TWO_POW_128; - let rhi = ahi - bhi - (borrow as Field); - - rlo.assert_max_bit_size(128); - rhi.assert_max_bit_size(128); -} - -/// Decompose a single field into two 16 byte fields. -pub fn decompose(x: Field) -> (Field, Field) { - // Take hints of the decomposition - let (xlo, xhi) = decompose_unsafe(x); - - // Range check the limbs - xlo.assert_max_bit_size(128); - xhi.assert_max_bit_size(128); - - // Check that the decomposition is correct - assert_eq(x, xlo + TWO_POW_128 * xhi); - - // Assert that the decomposition of P is greater than the decomposition of x - assert_gt_limbs((PLO, PHI), (xlo, xhi)); - (xlo, xhi) +unconstrained fn decompose_hint(x: Field) -> (Field, Field) { + compute_decomposition(x) } -fn lt_unsafe_internal(x: Field, y: Field, num_bytes: u32) -> bool { +fn compute_lt(x: Field, y: Field, num_bytes: u32) -> bool { let x_bytes = x.to_le_radix(256, num_bytes); let y_bytes = y.to_le_radix(256, num_bytes); let mut x_is_lt = false; @@ -70,29 +46,67 @@ fn lt_unsafe_internal(x: Field, y: Field, num_bytes: u32) -> bool { x_is_lt } -fn lte_unsafe_internal(x: Field, y: Field, num_bytes: u32) -> bool { +fn compute_lte(x: Field, y: Field, num_bytes: u32) -> bool { if x == y { true } else { - lt_unsafe_internal(x, y, num_bytes) + compute_lt(x, y, num_bytes) } } -unconstrained fn lt_unsafe_32(x: Field, y: Field) -> bool { - lt_unsafe_internal(x, y, 32) +unconstrained fn lt_32_hint(x: Field, y: Field) -> bool { + compute_lt(x, y, 32) } -unconstrained fn lte_unsafe_16(x: Field, y: Field) -> bool { - lte_unsafe_internal(x, y, 16) +unconstrained fn lte_16_hint(x: Field, y: Field) -> bool { + compute_lte(x, y, 16) +} + +// Assert that (alo > blo && ahi >= bhi) || (alo <= blo && ahi > bhi) +fn assert_gt_limbs(a: (Field, Field), b: (Field, Field)) { + let (alo, ahi) = a; + let (blo, bhi) = b; + let borrow = lte_16_hint(alo, blo); + + let rlo = alo - blo - 1 + (borrow as Field) * TWO_POW_128; + let rhi = ahi - bhi - (borrow as Field); + + rlo.assert_max_bit_size(128); + rhi.assert_max_bit_size(128); +} + +/// 
Decompose a single field into two 16 byte fields. +pub fn decompose(x: Field) -> (Field, Field) { + if is_unconstrained() { + compute_decomposition(x) + } else { + // Take hints of the decomposition + let (xlo, xhi) = decompose_hint(x); + + // Range check the limbs + xlo.assert_max_bit_size(128); + xhi.assert_max_bit_size(128); + + // Check that the decomposition is correct + assert_eq(x, xlo + TWO_POW_128 * xhi); + + // Assert that the decomposition of P is greater than the decomposition of x + assert_gt_limbs((PLO, PHI), (xlo, xhi)); + (xlo, xhi) + } } pub fn assert_gt(a: Field, b: Field) { - // Decompose a and b - let a_limbs = decompose(a); - let b_limbs = decompose(b); + if is_unconstrained() { + assert(compute_lt(b, a, 32)); + } else { + // Decompose a and b + let a_limbs = decompose(a); + let b_limbs = decompose(b); - // Assert that a_limbs is greater than b_limbs - assert_gt_limbs(a_limbs, b_limbs) + // Assert that a_limbs is greater than b_limbs + assert_gt_limbs(a_limbs, b_limbs) + } } pub fn assert_lt(a: Field, b: Field) { @@ -100,14 +114,19 @@ pub fn assert_lt(a: Field, b: Field) { } pub fn gt(a: Field, b: Field) -> bool { - if a == b { - false - } else if lt_unsafe_32(a, b) { - assert_gt(b, a); + if is_unconstrained() { + compute_lt(b, a, 32) + } else if a == b { false - } else { - assert_gt(a, b); - true + } else { + // Take a hint of the comparison and verify it + if lt_32_hint(a, b) { + assert_gt(b, a); + false + } else { + assert_gt(a, b); + true + } } } @@ -117,44 +136,41 @@ pub fn lt(a: Field, b: Field) -> bool { mod tests { // TODO: Allow imports from "super" - use crate::field::bn254::{ - decompose_unsafe, decompose, lt_unsafe_internal, assert_gt, gt, lt, TWO_POW_128, - lte_unsafe_internal, PLO, PHI - }; + use crate::field::bn254::{decompose_hint, decompose, compute_lt, assert_gt, gt, lt, TWO_POW_128, compute_lte, PLO, PHI}; #[test] - fn check_decompose_unsafe() { - assert_eq(decompose_unsafe(TWO_POW_128), (0, 1)); - assert_eq(decompose_unsafe(TWO_POW_128 + 0x1234567890), (0x1234567890, 1)); - assert_eq(decompose_unsafe(0x1234567890), (0x1234567890, 0)); + fn check_decompose() { + assert_eq(decompose(TWO_POW_128), (0, 1)); + assert_eq(decompose(TWO_POW_128 + 0x1234567890), (0x1234567890, 1)); + assert_eq(decompose(0x1234567890), (0x1234567890, 0)); } #[test] - fn check_decompose() { + unconstrained fn check_decompose_unconstrained() { assert_eq(decompose(TWO_POW_128), (0, 1)); assert_eq(decompose(TWO_POW_128 + 0x1234567890), (0x1234567890, 1)); assert_eq(decompose(0x1234567890), (0x1234567890, 0)); } #[test] - fn check_lt_unsafe() { - assert(lt_unsafe_internal(0, 1, 16)); - assert(lt_unsafe_internal(0, 0x100, 16)); - assert(lt_unsafe_internal(0x100, TWO_POW_128 - 1, 16)); - assert(!lt_unsafe_internal(0, TWO_POW_128, 16)); + fn check_compute_lt() { + assert(compute_lt(0, 1, 16)); + assert(compute_lt(0, 0x100, 16)); + assert(compute_lt(0x100, TWO_POW_128 - 1, 16)); + assert(!compute_lt(0, TWO_POW_128, 16)); } #[test] - fn check_lte_unsafe() { - assert(lte_unsafe_internal(0, 1, 16)); - assert(lte_unsafe_internal(0, 0x100, 16)); - assert(lte_unsafe_internal(0x100, TWO_POW_128 - 1, 16)); - assert(!lte_unsafe_internal(0, TWO_POW_128, 16)); - - assert(lte_unsafe_internal(0, 0, 16)); - assert(lte_unsafe_internal(0x100, 0x100, 16)); - assert(lte_unsafe_internal(TWO_POW_128 - 1, TWO_POW_128 - 1, 16)); - assert(lte_unsafe_internal(TWO_POW_128, TWO_POW_128, 16)); + fn check_compute_lte() { + assert(compute_lte(0, 1, 16)); + assert(compute_lte(0, 0x100, 16)); + 
assert(compute_lte(0x100, TWO_POW_128 - 1, 16)); + assert(!compute_lte(0, TWO_POW_128, 16)); + + assert(compute_lte(0, 0, 16)); + assert(compute_lte(0x100, 0x100, 16)); + assert(compute_lte(TWO_POW_128 - 1, TWO_POW_128 - 1, 16)); + assert(compute_lte(TWO_POW_128, TWO_POW_128, 16)); } #[test] @@ -166,6 +182,15 @@ mod tests { assert_gt(0 - 1, 0); } + #[test] + unconstrained fn check_assert_gt_unconstrained() { + assert_gt(1, 0); + assert_gt(0x100, 0); + assert_gt((0 - 1), (0 - 2)); + assert_gt(TWO_POW_128, 0); + assert_gt(0 - 1, 0); + } + #[test] fn check_gt() { assert(gt(1, 0)); @@ -178,6 +203,18 @@ mod tests { assert(!gt(0 - 2, 0 - 1)); } + #[test] + unconstrained fn check_gt_unconstrained() { + assert(gt(1, 0)); + assert(gt(0x100, 0)); + assert(gt((0 - 1), (0 - 2))); + assert(gt(TWO_POW_128, 0)); + assert(!gt(0, 0)); + assert(!gt(0, 0x100)); + assert(gt(0 - 1, 0 - 2)); + assert(!gt(0 - 2, 0 - 1)); + } + #[test] fn check_plo_phi() { assert_eq(PLO + PHI * TWO_POW_128, 0); diff --git a/noir/noir-repo/noir_stdlib/src/lib.nr b/noir/noir-repo/noir_stdlib/src/lib.nr index f11b21003ad..ad47171fa46 100644 --- a/noir/noir-repo/noir_stdlib/src/lib.nr +++ b/noir/noir-repo/noir_stdlib/src/lib.nr @@ -25,6 +25,7 @@ mod default; mod prelude; mod uint128; mod bigint; +mod runtime; // Oracle calls are required to be wrapped in an unconstrained function // Thus, the only argument to the `println` oracle is expected to always be an ident @@ -70,3 +71,4 @@ pub fn wrapping_mul(x: T, y: T) -> T { #[builtin(as_witness)] pub fn as_witness(x: Field) {} + diff --git a/noir/noir-repo/noir_stdlib/src/runtime.nr b/noir/noir-repo/noir_stdlib/src/runtime.nr new file mode 100644 index 00000000000..c075107cd52 --- /dev/null +++ b/noir/noir-repo/noir_stdlib/src/runtime.nr @@ -0,0 +1,2 @@ +#[builtin(is_unconstrained)] +pub fn is_unconstrained() -> bool {} diff --git a/noir/noir-repo/scripts/install_bb.sh b/noir/noir-repo/scripts/install_bb.sh index 4ee5bbbbe47..c3ed476200a 100755 --- a/noir/noir-repo/scripts/install_bb.sh +++ b/noir/noir-repo/scripts/install_bb.sh @@ -1,9 +1,11 @@ #!/bin/bash -# We use this script just for CI so we assume we're running on x86 linux +VERSION="0.41.0" -mkdir -p $HOME/.barretenberg -curl -o ./barretenberg-aarch64-apple-darwin.tar.gz -L https://github.com/AztecProtocol/aztec-packages/releases/download/aztec-packages-v0.38.0/barretenberg-aarch64-apple-darwin.tar.gz -tar -xvf ./barretenberg-aarch64-apple-darwin.tar.gz -C $HOME/.barretenberg/ -echo 'export PATH=$PATH:$HOME/.barretenberg/' >> ~/.bashrc -source ~/.bashrc +BBUP_PATH=~/.bb/bbup + +if ! 
[ -f $BBUP_PATH ]; then + curl -L https://raw.githubusercontent.com/AztecProtocol/aztec-packages/master/barretenberg/cpp/installation/install | bash +fi + +$BBUP_PATH -v $VERSION diff --git a/noir/noir-repo/test_programs/execution_success/brillig_slice_input/Nargo.toml b/noir/noir-repo/test_programs/compile_success_empty/brillig_slice_input/Nargo.toml similarity index 100% rename from noir/noir-repo/test_programs/execution_success/brillig_slice_input/Nargo.toml rename to noir/noir-repo/test_programs/compile_success_empty/brillig_slice_input/Nargo.toml diff --git a/noir/noir-repo/test_programs/execution_success/brillig_slice_input/src/main.nr b/noir/noir-repo/test_programs/compile_success_empty/brillig_slice_input/src/main.nr similarity index 100% rename from noir/noir-repo/test_programs/execution_success/brillig_slice_input/src/main.nr rename to noir/noir-repo/test_programs/compile_success_empty/brillig_slice_input/src/main.nr diff --git a/noir/noir-repo/test_programs/execution_success/regression_4383/Nargo.toml b/noir/noir-repo/test_programs/compile_success_empty/regression_4383/Nargo.toml similarity index 100% rename from noir/noir-repo/test_programs/execution_success/regression_4383/Nargo.toml rename to noir/noir-repo/test_programs/compile_success_empty/regression_4383/Nargo.toml diff --git a/noir/noir-repo/test_programs/execution_success/regression_4383/src/main.nr b/noir/noir-repo/test_programs/compile_success_empty/regression_4383/src/main.nr similarity index 100% rename from noir/noir-repo/test_programs/execution_success/regression_4383/src/main.nr rename to noir/noir-repo/test_programs/compile_success_empty/regression_4383/src/main.nr diff --git a/noir/noir-repo/test_programs/execution_success/regression_4436/Nargo.toml b/noir/noir-repo/test_programs/compile_success_empty/regression_4436/Nargo.toml similarity index 100% rename from noir/noir-repo/test_programs/execution_success/regression_4436/Nargo.toml rename to noir/noir-repo/test_programs/compile_success_empty/regression_4436/Nargo.toml diff --git a/noir/noir-repo/test_programs/execution_success/regression_4436/src/main.nr b/noir/noir-repo/test_programs/compile_success_empty/regression_4436/src/main.nr similarity index 100% rename from noir/noir-repo/test_programs/execution_success/regression_4436/src/main.nr rename to noir/noir-repo/test_programs/compile_success_empty/regression_4436/src/main.nr diff --git a/noir/noir-repo/test_programs/execution_success/slice_init_with_complex_type/Nargo.toml b/noir/noir-repo/test_programs/compile_success_empty/slice_init_with_complex_type/Nargo.toml similarity index 100% rename from noir/noir-repo/test_programs/execution_success/slice_init_with_complex_type/Nargo.toml rename to noir/noir-repo/test_programs/compile_success_empty/slice_init_with_complex_type/Nargo.toml diff --git a/noir/noir-repo/test_programs/execution_success/slice_init_with_complex_type/Prover.toml b/noir/noir-repo/test_programs/compile_success_empty/slice_init_with_complex_type/Prover.toml similarity index 100% rename from noir/noir-repo/test_programs/execution_success/slice_init_with_complex_type/Prover.toml rename to noir/noir-repo/test_programs/compile_success_empty/slice_init_with_complex_type/Prover.toml diff --git a/noir/noir-repo/test_programs/execution_success/slice_init_with_complex_type/src/main.nr b/noir/noir-repo/test_programs/compile_success_empty/slice_init_with_complex_type/src/main.nr similarity index 100% rename from noir/noir-repo/test_programs/execution_success/slice_init_with_complex_type/src/main.nr 
rename to noir/noir-repo/test_programs/compile_success_empty/slice_init_with_complex_type/src/main.nr diff --git a/noir/noir-repo/test_programs/execution_success/brillig_block_parameter_liveness/Nargo.toml b/noir/noir-repo/test_programs/execution_success/brillig_block_parameter_liveness/Nargo.toml new file mode 100644 index 00000000000..e094862a1dc --- /dev/null +++ b/noir/noir-repo/test_programs/execution_success/brillig_block_parameter_liveness/Nargo.toml @@ -0,0 +1,6 @@ +[package] +name = "brillig_block_parameter_liveness" +type = "bin" +authors = [""] + +[dependencies] diff --git a/noir/noir-repo/test_programs/execution_success/brillig_block_parameter_liveness/Prover.toml b/noir/noir-repo/test_programs/execution_success/brillig_block_parameter_liveness/Prover.toml new file mode 100644 index 00000000000..6b7fd9c6ab6 --- /dev/null +++ b/noir/noir-repo/test_programs/execution_success/brillig_block_parameter_liveness/Prover.toml @@ -0,0 +1 @@ +conditions = ["1", "0", "1", "0", "1"] diff --git a/noir/noir-repo/test_programs/execution_success/brillig_block_parameter_liveness/src/main.nr b/noir/noir-repo/test_programs/execution_success/brillig_block_parameter_liveness/src/main.nr new file mode 100644 index 00000000000..142290ecbe2 --- /dev/null +++ b/noir/noir-repo/test_programs/execution_success/brillig_block_parameter_liveness/src/main.nr @@ -0,0 +1,91 @@ +// Tests that we run liveness in block parameters by trying to create too many block parameters to fit in the stack + +// Uses up 10 stack items +struct Inner { + a: u64, + b: u64, + c: u64, + d: u64, + e: u64, + f: u64, + g: u64, + h: u64, + i: u64, + j: u64, +} + +// Uses up 50 stack items +struct Middle { + inner_a: Inner, + inner_b: Inner, + inner_c: Inner, + inner_d: Inner, + inner_e: Inner, +} + +// Uses up 500 stack items +struct Outer { + middle_a: Middle, + middle_b: Middle, + middle_c: Middle, + middle_d: Middle, + middle_e: Middle, + middle_f: Middle, + middle_g: Middle, + middle_h: Middle, +} + +// If we don't take into account block parameter liveness, this function will need 5*500=2500 stack items +unconstrained fn main(conditions: [bool; 5]) -> pub Outer { + let out0 = if conditions[0] { + let mut outer: Outer = dep::std::unsafe::zeroed(); + outer.middle_a.inner_a.a = 1; + outer + } else { + let mut outer: Outer= dep::std::unsafe::zeroed(); + outer.middle_f.inner_c.d = 2; + outer + }; + + let out1 = if conditions[1] { + let mut new_outer = out0; + new_outer.middle_a.inner_a.b = 3; + new_outer + } else { + let mut new_outer = out0; + new_outer.middle_f.inner_c.c = 4; + new_outer + }; + + let out2 = if conditions[2] { + let mut new_outer = out1; + new_outer.middle_a.inner_a.c = 5; + new_outer + } else { + let mut new_outer = out1; + new_outer.middle_f.inner_c.b = 6; + new_outer + }; + + let out3 = if conditions[3] { + let mut new_outer = out2; + new_outer.middle_a.inner_a.d = 7; + new_outer + } else { + let mut new_outer = out2; + new_outer.middle_f.inner_c.a = 8; + new_outer + }; + + let out4 = if conditions[4] { + let mut new_outer = out3; + new_outer.middle_a.inner_a.f = 9; + new_outer + } else { + let mut new_outer = out3; + new_outer.middle_f.inner_c.f = 10; + new_outer + }; + + out4 +} diff --git a/noir/noir-repo/test_programs/execution_success/eddsa/src/main.nr b/noir/noir-repo/test_programs/execution_success/eddsa/src/main.nr index 012c8466f2f..ada15d5405c 100644 --- a/noir/noir-repo/test_programs/execution_success/eddsa/src/main.nr +++ b/noir/noir-repo/test_programs/execution_success/eddsa/src/main.nr @@ -2,7 +2,7 @@ 
use dep::std::compat; use dep::std::ec::consts::te::baby_jubjub; use dep::std::ec::tecurve::affine::Point as TEPoint; use dep::std::hash; -use dep::std::eddsa::{eddsa_to_pub, eddsa_poseidon_verify, eddsa_verify_with_hasher}; +use dep::std::eddsa::{eddsa_to_pub, eddsa_poseidon_verify, eddsa_verify}; use dep::std::hash::poseidon2::Poseidon2Hasher; fn main(msg: pub Field, _priv_key_a: Field, _priv_key_b: Field) { @@ -50,7 +50,6 @@ fn main(msg: pub Field, _priv_key_a: Field, _priv_key_b: Field) { // User A's signature over the message can't be used with another message assert(!eddsa_poseidon_verify(pub_key_a.x, pub_key_a.y, s_a, r8_a.x, r8_a.y, msg + 1)); // Using a different hash should fail - let mut hasher = Poseidon2Hasher::default(); - assert(!eddsa_verify_with_hasher(pub_key_a.x, pub_key_a.y, s_a, r8_a.x, r8_a.y, msg, &mut hasher)); + assert(!eddsa_verify::<Poseidon2Hasher>(pub_key_a.x, pub_key_a.y, s_a, r8_a.x, r8_a.y, msg)); } } diff --git a/noir/noir-repo/test_programs/execution_success/is_unconstrained/Nargo.toml b/noir/noir-repo/test_programs/execution_success/is_unconstrained/Nargo.toml new file mode 100644 index 00000000000..deef68c7f72 --- /dev/null +++ b/noir/noir-repo/test_programs/execution_success/is_unconstrained/Nargo.toml @@ -0,0 +1,6 @@ +[package] +name = "is_unconstrained" +type = "bin" +authors = [""] + +[dependencies] diff --git a/noir/noir-repo/test_programs/execution_success/is_unconstrained/Prover.toml b/noir/noir-repo/test_programs/execution_success/is_unconstrained/Prover.toml new file mode 100644 index 00000000000..8b137891791 --- /dev/null +++ b/noir/noir-repo/test_programs/execution_success/is_unconstrained/Prover.toml @@ -0,0 +1 @@ + diff --git a/noir/noir-repo/test_programs/execution_success/is_unconstrained/src/main.nr b/noir/noir-repo/test_programs/execution_success/is_unconstrained/src/main.nr new file mode 100644 index 00000000000..af40af1029f --- /dev/null +++ b/noir/noir-repo/test_programs/execution_success/is_unconstrained/src/main.nr @@ -0,0 +1,14 @@ +use dep::std::runtime::is_unconstrained; + +fn check(should_be_unconstrained: bool) { + assert_eq(should_be_unconstrained, is_unconstrained()); +} + +unconstrained fn unconstrained_intermediate() { + check(true); +} + +fn main() { + unconstrained_intermediate(); + check(false); +} diff --git a/noir/noir-repo/test_programs/execution_success/turbofish_call_func_diff_types/Nargo.toml b/noir/noir-repo/test_programs/execution_success/turbofish_call_func_diff_types/Nargo.toml new file mode 100644 index 00000000000..8624cda646b --- /dev/null +++ b/noir/noir-repo/test_programs/execution_success/turbofish_call_func_diff_types/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "turbofish_call_func_diff_types" +type = "bin" +authors = [""] +compiler_version = ">=0.29.0" + +[dependencies] \ No newline at end of file diff --git a/noir/noir-repo/test_programs/execution_success/turbofish_call_func_diff_types/Prover.toml b/noir/noir-repo/test_programs/execution_success/turbofish_call_func_diff_types/Prover.toml new file mode 100644 index 00000000000..f28f2f8cc48 --- /dev/null +++ b/noir/noir-repo/test_programs/execution_success/turbofish_call_func_diff_types/Prover.toml @@ -0,0 +1,2 @@ +x = "5" +y = "10" diff --git a/noir/noir-repo/test_programs/execution_success/turbofish_call_func_diff_types/src/main.nr b/noir/noir-repo/test_programs/execution_success/turbofish_call_func_diff_types/src/main.nr new file mode 100644 index 00000000000..b880d3ae047 --- /dev/null +++ 
b/noir/noir-repo/test_programs/execution_success/turbofish_call_func_diff_types/src/main.nr @@ -0,0 +1,34 @@ +use dep::std::hash::Hasher; +use dep::std::hash::poseidon2::Poseidon2Hasher; +use dep::std::hash::poseidon::PoseidonHasher; + +fn main(x: Field, y: pub Field) { + let mut hasher = PoseidonHasher::default(); + hasher.write(x); + hasher.write(y); + let poseidon_expected_hash = hasher.finish(); + // Check that we get the same result when using the hasher in a + // method that purely uses trait methods without a supplied implementation. + assert(hash_simple_array::<PoseidonHasher>([x, y]) == poseidon_expected_hash); + + // Now let's do the same logic but with a different `Hasher` supplied to the turbofish operator + // We want to make sure that we have correctly monomorphized a function with a trait generic + // where the generic is not used on any function parameters or the return value. + let mut hasher = Poseidon2Hasher::default(); + hasher.write(x); + hasher.write(y); + let poseidon2_expected_hash = hasher.finish(); + assert(hash_simple_array::<Poseidon2Hasher>([x, y]) == poseidon2_expected_hash); +} + +fn hash_simple_array<H>(input: [Field; 2]) -> Field where H: Hasher + Default { + // Check that we can call a trait method instead of a trait implementation + let mut hasher = H::default(); + // Regression that the object is converted to a mutable reference type `&mut _`. + // Otherwise will see `Expected type &mut _, found type H`. + // Then we need to make sure to also auto dereference later in the type checking process + // when searching for a matching impl or else we will get `No matching impl found for `&mut H: Hasher` + hasher.write(input[0]); + hasher.write(input[1]); + hasher.finish() +} diff --git a/noir/noir-repo/test_programs/gates_report.sh b/noir/noir-repo/test_programs/gates_report.sh index 3b0b4d9e148..b33e81b037c 100755 --- a/noir/noir-repo/test_programs/gates_report.sh +++ b/noir/noir-repo/test_programs/gates_report.sh @@ -1,36 +1,37 @@ #!/usr/bin/env bash set -e +BACKEND=${BACKEND:-bb} + # These tests are incompatible with gas reporting excluded_dirs=("workspace" "workspace_default_member" "double_verify_nested_proof") -# These tests cause failures in CI with a stack overflow for some reason. -ci_excluded_dirs=("eddsa") - current_dir=$(pwd) -base_path="$current_dir/execution_success" -test_dirs=$(ls $base_path) +artifacts_path="$current_dir/acir_artifacts" +test_dirs=$(ls $artifacts_path) -# We generate a Noir workspace which contains all of the test cases -# This allows us to generate a gates report using `nargo info` for all of them at once. 
+echo "{\"programs\": [" > gates_report.json -echo "[workspace]" > Nargo.toml -echo "members = [" >> Nargo.toml +# Bound for checking where to place last parentheses +NUM_ARTIFACTS=$(ls -1q "$artifacts_path" | wc -l) -for dir in $test_dirs; do - if [[ " ${excluded_dirs[@]} " =~ " ${dir} " ]]; then - continue - fi +ITER="1" +for pathname in $test_dirs; do + ARTIFACT_NAME=$(basename "$pathname") + + GATES_INFO=$($BACKEND gates -b "$artifacts_path/$ARTIFACT_NAME/target/program.json") + MAIN_FUNCTION_INFO=$(echo $GATES_INFO | jq -r '.functions[0] | .name = "main"') + echo "{\"package_name\": \"$ARTIFACT_NAME\", \"functions\": [$MAIN_FUNCTION_INFO]" >> gates_report.json - if [[ ${CI-false} = "true" ]] && [[ " ${ci_excluded_dirs[@]} " =~ " ${dir} " ]]; then - continue + if (($ITER == $NUM_ARTIFACTS)); then + echo "}" >> gates_report.json + else + echo "}, " >> gates_report.json fi - echo " \"execution_success/$dir\"," >> Nargo.toml + ITER=$(( $ITER + 1 )) done -echo "]" >> Nargo.toml +echo "]}" >> gates_report.json -nargo info --json > gates_report.json -rm Nargo.toml diff --git a/noir/noir-repo/test_programs/rebuild.sh b/noir/noir-repo/test_programs/rebuild.sh index 4733bad10c3..cfc6479febf 100755 --- a/noir/noir-repo/test_programs/rebuild.sh +++ b/noir/noir-repo/test_programs/rebuild.sh @@ -8,6 +8,14 @@ process_dir() { local current_dir=$2 local dir_name=$(basename "$dir") + if [[ ! -f "$dir/Nargo.toml" ]]; then + # This directory isn't a proper test but just hold some stale build artifacts + # We then delete it and carry on. + rm -rf $dir + return 0 + fi + + if [[ ! -d "$current_dir/acir_artifacts/$dir_name" ]]; then mkdir -p $current_dir/acir_artifacts/$dir_name fi diff --git a/noir/noir-repo/tooling/acvm_cli/src/cli/execute_cmd.rs b/noir/noir-repo/tooling/acvm_cli/src/cli/execute_cmd.rs index 4e36dbd1f22..5f9651c9138 100644 --- a/noir/noir-repo/tooling/acvm_cli/src/cli/execute_cmd.rs +++ b/noir/noir-repo/tooling/acvm_cli/src/cli/execute_cmd.rs @@ -67,13 +67,12 @@ pub(crate) fn execute_program_from_witness( bytecode: &[u8], foreign_call_resolver_url: Option<&str>, ) -> Result { - let blackbox_solver = Bn254BlackBoxSolver::new(); let program: Program = Program::deserialize_program(bytecode) .map_err(|_| CliError::CircuitDeserializationError())?; execute_program( &program, inputs_map, - &blackbox_solver, + &Bn254BlackBoxSolver, &mut DefaultForeignCallExecutor::new(true, foreign_call_resolver_url), ) .map_err(CliError::CircuitExecutionError) diff --git a/noir/noir-repo/tooling/debugger/ignored-tests.txt b/noir/noir-repo/tooling/debugger/ignored-tests.txt index cda26169421..a6d3c9a3a94 100644 --- a/noir/noir-repo/tooling/debugger/ignored-tests.txt +++ b/noir/noir-repo/tooling/debugger/ignored-tests.txt @@ -25,3 +25,4 @@ fold_fibonacci fold_complex_outputs slice_init_with_complex_type hashmap +is_unconstrained \ No newline at end of file diff --git a/noir/noir-repo/tooling/debugger/src/context.rs b/noir/noir-repo/tooling/debugger/src/context.rs index 646beaf0096..a031d127d82 100644 --- a/noir/noir-repo/tooling/debugger/src/context.rs +++ b/noir/noir-repo/tooling/debugger/src/context.rs @@ -34,6 +34,10 @@ pub(super) struct DebugContext<'a, B: BlackBoxFunctionSolver> { breakpoints: HashSet, source_to_opcodes: BTreeMap>, unconstrained_functions: &'a [BrilligBytecode], + + // Absolute (in terms of all the opcodes ACIR+Brillig) addresses of the ACIR + // opcodes with one additional entry for to indicate the last valid address. 
+ acir_opcode_addresses: Vec<usize>, } impl<'a, B: BlackBoxFunctionSolver> DebugContext<'a, B> { @@ -46,6 +50,7 @@ impl<'a, B: BlackBoxFunctionSolver> DebugContext<'a, B> { unconstrained_functions: &'a [BrilligBytecode], ) -> Self { let source_to_opcodes = build_source_to_opcode_debug_mappings(debug_artifact); + let acir_opcode_addresses = build_acir_opcode_offsets(circuit, unconstrained_functions); Self { // TODO: need to handle brillig pointer in the debugger acvm: ACVM::new( @@ -61,6 +66,7 @@ impl<'a, B: BlackBoxFunctionSolver> DebugContext<'a, B> { breakpoints: HashSet::new(), source_to_opcodes, unconstrained_functions, + acir_opcode_addresses, } } @@ -213,111 +219,55 @@ impl<'a, B: BlackBoxFunctionSolver> DebugContext<'a, B> { .collect() } - fn get_opcodes_sizes(&self) -> Vec<usize> { - self.get_opcodes() - .iter() - .map(|opcode| match opcode { - Opcode::BrilligCall { id, .. } => { - self.unconstrained_functions[*id as usize].bytecode.len() - } - _ => 1, - }) - .collect() + /// Returns the absolute address of the opcode at the given location. + /// Absolute here means accounting for nested Brillig opcodes in BrilligCall + /// opcodes. + pub fn opcode_location_to_address(&self, location: &OpcodeLocation) -> usize { + match location { + OpcodeLocation::Acir(acir_index) => self.acir_opcode_addresses[*acir_index], + OpcodeLocation::Brillig { acir_index, brillig_index } => { + self.acir_opcode_addresses[*acir_index] + *brillig_index + } + } } - /// Offsets the given location by the given number of opcodes (including - /// Brillig opcodes). If the offset would move the location outside of a - /// valid circuit location, returns None and the number of remaining - /// opcodes/instructions left which span outside the valid range in the - /// second element of the returned tuple.
- pub(super) fn offset_opcode_location( - &self, - location: &Option<OpcodeLocation>, - mut offset: i64, - ) -> (Option<OpcodeLocation>, i64) { - if offset == 0 { - return (*location, 0); + pub fn address_to_opcode_location(&self, address: usize) -> Option<OpcodeLocation> { + if address >= *self.acir_opcode_addresses.last().unwrap_or(&0) { + return None; } - let Some(location) = location else { - return (None, offset); - }; - - let (mut acir_index, mut brillig_index) = match location { - OpcodeLocation::Acir(acir_index) => (*acir_index, 0), - OpcodeLocation::Brillig { acir_index, brillig_index } => (*acir_index, *brillig_index), - }; - let opcode_sizes = self.get_opcodes_sizes(); - if offset > 0 { - while offset > 0 { - let opcode_size = opcode_sizes[acir_index] as i64 - brillig_index as i64; - if offset >= opcode_size { - acir_index += 1; - offset -= opcode_size; - brillig_index = 0; - } else { - brillig_index += offset as usize; - offset = 0; - } - if acir_index >= opcode_sizes.len() { - return (None, offset); - } + let location = match self.acir_opcode_addresses.binary_search(&address) { + Ok(found_index) => OpcodeLocation::Acir(found_index), + Err(insert_index) => { + let acir_index = insert_index - 1; + let base_offset = self.acir_opcode_addresses[acir_index]; + let brillig_index = address - base_offset; + OpcodeLocation::Brillig { acir_index, brillig_index } } - } else { - while offset < 0 { - if brillig_index > 0 { - if brillig_index > (-offset) as usize { - brillig_index -= (-offset) as usize; - offset = 0; - } else { - offset += brillig_index as i64; - brillig_index = 0; - } - } else { - if acir_index == 0 { - return (None, offset); - } - acir_index -= 1; - let opcode_size = opcode_sizes[acir_index] as i64; - if opcode_size <= -offset { - offset += opcode_size; - } else { - brillig_index = (opcode_size + offset) as usize; - offset = 0; - } - } - } - } - if brillig_index > 0 { - (Some(OpcodeLocation::Brillig { acir_index, brillig_index }), 0) - } else { - (Some(OpcodeLocation::Acir(acir_index)), 0) - } + }; + Some(location) } - pub(super) fn render_opcode_at_location(&self, location: &Option<OpcodeLocation>) -> String { + pub(super) fn render_opcode_at_location(&self, location: &OpcodeLocation) -> String { let opcodes = self.get_opcodes(); match location { - None => String::from("invalid"), - Some(OpcodeLocation::Acir(acir_index)) => { + OpcodeLocation::Acir(acir_index) => { let opcode = &opcodes[*acir_index]; match opcode { Opcode::BrilligCall { id, .. } => { let first_opcode = &self.unconstrained_functions[*id as usize].bytecode[0]; - format!("BRILLIG CALL {first_opcode:?}") + format!("BRILLIG {first_opcode:?}") } _ => format!("{opcode:?}"), } } - Some(OpcodeLocation::Brillig { acir_index, brillig_index }) => { - match &opcodes[*acir_index] { - Opcode::BrilligCall { id, .. } => { - let bytecode = &self.unconstrained_functions[*id as usize].bytecode; - let opcode = &bytecode[*brillig_index]; - format!(" | {opcode:?}") - } - _ => String::from(" | invalid"), + OpcodeLocation::Brillig { acir_index, brillig_index } => match &opcodes[*acir_index] { Opcode::BrilligCall { id, ..
} => { + let bytecode = &self.unconstrained_functions[*id as usize].bytecode; + let opcode = &bytecode[*brillig_index]; + format!(" | {opcode:?}") } - _ => String::from(" | invalid"), - } + _ => String::from(" | invalid"), }, } } @@ -640,6 +590,28 @@ fn build_source_to_opcode_debug_mappings( result } +fn build_acir_opcode_offsets( + circuit: &Circuit, + unconstrained_functions: &[BrilligBytecode], +) -> Vec<usize> { + let mut result = Vec::with_capacity(circuit.opcodes.len() + 1); + // address of the first opcode is always 0 + result.push(0); + circuit.opcodes.iter().fold(0, |acc, opcode| { + let acc = acc + + match opcode { + Opcode::BrilligCall { id, .. } => { + unconstrained_functions[*id as usize].bytecode.len() + } + _ => 1, + }; + // push the starting address of the next opcode + result.push(acc); + acc + }); + result +} + // TODO: update all debugger tests to use unconstrained brillig pointers #[cfg(test)] mod tests { @@ -851,7 +823,7 @@ mod tests { } #[test] - fn test_offset_opcode_location() { + fn test_address_opcode_location_mapping() { let brillig_bytecode = BrilligBytecode { bytecode: vec![ BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 0 }, @@ -883,85 +855,48 @@ mod tests { brillig_funcs, ); - assert_eq!(context.offset_opcode_location(&None, 0), (None, 0)); - assert_eq!(context.offset_opcode_location(&None, 2), (None, 2)); - assert_eq!(context.offset_opcode_location(&None, -2), (None, -2)); - assert_eq!( - context.offset_opcode_location(&Some(OpcodeLocation::Acir(0)), 0), - (Some(OpcodeLocation::Acir(0)), 0) - ); - assert_eq!( - context.offset_opcode_location(&Some(OpcodeLocation::Acir(0)), 1), - (Some(OpcodeLocation::Brillig { acir_index: 0, brillig_index: 1 }), 0) - ); - assert_eq!( - context.offset_opcode_location(&Some(OpcodeLocation::Acir(0)), 2), - (Some(OpcodeLocation::Brillig { acir_index: 0, brillig_index: 2 }), 0) - ); - assert_eq!( - context.offset_opcode_location(&Some(OpcodeLocation::Acir(0)), 3), - (Some(OpcodeLocation::Acir(1)), 0) - ); - assert_eq!( - context.offset_opcode_location(&Some(OpcodeLocation::Acir(0)), 4), - (Some(OpcodeLocation::Acir(2)), 0) - ); - assert_eq!( - context.offset_opcode_location(&Some(OpcodeLocation::Acir(0)), 5), - (Some(OpcodeLocation::Brillig { acir_index: 2, brillig_index: 1 }), 0) - ); - assert_eq!( - context.offset_opcode_location(&Some(OpcodeLocation::Acir(0)), 7), - (Some(OpcodeLocation::Acir(3)), 0) - ); - assert_eq!(context.offset_opcode_location(&Some(OpcodeLocation::Acir(0)), 8), (None, 0)); - assert_eq!(context.offset_opcode_location(&Some(OpcodeLocation::Acir(0)), 20), (None, 12)); - assert_eq!( - context.offset_opcode_location(&Some(OpcodeLocation::Acir(1)), 2), - (Some(OpcodeLocation::Brillig { acir_index: 2, brillig_index: 1 }), 0) - ); - assert_eq!(context.offset_opcode_location(&Some(OpcodeLocation::Acir(0)), -1), (None, -1)); - assert_eq!( - context.offset_opcode_location(&Some(OpcodeLocation::Acir(0)), -10), - (None, -10) - ); + let locations = + (0..=7).map(|address| context.address_to_opcode_location(address)).collect::<Vec<_>>(); + // mapping from addresses to opcode locations assert_eq!( - context.offset_opcode_location( - &Some(OpcodeLocation::Brillig { acir_index: 0, brillig_index: 1 }), - -1 - ), - (Some(OpcodeLocation::Acir(0)), 0) - ); - assert_eq!( - context.offset_opcode_location( - &Some(OpcodeLocation::Brillig { acir_index: 0, brillig_index: 2 }), - -2 - ), - (Some(OpcodeLocation::Acir(0)), 0) - ); - assert_eq!( - context.offset_opcode_location(&Some(OpcodeLocation::Acir(1)), -3), - (Some(OpcodeLocation::Acir(0)), 0) - );
assert_eq!( - context.offset_opcode_location(&Some(OpcodeLocation::Acir(2)), -4), - (Some(OpcodeLocation::Acir(0)), 0) - ); - assert_eq!( - context.offset_opcode_location( - &Some(OpcodeLocation::Brillig { acir_index: 2, brillig_index: 1 }), - -5 - ), - (Some(OpcodeLocation::Acir(0)), 0) + locations, + vec![ + Some(OpcodeLocation::Acir(0)), + Some(OpcodeLocation::Brillig { acir_index: 0, brillig_index: 1 }), + Some(OpcodeLocation::Brillig { acir_index: 0, brillig_index: 2 }), + Some(OpcodeLocation::Acir(1)), + Some(OpcodeLocation::Acir(2)), + Some(OpcodeLocation::Brillig { acir_index: 2, brillig_index: 1 }), + Some(OpcodeLocation::Brillig { acir_index: 2, brillig_index: 2 }), + Some(OpcodeLocation::Acir(3)), + ] ); + + let addresses = locations + .iter() + .flatten() + .map(|location| context.opcode_location_to_address(location)) + .collect::<Vec<_>>(); + + // and vice-versa + assert_eq!(addresses, (0..=7).collect::<Vec<_>>()); + + // check edge cases + assert_eq!(None, context.address_to_opcode_location(8)); assert_eq!( - context.offset_opcode_location(&Some(OpcodeLocation::Acir(3)), -7), - (Some(OpcodeLocation::Acir(0)), 0) + 0, + context.opcode_location_to_address(&OpcodeLocation::Brillig { + acir_index: 0, + brillig_index: 0 + }) ); assert_eq!( - context.offset_opcode_location(&Some(OpcodeLocation::Acir(2)), -2), - (Some(OpcodeLocation::Brillig { acir_index: 0, brillig_index: 2 }), 0) + 4, + context.opcode_location_to_address(&OpcodeLocation::Brillig { + acir_index: 2, + brillig_index: 0 + }) ); } } diff --git a/noir/noir-repo/tooling/debugger/src/dap.rs b/noir/noir-repo/tooling/debugger/src/dap.rs index 060945132f5..c9b6b816a7e 100644 --- a/noir/noir-repo/tooling/debugger/src/dap.rs +++ b/noir/noir-repo/tooling/debugger/src/dap.rs @@ -1,6 +1,5 @@ use std::collections::BTreeMap; use std::io::{Read, Write}; -use std::str::FromStr; use acvm::acir::circuit::brillig::BrilligBytecode; use acvm::acir::circuit::{Circuit, OpcodeLocation}; @@ -205,6 +204,7 @@ impl<'a, R: Read, W: Write, B: BlackBoxFunctionSolver> DapSession<'a, R, W, B> { Some(frame) => format!("{} {}", frame.function_name, index), None => format!("frame #{index}"), }; + let address = self.context.opcode_location_to_address(opcode_location); StackFrame { id: index as i64, @@ -218,7 +218,7 @@ impl<'a, R: Read, W: Write, B: BlackBoxFunctionSolver> DapSession<'a, R, W, B> { }), line: line_number as i64, column: column_number as i64, - instruction_pointer_reference: Some(opcode_location.to_string()), + instruction_pointer_reference: Some(address.to_string()), ..StackFrame::default() } }) @@ -240,50 +240,41 @@ impl<'a, R: Read, W: Write, B: BlackBoxFunctionSolver> DapSession<'a, R, W, B> { let Command::Disassemble(ref args) = req.command else { unreachable!("handle_disassemble called on a non disassemble request"); }; - let starting_ip = OpcodeLocation::from_str(args.memory_reference.as_str()).ok(); + + // we assume memory references are unsigned integers + let starting_address = args.memory_reference.parse::<i64>().unwrap_or(0); let instruction_offset = args.instruction_offset.unwrap_or(0); - let (mut opcode_location, mut invalid_count) = - self.context.offset_opcode_location(&starting_ip, instruction_offset); + + let mut address = starting_address + instruction_offset; let mut count = args.instruction_count; let mut instructions: Vec<DisassembledInstruction> = vec![]; - // leading invalid locations (when the request goes back - // beyond the start of the program) - if invalid_count < 0 { - while invalid_count < 0 { + while count > 0 { + let opcode_location = if address >= 0
{ + self.context.address_to_opcode_location(address as usize) + } else { + None + }; + + if let Some(opcode_location) = opcode_location { instructions.push(DisassembledInstruction { - address: String::from("---"), - instruction: String::from("---"), + address: address.to_string(), + // we'll use the instruction_bytes field to render the OpcodeLocation + instruction_bytes: Some(opcode_location.to_string()), + instruction: self.context.render_opcode_at_location(&opcode_location), + ..DisassembledInstruction::default() + }); + } else { + // entry for invalid location to fill up the request + instructions.push(DisassembledInstruction { + address: "---".to_owned(), + instruction: "---".to_owned(), ..DisassembledInstruction::default() }); - invalid_count += 1; - count -= 1; - } - if count > 0 { - opcode_location = Some(OpcodeLocation::Acir(0)); } - } - // the actual opcodes - while count > 0 && opcode_location.is_some() { - instructions.push(DisassembledInstruction { - address: format!("{}", opcode_location.unwrap()), - instruction: self.context.render_opcode_at_location(&opcode_location), - ..DisassembledInstruction::default() - }); - (opcode_location, _) = self.context.offset_opcode_location(&opcode_location, 1); - count -= 1; - } - // any remaining instruction count is beyond the valid opcode - // vector so return invalid placeholders - while count > 0 { - instructions.push(DisassembledInstruction { - address: String::from("---"), - instruction: String::from("---"), - ..DisassembledInstruction::default() - }); - invalid_count -= 1; count -= 1; + address += 1; } self.server.respond( @@ -418,25 +409,35 @@ impl<'a, R: Read, W: Write, B: BlackBoxFunctionSolver> DapSession<'a, R, W, B> { .breakpoints .iter() .map(|breakpoint| { - let Ok(location) = - OpcodeLocation::from_str(breakpoint.instruction_reference.as_str()) - else { + let offset = breakpoint.offset.unwrap_or(0); + let address = breakpoint.instruction_reference.parse::<i64>().unwrap_or(0) + offset; + let Ok(address): Result<usize, _> = address.try_into() else { return Breakpoint { verified: false, - message: Some(String::from("Missing instruction reference")), + message: Some(String::from("Invalid instruction reference/offset")), ..Breakpoint::default() }; }; - if !self.context.is_valid_opcode_location(&location) { + let Some(location) = self + .context + .address_to_opcode_location(address) + .filter(|location| self.context.is_valid_opcode_location(location)) + else { return Breakpoint { verified: false, message: Some(String::from("Invalid opcode location")), ..Breakpoint::default() }; - } + }; let id = self.get_next_breakpoint_id(); breakpoints_to_set.push((location, id)); - Breakpoint { id: Some(id), verified: true, ..Breakpoint::default() } + Breakpoint { + id: Some(id), + verified: true, + offset: Some(0), + instruction_reference: Some(address.to_string()), + ..Breakpoint::default() + } }) .collect(); @@ -496,15 +497,17 @@ impl<'a, R: Read, W: Write, B: BlackBoxFunctionSolver> DapSession<'a, R, W, B> { ..Breakpoint::default() }; } - let instruction_reference = format!("{}", location); + let breakpoint_address = self.context.opcode_location_to_address(&location); + let instruction_reference = format!("{}", breakpoint_address); let breakpoint_id = self.get_next_breakpoint_id(); breakpoints_to_set.push((location, breakpoint_id)); Breakpoint { id: Some(breakpoint_id), verified: true, source: Some(args.source.clone()), - instruction_reference: Some(instruction_reference), line: Some(line), + instruction_reference: Some(instruction_reference), + offset:
Some(0), ..Breakpoint::default() } }) diff --git a/noir/noir-repo/tooling/nargo_cli/src/cli/dap_cmd.rs b/noir/noir-repo/tooling/nargo_cli/src/cli/dap_cmd.rs index 124e30069ae..eded2bfd8d2 100644 --- a/noir/noir-repo/tooling/nargo_cli/src/cli/dap_cmd.rs +++ b/noir/noir-repo/tooling/nargo_cli/src/cli/dap_cmd.rs @@ -1,5 +1,6 @@ use acvm::acir::circuit::ExpressionWidth; use acvm::acir::native_types::WitnessMap; +use bn254_blackbox_solver::Bn254BlackBoxSolver; use clap::Args; use nargo::constants::PROVER_INPUT_FILE; use nargo::workspace::Workspace; @@ -193,11 +194,9 @@ fn loop_uninitialized_dap( Ok((compiled_program, initial_witness)) => { server.respond(req.ack()?)?; - let blackbox_solver = bn254_blackbox_solver::Bn254BlackBoxSolver::new(); - noir_debugger::run_dap_loop( server, - &blackbox_solver, + &Bn254BlackBoxSolver, compiled_program, initial_witness, )?; diff --git a/noir/noir-repo/tooling/nargo_cli/src/cli/debug_cmd.rs b/noir/noir-repo/tooling/nargo_cli/src/cli/debug_cmd.rs index f950cd0405c..7865b608261 100644 --- a/noir/noir-repo/tooling/nargo_cli/src/cli/debug_cmd.rs +++ b/noir/noir-repo/tooling/nargo_cli/src/cli/debug_cmd.rs @@ -219,8 +219,6 @@ pub(crate) fn debug_program( compiled_program: &CompiledProgram, inputs_map: &InputMap, ) -> Result<Option<WitnessMap>, CliError> { - let blackbox_solver = Bn254BlackBoxSolver::new(); - let initial_witness = compiled_program.abi.encode(inputs_map, None)?; let debug_artifact = DebugArtifact { @@ -230,7 +228,7 @@ }; noir_debugger::debug_circuit( - &blackbox_solver, + &Bn254BlackBoxSolver, &compiled_program.program.functions[0], debug_artifact, initial_witness, diff --git a/noir/noir-repo/tooling/nargo_cli/src/cli/execute_cmd.rs b/noir/noir-repo/tooling/nargo_cli/src/cli/execute_cmd.rs index 862a46884ef..3fcedbb8f54 100644 --- a/noir/noir-repo/tooling/nargo_cli/src/cli/execute_cmd.rs +++ b/noir/noir-repo/tooling/nargo_cli/src/cli/execute_cmd.rs @@ -110,14 +110,12 @@ pub(crate) fn execute_program( inputs_map: &InputMap, foreign_call_resolver_url: Option<&str>, ) -> Result<WitnessStack, CliError> { - let blackbox_solver = Bn254BlackBoxSolver::new(); - let initial_witness = compiled_program.abi.encode(inputs_map, None)?; let solved_witness_stack_err = nargo::ops::execute_program( &compiled_program.program, initial_witness, - &blackbox_solver, + &Bn254BlackBoxSolver, &mut DefaultForeignCallExecutor::new(true, foreign_call_resolver_url), ); match solved_witness_stack_err { diff --git a/noir/noir-repo/tooling/nargo_cli/src/cli/info_cmd.rs b/noir/noir-repo/tooling/nargo_cli/src/cli/info_cmd.rs index 11cf6e22ab5..7c50e907dc9 100644 --- a/noir/noir-repo/tooling/nargo_cli/src/cli/info_cmd.rs +++ b/noir/noir-repo/tooling/nargo_cli/src/cli/info_cmd.rs @@ -94,7 +94,7 @@ pub(crate) fn run(args: InfoCommand, config: NargoConfig) -> Result<(), CliError }) .collect(); - let info_report = InfoReport { programs: program_info, contracts: Vec::new() }; + let info_report = InfoReport { programs: program_info }; if args.json { // Expose machine-readable JSON data. @@ -102,7 +102,8 @@ pub(crate) fn run(args: InfoCommand, config: NargoConfig) -> Result<(), CliError } else { // Otherwise print human-readable table.
if !info_report.programs.is_empty() { - let mut program_table = table!([Fm->"Package", Fm->"Function", Fm->"Expression Width", Fm->"ACIR Opcodes", Fm->"Backend Circuit Size"]); + let mut program_table = + table!([Fm->"Package", Fm->"Function", Fm->"Expression Width", Fm->"ACIR Opcodes"]); for program_info in info_report.programs { let program_rows: Vec<Row> = program_info.into(); @@ -169,7 +170,6 @@ fn byte_index(string: &str, index: u32) -> usize { #[derive(Debug, Default, Serialize)] struct InfoReport { programs: Vec<ProgramInfo>, - contracts: Vec<ContractInfo>, } #[derive(Debug, Serialize)] diff --git a/noir/noir-repo/tooling/nargo_cli/src/cli/lsp_cmd.rs b/noir/noir-repo/tooling/nargo_cli/src/cli/lsp_cmd.rs index 45ac02ea552..9ff7a42e5f5 100644 --- a/noir/noir-repo/tooling/nargo_cli/src/cli/lsp_cmd.rs +++ b/noir/noir-repo/tooling/nargo_cli/src/cli/lsp_cmd.rs @@ -25,8 +25,7 @@ pub(crate) fn run(_args: LspCommand, _config: NargoConfig) -> Result<(), CliErro runtime.block_on(async { let (server, _) = async_lsp::MainLoop::new_server(|client| { - let blackbox_solver = Bn254BlackBoxSolver::new(); - let router = NargoLspService::new(&client, blackbox_solver); + let router = NargoLspService::new(&client, Bn254BlackBoxSolver); ServiceBuilder::new() .layer(TracingLayer::default()) diff --git a/noir/noir-repo/tooling/nargo_cli/tests/hello_world.rs b/noir/noir-repo/tooling/nargo_cli/tests/hello_world.rs index 9fcb0c873e1..6b6931542b5 100644 --- a/noir/noir-repo/tooling/nargo_cli/tests/hello_world.rs +++ b/noir/noir-repo/tooling/nargo_cli/tests/hello_world.rs @@ -34,22 +34,11 @@ fn hello_world_example() { .stdout(predicate::str::contains("Constraint system successfully built!")); project_dir.child("Prover.toml").assert(predicate::path::is_file()); - project_dir.child("Verifier.toml").assert(predicate::path::is_file()); - // `nargo prove` + // `nargo execute` project_dir.child("Prover.toml").write_str("x = 1\ny = 2").unwrap(); let mut cmd = Command::cargo_bin("nargo").unwrap(); - cmd.arg("prove"); - cmd.assert().success(); - - project_dir - .child("proofs") - .child(format!("{project_name}.proof")) - .assert(predicate::path::is_file()); - - // `nargo verify p` - let mut cmd = Command::cargo_bin("nargo").unwrap(); - cmd.arg("verify"); + cmd.arg("execute"); cmd.assert().success(); } diff --git a/noir/noir-repo/tooling/noir_js_backend_barretenberg/package.json b/noir/noir-repo/tooling/noir_js_backend_barretenberg/package.json index 1a26367e3c0..10fd14a0090 100644 --- a/noir/noir-repo/tooling/noir_js_backend_barretenberg/package.json +++ b/noir/noir-repo/tooling/noir_js_backend_barretenberg/package.json @@ -42,7 +42,7 @@ "lint": "NODE_NO_WARNINGS=1 eslint .
--ext .ts --ignore-path ./.eslintignore --max-warnings 0" }, "dependencies": { - "@aztec/bb.js": "portal:../../../../barretenberg/ts", + "@aztec/bb.js": "0.41.0", "@noir-lang/types": "workspace:*", "fflate": "^0.8.0" }, diff --git a/noir/noir-repo/yarn.lock b/noir/noir-repo/yarn.lock index b45678f5d8b..8fb574afa30 100644 --- a/noir/noir-repo/yarn.lock +++ b/noir/noir-repo/yarn.lock @@ -221,18 +221,19 @@ __metadata: languageName: node linkType: hard -"@aztec/bb.js@portal:../../../../barretenberg/ts::locator=%40noir-lang%2Fbackend_barretenberg%40workspace%3Atooling%2Fnoir_js_backend_barretenberg": - version: 0.0.0-use.local - resolution: "@aztec/bb.js@portal:../../../../barretenberg/ts::locator=%40noir-lang%2Fbackend_barretenberg%40workspace%3Atooling%2Fnoir_js_backend_barretenberg" +"@aztec/bb.js@npm:0.41.0": + version: 0.41.0 + resolution: "@aztec/bb.js@npm:0.41.0" dependencies: comlink: ^4.4.1 commander: ^10.0.1 debug: ^4.3.4 tslib: ^2.4.0 bin: - bb.js: ./dest/node/main.js + bb.js: dest/node/main.js + checksum: e5e0095eaff3de45726366726337b131bb6ff7cf2cb53be705572c7d6715dae4c948bf86a03cfad68bc98c0c2d83e64cbe3723cc72260c8dbfa262af8cb81f9b languageName: node - linkType: soft + linkType: hard "@babel/code-frame@npm:^7.0.0, @babel/code-frame@npm:^7.10.4, @babel/code-frame@npm:^7.12.11, @babel/code-frame@npm:^7.16.0, @babel/code-frame@npm:^7.22.13, @babel/code-frame@npm:^7.23.5, @babel/code-frame@npm:^7.8.3": version: 7.23.5 @@ -4395,7 +4396,7 @@ __metadata: version: 0.0.0-use.local resolution: "@noir-lang/backend_barretenberg@workspace:tooling/noir_js_backend_barretenberg" dependencies: - "@aztec/bb.js": "portal:../../../../barretenberg/ts" + "@aztec/bb.js": 0.41.0 "@noir-lang/types": "workspace:*" "@types/node": ^20.6.2 "@types/prettier": ^3
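Aside: taken together, the debugger changes above replace relative opcode-offset arithmetic (`offset_opcode_location`) with absolute addressing over a prefix-sum table. Below is a minimal standalone sketch of that scheme, using hypothetical, simplified types (`Location`, `build_offsets`, `address_to_location` are illustrative names, not the actual `DebugContext` API):

```rust
// Sketch of the debugger's address mapping under assumed, simplified types.

#[derive(Debug, PartialEq)]
enum Location {
    Acir { acir_index: usize },
    Brillig { acir_index: usize, brillig_index: usize },
}

// Entry i is the absolute address of ACIR opcode i; one extra entry marks the
// end of the address range (mirrors build_acir_opcode_offsets in the diff).
fn build_offsets(opcode_sizes: &[usize]) -> Vec<usize> {
    let mut offsets = Vec::with_capacity(opcode_sizes.len() + 1);
    offsets.push(0);
    let mut acc = 0;
    for size in opcode_sizes {
        acc += size;
        offsets.push(acc);
    }
    offsets
}

fn address_to_location(offsets: &[usize], address: usize) -> Option<Location> {
    // Addresses at or past the final entry lie outside the program.
    if address >= *offsets.last().unwrap_or(&0) {
        return None;
    }
    Some(match offsets.binary_search(&address) {
        // Exact hit: the address is the start of an ACIR opcode.
        Ok(acir_index) => Location::Acir { acir_index },
        // Miss: the address falls inside the Brillig body of the preceding opcode.
        Err(insert_index) => {
            let acir_index = insert_index - 1;
            Location::Brillig { acir_index, brillig_index: address - offsets[acir_index] }
        }
    })
}

fn main() {
    // Same shape as the PR's unit test: BrilligCall(3), plain, BrilligCall(3), plain.
    let offsets = build_offsets(&[3, 1, 3, 1]);
    assert_eq!(offsets, vec![0, 3, 4, 7, 8]);
    assert_eq!(address_to_location(&offsets, 3), Some(Location::Acir { acir_index: 1 }));
    assert_eq!(
        address_to_location(&offsets, 5),
        Some(Location::Brillig { acir_index: 2, brillig_index: 1 })
    );
    assert_eq!(address_to_location(&offsets, 8), None);
}
```

Using `binary_search` over the prefix sums makes each address-to-location query O(log n), whereas the old `offset_opcode_location` walked the opcode-size vector step by step; it also gives the DAP layer plain integer instruction references, which is why `dap.rs` can drop `OpcodeLocation::from_str` in favor of parsing integers.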