From 72e8de0656c4789f57ff1d3ddecc8901df627aab Mon Sep 17 00:00:00 2001 From: Tom French <15848336+TomAFrench@users.noreply.github.com> Date: Tue, 12 Nov 2024 16:43:08 +0000 Subject: [PATCH 001/109] fix: perform arithmetic simplification through `CheckedCast` (#6502) --- .../src/hir_def/types/arithmetic.rs | 108 +++++++++++++----- .../src/tests/arithmetic_generics.rs | 30 +++++ cspell.json | 2 + 3 files changed, 114 insertions(+), 26 deletions(-) diff --git a/compiler/noirc_frontend/src/hir_def/types/arithmetic.rs b/compiler/noirc_frontend/src/hir_def/types/arithmetic.rs index f4646c180a3..8cdf6f5502c 100644 --- a/compiler/noirc_frontend/src/hir_def/types/arithmetic.rs +++ b/compiler/noirc_frontend/src/hir_def/types/arithmetic.rs @@ -189,20 +189,27 @@ impl Type { op: BinaryTypeOperator, rhs: &Type, ) -> Option { - let Type::InfixExpr(l_lhs, l_op, l_rhs) = lhs.follow_bindings() else { - return None; - }; + match lhs.follow_bindings() { + Type::CheckedCast { from, to } => { + // Apply operation directly to `from` while attempting simplification to `to`. + let from = Type::InfixExpr(from, op, Box::new(rhs.clone())); + let to = Self::try_simplify_non_constants_in_lhs(&to, op, rhs)?; + Some(Type::CheckedCast { from: Box::new(from), to: Box::new(to) }) + } + Type::InfixExpr(l_lhs, l_op, l_rhs) => { + // Note that this is exact, syntactic equality, not unification. + // `rhs` is expected to already be in canonical form. + if l_op.approx_inverse() != Some(op) + || l_op == BinaryTypeOperator::Division + || l_rhs.canonicalize_unchecked() != *rhs + { + return None; + } - // Note that this is exact, syntactic equality, not unification. - // `rhs` is expected to already be in canonical form. - if l_op.approx_inverse() != Some(op) - || l_op == BinaryTypeOperator::Division - || l_rhs.canonicalize_unchecked() != *rhs - { - return None; + Some(*l_lhs) + } + _ => None, } - - Some(*l_lhs) } /// Try to simplify non-constant expressions in the form `N op1 (M op1 N)` @@ -219,23 +226,31 @@ impl Type { op: BinaryTypeOperator, rhs: &Type, ) -> Option { - let Type::InfixExpr(r_lhs, r_op, r_rhs) = rhs.follow_bindings() else { - return None; - }; + match rhs.follow_bindings() { + Type::CheckedCast { from, to } => { + // Apply operation directly to `from` while attempting simplification to `to`. + let from = Type::InfixExpr(Box::new(lhs.clone()), op, from); + let to = Self::try_simplify_non_constants_in_rhs(lhs, op, &to)?; + Some(Type::CheckedCast { from: Box::new(from), to: Box::new(to) }) + } + Type::InfixExpr(r_lhs, r_op, r_rhs) => { + // `N / (M * N)` should be simplified to `1 / M`, but we only handle + // simplifying to `M` in this function. + if op == BinaryTypeOperator::Division && r_op == BinaryTypeOperator::Multiplication + { + return None; + } - // `N / (M * N)` should be simplified to `1 / M`, but we only handle - // simplifying to `M` in this function. - if op == BinaryTypeOperator::Division && r_op == BinaryTypeOperator::Multiplication { - return None; - } + // Note that this is exact, syntactic equality, not unification. + // `lhs` is expected to already be in canonical form. + if r_op.inverse() != Some(op) || *lhs != r_rhs.canonicalize_unchecked() { + return None; + } - // Note that this is exact, syntactic equality, not unification. - // `lhs` is expected to already be in canonical form. 
- if r_op.inverse() != Some(op) || *lhs != r_rhs.canonicalize_unchecked() { - return None; + Some(*r_lhs) + } + _ => None, } - - Some(*r_lhs) } /// Given: @@ -360,6 +375,47 @@ mod tests { use crate::hir_def::types::{BinaryTypeOperator, Kind, Type, TypeVariable, TypeVariableId}; + #[test] + fn solves_n_minus_one_plus_one_through_checked_casts() { + // We want to test that the inclusion of a `CheckedCast` won't prevent us from canonicalizing + // the expression `(N - 1) + 1` to `N` if there exists a `CheckedCast` on the `N - 1` term. + + let n = Type::NamedGeneric( + TypeVariable::unbound(TypeVariableId(0), Kind::u32()), + std::rc::Rc::new("N".to_owned()), + ); + let n_minus_one = Type::InfixExpr( + Box::new(n.clone()), + BinaryTypeOperator::Subtraction, + Box::new(Type::Constant(FieldElement::one(), Kind::u32())), + ); + let checked_cast_n_minus_one = + Type::CheckedCast { from: Box::new(n_minus_one.clone()), to: Box::new(n_minus_one) }; + + let n_minus_one_plus_one = Type::InfixExpr( + Box::new(checked_cast_n_minus_one.clone()), + BinaryTypeOperator::Addition, + Box::new(Type::Constant(FieldElement::one(), Kind::u32())), + ); + + let canonicalized_typ = n_minus_one_plus_one.canonicalize(); + + assert_eq!(n, canonicalized_typ); + + // We also want to check that if the `CheckedCast` is on the RHS then we'll still be able to canonicalize + // the expression `1 + (N - 1)` to `N`. + + let one_plus_n_minus_one = Type::InfixExpr( + Box::new(Type::Constant(FieldElement::one(), Kind::u32())), + BinaryTypeOperator::Addition, + Box::new(checked_cast_n_minus_one), + ); + + let canonicalized_typ = one_plus_n_minus_one.canonicalize(); + + assert_eq!(n, canonicalized_typ); + } + #[test] fn instantiate_after_canonicalize_smoke_test() { let field_element_kind = Kind::numeric(Type::FieldElement); diff --git a/compiler/noirc_frontend/src/tests/arithmetic_generics.rs b/compiler/noirc_frontend/src/tests/arithmetic_generics.rs index 3fa41b80279..3328fe439ae 100644 --- a/compiler/noirc_frontend/src/tests/arithmetic_generics.rs +++ b/compiler/noirc_frontend/src/tests/arithmetic_generics.rs @@ -27,6 +27,36 @@ fn arithmetic_generics_canonicalization_deduplication_regression() { assert_eq!(errors.len(), 0); } +#[test] +fn checked_casts_do_not_prevent_canonicalization() { + // Regression test for https://github.com/noir-lang/noir/issues/6495 + let source = r#" + pub trait Serialize { + fn serialize(self) -> [Field; N]; + } + + pub struct Counted { + pub inner: T, + } + + pub fn append(array1: [T; N]) -> [T; N + 1] { + [array1[0]; N + 1] + } + + impl Serialize for Counted + where + T: Serialize, + { + fn serialize(self) -> [Field; N] { + append(self.inner.serialize()) + } + } + "#; + let errors = get_program_errors(source); + println!("{:?}", errors); + assert_eq!(errors.len(), 0); +} + #[test] fn arithmetic_generics_checked_cast_zeros() { let source = r#" diff --git a/cspell.json b/cspell.json index aa0f749bd9c..d8315c35910 100644 --- a/cspell.json +++ b/cspell.json @@ -38,8 +38,10 @@ "callsites", "callstack", "callstacks", + "canonicalization", "canonicalize", "canonicalized", + "canonicalizing", "castable", "catmcgee", "Celo", From 34ad6669b210173ddf0484b04e47161b2cfbcadf Mon Sep 17 00:00:00 2001 From: Ary Borenszweig Date: Tue, 12 Nov 2024 16:49:46 -0300 Subject: [PATCH 002/109] fix: parse Slice type in SSa (#6507) --- .../src/ssa/opt/as_slice_length.rs | 31 ++++++++++++++++++ compiler/noirc_evaluator/src/ssa/parser.rs | 32 +++++++++++-------- .../noirc_evaluator/src/ssa/parser/lexer.rs | 2 +- 
.../noirc_evaluator/src/ssa/parser/tests.rs | 12 +++++++ 4 files changed, 62 insertions(+), 15 deletions(-) diff --git a/compiler/noirc_evaluator/src/ssa/opt/as_slice_length.rs b/compiler/noirc_evaluator/src/ssa/opt/as_slice_length.rs index 59917e8589b..76705dcc9db 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/as_slice_length.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/as_slice_length.rs @@ -75,3 +75,34 @@ fn replace_known_slice_lengths( func.dfg.set_value_from_id(original_slice_length, known_length); }); } + +#[cfg(test)] +mod test { + use crate::ssa::opt::assert_normalized_ssa_equals; + + use super::Ssa; + + #[test] + fn as_slice_length_optimization() { + // In this code we expect `return v2` to be replaced with `return u32 3` because + // that's the length of the v0 array. + let src = " + acir(inline) fn main f0 { + b0(v0: [Field; 3]): + v2, v3 = call as_slice(v0) -> (u32, [Field]) + return v2 + } + "; + let ssa = Ssa::from_str(src).unwrap(); + + let expected = " + acir(inline) fn main f0 { + b0(v0: [Field; 3]): + v2, v3 = call as_slice(v0) -> (u32, [Field]) + return u32 3 + } + "; + let ssa = ssa.as_slice_optimization(); + assert_normalized_ssa_equals(ssa, expected); + } +} diff --git a/compiler/noirc_evaluator/src/ssa/parser.rs b/compiler/noirc_evaluator/src/ssa/parser.rs index bfdb08f85e6..717d2691b2f 100644 --- a/compiler/noirc_evaluator/src/ssa/parser.rs +++ b/compiler/noirc_evaluator/src/ssa/parser.rs @@ -81,15 +81,15 @@ impl Debug for SsaErrorWithSource { pub(crate) enum SsaError { #[error("{0}")] ParserError(ParserError), - #[error("Unknown variable `{0}`")] + #[error("Unknown variable '{0}'")] UnknownVariable(Identifier), - #[error("Unknown block `{0}`")] + #[error("Unknown block '{0}'")] UnknownBlock(Identifier), - #[error("Unknown function `{0}`")] + #[error("Unknown function '{0}'")] UnknownFunction(Identifier), #[error("Mismatched return values")] MismatchedReturnValues { returns: Vec, expected: usize }, - #[error("Variable `{0}` already defined")] + #[error("Variable '{0}' already defined")] VariableAlreadyDefined(Identifier), } @@ -647,10 +647,14 @@ impl<'a> Parser<'a> { if self.eat(Token::LeftBracket)? { let element_types = self.parse_types()?; - self.eat_or_error(Token::Semicolon)?; - let length = self.eat_int_or_error()?; - self.eat_or_error(Token::RightBracket)?; - return Ok(Type::Array(Arc::new(element_types), length.to_u128() as usize)); + if self.eat(Token::Semicolon)? { + let length = self.eat_int_or_error()?; + self.eat_or_error(Token::RightBracket)?; + return Ok(Type::Array(Arc::new(element_types), length.to_u128() as usize)); + } else { + self.eat_or_error(Token::RightBracket)?; + return Ok(Type::Slice(Arc::new(element_types))); + } } if let Some(typ) = self.parse_mutable_reference_type()? 
{ @@ -857,19 +861,19 @@ impl<'a> Parser<'a> { pub(crate) enum ParserError { #[error("{0}")] LexerError(LexerError), - #[error("Expected {token}, found {token}")] + #[error("Expected '{token}', found '{found}'")] ExpectedToken { token: Token, found: Token, span: Span }, #[error("Expected one of {tokens:?}, found {found}")] ExpectedOneOfTokens { tokens: Vec, found: Token, span: Span }, - #[error("Expected an identifier, found {found}")] + #[error("Expected an identifier, found '{found}'")] ExpectedIdentifier { found: Token, span: Span }, - #[error("Expected an int, found {found}")] + #[error("Expected an int, found '{found}'")] ExpectedInt { found: Token, span: Span }, - #[error("Expected a type, found {found}")] + #[error("Expected a type, found '{found}'")] ExpectedType { found: Token, span: Span }, - #[error("Expected an instruction or terminator, found {found}")] + #[error("Expected an instruction or terminator, found '{found}'")] ExpectedInstructionOrTerminator { found: Token, span: Span }, - #[error("Expected a value, found {found}")] + #[error("Expected a value, found '{found}'")] ExpectedValue { found: Token, span: Span }, #[error("Multiple return values only allowed for call")] MultipleReturnValuesOnlyAllowedForCall { second_target: Identifier }, diff --git a/compiler/noirc_evaluator/src/ssa/parser/lexer.rs b/compiler/noirc_evaluator/src/ssa/parser/lexer.rs index 456e5ec15e0..ac4c3b77205 100644 --- a/compiler/noirc_evaluator/src/ssa/parser/lexer.rs +++ b/compiler/noirc_evaluator/src/ssa/parser/lexer.rs @@ -240,7 +240,7 @@ type SpannedTokenResult = Result; #[derive(Debug, Error)] pub(crate) enum LexerError { - #[error("Unexpected character: {char}")] + #[error("Unexpected character: {char:?}")] UnexpectedCharacter { char: char, span: Span }, #[error("Invalid integer literal")] InvalidIntegerLiteral { span: Span, found: String }, diff --git a/compiler/noirc_evaluator/src/ssa/parser/tests.rs b/compiler/noirc_evaluator/src/ssa/parser/tests.rs index 1ec0161649b..3ed6be57b5e 100644 --- a/compiler/noirc_evaluator/src/ssa/parser/tests.rs +++ b/compiler/noirc_evaluator/src/ssa/parser/tests.rs @@ -413,3 +413,15 @@ fn test_parses_with_comments() { let ssa = Ssa::from_str(src).unwrap(); assert_normalized_ssa_equals(ssa, expected); } + +#[test] +fn test_slice() { + let src = " + acir(inline) fn main f0 { + b0(v0: [Field; 3]): + v2, v3 = call as_slice(v0) -> (u32, [Field]) + return + } + "; + assert_ssa_roundtrip(src); +} From 1df8c456d6d256f120d6df6ae3e6735cb7eb7dae Mon Sep 17 00:00:00 2001 From: Ary Borenszweig Date: Tue, 12 Nov 2024 18:26:55 -0300 Subject: [PATCH 003/109] fix: set local_module before elaborating each trait (#6506) --- .../noirc_frontend/src/elaborator/traits.rs | 2 ++ .../noirc_frontend/src/tests/unused_items.rs | 20 +++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/compiler/noirc_frontend/src/elaborator/traits.rs b/compiler/noirc_frontend/src/elaborator/traits.rs index ae278616e03..e1be45927ca 100644 --- a/compiler/noirc_frontend/src/elaborator/traits.rs +++ b/compiler/noirc_frontend/src/elaborator/traits.rs @@ -23,6 +23,8 @@ use super::Elaborator; impl<'context> Elaborator<'context> { pub fn collect_traits(&mut self, traits: &BTreeMap) { for (trait_id, unresolved_trait) in traits { + self.local_module = unresolved_trait.module_id; + self.recover_generics(|this| { this.current_trait = Some(*trait_id); diff --git a/compiler/noirc_frontend/src/tests/unused_items.rs b/compiler/noirc_frontend/src/tests/unused_items.rs index 86f77fc397a..5f9fc887b27 100644 --- 
a/compiler/noirc_frontend/src/tests/unused_items.rs +++ b/compiler/noirc_frontend/src/tests/unused_items.rs @@ -294,3 +294,23 @@ fn no_warning_on_self_in_trait_impl() { "#; assert_no_errors(src); } + +#[test] +fn resolves_trait_where_clause_in_the_correct_module() { + // This is a regression test for https://github.com/noir-lang/noir/issues/6479 + let src = r#" + mod foo { + pub trait Foo {} + } + + use foo::Foo; + + pub trait Bar + where + T: Foo, + {} + + fn main() {} + "#; + assert_no_errors(src); +} From cfc22cb0ca133fce49a25c3f055f5a6b8bd9b58e Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Wed, 13 Nov 2024 16:00:37 +0000 Subject: [PATCH 004/109] fix(tests): Use a file lock as well as a mutex to isolate tests cases (#6508) --- Cargo.lock | 11 +++++++++++ tooling/nargo_cli/Cargo.toml | 1 + tooling/nargo_cli/build.rs | 21 +++++++++++++++++---- 3 files changed, 29 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 35ff97f55e3..15ad7806a17 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1510,6 +1510,16 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "file-lock" +version = "2.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "040b48f80a749da50292d0f47a1e2d5bf1d772f52836c07f64bfccc62ba6e664" +dependencies = [ + "cc", + "libc", +] + [[package]] name = "filetime" version = "0.2.25" @@ -2777,6 +2787,7 @@ dependencies = [ "criterion", "dap", "dirs", + "file-lock", "fm", "iai", "iter-extended", diff --git a/tooling/nargo_cli/Cargo.toml b/tooling/nargo_cli/Cargo.toml index 317706bb237..02e669f5c68 100644 --- a/tooling/nargo_cli/Cargo.toml +++ b/tooling/nargo_cli/Cargo.toml @@ -78,6 +78,7 @@ dirs.workspace = true assert_cmd = "2.0.8" assert_fs = "1.0.10" predicates = "2.1.5" +file-lock = "2.1.11" fm.workspace = true criterion.workspace = true pprof.workspace = true diff --git a/tooling/nargo_cli/build.rs b/tooling/nargo_cli/build.rs index 984419fe388..a3b718b157d 100644 --- a/tooling/nargo_cli/build.rs +++ b/tooling/nargo_cli/build.rs @@ -168,7 +168,11 @@ fn generate_test_cases( } let test_cases = test_cases.join("\n"); - // Use a common mutex for all test cases. + // We need to isolate test cases in the same group, otherwise they overwrite each other's artifacts. + // On CI we use `cargo nextest`, which runs tests in different processes; for this we use a file lock. + // Locally we might be using `cargo test`, which run tests in the same process; in this case the file lock + // wouldn't work, becuase the process itself has the lock, and it looks like it can have N instances without + // any problems; for this reason we also use a `Mutex`. let mutex_name = format! {"TEST_MUTEX_{}", test_name.to_uppercase()}; write!( test_file, @@ -180,10 +184,16 @@ lazy_static::lazy_static! {{ {test_cases} fn test_{test_name}(force_brillig: bool, inliner_aggressiveness: i64) {{ + let test_program_dir = PathBuf::from("{test_dir}"); + // Ignore poisoning errors if some of the matrix cases failed. 
- let _guard = {mutex_name}.lock().unwrap_or_else(|e| e.into_inner()); + let mutex_guard = {mutex_name}.lock().unwrap_or_else(|e| e.into_inner()); - let test_program_dir = PathBuf::from("{test_dir}"); + let file_guard = file_lock::FileLock::lock( + test_program_dir.join("Nargo.toml"), + true, + file_lock::FileOptions::new().read(true).write(true).append(true) + ).expect("failed to lock Nargo.toml"); let mut nargo = Command::cargo_bin("nargo").unwrap(); nargo.arg("--program-dir").arg(test_program_dir); @@ -194,6 +204,9 @@ fn test_{test_name}(force_brillig: bool, inliner_aggressiveness: i64) {{ }} {test_content} + + drop(file_guard); + drop(mutex_guard); }} "# ) @@ -363,7 +376,7 @@ fn generate_compile_success_empty_tests(test_file: &mut File, test_data_dir: &Pa "info", &format!( r#" - nargo.arg("--json"); + nargo.arg("--json"); {assert_zero_opcodes} "#, ), From f81c6497ff88e1cc6f3f5c183e679090c6433c65 Mon Sep 17 00:00:00 2001 From: Tom French <15848336+TomAFrench@users.noreply.github.com> Date: Wed, 13 Nov 2024 19:31:59 +0000 Subject: [PATCH 005/109] feat: avoid unnecessary ssa passes while loop unrolling (#6509) --- .../noirc_evaluator/src/ssa/opt/unrolling.rs | 98 ++++++++++--------- 1 file changed, 53 insertions(+), 45 deletions(-) diff --git a/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs b/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs index 661109c1786..267dc6a3c20 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs @@ -40,54 +40,47 @@ use fxhash::FxHashMap as HashMap; impl Ssa { /// Loop unrolling can return errors, since ACIR functions need to be fully unrolled. /// This meta-pass will keep trying to unroll loops and simplifying the SSA until no more errors are found. + #[tracing::instrument(level = "trace", skip(ssa))] pub(crate) fn unroll_loops_iteratively(mut ssa: Ssa) -> Result { - // Try to unroll loops first: - let mut unroll_errors; - (ssa, unroll_errors) = ssa.try_to_unroll_loops(); - - // Keep unrolling until no more errors are found - while !unroll_errors.is_empty() { - let prev_unroll_err_count = unroll_errors.len(); - - // Simplify the SSA before retrying - - // Do a mem2reg after the last unroll to aid simplify_cfg - ssa = ssa.mem2reg(); - ssa = ssa.simplify_cfg(); - // Do another mem2reg after simplify_cfg to aid the next unroll - ssa = ssa.mem2reg(); - - // Unroll again - (ssa, unroll_errors) = ssa.try_to_unroll_loops(); - // If we didn't manage to unroll any more loops, exit - if unroll_errors.len() >= prev_unroll_err_count { - return Err(unroll_errors.swap_remove(0)); + let acir_functions = ssa.functions.iter_mut().filter(|(_, func)| { + // Loop unrolling in brillig can lead to a code explosion currently. This can + // also be true for ACIR, but we have no alternative to unrolling in ACIR. + // Brillig also generally prefers smaller code rather than faster code. 
+ !matches!(func.runtime(), RuntimeType::Brillig(_)) + }); + + for (_, function) in acir_functions { + // Try to unroll loops first: + let mut unroll_errors = function.try_to_unroll_loops(); + + // Keep unrolling until no more errors are found + while !unroll_errors.is_empty() { + let prev_unroll_err_count = unroll_errors.len(); + + // Simplify the SSA before retrying + + // Do a mem2reg after the last unroll to aid simplify_cfg + function.mem2reg(); + function.simplify_function(); + // Do another mem2reg after simplify_cfg to aid the next unroll + function.mem2reg(); + + // Unroll again + unroll_errors = function.try_to_unroll_loops(); + // If we didn't manage to unroll any more loops, exit + if unroll_errors.len() >= prev_unroll_err_count { + return Err(unroll_errors.swap_remove(0)); + } } } - Ok(ssa) - } - /// Tries to unroll all loops in each SSA function. - /// If any loop cannot be unrolled, it is left as-is or in a partially unrolled state. - /// Returns the ssa along with all unrolling errors encountered - #[tracing::instrument(level = "trace", skip(self))] - pub(crate) fn try_to_unroll_loops(mut self) -> (Ssa, Vec) { - let mut errors = vec![]; - for function in self.functions.values_mut() { - function.try_to_unroll_loops(&mut errors); - } - (self, errors) + Ok(ssa) } } impl Function { - pub(crate) fn try_to_unroll_loops(&mut self, errors: &mut Vec) { - // Loop unrolling in brillig can lead to a code explosion currently. This can - // also be true for ACIR, but we have no alternative to unrolling in ACIR. - // Brillig also generally prefers smaller code rather than faster code. - if !matches!(self.runtime(), RuntimeType::Brillig(_)) { - errors.extend(find_all_loops(self).unroll_each_loop(self)); - } + fn try_to_unroll_loops(&mut self) -> Vec { + find_all_loops(self).unroll_each_loop(self) } } @@ -507,11 +500,26 @@ impl<'f> LoopIteration<'f> { #[cfg(test)] mod tests { - use crate::ssa::{ - function_builder::FunctionBuilder, - ir::{instruction::BinaryOp, map::Id, types::Type}, + use crate::{ + errors::RuntimeError, + ssa::{ + function_builder::FunctionBuilder, + ir::{instruction::BinaryOp, map::Id, types::Type}, + }, }; + use super::Ssa; + + /// Tries to unroll all loops in each SSA function. + /// If any loop cannot be unrolled, it is left as-is or in a partially unrolled state. + fn try_to_unroll_loops(mut ssa: Ssa) -> (Ssa, Vec) { + let mut errors = vec![]; + for function in ssa.functions.values_mut() { + errors.extend(function.try_to_unroll_loops()); + } + (ssa, errors) + } + #[test] fn unroll_nested_loops() { // fn main() { @@ -630,7 +638,7 @@ mod tests { // } // The final block count is not 1 because unrolling creates some unnecessary jmps. // If a simplify cfg pass is ran afterward, the expected block count will be 1. 
- let (ssa, errors) = ssa.try_to_unroll_loops(); + let (ssa, errors) = try_to_unroll_loops(ssa); assert_eq!(errors.len(), 0, "All loops should be unrolled"); assert_eq!(ssa.main().reachable_blocks().len(), 5); } @@ -680,7 +688,7 @@ mod tests { assert_eq!(ssa.main().reachable_blocks().len(), 4); // Expected that we failed to unroll the loop - let (_, errors) = ssa.try_to_unroll_loops(); + let (_, errors) = try_to_unroll_loops(ssa); assert_eq!(errors.len(), 1, "Expected to fail to unroll loop"); } } From 0fc0c53ec183890370c69aa4148952b3123cb055 Mon Sep 17 00:00:00 2001 From: jfecher Date: Wed, 13 Nov 2024 14:40:43 -0600 Subject: [PATCH 006/109] chore: Parse negatives in SSA parser (#6510) --- compiler/noirc_evaluator/src/ssa/parser.rs | 9 ++++++++- compiler/noirc_evaluator/src/ssa/parser/lexer.rs | 1 + compiler/noirc_evaluator/src/ssa/parser/tests.rs | 11 +++++++++++ compiler/noirc_evaluator/src/ssa/parser/token.rs | 3 +++ 4 files changed, 23 insertions(+), 1 deletion(-) diff --git a/compiler/noirc_evaluator/src/ssa/parser.rs b/compiler/noirc_evaluator/src/ssa/parser.rs index 717d2691b2f..11d43284786 100644 --- a/compiler/noirc_evaluator/src/ssa/parser.rs +++ b/compiler/noirc_evaluator/src/ssa/parser.rs @@ -735,10 +735,17 @@ impl<'a> Parser<'a> { } fn eat_int(&mut self) -> ParseResult> { + let negative = self.eat(Token::Dash)?; + if matches!(self.token.token(), Token::Int(..)) { let token = self.bump()?; match token.into_token() { - Token::Int(int) => Ok(Some(int)), + Token::Int(mut int) => { + if negative { + int = -int; + } + Ok(Some(int)) + } _ => unreachable!(), } } else { diff --git a/compiler/noirc_evaluator/src/ssa/parser/lexer.rs b/compiler/noirc_evaluator/src/ssa/parser/lexer.rs index ac4c3b77205..4c90475be74 100644 --- a/compiler/noirc_evaluator/src/ssa/parser/lexer.rs +++ b/compiler/noirc_evaluator/src/ssa/parser/lexer.rs @@ -60,6 +60,7 @@ impl<'a> Lexer<'a> { Some(']') => self.single_char_token(Token::RightBracket), Some('&') => self.single_char_token(Token::Ampersand), Some('-') if self.peek_char() == Some('>') => self.double_char_token(Token::Arrow), + Some('-') => self.single_char_token(Token::Dash), Some(ch) if ch.is_ascii_alphanumeric() || ch == '_' => self.eat_alpha_numeric(ch), Some(char) => Err(LexerError::UnexpectedCharacter { char, diff --git a/compiler/noirc_evaluator/src/ssa/parser/tests.rs b/compiler/noirc_evaluator/src/ssa/parser/tests.rs index 3ed6be57b5e..9205353151e 100644 --- a/compiler/noirc_evaluator/src/ssa/parser/tests.rs +++ b/compiler/noirc_evaluator/src/ssa/parser/tests.rs @@ -425,3 +425,14 @@ fn test_slice() { "; assert_ssa_roundtrip(src); } + +#[test] +fn test_negative() { + let src = " + acir(inline) fn main f0 { + b0(): + return Field -1 + } + "; + assert_ssa_roundtrip(src); +} diff --git a/compiler/noirc_evaluator/src/ssa/parser/token.rs b/compiler/noirc_evaluator/src/ssa/parser/token.rs index 41c4f9ca164..d648f58de41 100644 --- a/compiler/noirc_evaluator/src/ssa/parser/token.rs +++ b/compiler/noirc_evaluator/src/ssa/parser/token.rs @@ -57,6 +57,8 @@ pub(crate) enum Token { Equal, /// & Ampersand, + /// - + Dash, Eof, } @@ -90,6 +92,7 @@ impl Display for Token { Token::Arrow => write!(f, "->"), Token::Equal => write!(f, "=="), Token::Ampersand => write!(f, "&"), + Token::Dash => write!(f, "-"), Token::Eof => write!(f, "(end of stream)"), } } From 78e784879716e1a8372fea06cdf7dc6df3b04339 Mon Sep 17 00:00:00 2001 From: jfecher Date: Thu, 14 Nov 2024 04:39:08 -0600 Subject: [PATCH 007/109] chore: Reverse ssa parser diff order (#6511) --- 
compiler/noirc_evaluator/src/ssa/opt/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/compiler/noirc_evaluator/src/ssa/opt/mod.rs b/compiler/noirc_evaluator/src/ssa/opt/mod.rs index 5576b494570..098f62bceba 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/mod.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/mod.rs @@ -44,7 +44,7 @@ pub(crate) fn assert_normalized_ssa_equals(mut ssa: super::Ssa, expected: &str) let expected = trim_leading_whitespace_from_lines(expected); if ssa != expected { - println!("Got:\n~~~\n{}\n~~~\nExpected:\n~~~\n{}\n~~~", ssa, expected); - similar_asserts::assert_eq!(ssa, expected); + println!("Expected:\n~~~\n{expected}\n~~~\nGot:\n~~~\n{ssa}\n~~~"); + similar_asserts::assert_eq!(expected, ssa); } } From 61d10f21736948f3b2631dc5219b4284dc2525f4 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Thu, 14 Nov 2024 11:19:37 +0000 Subject: [PATCH 008/109] chore(ssa): Skip array_set pass for Brillig functions (#6513) --- .../noirc_evaluator/src/ssa/opt/array_set.rs | 41 ++++++------------- 1 file changed, 12 insertions(+), 29 deletions(-) diff --git a/compiler/noirc_evaluator/src/ssa/opt/array_set.rs b/compiler/noirc_evaluator/src/ssa/opt/array_set.rs index e7f69f87da8..865a1e31eb3 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/array_set.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/array_set.rs @@ -28,17 +28,21 @@ impl Ssa { impl Function { pub(crate) fn array_set_optimization(&mut self) { + if matches!(self.runtime(), RuntimeType::Brillig(_)) { + // Brillig is supposed to use refcounting to decide whether to mutate an array; + // array mutation was only meant for ACIR. We could use it with Brillig as well, + // but then some of the optimizations that we can do in ACIR around shared + // references have to be skipped, which makes it more cumbersome. + return; + } + let reachable_blocks = self.reachable_blocks(); if !self.runtime().is_entry_point() { assert_eq!(reachable_blocks.len(), 1, "Expected there to be 1 block remaining in Acir function for array_set optimization"); } - let mut context = Context::new( - &self.dfg, - self.parameters(), - matches!(self.runtime(), RuntimeType::Brillig(_)), - ); + let mut context = Context::new(&self.dfg); for block in reachable_blocks.iter() { context.analyze_last_uses(*block); @@ -53,8 +57,6 @@ impl Function { struct Context<'f> { dfg: &'f DataFlowGraph, - function_parameters: &'f [ValueId], - is_brillig_runtime: bool, array_to_last_use: HashMap, instructions_that_can_be_made_mutable: HashSet, // Mapping of an array that comes from a load and whether the address @@ -64,15 +66,9 @@ struct Context<'f> { } impl<'f> Context<'f> { - fn new( - dfg: &'f DataFlowGraph, - function_parameters: &'f [ValueId], - is_brillig_runtime: bool, - ) -> Self { + fn new(dfg: &'f DataFlowGraph) -> Self { Context { dfg, - function_parameters, - is_brillig_runtime, array_to_last_use: HashMap::default(), instructions_that_can_be_made_mutable: HashSet::default(), arrays_from_load: HashMap::default(), @@ -94,21 +90,12 @@ impl<'f> Context<'f> { self.instructions_that_can_be_made_mutable.remove(&existing); } } - Instruction::ArraySet { array, value, .. } => { + Instruction::ArraySet { array, .. 
} => { let array = self.dfg.resolve(*array); if let Some(existing) = self.array_to_last_use.insert(array, *instruction_id) { self.instructions_that_can_be_made_mutable.remove(&existing); } - if self.is_brillig_runtime { - let value = self.dfg.resolve(*value); - - if let Some(existing) = self.inner_nested_arrays.get(&value) { - self.instructions_that_can_be_made_mutable.remove(existing); - } - let result = self.dfg.instruction_results(*instruction_id)[0]; - self.inner_nested_arrays.insert(result, *instruction_id); - } // If the array we are setting does not come from a load we can safely mark it mutable. // If the array comes from a load we may potentially being mutating an array at a reference @@ -128,17 +115,13 @@ impl<'f> Context<'f> { } }); - // We cannot safely mutate slices that are inputs to the function, as they might be shared with the caller. - // NB checking the block parameters is not enough, as we might have jumped into a parameterless blocks inside the function. - let is_function_param = self.function_parameters.contains(&array); - let can_mutate = if let Some(is_from_param) = self.arrays_from_load.get(&array) { // If the array was loaded from a reference parameter, we cannot // safely mark that array mutable as it may be shared by another value. !is_from_param && is_return_block } else { - !is_array_in_terminator && !is_function_param + !is_array_in_terminator }; if can_mutate { From 35408ab303f1018c1e2c38e6ea55430a2c89dc4c Mon Sep 17 00:00:00 2001 From: Ary Borenszweig Date: Thu, 14 Nov 2024 08:27:20 -0300 Subject: [PATCH 009/109] fix: disallow `#[test]` on associated functions (#6449) Co-authored-by: Michael J Klein --- .../noirc_frontend/src/elaborator/comptime.rs | 9 +++- .../src/hir/def_collector/dc_mod.rs | 38 +++++++++++++-- .../src/hir/def_collector/errors.rs | 8 ++++ compiler/noirc_frontend/src/tests.rs | 48 +++++++++++++++++++ 4 files changed, 98 insertions(+), 5 deletions(-) diff --git a/compiler/noirc_frontend/src/elaborator/comptime.rs b/compiler/noirc_frontend/src/elaborator/comptime.rs index 279adc331ea..a27e2bf0163 100644 --- a/compiler/noirc_frontend/src/elaborator/comptime.rs +++ b/compiler/noirc_frontend/src/elaborator/comptime.rs @@ -452,7 +452,14 @@ impl<'context> Elaborator<'context> { } ItemKind::Impl(r#impl) => { let module = self.module_id(); - dc_mod::collect_impl(self.interner, generated_items, r#impl, self.file, module); + dc_mod::collect_impl( + self.interner, + generated_items, + r#impl, + self.file, + module, + &mut self.errors, + ); } ItemKind::ModuleDecl(_) diff --git a/compiler/noirc_frontend/src/hir/def_collector/dc_mod.rs b/compiler/noirc_frontend/src/hir/def_collector/dc_mod.rs index a373441b4e0..bae57daae15 100644 --- a/compiler/noirc_frontend/src/hir/def_collector/dc_mod.rs +++ b/compiler/noirc_frontend/src/hir/def_collector/dc_mod.rs @@ -97,9 +97,9 @@ pub fn collect_defs( errors.extend(collector.collect_functions(context, ast.functions, crate_id)); - collector.collect_trait_impls(context, ast.trait_impls, crate_id); + errors.extend(collector.collect_trait_impls(context, ast.trait_impls, crate_id)); - collector.collect_impls(context, ast.impls, crate_id); + errors.extend(collector.collect_impls(context, ast.impls, crate_id)); collector.collect_attributes( ast.inner_attributes, @@ -163,7 +163,13 @@ impl<'a> ModCollector<'a> { errors } - fn collect_impls(&mut self, context: &mut Context, impls: Vec, krate: CrateId) { + fn collect_impls( + &mut self, + context: &mut Context, + impls: Vec, + krate: CrateId, + ) -> Vec<(CompilationError, 
FileId)> { + let mut errors = Vec::new(); let module_id = ModuleId { krate, local_id: self.module_id }; for r#impl in impls { @@ -173,8 +179,11 @@ impl<'a> ModCollector<'a> { r#impl, self.file_id, module_id, + &mut errors, ); } + + errors } fn collect_trait_impls( @@ -182,7 +191,9 @@ impl<'a> ModCollector<'a> { context: &mut Context, impls: Vec, krate: CrateId, - ) { + ) -> Vec<(CompilationError, FileId)> { + let mut errors = Vec::new(); + for mut trait_impl in impls { let trait_name = trait_impl.trait_name.clone(); @@ -198,6 +209,13 @@ impl<'a> ModCollector<'a> { let module = ModuleId { krate, local_id: self.module_id }; for (_, func_id, noir_function) in &mut unresolved_functions.functions { + if noir_function.def.attributes.is_test_function() { + let error = DefCollectorErrorKind::TestOnAssociatedFunction { + span: noir_function.name_ident().span(), + }; + errors.push((error.into(), self.file_id)); + } + let location = Location::new(noir_function.def.span, self.file_id); context.def_interner.push_function(*func_id, &noir_function.def, module, location); } @@ -224,6 +242,8 @@ impl<'a> ModCollector<'a> { self.def_collector.items.trait_impls.push(unresolved_trait_impl); } + + errors } fn collect_functions( @@ -1051,6 +1071,7 @@ pub fn collect_impl( r#impl: TypeImpl, file_id: FileId, module_id: ModuleId, + errors: &mut Vec<(CompilationError, FileId)>, ) { let mut unresolved_functions = UnresolvedFunctions { file_id, functions: Vec::new(), trait_id: None, self_type: None }; @@ -1058,6 +1079,15 @@ pub fn collect_impl( for (method, _) in r#impl.methods { let doc_comments = method.doc_comments; let mut method = method.item; + + if method.def.attributes.is_test_function() { + let error = DefCollectorErrorKind::TestOnAssociatedFunction { + span: method.name_ident().span(), + }; + errors.push((error.into(), file_id)); + continue; + } + let func_id = interner.push_empty_fn(); method.def.where_clause.extend(r#impl.where_clause.clone()); let location = Location::new(method.span(), file_id); diff --git a/compiler/noirc_frontend/src/hir/def_collector/errors.rs b/compiler/noirc_frontend/src/hir/def_collector/errors.rs index d72f493092d..c08b4ff2062 100644 --- a/compiler/noirc_frontend/src/hir/def_collector/errors.rs +++ b/compiler/noirc_frontend/src/hir/def_collector/errors.rs @@ -82,6 +82,8 @@ pub enum DefCollectorErrorKind { }, #[error("{0}")] UnsupportedNumericGenericType(#[from] UnsupportedNumericGenericType), + #[error("The `#[test]` attribute may only be used on a non-associated function")] + TestOnAssociatedFunction { span: Span }, } impl DefCollectorErrorKind { @@ -291,6 +293,12 @@ impl<'a> From<&'a DefCollectorErrorKind> for Diagnostic { diag } DefCollectorErrorKind::UnsupportedNumericGenericType(err) => err.into(), + DefCollectorErrorKind::TestOnAssociatedFunction { span } => Diagnostic::simple_error( + "The `#[test]` attribute is disallowed on `impl` methods".into(), + String::new(), + *span, + ), + } } } diff --git a/compiler/noirc_frontend/src/tests.rs b/compiler/noirc_frontend/src/tests.rs index b709436cccc..23f134cc2a3 100644 --- a/compiler/noirc_frontend/src/tests.rs +++ b/compiler/noirc_frontend/src/tests.rs @@ -3692,3 +3692,51 @@ fn allows_struct_with_generic_infix_type_as_main_input_3() { "#; assert_no_errors(src); } + +#[test] +fn disallows_test_attribute_on_impl_method() { + let src = r#" + pub struct Foo {} + impl Foo { + #[test] + fn foo() {} + } + + fn main() {} + "#; + let errors = get_program_errors(src); + assert_eq!(errors.len(), 1); + + assert!(matches!( + errors[0].0, + 
CompilationError::DefinitionError(DefCollectorErrorKind::TestOnAssociatedFunction { + span: _ + }) + )); +} + +#[test] +fn disallows_test_attribute_on_trait_impl_method() { + let src = r#" + pub trait Trait { + fn foo() {} + } + + pub struct Foo {} + impl Trait for Foo { + #[test] + fn foo() {} + } + + fn main() {} + "#; + let errors = get_program_errors(src); + assert_eq!(errors.len(), 1); + + assert!(matches!( + errors[0].0, + CompilationError::DefinitionError(DefCollectorErrorKind::TestOnAssociatedFunction { + span: _ + }) + )); +} From 852c87ae9ecdd441ee4c2ab3e78e86b2da07d8a4 Mon Sep 17 00:00:00 2001 From: Tom French <15848336+TomAFrench@users.noreply.github.com> Date: Thu, 14 Nov 2024 14:34:30 +0000 Subject: [PATCH 010/109] fix: allow range checks to be performed within the comptime intepreter (#6514) --- .../src/hir/comptime/interpreter/builtin.rs | 3 ++- .../src/hir/comptime/interpreter/foreign.rs | 26 ++++++++++++++++++- .../Nargo.toml | 6 +++++ .../src/main.nr | 5 ++++ .../Nargo.toml | 6 +++++ .../src/main.nr | 5 ++++ 6 files changed, 49 insertions(+), 2 deletions(-) create mode 100644 test_programs/compile_failure/comptime_apply_failing_range_constraint/Nargo.toml create mode 100644 test_programs/compile_failure/comptime_apply_failing_range_constraint/src/main.nr create mode 100644 test_programs/compile_success_empty/comptime_apply_range_constraint/Nargo.toml create mode 100644 test_programs/compile_success_empty/comptime_apply_range_constraint/src/main.nr diff --git a/compiler/noirc_frontend/src/hir/comptime/interpreter/builtin.rs b/compiler/noirc_frontend/src/hir/comptime/interpreter/builtin.rs index 8a6c46ca50c..80c1ee217c2 100644 --- a/compiler/noirc_frontend/src/hir/comptime/interpreter/builtin.rs +++ b/compiler/noirc_frontend/src/hir/comptime/interpreter/builtin.rs @@ -42,7 +42,7 @@ use crate::{ }; use self::builtin_helpers::{eq_item, get_array, get_ctstring, get_str, get_u8, hash_item, lex}; -use super::Interpreter; +use super::{foreign, Interpreter}; pub(crate) mod builtin_helpers; @@ -57,6 +57,7 @@ impl<'local, 'context> Interpreter<'local, 'context> { let interner = &mut self.elaborator.interner; let call_stack = &self.elaborator.interpreter_call_stack; match name { + "apply_range_constraint" => foreign::apply_range_constraint(arguments, location), "array_as_str_unchecked" => array_as_str_unchecked(interner, arguments, location), "array_len" => array_len(interner, arguments, location), "assert_constant" => Ok(Value::Bool(true)), diff --git a/compiler/noirc_frontend/src/hir/comptime/interpreter/foreign.rs b/compiler/noirc_frontend/src/hir/comptime/interpreter/foreign.rs index d1ab6a1dabd..3de72969cab 100644 --- a/compiler/noirc_frontend/src/hir/comptime/interpreter/foreign.rs +++ b/compiler/noirc_frontend/src/hir/comptime/interpreter/foreign.rs @@ -1,4 +1,6 @@ -use acvm::blackbox_solver::BlackBoxFunctionSolver; +use acvm::{ + acir::BlackBoxFunc, blackbox_solver::BlackBoxFunctionSolver, AcirField, BlackBoxResolutionError, +}; use bn254_blackbox_solver::Bn254BlackBoxSolver; use im::Vector; use iter_extended::try_vecmap; @@ -29,6 +31,28 @@ pub(super) fn call_foreign( } } +pub(super) fn apply_range_constraint( + arguments: Vec<(Value, Location)>, + location: Location, +) -> IResult { + let (value, num_bits) = check_two_arguments(arguments, location)?; + + let input = get_field(value)?; + let num_bits = get_u32(num_bits)?; + + if input.num_bits() < num_bits { + Ok(Value::Unit) + } else { + Err(InterpreterError::BlackBoxError( + BlackBoxResolutionError::Failed( + 
BlackBoxFunc::RANGE, + "value exceeds range check bounds".to_owned(), + ), + location, + )) + } +} + // poseidon2_permutation(_input: [Field; N], _state_length: u32) -> [Field; N] fn poseidon2_permutation( interner: &mut NodeInterner, diff --git a/test_programs/compile_failure/comptime_apply_failing_range_constraint/Nargo.toml b/test_programs/compile_failure/comptime_apply_failing_range_constraint/Nargo.toml new file mode 100644 index 00000000000..e14cfeae5d6 --- /dev/null +++ b/test_programs/compile_failure/comptime_apply_failing_range_constraint/Nargo.toml @@ -0,0 +1,6 @@ +[package] +name = "comptime_apply_failing_range_constraint" +type = "bin" +authors = [""] + +[dependencies] diff --git a/test_programs/compile_failure/comptime_apply_failing_range_constraint/src/main.nr b/test_programs/compile_failure/comptime_apply_failing_range_constraint/src/main.nr new file mode 100644 index 00000000000..752f13a2e51 --- /dev/null +++ b/test_programs/compile_failure/comptime_apply_failing_range_constraint/src/main.nr @@ -0,0 +1,5 @@ +fn main() { + comptime { + 256.assert_max_bit_size::<8>() + } +} diff --git a/test_programs/compile_success_empty/comptime_apply_range_constraint/Nargo.toml b/test_programs/compile_success_empty/comptime_apply_range_constraint/Nargo.toml new file mode 100644 index 00000000000..84d16060440 --- /dev/null +++ b/test_programs/compile_success_empty/comptime_apply_range_constraint/Nargo.toml @@ -0,0 +1,6 @@ +[package] +name = "comptime_apply_range_constraint" +type = "bin" +authors = [""] + +[dependencies] diff --git a/test_programs/compile_success_empty/comptime_apply_range_constraint/src/main.nr b/test_programs/compile_success_empty/comptime_apply_range_constraint/src/main.nr new file mode 100644 index 00000000000..ff5e0ba9511 --- /dev/null +++ b/test_programs/compile_success_empty/comptime_apply_range_constraint/src/main.nr @@ -0,0 +1,5 @@ +fn main() { + comptime { + 2.assert_max_bit_size::<8>() + } +} From 8932dac4847c643341320c2893f7e4297c78c621 Mon Sep 17 00:00:00 2001 From: jfecher Date: Fri, 15 Nov 2024 10:45:11 -0600 Subject: [PATCH 011/109] fix: Fix poor handling of aliased references in flattening pass causing some values to be zeroed (#6434) Co-authored-by: Tom French Co-authored-by: Tom French <15848336+TomAFrench@users.noreply.github.com> --- .../noirc_evaluator/src/ssa/ir/instruction.rs | 22 +- compiler/noirc_evaluator/src/ssa/opt/die.rs | 2 +- .../src/ssa/opt/flatten_cfg.rs | 269 +++++------------- .../src/ssa/opt/flatten_cfg/value_merger.rs | 43 +-- .../noirc_evaluator/src/ssa/opt/mem2reg.rs | 17 +- 5 files changed, 133 insertions(+), 220 deletions(-) diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction.rs index f187a279b9b..254a0afe88b 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction.rs @@ -14,11 +14,12 @@ use fxhash::FxHasher; use iter_extended::vecmap; use noirc_frontend::hir_def::types::Type as HirType; -use crate::ssa::opt::flatten_cfg::value_merger::ValueMerger; +use crate::ssa::{ir::function::RuntimeType, opt::flatten_cfg::value_merger::ValueMerger}; use super::{ basic_block::BasicBlockId, dfg::{CallStack, DataFlowGraph}, + function::Function, map::Id, types::{NumericType, Type}, value::{Value, ValueId}, @@ -363,12 +364,12 @@ impl Instruction { } } - pub(crate) fn can_eliminate_if_unused(&self, dfg: &DataFlowGraph) -> bool { + pub(crate) fn can_eliminate_if_unused(&self, function: &Function) -> bool { use Instruction::*; 
match self { Binary(binary) => { if matches!(binary.operator, BinaryOp::Div | BinaryOp::Mod) { - if let Some(rhs) = dfg.get_numeric_constant(binary.rhs) { + if let Some(rhs) = function.dfg.get_numeric_constant(binary.rhs) { rhs != FieldElement::zero() } else { false @@ -386,15 +387,26 @@ impl Instruction { | IfElse { .. } | ArraySet { .. } => true, + // Store instructions must be removed by DIE in acir code, any load + // instructions should already be unused by that point. + // + // Note that this check assumes that it is being performed after the flattening + // pass and after the last mem2reg pass. This is currently the case for the DIE + // pass where this check is done, but does mean that we cannot perform mem2reg + // after the DIE pass. + Store { .. } => { + matches!(function.runtime(), RuntimeType::Acir(_)) + && function.reachable_blocks().len() == 1 + } + Constrain(..) - | Store { .. } | EnableSideEffectsIf { .. } | IncrementRc { .. } | DecrementRc { .. } | RangeCheck { .. } => false, // Some `Intrinsic`s have side effects so we must check what kind of `Call` this is. - Call { func, .. } => match dfg[*func] { + Call { func, .. } => match function.dfg[*func] { // Explicitly allows removal of unused ec operations, even if they can fail Value::Intrinsic(Intrinsic::BlackBox(BlackBoxFunc::MultiScalarMul)) | Value::Intrinsic(Intrinsic::BlackBox(BlackBoxFunc::EmbeddedCurveAdd)) => true, diff --git a/compiler/noirc_evaluator/src/ssa/opt/die.rs b/compiler/noirc_evaluator/src/ssa/opt/die.rs index 57af27e8dcd..a7b7af91a18 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/die.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/die.rs @@ -172,7 +172,7 @@ impl Context { fn is_unused(&self, instruction_id: InstructionId, function: &Function) -> bool { let instruction = &function.dfg[instruction_id]; - if instruction.can_eliminate_if_unused(&function.dfg) { + if instruction.can_eliminate_if_unused(function) { let results = function.dfg.instruction_results(instruction_id); results.iter().all(|result| !self.used_values.contains(result)) } else if let Instruction::Call { func, arguments } = instruction { diff --git a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs index db2d96aac81..54c21a68ea2 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs @@ -132,7 +132,6 @@ //! v12 = add v10, v11 //! store v12 at v5 (new store) use fxhash::FxHashMap as HashMap; -use std::collections::{BTreeMap, HashSet}; use acvm::{acir::AcirField, acir::BlackBoxFunc, FieldElement}; use iter_extended::vecmap; @@ -186,18 +185,6 @@ struct Context<'f> { /// Maps start of branch -> end of branch branch_ends: HashMap, - /// Maps an address to the old and new value of the element at that address - /// These only hold stores for one block at a time and is cleared - /// between inlining of branches. - store_values: HashMap, - - /// Stores all allocations local to the current branch. - /// Since these branches are local to the current branch (ie. only defined within one branch of - /// an if expression), they should not be merged with their previous value or stored value in - /// the other branch since there is no such value. The ValueId here is that which is returned - /// by the allocate instruction. - local_allocations: HashSet, - /// A stack of each jmpif condition that was taken to reach a particular point in the program. 
/// When two branches are merged back into one, this constitutes a join point, and is analogous /// to the rest of the program after an if statement. When such a join point / end block is @@ -216,13 +203,6 @@ struct Context<'f> { arguments_stack: Vec>, } -#[derive(Clone)] -pub(crate) struct Store { - old_value: ValueId, - new_value: ValueId, - call_stack: CallStack, -} - #[derive(Clone)] struct ConditionalBranch { // Contains the last processed block during the processing of the branch. @@ -231,10 +211,6 @@ struct ConditionalBranch { old_condition: ValueId, // The condition of the branch condition: ValueId, - // The store values accumulated when processing the branch - store_values: HashMap, - // The allocations accumulated when processing the branch - local_allocations: HashSet, } struct ConditionalContext { @@ -263,8 +239,6 @@ fn flatten_function_cfg(function: &mut Function, no_predicates: &HashMap Context<'f> { // If this is not a separate variable, clippy gets confused and says the to_vec is // unnecessary, when removing it actually causes an aliasing/mutability error. let instructions = self.inserter.function.dfg[block].instructions().to_vec(); + let mut previous_allocate_result = None; + for instruction in instructions.iter() { if self.is_no_predicate(no_predicates, instruction) { // disable side effect for no_predicate functions @@ -356,10 +332,10 @@ impl<'f> Context<'f> { None, im::Vector::new(), ); - self.push_instruction(*instruction); + self.push_instruction(*instruction, &mut previous_allocate_result); self.insert_current_side_effects_enabled(); } else { - self.push_instruction(*instruction); + self.push_instruction(*instruction, &mut previous_allocate_result); } } } @@ -429,13 +405,9 @@ impl<'f> Context<'f> { let old_condition = *condition; let then_condition = self.inserter.resolve(old_condition); - let old_stores = std::mem::take(&mut self.store_values); - let old_allocations = std::mem::take(&mut self.local_allocations); let branch = ConditionalBranch { old_condition, condition: self.link_condition(then_condition), - store_values: old_stores, - local_allocations: old_allocations, last_block: *then_destination, }; let cond_context = ConditionalContext { @@ -463,21 +435,11 @@ impl<'f> Context<'f> { ); let else_condition = self.link_condition(else_condition); - // Make sure the else branch sees the previous values of each store - // rather than any values created in the 'then' branch. 
- let old_stores = std::mem::take(&mut cond_context.then_branch.store_values); - cond_context.then_branch.store_values = std::mem::take(&mut self.store_values); - self.undo_stores_in_then_branch(&cond_context.then_branch.store_values); - - let old_allocations = std::mem::take(&mut self.local_allocations); let else_branch = ConditionalBranch { old_condition: cond_context.then_branch.old_condition, condition: else_condition, - store_values: old_stores, - local_allocations: old_allocations, last_block: *block, }; - cond_context.then_branch.local_allocations.clear(); cond_context.else_branch = Some(else_branch); self.condition_stack.push(cond_context); @@ -499,10 +461,7 @@ impl<'f> Context<'f> { } let mut else_branch = cond_context.else_branch.unwrap(); - let stores_in_branch = std::mem::replace(&mut self.store_values, else_branch.store_values); - self.local_allocations = std::mem::take(&mut else_branch.local_allocations); else_branch.last_block = *block; - else_branch.store_values = stores_in_branch; cond_context.else_branch = Some(else_branch); // We must remember to reset whether side effects are enabled when both branches @@ -571,8 +530,6 @@ impl<'f> Context<'f> { .first() }); - let call_stack = cond_context.call_stack; - self.merge_stores(cond_context.then_branch, cond_context.else_branch, call_stack); self.arguments_stack.pop(); self.arguments_stack.pop(); self.arguments_stack.push(args); @@ -627,130 +584,47 @@ impl<'f> Context<'f> { self.insert_instruction_with_typevars(enable_side_effects, None, call_stack); } - /// Merge any store instructions found in each branch. - /// - /// This function relies on the 'then' branch being merged before the 'else' branch of a jmpif - /// instruction. If this ordering is changed, the ordering that store values are merged within - /// this function also needs to be changed to reflect that. 
- fn merge_stores( - &mut self, - then_branch: ConditionalBranch, - else_branch: Option, - call_stack: CallStack, - ) { - // Address -> (then_value, else_value, value_before_the_if) - let mut new_map = BTreeMap::new(); - - for (address, store) in then_branch.store_values { - new_map.insert(address, (store.new_value, store.old_value, store.old_value)); - } - - if else_branch.is_some() { - for (address, store) in else_branch.clone().unwrap().store_values { - if let Some(entry) = new_map.get_mut(&address) { - entry.1 = store.new_value; - } else { - new_map.insert(address, (store.old_value, store.new_value, store.old_value)); - } - } - } - - let then_condition = then_branch.condition; - let else_condition = if let Some(branch) = else_branch { - branch.condition - } else { - self.inserter.function.dfg.make_constant(FieldElement::zero(), Type::bool()) - }; - let block = self.inserter.function.entry_block(); - - // Merging must occur in a separate loop as we cannot borrow `self` as mutable while `value_merger` does - let mut new_values = HashMap::default(); - for (address, (then_case, else_case, _)) in &new_map { - let instruction = Instruction::IfElse { - then_condition, - then_value: *then_case, - else_condition, - else_value: *else_case, - }; - let dfg = &mut self.inserter.function.dfg; - let value = dfg - .insert_instruction_and_results(instruction, block, None, call_stack.clone()) - .first(); - - new_values.insert(address, value); - } - - // Replace stores with new merged values - for (address, (_, _, old_value)) in &new_map { - let value = new_values[address]; - let address = *address; - self.insert_instruction_with_typevars( - Instruction::Store { address, value }, - None, - call_stack.clone(), - ); - - if let Some(store) = self.store_values.get_mut(&address) { - store.new_value = value; - } else { - self.store_values.insert( - address, - Store { - old_value: *old_value, - new_value: value, - call_stack: call_stack.clone(), - }, - ); - } - } - } - - fn remember_store(&mut self, address: ValueId, new_value: ValueId, call_stack: CallStack) { - if !self.local_allocations.contains(&address) { - if let Some(store_value) = self.store_values.get_mut(&address) { - store_value.new_value = new_value; - } else { - let load = Instruction::Load { address }; - - let load_type = Some(vec![self.inserter.function.dfg.type_of_value(new_value)]); - let old_value = self - .insert_instruction_with_typevars(load.clone(), load_type, call_stack.clone()) - .first(); - - self.store_values.insert(address, Store { old_value, new_value, call_stack }); - } - } - } - /// Push the given instruction to the end of the entry block of the current function. /// /// Note that each ValueId of the instruction will be mapped via self.inserter.resolve. /// As a result, the instruction that will be pushed will actually be a new instruction /// with a different InstructionId from the original. The results of the given instruction /// will also be mapped to the results of the new instruction. - fn push_instruction(&mut self, id: InstructionId) -> Vec { + /// + /// `previous_allocate_result` should only be set to the result of an allocate instruction + /// if that instruction was the instruction immediately previous to this one - if there are + /// any instructions in between it should be None. 
+ fn push_instruction( + &mut self, + id: InstructionId, + previous_allocate_result: &mut Option, + ) -> Vec { let (instruction, call_stack) = self.inserter.map_instruction(id); - let instruction = self.handle_instruction_side_effects(instruction, call_stack.clone()); - let is_allocate = matches!(instruction, Instruction::Allocate); + let instruction = self.handle_instruction_side_effects( + instruction, + call_stack.clone(), + *previous_allocate_result, + ); + let instruction_is_allocate = matches!(&instruction, Instruction::Allocate); let entry = self.inserter.function.entry_block(); let results = self.inserter.push_instruction_value(instruction, id, entry, call_stack); - // Remember an allocate was created local to this branch so that we do not try to merge store - // values across branches for it later. - if is_allocate { - self.local_allocations.insert(results.first()); - } - + *previous_allocate_result = instruction_is_allocate.then(|| results.first()); results.results().into_owned() } /// If we are currently in a branch, we need to modify constrain instructions /// to multiply them by the branch's condition (see optimization #1 in the module comment). + /// + /// `previous_allocate_result` should only be set to the result of an allocate instruction + /// if that instruction was the instruction immediately previous to this one - if there are + /// any instructions in between it should be None. fn handle_instruction_side_effects( &mut self, instruction: Instruction, call_stack: CallStack, + previous_allocate_result: Option, ) -> Instruction { if let Some(condition) = self.get_last_condition() { match instruction { @@ -779,8 +653,35 @@ impl<'f> Context<'f> { Instruction::Constrain(lhs, rhs, message) } Instruction::Store { address, value } => { - self.remember_store(address, value, call_stack); - Instruction::Store { address, value } + // If this instruction immediately follows an allocate, and stores to that + // address there is no previous value to load and we don't need a merge anyway. + if Some(address) == previous_allocate_result { + Instruction::Store { address, value } + } else { + // Instead of storing `value`, store `if condition { value } else { previous_value }` + let typ = self.inserter.function.dfg.type_of_value(value); + let load = Instruction::Load { address }; + let previous_value = self + .insert_instruction_with_typevars( + load, + Some(vec![typ]), + call_stack.clone(), + ) + .first(); + + let not = Instruction::Not(condition); + let else_condition = self.insert_instruction(not, call_stack.clone()); + + let instruction = Instruction::IfElse { + then_condition: condition, + then_value: value, + else_condition, + else_value: previous_value, + }; + + let updated_value = self.insert_instruction(instruction, call_stack); + Instruction::Store { address, value: updated_value } + } } Instruction::RangeCheck { value, max_bit_size, assert_message } => { // Replace value with `value * predicate` to zero out value when predicate is inactive. @@ -902,16 +803,6 @@ impl<'f> Context<'f> { call_stack, ) } - - fn undo_stores_in_then_branch(&mut self, store_values: &HashMap) { - for (address, store) in store_values { - let address = *address; - let value = store.old_value; - let instruction = Instruction::Store { address, value }; - // Considering the location of undoing a store to be the same as the original store. 
- self.insert_instruction_with_typevars(instruction, None, store.call_stack.clone()); - } - } } #[cfg(test)] @@ -958,11 +849,9 @@ mod test { v1 = not v0 enable_side_effects u1 1 v3 = cast v0 as Field - v4 = cast v1 as Field - v6 = mul v3, Field 3 - v8 = mul v4, Field 4 - v9 = add v6, v8 - return v9 + v5 = mul v3, Field -1 + v7 = add Field 4, v5 + return v7 } "; @@ -1022,16 +911,14 @@ mod test { b0(v0: u1, v1: &mut Field): enable_side_effects v0 v2 = load v1 -> Field - store Field 5 at v1 - v4 = not v0 - store v2 at v1 + v3 = not v0 + v4 = cast v0 as Field + v6 = sub Field 5, v2 + v7 = mul v4, v6 + v8 = add v2, v7 + store v8 at v1 + v9 = not v0 enable_side_effects u1 1 - v6 = cast v0 as Field - v7 = cast v4 as Field - v8 = mul v6, Field 5 - v9 = mul v7, v2 - v10 = add v8, v9 - store v10 at v1 return } "; @@ -1062,19 +949,21 @@ mod test { b0(v0: u1, v1: &mut Field): enable_side_effects v0 v2 = load v1 -> Field - store Field 5 at v1 - v4 = not v0 - store v2 at v1 - enable_side_effects v4 - v5 = load v1 -> Field - store Field 6 at v1 + v3 = not v0 + v4 = cast v0 as Field + v6 = sub Field 5, v2 + v7 = mul v4, v6 + v8 = add v2, v7 + store v8 at v1 + v9 = not v0 + enable_side_effects v9 + v10 = load v1 -> Field + v11 = cast v9 as Field + v13 = sub Field 6, v10 + v14 = mul v11, v13 + v15 = add v10, v14 + store v15 at v1 enable_side_effects u1 1 - v8 = cast v0 as Field - v9 = cast v4 as Field - v10 = mul v8, Field 5 - v11 = mul v9, Field 6 - v12 = add v10, v11 - store v12 at v1 return } "; @@ -1242,7 +1131,7 @@ mod test { }; let merged_values = get_all_constants_reachable_from_instruction(&main.dfg, ret); - assert_eq!(merged_values, vec![3, 5, 6]); + assert_eq!(merged_values, vec![1, 3, 5, 6]); } #[test] diff --git a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs index 75ee57dd4fa..799378b1678 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs @@ -91,7 +91,7 @@ impl<'a> ValueMerger<'a> { dfg: &mut DataFlowGraph, block: BasicBlockId, then_condition: ValueId, - else_condition: ValueId, + _else_condition: ValueId, then_value: ValueId, else_value: ValueId, ) -> ValueId { @@ -114,31 +114,38 @@ impl<'a> ValueMerger<'a> { // We must cast the bool conditions to the actual numeric type used by each value. 
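// Editorial note on the arithmetic constructed below, writing `c` for the then-condition
// cast to a field: merged = else + c * (then - else). When c = 1 this reduces to `then`
// and when c = 0 it reduces to `else`, so the merge now needs a single multiplication
// instead of the previous `c * then + (1 - c) * else` form, which also required the
// negated condition (hence `else_condition` becoming unused above).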
let then_condition = dfg .insert_instruction_and_results( - Instruction::Cast(then_condition, then_type), - block, - None, - call_stack.clone(), - ) - .first(); - let else_condition = dfg - .insert_instruction_and_results( - Instruction::Cast(else_condition, else_type), + Instruction::Cast(then_condition, Type::field()), block, None, call_stack.clone(), ) .first(); - let mul = Instruction::binary(BinaryOp::Mul, then_condition, then_value); - let then_value = - dfg.insert_instruction_and_results(mul, block, None, call_stack.clone()).first(); + let then_field = Instruction::Cast(then_value, Type::field()); + let then_field_value = + dfg.insert_instruction_and_results(then_field, block, None, call_stack.clone()).first(); + + let else_field = Instruction::Cast(else_value, Type::field()); + let else_field_value = + dfg.insert_instruction_and_results(else_field, block, None, call_stack.clone()).first(); + + let diff = Instruction::binary(BinaryOp::Sub, then_field_value, else_field_value); + let diff_value = + dfg.insert_instruction_and_results(diff, block, None, call_stack.clone()).first(); - let mul = Instruction::binary(BinaryOp::Mul, else_condition, else_value); - let else_value = - dfg.insert_instruction_and_results(mul, block, None, call_stack.clone()).first(); + let conditional_diff = Instruction::binary(BinaryOp::Mul, then_condition, diff_value); + let conditional_diff_value = dfg + .insert_instruction_and_results(conditional_diff, block, None, call_stack.clone()) + .first(); + + let merged_field = + Instruction::binary(BinaryOp::Add, else_field_value, conditional_diff_value); + let merged_field_value = dfg + .insert_instruction_and_results(merged_field, block, None, call_stack.clone()) + .first(); - let add = Instruction::binary(BinaryOp::Add, then_value, else_value); - dfg.insert_instruction_and_results(add, block, None, call_stack).first() + let merged = Instruction::Cast(merged_field_value, then_type); + dfg.insert_instruction_and_results(merged, block, None, call_stack).first() } /// Given an if expression that returns an array: `if c { array1 } else { array2 }`, diff --git a/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs b/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs index a052abc5e16..38d73e3dca8 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs @@ -428,11 +428,13 @@ impl<'f> PerFunctionContext<'f> { self.check_array_aliasing(references, value); + // FIXME: This causes errors in the sha256 tests + // // If there was another store to this instruction without any (unremoved) loads or // function calls in-between, we can remove the previous store. - if let Some(last_store) = references.last_stores.get(&address) { - self.instructions_to_remove.insert(*last_store); - } + // if let Some(last_store) = references.last_stores.get(&address) { + // self.instructions_to_remove.insert(*last_store); + // } if self.inserter.function.dfg.value_is_reference(value) { if let Some(expression) = references.expressions.get(&value) { @@ -908,16 +910,19 @@ mod tests { // We would need to track whether the store where `v9` is the store value gets removed to know whether // to remove it. assert_eq!(count_stores(main.entry_block(), &main.dfg), 1); + // The first store in b1 is removed since there is another store to the same reference // in the same block, and the store is not needed before the later store. // The rest of the stores are also removed as no loads are done within any blocks // to the stored values. 
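// Editorial note (not part of the diff): with last-store elimination disabled by the
// FIXME above, two consecutive stores to the same address are both kept even when the
// address is never loaded in between, which is why the expected store count for b1
// below changes from 0 to 1.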
- assert_eq!(count_stores(b1, &main.dfg), 0); + // + // NOTE: This store is not removed due to the FIXME when handling Instruction::Store. + assert_eq!(count_stores(b1, &main.dfg), 1); let b1_instructions = main.dfg[b1].instructions(); - // We expect the last eq to be optimized out - assert_eq!(b1_instructions.len(), 0); + // We expect the last eq to be optimized out, only the store from above remains + assert_eq!(b1_instructions.len(), 1); } #[test] From a666a1f6dcd711db0edf3fa91a9c236c701b7c90 Mon Sep 17 00:00:00 2001 From: Maxim Vezenov Date: Mon, 18 Nov 2024 05:54:45 -0500 Subject: [PATCH 012/109] chore: Remove unused methods from implicit numeric generics (#6541) --- .../noirc_frontend/src/elaborator/types.rs | 111 +----------------- compiler/noirc_frontend/src/hir_def/types.rs | 108 ----------------- 2 files changed, 3 insertions(+), 216 deletions(-) diff --git a/compiler/noirc_frontend/src/elaborator/types.rs b/compiler/noirc_frontend/src/elaborator/types.rs index b296c4f1805..ae2bb942f48 100644 --- a/compiler/noirc_frontend/src/elaborator/types.rs +++ b/compiler/noirc_frontend/src/elaborator/types.rs @@ -1,4 +1,4 @@ -use std::{borrow::Cow, collections::BTreeMap, rc::Rc}; +use std::{borrow::Cow, rc::Rc}; use acvm::acir::AcirField; use iter_extended::vecmap; @@ -25,7 +25,7 @@ use crate::{ HirBinaryOp, HirCallExpression, HirExpression, HirLiteral, HirMemberAccess, HirMethodReference, HirPrefixExpression, TraitMethod, }, - function::{FuncMeta, Parameters}, + function::FuncMeta, stmt::HirStatement, traits::{NamedType, ResolvedTraitBound, Trait, TraitConstraint}, }, @@ -34,8 +34,7 @@ use crate::{ TraitImplKind, TraitMethodId, }, token::SecondaryAttribute, - Generics, Kind, ResolvedGeneric, Type, TypeBinding, TypeBindings, TypeVariable, - UnificationError, + Generics, Kind, ResolvedGeneric, Type, TypeBinding, TypeBindings, UnificationError, }; use super::{lints, path_resolution::PathResolutionItem, Elaborator}; @@ -1725,110 +1724,6 @@ impl<'context> Elaborator<'context> { } } - pub fn find_numeric_generics( - parameters: &Parameters, - return_type: &Type, - ) -> Vec<(String, TypeVariable)> { - let mut found = BTreeMap::new(); - for (_, parameter, _) in ¶meters.0 { - Self::find_numeric_generics_in_type(parameter, &mut found); - } - Self::find_numeric_generics_in_type(return_type, &mut found); - found.into_iter().collect() - } - - fn find_numeric_generics_in_type(typ: &Type, found: &mut BTreeMap) { - match typ { - Type::FieldElement - | Type::Integer(_, _) - | Type::Bool - | Type::Unit - | Type::Error - | Type::TypeVariable(_) - | Type::Constant(..) 
- | Type::NamedGeneric(_, _) - | Type::Quoted(_) - | Type::Forall(_, _) => (), - - Type::CheckedCast { from, to } => { - Self::find_numeric_generics_in_type(from, found); - Self::find_numeric_generics_in_type(to, found); - } - - Type::TraitAsType(_, _, args) => { - for arg in &args.ordered { - Self::find_numeric_generics_in_type(arg, found); - } - for arg in &args.named { - Self::find_numeric_generics_in_type(&arg.typ, found); - } - } - - Type::Array(length, element_type) => { - if let Type::NamedGeneric(type_variable, name) = length.as_ref() { - found.insert(name.to_string(), type_variable.clone()); - } - Self::find_numeric_generics_in_type(element_type, found); - } - - Type::Slice(element_type) => { - Self::find_numeric_generics_in_type(element_type, found); - } - - Type::Tuple(fields) => { - for field in fields { - Self::find_numeric_generics_in_type(field, found); - } - } - - Type::Function(parameters, return_type, _env, _unconstrained) => { - for parameter in parameters { - Self::find_numeric_generics_in_type(parameter, found); - } - Self::find_numeric_generics_in_type(return_type, found); - } - - Type::Struct(struct_type, generics) => { - for (i, generic) in generics.iter().enumerate() { - if let Type::NamedGeneric(type_variable, name) = generic { - if struct_type.borrow().generics[i].is_numeric() { - found.insert(name.to_string(), type_variable.clone()); - } - } else { - Self::find_numeric_generics_in_type(generic, found); - } - } - } - Type::Alias(alias, generics) => { - for (i, generic) in generics.iter().enumerate() { - if let Type::NamedGeneric(type_variable, name) = generic { - if alias.borrow().generics[i].is_numeric() { - found.insert(name.to_string(), type_variable.clone()); - } - } else { - Self::find_numeric_generics_in_type(generic, found); - } - } - } - Type::MutableReference(element) => Self::find_numeric_generics_in_type(element, found), - Type::String(length) => { - if let Type::NamedGeneric(type_variable, name) = length.as_ref() { - found.insert(name.to_string(), type_variable.clone()); - } - } - Type::FmtString(length, fields) => { - if let Type::NamedGeneric(type_variable, name) = length.as_ref() { - found.insert(name.to_string(), type_variable.clone()); - } - Self::find_numeric_generics_in_type(fields, found); - } - Type::InfixExpr(lhs, _op, rhs) => { - Self::find_numeric_generics_in_type(lhs, found); - Self::find_numeric_generics_in_type(rhs, found); - } - } - } - /// Push a type variable into the current FunctionContext to be defaulted if needed /// at the end of the earlier of either the current function or the current comptime scope. fn push_type_variable(&mut self, typ: Type) { diff --git a/compiler/noirc_frontend/src/hir_def/types.rs b/compiler/noirc_frontend/src/hir_def/types.rs index a0fea3aa774..ef8a697966c 100644 --- a/compiler/noirc_frontend/src/hir_def/types.rs +++ b/compiler/noirc_frontend/src/hir_def/types.rs @@ -184,10 +184,6 @@ impl Kind { } } - pub(crate) fn is_numeric(&self) -> bool { - matches!(self.follow_bindings(), Self::Numeric { .. 
}) - } - pub(crate) fn is_type_level_field_element(&self) -> bool { let type_level = false; self.is_field_element(type_level) @@ -375,10 +371,6 @@ impl ResolvedGeneric { pub fn kind(&self) -> Kind { self.type_var.kind() } - - pub(crate) fn is_numeric(&self) -> bool { - self.kind().is_numeric() - } } enum FunctionCoercionResult { @@ -524,13 +516,6 @@ impl StructType { self.fields.iter().map(|field| field.name.clone()).collect() } - /// Search the fields of a struct for any types with a `TypeKind::Numeric` - pub fn find_numeric_generics_in_fields(&self, found_names: &mut Vec) { - for field in self.fields.iter() { - field.typ.find_numeric_type_vars(found_names); - } - } - /// Instantiate this struct type, returning a Vec of the new generic args (in /// the same order as self.generics) pub fn instantiate(&self, interner: &mut NodeInterner) -> Vec { @@ -1102,99 +1087,6 @@ impl Type { } } - pub fn find_numeric_type_vars(&self, found_names: &mut Vec) { - // Return whether the named generic has a Kind::Numeric and save its name - let named_generic_is_numeric = |typ: &Type, found_names: &mut Vec| { - if let Type::NamedGeneric(var, name) = typ { - if var.kind().is_numeric() { - found_names.push(name.to_string()); - true - } else { - false - } - } else { - false - } - }; - - match self { - Type::FieldElement - | Type::Integer(_, _) - | Type::Bool - | Type::Unit - | Type::Error - | Type::Constant(_, _) - | Type::Forall(_, _) - | Type::Quoted(_) => {} - - Type::TypeVariable(type_var) => { - if let TypeBinding::Bound(typ) = &*type_var.borrow() { - named_generic_is_numeric(typ, found_names); - } - } - - Type::NamedGeneric(_, _) => { - named_generic_is_numeric(self, found_names); - } - Type::CheckedCast { from, to } => { - to.find_numeric_type_vars(found_names); - from.find_numeric_type_vars(found_names); - } - - Type::TraitAsType(_, _, args) => { - for arg in args.ordered.iter() { - arg.find_numeric_type_vars(found_names); - } - for arg in args.named.iter() { - arg.typ.find_numeric_type_vars(found_names); - } - } - Type::Array(length, elem) => { - elem.find_numeric_type_vars(found_names); - named_generic_is_numeric(length, found_names); - } - Type::Slice(elem) => elem.find_numeric_type_vars(found_names), - Type::Tuple(fields) => { - for field in fields.iter() { - field.find_numeric_type_vars(found_names); - } - } - Type::Function(parameters, return_type, env, _unconstrained) => { - for parameter in parameters.iter() { - parameter.find_numeric_type_vars(found_names); - } - return_type.find_numeric_type_vars(found_names); - env.find_numeric_type_vars(found_names); - } - Type::Struct(_, generics) => { - for generic in generics.iter() { - if !named_generic_is_numeric(generic, found_names) { - generic.find_numeric_type_vars(found_names); - } - } - } - Type::Alias(_, generics) => { - for generic in generics.iter() { - if !named_generic_is_numeric(generic, found_names) { - generic.find_numeric_type_vars(found_names); - } - } - } - Type::MutableReference(element) => element.find_numeric_type_vars(found_names), - Type::String(length) => { - named_generic_is_numeric(length, found_names); - } - Type::FmtString(length, elements) => { - elements.find_numeric_type_vars(found_names); - named_generic_is_numeric(length, found_names); - } - Type::InfixExpr(lhs, _op, rhs) => { - lhs.find_numeric_type_vars(found_names); - rhs.find_numeric_type_vars(found_names); - } - } - } - /// True if this type can be used as a parameter to `main` or a contract function. 
/// This is only false for unsized types like slices or slices that do not make sense /// as a program input such as named generics or mutable references. From c11a6963a0f79101848e7a6acac36eea74fef5a2 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Mon, 18 Nov 2024 11:23:05 +0000 Subject: [PATCH 013/109] chore(test): More descriptive labels in test matrix (#6542) --- tooling/nargo_cli/build.rs | 11 +++++++---- tooling/nargo_cli/tests/execute.rs | 6 ++++++ 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/tooling/nargo_cli/build.rs b/tooling/nargo_cli/build.rs index a3b718b157d..ad1f82f4e45 100644 --- a/tooling/nargo_cli/build.rs +++ b/tooling/nargo_cli/build.rs @@ -163,7 +163,10 @@ fn generate_test_cases( if *brillig && inliner.value() < matrix_config.min_inliner { continue; } - test_cases.push(format!("#[test_case::test_case({brillig}, {})]", inliner.label())); + test_cases.push(format!( + "#[test_case::test_case(ForceBrillig({brillig}), Inliner({}))]", + inliner.label() + )); } } let test_cases = test_cases.join("\n"); @@ -183,7 +186,7 @@ lazy_static::lazy_static! {{ }} {test_cases} -fn test_{test_name}(force_brillig: bool, inliner_aggressiveness: i64) {{ +fn test_{test_name}(force_brillig: ForceBrillig, inliner_aggressiveness: Inliner) {{ let test_program_dir = PathBuf::from("{test_dir}"); // Ignore poisoning errors if some of the matrix cases failed. @@ -198,8 +201,8 @@ fn test_{test_name}(force_brillig: bool, inliner_aggressiveness: i64) {{ let mut nargo = Command::cargo_bin("nargo").unwrap(); nargo.arg("--program-dir").arg(test_program_dir); nargo.arg("{test_command}").arg("--force"); - nargo.arg("--inliner-aggressiveness").arg(inliner_aggressiveness.to_string()); - if force_brillig {{ + nargo.arg("--inliner-aggressiveness").arg(inliner_aggressiveness.0.to_string()); + if force_brillig.0 {{ nargo.arg("--force-brillig"); }} diff --git a/tooling/nargo_cli/tests/execute.rs b/tooling/nargo_cli/tests/execute.rs index e2bef43b571..561520c57a9 100644 --- a/tooling/nargo_cli/tests/execute.rs +++ b/tooling/nargo_cli/tests/execute.rs @@ -14,6 +14,12 @@ mod tests { test_binary::build_test_binary_once!(mock_backend, "../backend_interface/test-binaries"); + // Utilities to keep the test matrix labels more intuitive. 
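// Editorial example (label values illustrative): with these wrappers a generated matrix
// case reads
//
//     #[test_case::test_case(ForceBrillig(true), Inliner(i64::MAX))]
//
// rather than an unlabeled `(true, i64::MAX)`, making it clear which argument is the
// brillig flag and which is the inliner aggressiveness when a matrix entry fails.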
+ #[derive(Debug, Clone, Copy)] + struct ForceBrillig(pub bool); + #[derive(Debug, Clone, Copy)] + struct Inliner(pub i64); + // include tests generated by `build.rs` include!(concat!(env!("OUT_DIR"), "/execute.rs")); } From 743a9b1304e2f13cb70a46e1dcf8173660de1ad8 Mon Sep 17 00:00:00 2001 From: Tom French <15848336+TomAFrench@users.noreply.github.com> Date: Mon, 18 Nov 2024 15:45:12 +0000 Subject: [PATCH 014/109] chore(ci): bump mac github runner image to `macos-14` (#6545) --- .github/workflows/publish-nargo.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/publish-nargo.yml b/.github/workflows/publish-nargo.yml index 55ba754e6ae..fa0b6f2d9fb 100644 --- a/.github/workflows/publish-nargo.yml +++ b/.github/workflows/publish-nargo.yml @@ -23,7 +23,7 @@ permissions: jobs: build-apple-darwin: - runs-on: macos-12 + runs-on: macos-14 env: CROSS_CONFIG: ${{ github.workspace }}/.github/Cross.toml NIGHTLY_RELEASE: ${{ inputs.tag == '' }} @@ -41,7 +41,7 @@ jobs: - name: Setup for Apple Silicon if: matrix.target == 'aarch64-apple-darwin' run: | - sudo xcode-select -s /Applications/Xcode_13.2.1.app/Contents/Developer/ + sudo xcode-select -s /Applications/Xcode_15.4.0.app/Contents/Developer/ echo "SDKROOT=$(xcrun -sdk macosx$(sw_vers -productVersion) --show-sdk-path)" >> $GITHUB_ENV echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx$(sw_vers -productVersion) --show-sdk-platform-version)" >> $GITHUB_ENV From 2f823a7f90641b29023fcf82b26aa6787a68615c Mon Sep 17 00:00:00 2001 From: Tom French <15848336+TomAFrench@users.noreply.github.com> Date: Mon, 18 Nov 2024 15:45:25 +0000 Subject: [PATCH 015/109] chore: convert some tests to use SSA parser (#6543) --- .../src/ssa/opt/flatten_cfg.rs | 203 ++++++++---------- 1 file changed, 95 insertions(+), 108 deletions(-) diff --git a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs index 54c21a68ea2..de15bfcb9b8 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs @@ -807,8 +807,6 @@ impl<'f> Context<'f> { #[cfg(test)] mod test { - use std::sync::Arc; - use acvm::acir::AcirField; use crate::ssa::{ @@ -1148,56 +1146,41 @@ mod test { // }; // } // - // // Translates to the following before the flattening pass: - // fn main f2 { - // b0(v0: u1): - // jmpif v0 then: b1, else: b2 - // b1(): - // v2 = allocate - // store Field 0 at v2 - // v4 = load v2 - // jmp b2() - // b2(): - // return - // } + // Translates to the following before the flattening pass: + let src = " + acir(inline) fn main f0 { + b0(v0: u1): + jmpif v0 then: b1, else: b2 + b1(): + v1 = allocate -> &mut Field + store Field 0 at v1 + v3 = load v1 -> Field + jmp b2() + b2(): + return + }"; // The bug is that the flattening pass previously inserted a load // before the first store to allocate, which loaded an uninitialized value. // In this test we assert the ordering is strictly Allocate then Store then Load. 
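// Editorial sketch of the conversion pattern used throughout this change: instead of
// assembling the input program with `FunctionBuilder`, each test now parses it from its
// textual SSA form and compares the flattened result against an expected snippet.
//
//     let ssa = Ssa::from_str(src).unwrap();
//     let flattened_ssa = ssa.flatten_cfg();
//     assert_normalized_ssa_equals(flattened_ssa, expected);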
- let main_id = Id::test_new(0); - let mut builder = FunctionBuilder::new("main".into(), main_id); - - let b1 = builder.insert_block(); - let b2 = builder.insert_block(); - - let v0 = builder.add_parameter(Type::bool()); - builder.terminate_with_jmpif(v0, b1, b2); - - builder.switch_to_block(b1); - let v2 = builder.insert_allocate(Type::field()); - let zero = builder.field_constant(0u128); - builder.insert_store(v2, zero); - let _v4 = builder.insert_load(v2, Type::field()); - builder.terminate_with_jmp(b2, vec![]); - - builder.switch_to_block(b2); - builder.terminate_with_return(vec![]); - - let ssa = builder.finish().flatten_cfg(); - let main = ssa.main(); + let ssa = Ssa::from_str(src).unwrap(); + let flattened_ssa = ssa.flatten_cfg(); // Now assert that there is not a load between the allocate and its first store // The Expected IR is: - // - // fn main f2 { - // b0(v0: u1): - // enable_side_effects v0 - // v6 = allocate - // store Field 0 at v6 - // v7 = load v6 - // v8 = not v0 - // enable_side_effects u1 1 - // return - // } + let expected = " + acir(inline) fn main f0 { + b0(v0: u1): + enable_side_effects v0 + v1 = allocate -> &mut Field + store Field 0 at v1 + v3 = load v1 -> Field + v4 = not v0 + enable_side_effects u1 1 + return + } + "; + + let main = flattened_ssa.main(); let instructions = main.dfg[main.entry_block()].instructions(); let find_instruction = |predicate: fn(&Instruction) -> bool| { @@ -1210,6 +1193,8 @@ mod test { assert!(allocate_index < store_index); assert!(store_index < load_index); + + assert_normalized_ssa_equals(flattened_ssa, expected); } /// Work backwards from an instruction to find all the constant values @@ -1282,71 +1267,71 @@ mod test { fn should_not_merge_incorrectly_to_false() { // Regression test for #1792 // Tests that it does not simplify a true constraint an always-false constraint - // acir(inline) fn main f1 { - // b0(v0: [u8; 2]): - // v5 = array_get v0, index u8 0 - // v6 = cast v5 as u32 - // v8 = truncate v6 to 1 bits, max_bit_size: 32 - // v9 = cast v8 as u1 - // v10 = allocate - // store u8 0 at v10 - // jmpif v9 then: b2, else: b3 - // b2(): - // v12 = cast v5 as Field - // v13 = add v12, Field 1 - // store v13 at v10 - // jmp b4() - // b4(): - // constrain v9 == u1 1 - // return - // b3(): - // store u8 0 at v10 - // jmp b4() - // } - let main_id = Id::test_new(1); - let mut builder = FunctionBuilder::new("main".into(), main_id); - - builder.insert_block(); // b0 - let b1 = builder.insert_block(); - let b2 = builder.insert_block(); - let b3 = builder.insert_block(); - - let element_type = Arc::new(vec![Type::unsigned(8)]); - let array_type = Type::Array(element_type.clone(), 2); - let array = builder.add_parameter(array_type); - - let zero = builder.numeric_constant(0_u128, Type::unsigned(8)); - - let v5 = builder.insert_array_get(array, zero, Type::unsigned(8)); - let v6 = builder.insert_cast(v5, Type::unsigned(32)); - let i_two = builder.numeric_constant(2_u128, Type::unsigned(32)); - let v8 = builder.insert_binary(v6, BinaryOp::Mod, i_two); - let v9 = builder.insert_cast(v8, Type::bool()); - - let v10 = builder.insert_allocate(Type::field()); - builder.insert_store(v10, zero); - - builder.terminate_with_jmpif(v9, b1, b2); - - builder.switch_to_block(b1); - let one = builder.field_constant(1_u128); - let v5b = builder.insert_cast(v5, Type::field()); - let v13: Id = builder.insert_binary(v5b, BinaryOp::Add, one); - let v14 = builder.insert_cast(v13, Type::unsigned(8)); - builder.insert_store(v10, v14); - 
builder.terminate_with_jmp(b3, vec![]); + let src = " + acir(inline) fn main f0 { + b0(v0: [u8; 2]): + v2 = array_get v0, index u8 0 -> u8 + v3 = cast v2 as u32 + v4 = truncate v3 to 1 bits, max_bit_size: 32 + v5 = cast v4 as u1 + v6 = allocate -> &mut Field + store u8 0 at v6 + jmpif v5 then: b2, else: b1 + b2(): + v7 = cast v2 as Field + v9 = add v7, Field 1 + v10 = cast v9 as u8 + store v10 at v6 + jmp b3() + b3(): + constrain v5 == u1 1 + return + b1(): + store u8 0 at v6 + jmp b3() + } + "; - builder.switch_to_block(b2); - builder.insert_store(v10, zero); - builder.terminate_with_jmp(b3, vec![]); + let ssa = Ssa::from_str(src).unwrap(); - builder.switch_to_block(b3); - let v_true = builder.numeric_constant(true, Type::bool()); - let v12 = builder.insert_binary(v9, BinaryOp::Eq, v_true); - builder.insert_constrain(v12, v_true, None); - builder.terminate_with_return(vec![]); + let expected = " + acir(inline) fn main f0 { + b0(v0: [u8; 2]): + v2 = array_get v0, index u8 0 -> u8 + v3 = cast v2 as u32 + v4 = truncate v3 to 1 bits, max_bit_size: 32 + v5 = cast v4 as u1 + v6 = allocate -> &mut Field + store u8 0 at v6 + enable_side_effects v5 + v7 = cast v2 as Field + v9 = add v7, Field 1 + v10 = cast v9 as u8 + v11 = load v6 -> u8 + v12 = not v5 + v13 = cast v4 as Field + v14 = cast v11 as Field + v15 = sub v9, v14 + v16 = mul v13, v15 + v17 = add v14, v16 + v18 = cast v17 as u8 + store v18 at v6 + v19 = not v5 + enable_side_effects v19 + v20 = load v6 -> u8 + v21 = cast v19 as Field + v22 = cast v20 as Field + v24 = sub Field 0, v22 + v25 = mul v21, v24 + v26 = add v22, v25 + v27 = cast v26 as u8 + store v27 at v6 + enable_side_effects u1 1 + constrain v5 == u1 1 + return + } + "; - let ssa = builder.finish(); let flattened_ssa = ssa.flatten_cfg(); let main = flattened_ssa.main(); @@ -1363,6 +1348,8 @@ mod test { } } assert_eq!(constrain_count, 1); + + assert_normalized_ssa_equals(flattened_ssa, expected); } #[test] From 1cad7c887893ebfb5de57a71d7965c8b88158a14 Mon Sep 17 00:00:00 2001 From: Maxim Vezenov Date: Mon, 18 Nov 2024 10:46:05 -0500 Subject: [PATCH 016/109] feat(profiler): Reduce memory in Brillig execution flamegraph (#6538) Co-authored-by: Tom French <15848336+TomAFrench@users.noreply.github.com> --- tooling/profiler/Cargo.toml | 3 - .../src/cli/execution_flamegraph_cmd.rs | 21 ++-- .../profiler/src/cli/gates_flamegraph_cmd.rs | 17 ++- .../src/cli/opcodes_flamegraph_cmd.rs | 18 +-- tooling/profiler/src/flamegraph.rs | 104 +++++++++++++----- tooling/profiler/src/opcode_formatter.rs | 19 +--- 6 files changed, 110 insertions(+), 72 deletions(-) diff --git a/tooling/profiler/Cargo.toml b/tooling/profiler/Cargo.toml index 604208b5a54..798a21ea0d6 100644 --- a/tooling/profiler/Cargo.toml +++ b/tooling/profiler/Cargo.toml @@ -44,6 +44,3 @@ noirc_abi.workspace = true noirc_driver.workspace = true tempfile.workspace = true -[features] -default = ["bn254"] -bn254 = ["acir/bn254"] diff --git a/tooling/profiler/src/cli/execution_flamegraph_cmd.rs b/tooling/profiler/src/cli/execution_flamegraph_cmd.rs index a0b3d6a3128..981d08a3eb1 100644 --- a/tooling/profiler/src/cli/execution_flamegraph_cmd.rs +++ b/tooling/profiler/src/cli/execution_flamegraph_cmd.rs @@ -1,13 +1,12 @@ use std::path::{Path, PathBuf}; use acir::circuit::OpcodeLocation; -use acir::FieldElement; use clap::Args; use color_eyre::eyre::{self, Context}; -use crate::flamegraph::{FlamegraphGenerator, InfernoFlamegraphGenerator, Sample}; +use crate::flamegraph::{BrilligExecutionSample, FlamegraphGenerator, 
InfernoFlamegraphGenerator}; use crate::fs::{read_inputs_from_file, read_program_from_file}; -use crate::opcode_formatter::AcirOrBrilligOpcode; +use crate::opcode_formatter::format_brillig_opcode; use bn254_blackbox_solver::Bn254BlackBoxSolver; use nargo::ops::DefaultForeignCallExecutor; use noirc_abi::input_parser::Format; @@ -51,7 +50,7 @@ fn run_with_generator( let initial_witness = program.abi.encode(&inputs_map, None)?; println!("Executing"); - let (_, profiling_samples) = nargo::ops::execute_program_with_profiling( + let (_, mut profiling_samples) = nargo::ops::execute_program_with_profiling( &program.bytecode, initial_witness, &Bn254BlackBoxSolver, @@ -59,11 +58,13 @@ fn run_with_generator( )?; println!("Executed"); - let profiling_samples: Vec> = profiling_samples - .into_iter() + println!("Collecting {} samples", profiling_samples.len()); + + let profiling_samples: Vec = profiling_samples + .iter_mut() .map(|sample| { - let call_stack = sample.call_stack; - let brillig_function_id = sample.brillig_function_id; + let call_stack = std::mem::take(&mut sample.call_stack); + let brillig_function_id = std::mem::take(&mut sample.brillig_function_id); let last_entry = call_stack.last(); let opcode = brillig_function_id .and_then(|id| program.bytecode.unconstrained_functions.get(id.0 as usize)) @@ -74,8 +75,8 @@ fn run_with_generator( None } }) - .map(|opcode| AcirOrBrilligOpcode::Brillig(opcode.clone())); - Sample { opcode, call_stack, count: 1, brillig_function_id } + .map(format_brillig_opcode); + BrilligExecutionSample { opcode, call_stack, brillig_function_id } }) .collect(); diff --git a/tooling/profiler/src/cli/gates_flamegraph_cmd.rs b/tooling/profiler/src/cli/gates_flamegraph_cmd.rs index 20cc1b747c3..c3ae29de058 100644 --- a/tooling/profiler/src/cli/gates_flamegraph_cmd.rs +++ b/tooling/profiler/src/cli/gates_flamegraph_cmd.rs @@ -6,10 +6,10 @@ use color_eyre::eyre::{self, Context}; use noirc_artifacts::debug::DebugArtifact; -use crate::flamegraph::{FlamegraphGenerator, InfernoFlamegraphGenerator, Sample}; +use crate::flamegraph::{CompilationSample, FlamegraphGenerator, InfernoFlamegraphGenerator}; use crate::fs::read_program_from_file; use crate::gates_provider::{BackendGatesProvider, GatesProvider}; -use crate::opcode_formatter::AcirOrBrilligOpcode; +use crate::opcode_formatter::format_acir_opcode; #[derive(Debug, Clone, Args)] pub(crate) struct GatesFlamegraphCommand { @@ -83,8 +83,8 @@ fn run_with_provider( .into_iter() .zip(bytecode.opcodes) .enumerate() - .map(|(index, (gates, opcode))| Sample { - opcode: Some(AcirOrBrilligOpcode::Acir(opcode)), + .map(|(index, (gates, opcode))| CompilationSample { + opcode: Some(format_acir_opcode(&opcode)), call_stack: vec![OpcodeLocation::Acir(index)], count: gates, brillig_function_id: None, @@ -106,10 +106,7 @@ fn run_with_provider( #[cfg(test)] mod tests { - use acir::{ - circuit::{Circuit, Program}, - AcirField, - }; + use acir::circuit::{Circuit, Program}; use color_eyre::eyre::{self}; use fm::codespan_files::Files; use noirc_artifacts::program::ProgramArtifact; @@ -143,9 +140,9 @@ mod tests { struct TestFlamegraphGenerator {} impl super::FlamegraphGenerator for TestFlamegraphGenerator { - fn generate_flamegraph<'files, F: AcirField>( + fn generate_flamegraph<'files, S: Sample>( &self, - _samples: Vec>, + _samples: Vec, _debug_symbols: &DebugInfo, _files: &'files impl Files<'files, FileId = fm::FileId>, _artifact_name: &str, diff --git a/tooling/profiler/src/cli/opcodes_flamegraph_cmd.rs 
b/tooling/profiler/src/cli/opcodes_flamegraph_cmd.rs index bb3df86c339..4e271c9f838 100644 --- a/tooling/profiler/src/cli/opcodes_flamegraph_cmd.rs +++ b/tooling/profiler/src/cli/opcodes_flamegraph_cmd.rs @@ -7,9 +7,9 @@ use color_eyre::eyre::{self, Context}; use noirc_artifacts::debug::DebugArtifact; -use crate::flamegraph::{FlamegraphGenerator, InfernoFlamegraphGenerator, Sample}; +use crate::flamegraph::{CompilationSample, FlamegraphGenerator, InfernoFlamegraphGenerator}; use crate::fs::read_program_from_file; -use crate::opcode_formatter::AcirOrBrilligOpcode; +use crate::opcode_formatter::{format_acir_opcode, format_brillig_opcode}; #[derive(Debug, Clone, Args)] pub(crate) struct OpcodesFlamegraphCommand { @@ -59,8 +59,8 @@ fn run_with_generator( .opcodes .iter() .enumerate() - .map(|(index, opcode)| Sample { - opcode: Some(AcirOrBrilligOpcode::Acir(opcode.clone())), + .map(|(index, opcode)| CompilationSample { + opcode: Some(format_acir_opcode(opcode)), call_stack: vec![OpcodeLocation::Acir(index)], count: 1, brillig_function_id: None, @@ -96,8 +96,8 @@ fn run_with_generator( .bytecode .into_iter() .enumerate() - .map(|(brillig_index, opcode)| Sample { - opcode: Some(AcirOrBrilligOpcode::Brillig(opcode)), + .map(|(brillig_index, opcode)| CompilationSample { + opcode: Some(format_brillig_opcode(&opcode)), call_stack: vec![OpcodeLocation::Brillig { acir_index: acir_opcode_index, brillig_index, @@ -146,7 +146,7 @@ mod tests { brillig::{BrilligBytecode, BrilligFunctionId}, Circuit, Opcode, Program, }, - AcirField, FieldElement, + FieldElement, }; use color_eyre::eyre::{self}; use fm::codespan_files::Files; @@ -160,9 +160,9 @@ mod tests { struct TestFlamegraphGenerator {} impl super::FlamegraphGenerator for TestFlamegraphGenerator { - fn generate_flamegraph<'files, F: AcirField>( + fn generate_flamegraph<'files, S: Sample>( &self, - _samples: Vec>, + _samples: Vec, _debug_symbols: &DebugInfo, _files: &'files impl Files<'files, FileId = fm::FileId>, _artifact_name: &str, diff --git a/tooling/profiler/src/flamegraph.rs b/tooling/profiler/src/flamegraph.rs index 7882ac903ef..38966902314 100644 --- a/tooling/profiler/src/flamegraph.rs +++ b/tooling/profiler/src/flamegraph.rs @@ -3,7 +3,6 @@ use std::{collections::BTreeMap, io::BufWriter}; use acir::circuit::brillig::BrilligFunctionId; use acir::circuit::OpcodeLocation; -use acir::AcirField; use color_eyre::eyre::{self}; use fm::codespan_files::Files; use fxhash::FxHashMap as HashMap; @@ -13,18 +12,66 @@ use noirc_errors::reporter::line_and_column_from_span; use noirc_errors::Location; use noirc_evaluator::brillig::ProcedureId; -use crate::opcode_formatter::AcirOrBrilligOpcode; +pub(crate) trait Sample { + fn count(&self) -> usize; -use super::opcode_formatter::format_opcode; + fn brillig_function_id(&self) -> Option; + + fn call_stack(&self) -> &[OpcodeLocation]; + + fn opcode(self) -> Option; +} #[derive(Debug)] -pub(crate) struct Sample { - pub(crate) opcode: Option>, +pub(crate) struct CompilationSample { + pub(crate) opcode: Option, pub(crate) call_stack: Vec, pub(crate) count: usize, pub(crate) brillig_function_id: Option, } +impl Sample for CompilationSample { + fn count(&self) -> usize { + self.count + } + + fn brillig_function_id(&self) -> Option { + self.brillig_function_id + } + + fn call_stack(&self) -> &[OpcodeLocation] { + &self.call_stack + } + + fn opcode(self) -> Option { + self.opcode + } +} + +pub(crate) struct BrilligExecutionSample { + pub(crate) opcode: Option, + pub(crate) call_stack: Vec, + pub(crate) 
brillig_function_id: Option, +} + +impl Sample for BrilligExecutionSample { + fn count(&self) -> usize { + 1 + } + + fn brillig_function_id(&self) -> Option { + self.brillig_function_id + } + + fn call_stack(&self) -> &[OpcodeLocation] { + &self.call_stack + } + + fn opcode(self) -> Option { + self.opcode + } +} + #[derive(Debug, Default)] pub(crate) struct FoldedStackItem { pub(crate) total_samples: usize, @@ -33,9 +80,9 @@ pub(crate) struct FoldedStackItem { pub(crate) trait FlamegraphGenerator { #[allow(clippy::too_many_arguments)] - fn generate_flamegraph<'files, F: AcirField>( + fn generate_flamegraph<'files, S: Sample>( &self, - samples: Vec>, + samples: Vec, debug_symbols: &DebugInfo, files: &'files impl Files<'files, FileId = fm::FileId>, artifact_name: &str, @@ -49,9 +96,9 @@ pub(crate) struct InfernoFlamegraphGenerator { } impl FlamegraphGenerator for InfernoFlamegraphGenerator { - fn generate_flamegraph<'files, F: AcirField>( + fn generate_flamegraph<'files, S: Sample>( &self, - samples: Vec>, + samples: Vec, debug_symbols: &DebugInfo, files: &'files impl Files<'files, FileId = fm::FileId>, artifact_name: &str, @@ -82,8 +129,8 @@ impl FlamegraphGenerator for InfernoFlamegraphGenerator { } } -fn generate_folded_sorted_lines<'files, F: AcirField>( - samples: Vec>, +fn generate_folded_sorted_lines<'files, S: Sample>( + samples: Vec, debug_symbols: &DebugInfo, files: &'files impl Files<'files, FileId = fm::FileId>, ) -> Vec { @@ -92,15 +139,15 @@ fn generate_folded_sorted_lines<'files, F: AcirField>( let mut resolution_cache: HashMap> = HashMap::default(); for sample in samples { - let mut location_names = Vec::with_capacity(sample.call_stack.len()); - for opcode_location in sample.call_stack { + let mut location_names = Vec::with_capacity(sample.call_stack().len()); + for opcode_location in sample.call_stack() { let callsite_labels = resolution_cache - .entry(opcode_location) + .entry(*opcode_location) .or_insert_with(|| { find_callsite_labels( debug_symbols, - &opcode_location, - sample.brillig_function_id, + opcode_location, + sample.brillig_function_id(), files, ) }) @@ -109,11 +156,14 @@ fn generate_folded_sorted_lines<'files, F: AcirField>( location_names.extend(callsite_labels); } - if let Some(opcode) = &sample.opcode { - location_names.push(format_opcode(opcode)); + // We move `sample` by calling `sample.opcode()` so we want to fetch the sample count here. 
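// Editorial note: `Sample::opcode` takes `self` by value so the already-formatted String
// can be moved into `location_names` rather than cloned (presumably part of this PR's
// memory reduction), which is why `count()` has to be read before calling `opcode()`
// below.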
+ let count = sample.count(); + + if let Some(opcode) = sample.opcode() { + location_names.push(opcode); } - add_locations_to_folded_stack_items(&mut folded_stack_items, location_names, sample.count); + add_locations_to_folded_stack_items(&mut folded_stack_items, location_names, count); } to_folded_sorted_lines(&folded_stack_items, Default::default()) @@ -251,7 +301,7 @@ mod tests { use noirc_errors::{debug_info::DebugInfo, Location, Span}; use std::{collections::BTreeMap, path::Path}; - use crate::{flamegraph::Sample, opcode_formatter::AcirOrBrilligOpcode}; + use crate::{flamegraph::CompilationSample, opcode_formatter::format_acir_opcode}; use super::generate_folded_sorted_lines; @@ -338,25 +388,25 @@ mod tests { BTreeMap::default(), ); - let samples: Vec> = vec![ - Sample { - opcode: Some(AcirOrBrilligOpcode::Acir(AcirOpcode::AssertZero( + let samples: Vec = vec![ + CompilationSample { + opcode: Some(format_acir_opcode(&AcirOpcode::AssertZero::( Expression::default(), ))), call_stack: vec![OpcodeLocation::Acir(0)], count: 10, brillig_function_id: None, }, - Sample { - opcode: Some(AcirOrBrilligOpcode::Acir(AcirOpcode::AssertZero( + CompilationSample { + opcode: Some(format_acir_opcode(&AcirOpcode::AssertZero::( Expression::default(), ))), call_stack: vec![OpcodeLocation::Acir(1)], count: 20, brillig_function_id: None, }, - Sample { - opcode: Some(AcirOrBrilligOpcode::Acir(AcirOpcode::MemoryInit { + CompilationSample { + opcode: Some(format_acir_opcode(&AcirOpcode::MemoryInit:: { block_id: BlockId(0), init: vec![], block_type: acir::circuit::opcodes::BlockType::Memory, diff --git a/tooling/profiler/src/opcode_formatter.rs b/tooling/profiler/src/opcode_formatter.rs index 68057b6d86f..32aa9980b88 100644 --- a/tooling/profiler/src/opcode_formatter.rs +++ b/tooling/profiler/src/opcode_formatter.rs @@ -2,12 +2,6 @@ use acir::brillig::{BinaryFieldOp, BinaryIntOp, BlackBoxOp, Opcode as BrilligOpc use acir::circuit::{directives::Directive, opcodes::BlackBoxFuncCall, Opcode as AcirOpcode}; use acir::AcirField; -#[derive(Debug)] -pub(crate) enum AcirOrBrilligOpcode { - Acir(AcirOpcode), - Brillig(BrilligOpcode), -} - fn format_blackbox_function(call: &BlackBoxFuncCall) -> String { match call { BlackBoxFuncCall::AES128Encrypt { .. 
} => "aes128_encrypt".to_string(), @@ -136,11 +130,10 @@ fn format_brillig_opcode_kind(opcode: &BrilligOpcode) -> String { } } -pub(crate) fn format_opcode(opcode: &AcirOrBrilligOpcode) -> String { - match opcode { - AcirOrBrilligOpcode::Acir(opcode) => format!("acir::{}", format_acir_opcode_kind(opcode)), - AcirOrBrilligOpcode::Brillig(opcode) => { - format!("brillig::{}", format_brillig_opcode_kind(opcode)) - } - } +pub(crate) fn format_acir_opcode(opcode: &AcirOpcode) -> String { + format!("acir::{}", format_acir_opcode_kind(opcode)) +} + +pub(crate) fn format_brillig_opcode(opcode: &BrilligOpcode) -> String { + format!("brillig::{}", format_brillig_opcode_kind(opcode)) } From 2d999f05981d126a0ed139e4c662652aad70d67e Mon Sep 17 00:00:00 2001 From: jfecher Date: Mon, 18 Nov 2024 10:11:58 -0600 Subject: [PATCH 017/109] chore: Add `Instruction::MakeArray` to SSA (#6071) Co-authored-by: Tom French <15848336+TomAFrench@users.noreply.github.com> Co-authored-by: Maxim Vezenov --- .../src/brillig/brillig_gen/brillig_block.rs | 87 +++++----- .../brillig_gen/constant_allocation.rs | 5 +- .../brillig/brillig_gen/variable_liveness.rs | 27 +--- .../src/brillig/brillig_ir/codegen_stack.rs | 2 + .../noirc_evaluator/src/ssa/acir_gen/mod.rs | 26 +-- .../check_for_underconstrained_values.rs | 6 +- .../src/ssa/function_builder/data_bus.rs | 17 +- .../src/ssa/function_builder/mod.rs | 22 +-- compiler/noirc_evaluator/src/ssa/ir/dfg.rs | 21 +-- compiler/noirc_evaluator/src/ssa/ir/dom.rs | 2 +- .../src/ssa/ir/function_inserter.rs | 107 ++++++++++--- .../noirc_evaluator/src/ssa/ir/instruction.rs | 61 +++++-- .../src/ssa/ir/instruction/call.rs | 106 ++++++++++--- .../src/ssa/ir/instruction/call/blackbox.rs | 41 +++-- .../noirc_evaluator/src/ssa/ir/printer.rs | 33 ++-- compiler/noirc_evaluator/src/ssa/ir/value.rs | 4 - .../noirc_evaluator/src/ssa/opt/array_set.rs | 36 +++-- .../src/ssa/opt/constant_folding.rs | 149 ++++++++++++++---- compiler/noirc_evaluator/src/ssa/opt/die.rs | 43 ++--- .../src/ssa/opt/flatten_cfg.rs | 26 ++- .../ssa/opt/flatten_cfg/capacity_tracker.rs | 32 ++-- .../src/ssa/opt/flatten_cfg/value_merger.rs | 14 +- .../noirc_evaluator/src/ssa/opt/inlining.rs | 4 - .../noirc_evaluator/src/ssa/opt/mem2reg.rs | 64 +++----- .../src/ssa/opt/normalize_value_ids.rs | 13 -- compiler/noirc_evaluator/src/ssa/opt/rc.rs | 7 +- .../src/ssa/opt/remove_enable_side_effects.rs | 3 +- .../noirc_evaluator/src/ssa/opt/unrolling.rs | 29 +++- compiler/noirc_evaluator/src/ssa/parser.rs | 30 ++-- .../noirc_evaluator/src/ssa/parser/ast.rs | 8 +- .../src/ssa/parser/into_ssa.rs | 17 +- .../noirc_evaluator/src/ssa/parser/tests.rs | 19 ++- .../noirc_evaluator/src/ssa/parser/token.rs | 3 + .../noirc_evaluator/src/ssa/ssa_gen/mod.rs | 2 +- 34 files changed, 631 insertions(+), 435 deletions(-) diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs index 0e9ebd8900d..9661406abee 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs @@ -160,13 +160,9 @@ impl<'block> BrilligBlock<'block> { ); } TerminatorInstruction::Return { return_values, .. 
} => { - let return_registers: Vec<_> = return_values - .iter() - .map(|value_id| { - let return_variable = self.convert_ssa_value(*value_id, dfg); - return_variable.extract_register() - }) - .collect(); + let return_registers = vecmap(return_values, |value_id| { + self.convert_ssa_value(*value_id, dfg).extract_register() + }); self.brillig_context.codegen_return(&return_registers); } } @@ -763,6 +759,43 @@ impl<'block> BrilligBlock<'block> { Instruction::IfElse { .. } => { unreachable!("IfElse instructions should not be possible in brillig") } + Instruction::MakeArray { elements: array, typ } => { + let value_id = dfg.instruction_results(instruction_id)[0]; + if !self.variables.is_allocated(&value_id) { + let new_variable = self.variables.define_variable( + self.function_context, + self.brillig_context, + value_id, + dfg, + ); + + // Initialize the variable + match new_variable { + BrilligVariable::BrilligArray(brillig_array) => { + self.brillig_context.codegen_initialize_array(brillig_array); + } + BrilligVariable::BrilligVector(vector) => { + let size = self + .brillig_context + .make_usize_constant_instruction(array.len().into()); + self.brillig_context.codegen_initialize_vector(vector, size, None); + self.brillig_context.deallocate_single_addr(size); + } + _ => unreachable!( + "ICE: Cannot initialize array value created as {new_variable:?}" + ), + }; + + // Write the items + let items_pointer = self + .brillig_context + .codegen_make_array_or_vector_items_pointer(new_variable); + + self.initialize_constant_array(array, typ, dfg, items_pointer); + + self.brillig_context.deallocate_register(items_pointer); + } + } }; let dead_variables = self @@ -1500,46 +1533,6 @@ impl<'block> BrilligBlock<'block> { new_variable } } - Value::Array { array, typ } => { - if self.variables.is_allocated(&value_id) { - self.variables.get_allocation(self.function_context, value_id, dfg) - } else { - let new_variable = self.variables.define_variable( - self.function_context, - self.brillig_context, - value_id, - dfg, - ); - - // Initialize the variable - match new_variable { - BrilligVariable::BrilligArray(brillig_array) => { - self.brillig_context.codegen_initialize_array(brillig_array); - } - BrilligVariable::BrilligVector(vector) => { - let size = self - .brillig_context - .make_usize_constant_instruction(array.len().into()); - self.brillig_context.codegen_initialize_vector(vector, size, None); - self.brillig_context.deallocate_single_addr(size); - } - _ => unreachable!( - "ICE: Cannot initialize array value created as {new_variable:?}" - ), - }; - - // Write the items - let items_pointer = self - .brillig_context - .codegen_make_array_or_vector_items_pointer(new_variable); - - self.initialize_constant_array(array, typ, dfg, items_pointer); - - self.brillig_context.deallocate_register(items_pointer); - - new_variable - } - } Value::Function(_) => { // For the debugger instrumentation we want to allow passing // around values representing function pointers, even though diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/constant_allocation.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/constant_allocation.rs index f9ded224b33..61ca20be2f5 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/constant_allocation.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/constant_allocation.rs @@ -89,8 +89,7 @@ impl ConstantAllocation { } if let Some(terminator_instruction) = block.terminator() { terminator_instruction.for_each_value(|value_id| { - let variables = 
collect_variables_of_value(value_id, &func.dfg); - for variable in variables { + if let Some(variable) = collect_variables_of_value(value_id, &func.dfg) { record_if_constant(block_id, variable, InstructionLocation::Terminator); } }); @@ -166,7 +165,7 @@ impl ConstantAllocation { } pub(crate) fn is_constant_value(id: ValueId, dfg: &DataFlowGraph) -> bool { - matches!(&dfg[dfg.resolve(id)], Value::NumericConstant { .. } | Value::Array { .. }) + matches!(&dfg[dfg.resolve(id)], Value::NumericConstant { .. }) } /// For a given function, finds all the blocks that are within loops diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/variable_liveness.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/variable_liveness.rs index a18461bc0cd..87165c36dff 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/variable_liveness.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/variable_liveness.rs @@ -45,32 +45,19 @@ fn find_back_edges( } /// Collects the underlying variables inside a value id. It might be more than one, for example in constant arrays that are constructed with multiple vars. -pub(crate) fn collect_variables_of_value(value_id: ValueId, dfg: &DataFlowGraph) -> Vec { +pub(crate) fn collect_variables_of_value( + value_id: ValueId, + dfg: &DataFlowGraph, +) -> Option { let value_id = dfg.resolve(value_id); let value = &dfg[value_id]; match value { - Value::Instruction { .. } | Value::Param { .. } => { - vec![value_id] - } - // Literal arrays are constants, but might use variable values to initialize. - Value::Array { array, .. } => { - let mut value_ids = vec![value_id]; - - array.iter().for_each(|item_id| { - let underlying_ids = collect_variables_of_value(*item_id, dfg); - value_ids.extend(underlying_ids); - }); - - value_ids - } - Value::NumericConstant { .. } => { - vec![value_id] + Value::Instruction { .. } | Value::Param { .. } | Value::NumericConstant { .. } => { + Some(value_id) } // Functions are not variables in a defunctionalized SSA. Only constant function values should appear. - Value::ForeignFunction(_) | Value::Function(_) | Value::Intrinsic(..) => { - vec![] - } + Value::ForeignFunction(_) | Value::Function(_) | Value::Intrinsic(..) => None, } } diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir/codegen_stack.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir/codegen_stack.rs index a0e2a500e20..599c05fc0e8 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_ir/codegen_stack.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_ir/codegen_stack.rs @@ -47,6 +47,7 @@ impl BrilligContext< let destinations_of_temp = movements_map.remove(first_source).unwrap(); movements_map.insert(temp_register, destinations_of_temp); } + // After removing loops we should have an DAG with each node having only one ancestor (but could have multiple successors) // Now we should be able to move the registers just by performing a DFS on the movements map let heads: Vec<_> = movements_map @@ -54,6 +55,7 @@ impl BrilligContext< .filter(|source| !destinations_set.contains(source)) .copied() .collect(); + for head in heads { self.perform_movements(&movements_map, head); } diff --git a/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs b/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs index d8c870a5006..4aca59855a1 100644 --- a/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs +++ b/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs @@ -767,6 +767,12 @@ impl<'a> Context<'a> { Instruction::IfElse { .. 
} => { unreachable!("IfElse instruction remaining in acir-gen") } + Instruction::MakeArray { elements, typ: _ } => { + let elements = elements.iter().map(|element| self.convert_value(*element, dfg)); + let value = AcirValue::Array(elements.collect()); + let result = dfg.instruction_results(instruction_id)[0]; + self.ssa_values.insert(result, value); + } } self.acir_context.set_call_stack(CallStack::new()); @@ -1557,7 +1563,7 @@ impl<'a> Context<'a> { if !already_initialized { let value = &dfg[array]; match value { - Value::Array { .. } | Value::Instruction { .. } => { + Value::Instruction { .. } => { let value = self.convert_value(array, dfg); let array_typ = dfg.type_of_value(array); let len = if !array_typ.contains_slice_element() { @@ -1600,13 +1606,6 @@ impl<'a> Context<'a> { match array_typ { Type::Array(_, _) | Type::Slice(_) => { match &dfg[array_id] { - Value::Array { array, .. } => { - for (i, value) in array.iter().enumerate() { - flat_elem_type_sizes.push( - self.flattened_slice_size(*value, dfg) + flat_elem_type_sizes[i], - ); - } - } Value::Instruction { .. } | Value::Param { .. } => { // An instruction representing the slice means it has been processed previously during ACIR gen. // Use the previously defined result of an array operation to fetch the internal type information. @@ -1739,13 +1738,6 @@ impl<'a> Context<'a> { fn flattened_slice_size(&mut self, array_id: ValueId, dfg: &DataFlowGraph) -> usize { let mut size = 0; match &dfg[array_id] { - Value::Array { array, .. } => { - // The array is going to be the flattened outer array - // Flattened slice size from SSA value does not need to be multiplied by the len - for value in array { - size += self.flattened_slice_size(*value, dfg); - } - } Value::NumericConstant { .. } => { size += 1; } @@ -1909,10 +1901,6 @@ impl<'a> Context<'a> { Value::NumericConstant { constant, typ } => { AcirValue::Var(self.acir_context.add_constant(*constant), typ.into()) } - Value::Array { array, .. } => { - let elements = array.iter().map(|element| self.convert_value(*element, dfg)); - AcirValue::Array(elements.collect()) - } Value::Intrinsic(..) => todo!(), Value::Function(function_id) => { // This conversion is for debugging support only, to allow the diff --git a/compiler/noirc_evaluator/src/ssa/checks/check_for_underconstrained_values.rs b/compiler/noirc_evaluator/src/ssa/checks/check_for_underconstrained_values.rs index 90eb79ccb69..cf884c98be9 100644 --- a/compiler/noirc_evaluator/src/ssa/checks/check_for_underconstrained_values.rs +++ b/compiler/noirc_evaluator/src/ssa/checks/check_for_underconstrained_values.rs @@ -191,7 +191,8 @@ impl Context { | Instruction::Load { .. } | Instruction::Not(..) | Instruction::Store { .. } - | Instruction::Truncate { .. } => { + | Instruction::Truncate { .. } + | Instruction::MakeArray { .. } => { self.value_sets.push(instruction_arguments_and_results); } @@ -247,8 +248,7 @@ impl Context { Value::ForeignFunction(..) => { panic!("Should not be able to reach foreign function from non-brillig functions, {func_id} in function {}", function.name()); } - Value::Array { .. } - | Value::Instruction { .. } + Value::Instruction { .. } | Value::NumericConstant { .. } | Value::Param { .. 
} => { panic!("At the point we are running disconnect there shouldn't be any other values as arguments") diff --git a/compiler/noirc_evaluator/src/ssa/function_builder/data_bus.rs b/compiler/noirc_evaluator/src/ssa/function_builder/data_bus.rs index 5a62e9c8e9a..e4a2eeb8c22 100644 --- a/compiler/noirc_evaluator/src/ssa/function_builder/data_bus.rs +++ b/compiler/noirc_evaluator/src/ssa/function_builder/data_bus.rs @@ -1,6 +1,6 @@ use std::{collections::BTreeMap, sync::Arc}; -use crate::ssa::ir::{types::Type, value::ValueId}; +use crate::ssa::ir::{function::RuntimeType, types::Type, value::ValueId}; use acvm::FieldElement; use fxhash::FxHashMap as HashMap; use noirc_frontend::ast; @@ -100,7 +100,8 @@ impl DataBus { ) -> DataBus { let mut call_data_args = Vec::new(); for call_data_item in call_data { - let array_id = call_data_item.databus.expect("Call data should have an array id"); + // databus can be None if `main` is a brillig function + let Some(array_id) = call_data_item.databus else { continue }; let call_data_id = call_data_item.call_data_id.expect("Call data should have a user id"); call_data_args.push(CallData { array_id, call_data_id, index_map: call_data_item.map }); @@ -161,13 +162,11 @@ impl FunctionBuilder { } let len = databus.values.len(); - let array = if len > 0 { - let array = self - .array_constant(databus.values, Type::Array(Arc::new(vec![Type::field()]), len)); - Some(array) - } else { - None - }; + let array = (len > 0 && matches!(self.current_function.runtime(), RuntimeType::Acir(_))) + .then(|| { + let array_type = Type::Array(Arc::new(vec![Type::field()]), len); + self.insert_make_array(databus.values, array_type) + }); DataBusBuilder { index: 0, diff --git a/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs b/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs index 61284b0be95..6f76585336a 100644 --- a/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs +++ b/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs @@ -136,11 +136,6 @@ impl FunctionBuilder { self.numeric_constant(value.into(), Type::length_type()) } - /// Insert an array constant into the current function with the given element values. - pub(crate) fn array_constant(&mut self, elements: im::Vector, typ: Type) -> ValueId { - self.current_function.dfg.make_array(elements, typ) - } - /// Returns the type of the given value. pub(crate) fn type_of_value(&self, value: ValueId) -> Type { self.current_function.dfg.type_of_value(value) @@ -355,6 +350,17 @@ impl FunctionBuilder { self.insert_instruction(Instruction::EnableSideEffectsIf { condition }, None); } + /// Insert a `make_array` instruction to create a new array or slice. + /// Returns the new array value. Expects `typ` to be an array or slice type. + pub(crate) fn insert_make_array( + &mut self, + elements: im::Vector, + typ: Type, + ) -> ValueId { + assert!(matches!(typ, Type::Array(..) | Type::Slice(_))); + self.insert_instruction(Instruction::MakeArray { elements, typ }, None).first() + } + /// Terminates the current block with the given terminator instruction /// if the current block does not already have a terminator instruction. 
fn terminate_block_with(&mut self, terminator: TerminatorInstruction) { @@ -504,7 +510,6 @@ mod tests { instruction::{Endian, Intrinsic}, map::Id, types::Type, - value::Value, }; use super::FunctionBuilder; @@ -526,10 +531,7 @@ mod tests { let call_results = builder.insert_call(to_bits_id, vec![input, length], result_types).into_owned(); - let slice = match &builder.current_function.dfg[call_results[0]] { - Value::Array { array, .. } => array, - _ => panic!(), - }; + let slice = builder.current_function.dfg.get_array_constant(call_results[0]).unwrap().0; assert_eq!(slice[0], one); assert_eq!(slice[1], one); assert_eq!(slice[2], one); diff --git a/compiler/noirc_evaluator/src/ssa/ir/dfg.rs b/compiler/noirc_evaluator/src/ssa/ir/dfg.rs index 2be9ffa9afa..e3f3f33682b 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/dfg.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/dfg.rs @@ -266,12 +266,6 @@ impl DataFlowGraph { id } - /// Create a new constant array value from the given elements - pub(crate) fn make_array(&mut self, array: im::Vector, typ: Type) -> ValueId { - assert!(matches!(typ, Type::Array(..) | Type::Slice(_))); - self.make_value(Value::Array { array, typ }) - } - /// Gets or creates a ValueId for the given FunctionId. pub(crate) fn import_function(&mut self, function: FunctionId) -> ValueId { if let Some(existing) = self.functions.get(&function) { @@ -458,8 +452,11 @@ impl DataFlowGraph { /// Otherwise, this returns None. pub(crate) fn get_array_constant(&self, value: ValueId) -> Option<(im::Vector, Type)> { match &self.values[self.resolve(value)] { + Value::Instruction { instruction, .. } => match &self.instructions[*instruction] { + Instruction::MakeArray { elements, typ } => Some((elements.clone(), typ.clone())), + _ => None, + }, // Arrays are shared, so cloning them is cheap - Value::Array { array, typ } => Some((array.clone(), typ.clone())), _ => None, } } @@ -522,8 +519,13 @@ impl DataFlowGraph { /// True if the given ValueId refers to a (recursively) constant value pub(crate) fn is_constant(&self, argument: ValueId) -> bool { match &self[self.resolve(argument)] { - Value::Instruction { .. } | Value::Param { .. } => false, - Value::Array { array, .. } => array.iter().all(|element| self.is_constant(*element)), + Value::Param { .. } => false, + Value::Instruction { instruction, .. } => match &self[*instruction] { + Instruction::MakeArray { elements, .. } => { + elements.iter().all(|element| self.is_constant(*element)) + } + _ => false, + }, _ => true, } } @@ -575,6 +577,7 @@ impl std::ops::IndexMut for DataFlowGraph { // The result of calling DataFlowGraph::insert_instruction can // be a list of results or a single ValueId if the instruction was simplified // to an existing value. +#[derive(Debug)] pub(crate) enum InsertInstructionResult<'dfg> { /// Results is the standard case containing the instruction id and the results of that instruction. Results(InstructionId, &'dfg [ValueId]), diff --git a/compiler/noirc_evaluator/src/ssa/ir/dom.rs b/compiler/noirc_evaluator/src/ssa/ir/dom.rs index 94f7a405c05..c1a7f14e0d1 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/dom.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/dom.rs @@ -86,7 +86,7 @@ impl DominatorTree { /// /// This function panics if either of the blocks are unreachable. /// - /// An instruction is considered to dominate itself. + /// A block is considered to dominate itself. 
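    /// For example, `dominates(b, b)` returns `true` for any reachable block `b`
    /// (illustrative note, not part of this change).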
 pub(crate) fn dominates(&mut self, block_a_id: BasicBlockId, block_b_id: BasicBlockId) -> bool {
         if let Some(res) = self.cache.get(&(block_a_id, block_b_id)) {
             return *res;
         }
diff --git a/compiler/noirc_evaluator/src/ssa/ir/function_inserter.rs b/compiler/noirc_evaluator/src/ssa/ir/function_inserter.rs
index 991ff22c902..708b02b9102 100644
--- a/compiler/noirc_evaluator/src/ssa/ir/function_inserter.rs
+++ b/compiler/noirc_evaluator/src/ssa/ir/function_inserter.rs
@@ -18,15 +18,26 @@ pub(crate) struct FunctionInserter<'f> {
     pub(crate) function: &'f mut Function,
     values: HashMap<ValueId, ValueId>,
+
     /// Map containing repeat array constants so that we do not initialize a new
     /// array unnecessarily. An extra tuple field is included as part of the key to
     /// distinguish between array/slice types.
-    const_arrays: HashMap<(im::Vector<ValueId>, Type), ValueId>,
+    ///
+    /// This is optional since caching arrays relies on the inserter inserting strictly
+    /// in control-flow order. Otherwise, if arrays later in the program are cached first,
+    /// they may be referred to by instructions earlier in the program.
+    array_cache: Option<ArrayCache>,
+
+    /// If this pass is loop unrolling, store the block before the loop to optionally
+    /// hoist any make_array instructions up to after they are retrieved from the `array_cache`.
+    pre_loop: Option<BasicBlockId>,
 }
 
+pub(crate) type ArrayCache = HashMap<im::Vector<ValueId>, HashMap<Type, ValueId>>;
+
 impl<'f> FunctionInserter<'f> {
     pub(crate) fn new(function: &'f mut Function) -> FunctionInserter<'f> {
-        Self { function, values: HashMap::default(), const_arrays: HashMap::default() }
+        Self { function, values: HashMap::default(), array_cache: None, pre_loop: None }
     }
 
     /// Resolves a ValueId to its new, updated value.
@@ -36,27 +47,7 @@ impl<'f> FunctionInserter<'f> {
         value = self.function.dfg.resolve(value);
         match self.values.get(&value) {
             Some(value) => self.resolve(*value),
-            None => match &self.function.dfg[value] {
-                super::value::Value::Array { array, typ } => {
-                    let array = array.clone();
-                    let typ = typ.clone();
-                    let new_array: im::Vector<ValueId> =
-                        array.iter().map(|id| self.resolve(*id)).collect();
-
-                    if let Some(fetched_value) =
-                        self.const_arrays.get(&(new_array.clone(), typ.clone()))
-                    {
-                        return *fetched_value;
-                    };
-
-                    let new_array_clone = new_array.clone();
-                    let new_id = self.function.dfg.make_array(new_array, typ.clone());
-                    self.values.insert(value, new_id);
-                    self.const_arrays.insert((new_array_clone, typ), new_id);
-                    new_id
-                }
-                _ => value,
-            },
+            None => value,
         }
     }
@@ -122,7 +113,7 @@ impl<'f> FunctionInserter<'f> {
         &mut self,
         instruction: Instruction,
         id: InstructionId,
-        block: BasicBlockId,
+        mut block: BasicBlockId,
         call_stack: CallStack,
     ) -> InsertInstructionResult {
         let results = self.function.dfg.instruction_results(id);
@@ -132,6 +123,30 @@ impl<'f> FunctionInserter<'f> {
             .requires_ctrl_typevars()
             .then(|| vecmap(&results, |result| self.function.dfg.type_of_value(*result)));
 
+        // Large arrays can lead to OOM panics if duplicated from being unrolled in loops.
+        // To prevent this, try to reuse the same ID for identical arrays instead of inserting
+        // another MakeArray instruction. Note that this assumes the function inserter is inserting
+        // in control-flow order. Otherwise we could refer to ValueIds defined later in the program.
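+        // For example, if a loop body builds `v1 = make_array [Field 1, Field 2] : [Field; 2]`,
+        // every unrolled iteration after the first can reuse `v1` from the cache rather than
+        // emitting a fresh `make_array` (and a fresh copy of the array) per iteration.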
+ let make_array = if let Instruction::MakeArray { elements, typ } = &instruction { + if self.array_is_constant(elements) { + if let Some(fetched_value) = self.get_cached_array(elements, typ) { + assert_eq!(results.len(), 1); + self.values.insert(results[0], fetched_value); + return InsertInstructionResult::SimplifiedTo(fetched_value); + } + + // Hoist constant arrays out of the loop and cache their value + if let Some(pre_loop) = self.pre_loop { + block = pre_loop; + } + Some((elements.clone(), typ.clone())) + } else { + None + } + } else { + None + }; + let new_results = self.function.dfg.insert_instruction_and_results( instruction, block, @@ -139,10 +154,54 @@ impl<'f> FunctionInserter<'f> { call_stack, ); + // Cache an array in the fresh_array_cache if array caching is enabled. + // The fresh cache isn't used for deduplication until an external pass confirms we + // pass a sequence point and all blocks that may be before the current insertion point + // are finished. + if let Some((elements, typ)) = make_array { + Self::cache_array(&mut self.array_cache, elements, typ, new_results.first()); + } + Self::insert_new_instruction_results(&mut self.values, &results, &new_results); new_results } + fn get_cached_array(&self, elements: &im::Vector, typ: &Type) -> Option { + self.array_cache.as_ref()?.get(elements)?.get(typ).copied() + } + + fn cache_array( + arrays: &mut Option, + elements: im::Vector, + typ: Type, + result_id: ValueId, + ) { + if let Some(arrays) = arrays { + arrays.entry(elements).or_default().insert(typ, result_id); + } + } + + fn array_is_constant(&self, elements: &im::Vector) -> bool { + elements.iter().all(|element| self.function.dfg.is_constant(*element)) + } + + pub(crate) fn set_array_cache( + &mut self, + new_cache: Option, + pre_loop: BasicBlockId, + ) { + self.array_cache = new_cache; + self.pre_loop = Some(pre_loop); + } + + /// Finish this inserter, returning its array cache merged with the fresh array cache. + /// Since this consumes the inserter this assumes we're at a sequence point where all + /// predecessor blocks to the current block are finished. Since this is true, the fresh + /// array cache can be merged with the existing array cache. + pub(crate) fn into_array_cache(self) -> Option { + self.array_cache + } + /// Modify the values HashMap to remember the mapping between an instruction result's previous /// ValueId (from the source_function) and its new ValueId in the destination function. pub(crate) fn insert_new_instruction_results( diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction.rs index 254a0afe88b..f56dfaa7c65 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction.rs @@ -282,6 +282,12 @@ pub(crate) enum Instruction { else_condition: ValueId, else_value: ValueId, }, + + /// Creates a new array or slice. + /// + /// `typ` should be an array or slice type with an element type + /// matching each of the `elements` values' types. + MakeArray { elements: im::Vector, typ: Type }, } impl Instruction { @@ -294,7 +300,9 @@ impl Instruction { pub(crate) fn result_type(&self) -> InstructionResultType { match self { Instruction::Binary(binary) => binary.result_type(), - Instruction::Cast(_, typ) => InstructionResultType::Known(typ.clone()), + Instruction::Cast(_, typ) | Instruction::MakeArray { typ, .. } => { + InstructionResultType::Known(typ.clone()) + } Instruction::Not(value) | Instruction::Truncate { value, .. 
} | Instruction::ArraySet { array: value, .. } @@ -348,6 +356,9 @@ impl Instruction { // We can deduplicate these instructions if we know the predicate is also the same. Constrain(..) | RangeCheck { .. } => deduplicate_with_predicate, + // This should never be side-effectful + MakeArray { .. } => true, + // These can have different behavior depending on the EnableSideEffectsIf context. // Replacing them with a similar instruction potentially enables replacing an instruction // with one that was disabled. See @@ -385,7 +396,8 @@ impl Instruction { | Load { .. } | ArrayGet { .. } | IfElse { .. } - | ArraySet { .. } => true, + | ArraySet { .. } + | MakeArray { .. } => true, // Store instructions must be removed by DIE in acir code, any load // instructions should already be unused by that point. @@ -459,7 +471,8 @@ impl Instruction { | Instruction::Store { .. } | Instruction::IfElse { .. } | Instruction::IncrementRc { .. } - | Instruction::DecrementRc { .. } => false, + | Instruction::DecrementRc { .. } + | Instruction::MakeArray { .. } => false, } } @@ -487,7 +500,7 @@ impl Instruction { let assert_message = assert_message.as_ref().map(|error| match error { ConstrainError::Dynamic(selector, payload_values) => ConstrainError::Dynamic( *selector, - payload_values.iter().map(|&value| f(value)).collect(), + vecmap(payload_values.iter().copied(), f), ), _ => error.clone(), }); @@ -531,6 +544,10 @@ impl Instruction { else_value: f(*else_value), } } + Instruction::MakeArray { elements, typ } => Instruction::MakeArray { + elements: elements.iter().copied().map(f).collect(), + typ: typ.clone(), + }, } } @@ -591,6 +608,11 @@ impl Instruction { f(*else_condition); f(*else_value); } + Instruction::MakeArray { elements, typ: _ } => { + for element in elements { + f(*element); + } + } } } @@ -646,20 +668,28 @@ impl Instruction { None } } - Instruction::ArraySet { array, index, value, .. } => { - let array_const = dfg.get_array_constant(*array); - let index_const = dfg.get_numeric_constant(*index); - if let (Some((array, element_type)), Some(index)) = (array_const, index_const) { + Instruction::ArraySet { array: array_id, index: index_id, value, .. } => { + let array = dfg.get_array_constant(*array_id); + let index = dfg.get_numeric_constant(*index_id); + if let (Some((array, _element_type)), Some(index)) = (array, index) { let index = index.try_to_u32().expect("Expected array index to fit in u32") as usize; if index < array.len() { - let new_array = dfg.make_array(array.update(index, *value), element_type); - return SimplifiedTo(new_array); + let elements = array.update(index, *value); + let typ = dfg.type_of_value(*array_id); + let instruction = Instruction::MakeArray { elements, typ }; + let new_array = dfg.insert_instruction_and_results( + instruction, + block, + Option::None, + call_stack.clone(), + ); + return SimplifiedTo(new_array.first()); } } - try_optimize_array_set_from_previous_get(dfg, *array, *index, *value) + try_optimize_array_set_from_previous_get(dfg, *array_id, *index_id, *value) } Instruction::Truncate { value, bit_size, max_bit_size } => { if bit_size == max_bit_size { @@ -772,6 +802,7 @@ impl Instruction { None } } + Instruction::MakeArray { .. 
} => None, } } } @@ -815,13 +846,13 @@ fn try_optimize_array_get_from_previous_set( return SimplifyResult::None; } } + Instruction::MakeArray { elements: array, typ: _ } => { + elements = Some(array.clone()); + break; + } _ => return SimplifyResult::None, } } - Value::Array { array, typ: _ } => { - elements = Some(array.clone()); - break; - } _ => return SimplifyResult::None, } } diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs index 9dbd2c56993..5f0626b7ee8 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs @@ -60,7 +60,7 @@ pub(super) fn simplify_call( } else { unreachable!("ICE: Intrinsic::ToRadix return type must be array") }; - constant_to_radix(endian, field, 2, limb_count, dfg) + constant_to_radix(endian, field, 2, limb_count, dfg, block, call_stack) } else { SimplifyResult::None } @@ -77,7 +77,7 @@ pub(super) fn simplify_call( } else { unreachable!("ICE: Intrinsic::ToRadix return type must be array") }; - constant_to_radix(endian, field, radix, limb_count, dfg) + constant_to_radix(endian, field, radix, limb_count, dfg, block, call_stack) } else { SimplifyResult::None } @@ -109,7 +109,8 @@ pub(super) fn simplify_call( let slice_length_value = array.len() / elements_size; let slice_length = dfg.make_constant(slice_length_value.into(), Type::length_type()); - let new_slice = dfg.make_array(array, Type::Slice(inner_element_types)); + let new_slice = + make_array(dfg, array, Type::Slice(inner_element_types), block, call_stack); SimplifyResult::SimplifiedToMultiple(vec![slice_length, new_slice]) } else { SimplifyResult::None @@ -129,7 +130,7 @@ pub(super) fn simplify_call( let new_slice_length = update_slice_length(arguments[0], dfg, BinaryOp::Add, block); - let new_slice = dfg.make_array(slice, element_type); + let new_slice = make_array(dfg, slice, element_type, block, call_stack); return SimplifyResult::SimplifiedToMultiple(vec![new_slice_length, new_slice]); } @@ -154,7 +155,7 @@ pub(super) fn simplify_call( let new_slice_length = update_slice_length(arguments[0], dfg, BinaryOp::Add, block); - let new_slice = dfg.make_array(slice, element_type); + let new_slice = make_array(dfg, slice, element_type, block, call_stack); SimplifyResult::SimplifiedToMultiple(vec![new_slice_length, new_slice]) } else { SimplifyResult::None @@ -196,7 +197,7 @@ pub(super) fn simplify_call( results.push(new_slice_length); - let new_slice = dfg.make_array(slice, typ); + let new_slice = make_array(dfg, slice, typ, block, call_stack); // The slice is the last item returned for pop_front results.push(new_slice); @@ -227,7 +228,7 @@ pub(super) fn simplify_call( let new_slice_length = update_slice_length(arguments[0], dfg, BinaryOp::Add, block); - let new_slice = dfg.make_array(slice, typ); + let new_slice = make_array(dfg, slice, typ, block, call_stack); SimplifyResult::SimplifiedToMultiple(vec![new_slice_length, new_slice]) } else { SimplifyResult::None @@ -260,7 +261,7 @@ pub(super) fn simplify_call( results.push(slice.remove(index)); } - let new_slice = dfg.make_array(slice, typ); + let new_slice = make_array(dfg, slice, typ, block, call_stack); results.insert(0, new_slice); let new_slice_length = update_slice_length(arguments[0], dfg, BinaryOp::Sub, block); @@ -317,7 +318,9 @@ pub(super) fn simplify_call( SimplifyResult::None } } - Intrinsic::BlackBox(bb_func) => simplify_black_box_func(bb_func, arguments, dfg), + Intrinsic::BlackBox(bb_func) => { + 
simplify_black_box_func(bb_func, arguments, dfg, block, call_stack) + } Intrinsic::AsField => { let instruction = Instruction::Cast( arguments[0], @@ -350,7 +353,7 @@ pub(super) fn simplify_call( Intrinsic::IsUnconstrained => SimplifyResult::None, Intrinsic::DerivePedersenGenerators => { if let Some(Type::Array(_, len)) = ctrl_typevars.unwrap().first() { - simplify_derive_generators(dfg, arguments, *len as u32) + simplify_derive_generators(dfg, arguments, *len as u32, block, call_stack) } else { unreachable!("Derive Pedersen Generators must return an array"); } @@ -419,7 +422,7 @@ fn simplify_slice_push_back( } let slice_size = slice.len(); let element_size = element_type.element_size(); - let new_slice = dfg.make_array(slice, element_type); + let new_slice = make_array(dfg, slice, element_type, block, &call_stack); let set_last_slice_value_instr = Instruction::ArraySet { array: new_slice, @@ -505,6 +508,8 @@ fn simplify_black_box_func( bb_func: BlackBoxFunc, arguments: &[ValueId], dfg: &mut DataFlowGraph, + block: BasicBlockId, + call_stack: &CallStack, ) -> SimplifyResult { cfg_if::cfg_if! { if #[cfg(feature = "bn254")] { @@ -514,8 +519,12 @@ fn simplify_black_box_func( } }; match bb_func { - BlackBoxFunc::Blake2s => simplify_hash(dfg, arguments, acvm::blackbox_solver::blake2s), - BlackBoxFunc::Blake3 => simplify_hash(dfg, arguments, acvm::blackbox_solver::blake3), + BlackBoxFunc::Blake2s => { + simplify_hash(dfg, arguments, acvm::blackbox_solver::blake2s, block, call_stack) + } + BlackBoxFunc::Blake3 => { + simplify_hash(dfg, arguments, acvm::blackbox_solver::blake3, block, call_stack) + } BlackBoxFunc::Keccakf1600 => { if let Some((array_input, _)) = dfg.get_array_constant(arguments[0]) { if array_is_constant(dfg, &array_input) { @@ -533,8 +542,14 @@ fn simplify_black_box_func( const_input.try_into().expect("Keccakf1600 input should have length of 25"), ) .expect("Rust solvable black box function should not fail"); - let state_values = vecmap(state, |x| FieldElement::from(x as u128)); - let result_array = make_constant_array(dfg, state_values, Type::unsigned(64)); + let state_values = state.iter().map(|x| FieldElement::from(*x as u128)); + let result_array = make_constant_array( + dfg, + state_values, + Type::unsigned(64), + block, + call_stack, + ); SimplifyResult::SimplifiedTo(result_array) } else { SimplifyResult::None @@ -544,7 +559,7 @@ fn simplify_black_box_func( } } BlackBoxFunc::Poseidon2Permutation => { - blackbox::simplify_poseidon2_permutation(dfg, solver, arguments) + blackbox::simplify_poseidon2_permutation(dfg, solver, arguments, block, call_stack) } BlackBoxFunc::EcdsaSecp256k1 => blackbox::simplify_signature( dfg, @@ -558,7 +573,9 @@ fn simplify_black_box_func( ), BlackBoxFunc::MultiScalarMul => SimplifyResult::None, - BlackBoxFunc::EmbeddedCurveAdd => blackbox::simplify_ec_add(dfg, solver, arguments), + BlackBoxFunc::EmbeddedCurveAdd => { + blackbox::simplify_ec_add(dfg, solver, arguments, block, call_stack) + } BlackBoxFunc::SchnorrVerify => blackbox::simplify_schnorr_verify(dfg, solver, arguments), BlackBoxFunc::BigIntAdd @@ -585,23 +602,47 @@ fn simplify_black_box_func( } } -fn make_constant_array(dfg: &mut DataFlowGraph, results: Vec, typ: Type) -> ValueId { - let result_constants = vecmap(results, |element| dfg.make_constant(element, typ.clone())); +fn make_constant_array( + dfg: &mut DataFlowGraph, + results: impl Iterator, + typ: Type, + block: BasicBlockId, + call_stack: &CallStack, +) -> ValueId { + let result_constants: im::Vector<_> = + results.map(|element| 
dfg.make_constant(element, typ.clone())).collect(); let typ = Type::Array(Arc::new(vec![typ]), result_constants.len()); - dfg.make_array(result_constants.into(), typ) + make_array(dfg, result_constants, typ, block, call_stack) +} + +fn make_array( + dfg: &mut DataFlowGraph, + elements: im::Vector, + typ: Type, + block: BasicBlockId, + call_stack: &CallStack, +) -> ValueId { + let instruction = Instruction::MakeArray { elements, typ }; + let call_stack = call_stack.clone(); + dfg.insert_instruction_and_results(instruction, block, None, call_stack).first() } fn make_constant_slice( dfg: &mut DataFlowGraph, results: Vec, typ: Type, + block: BasicBlockId, + call_stack: &CallStack, ) -> (ValueId, ValueId) { let result_constants = vecmap(results, |element| dfg.make_constant(element, typ.clone())); let typ = Type::Slice(Arc::new(vec![typ])); let length = FieldElement::from(result_constants.len() as u128); - (dfg.make_constant(length, Type::length_type()), dfg.make_array(result_constants.into(), typ)) + let length = dfg.make_constant(length, Type::length_type()); + + let slice = make_array(dfg, result_constants.into(), typ, block, call_stack); + (length, slice) } /// Returns a slice (represented by a tuple (len, slice)) of constants corresponding to the limbs of the radix decomposition. @@ -611,6 +652,8 @@ fn constant_to_radix( radix: u32, limb_count: u32, dfg: &mut DataFlowGraph, + block: BasicBlockId, + call_stack: &CallStack, ) -> SimplifyResult { let bit_size = u32::BITS - (radix - 1).leading_zeros(); let radix_big = BigUint::from(radix); @@ -631,7 +674,13 @@ fn constant_to_radix( if endian == Endian::Big { limbs.reverse(); } - let result_array = make_constant_array(dfg, limbs, Type::unsigned(bit_size)); + let result_array = make_constant_array( + dfg, + limbs.into_iter(), + Type::unsigned(bit_size), + block, + call_stack, + ); SimplifyResult::SimplifiedTo(result_array) } } @@ -656,6 +705,8 @@ fn simplify_hash( dfg: &mut DataFlowGraph, arguments: &[ValueId], hash_function: fn(&[u8]) -> Result<[u8; 32], BlackBoxResolutionError>, + block: BasicBlockId, + call_stack: &CallStack, ) -> SimplifyResult { match dfg.get_array_constant(arguments[0]) { Some((input, _)) if array_is_constant(dfg, &input) => { @@ -664,9 +715,10 @@ fn simplify_hash( let hash = hash_function(&input_bytes) .expect("Rust solvable black box function should not fail"); - let hash_values = vecmap(hash, |byte| FieldElement::from_be_bytes_reduce(&[byte])); + let hash_values = hash.iter().map(|byte| FieldElement::from_be_bytes_reduce(&[*byte])); - let result_array = make_constant_array(dfg, hash_values, Type::unsigned(8)); + let result_array = + make_constant_array(dfg, hash_values, Type::unsigned(8), block, call_stack); SimplifyResult::SimplifiedTo(result_array) } _ => SimplifyResult::None, @@ -725,6 +777,8 @@ fn simplify_derive_generators( dfg: &mut DataFlowGraph, arguments: &[ValueId], num_generators: u32, + block: BasicBlockId, + call_stack: &CallStack, ) -> SimplifyResult { if arguments.len() == 2 { let domain_separator_string = dfg.get_array_constant(arguments[0]); @@ -754,8 +808,8 @@ fn simplify_derive_generators( results.push(is_infinite); } let len = results.len(); - let result = - dfg.make_array(results.into(), Type::Array(vec![Type::field()].into(), len)); + let typ = Type::Array(vec![Type::field()].into(), len); + let result = make_array(dfg, results.into(), typ, block, call_stack); SimplifyResult::SimplifiedTo(result) } else { SimplifyResult::None diff --git 
a/compiler/noirc_evaluator/src/ssa/ir/instruction/call/blackbox.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction/call/blackbox.rs index 3881646d5e4..75405a87181 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction/call/blackbox.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction/call/blackbox.rs @@ -1,8 +1,13 @@ +use std::sync::Arc; + use acvm::{acir::AcirField, BlackBoxFunctionSolver, BlackBoxResolutionError, FieldElement}; -use iter_extended::vecmap; use crate::ssa::ir::{ - dfg::DataFlowGraph, instruction::SimplifyResult, types::Type, value::ValueId, + basic_block::BasicBlockId, + dfg::{CallStack, DataFlowGraph}, + instruction::{Instruction, SimplifyResult}, + types::Type, + value::ValueId, }; use super::{array_is_constant, make_constant_array, to_u8_vec}; @@ -11,6 +16,8 @@ pub(super) fn simplify_ec_add( dfg: &mut DataFlowGraph, solver: impl BlackBoxFunctionSolver, arguments: &[ValueId], + block: BasicBlockId, + call_stack: &CallStack, ) -> SimplifyResult { match ( dfg.get_numeric_constant(arguments[0]), @@ -39,13 +46,18 @@ pub(super) fn simplify_ec_add( return SimplifyResult::None; }; - let result_array = make_constant_array( - dfg, - vec![result_x, result_y, result_is_infinity], - Type::field(), - ); + let result_x = dfg.make_constant(result_x, Type::field()); + let result_y = dfg.make_constant(result_y, Type::field()); + let result_is_infinity = dfg.make_constant(result_is_infinity, Type::bool()); - SimplifyResult::SimplifiedTo(result_array) + let typ = Type::Array(Arc::new(vec![Type::field()]), 3); + + let elements = im::vector![result_x, result_y, result_is_infinity]; + let instruction = Instruction::MakeArray { elements, typ }; + let result_array = + dfg.insert_instruction_and_results(instruction, block, None, call_stack.clone()); + + SimplifyResult::SimplifiedTo(result_array.first()) } _ => SimplifyResult::None, } @@ -55,6 +67,8 @@ pub(super) fn simplify_poseidon2_permutation( dfg: &mut DataFlowGraph, solver: impl BlackBoxFunctionSolver, arguments: &[ValueId], + block: BasicBlockId, + call_stack: &CallStack, ) -> SimplifyResult { match (dfg.get_array_constant(arguments[0]), dfg.get_numeric_constant(arguments[1])) { (Some((state, _)), Some(state_length)) if array_is_constant(dfg, &state) => { @@ -74,7 +88,9 @@ pub(super) fn simplify_poseidon2_permutation( return SimplifyResult::None; }; - let result_array = make_constant_array(dfg, new_state, Type::field()); + let new_state = new_state.into_iter(); + let typ = Type::field(); + let result_array = make_constant_array(dfg, new_state, typ, block, call_stack); SimplifyResult::SimplifiedTo(result_array) } @@ -119,6 +135,8 @@ pub(super) fn simplify_hash( dfg: &mut DataFlowGraph, arguments: &[ValueId], hash_function: fn(&[u8]) -> Result<[u8; 32], BlackBoxResolutionError>, + block: BasicBlockId, + call_stack: &CallStack, ) -> SimplifyResult { match dfg.get_array_constant(arguments[0]) { Some((input, _)) if array_is_constant(dfg, &input) => { @@ -127,9 +145,10 @@ pub(super) fn simplify_hash( let hash = hash_function(&input_bytes) .expect("Rust solvable black box function should not fail"); - let hash_values = vecmap(hash, |byte| FieldElement::from_be_bytes_reduce(&[byte])); + let hash_values = hash.iter().map(|byte| FieldElement::from_be_bytes_reduce(&[*byte])); - let result_array = make_constant_array(dfg, hash_values, Type::unsigned(8)); + let u8_type = Type::unsigned(8); + let result_array = make_constant_array(dfg, hash_values, u8_type, block, call_stack); SimplifyResult::SimplifiedTo(result_array) } _ => 
SimplifyResult::None, diff --git a/compiler/noirc_evaluator/src/ssa/ir/printer.rs b/compiler/noirc_evaluator/src/ssa/ir/printer.rs index 21de85dbecb..a34e9c132ec 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/printer.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/printer.rs @@ -70,17 +70,6 @@ fn value(function: &Function, id: ValueId) -> String { } Value::Function(id) => id.to_string(), Value::Intrinsic(intrinsic) => intrinsic.to_string(), - Value::Array { array, typ } => { - let elements = vecmap(array, |element| value(function, *element)); - let element_types = &typ.clone().element_types(); - let element_types_str = - element_types.iter().map(|typ| typ.to_string()).collect::>().join(", "); - if element_types.len() == 1 { - format!("[{}] of {}", elements.join(", "), element_types_str) - } else { - format!("[{}] of ({})", elements.join(", "), element_types_str) - } - } Value::Param { .. } | Value::Instruction { .. } | Value::ForeignFunction(_) => { id.to_string() } @@ -231,6 +220,18 @@ fn display_instruction_inner( "if {then_condition} then {then_value} else if {else_condition} then {else_value}" ) } + Instruction::MakeArray { elements, typ } => { + write!(f, "make_array [")?; + + for (i, element) in elements.iter().enumerate() { + if i != 0 { + write!(f, ", ")?; + } + write!(f, "{}", show(*element))?; + } + + writeln!(f, "] : {typ}") + } } } @@ -254,13 +255,9 @@ pub(crate) fn try_to_extract_string_from_error_payload( ((error_selector == STRING_ERROR_SELECTOR) && (values.len() == 1)) .then_some(()) .and_then(|()| { - let Value::Array { array: values, .. } = &dfg[values[0]] else { - return None; - }; - let fields: Option> = - values.iter().map(|value_id| dfg.get_numeric_constant(*value_id)).collect(); - - fields + let (values, _) = &dfg.get_array_constant(values[0])?; + let values = values.iter().map(|value_id| dfg.get_numeric_constant(*value_id)); + values.collect::>>() }) .map(|fields| { fields diff --git a/compiler/noirc_evaluator/src/ssa/ir/value.rs b/compiler/noirc_evaluator/src/ssa/ir/value.rs index 795d45c75e9..ef494200308 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/value.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/value.rs @@ -36,9 +36,6 @@ pub(crate) enum Value { /// This Value originates from a numeric constant NumericConstant { constant: FieldElement, typ: Type }, - /// Represents a constant array value - Array { array: im::Vector, typ: Type }, - /// This Value refers to a function in the IR. /// Functions always have the type Type::Function. /// If the argument or return types are needed, users should retrieve @@ -63,7 +60,6 @@ impl Value { Value::Instruction { typ, .. } => typ, Value::Param { typ, .. } => typ, Value::NumericConstant { typ, .. } => typ, - Value::Array { typ, .. } => typ, Value::Function { .. } => &Type::Function, Value::Intrinsic { .. } => &Type::Function, Value::ForeignFunction { .. 
} => &Type::Function, diff --git a/compiler/noirc_evaluator/src/ssa/opt/array_set.rs b/compiler/noirc_evaluator/src/ssa/opt/array_set.rs index 865a1e31eb3..96de22600a4 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/array_set.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/array_set.rs @@ -199,29 +199,31 @@ mod tests { let src = " brillig(inline) fn main f0 { b0(): - v1 = allocate -> &mut [Field; 5] - store [[Field 0, Field 0, Field 0, Field 0, Field 0] of Field, [Field 0, Field 0, Field 0, Field 0, Field 0] of Field] of [Field; 5] at v1 - v6 = allocate -> &mut [Field; 5] - store [[Field 0, Field 0, Field 0, Field 0, Field 0] of Field, [Field 0, Field 0, Field 0, Field 0, Field 0] of Field] of [Field; 5] at v6 + v2 = make_array [Field 0, Field 0, Field 0, Field 0, Field 0] : [Field; 5] + v3 = make_array [v2, v2] : [[Field; 5]; 2] + v4 = allocate -> &mut [Field; 5] + store v3 at v4 + v5 = allocate -> &mut [Field; 5] + store v3 at v5 jmp b1(u32 0) b1(v0: u32): - v12 = lt v0, u32 5 - jmpif v12 then: b3, else: b2 + v8 = lt v0, u32 5 + jmpif v8 then: b3, else: b2 b3(): - v13 = eq v0, u32 5 - jmpif v13 then: b4, else: b5 + v9 = eq v0, u32 5 + jmpif v9 then: b4, else: b5 b4(): - v14 = load v1 -> [[Field; 5]; 2] - store v14 at v6 + v10 = load v4 -> [[Field; 5]; 2] + store v10 at v5 jmp b5() b5(): - v15 = load v1 -> [[Field; 5]; 2] - v16 = array_get v15, index Field 0 -> [Field; 5] - v18 = array_set v16, index v0, value Field 20 - v19 = array_set v15, index v0, value v18 - store v19 at v1 - v21 = add v0, u32 1 - jmp b1(v21) + v11 = load v4 -> [[Field; 5]; 2] + v12 = array_get v11, index Field 0 -> [Field; 5] + v14 = array_set v12, index v0, value Field 20 + v15 = array_set v11, index v0, value v14 + store v15 at v4 + v17 = add v0, u32 1 + jmp b1(v17) b2(): return } diff --git a/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs b/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs index de8e5b25926..32f66e5a0f0 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs @@ -115,7 +115,7 @@ type InstructionResultCache = HashMap, Resu /// For more information see [`InstructionResultCache`]. #[derive(Default)] struct ResultCache { - results: Vec<(BasicBlockId, Vec)>, + result: Option<(BasicBlockId, Vec)>, } impl Context { @@ -150,7 +150,7 @@ impl Context { fn fold_constants_into_instruction( &mut self, dfg: &mut DataFlowGraph, - block: BasicBlockId, + mut block: BasicBlockId, id: InstructionId, side_effects_enabled_var: &mut ValueId, ) { @@ -159,11 +159,22 @@ impl Context { let old_results = dfg.instruction_results(id).to_vec(); // If a copy of this instruction exists earlier in the block, then reuse the previous results. - if let Some(cached_results) = + if let Some(cache_result) = self.get_cached(dfg, &instruction, *side_effects_enabled_var, block) { - Self::replace_result_ids(dfg, &old_results, cached_results); - return; + match cache_result { + CacheResult::Cached(cached) => { + Self::replace_result_ids(dfg, &old_results, cached); + return; + } + CacheResult::NeedToHoistToCommonBlock(dominator, _cached) => { + // Just change the block to insert in the common dominator instead. + // This will only move the current instance of the instruction right now. + // When constant folding is run a second time later on, it'll catch + // that the previous instance can be deduplicated to this instance. 
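+                    // For example, if `v4 = add v0, u32 1` appears in two sibling branches b1 and b3,
+                    // the copy encountered in b3 is inserted into their common dominator b0 instead;
+                    // a second run of the pass can then deduplicate the remaining copy in b1 against
+                    // the hoisted instruction (see `deduplicate_across_non_dominated_blocks` below).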
+ block = dominator; + } + } } // Otherwise, try inserting the instruction again to apply any optimizations using the newly resolved inputs. @@ -317,13 +328,13 @@ impl Context { } } - fn get_cached<'a>( - &'a mut self, + fn get_cached( + &mut self, dfg: &DataFlowGraph, instruction: &Instruction, side_effects_enabled_var: ValueId, block: BasicBlockId, - ) -> Option<&'a [ValueId]> { + ) -> Option { let results_for_instruction = self.cached_instruction_results.get(instruction)?; let predicate = self.use_constraint_info && instruction.requires_acir_gen_predicate(dfg); @@ -336,7 +347,9 @@ impl Context { impl ResultCache { /// Records that an `Instruction` in block `block` produced the result values `results`. fn cache(&mut self, block: BasicBlockId, results: Vec) { - self.results.push((block, results)); + if self.result.is_none() { + self.result = Some((block, results)); + } } /// Returns a set of [`ValueId`]s produced from a copy of this [`Instruction`] which sits @@ -345,16 +358,24 @@ impl ResultCache { /// We require that the cached instruction's block dominates `block` in order to avoid /// cycles causing issues (e.g. two instructions being replaced with the results of each other /// such that neither instruction exists anymore.) - fn get(&self, block: BasicBlockId, dom: &mut DominatorTree) -> Option<&[ValueId]> { - for (origin_block, results) in &self.results { + fn get(&self, block: BasicBlockId, dom: &mut DominatorTree) -> Option { + self.result.as_ref().map(|(origin_block, results)| { if dom.dominates(*origin_block, block) { - return Some(results); + CacheResult::Cached(results) + } else { + // Insert a copy of this instruction in the common dominator + let dominator = dom.common_dominator(*origin_block, block); + CacheResult::NeedToHoistToCommonBlock(dominator, results) } - } - None + }) } } +enum CacheResult<'a> { + Cached(&'a [ValueId]), + NeedToHoistToCommonBlock(BasicBlockId, &'a [ValueId]), +} + #[cfg(test)] mod test { use std::sync::Arc; @@ -484,7 +505,8 @@ mod test { acir(inline) fn main f0 { b0(v0: Field): v2 = add v0, Field 1 - return [v2] of Field + v3 = make_array [v2] : [Field; 1] + return v3 } "; let ssa = Ssa::from_str(src).unwrap(); @@ -590,6 +612,16 @@ mod test { // Regression for #4600 #[test] fn array_get_regression() { + // fn main f0 { + // b0(v0: u1, v1: u64): + // enable_side_effects_if v0 + // v2 = make_array [Field 0, Field 1] + // v3 = array_get v2, index v1 + // v4 = not v0 + // enable_side_effects_if v4 + // v5 = array_get v2, index v1 + // } + // // We want to make sure after constant folding both array_gets remain since they are // under different enable_side_effects_if contexts and thus one may be disabled while // the other is not. If one is removed, it is possible e.g. v4 is replaced with v2 which @@ -598,10 +630,11 @@ mod test { acir(inline) fn main f0 { b0(v0: u1, v1: u64): enable_side_effects v0 - v5 = array_get [Field 0, Field 1] of Field, index v1 -> Field + v4 = make_array [Field 0, Field 1] : [Field; 2] + v5 = array_get v4, index v1 -> Field v6 = not v0 enable_side_effects v6 - v8 = array_get [Field 0, Field 1] of Field, index v1 -> Field + v7 = array_get v4, index v1 -> Field return } "; @@ -662,14 +695,14 @@ mod test { assert_normalized_ssa_equals(ssa, expected); } - // This test currently fails. 
It being fixed will address the issue https://github.com/noir-lang/noir/issues/5756 #[test] - #[should_panic] fn constant_array_deduplication() { // fn main f0 { // b0(v0: u64): - // v5 = call keccakf1600([v0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0]) - // v6 = call keccakf1600([v0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0]) + // v1 = make_array [v0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0] + // v2 = make_array [v0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0] + // v5 = call keccakf1600(v1) + // v6 = call keccakf1600(v2) // } // // Here we're checking a situation where two identical arrays are being initialized twice and being assigned separate `ValueId`s. @@ -682,19 +715,20 @@ mod test { let zero = builder.numeric_constant(0u128, Type::unsigned(64)); let typ = Type::Array(Arc::new(vec![Type::unsigned(64)]), 25); - let array_contents = vec![ + let array_contents = im::vector![ v0, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, zero, ]; - let array1 = builder.array_constant(array_contents.clone().into(), typ.clone()); - let array2 = builder.array_constant(array_contents.into(), typ.clone()); + let array1 = builder.insert_make_array(array_contents.clone(), typ.clone()); + let array2 = builder.insert_make_array(array_contents, typ.clone()); - assert_eq!(array1, array2, "arrays were assigned different value ids"); + assert_ne!(array1, array2, "arrays were not assigned different value ids"); let keccakf1600 = builder.import_intrinsic("keccakf1600").expect("keccakf1600 intrinsic should exist"); let _v10 = builder.insert_call(keccakf1600, vec![array1], vec![typ.clone()]); let _v11 = builder.insert_call(keccakf1600, vec![array2], vec![typ.clone()]); + builder.terminate_with_return(Vec::new()); let mut ssa = builder.finish(); ssa.normalize_ids(); @@ -704,8 +738,13 @@ mod test { let main = ssa.main(); let instructions = main.dfg[main.entry_block()].instructions(); let starting_instruction_count = instructions.len(); - assert_eq!(starting_instruction_count, 2); + assert_eq!(starting_instruction_count, 4); + // fn main f0 { + // b0(v0: u64): + // v1 = make_array [v0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0, u64 0] + // v5 = call keccakf1600(v1) + // } let ssa = ssa.fold_constants(); println!("{ssa}"); @@ -713,7 +752,7 @@ mod test { let main = ssa.main(); let instructions = main.dfg[main.entry_block()].instructions(); let ending_instruction_count = instructions.len(); - assert_eq!(ending_instruction_count, 1); + assert_eq!(ending_instruction_count, 2); } #[test] @@ -759,4 +798,60 @@ mod test { assert_eq!(main.dfg[main.entry_block()].instructions().len(), 1); assert_eq!(main.dfg[b1].instructions().len(), 0); } + + #[test] + fn deduplicate_across_non_dominated_blocks() { + let src = " + brillig(inline) fn main f0 { + b0(v0: u32): + v2 = lt u32 1000, v0 + jmpif v2 then: b1, else: b2 + b1(): + v4 = 
add v0, u32 1 + v5 = lt v0, v4 + constrain v5 == u1 1 + jmp b2() + b2(): + v7 = lt u32 1000, v0 + jmpif v7 then: b3, else: b4 + b3(): + v8 = add v0, u32 1 + v9 = lt v0, v8 + constrain v9 == u1 1 + jmp b4() + b4(): + return + } + "; + let ssa = Ssa::from_str(src).unwrap(); + + // v4 has been hoisted, although: + // - v5 has not yet been removed since it was encountered earlier in the program + // - v8 hasn't been recognized as a duplicate of v6 yet since they still reference v4 and + // v5 respectively + let expected = " + brillig(inline) fn main f0 { + b0(v0: u32): + v2 = lt u32 1000, v0 + v4 = add v0, u32 1 + jmpif v2 then: b1, else: b2 + b1(): + v5 = add v0, u32 1 + v6 = lt v0, v5 + constrain v6 == u1 1 + jmp b2() + b2(): + jmpif v2 then: b3, else: b4 + b3(): + v8 = lt v0, v4 + constrain v8 == u1 1 + jmp b4() + b4(): + return + } + "; + + let ssa = ssa.fold_constants_using_constraints(); + assert_normalized_ssa_equals(ssa, expected); + } } diff --git a/compiler/noirc_evaluator/src/ssa/opt/die.rs b/compiler/noirc_evaluator/src/ssa/opt/die.rs index a7b7af91a18..d8f33f6494d 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/die.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/die.rs @@ -192,29 +192,11 @@ impl Context { }); } - /// Inspects a value recursively (as it could be an array) and marks all comprised instruction - /// results as used. + /// Inspects a value and marks all instruction results as used. fn mark_used_instruction_results(&mut self, dfg: &DataFlowGraph, value_id: ValueId) { let value_id = dfg.resolve(value_id); - match &dfg[value_id] { - Value::Instruction { .. } => { - self.used_values.insert(value_id); - } - Value::Array { array, .. } => { - self.used_values.insert(value_id); - for elem in array { - self.mark_used_instruction_results(dfg, *elem); - } - } - Value::Param { .. } => { - self.used_values.insert(value_id); - } - Value::NumericConstant { .. } => { - self.used_values.insert(value_id); - } - _ => { - // Does not comprise of any instruction results - } + if matches!(&dfg[value_id], Value::Instruction { .. } | Value::Param { .. 
}) { + self.used_values.insert(value_id); } } @@ -740,10 +722,11 @@ mod test { fn keep_inc_rc_on_borrowed_array_store() { // acir(inline) fn main f0 { // b0(): + // v1 = make_array [u32 0, u32 0] // v2 = allocate - // inc_rc [u32 0, u32 0] - // store [u32 0, u32 0] at v2 - // inc_rc [u32 0, u32 0] + // inc_rc v1 + // store v1 at v2 + // inc_rc v1 // jmp b1() // b1(): // v3 = load v2 @@ -756,11 +739,11 @@ mod test { let mut builder = FunctionBuilder::new("main".into(), main_id); let zero = builder.numeric_constant(0u128, Type::unsigned(32)); let array_type = Type::Array(Arc::new(vec![Type::unsigned(32)]), 2); - let array = builder.array_constant(vector![zero, zero], array_type.clone()); + let v1 = builder.insert_make_array(vector![zero, zero], array_type.clone()); let v2 = builder.insert_allocate(array_type.clone()); - builder.increment_array_reference_count(array); - builder.insert_store(v2, array); - builder.increment_array_reference_count(array); + builder.increment_array_reference_count(v1); + builder.insert_store(v2, v1); + builder.increment_array_reference_count(v1); let b1 = builder.insert_block(); builder.terminate_with_jmp(b1, vec![]); @@ -775,14 +758,14 @@ mod test { let main = ssa.main(); // The instruction count never includes the terminator instruction - assert_eq!(main.dfg[main.entry_block()].instructions().len(), 4); + assert_eq!(main.dfg[main.entry_block()].instructions().len(), 5); assert_eq!(main.dfg[b1].instructions().len(), 2); // We expect the output to be unchanged let ssa = ssa.dead_instruction_elimination(); let main = ssa.main(); - assert_eq!(main.dfg[main.entry_block()].instructions().len(), 4); + assert_eq!(main.dfg[main.entry_block()].instructions().len(), 5); assert_eq!(main.dfg[b1].instructions().len(), 2); } diff --git a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs index de15bfcb9b8..82f67a7c535 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs @@ -598,7 +598,7 @@ impl<'f> Context<'f> { &mut self, id: InstructionId, previous_allocate_result: &mut Option, - ) -> Vec { + ) { let (instruction, call_stack) = self.inserter.map_instruction(id); let instruction = self.handle_instruction_side_effects( instruction, @@ -609,9 +609,7 @@ impl<'f> Context<'f> { let instruction_is_allocate = matches!(&instruction, Instruction::Allocate); let entry = self.inserter.function.entry_block(); let results = self.inserter.push_instruction_value(instruction, id, entry, call_stack); - *previous_allocate_result = instruction_is_allocate.then(|| results.first()); - results.results().into_owned() } /// If we are currently in a branch, we need to modify constrain instructions @@ -727,8 +725,8 @@ impl<'f> Context<'f> { } Value::Intrinsic(Intrinsic::BlackBox(BlackBoxFunc::MultiScalarMul)) => { let points_array_idx = if matches!( - self.inserter.function.dfg[arguments[0]], - Value::Array { .. } + self.inserter.function.dfg.type_of_value(arguments[0]), + Type::Array { .. 
} ) { 0 } else { @@ -736,15 +734,15 @@ impl<'f> Context<'f> { // which means the array is the second argument 1 }; - let (array_with_predicate, array_typ) = self - .apply_predicate_to_msm_argument( - arguments[points_array_idx], - condition, - call_stack.clone(), - ); + let (elements, typ) = self.apply_predicate_to_msm_argument( + arguments[points_array_idx], + condition, + call_stack.clone(), + ); - arguments[points_array_idx] = - self.inserter.function.dfg.make_array(array_with_predicate, array_typ); + let instruction = Instruction::MakeArray { elements, typ }; + let array = self.insert_instruction(instruction, call_stack); + arguments[points_array_idx] = array; Instruction::Call { func, arguments } } _ => Instruction::Call { func, arguments }, @@ -767,7 +765,7 @@ impl<'f> Context<'f> { ) -> (im::Vector, Type) { let array_typ; let mut array_with_predicate = im::Vector::new(); - if let Value::Array { array, typ } = &self.inserter.function.dfg[argument] { + if let Some((array, typ)) = &self.inserter.function.dfg.get_array_constant(argument) { array_typ = typ.clone(); for (i, value) in array.clone().iter().enumerate() { if i % 3 == 2 { diff --git a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/capacity_tracker.rs b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/capacity_tracker.rs index ef208588718..ddc8b0bfe6b 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/capacity_tracker.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/capacity_tracker.rs @@ -26,25 +26,23 @@ impl<'a> SliceCapacityTracker<'a> { ) { match instruction { Instruction::ArrayGet { array, .. } => { - let array_typ = self.dfg.type_of_value(*array); - let array_value = &self.dfg[*array]; - if matches!(array_value, Value::Array { .. }) && array_typ.contains_slice_element() - { - // Initial insertion into the slice sizes map - // Any other insertions should only occur if the value is already - // a part of the map. - self.compute_slice_capacity(*array, slice_sizes); + if let Some((_, array_type)) = self.dfg.get_array_constant(*array) { + if array_type.contains_slice_element() { + // Initial insertion into the slice sizes map + // Any other insertions should only occur if the value is already + // a part of the map. + self.compute_slice_capacity(*array, slice_sizes); + } } } Instruction::ArraySet { array, value, .. } => { - let array_typ = self.dfg.type_of_value(*array); - let array_value = &self.dfg[*array]; - if matches!(array_value, Value::Array { .. }) && array_typ.contains_slice_element() - { - // Initial insertion into the slice sizes map - // Any other insertions should only occur if the value is already - // a part of the map. - self.compute_slice_capacity(*array, slice_sizes); + if let Some((_, array_type)) = self.dfg.get_array_constant(*array) { + if array_type.contains_slice_element() { + // Initial insertion into the slice sizes map + // Any other insertions should only occur if the value is already + // a part of the map. 
+ self.compute_slice_capacity(*array, slice_sizes); + } } let value_typ = self.dfg.type_of_value(*value); @@ -161,7 +159,7 @@ impl<'a> SliceCapacityTracker<'a> { array_id: ValueId, slice_sizes: &mut HashMap, ) { - if let Value::Array { array, typ } = &self.dfg[array_id] { + if let Some((array, typ)) = self.dfg.get_array_constant(array_id) { // Compiler sanity check assert!(!typ.is_nested_slice(), "ICE: Nested slices are not allowed and should not have reached the flattening pass of SSA"); if let Type::Slice(_) = typ { diff --git a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs index 799378b1678..3d45c43f587 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs @@ -209,7 +209,9 @@ impl<'a> ValueMerger<'a> { } } - self.dfg.make_array(merged, typ) + let instruction = Instruction::MakeArray { elements: merged, typ }; + let call_stack = self.call_stack.clone(); + self.dfg.insert_instruction_and_results(instruction, self.block, None, call_stack).first() } fn merge_slice_values( @@ -283,7 +285,9 @@ impl<'a> ValueMerger<'a> { } } - self.dfg.make_array(merged, typ) + let instruction = Instruction::MakeArray { elements: merged, typ }; + let call_stack = self.call_stack.clone(); + self.dfg.insert_instruction_and_results(instruction, self.block, None, call_stack).first() } /// Construct a dummy value to be attached to the smaller of two slices being merged. @@ -303,7 +307,11 @@ impl<'a> ValueMerger<'a> { array.push_back(self.make_slice_dummy_data(typ)); } } - self.dfg.make_array(array, typ.clone()) + let instruction = Instruction::MakeArray { elements: array, typ: typ.clone() }; + let call_stack = self.call_stack.clone(); + self.dfg + .insert_instruction_and_results(instruction, self.block, None, call_stack) + .first() } Type::Slice(_) => { // TODO(#3188): Need to update flattening to use true user facing length of slices diff --git a/compiler/noirc_evaluator/src/ssa/opt/inlining.rs b/compiler/noirc_evaluator/src/ssa/opt/inlining.rs index 2eb0f2eda0f..f91487fd73e 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/inlining.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/inlining.rs @@ -476,10 +476,6 @@ impl<'function> PerFunctionContext<'function> { Value::ForeignFunction(function) => { self.context.builder.import_foreign_function(function) } - Value::Array { array, typ } => { - let elements = array.iter().map(|value| self.translate_value(*value)).collect(); - self.context.builder.array_constant(elements, typ.clone()) - } }; self.values.insert(id, new_value); diff --git a/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs b/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs index 38d73e3dca8..77133d7d88d 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs @@ -171,9 +171,7 @@ impl<'f> PerFunctionContext<'f> { let block_params = self.inserter.function.dfg.block_parameters(*block_id); per_func_block_params.extend(block_params.iter()); let terminator = self.inserter.function.dfg[*block_id].unwrap_terminator(); - terminator.for_each_value(|value| { - self.recursively_add_values(value, &mut all_terminator_values); - }); + terminator.for_each_value(|value| all_terminator_values.insert(value)); } // If we never load from an address within a function we can remove all stores to that address. 
@@ -268,15 +266,6 @@ impl<'f> PerFunctionContext<'f> { .collect() } - fn recursively_add_values(&self, value: ValueId, set: &mut HashSet) { - set.insert(value); - if let Some((elements, _)) = self.inserter.function.dfg.get_array_constant(value) { - for array_element in elements { - self.recursively_add_values(array_element, set); - } - } - } - /// The value of each reference at the start of the given block is the unification /// of the value of the same reference at the end of its predecessor blocks. fn find_starting_references(&mut self, block: BasicBlockId) -> Block { @@ -426,8 +415,6 @@ impl<'f> PerFunctionContext<'f> { let address = self.inserter.function.dfg.resolve(*address); let value = self.inserter.function.dfg.resolve(*value); - self.check_array_aliasing(references, value); - // FIXME: This causes errors in the sha256 tests // // If there was another store to this instruction without any (unremoved) loads or @@ -514,24 +501,22 @@ impl<'f> PerFunctionContext<'f> { } self.mark_all_unknown(arguments, references); } - _ => (), - } - } - - /// If `array` is an array constant that contains reference types, then insert each element - /// as a potential alias to the array itself. - fn check_array_aliasing(&self, references: &mut Block, array: ValueId) { - if let Some((elements, typ)) = self.inserter.function.dfg.get_array_constant(array) { - if Self::contains_references(&typ) { - // TODO: Check if type directly holds references or holds arrays that hold references - let expr = Expression::ArrayElement(Box::new(Expression::Other(array))); - references.expressions.insert(array, expr.clone()); - let aliases = references.aliases.entry(expr).or_default(); - - for element in elements { - aliases.insert(element); + Instruction::MakeArray { elements, typ } => { + // If `array` is an array constant that contains reference types, then insert each element + // as a potential alias to the array itself. 
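+                // For example, for `v2 = make_array [v0, v1]` where `v0` and `v1` are references,
+                // both `v0` and `v1` are recorded as aliases of `v2`'s elements, since a reference
+                // loaded back out of `v2` may point to the same memory as `v0` or `v1`.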
+ if Self::contains_references(typ) { + let array = self.inserter.function.dfg.instruction_results(instruction)[0]; + + let expr = Expression::ArrayElement(Box::new(Expression::Other(array))); + references.expressions.insert(array, expr.clone()); + let aliases = references.aliases.entry(expr).or_default(); + + for element in elements { + aliases.insert(*element); + } } } + _ => (), } } @@ -636,10 +621,11 @@ mod tests { // fn func() { // b0(): // v0 = allocate - // store [Field 1, Field 2] in v0 - // v1 = load v0 - // v2 = array_get v1, index 1 - // return v2 + // v1 = make_array [Field 1, Field 2] + // store v1 in v0 + // v2 = load v0 + // v3 = array_get v2, index 1 + // return v3 // } let func_id = Id::test_new(0); @@ -650,12 +636,12 @@ mod tests { let element_type = Arc::new(vec![Type::field()]); let array_type = Type::Array(element_type, 2); - let array = builder.array_constant(vector![one, two], array_type.clone()); + let v1 = builder.insert_make_array(vector![one, two], array_type.clone()); - builder.insert_store(v0, array); - let v1 = builder.insert_load(v0, array_type); - let v2 = builder.insert_array_get(v1, one, Type::field()); - builder.terminate_with_return(vec![v2]); + builder.insert_store(v0, v1); + let v2 = builder.insert_load(v0, array_type); + let v3 = builder.insert_array_get(v2, one, Type::field()); + builder.terminate_with_return(vec![v3]); let ssa = builder.finish().mem2reg().fold_constants(); diff --git a/compiler/noirc_evaluator/src/ssa/opt/normalize_value_ids.rs b/compiler/noirc_evaluator/src/ssa/opt/normalize_value_ids.rs index 6914bf87c5d..a5b60fb5fcd 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/normalize_value_ids.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/normalize_value_ids.rs @@ -179,19 +179,6 @@ impl IdMaps { Value::NumericConstant { constant, typ } => { new_function.dfg.make_constant(*constant, typ.clone()) } - Value::Array { array, typ } => { - if let Some(value) = self.values.get(&old_value) { - return *value; - } - - let array = array - .iter() - .map(|value| self.map_value(new_function, old_function, *value)) - .collect(); - let new_value = new_function.dfg.make_array(array, typ.clone()); - self.values.insert(old_value, new_value); - new_value - } Value::Intrinsic(intrinsic) => new_function.dfg.import_intrinsic(*intrinsic), Value::ForeignFunction(name) => new_function.dfg.import_foreign_function(name), } diff --git a/compiler/noirc_evaluator/src/ssa/opt/rc.rs b/compiler/noirc_evaluator/src/ssa/opt/rc.rs index c3606ac4311..ffe4ada39b7 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/rc.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/rc.rs @@ -197,7 +197,8 @@ mod test { // inc_rc v0 // inc_rc v0 // dec_rc v0 - // return [v0] + // v1 = make_array [v0] + // return v1 // } let main_id = Id::test_new(0); let mut builder = FunctionBuilder::new("foo".into(), main_id); @@ -211,8 +212,8 @@ mod test { builder.insert_dec_rc(v0); let outer_array_type = Type::Array(Arc::new(vec![inner_array_type]), 1); - let array = builder.array_constant(vec![v0].into(), outer_array_type); - builder.terminate_with_return(vec![array]); + let v1 = builder.insert_make_array(vec![v0].into(), outer_array_type); + builder.terminate_with_return(vec![v1]); let ssa = builder.finish().remove_paired_rc(); let main = ssa.main(); diff --git a/compiler/noirc_evaluator/src/ssa/opt/remove_enable_side_effects.rs b/compiler/noirc_evaluator/src/ssa/opt/remove_enable_side_effects.rs index 012f6e6b27d..0517f9ef89f 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/remove_enable_side_effects.rs +++ 
b/compiler/noirc_evaluator/src/ssa/opt/remove_enable_side_effects.rs @@ -145,7 +145,8 @@ impl Context { | RangeCheck { .. } | IfElse { .. } | IncrementRc { .. } - | DecrementRc { .. } => false, + | DecrementRc { .. } + | MakeArray { .. } => false, EnableSideEffectsIf { .. } | ArrayGet { .. } diff --git a/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs b/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs index 267dc6a3c20..3ff0e630a69 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs @@ -27,7 +27,7 @@ use crate::{ dfg::{CallStack, DataFlowGraph}, dom::DominatorTree, function::{Function, RuntimeType}, - function_inserter::FunctionInserter, + function_inserter::{ArrayCache, FunctionInserter}, instruction::{Instruction, TerminatorInstruction}, post_order::PostOrder, value::ValueId, @@ -213,11 +213,21 @@ fn unroll_loop( ) -> Result<(), CallStack> { let mut unroll_into = get_pre_header(cfg, loop_); let mut jump_value = get_induction_variable(function, unroll_into)?; + let mut array_cache = Some(ArrayCache::default()); - while let Some(context) = unroll_loop_header(function, loop_, unroll_into, jump_value)? { - let (last_block, last_value) = context.unroll_loop_iteration(); - unroll_into = last_block; - jump_value = last_value; + while let Some(mut context) = unroll_loop_header(function, loop_, unroll_into, jump_value)? { + // The inserter's array cache must be explicitly enabled. This is to + // confirm that we're inserting in insertion order. This is true here since: + // 1. We have a fresh inserter for each loop + // 2. Each loop is unrolled in iteration order + // + // Within a loop we do not insert in insertion order. This is fine however since the + // array cache is buffered with a separate fresh_array_cache which collects arrays + // but does not deduplicate. When we later call `into_array_cache`, that will merge + // the fresh cache in with the old one so that each iteration of the loop can cache + // from previous iterations but not the current iteration. + context.inserter.set_array_cache(array_cache, unroll_into); + (unroll_into, jump_value, array_cache) = context.unroll_loop_iteration(); } Ok(()) @@ -357,7 +367,7 @@ impl<'f> LoopIteration<'f> { /// It is expected the terminator instructions are set up to branch into an empty block /// for further unrolling. When the loop is finished this will need to be mutated to /// jump to the end of the loop instead. - fn unroll_loop_iteration(mut self) -> (BasicBlockId, ValueId) { + fn unroll_loop_iteration(mut self) -> (BasicBlockId, ValueId, Option) { let mut next_blocks = self.unroll_loop_block(); while let Some(block) = next_blocks.pop() { @@ -370,8 +380,11 @@ impl<'f> LoopIteration<'f> { } } - self.induction_value - .expect("Expected to find the induction variable by end of loop iteration") + let (end_block, induction_value) = self + .induction_value + .expect("Expected to find the induction variable by end of loop iteration"); + + (end_block, induction_value, self.inserter.into_array_cache()) } /// Unroll a single block in the current iteration of the loop diff --git a/compiler/noirc_evaluator/src/ssa/parser.rs b/compiler/noirc_evaluator/src/ssa/parser.rs index 11d43284786..2db2c636a8f 100644 --- a/compiler/noirc_evaluator/src/ssa/parser.rs +++ b/compiler/noirc_evaluator/src/ssa/parser.rs @@ -429,6 +429,15 @@ impl<'a> Parser<'a> { return Ok(ParsedInstruction::Load { target, value, typ }); } + if self.eat_keyword(Keyword::MakeArray)? 
{ + self.eat_or_error(Token::LeftBracket)?; + let elements = self.parse_comma_separated_values()?; + self.eat_or_error(Token::RightBracket)?; + self.eat_or_error(Token::Colon)?; + let typ = self.parse_type()?; + return Ok(ParsedInstruction::MakeArray { target, elements, typ }); + } + if self.eat_keyword(Keyword::Not)? { let value = self.parse_value_or_error()?; return Ok(ParsedInstruction::Not { target, value }); @@ -557,10 +566,6 @@ impl<'a> Parser<'a> { return Ok(Some(value)); } - if let Some(value) = self.parse_array_value()? { - return Ok(Some(value)); - } - if let Some(identifier) = self.eat_identifier()? { return Ok(Some(ParsedValue::Variable(identifier))); } @@ -590,23 +595,6 @@ impl<'a> Parser<'a> { } } - fn parse_array_value(&mut self) -> ParseResult> { - if self.eat(Token::LeftBracket)? { - let values = self.parse_comma_separated_values()?; - self.eat_or_error(Token::RightBracket)?; - self.eat_or_error(Token::Keyword(Keyword::Of))?; - let types = self.parse_types()?; - let types_len = types.len(); - let values_len = values.len(); - Ok(Some(ParsedValue::Array { - typ: Type::Array(Arc::new(types), values_len / types_len), - values, - })) - } else { - Ok(None) - } - } - fn parse_types(&mut self) -> ParseResult> { if self.eat(Token::LeftParen)? { let types = self.parse_comma_separated_types()?; diff --git a/compiler/noirc_evaluator/src/ssa/parser/ast.rs b/compiler/noirc_evaluator/src/ssa/parser/ast.rs index f8fe8c68a98..a34b7fd70d3 100644 --- a/compiler/noirc_evaluator/src/ssa/parser/ast.rs +++ b/compiler/noirc_evaluator/src/ssa/parser/ast.rs @@ -104,6 +104,11 @@ pub(crate) enum ParsedInstruction { value: ParsedValue, typ: Type, }, + MakeArray { + target: Identifier, + elements: Vec, + typ: Type, + }, Not { target: Identifier, value: ParsedValue, @@ -131,9 +136,8 @@ pub(crate) enum ParsedTerminator { Return(Vec), } -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) enum ParsedValue { NumericConstant { constant: FieldElement, typ: Type }, - Array { values: Vec, typ: Type }, Variable(Identifier), } diff --git a/compiler/noirc_evaluator/src/ssa/parser/into_ssa.rs b/compiler/noirc_evaluator/src/ssa/parser/into_ssa.rs index 2a94a4fd1eb..9ca4f52cb14 100644 --- a/compiler/noirc_evaluator/src/ssa/parser/into_ssa.rs +++ b/compiler/noirc_evaluator/src/ssa/parser/into_ssa.rs @@ -1,7 +1,5 @@ use std::collections::HashMap; -use im::Vector; - use crate::ssa::{ function_builder::FunctionBuilder, ir::{basic_block::BasicBlockId, function::FunctionId, value::ValueId}, @@ -213,6 +211,14 @@ impl Translator { let value = self.translate_value(value)?; self.builder.increment_array_reference_count(value); } + ParsedInstruction::MakeArray { target, elements, typ } => { + let elements = elements + .into_iter() + .map(|element| self.translate_value(element)) + .collect::>()?; + let value_id = self.builder.insert_make_array(elements, typ); + self.define_variable(target, value_id)?; + } ParsedInstruction::Load { target, value, typ } => { let value = self.translate_value(value)?; let value_id = self.builder.insert_load(value, typ); @@ -255,13 +261,6 @@ impl Translator { ParsedValue::NumericConstant { constant, typ } => { Ok(self.builder.numeric_constant(constant, typ)) } - ParsedValue::Array { values, typ } => { - let mut translated_values = Vector::new(); - for value in values { - translated_values.push_back(self.translate_value(value)?); - } - Ok(self.builder.array_constant(translated_values, typ)) - } ParsedValue::Variable(identifier) => self.lookup_variable(identifier), } } diff --git 
a/compiler/noirc_evaluator/src/ssa/parser/tests.rs b/compiler/noirc_evaluator/src/ssa/parser/tests.rs index 9205353151e..f318e317473 100644 --- a/compiler/noirc_evaluator/src/ssa/parser/tests.rs +++ b/compiler/noirc_evaluator/src/ssa/parser/tests.rs @@ -54,33 +54,36 @@ fn test_return_integer() { } #[test] -fn test_return_array() { +fn test_make_array() { let src = " acir(inline) fn main f0 { b0(): - return [Field 1] of Field + v1 = make_array [Field 1] : [Field; 1] + return v1 } "; assert_ssa_roundtrip(src); } #[test] -fn test_return_empty_array() { +fn test_make_empty_array() { let src = " acir(inline) fn main f0 { b0(): - return [] of Field + v0 = make_array [] : [Field; 0] + return v0 } "; assert_ssa_roundtrip(src); } #[test] -fn test_return_composite_array() { +fn test_make_composite_array() { let src = " acir(inline) fn main f0 { b0(): - return [Field 1, Field 2] of (Field, Field) + v2 = make_array [Field 1, Field 2] : [(Field, Field); 1] + return v2 } "; assert_ssa_roundtrip(src); @@ -151,7 +154,9 @@ fn test_call_multiple_return_values() { } acir(inline) fn foo f1 { b0(): - return [Field 1, Field 2, Field 3] of Field, [Field 4] of Field + v3 = make_array [Field 1, Field 2, Field 3] : [Field; 3] + v5 = make_array [Field 4] : [Field; 1] + return v3, v5 } "; assert_ssa_roundtrip(src); diff --git a/compiler/noirc_evaluator/src/ssa/parser/token.rs b/compiler/noirc_evaluator/src/ssa/parser/token.rs index d648f58de41..f663879e899 100644 --- a/compiler/noirc_evaluator/src/ssa/parser/token.rs +++ b/compiler/noirc_evaluator/src/ssa/parser/token.rs @@ -136,6 +136,7 @@ pub(crate) enum Keyword { Jmpif, Load, Lt, + MakeArray, MaxBitSize, Mod, Mul, @@ -190,6 +191,7 @@ impl Keyword { "jmpif" => Keyword::Jmpif, "load" => Keyword::Load, "lt" => Keyword::Lt, + "make_array" => Keyword::MakeArray, "max_bit_size" => Keyword::MaxBitSize, "mod" => Keyword::Mod, "mul" => Keyword::Mul, @@ -248,6 +250,7 @@ impl Display for Keyword { Keyword::Jmpif => write!(f, "jmpif"), Keyword::Load => write!(f, "load"), Keyword::Lt => write!(f, "lt"), + Keyword::MakeArray => write!(f, "make_array"), Keyword::MaxBitSize => write!(f, "max_bit_size"), Keyword::Mod => write!(f, "mod"), Keyword::Mul => write!(f, "mul"), diff --git a/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs b/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs index 8bf3a740b3a..aac6e33bf5b 100644 --- a/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs +++ b/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs @@ -291,7 +291,7 @@ impl<'a> FunctionContext<'a> { }); } - self.builder.array_constant(array, typ).into() + self.builder.insert_make_array(array, typ).into() } fn codegen_block(&mut self, block: &[Expression]) -> Result { From 713df69aad56fc5aaefd5d140275a3217de4d866 Mon Sep 17 00:00:00 2001 From: Tom French <15848336+TomAFrench@users.noreply.github.com> Date: Mon, 18 Nov 2024 16:19:01 +0000 Subject: [PATCH 018/109] fix: take blackbox function outputs into account when merging expressions (#6532) --- .../compiler/optimizers/merge_expressions.rs | 72 +++++++++++++++---- 1 file changed, 60 insertions(+), 12 deletions(-) diff --git a/acvm-repo/acvm/src/compiler/optimizers/merge_expressions.rs b/acvm-repo/acvm/src/compiler/optimizers/merge_expressions.rs index c3c80bec2ae..e2585f64acd 100644 --- a/acvm-repo/acvm/src/compiler/optimizers/merge_expressions.rs +++ b/acvm-repo/acvm/src/compiler/optimizers/merge_expressions.rs @@ -153,15 +153,18 @@ impl MergeExpressionsOptimizer { // Returns the input witnesses used by the opcode fn witness_inputs(&self, opcode: &Opcode) 
-> BTreeSet { - let mut witnesses = BTreeSet::new(); match opcode { Opcode::AssertZero(expr) => CircuitSimulator::expr_wit(expr), - Opcode::BlackBoxFuncCall(bb_func) => bb_func.get_input_witnesses(), + Opcode::BlackBoxFuncCall(bb_func) => { + let mut witnesses = bb_func.get_input_witnesses(); + witnesses.extend(bb_func.get_outputs_vec()); + + witnesses + } Opcode::Directive(Directive::ToLeRadix { a, .. }) => CircuitSimulator::expr_wit(a), Opcode::MemoryOp { block_id: _, op, predicate } => { //index et value, et predicate - let mut witnesses = BTreeSet::new(); - witnesses.extend(CircuitSimulator::expr_wit(&op.index)); + let mut witnesses = CircuitSimulator::expr_wit(&op.index); witnesses.extend(CircuitSimulator::expr_wit(&op.value)); if let Some(p) = predicate { witnesses.extend(CircuitSimulator::expr_wit(p)); @@ -173,6 +176,7 @@ impl MergeExpressionsOptimizer { init.iter().cloned().collect() } Opcode::BrilligCall { inputs, outputs, .. } => { + let mut witnesses = BTreeSet::new(); for i in inputs { witnesses.extend(self.brillig_input_wit(i)); } @@ -182,12 +186,9 @@ impl MergeExpressionsOptimizer { witnesses } Opcode::Call { id: _, inputs, outputs, predicate } => { - for i in inputs { - witnesses.insert(*i); - } - for i in outputs { - witnesses.insert(*i); - } + let mut witnesses: BTreeSet = BTreeSet::from_iter(inputs.iter().copied()); + witnesses.extend(outputs); + if let Some(p) = predicate { witnesses.extend(CircuitSimulator::expr_wit(p)); } @@ -235,7 +236,7 @@ mod tests { acir_field::AcirField, circuit::{ brillig::{BrilligFunctionId, BrilligOutputs}, - opcodes::FunctionInput, + opcodes::{BlackBoxFuncCall, FunctionInput}, Circuit, ExpressionWidth, Opcode, PublicInputs, }, native_types::{Expression, Witness}, @@ -243,7 +244,7 @@ mod tests { }; use std::collections::BTreeSet; - fn check_circuit(circuit: Circuit) { + fn check_circuit(circuit: Circuit) -> Circuit { assert!(CircuitSimulator::default().check_circuit(&circuit)); let mut merge_optimizer = MergeExpressionsOptimizer::new(); let acir_opcode_positions = vec![0; 20]; @@ -253,6 +254,7 @@ mod tests { optimized_circuit.opcodes = opcodes; // check that the circuit is still valid after optimization assert!(CircuitSimulator::default().check_circuit(&optimized_circuit)); + optimized_circuit } #[test] @@ -352,4 +354,50 @@ mod tests { }; check_circuit(circuit); } + + #[test] + fn takes_blackbox_opcode_outputs_into_account() { + // Regression test for https://github.com/noir-lang/noir/issues/6527 + // Previously we would not track the usage of witness 4 in the output of the blackbox function. + // We would then merge the final two opcodes losing the check that the brillig call must match + // with `_0 ^ _1`. 
+ + let circuit: Circuit = Circuit { + current_witness_index: 7, + opcodes: vec![ + Opcode::BrilligCall { + id: BrilligFunctionId(0), + inputs: Vec::new(), + outputs: vec![BrilligOutputs::Simple(Witness(3))], + predicate: None, + }, + Opcode::BlackBoxFuncCall(BlackBoxFuncCall::AND { + lhs: FunctionInput::witness(Witness(0), 8), + rhs: FunctionInput::witness(Witness(1), 8), + output: Witness(4), + }), + Opcode::AssertZero(Expression { + linear_combinations: vec![ + (FieldElement::one(), Witness(3)), + (-FieldElement::one(), Witness(4)), + ], + ..Default::default() + }), + Opcode::AssertZero(Expression { + linear_combinations: vec![ + (-FieldElement::one(), Witness(2)), + (FieldElement::one(), Witness(4)), + ], + ..Default::default() + }), + ], + expression_width: ExpressionWidth::Bounded { width: 4 }, + private_parameters: BTreeSet::from([Witness(0), Witness(1)]), + return_values: PublicInputs(BTreeSet::from([Witness(2)])), + ..Default::default() + }; + + let new_circuit = check_circuit(circuit.clone()); + assert_eq!(circuit, new_circuit); + } } From 4cd0c1e65c5f20845164b653192b92654a300ef5 Mon Sep 17 00:00:00 2001 From: Tom French <15848336+TomAFrench@users.noreply.github.com> Date: Mon, 18 Nov 2024 16:47:58 +0000 Subject: [PATCH 019/109] chore: restructure `noirc_evaluator` crate (#6534) --- .../acir_ir => acir}/acir_variable.rs | 39 +++++---- .../{ssa/acir_gen/acir_ir => acir}/big_int.rs | 0 .../brillig_gen => acir}/brillig_directive.rs | 0 .../acir_ir => acir}/generated_acir.rs | 16 ++-- .../src/{ssa/acir_gen => acir}/mod.rs | 87 +++++++++---------- .../src/brillig/brillig_gen.rs | 1 - compiler/noirc_evaluator/src/lib.rs | 6 +- compiler/noirc_evaluator/src/ssa.rs | 9 +- .../src/ssa/acir_gen/acir_ir.rs | 3 - .../src/ssa/{ir.rs => ir/mod.rs} | 0 .../src/ssa/{parser.rs => parser/mod.rs} | 0 11 files changed, 75 insertions(+), 86 deletions(-) rename compiler/noirc_evaluator/src/{ssa/acir_gen/acir_ir => acir}/acir_variable.rs (99%) rename compiler/noirc_evaluator/src/{ssa/acir_gen/acir_ir => acir}/big_int.rs (100%) rename compiler/noirc_evaluator/src/{brillig/brillig_gen => acir}/brillig_directive.rs (100%) rename compiler/noirc_evaluator/src/{ssa/acir_gen/acir_ir => acir}/generated_acir.rs (99%) rename compiler/noirc_evaluator/src/{ssa/acir_gen => acir}/mod.rs (98%) delete mode 100644 compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir.rs rename compiler/noirc_evaluator/src/ssa/{ir.rs => ir/mod.rs} (100%) rename compiler/noirc_evaluator/src/ssa/{parser.rs => parser/mod.rs} (100%) diff --git a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs b/compiler/noirc_evaluator/src/acir/acir_variable.rs similarity index 99% rename from compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs rename to compiler/noirc_evaluator/src/acir/acir_variable.rs index 3f4420a416e..6aa8bc32975 100644 --- a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs +++ b/compiler/noirc_evaluator/src/acir/acir_variable.rs @@ -1,27 +1,18 @@ -use super::big_int::BigIntContext; -use super::generated_acir::{BrilligStdlibFunc, GeneratedAcir, PLACEHOLDER_BRILLIG_INDEX}; -use crate::brillig::brillig_gen::brillig_directive; -use crate::brillig::brillig_ir::artifact::GeneratedBrillig; -use crate::errors::{InternalBug, InternalError, RuntimeError, SsaReport}; -use crate::ssa::acir_gen::{AcirDynamicArray, AcirValue}; -use crate::ssa::ir::dfg::CallStack; -use crate::ssa::ir::types::Type as SsaType; -use crate::ssa::ir::{instruction::Endian, types::NumericType}; -use 
acvm::acir::circuit::brillig::{BrilligFunctionId, BrilligInputs, BrilligOutputs}; -use acvm::acir::circuit::opcodes::{ - AcirFunctionId, BlockId, BlockType, ConstantOrWitnessEnum, MemOp, -}; -use acvm::acir::circuit::{AssertionPayload, ExpressionOrMemory, ExpressionWidth, Opcode}; -use acvm::brillig_vm::{MemoryValue, VMStatus, VM}; -use acvm::BlackBoxFunctionSolver; use acvm::{ - acir::AcirField, acir::{ brillig::Opcode as BrilligOpcode, - circuit::opcodes::FunctionInput, + circuit::{ + brillig::{BrilligFunctionId, BrilligInputs, BrilligOutputs}, + opcodes::{ + AcirFunctionId, BlockId, BlockType, ConstantOrWitnessEnum, FunctionInput, MemOp, + }, + AssertionPayload, ExpressionOrMemory, ExpressionWidth, Opcode, + }, native_types::{Expression, Witness}, - BlackBoxFunc, + AcirField, BlackBoxFunc, }, + brillig_vm::{MemoryValue, VMStatus, VM}, + BlackBoxFunctionSolver, }; use fxhash::FxHashMap as HashMap; use iter_extended::{try_vecmap, vecmap}; @@ -29,6 +20,16 @@ use num_bigint::BigUint; use std::cmp::Ordering; use std::{borrow::Cow, hash::Hash}; +use crate::brillig::brillig_ir::artifact::GeneratedBrillig; +use crate::errors::{InternalBug, InternalError, RuntimeError, SsaReport}; +use crate::ssa::ir::{ + dfg::CallStack, instruction::Endian, types::NumericType, types::Type as SsaType, +}; + +use super::big_int::BigIntContext; +use super::generated_acir::{BrilligStdlibFunc, GeneratedAcir, PLACEHOLDER_BRILLIG_INDEX}; +use super::{brillig_directive, AcirDynamicArray, AcirValue}; + #[derive(Clone, Debug, PartialEq, Eq, Hash)] /// High level Type descriptor for Variables. /// diff --git a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/big_int.rs b/compiler/noirc_evaluator/src/acir/big_int.rs similarity index 100% rename from compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/big_int.rs rename to compiler/noirc_evaluator/src/acir/big_int.rs diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_directive.rs b/compiler/noirc_evaluator/src/acir/brillig_directive.rs similarity index 100% rename from compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_directive.rs rename to compiler/noirc_evaluator/src/acir/brillig_directive.rs diff --git a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs b/compiler/noirc_evaluator/src/acir/generated_acir.rs similarity index 99% rename from compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs rename to compiler/noirc_evaluator/src/acir/generated_acir.rs index 9a12fbe2441..09653d5b73b 100644 --- a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs +++ b/compiler/noirc_evaluator/src/acir/generated_acir.rs @@ -1,12 +1,7 @@ //! `GeneratedAcir` is constructed as part of the `acir_gen` pass to accumulate all of the ACIR //! program as it is being converted from SSA form. 
-use std::{collections::BTreeMap, u32}; +use std::collections::BTreeMap; -use crate::{ - brillig::{brillig_gen::brillig_directive, brillig_ir::artifact::GeneratedBrillig}, - errors::{InternalError, RuntimeError, SsaReport}, - ssa::ir::dfg::CallStack, -}; use acvm::acir::{ circuit::{ brillig::{BrilligFunctionId, BrilligInputs, BrilligOutputs}, @@ -21,6 +16,13 @@ use acvm::{ acir::{circuit::directives::Directive, native_types::Expression}, }; +use super::brillig_directive; +use crate::{ + brillig::brillig_ir::artifact::GeneratedBrillig, + errors::{InternalError, RuntimeError, SsaReport}, + ssa::ir::dfg::CallStack, +}; + use iter_extended::vecmap; use noirc_errors::debug_info::ProcedureDebugId; use num_bigint::BigUint; @@ -153,7 +155,7 @@ impl GeneratedAcir { /// This means you cannot multiply an infinite amount of `Expression`s together. /// Once the `Expression` goes over degree-2, then it needs to be reduced to a `Witness` /// which has degree-1 in order to be able to continue the multiplication chain. - pub(crate) fn create_witness_for_expression(&mut self, expression: &Expression) -> Witness { + fn create_witness_for_expression(&mut self, expression: &Expression) -> Witness { let fresh_witness = self.next_witness_index(); // Create a constraint that sets them to be equal to each other diff --git a/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs b/compiler/noirc_evaluator/src/acir/mod.rs similarity index 98% rename from compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs rename to compiler/noirc_evaluator/src/acir/mod.rs index 4aca59855a1..16b07b40863 100644 --- a/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs +++ b/compiler/noirc_evaluator/src/acir/mod.rs @@ -1,46 +1,57 @@ //! This file holds the pass to convert from Noir's SSA IR to ACIR. -mod acir_ir; +use fxhash::FxHashMap as HashMap; +use im::Vector; use std::collections::{BTreeMap, HashSet}; use std::fmt::Debug; -use self::acir_ir::acir_variable::{AcirContext, AcirType, AcirVar}; -use self::acir_ir::generated_acir::BrilligStdlibFunc; -use super::function_builder::data_bus::DataBus; -use super::ir::dfg::CallStack; -use super::ir::function::FunctionId; -use super::ir::instruction::{ConstrainError, ErrorType}; -use super::ir::printer::try_to_extract_string_from_error_payload; -use super::{ +use acvm::acir::{ + circuit::{ + brillig::{BrilligBytecode, BrilligFunctionId}, + opcodes::{AcirFunctionId, BlockType}, + AssertionPayload, ErrorSelector, ExpressionWidth, OpcodeLocation, + }, + native_types::Witness, + BlackBoxFunc, +}; +use acvm::{acir::circuit::opcodes::BlockId, acir::AcirField, FieldElement}; +use bn254_blackbox_solver::Bn254BlackBoxSolver; +use iter_extended::{try_vecmap, vecmap}; +use noirc_frontend::monomorphization::ast::InlineType; + +mod acir_variable; +mod big_int; +mod brillig_directive; +mod generated_acir; + +use crate::brillig::{ + brillig_gen::brillig_fn::FunctionContext as BrilligFunctionContext, + brillig_ir::{ + artifact::{BrilligParameter, GeneratedBrillig}, + BrilligContext, + }, + Brillig, +}; +use crate::errors::{InternalError, InternalWarning, RuntimeError, SsaReport}; +use crate::ssa::{ + function_builder::data_bus::DataBus, ir::{ - dfg::DataFlowGraph, - function::{Function, RuntimeType}, + dfg::{CallStack, DataFlowGraph}, + function::{Function, FunctionId, RuntimeType}, instruction::{ - Binary, BinaryOp, Instruction, InstructionId, Intrinsic, TerminatorInstruction, + Binary, BinaryOp, ConstrainError, ErrorType, Instruction, InstructionId, Intrinsic, + TerminatorInstruction, }, map::Id, + 
printer::try_to_extract_string_from_error_payload, types::{NumericType, Type}, value::{Value, ValueId}, }, ssa_gen::Ssa, }; -use crate::brillig::brillig_ir::artifact::{BrilligParameter, GeneratedBrillig}; -use crate::brillig::brillig_ir::BrilligContext; -use crate::brillig::{brillig_gen::brillig_fn::FunctionContext as BrilligFunctionContext, Brillig}; -use crate::errors::{InternalError, InternalWarning, RuntimeError, SsaReport}; -pub(crate) use acir_ir::generated_acir::GeneratedAcir; -use acvm::acir::circuit::opcodes::{AcirFunctionId, BlockType}; -use bn254_blackbox_solver::Bn254BlackBoxSolver; -use noirc_frontend::monomorphization::ast::InlineType; - -use acvm::acir::circuit::brillig::{BrilligBytecode, BrilligFunctionId}; -use acvm::acir::circuit::{AssertionPayload, ErrorSelector, ExpressionWidth, OpcodeLocation}; -use acvm::acir::native_types::Witness; -use acvm::acir::BlackBoxFunc; -use acvm::{acir::circuit::opcodes::BlockId, acir::AcirField, FieldElement}; -use fxhash::FxHashMap as HashMap; -use im::Vector; -use iter_extended::{try_vecmap, vecmap}; +use acir_variable::{AcirContext, AcirType, AcirVar}; +use generated_acir::BrilligStdlibFunc; +pub(crate) use generated_acir::GeneratedAcir; #[derive(Default)] struct SharedContext { @@ -2823,22 +2834,6 @@ impl<'a> Context<'a> { Ok(()) } - /// Given an array value, return the numerical type of its element. - /// Panics if the given value is not an array or has a non-numeric element type. - fn array_element_type(dfg: &DataFlowGraph, value: ValueId) -> AcirType { - match dfg.type_of_value(value) { - Type::Array(elements, _) => { - assert_eq!(elements.len(), 1); - (&elements[0]).into() - } - Type::Slice(elements) => { - assert_eq!(elements.len(), 1); - (&elements[0]).into() - } - _ => unreachable!("Expected array type"), - } - } - /// Convert a Vec into a Vec using the given result ids. /// If the type of a result id is an array, several acir vars are collected into /// a single AcirValue::Array of the same length. @@ -2929,9 +2924,9 @@ mod test { use std::collections::BTreeMap; use crate::{ + acir::BrilligStdlibFunc, brillig::Brillig, ssa::{ - acir_gen::acir_ir::generated_acir::BrilligStdlibFunc, function_builder::FunctionBuilder, ir::{function::FunctionId, instruction::BinaryOp, map::Id, types::Type}, }, diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen.rs index 313fd65a197..786a03031d6 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen.rs @@ -1,7 +1,6 @@ pub(crate) mod brillig_black_box; pub(crate) mod brillig_block; pub(crate) mod brillig_block_variables; -pub(crate) mod brillig_directive; pub(crate) mod brillig_fn; pub(crate) mod brillig_slice_ops; mod constant_allocation; diff --git a/compiler/noirc_evaluator/src/lib.rs b/compiler/noirc_evaluator/src/lib.rs index 453e8b59f58..3a5825699b2 100644 --- a/compiler/noirc_evaluator/src/lib.rs +++ b/compiler/noirc_evaluator/src/lib.rs @@ -5,11 +5,9 @@ pub mod errors; -// SSA code to create the SSA based IR -// for functions and execute different optimizations. 
-pub mod ssa; - +mod acir; pub mod brillig; +pub mod ssa; pub use ssa::create_program; diff --git a/compiler/noirc_evaluator/src/ssa.rs b/compiler/noirc_evaluator/src/ssa.rs index 977ced3c242..6d6cb11511f 100644 --- a/compiler/noirc_evaluator/src/ssa.rs +++ b/compiler/noirc_evaluator/src/ssa.rs @@ -28,19 +28,16 @@ use acvm::{ use noirc_errors::debug_info::{DebugFunctions, DebugInfo, DebugTypes, DebugVariables}; -use noirc_frontend::ast::Visibility; use noirc_frontend::{ + ast::Visibility, hir_def::{function::FunctionSignature, types::Type as HirType}, monomorphization::ast::Program, }; +use ssa_gen::Ssa; use tracing::{span, Level}; -use self::{ - acir_gen::{Artifacts, GeneratedAcir}, - ssa_gen::Ssa, -}; +use crate::acir::{Artifacts, GeneratedAcir}; -mod acir_gen; mod checks; pub(super) mod function_builder; pub mod ir; diff --git a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir.rs b/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir.rs deleted file mode 100644 index 090d5bb0a83..00000000000 --- a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub(crate) mod acir_variable; -pub(crate) mod big_int; -pub(crate) mod generated_acir; diff --git a/compiler/noirc_evaluator/src/ssa/ir.rs b/compiler/noirc_evaluator/src/ssa/ir/mod.rs similarity index 100% rename from compiler/noirc_evaluator/src/ssa/ir.rs rename to compiler/noirc_evaluator/src/ssa/ir/mod.rs diff --git a/compiler/noirc_evaluator/src/ssa/parser.rs b/compiler/noirc_evaluator/src/ssa/parser/mod.rs similarity index 100% rename from compiler/noirc_evaluator/src/ssa/parser.rs rename to compiler/noirc_evaluator/src/ssa/parser/mod.rs From 245f50d22e9ad2853d4a40ccabfc115c94062ffa Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Mon, 18 Nov 2024 17:53:35 +0000 Subject: [PATCH 020/109] chore(test): Remove duplicate brillig tests (#6523) --- .../execution_success/assert/src/main.nr | 7 + .../brillig_array_to_slice/Nargo.toml | 7 - .../brillig_array_to_slice/Prover.toml | 1 - .../brillig_array_to_slice/src/main.nr | 20 --- .../brillig_assert/Nargo.toml | 6 - .../brillig_assert/Prover.toml | 1 - .../brillig_assert/src/main.nr | 14 -- .../brillig_blake3/Nargo.toml | 7 - .../brillig_blake3/Prover.toml | 37 ---- .../brillig_blake3/src/main.nr | 4 - .../brillig_ecdsa_secp256k1/Nargo.toml | 6 - .../brillig_ecdsa_secp256k1/Prover.toml | 169 ------------------ .../brillig_ecdsa_secp256k1/src/main.nr | 17 -- .../brillig_ecdsa_secp256r1/Nargo.toml | 6 - .../brillig_ecdsa_secp256r1/Prover.toml | 20 --- .../brillig_ecdsa_secp256r1/src/main.nr | 17 -- .../brillig_hash_to_field/Nargo.toml | 6 - .../brillig_hash_to_field/Prover.toml | 1 - .../brillig_hash_to_field/src/main.nr | 12 -- .../brillig_keccak/Nargo.toml | 6 - .../brillig_keccak/Prover.toml | 35 ---- .../brillig_keccak/src/main.nr | 26 --- .../execution_success/brillig_loop/Nargo.toml | 6 - .../brillig_loop/Prover.toml | 1 - .../brillig_loop/src/main.nr | 34 ---- .../brillig_references/Nargo.toml | 6 - .../brillig_references/Prover.toml | 1 - .../brillig_references/src/main.nr | 53 ------ .../brillig_sha256/Nargo.toml | 6 - .../brillig_sha256/Prover.toml | 35 ---- .../brillig_sha256/src/main.nr | 15 -- .../brillig_slices/Nargo.toml | 5 - .../brillig_slices/Prover.toml | 2 - .../brillig_slices/src/main.nr | 144 --------------- .../execution_success/loop/src/main.nr | 9 + .../execution_success/sha256/src/main.nr | 3 + 36 files changed, 19 insertions(+), 726 deletions(-) delete mode 100644 test_programs/execution_success/brillig_array_to_slice/Nargo.toml delete mode 
100644 test_programs/execution_success/brillig_array_to_slice/Prover.toml delete mode 100644 test_programs/execution_success/brillig_array_to_slice/src/main.nr delete mode 100644 test_programs/execution_success/brillig_assert/Nargo.toml delete mode 100644 test_programs/execution_success/brillig_assert/Prover.toml delete mode 100644 test_programs/execution_success/brillig_assert/src/main.nr delete mode 100644 test_programs/execution_success/brillig_blake3/Nargo.toml delete mode 100644 test_programs/execution_success/brillig_blake3/Prover.toml delete mode 100644 test_programs/execution_success/brillig_blake3/src/main.nr delete mode 100644 test_programs/execution_success/brillig_ecdsa_secp256k1/Nargo.toml delete mode 100644 test_programs/execution_success/brillig_ecdsa_secp256k1/Prover.toml delete mode 100644 test_programs/execution_success/brillig_ecdsa_secp256k1/src/main.nr delete mode 100644 test_programs/execution_success/brillig_ecdsa_secp256r1/Nargo.toml delete mode 100644 test_programs/execution_success/brillig_ecdsa_secp256r1/Prover.toml delete mode 100644 test_programs/execution_success/brillig_ecdsa_secp256r1/src/main.nr delete mode 100644 test_programs/execution_success/brillig_hash_to_field/Nargo.toml delete mode 100644 test_programs/execution_success/brillig_hash_to_field/Prover.toml delete mode 100644 test_programs/execution_success/brillig_hash_to_field/src/main.nr delete mode 100644 test_programs/execution_success/brillig_keccak/Nargo.toml delete mode 100644 test_programs/execution_success/brillig_keccak/Prover.toml delete mode 100644 test_programs/execution_success/brillig_keccak/src/main.nr delete mode 100644 test_programs/execution_success/brillig_loop/Nargo.toml delete mode 100644 test_programs/execution_success/brillig_loop/Prover.toml delete mode 100644 test_programs/execution_success/brillig_loop/src/main.nr delete mode 100644 test_programs/execution_success/brillig_references/Nargo.toml delete mode 100644 test_programs/execution_success/brillig_references/Prover.toml delete mode 100644 test_programs/execution_success/brillig_references/src/main.nr delete mode 100644 test_programs/execution_success/brillig_sha256/Nargo.toml delete mode 100644 test_programs/execution_success/brillig_sha256/Prover.toml delete mode 100644 test_programs/execution_success/brillig_sha256/src/main.nr delete mode 100644 test_programs/execution_success/brillig_slices/Nargo.toml delete mode 100644 test_programs/execution_success/brillig_slices/Prover.toml delete mode 100644 test_programs/execution_success/brillig_slices/src/main.nr diff --git a/test_programs/execution_success/assert/src/main.nr b/test_programs/execution_success/assert/src/main.nr index 00e94414c0b..b95665d502b 100644 --- a/test_programs/execution_success/assert/src/main.nr +++ b/test_programs/execution_success/assert/src/main.nr @@ -1,3 +1,10 @@ fn main(x: Field) { assert(x == 1); + assert(1 == conditional(x as bool)); +} + +fn conditional(x: bool) -> Field { + assert(x, f"Expected x to be true but got {x}"); + assert_eq(x, true, f"Expected x to be true but got {x}"); + 1 } diff --git a/test_programs/execution_success/brillig_array_to_slice/Nargo.toml b/test_programs/execution_success/brillig_array_to_slice/Nargo.toml deleted file mode 100644 index 58157c38c26..00000000000 --- a/test_programs/execution_success/brillig_array_to_slice/Nargo.toml +++ /dev/null @@ -1,7 +0,0 @@ -[package] -name = "brillig_array_to_slice" -type = "bin" -authors = [""] -compiler_version = ">=0.25.0" - -[dependencies] \ No newline at end of file diff 
--git a/test_programs/execution_success/brillig_array_to_slice/Prover.toml b/test_programs/execution_success/brillig_array_to_slice/Prover.toml deleted file mode 100644 index 11497a473bc..00000000000 --- a/test_programs/execution_success/brillig_array_to_slice/Prover.toml +++ /dev/null @@ -1 +0,0 @@ -x = "0" diff --git a/test_programs/execution_success/brillig_array_to_slice/src/main.nr b/test_programs/execution_success/brillig_array_to_slice/src/main.nr deleted file mode 100644 index f54adb39963..00000000000 --- a/test_programs/execution_success/brillig_array_to_slice/src/main.nr +++ /dev/null @@ -1,20 +0,0 @@ -unconstrained fn brillig_as_slice(x: Field) -> (u32, Field, Field) { - let mut dynamic: [Field; 1] = [1]; - dynamic[x] = 2; - assert(dynamic[0] == 2); - - let brillig_slice = dynamic.as_slice(); - assert(brillig_slice.len() == 1); - - (brillig_slice.len(), dynamic[0], brillig_slice[0]) -} - -fn main(x: Field) { - unsafe { - let (slice_len, dynamic_0, slice_0) = brillig_as_slice(x); - assert(slice_len == 1); - assert(dynamic_0 == 2); - assert(slice_0 == 2); - } -} - diff --git a/test_programs/execution_success/brillig_assert/Nargo.toml b/test_programs/execution_success/brillig_assert/Nargo.toml deleted file mode 100644 index b7d9231ab75..00000000000 --- a/test_programs/execution_success/brillig_assert/Nargo.toml +++ /dev/null @@ -1,6 +0,0 @@ -[package] -name = "brillig_assert" -type = "bin" -authors = [""] - -[dependencies] diff --git a/test_programs/execution_success/brillig_assert/Prover.toml b/test_programs/execution_success/brillig_assert/Prover.toml deleted file mode 100644 index 4dd6b405159..00000000000 --- a/test_programs/execution_success/brillig_assert/Prover.toml +++ /dev/null @@ -1 +0,0 @@ -x = "1" diff --git a/test_programs/execution_success/brillig_assert/src/main.nr b/test_programs/execution_success/brillig_assert/src/main.nr deleted file mode 100644 index dc0138d3f05..00000000000 --- a/test_programs/execution_success/brillig_assert/src/main.nr +++ /dev/null @@ -1,14 +0,0 @@ -// Tests a very simple program. 
-// -// The features being tested is using assert on brillig -fn main(x: Field) { - unsafe { - assert(1 == conditional(x as bool)); - } -} - -unconstrained fn conditional(x: bool) -> Field { - assert(x, f"Expected x to be false but got {x}"); - assert_eq(x, true, f"Expected x to be false but got {x}"); - 1 -} diff --git a/test_programs/execution_success/brillig_blake3/Nargo.toml b/test_programs/execution_success/brillig_blake3/Nargo.toml deleted file mode 100644 index 879476dbdcf..00000000000 --- a/test_programs/execution_success/brillig_blake3/Nargo.toml +++ /dev/null @@ -1,7 +0,0 @@ -[package] -name = "brillig_blake3" -type = "bin" -authors = [""] -compiler_version = ">=0.22.0" - -[dependencies] diff --git a/test_programs/execution_success/brillig_blake3/Prover.toml b/test_programs/execution_success/brillig_blake3/Prover.toml deleted file mode 100644 index c807701479b..00000000000 --- a/test_programs/execution_success/brillig_blake3/Prover.toml +++ /dev/null @@ -1,37 +0,0 @@ -# hello as bytes -# https://connor4312.github.io/blake3/index.html -x = [104, 101, 108, 108, 111] -result = [ - 0xea, - 0x8f, - 0x16, - 0x3d, - 0xb3, - 0x86, - 0x82, - 0x92, - 0x5e, - 0x44, - 0x91, - 0xc5, - 0xe5, - 0x8d, - 0x4b, - 0xb3, - 0x50, - 0x6e, - 0xf8, - 0xc1, - 0x4e, - 0xb7, - 0x8a, - 0x86, - 0xe9, - 0x08, - 0xc5, - 0x62, - 0x4a, - 0x67, - 0x20, - 0x0f, -] diff --git a/test_programs/execution_success/brillig_blake3/src/main.nr b/test_programs/execution_success/brillig_blake3/src/main.nr deleted file mode 100644 index 64852d775f4..00000000000 --- a/test_programs/execution_success/brillig_blake3/src/main.nr +++ /dev/null @@ -1,4 +0,0 @@ -unconstrained fn main(x: [u8; 5], result: [u8; 32]) { - let digest = std::hash::blake3(x); - assert(digest == result); -} diff --git a/test_programs/execution_success/brillig_ecdsa_secp256k1/Nargo.toml b/test_programs/execution_success/brillig_ecdsa_secp256k1/Nargo.toml deleted file mode 100644 index 495a49f2247..00000000000 --- a/test_programs/execution_success/brillig_ecdsa_secp256k1/Nargo.toml +++ /dev/null @@ -1,6 +0,0 @@ -[package] -name = "brillig_ecdsa_secp256k1" -type = "bin" -authors = [""] - -[dependencies] diff --git a/test_programs/execution_success/brillig_ecdsa_secp256k1/Prover.toml b/test_programs/execution_success/brillig_ecdsa_secp256k1/Prover.toml deleted file mode 100644 index e78fc19cb71..00000000000 --- a/test_programs/execution_success/brillig_ecdsa_secp256k1/Prover.toml +++ /dev/null @@ -1,169 +0,0 @@ - -hashed_message = [ - 0x3a, - 0x73, - 0xf4, - 0x12, - 0x3a, - 0x5c, - 0xd2, - 0x12, - 0x1f, - 0x21, - 0xcd, - 0x7e, - 0x8d, - 0x35, - 0x88, - 0x35, - 0x47, - 0x69, - 0x49, - 0xd0, - 0x35, - 0xd9, - 0xc2, - 0xda, - 0x68, - 0x06, - 0xb4, - 0x63, - 0x3a, - 0xc8, - 0xc1, - 0xe2, -] -pub_key_x = [ - 0xa0, - 0x43, - 0x4d, - 0x9e, - 0x47, - 0xf3, - 0xc8, - 0x62, - 0x35, - 0x47, - 0x7c, - 0x7b, - 0x1a, - 0xe6, - 0xae, - 0x5d, - 0x34, - 0x42, - 0xd4, - 0x9b, - 0x19, - 0x43, - 0xc2, - 0xb7, - 0x52, - 0xa6, - 0x8e, - 0x2a, - 0x47, - 0xe2, - 0x47, - 0xc7, -] -pub_key_y = [ - 0x89, - 0x3a, - 0xba, - 0x42, - 0x54, - 0x19, - 0xbc, - 0x27, - 0xa3, - 0xb6, - 0xc7, - 0xe6, - 0x93, - 0xa2, - 0x4c, - 0x69, - 0x6f, - 0x79, - 0x4c, - 0x2e, - 0xd8, - 0x77, - 0xa1, - 0x59, - 0x3c, - 0xbe, - 0xe5, - 0x3b, - 0x03, - 0x73, - 0x68, - 0xd7, -] -signature = [ - 0xe5, - 0x08, - 0x1c, - 0x80, - 0xab, - 0x42, - 0x7d, - 0xc3, - 0x70, - 0x34, - 0x6f, - 0x4a, - 0x0e, - 0x31, - 0xaa, - 0x2b, - 0xad, - 0x8d, - 0x97, - 0x98, - 0xc3, - 0x80, - 0x61, - 0xdb, - 0x9a, - 0xe5, - 0x5a, - 0x4e, - 
0x8d, - 0xf4, - 0x54, - 0xfd, - 0x28, - 0x11, - 0x98, - 0x94, - 0x34, - 0x4e, - 0x71, - 0xb7, - 0x87, - 0x70, - 0xcc, - 0x93, - 0x1d, - 0x61, - 0xf4, - 0x80, - 0xec, - 0xbb, - 0x0b, - 0x89, - 0xd6, - 0xeb, - 0x69, - 0x69, - 0x01, - 0x61, - 0xe4, - 0x9a, - 0x71, - 0x5f, - 0xcd, - 0x55, -] diff --git a/test_programs/execution_success/brillig_ecdsa_secp256k1/src/main.nr b/test_programs/execution_success/brillig_ecdsa_secp256k1/src/main.nr deleted file mode 100644 index 6bde8ac4ac7..00000000000 --- a/test_programs/execution_success/brillig_ecdsa_secp256k1/src/main.nr +++ /dev/null @@ -1,17 +0,0 @@ -// Tests a very simple program. -// -// The features being tested is ecdsa in brillig -fn main(hashed_message: [u8; 32], pub_key_x: [u8; 32], pub_key_y: [u8; 32], signature: [u8; 64]) { - unsafe { - assert(ecdsa(hashed_message, pub_key_x, pub_key_y, signature)); - } -} - -unconstrained fn ecdsa( - hashed_message: [u8; 32], - pub_key_x: [u8; 32], - pub_key_y: [u8; 32], - signature: [u8; 64], -) -> bool { - std::ecdsa_secp256k1::verify_signature(pub_key_x, pub_key_y, signature, hashed_message) -} diff --git a/test_programs/execution_success/brillig_ecdsa_secp256r1/Nargo.toml b/test_programs/execution_success/brillig_ecdsa_secp256r1/Nargo.toml deleted file mode 100644 index 0a71e782104..00000000000 --- a/test_programs/execution_success/brillig_ecdsa_secp256r1/Nargo.toml +++ /dev/null @@ -1,6 +0,0 @@ -[package] -name = "brillig_ecdsa_secp256r1" -type = "bin" -authors = [""] - -[dependencies] diff --git a/test_programs/execution_success/brillig_ecdsa_secp256r1/Prover.toml b/test_programs/execution_success/brillig_ecdsa_secp256r1/Prover.toml deleted file mode 100644 index a45f799877b..00000000000 --- a/test_programs/execution_success/brillig_ecdsa_secp256r1/Prover.toml +++ /dev/null @@ -1,20 +0,0 @@ -hashed_message = [ - 84, 112, 91, 163, 186, 175, 219, 223, 186, 140, 95, 154, 112, 247, 168, 155, 238, 152, - 217, 6, 181, 62, 49, 7, 77, 167, 186, 236, 220, 13, 169, 173, -] -pub_key_x = [ - 85, 15, 71, 16, 3, 243, 223, 151, 195, 223, 80, 106, 199, 151, 246, 114, 31, 177, 161, - 251, 123, 143, 111, 131, 210, 36, 73, 138, 101, 200, 142, 36, -] -pub_key_y = [ - 19, 96, 147, 215, 1, 46, 80, 154, 115, 113, 92, 189, 11, 0, 163, 204, 15, 244, 181, - 192, 27, 63, 250, 25, 106, 177, 251, 50, 112, 54, 184, 230, -] -signature = [ - 44, 112, 168, 208, 132, 182, 43, 252, 92, 224, 54, 65, 202, 249, 247, 42, - 212, 218, 140, 129, 191, 230, 236, 148, 135, 187, 94, 27, 239, 98, 161, 50, - 24, 173, 158, 226, 158, 175, 53, 31, 220, 80, 241, 82, 12, 66, 94, 155, - 144, 138, 7, 39, 139, 67, 176, 236, 123, 135, 39, 120, 193, 78, 7, 132 -] - - diff --git a/test_programs/execution_success/brillig_ecdsa_secp256r1/src/main.nr b/test_programs/execution_success/brillig_ecdsa_secp256r1/src/main.nr deleted file mode 100644 index 091905a3d01..00000000000 --- a/test_programs/execution_success/brillig_ecdsa_secp256r1/src/main.nr +++ /dev/null @@ -1,17 +0,0 @@ -// Tests a very simple program. 
-// -// The features being tested is ecdsa in brillig -fn main(hashed_message: [u8; 32], pub_key_x: [u8; 32], pub_key_y: [u8; 32], signature: [u8; 64]) { - unsafe { - assert(ecdsa(hashed_message, pub_key_x, pub_key_y, signature)); - } -} - -unconstrained fn ecdsa( - hashed_message: [u8; 32], - pub_key_x: [u8; 32], - pub_key_y: [u8; 32], - signature: [u8; 64], -) -> bool { - std::ecdsa_secp256r1::verify_signature(pub_key_x, pub_key_y, signature, hashed_message) -} diff --git a/test_programs/execution_success/brillig_hash_to_field/Nargo.toml b/test_programs/execution_success/brillig_hash_to_field/Nargo.toml deleted file mode 100644 index 7cfcc745f0d..00000000000 --- a/test_programs/execution_success/brillig_hash_to_field/Nargo.toml +++ /dev/null @@ -1,6 +0,0 @@ -[package] -name = "brillig_hash_to_field" -type = "bin" -authors = [""] - -[dependencies] diff --git a/test_programs/execution_success/brillig_hash_to_field/Prover.toml b/test_programs/execution_success/brillig_hash_to_field/Prover.toml deleted file mode 100644 index ecdcfd1fb00..00000000000 --- a/test_programs/execution_success/brillig_hash_to_field/Prover.toml +++ /dev/null @@ -1 +0,0 @@ -input = "27" \ No newline at end of file diff --git a/test_programs/execution_success/brillig_hash_to_field/src/main.nr b/test_programs/execution_success/brillig_hash_to_field/src/main.nr deleted file mode 100644 index 48c628020b6..00000000000 --- a/test_programs/execution_success/brillig_hash_to_field/src/main.nr +++ /dev/null @@ -1,12 +0,0 @@ -// Tests a very simple program. -// -// The features being tested is hash_to_field in brillig -fn main(input: Field) -> pub Field { - unsafe { - hash_to_field(input) - } -} - -unconstrained fn hash_to_field(input: Field) -> Field { - std::hash::hash_to_field(&[input]) -} diff --git a/test_programs/execution_success/brillig_keccak/Nargo.toml b/test_programs/execution_success/brillig_keccak/Nargo.toml deleted file mode 100644 index 8cacf2186b8..00000000000 --- a/test_programs/execution_success/brillig_keccak/Nargo.toml +++ /dev/null @@ -1,6 +0,0 @@ -[package] -name = "brillig_keccak" -type = "bin" -authors = [""] - -[dependencies] diff --git a/test_programs/execution_success/brillig_keccak/Prover.toml b/test_programs/execution_success/brillig_keccak/Prover.toml deleted file mode 100644 index d65c4011d3f..00000000000 --- a/test_programs/execution_success/brillig_keccak/Prover.toml +++ /dev/null @@ -1,35 +0,0 @@ -x = 0xbd -result = [ - 0x5a, - 0x50, - 0x2f, - 0x9f, - 0xca, - 0x46, - 0x7b, - 0x26, - 0x6d, - 0x5b, - 0x78, - 0x33, - 0x65, - 0x19, - 0x37, - 0xe8, - 0x05, - 0x27, - 0x0c, - 0xa3, - 0xf3, - 0xaf, - 0x1c, - 0x0d, - 0xd2, - 0x46, - 0x2d, - 0xca, - 0x4b, - 0x3b, - 0x1a, - 0xbf, -] diff --git a/test_programs/execution_success/brillig_keccak/src/main.nr b/test_programs/execution_success/brillig_keccak/src/main.nr deleted file mode 100644 index e5c8e5f493e..00000000000 --- a/test_programs/execution_success/brillig_keccak/src/main.nr +++ /dev/null @@ -1,26 +0,0 @@ -// Tests a very simple program. 
-// -// The features being tested is keccak256 in brillig -fn main(x: Field, result: [u8; 32]) { - unsafe { - // We use the `as` keyword here to denote the fact that we want to take just the first byte from the x Field - // The padding is taken care of by the program - let digest = keccak256([x as u8], 1); - assert(digest == result); - //#1399: variable message size - let message_size = 4; - let hash_a = keccak256([1, 2, 3, 4], message_size); - let hash_b = keccak256([1, 2, 3, 4, 0, 0, 0, 0], message_size); - - assert(hash_a == hash_b); - - let message_size_big = 8; - let hash_c = keccak256([1, 2, 3, 4, 0, 0, 0, 0], message_size_big); - - assert(hash_a != hash_c); - } -} - -unconstrained fn keccak256(data: [u8; N], msg_len: u32) -> [u8; 32] { - std::hash::keccak256(data, msg_len) -} diff --git a/test_programs/execution_success/brillig_loop/Nargo.toml b/test_programs/execution_success/brillig_loop/Nargo.toml deleted file mode 100644 index 1212397c4db..00000000000 --- a/test_programs/execution_success/brillig_loop/Nargo.toml +++ /dev/null @@ -1,6 +0,0 @@ -[package] -name = "brillig_loop" -type = "bin" -authors = [""] - -[dependencies] diff --git a/test_programs/execution_success/brillig_loop/Prover.toml b/test_programs/execution_success/brillig_loop/Prover.toml deleted file mode 100644 index 22cd5b7c12f..00000000000 --- a/test_programs/execution_success/brillig_loop/Prover.toml +++ /dev/null @@ -1 +0,0 @@ -sum = "6" diff --git a/test_programs/execution_success/brillig_loop/src/main.nr b/test_programs/execution_success/brillig_loop/src/main.nr deleted file mode 100644 index 2d073afb482..00000000000 --- a/test_programs/execution_success/brillig_loop/src/main.nr +++ /dev/null @@ -1,34 +0,0 @@ -// Tests a very simple program. -// -// The features being tested is basic looping on brillig -fn main(sum: u32) { - unsafe { - assert(loop(4) == sum); - assert(loop_incl(3) == sum); - assert(plain_loop() == sum); - } -} - -unconstrained fn loop(x: u32) -> u32 { - let mut sum = 0; - for i in 0..x { - sum = sum + i; - } - sum -} - -unconstrained fn loop_incl(x: u32) -> u32 { - let mut sum = 0; - for i in 0..=x { - sum = sum + i; - } - sum -} - -unconstrained fn plain_loop() -> u32 { - let mut sum = 0; - for i in 0..4 { - sum = sum + i; - } - sum -} diff --git a/test_programs/execution_success/brillig_references/Nargo.toml b/test_programs/execution_success/brillig_references/Nargo.toml deleted file mode 100644 index 0f64b862ba0..00000000000 --- a/test_programs/execution_success/brillig_references/Nargo.toml +++ /dev/null @@ -1,6 +0,0 @@ -[package] -name = "brillig_references" -type = "bin" -authors = [""] - -[dependencies] diff --git a/test_programs/execution_success/brillig_references/Prover.toml b/test_programs/execution_success/brillig_references/Prover.toml deleted file mode 100644 index 151faa5a9b1..00000000000 --- a/test_programs/execution_success/brillig_references/Prover.toml +++ /dev/null @@ -1 +0,0 @@ -x = "2" \ No newline at end of file diff --git a/test_programs/execution_success/brillig_references/src/main.nr b/test_programs/execution_success/brillig_references/src/main.nr deleted file mode 100644 index 47f263cf557..00000000000 --- a/test_programs/execution_success/brillig_references/src/main.nr +++ /dev/null @@ -1,53 +0,0 @@ -unconstrained fn main(mut x: Field) { - add1(&mut x); - assert(x == 3); - // https://github.com/noir-lang/noir/issues/1899 - // let mut s = S { y: x }; - // s.add2(); - // assert(s.y == 5); - // Test that normal mutable variables are still copied - let mut a = 0; 
- mutate_copy(a); - assert(a == 0); - // Test something 3 allocations deep - let mut nested_allocations = Nested { y: &mut &mut 0 }; - add1(*nested_allocations.y); - assert(**nested_allocations.y == 1); - // Test nested struct allocations with a mutable reference to an array. - let mut c = C { foo: 0, bar: &mut C2 { array: &mut [1, 2] } }; - *c.bar.array = [3, 4]; - let arr: [Field; 2] = *c.bar.array; - assert(arr[0] == 3); - assert(arr[1] == 4); -} - -unconstrained fn add1(x: &mut Field) { - *x += 1; -} - -struct S { - y: Field, -} - -struct Nested { - y: &mut &mut Field, -} - -struct C { - foo: Field, - bar: &mut C2, -} - -struct C2 { - array: &mut [Field; 2], -} - -impl S { - unconstrained fn add2(&mut self) { - self.y += 2; - } -} - -unconstrained fn mutate_copy(mut a: Field) { - a = 7; -} diff --git a/test_programs/execution_success/brillig_sha256/Nargo.toml b/test_programs/execution_success/brillig_sha256/Nargo.toml deleted file mode 100644 index 7140fa0fd0b..00000000000 --- a/test_programs/execution_success/brillig_sha256/Nargo.toml +++ /dev/null @@ -1,6 +0,0 @@ -[package] -name = "brillig_sha256" -type = "bin" -authors = [""] - -[dependencies] diff --git a/test_programs/execution_success/brillig_sha256/Prover.toml b/test_programs/execution_success/brillig_sha256/Prover.toml deleted file mode 100644 index 374ae90ad78..00000000000 --- a/test_programs/execution_success/brillig_sha256/Prover.toml +++ /dev/null @@ -1,35 +0,0 @@ -x = 0xbd -result = [ - 0x68, - 0x32, - 0x57, - 0x20, - 0xaa, - 0xbd, - 0x7c, - 0x82, - 0xf3, - 0x0f, - 0x55, - 0x4b, - 0x31, - 0x3d, - 0x05, - 0x70, - 0xc9, - 0x5a, - 0xcc, - 0xbb, - 0x7d, - 0xc4, - 0xb5, - 0xaa, - 0xe1, - 0x12, - 0x04, - 0xc0, - 0x8f, - 0xfe, - 0x73, - 0x2b, -] diff --git a/test_programs/execution_success/brillig_sha256/src/main.nr b/test_programs/execution_success/brillig_sha256/src/main.nr deleted file mode 100644 index e574676965d..00000000000 --- a/test_programs/execution_success/brillig_sha256/src/main.nr +++ /dev/null @@ -1,15 +0,0 @@ -// Tests a very simple program. 
-// -// The features being tested is sha256 in brillig -fn main(x: Field, result: [u8; 32]) { - unsafe { - assert(result == sha256(x)); - } -} - -unconstrained fn sha256(x: Field) -> [u8; 32] { - // We use the `as` keyword here to denote the fact that we want to take just the first byte from the x Field - // The padding is taken care of by the program - std::hash::sha256([x as u8]) -} - diff --git a/test_programs/execution_success/brillig_slices/Nargo.toml b/test_programs/execution_success/brillig_slices/Nargo.toml deleted file mode 100644 index 5f6caad088a..00000000000 --- a/test_programs/execution_success/brillig_slices/Nargo.toml +++ /dev/null @@ -1,5 +0,0 @@ -[package] -name = "brillig_slices" -type = "bin" -authors = [""] -[dependencies] diff --git a/test_programs/execution_success/brillig_slices/Prover.toml b/test_programs/execution_success/brillig_slices/Prover.toml deleted file mode 100644 index f28f2f8cc48..00000000000 --- a/test_programs/execution_success/brillig_slices/Prover.toml +++ /dev/null @@ -1,2 +0,0 @@ -x = "5" -y = "10" diff --git a/test_programs/execution_success/brillig_slices/src/main.nr b/test_programs/execution_success/brillig_slices/src/main.nr deleted file mode 100644 index f3928cbc026..00000000000 --- a/test_programs/execution_success/brillig_slices/src/main.nr +++ /dev/null @@ -1,144 +0,0 @@ -use std::slice; -unconstrained fn main(x: Field, y: Field) { - let mut slice: [Field] = &[y, x]; - assert(slice.len() == 2); - - slice = slice.push_back(7); - assert(slice.len() == 3); - assert(slice[0] == y); - assert(slice[1] == x); - assert(slice[2] == 7); - // Array set on slice target - slice[0] = x; - slice[1] = y; - slice[2] = 1; - - assert(slice[0] == x); - assert(slice[1] == y); - assert(slice[2] == 1); - - slice = push_front_to_slice(slice, 2); - assert(slice.len() == 4); - assert(slice[0] == 2); - assert(slice[1] == x); - assert(slice[2] == y); - assert(slice[3] == 1); - - let (item, popped_front_slice) = slice.pop_front(); - slice = popped_front_slice; - assert(item == 2); - - assert(slice.len() == 3); - assert(slice[0] == x); - assert(slice[1] == y); - assert(slice[2] == 1); - - let (popped_back_slice, another_item) = slice.pop_back(); - slice = popped_back_slice; - assert(another_item == 1); - - assert(slice.len() == 2); - assert(slice[0] == x); - assert(slice[1] == y); - - slice = slice.insert(1, 2); - assert(slice.len() == 3); - assert(slice[0] == x); - assert(slice[1] == 2); - assert(slice[2] == y); - - let (removed_slice, should_be_2) = slice.remove(1); - slice = removed_slice; - assert(should_be_2 == 2); - - assert(slice.len() == 2); - assert(slice[0] == x); - assert(slice[1] == y); - - let (slice_with_only_x, should_be_y) = slice.remove(1); - slice = slice_with_only_x; - assert(should_be_y == y); - - assert(slice.len() == 1); - assert(slice[0] == x); - - let (empty_slice, should_be_x) = slice.remove(0); - assert(should_be_x == x); - assert(empty_slice.len() == 0); - - regression_merge_slices(x, y); -} -// Tests slice passing to/from functions -unconstrained fn push_front_to_slice(slice: [T], item: T) -> [T] { - slice.push_front(item) -} -// The parameters to this function must come from witness values (inputs to main) -unconstrained fn regression_merge_slices(x: Field, y: Field) { - merge_slices_if(x, y); - merge_slices_else(x); -} - -unconstrained fn merge_slices_if(x: Field, y: Field) { - let slice = merge_slices_return(x, y); - assert(slice[2] == 10); - assert(slice.len() == 3); - - let slice = merge_slices_mutate(x, y); - assert(slice[3] == 5); - 
assert(slice.len() == 4); - - let slice = merge_slices_mutate_in_loop(x, y); - assert(slice[6] == 4); - assert(slice.len() == 7); -} - -unconstrained fn merge_slices_else(x: Field) { - let slice = merge_slices_return(x, 5); - assert(slice[0] == 0); - assert(slice[1] == 0); - assert(slice.len() == 2); - - let slice = merge_slices_mutate(x, 5); - assert(slice[2] == 5); - assert(slice.len() == 3); - - let slice = merge_slices_mutate_in_loop(x, 5); - assert(slice[2] == 5); - assert(slice.len() == 3); -} -// Test returning a merged slice without a mutation -unconstrained fn merge_slices_return(x: Field, y: Field) -> [Field] { - let slice = &[0; 2]; - if x != y { - if x != 20 { - slice.push_back(y) - } else { - slice - } - } else { - slice - } -} -// Test mutating a slice inside of an if statement -unconstrained fn merge_slices_mutate(x: Field, y: Field) -> [Field] { - let mut slice = &[0; 2]; - if x != y { - slice = slice.push_back(y); - slice = slice.push_back(x); - } else { - slice = slice.push_back(x); - } - slice -} -// Test mutating a slice inside of a loop in an if statement -unconstrained fn merge_slices_mutate_in_loop(x: Field, y: Field) -> [Field] { - let mut slice = &[0; 2]; - if x != y { - for i in 0..5 { - slice = slice.push_back(i as Field); - } - } else { - slice = slice.push_back(x); - } - slice -} diff --git a/test_programs/execution_success/loop/src/main.nr b/test_programs/execution_success/loop/src/main.nr index 4482fdb3443..b3be4c4c3ff 100644 --- a/test_programs/execution_success/loop/src/main.nr +++ b/test_programs/execution_success/loop/src/main.nr @@ -4,6 +4,7 @@ fn main(six_as_u32: u32) { assert_eq(loop(4), six_as_u32); assert_eq(loop_incl(3), six_as_u32); + assert(plain_loop() == six_as_u32); } fn loop(x: u32) -> u32 { @@ -21,3 +22,11 @@ fn loop_incl(x: u32) -> u32 { } sum } + +fn plain_loop() -> u32 { + let mut sum = 0; + for i in 0..4 { + sum = sum + i; + } + sum +} diff --git a/test_programs/execution_success/sha256/src/main.nr b/test_programs/execution_success/sha256/src/main.nr index d26d916ccff..8e5e46b9837 100644 --- a/test_programs/execution_success/sha256/src/main.nr +++ b/test_programs/execution_success/sha256/src/main.nr @@ -17,6 +17,9 @@ fn main(x: Field, result: [u8; 32], input: [u8; 2], toggle: bool) { // docs:end:sha256_var assert(digest == result); + let digest = std::hash::sha256([x as u8]); + assert(digest == result); + // variable size let size: Field = 1 + toggle as Field; let var_sha = std::hash::sha256_var(input, size as u64); From f291e3702589a5cd043acfded5e187f56ec765cc Mon Sep 17 00:00:00 2001 From: Tom French <15848336+TomAFrench@users.noreply.github.com> Date: Tue, 19 Nov 2024 10:02:41 +0000 Subject: [PATCH 021/109] feat: simplify constant MSM calls in SSA (#6547) --- .../src/embedded_curve_ops.rs | 1 - .../src/ssa/ir/instruction/call.rs | 4 +- .../src/ssa/ir/instruction/call/blackbox.rs | 58 +++++++++++++++++++ .../Nargo.toml | 6 ++ .../src/main.nr | 12 ++++ 5 files changed, 79 insertions(+), 2 deletions(-) create mode 100644 test_programs/compile_success_empty/embedded_curve_msm_simplification/Nargo.toml create mode 100644 test_programs/compile_success_empty/embedded_curve_msm_simplification/src/main.nr diff --git a/acvm-repo/bn254_blackbox_solver/src/embedded_curve_ops.rs b/acvm-repo/bn254_blackbox_solver/src/embedded_curve_ops.rs index 35cc68051d7..a02711fda1e 100644 --- a/acvm-repo/bn254_blackbox_solver/src/embedded_curve_ops.rs +++ b/acvm-repo/bn254_blackbox_solver/src/embedded_curve_ops.rs @@ -16,7 +16,6 @@ pub fn multi_scalar_mul( 
scalars_hi: &[FieldElement], ) -> Result<(FieldElement, FieldElement, FieldElement), BlackBoxResolutionError> { if points.len() != 3 * scalars_lo.len() || scalars_lo.len() != scalars_hi.len() { - dbg!(&points.len(), &scalars_lo.len(), &scalars_hi.len()); return Err(BlackBoxResolutionError::Failed( BlackBoxFunc::MultiScalarMul, "Points and scalars must have the same length".to_string(), diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs index 5f0626b7ee8..e1e967b9a43 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs @@ -572,7 +572,9 @@ fn simplify_black_box_func( acvm::blackbox_solver::ecdsa_secp256r1_verify, ), - BlackBoxFunc::MultiScalarMul => SimplifyResult::None, + BlackBoxFunc::MultiScalarMul => { + blackbox::simplify_msm(dfg, solver, arguments, block, call_stack) + } BlackBoxFunc::EmbeddedCurveAdd => { blackbox::simplify_ec_add(dfg, solver, arguments, block, call_stack) } diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction/call/blackbox.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction/call/blackbox.rs index 75405a87181..4f2a31e2fb0 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction/call/blackbox.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction/call/blackbox.rs @@ -63,6 +63,64 @@ pub(super) fn simplify_ec_add( } } +pub(super) fn simplify_msm( + dfg: &mut DataFlowGraph, + solver: impl BlackBoxFunctionSolver, + arguments: &[ValueId], + block: BasicBlockId, + call_stack: &CallStack, +) -> SimplifyResult { + // TODO: Handle MSMs where a subset of the terms are constant. + match (dfg.get_array_constant(arguments[0]), dfg.get_array_constant(arguments[1])) { + (Some((points, _)), Some((scalars, _))) => { + let Some(points) = points + .into_iter() + .map(|id| dfg.get_numeric_constant(id)) + .collect::>>() + else { + return SimplifyResult::None; + }; + + let Some(scalars) = scalars + .into_iter() + .map(|id| dfg.get_numeric_constant(id)) + .collect::>>() + else { + return SimplifyResult::None; + }; + + let mut scalars_lo = Vec::new(); + let mut scalars_hi = Vec::new(); + for (i, scalar) in scalars.into_iter().enumerate() { + if i % 2 == 0 { + scalars_lo.push(scalar); + } else { + scalars_hi.push(scalar); + } + } + + let Ok((result_x, result_y, result_is_infinity)) = + solver.multi_scalar_mul(&points, &scalars_lo, &scalars_hi) + else { + return SimplifyResult::None; + }; + + let result_x = dfg.make_constant(result_x, Type::field()); + let result_y = dfg.make_constant(result_y, Type::field()); + let result_is_infinity = dfg.make_constant(result_is_infinity, Type::bool()); + + let elements = im::vector![result_x, result_y, result_is_infinity]; + let typ = Type::Array(Arc::new(vec![Type::field()]), 3); + let instruction = Instruction::MakeArray { elements, typ }; + let result_array = + dfg.insert_instruction_and_results(instruction, block, None, call_stack.clone()); + + SimplifyResult::SimplifiedTo(result_array.first()) + } + _ => SimplifyResult::None, + } +} + pub(super) fn simplify_poseidon2_permutation( dfg: &mut DataFlowGraph, solver: impl BlackBoxFunctionSolver, diff --git a/test_programs/compile_success_empty/embedded_curve_msm_simplification/Nargo.toml b/test_programs/compile_success_empty/embedded_curve_msm_simplification/Nargo.toml new file mode 100644 index 00000000000..9c9bd8de04a --- /dev/null +++ b/test_programs/compile_success_empty/embedded_curve_msm_simplification/Nargo.toml @@ -0,0 +1,6 @@ 
+[package] +name = "embedded_curve_msm_simplification" +type = "bin" +authors = [""] + +[dependencies] diff --git a/test_programs/compile_success_empty/embedded_curve_msm_simplification/src/main.nr b/test_programs/compile_success_empty/embedded_curve_msm_simplification/src/main.nr new file mode 100644 index 00000000000..e5aaa0f4d15 --- /dev/null +++ b/test_programs/compile_success_empty/embedded_curve_msm_simplification/src/main.nr @@ -0,0 +1,12 @@ +fn main() { + let pub_x = 0x0000000000000000000000000000000000000000000000000000000000000001; + let pub_y = 0x0000000000000002cf135e7506a45d632d270d45f1181294833fc48d823f272c; + + let g1_y = 17631683881184975370165255887551781615748388533673675138860; + let g1 = std::embedded_curve_ops::EmbeddedCurvePoint { x: 1, y: g1_y, is_infinite: false }; + let scalar = std::embedded_curve_ops::EmbeddedCurveScalar { lo: 1, hi: 0 }; + // Test that multi_scalar_mul correctly derives the public key + let res = std::embedded_curve_ops::multi_scalar_mul([g1], [scalar]); + assert(res.x == pub_x); + assert(res.y == pub_y); +} From e1510ec248b2bc35a02ec14aac5af4a20ee3caf7 Mon Sep 17 00:00:00 2001 From: Tom French <15848336+TomAFrench@users.noreply.github.com> Date: Tue, 19 Nov 2024 13:23:42 +0000 Subject: [PATCH 022/109] chore: revert #6375 (#6552) --- compiler/noirc_evaluator/src/ssa/function_builder/mod.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs b/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs index 6f76585336a..efa49b480c1 100644 --- a/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs +++ b/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs @@ -458,7 +458,13 @@ impl FunctionBuilder { match self.type_of_value(value) { Type::Numeric(_) => (), Type::Function => (), - Type::Reference(_) => (), + Type::Reference(element) => { + if element.contains_an_array() { + let reference = value; + let value = self.insert_load(reference, element.as_ref().clone()); + self.update_array_reference_count(value, increment); + } + } Type::Array(..) | Type::Slice(..) => { // If there are nested arrays or slices, we wait until ArrayGet // is issued to increment the count of that array. From 5f730e82f959cff9a06afea8a7bf89403850d687 Mon Sep 17 00:00:00 2001 From: Tom French <15848336+TomAFrench@users.noreply.github.com> Date: Tue, 19 Nov 2024 15:04:06 +0000 Subject: [PATCH 023/109] chore: remove some `_else_condition` tech debt (#6522) --- .../noirc_evaluator/src/ssa/ir/instruction.rs | 30 ++----- .../src/ssa/ir/instruction/call.rs | 8 +- .../noirc_evaluator/src/ssa/ir/printer.rs | 8 +- .../src/ssa/opt/flatten_cfg.rs | 79 +++++++++---------- .../src/ssa/opt/flatten_cfg/value_merger.rs | 32 ++------ .../src/ssa/opt/remove_if_else.rs | 10 +-- 6 files changed, 57 insertions(+), 110 deletions(-) diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction.rs index f56dfaa7c65..081c1f3afdf 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction.rs @@ -273,15 +273,7 @@ pub(crate) enum Instruction { /// else_value /// } /// ``` - /// - /// Where we save the result of !then_condition so that we have the same - /// ValueId for it each time. - IfElse { - then_condition: ValueId, - then_value: ValueId, - else_condition: ValueId, - else_value: ValueId, - }, + IfElse { then_condition: ValueId, then_value: ValueId, else_value: ValueId }, /// Creates a new array or slice. 
/// @@ -536,14 +528,11 @@ impl Instruction { assert_message: assert_message.clone(), } } - Instruction::IfElse { then_condition, then_value, else_condition, else_value } => { - Instruction::IfElse { - then_condition: f(*then_condition), - then_value: f(*then_value), - else_condition: f(*else_condition), - else_value: f(*else_value), - } - } + Instruction::IfElse { then_condition, then_value, else_value } => Instruction::IfElse { + then_condition: f(*then_condition), + then_value: f(*then_value), + else_value: f(*else_value), + }, Instruction::MakeArray { elements, typ } => Instruction::MakeArray { elements: elements.iter().copied().map(f).collect(), typ: typ.clone(), @@ -602,10 +591,9 @@ impl Instruction { | Instruction::RangeCheck { value, .. } => { f(*value); } - Instruction::IfElse { then_condition, then_value, else_condition, else_value } => { + Instruction::IfElse { then_condition, then_value, else_value } => { f(*then_condition); f(*then_value); - f(*else_condition); f(*else_value); } Instruction::MakeArray { elements, typ: _ } => { @@ -768,7 +756,7 @@ impl Instruction { None } } - Instruction::IfElse { then_condition, then_value, else_condition, else_value } => { + Instruction::IfElse { then_condition, then_value, else_value } => { let typ = dfg.type_of_value(*then_value); if let Some(constant) = dfg.get_numeric_constant(*then_condition) { @@ -787,13 +775,11 @@ impl Instruction { if matches!(&typ, Type::Numeric(_)) { let then_condition = *then_condition; - let else_condition = *else_condition; let result = ValueMerger::merge_numeric_values( dfg, block, then_condition, - else_condition, then_value, else_value, ); diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs index e1e967b9a43..95447f941b0 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs @@ -443,12 +443,8 @@ fn simplify_slice_push_back( let mut value_merger = ValueMerger::new(dfg, block, &mut slice_sizes, unknown, None, call_stack); - let new_slice = value_merger.merge_values( - len_not_equals_capacity, - len_equals_capacity, - set_last_slice_value, - new_slice, - ); + let new_slice = + value_merger.merge_values(len_not_equals_capacity, set_last_slice_value, new_slice); SimplifyResult::SimplifiedToMultiple(vec![new_slice_length, new_slice]) } diff --git a/compiler/noirc_evaluator/src/ssa/ir/printer.rs b/compiler/noirc_evaluator/src/ssa/ir/printer.rs index a34e9c132ec..1690d34b847 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/printer.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/printer.rs @@ -210,15 +210,11 @@ fn display_instruction_inner( Instruction::RangeCheck { value, max_bit_size, .. 
} => { writeln!(f, "range_check {} to {} bits", show(*value), *max_bit_size,) } - Instruction::IfElse { then_condition, then_value, else_condition, else_value } => { + Instruction::IfElse { then_condition, then_value, else_value } => { let then_condition = show(*then_condition); let then_value = show(*then_value); - let else_condition = show(*else_condition); let else_value = show(*else_value); - writeln!( - f, - "if {then_condition} then {then_value} else if {else_condition} then {else_value}" - ) + writeln!(f, "if {then_condition} then {then_value} else {else_value}") } Instruction::MakeArray { elements, typ } => { write!(f, "make_array [")?; diff --git a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs index 82f67a7c535..61a93aee58d 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs @@ -519,7 +519,6 @@ impl<'f> Context<'f> { let instruction = Instruction::IfElse { then_condition: cond_context.then_branch.condition, then_value: then_arg, - else_condition: cond_context.else_branch.as_ref().unwrap().condition, else_value: else_arg, }; let call_stack = cond_context.call_stack.clone(); @@ -667,13 +666,10 @@ impl<'f> Context<'f> { ) .first(); - let not = Instruction::Not(condition); - let else_condition = self.insert_instruction(not, call_stack.clone()); - let instruction = Instruction::IfElse { then_condition: condition, then_value: value, - else_condition, + else_value: previous_value, }; @@ -907,13 +903,12 @@ mod test { b0(v0: u1, v1: &mut Field): enable_side_effects v0 v2 = load v1 -> Field - v3 = not v0 - v4 = cast v0 as Field - v6 = sub Field 5, v2 - v7 = mul v4, v6 - v8 = add v2, v7 - store v8 at v1 - v9 = not v0 + v3 = cast v0 as Field + v5 = sub Field 5, v2 + v6 = mul v3, v5 + v7 = add v2, v6 + store v7 at v1 + v8 = not v0 enable_side_effects u1 1 return } @@ -945,20 +940,19 @@ mod test { b0(v0: u1, v1: &mut Field): enable_side_effects v0 v2 = load v1 -> Field - v3 = not v0 - v4 = cast v0 as Field - v6 = sub Field 5, v2 - v7 = mul v4, v6 - v8 = add v2, v7 - store v8 at v1 - v9 = not v0 - enable_side_effects v9 - v10 = load v1 -> Field - v11 = cast v9 as Field - v13 = sub Field 6, v10 - v14 = mul v11, v13 - v15 = add v10, v14 - store v15 at v1 + v3 = cast v0 as Field + v5 = sub Field 5, v2 + v6 = mul v3, v5 + v7 = add v2, v6 + store v7 at v1 + v8 = not v0 + enable_side_effects v8 + v9 = load v1 -> Field + v10 = cast v8 as Field + v12 = sub Field 6, v9 + v13 = mul v10, v12 + v14 = add v9, v13 + store v14 at v1 enable_side_effects u1 1 return } @@ -1306,24 +1300,23 @@ mod test { v9 = add v7, Field 1 v10 = cast v9 as u8 v11 = load v6 -> u8 - v12 = not v5 - v13 = cast v4 as Field - v14 = cast v11 as Field - v15 = sub v9, v14 - v16 = mul v13, v15 - v17 = add v14, v16 - v18 = cast v17 as u8 - store v18 at v6 - v19 = not v5 - enable_side_effects v19 - v20 = load v6 -> u8 + v12 = cast v4 as Field + v13 = cast v11 as Field + v14 = sub v9, v13 + v15 = mul v12, v14 + v16 = add v13, v15 + v17 = cast v16 as u8 + store v17 at v6 + v18 = not v5 + enable_side_effects v18 + v19 = load v6 -> u8 + v20 = cast v18 as Field v21 = cast v19 as Field - v22 = cast v20 as Field - v24 = sub Field 0, v22 - v25 = mul v21, v24 - v26 = add v22, v25 - v27 = cast v26 as u8 - store v27 at v6 + v23 = sub Field 0, v21 + v24 = mul v20, v23 + v25 = add v21, v24 + v26 = cast v25 as u8 + store v26 at v6 enable_side_effects u1 1 constrain v5 == u1 1 return diff --git 
a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs index 3d45c43f587..8ea26d4e96d 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs @@ -45,7 +45,7 @@ impl<'a> ValueMerger<'a> { /// Merge two values a and b from separate basic blocks to a single value. /// If these two values are numeric, the result will be - /// `then_condition * then_value + else_condition * else_value`. + /// `then_condition * (then_value - else_value) + else_value`. /// Otherwise, if the values being merged are arrays, a new array will be made /// recursively from combining each element of both input arrays. /// @@ -54,7 +54,6 @@ impl<'a> ValueMerger<'a> { pub(crate) fn merge_values( &mut self, then_condition: ValueId, - else_condition: ValueId, then_value: ValueId, else_value: ValueId, ) -> ValueId { @@ -70,15 +69,14 @@ impl<'a> ValueMerger<'a> { self.dfg, self.block, then_condition, - else_condition, then_value, else_value, ), typ @ Type::Array(_, _) => { - self.merge_array_values(typ, then_condition, else_condition, then_value, else_value) + self.merge_array_values(typ, then_condition, then_value, else_value) } typ @ Type::Slice(_) => { - self.merge_slice_values(typ, then_condition, else_condition, then_value, else_value) + self.merge_slice_values(typ, then_condition, then_value, else_value) } Type::Reference(_) => panic!("Cannot return references from an if expression"), Type::Function => panic!("Cannot return functions from an if expression"), @@ -86,12 +84,11 @@ impl<'a> ValueMerger<'a> { } /// Merge two numeric values a and b from separate basic blocks to a single value. This - /// function would return the result of `if c { a } else { b }` as `c*a + (!c)*b`. + /// function would return the result of `if c { a } else { b }` as `c * (a-b) + b`. 
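// As an illustration of the formula above: with the boolean condition `c` cast to a
// field element, `c * (a - b) + b` evaluates to `a` when `c == 1` and to `b` when
// `c == 0`, so it implements `if c { a } else { b }` with a single multiplication and
// without materializing `!c`, which the previous `c*a + (!c)*b` form required:
//   c = 1:  1 * (a - b) + b = a
//   c = 0:  0 * (a - b) + b = b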
pub(crate) fn merge_numeric_values( dfg: &mut DataFlowGraph, block: BasicBlockId, then_condition: ValueId, - _else_condition: ValueId, then_value: ValueId, else_value: ValueId, ) -> ValueId { @@ -155,7 +152,6 @@ impl<'a> ValueMerger<'a> { &mut self, typ: Type, then_condition: ValueId, - else_condition: ValueId, then_value: ValueId, else_value: ValueId, ) -> ValueId { @@ -170,7 +166,6 @@ impl<'a> ValueMerger<'a> { if let Some(result) = self.try_merge_only_changed_indices( then_condition, - else_condition, then_value, else_value, actual_length, @@ -200,12 +195,7 @@ impl<'a> ValueMerger<'a> { let then_element = get_element(then_value, typevars.clone()); let else_element = get_element(else_value, typevars); - merged.push_back(self.merge_values( - then_condition, - else_condition, - then_element, - else_element, - )); + merged.push_back(self.merge_values(then_condition, then_element, else_element)); } } @@ -218,7 +208,6 @@ impl<'a> ValueMerger<'a> { &mut self, typ: Type, then_condition: ValueId, - else_condition: ValueId, then_value_id: ValueId, else_value_id: ValueId, ) -> ValueId { @@ -276,12 +265,7 @@ impl<'a> ValueMerger<'a> { let else_element = get_element(else_value_id, typevars, else_len * element_types.len()); - merged.push_back(self.merge_values( - then_condition, - else_condition, - then_element, - else_element, - )); + merged.push_back(self.merge_values(then_condition, then_element, else_element)); } } @@ -330,7 +314,6 @@ impl<'a> ValueMerger<'a> { fn try_merge_only_changed_indices( &mut self, then_condition: ValueId, - else_condition: ValueId, then_value: ValueId, else_value: ValueId, array_length: usize, @@ -414,8 +397,7 @@ impl<'a> ValueMerger<'a> { let then_element = get_element(then_value, typevars.clone()); let else_element = get_element(else_value, typevars); - let value = - self.merge_values(then_condition, else_condition, then_element, else_element); + let value = self.merge_values(then_condition, then_element, else_element); array = self.insert_array_set(array, index, value, Some(condition)).first(); } diff --git a/compiler/noirc_evaluator/src/ssa/opt/remove_if_else.rs b/compiler/noirc_evaluator/src/ssa/opt/remove_if_else.rs index c387e0b6234..8076bc3cc99 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/remove_if_else.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/remove_if_else.rs @@ -66,10 +66,9 @@ impl Context { for instruction in instructions { match &function.dfg[instruction] { - Instruction::IfElse { then_condition, then_value, else_condition, else_value } => { + Instruction::IfElse { then_condition, then_value, else_value } => { let then_condition = *then_condition; let then_value = *then_value; - let else_condition = *else_condition; let else_value = *else_value; let typ = function.dfg.type_of_value(then_value); @@ -85,12 +84,7 @@ impl Context { call_stack, ); - let value = value_merger.merge_values( - then_condition, - else_condition, - then_value, - else_value, - ); + let value = value_merger.merge_values(then_condition, then_value, else_value); let _typ = function.dfg.type_of_value(value); let results = function.dfg.instruction_results(instruction); From 32a9ed9ad19cf81275c31ca77e4970bc1598c112 Mon Sep 17 00:00:00 2001 From: jfecher Date: Tue, 19 Nov 2024 11:15:32 -0600 Subject: [PATCH 024/109] fix: Do a shallow follow_bindings before unification (#6558) --- compiler/noirc_frontend/src/hir_def/types.rs | 22 +++++++++++++++++-- .../src/monomorphization/mod.rs | 6 +++-- 2 files changed, 24 insertions(+), 4 deletions(-) diff --git 
a/compiler/noirc_frontend/src/hir_def/types.rs b/compiler/noirc_frontend/src/hir_def/types.rs index ef8a697966c..659fafbbcbb 100644 --- a/compiler/noirc_frontend/src/hir_def/types.rs +++ b/compiler/noirc_frontend/src/hir_def/types.rs @@ -1547,12 +1547,15 @@ impl Type { ) -> Result<(), UnificationError> { use Type::*; - let lhs = match self { + let lhs = self.follow_bindings_shallow(); + let rhs = other.follow_bindings_shallow(); + + let lhs = match lhs.as_ref() { Type::InfixExpr(..) => Cow::Owned(self.canonicalize()), other => Cow::Borrowed(other), }; - let rhs = match other { + let rhs = match rhs.as_ref() { Type::InfixExpr(..) => Cow::Owned(other.canonicalize()), other => Cow::Borrowed(other), }; @@ -2386,6 +2389,21 @@ impl Type { } } + /// Follow bindings if this is a type variable or generic to the first non-typevariable + /// type. Unlike `follow_bindings`, this won't recursively follow any bindings on any + /// fields or arguments of this type. + pub fn follow_bindings_shallow(&self) -> Cow { + match self { + Type::TypeVariable(var) | Type::NamedGeneric(var, _) => { + if let TypeBinding::Bound(typ) = &*var.borrow() { + return Cow::Owned(typ.follow_bindings_shallow().into_owned()); + } + Cow::Borrowed(self) + } + other => Cow::Borrowed(other), + } + } + pub fn from_generics(generics: &GenericTypeVars) -> Vec { vecmap(generics, |var| Type::TypeVariable(var.clone())) } diff --git a/compiler/noirc_frontend/src/monomorphization/mod.rs b/compiler/noirc_frontend/src/monomorphization/mod.rs index ce2c58e71c1..dd72437ccd7 100644 --- a/compiler/noirc_frontend/src/monomorphization/mod.rs +++ b/compiler/noirc_frontend/src/monomorphization/mod.rs @@ -959,7 +959,8 @@ impl<'interner> Monomorphizer<'interner> { /// Convert a non-tuple/struct type to a monomorphized type fn convert_type(typ: &HirType, location: Location) -> Result { - Ok(match typ { + let typ = typ.follow_bindings_shallow(); + Ok(match typ.as_ref() { HirType::FieldElement => ast::Type::Field, HirType::Integer(sign, bits) => ast::Type::Integer(*sign, *bits), HirType::Bool => ast::Type::Bool, @@ -1125,7 +1126,8 @@ impl<'interner> Monomorphizer<'interner> { // Similar to `convert_type` but returns an error if any type variable can't be defaulted. fn check_type(typ: &HirType, location: Location) -> Result<(), MonomorphizationError> { - match typ { + let typ = typ.follow_bindings_shallow(); + match typ.as_ref() { HirType::FieldElement | HirType::Integer(..) | HirType::Bool From 5d5175e1c076bd651702b6c84a00c85bc4fea860 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Tue, 19 Nov 2024 18:56:52 +0000 Subject: [PATCH 025/109] feat(ssa): Unroll small loops in brillig (#6505) --- compiler/noirc_evaluator/src/lib.rs | 19 + .../noirc_evaluator/src/ssa/ir/function.rs | 8 + .../src/ssa/ir/function_inserter.rs | 1 + .../noirc_evaluator/src/ssa/ir/post_order.rs | 2 +- compiler/noirc_evaluator/src/ssa/opt/mod.rs | 3 +- .../noirc_evaluator/src/ssa/opt/unrolling.rs | 1333 +++++++++++++---- .../src/ssa/parser/into_ssa.rs | 14 +- .../noirc_evaluator/src/ssa/parser/tests.rs | 10 +- .../noirc_evaluator/src/ssa/ssa_gen/mod.rs | 8 +- 9 files changed, 1059 insertions(+), 339 deletions(-) diff --git a/compiler/noirc_evaluator/src/lib.rs b/compiler/noirc_evaluator/src/lib.rs index 3a5825699b2..193cf7bb387 100644 --- a/compiler/noirc_evaluator/src/lib.rs +++ b/compiler/noirc_evaluator/src/lib.rs @@ -28,3 +28,22 @@ pub(crate) fn trim_leading_whitespace_from_lines(src: &str) -> String { } result } + +/// Trim comments from the lines, ie. 
content starting with `//`. +#[cfg(test)] +pub(crate) fn trim_comments_from_lines(src: &str) -> String { + let mut result = String::new(); + let mut first = true; + for line in src.lines() { + if !first { + result.push('\n'); + } + if let Some(comment) = line.find("//") { + result.push_str(line[..comment].trim_end()); + } else { + result.push_str(line); + } + first = false; + } + result +} diff --git a/compiler/noirc_evaluator/src/ssa/ir/function.rs b/compiler/noirc_evaluator/src/ssa/ir/function.rs index e8245ff6036..b1233e3063e 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/function.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/function.rs @@ -46,6 +46,14 @@ impl RuntimeType { | RuntimeType::Brillig(InlineType::NoPredicates) ) } + + pub(crate) fn is_brillig(&self) -> bool { + matches!(self, RuntimeType::Brillig(_)) + } + + pub(crate) fn is_acir(&self) -> bool { + matches!(self, RuntimeType::Acir(_)) + } } /// A function holds a list of instructions. diff --git a/compiler/noirc_evaluator/src/ssa/ir/function_inserter.rs b/compiler/noirc_evaluator/src/ssa/ir/function_inserter.rs index 708b02b9102..5e133072067 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/function_inserter.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/function_inserter.rs @@ -71,6 +71,7 @@ impl<'f> FunctionInserter<'f> { } } + /// Get an instruction and make sure all the values in it are freshly resolved. pub(crate) fn map_instruction(&mut self, id: InstructionId) -> (Instruction, CallStack) { ( self.function.dfg[id].clone().map_values(|id| self.resolve(id)), diff --git a/compiler/noirc_evaluator/src/ssa/ir/post_order.rs b/compiler/noirc_evaluator/src/ssa/ir/post_order.rs index 94ff96ba1d7..398ce887b96 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/post_order.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/post_order.rs @@ -22,7 +22,7 @@ impl PostOrder { } impl PostOrder { - /// Allocate and compute a function's block post-order. Pos + /// Allocate and compute a function's block post-order. pub(crate) fn with_function(func: &Function) -> Self { PostOrder(Self::compute_post_order(func)) } diff --git a/compiler/noirc_evaluator/src/ssa/opt/mod.rs b/compiler/noirc_evaluator/src/ssa/opt/mod.rs index 098f62bceba..10e86c6601a 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/mod.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/mod.rs @@ -35,13 +35,14 @@ pub(crate) fn assert_normalized_ssa_equals(mut ssa: super::Ssa, expected: &str) panic!("`expected` argument of `assert_ssa_equals` is not valid SSA:\n{:?}", err); } - use crate::{ssa::Ssa, trim_leading_whitespace_from_lines}; + use crate::{ssa::Ssa, trim_comments_from_lines, trim_leading_whitespace_from_lines}; ssa.normalize_ids(); let ssa = ssa.to_string(); let ssa = trim_leading_whitespace_from_lines(&ssa); let expected = trim_leading_whitespace_from_lines(expected); + let expected = trim_comments_from_lines(&expected); if ssa != expected { println!("Expected:\n~~~\n{expected}\n~~~\nGot:\n~~~\n{ssa}\n~~~"); diff --git a/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs b/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs index 3ff0e630a69..89f1b2b2d7d 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs @@ -7,16 +7,20 @@ //! b. If we have previously modified any of the blocks in the loop, //! restart from step 1 to refresh the context. //! c. If not, try to unroll the loop. If successful, remember the modified -//! blocks. If unsuccessfully either error if the abort_on_error flag is set, +//! blocks. 
If unsuccessful either error if the abort_on_error flag is set,
//! or otherwise remember that the loop failed to unroll and leave it unmodified.
//!
//! Note that this pass also often creates superfluous jmp instructions in the
-//! program that will need to be removed by a later simplify cfg pass.
-//! Note also that unrolling is skipped for Brillig runtime and as a result
-//! we remove reference count instructions because they are only used by Brillig bytecode
+//! program that will need to be removed by a later simplify CFG pass.
+//!
+//! Note also that unrolling is skipped for Brillig runtime, unless the loops are deemed
+//! sufficiently small that inlining can be done without increasing the bytecode.
+//!
+//! When unrolling ACIR code, we remove reference count instructions because they are
+//! only used by Brillig bytecode.
use std::collections::HashSet;
-use acvm::acir::AcirField;
+use acvm::{acir::AcirField, FieldElement};
use crate::{
    errors::RuntimeError,
@@ -26,9 +30,9 @@ use crate::{
        cfg::ControlFlowGraph,
        dfg::{CallStack, DataFlowGraph},
        dom::DominatorTree,
-        function::{Function, RuntimeType},
+        function::Function,
        function_inserter::{ArrayCache, FunctionInserter},
-        instruction::{Instruction, TerminatorInstruction},
+        instruction::{Binary, BinaryOp, Instruction, InstructionId, TerminatorInstruction},
        post_order::PostOrder,
        value::ValueId,
    },
@@ -42,16 +46,9 @@ impl Ssa {
    /// This meta-pass will keep trying to unroll loops and simplifying the SSA until no more errors are found.
    #[tracing::instrument(level = "trace", skip(ssa))]
    pub(crate) fn unroll_loops_iteratively(mut ssa: Ssa) -> Result<Ssa, RuntimeError> {
-        let acir_functions = ssa.functions.iter_mut().filter(|(_, func)| {
-            // Loop unrolling in brillig can lead to a code explosion currently. This can
-            // also be true for ACIR, but we have no alternative to unrolling in ACIR.
-            // Brillig also generally prefers smaller code rather than faster code.
-            !matches!(func.runtime(), RuntimeType::Brillig(_))
-        });
-
-        for (_, function) in acir_functions {
+        for (_, function) in ssa.functions.iter_mut() {
            // Try to unroll loops first:
-            let mut unroll_errors = function.try_to_unroll_loops();
+            let mut unroll_errors = function.try_unroll_loops();
            // Keep unrolling until no more errors are found
            while !unroll_errors.is_empty() {
@@ -66,21 +63,24 @@ impl Ssa {
                function.mem2reg();
                // Unroll again
-                unroll_errors = function.try_to_unroll_loops();
+                unroll_errors = function.try_unroll_loops();
                // If we didn't manage to unroll any more loops, exit
                if unroll_errors.len() >= prev_unroll_err_count {
                    return Err(unroll_errors.swap_remove(0));
                }
            }
        }
-
        Ok(ssa)
    }
}
impl Function {
-    fn try_to_unroll_loops(&mut self) -> Vec<RuntimeError> {
-        find_all_loops(self).unroll_each_loop(self)
+    // Loop unrolling in brillig can lead to a code explosion currently.
+    // This can also be true for ACIR, but we have no alternative to unrolling in ACIR.
+    // Brillig also generally prefers smaller code rather than faster code,
+    // so we only attempt to unroll small loops, which we decide on a case-by-case basis.
+    fn try_unroll_loops(&mut self) -> Vec<RuntimeError> {
+        Loops::find_all(self).unroll_each(self)
    }
}
@@ -94,7 +94,7 @@ struct Loop {
    back_edge_start: BasicBlockId,
    /// All the blocks contained within the loop, including `header` and `back_edge_start`.
-    pub(crate) blocks: HashSet<BasicBlockId>,
+    blocks: HashSet<BasicBlockId>,
}
struct Loops {
@@ -107,60 +107,88 @@ struct Loops {
    cfg: ControlFlowGraph,
}
-/// Find a loop in the program by finding a node that dominates any predecessor node.
-/// The edge where this happens will be the back-edge of the loop. -fn find_all_loops(function: &Function) -> Loops { - let cfg = ControlFlowGraph::with_function(function); - let post_order = PostOrder::with_function(function); - let mut dom_tree = DominatorTree::with_cfg_and_post_order(&cfg, &post_order); - - let mut loops = vec![]; - - for (block, _) in function.dfg.basic_blocks_iter() { - // These reachable checks wouldn't be needed if we only iterated over reachable blocks - if dom_tree.is_reachable(block) { - for predecessor in cfg.predecessors(block) { - if dom_tree.is_reachable(predecessor) && dom_tree.dominates(block, predecessor) { - // predecessor -> block is the back-edge of a loop - loops.push(find_blocks_in_loop(block, predecessor, &cfg)); +impl Loops { + /// Find a loop in the program by finding a node that dominates any predecessor node. + /// The edge where this happens will be the back-edge of the loop. + /// + /// For example consider the following SSA of a basic loop: + /// ```text + /// main(): + /// v0 = ... start ... + /// v1 = ... end ... + /// jmp loop_entry(v0) + /// loop_entry(i: Field): + /// v2 = lt i v1 + /// jmpif v2, then: loop_body, else: loop_end + /// loop_body(): + /// v3 = ... body ... + /// v4 = add 1, i + /// jmp loop_entry(v4) + /// loop_end(): + /// ``` + /// + /// The CFG will look something like this: + /// ```text + /// main + /// ↓ + /// loop_entry ←---↰ + /// ↓ ↘ | + /// loop_end loop_body + /// ``` + /// `loop_entry` has two predecessors: `main` and `loop_body`, and it dominates `loop_body`. + fn find_all(function: &Function) -> Self { + let cfg = ControlFlowGraph::with_function(function); + let post_order = PostOrder::with_function(function); + let mut dom_tree = DominatorTree::with_cfg_and_post_order(&cfg, &post_order); + + let mut loops = vec![]; + + for (block, _) in function.dfg.basic_blocks_iter() { + // These reachable checks wouldn't be needed if we only iterated over reachable blocks + if dom_tree.is_reachable(block) { + for predecessor in cfg.predecessors(block) { + // In the above example, we're looking for when `block` is `loop_entry` and `predecessor` is `loop_body`. + if dom_tree.is_reachable(predecessor) && dom_tree.dominates(block, predecessor) + { + // predecessor -> block is the back-edge of a loop + loops.push(Loop::find_blocks_in_loop(block, predecessor, &cfg)); + } } } } - } - // Sort loops by block size so that we unroll the larger, outer loops of nested loops first. - // This is needed because inner loops may use the induction variable from their outer loops in - // their loop range. - loops.sort_by_key(|loop_| loop_.blocks.len()); + // Sort loops by block size so that we unroll the larger, outer loops of nested loops first. + // This is needed because inner loops may use the induction variable from their outer loops in + // their loop range. We will start popping loops from the back. + loops.sort_by_key(|loop_| loop_.blocks.len()); - Loops { - failed_to_unroll: HashSet::new(), - yet_to_unroll: loops, - modified_blocks: HashSet::new(), - cfg, + Self { + failed_to_unroll: HashSet::new(), + yet_to_unroll: loops, + modified_blocks: HashSet::new(), + cfg, + } } -} -impl Loops { /// Unroll all loops within a given function. /// Any loops which fail to be unrolled (due to using non-constant indices) will be unmodified. 
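// A sketch of how the ordering set up in `find_all` plays out, assuming a hypothetical
// nested loop such as `for i in 0..3 { for j in 0..i { ... } }`: the inner loop's upper
// bound is the outer induction variable `i`, so it only becomes a constant once the outer
// loop has been unrolled. The outer loop spans more blocks, so after sorting by block
// count and popping from the back of `yet_to_unroll` it is processed first; the unrolled
// copies of the inner loop are then rediscovered and handled by the refreshed pass that
// the `modified_blocks` check below triggers.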
- fn unroll_each_loop(mut self, function: &mut Function) -> Vec { + fn unroll_each(mut self, function: &mut Function) -> Vec { let mut unroll_errors = vec![]; while let Some(next_loop) = self.yet_to_unroll.pop() { + if function.runtime().is_brillig() && !next_loop.is_small_loop(function, &self.cfg) { + continue; + } // If we've previously modified a block in this loop we need to refresh the context. // This happens any time we have nested loops. if next_loop.blocks.iter().any(|block| self.modified_blocks.contains(block)) { - let mut new_context = find_all_loops(function); - new_context.failed_to_unroll = self.failed_to_unroll; - return unroll_errors - .into_iter() - .chain(new_context.unroll_each_loop(function)) - .collect(); + let mut new_loops = Self::find_all(function); + new_loops.failed_to_unroll = self.failed_to_unroll; + return unroll_errors.into_iter().chain(new_loops.unroll_each(function)).collect(); } // Don't try to unroll the loop again if it is known to fail if !self.failed_to_unroll.contains(&next_loop.header) { - match unroll_loop(function, &self.cfg, &next_loop) { + match next_loop.unroll(function, &self.cfg) { Ok(_) => self.modified_blocks.extend(next_loop.blocks), Err(call_stack) => { self.failed_to_unroll.insert(next_loop.header); @@ -173,83 +201,522 @@ impl Loops { } } -/// Return each block that is in a loop starting in the given header block. -/// Expects back_edge_start -> header to be the back edge of the loop. -fn find_blocks_in_loop( - header: BasicBlockId, - back_edge_start: BasicBlockId, - cfg: &ControlFlowGraph, -) -> Loop { - let mut blocks = HashSet::new(); - blocks.insert(header); - - let mut insert = |block, stack: &mut Vec| { - if !blocks.contains(&block) { - blocks.insert(block); - stack.push(block); +impl Loop { + /// Return each block that is in a loop starting in the given header block. + /// Expects back_edge_start -> header to be the back edge of the loop. + fn find_blocks_in_loop( + header: BasicBlockId, + back_edge_start: BasicBlockId, + cfg: &ControlFlowGraph, + ) -> Self { + let mut blocks = HashSet::new(); + blocks.insert(header); + + let mut insert = |block, stack: &mut Vec| { + if !blocks.contains(&block) { + blocks.insert(block); + stack.push(block); + } + }; + + // Starting from the back edge of the loop, each predecessor of this block until + // the header is within the loop. + let mut stack = vec![]; + insert(back_edge_start, &mut stack); + + while let Some(block) = stack.pop() { + for predecessor in cfg.predecessors(block) { + insert(predecessor, &mut stack); + } + } + + Self { header, back_edge_start, blocks } + } + + /// Find the lower bound of the loop in the pre-header and return it + /// if it's a numeric constant, which it will be if the previous SSA + /// steps managed to inline it. + /// + /// Consider the following example of a `for i in 0..4` loop: + /// ```text + /// brillig(inline) fn main f0 { + /// b0(v0: u32): // Pre-header + /// ... 
+ /// jmp b1(u32 0) // Lower-bound + /// b1(v1: u32): // Induction variable + /// v5 = lt v1, u32 4 + /// jmpif v5 then: b3, else: b2 + /// ``` + fn get_const_lower_bound( + &self, + function: &Function, + cfg: &ControlFlowGraph, + ) -> Result, CallStack> { + let pre_header = self.get_pre_header(function, cfg)?; + let jump_value = get_induction_variable(function, pre_header)?; + Ok(function.dfg.get_numeric_constant(jump_value)) + } + + /// Find the upper bound of the loop in the loop header and return it + /// if it's a numeric constant, which it will be if the previous SSA + /// steps managed to inline it. + /// + /// Consider the following example of a `for i in 0..4` loop: + /// ```text + /// brillig(inline) fn main f0 { + /// b0(v0: u32): + /// ... + /// jmp b1(u32 0) + /// b1(v1: u32): // Loop header + /// v5 = lt v1, u32 4 // Upper bound + /// jmpif v5 then: b3, else: b2 + /// ``` + fn get_const_upper_bound(&self, function: &Function) -> Option { + let block = &function.dfg[self.header]; + let instructions = block.instructions(); + assert_eq!( + instructions.len(), + 1, + "The header should just compare the induction variable and jump" + ); + match &function.dfg[instructions[0]] { + Instruction::Binary(Binary { lhs: _, operator: BinaryOp::Lt, rhs }) => { + function.dfg.get_numeric_constant(*rhs) + } + Instruction::Binary(Binary { lhs: _, operator: BinaryOp::Eq, rhs }) => { + // `for i in 0..1` is turned into: + // b1(v0: u32): + // v12 = eq v0, u32 0 + // jmpif v12 then: b3, else: b2 + function.dfg.get_numeric_constant(*rhs).map(|c| c + FieldElement::one()) + } + other => panic!("Unexpected instruction in header: {other:?}"), } - }; + } - // Starting from the back edge of the loop, each predecessor of this block until - // the header is within the loop. - let mut stack = vec![]; - insert(back_edge_start, &mut stack); + /// Get the lower and upper bounds of the loop if both are constant numeric values. + fn get_const_bounds( + &self, + function: &Function, + cfg: &ControlFlowGraph, + ) -> Result, CallStack> { + let Some(lower) = self.get_const_lower_bound(function, cfg)? else { + return Ok(None); + }; + let Some(upper) = self.get_const_upper_bound(function) else { + return Ok(None); + }; + Ok(Some((lower, upper))) + } - while let Some(block) = stack.pop() { - for predecessor in cfg.predecessors(block) { - insert(predecessor, &mut stack); + /// Unroll a single loop in the function. + /// Returns Ok(()) if it succeeded, Err(callstack) if it failed, + /// where the callstack indicates the location of the instruction + /// that could not be processed, or empty if such information was + /// not available. + /// + /// Consider this example: + /// ```text + /// main(): + /// v0 = 0 + /// v1 = 2 + /// jmp loop_entry(v0) + /// loop_entry(i: Field): + /// v2 = lt i v1 + /// jmpif v2, then: loop_body, else: loop_end + /// ``` + /// + /// The first step is to unroll the header by recognizing that jump condition + /// is a constant, which means it will go to `loop_body`: + /// ```text + /// main(): + /// v0 = 0 + /// v1 = 2 + /// v2 = lt v0 v1 + /// // jmpif v2, then: loop_body, else: loop_end + /// jmp dest: loop_body + /// ``` + /// + /// Following that we unroll the loop body, which is the next source, replace + /// the induction variable with the new value created in the body, and have + /// another go at the header. + /// ```text + /// main(): + /// v0 = 0 + /// v1 = 2 + /// v2 = lt v0 v1 + /// v3 = ... body ... 
+ /// v4 = add 1, 0 + /// jmp loop_entry(v4) + /// ``` + /// + /// At the end we reach a point where the condition evaluates to 0 and we jump to the end. + /// ```text + /// main(): + /// v0 = 0 + /// v1 = 2 + /// v2 = lt 0 + /// v3 = ... body ... + /// v4 = add 1, v0 + /// v5 = lt v4 v1 + /// v6 = ... body ... + /// v7 = add v4, 1 + /// v8 = lt v5 v1 + /// jmp loop_end + /// ``` + /// + /// When e.g. `v8 = lt v5 v1` cannot be evaluated to a constant, the loop signals by returning `Err` + /// that a few SSA passes are required to evaluate and simplify these values. + fn unroll(&self, function: &mut Function, cfg: &ControlFlowGraph) -> Result<(), CallStack> { + let mut unroll_into = self.get_pre_header(function, cfg)?; + let mut jump_value = get_induction_variable(function, unroll_into)?; + let mut array_cache = Some(ArrayCache::default()); + + while let Some(mut context) = self.unroll_header(function, unroll_into, jump_value)? { + // The inserter's array cache must be explicitly enabled. This is to + // confirm that we're inserting in insertion order. This is true here since: + // 1. We have a fresh inserter for each loop + // 2. Each loop is unrolled in iteration order + // + // Within a loop we do not insert in insertion order. This is fine however since the + // array cache is buffered with a separate fresh_array_cache which collects arrays + // but does not deduplicate. When we later call `into_array_cache`, that will merge + // the fresh cache in with the old one so that each iteration of the loop can cache + // from previous iterations but not the current iteration. + context.inserter.set_array_cache(array_cache, unroll_into); + (unroll_into, jump_value, array_cache) = context.unroll_loop_iteration(); } + + Ok(()) + } + + /// The loop pre-header is the block that comes before the loop begins. Generally a header block + /// is expected to have 2 predecessors: the pre-header and the final block of the loop which jumps + /// back to the beginning. Other predecessors can come from `break` or `continue`. + fn get_pre_header( + &self, + function: &Function, + cfg: &ControlFlowGraph, + ) -> Result { + let mut pre_header = cfg + .predecessors(self.header) + .filter(|predecessor| *predecessor != self.back_edge_start) + .collect::>(); + + if function.runtime().is_acir() { + assert_eq!(pre_header.len(), 1); + Ok(pre_header.remove(0)) + } else if pre_header.len() == 1 { + Ok(pre_header.remove(0)) + } else { + // We can come back into the header from multiple blocks, so we can't unroll this. + Err(CallStack::new()) + } + } + + /// Unrolls the header block of the loop. This is the block that dominates all other blocks in the + /// loop and contains the jmpif instruction that lets us know if we should continue looping. + /// Returns Some(iteration context) if we should perform another iteration. + fn unroll_header<'a>( + &'a self, + function: &'a mut Function, + unroll_into: BasicBlockId, + induction_value: ValueId, + ) -> Result>, CallStack> { + // We insert into a fresh block first and move instructions into the unroll_into block later + // only once we verify the jmpif instruction has a constant condition. If it does not, we can + // just discard this fresh block and leave the loop unmodified. 
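        // To make this concrete with assumed values: if the header is
        //   b1(v1: u32): v5 = lt v1, u32 4; jmpif v5 then: b3, else: b2
        // and the current induction value is `u32 2`, copying the header with `v1 := u32 2`
        // simplifies `v5` to the constant `u1 1`, the jmpif collapses to the single known
        // successor `b3`, and unrolling continues. If `v5` does not simplify to a constant,
        // the condition's call stack is returned as the error instead.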
+ let fresh_block = function.dfg.make_block(); + + let mut context = LoopIteration::new(function, self, fresh_block, self.header); + let source_block = &context.dfg()[context.source_block]; + assert_eq!(source_block.parameters().len(), 1, "Expected only 1 argument in loop header"); + + // Insert the current value of the loop induction variable into our context. + let first_param = source_block.parameters()[0]; + context.inserter.try_map_value(first_param, induction_value); + // Copy over all instructions and a fresh terminator. + context.inline_instructions_from_block(); + // Mutate the terminator if possible so that it points at the iteration block. + match context.dfg()[fresh_block].unwrap_terminator() { + TerminatorInstruction::JmpIf { condition, then_destination, else_destination, call_stack } => { + let condition = *condition; + let next_blocks = context.handle_jmpif(condition, *then_destination, *else_destination, call_stack.clone()); + + // If there is only 1 next block the jmpif evaluated to a single known block. + // This is the expected case and lets us know if we should loop again or not. + if next_blocks.len() == 1 { + context.dfg_mut().inline_block(fresh_block, unroll_into); + + // The fresh block is gone now so we're committing to insert into the original + // unroll_into block from now on. + context.insert_block = unroll_into; + + // In the last iteration, `handle_jmpif` will have replaced `context.source_block` + // with the `else_destination`, that is, the `loop_end`, which signals that we + // have no more loops to unroll, because that block was not part of the loop itself, + // ie. it wasn't between `loop_header` and `loop_body`. Otherwise we have the `loop_body` + // in `source_block` and can unroll that into the destination. + Ok(self.blocks.contains(&context.source_block).then_some(context)) + } else { + // If this case is reached the loop either uses non-constant indices or we need + // another pass, such as mem2reg to resolve them to constants. + Err(context.inserter.function.dfg.get_value_call_stack(condition)) + } + } + other => unreachable!("Expected loop header to terminate in a JmpIf to the loop body, but found {other:?} instead"), + } + } + + /// Find all reference values which were allocated before the pre-header. + /// + /// These are accessible inside the loop body, and they can be involved + /// in load/store operations that could be eliminated if we unrolled the + /// body into the pre-header. 
+ /// + /// Consider this loop: + /// ```text + /// let mut sum = 0; + /// let mut arr = &[]; + /// for i in 0..3 { + /// sum = sum + i; + /// arr.push_back(sum) + /// } + /// sum + /// ``` + /// + /// The SSA has a load+store for the `sum` and a load+push for the `arr`: + /// ```text + /// b0(v0: u32): + /// v2 = allocate -> &mut u32 // reference allocated for `sum` + /// store u32 0 at v2 // initial value for `sum` + /// v4 = allocate -> &mut u32 // reference allocated for the length of `arr` + /// store u32 0 at v4 // initial length of `arr` + /// inc_rc [] of u32 // storage for `arr` + /// v6 = allocate -> &mut [u32] // reference allocated to point at the storage of `arr` + /// store [] of u32 at v6 // initial value for the storage of `arr` + /// jmp b1(u32 0) // start looping from 0 + /// b1(v1: u32): // `i` induction variable + /// v8 = lt v1, u32 3 // loop until 3 + /// jmpif v8 then: b3, else: b2 + /// b3(): + /// v11 = load v2 -> u32 // load `sum` + /// v12 = add v11, v1 // add `i` to `sum` + /// store v12 at v2 // store updated `sum` + /// v13 = load v4 -> u32 // load length of `arr` + /// v14 = load v6 -> [u32] // load storage of `arr` + /// v16, v17 = call slice_push_back(v13, v14, v12) -> (u32, [u32]) // builtin to push, will store to storage and length references + /// v19 = add v1, u32 1 // increase `arr` + /// jmp b1(v19) // back-edge of the loop + /// b2(): // after the loop + /// v9 = load v2 -> u32 // read final value of `sum` + /// ``` + /// + /// We won't always find load _and_ store ops (e.g. the push above doesn't come with a store), + /// but it's likely that mem2reg could eliminate a lot of the loads we can find, so we can + /// use this as an approximation of the gains we would see. + fn find_pre_header_reference_values( + &self, + function: &Function, + cfg: &ControlFlowGraph, + ) -> Result, CallStack> { + // We need to traverse blocks from the pre-header up to the block entry point. + let pre_header = self.get_pre_header(function, cfg)?; + let function_entry = function.entry_block(); + + // The algorithm in `find_blocks_in_loop` expects to collect the blocks between the header and the back-edge of the loop, + // but technically works the same if we go from the pre-header up to the function entry as well. + let blocks = Self::find_blocks_in_loop(function_entry, pre_header, cfg).blocks; + + // Collect allocations in all blocks above the header. + let allocations = blocks.iter().flat_map(|b| { + function.dfg[*b] + .instructions() + .iter() + .filter(|i| matches!(&function.dfg[**i], Instruction::Allocate)) + .map(|i| { + // Get the value into which the allocation was stored. + function.dfg.instruction_results(*i)[0] + }) + }); + + // Collect reference parameters of the function itself. + let params = + function.parameters().iter().filter(|p| function.dfg.value_is_reference(**p)).copied(); + + Ok(params.chain(allocations).collect()) + } + + /// Count the number of load and store instructions of specific variables in the loop. + /// + /// Returns `(loads, stores)` in case we want to differentiate in the estimates. + fn count_loads_and_stores( + &self, + function: &Function, + refs: &HashSet, + ) -> (usize, usize) { + let mut loads = 0; + let mut stores = 0; + for block in &self.blocks { + for instruction in function.dfg[*block].instructions() { + match &function.dfg[*instruction] { + Instruction::Load { address } if refs.contains(address) => { + loads += 1; + } + Instruction::Store { address, .. 
} if refs.contains(address) => { + stores += 1; + } + _ => {} + } + } + } + (loads, stores) + } + + /// Count the number of instructions in the loop, including the terminating jumps. + fn count_all_instructions(&self, function: &Function) -> usize { + self.blocks + .iter() + .map(|block| { + let block = &function.dfg[*block]; + block.instructions().len() + block.terminator().map(|_| 1).unwrap_or_default() + }) + .sum() + } + + /// Count the number of increments to the induction variable. + /// It should be one, but it can be duplicated. + /// The increment should be in the block where the back-edge was found. + fn count_induction_increments(&self, function: &Function) -> usize { + let back = &function.dfg[self.back_edge_start]; + let header = &function.dfg[self.header]; + let induction_var = header.parameters()[0]; + + back.instructions().iter().filter(|instruction| { + let instruction = &function.dfg[**instruction]; + matches!(instruction, Instruction::Binary(Binary { lhs, operator: BinaryOp::Add, rhs: _ }) if *lhs == induction_var) + }).count() + } + + /// Decide if this loop is small enough that it can be inlined in a way that the number + /// of unrolled instructions times the number of iterations would result in smaller bytecode + /// than if we keep the loops with their overheads. + fn is_small_loop(&self, function: &Function, cfg: &ControlFlowGraph) -> bool { + self.boilerplate_stats(function, cfg).map(|s| s.is_small()).unwrap_or_default() } - Loop { header, back_edge_start, blocks } + /// Collect boilerplate stats if we can figure out the upper and lower bounds of the loop, + /// and the loop doesn't have multiple back-edges from breaks and continues. + fn boilerplate_stats( + &self, + function: &Function, + cfg: &ControlFlowGraph, + ) -> Option { + let Ok(Some((lower, upper))) = self.get_const_bounds(function, cfg) else { + return None; + }; + let Some(lower) = lower.try_to_u64() else { + return None; + }; + let Some(upper) = upper.try_to_u64() else { + return None; + }; + let Ok(refs) = self.find_pre_header_reference_values(function, cfg) else { + return None; + }; + let (loads, stores) = self.count_loads_and_stores(function, &refs); + let increments = self.count_induction_increments(function); + let all_instructions = self.count_all_instructions(function); + + Some(BoilerplateStats { + iterations: (upper - lower) as usize, + loads, + stores, + increments, + all_instructions, + }) + } } -/// Unroll a single loop in the function. -/// Returns Err(()) if it failed to unroll and Ok(()) otherwise. -fn unroll_loop( - function: &mut Function, - cfg: &ControlFlowGraph, - loop_: &Loop, -) -> Result<(), CallStack> { - let mut unroll_into = get_pre_header(cfg, loop_); - let mut jump_value = get_induction_variable(function, unroll_into)?; - let mut array_cache = Some(ArrayCache::default()); - - while let Some(mut context) = unroll_loop_header(function, loop_, unroll_into, jump_value)? { - // The inserter's array cache must be explicitly enabled. This is to - // confirm that we're inserting in insertion order. This is true here since: - // 1. We have a fresh inserter for each loop - // 2. Each loop is unrolled in iteration order - // - // Within a loop we do not insert in insertion order. This is fine however since the - // array cache is buffered with a separate fresh_array_cache which collects arrays - // but does not deduplicate. 
When we later call `into_array_cache`, that will merge - // the fresh cache in with the old one so that each iteration of the loop can cache - // from previous iterations but not the current iteration. - context.inserter.set_array_cache(array_cache, unroll_into); - (unroll_into, jump_value, array_cache) = context.unroll_loop_iteration(); - } - - Ok(()) +/// All the instructions in the following example are boilerplate: +/// ```text +/// brillig(inline) fn main f0 { +/// b0(v0: u32): +/// ... +/// jmp b1(u32 0) +/// b1(v1: u32): +/// v5 = lt v1, u32 4 +/// jmpif v5 then: b3, else: b2 +/// b3(): +/// ... +/// v11 = add v1, u32 1 +/// jmp b1(v11) +/// b2(): +/// ... +/// } +/// ``` +#[derive(Debug)] +struct BoilerplateStats { + /// Number of iterations in the loop. + iterations: usize, + /// Number of loads pre-header references in the loop. + loads: usize, + /// Number of stores into pre-header references in the loop. + stores: usize, + /// Number of increments to the induction variable (might be duplicated). + increments: usize, + /// Number of instructions in the loop, including boilerplate, + /// but excluding the boilerplate which is outside the loop. + all_instructions: usize, } -/// The loop pre-header is the block that comes before the loop begins. Generally a header block -/// is expected to have 2 predecessors: the pre-header and the final block of the loop which jumps -/// back to the beginning. -fn get_pre_header(cfg: &ControlFlowGraph, loop_: &Loop) -> BasicBlockId { - let mut pre_header = cfg - .predecessors(loop_.header) - .filter(|predecessor| *predecessor != loop_.back_edge_start) - .collect::>(); - - assert_eq!(pre_header.len(), 1); - pre_header.remove(0) +impl BoilerplateStats { + /// Instruction count if we leave the loop as-is. + /// It's the instructions in the loop, plus the one to kick it off in the pre-header. + fn baseline_instructions(&self) -> usize { + self.all_instructions + 1 + } + + /// Estimated number of _useful_ instructions, which is the ones in the loop + /// minus all in-loop boilerplate. + fn useful_instructions(&self) -> usize { + // Two jumps + plus the comparison with the upper bound + let boilerplate = 3; + // Be conservative and only assume that mem2reg gets rid of load followed by store. + // NB we have not checked that these are actual pairs. + let load_and_store = self.loads.min(self.stores) * 2; + self.all_instructions - self.increments - load_and_store - boilerplate + } + + /// Estimated number of instructions if we unroll the loop. + fn unrolled_instructions(&self) -> usize { + self.useful_instructions() * self.iterations + } + + /// A small loop is where if we unroll it into the pre-header then considering the + /// number of iterations we still end up with a smaller bytecode than if we leave + /// the blocks in tact with all the boilerplate involved in jumping, and the extra + /// reference access instructions. + fn is_small(&self) -> bool { + self.unrolled_instructions() < self.baseline_instructions() + } } /// Return the induction value of the current iteration of the loop, from the given block's jmp arguments. /// /// Expects the current block to terminate in `jmp h(N)` where h is the loop header and N is -/// a Field value. +/// a Field value. Returns an `Err` if this isn't the case. +/// +/// Consider the following example: +/// ```text +/// main(): +/// v0 = ... start ... +/// v1 = ... end ... +/// jmp loop_entry(v0) +/// loop_entry(i: Field): +/// ... 
+/// ``` +/// We're looking for the terminating jump of the `main` predecessor of `loop_entry`. fn get_induction_variable(function: &Function, block: BasicBlockId) -> Result { match function.dfg[block].terminator() { Some(TerminatorInstruction::Jmp { arguments, call_stack: location, .. }) => { @@ -270,54 +737,6 @@ fn get_induction_variable(function: &Function, block: BasicBlockId) -> Result( - function: &'a mut Function, - loop_: &'a Loop, - unroll_into: BasicBlockId, - induction_value: ValueId, -) -> Result>, CallStack> { - // We insert into a fresh block first and move instructions into the unroll_into block later - // only once we verify the jmpif instruction has a constant condition. If it does not, we can - // just discard this fresh block and leave the loop unmodified. - let fresh_block = function.dfg.make_block(); - - let mut context = LoopIteration::new(function, loop_, fresh_block, loop_.header); - let source_block = &context.dfg()[context.source_block]; - assert_eq!(source_block.parameters().len(), 1, "Expected only 1 argument in loop header"); - - // Insert the current value of the loop induction variable into our context. - let first_param = source_block.parameters()[0]; - context.inserter.try_map_value(first_param, induction_value); - context.inline_instructions_from_block(); - - match context.dfg()[fresh_block].unwrap_terminator() { - TerminatorInstruction::JmpIf { condition, then_destination, else_destination, call_stack } => { - let condition = *condition; - let next_blocks = context.handle_jmpif(condition, *then_destination, *else_destination, call_stack.clone()); - - // If there is only 1 next block the jmpif evaluated to a single known block. - // This is the expected case and lets us know if we should loop again or not. - if next_blocks.len() == 1 { - context.dfg_mut().inline_block(fresh_block, unroll_into); - - // The fresh block is gone now so we're committing to insert into the original - // unroll_into block from now on. - context.insert_block = unroll_into; - - Ok(loop_.blocks.contains(&context.source_block).then_some(context)) - } else { - // If this case is reached the loop either uses non-constant indices or we need - // another pass, such as mem2reg to resolve them to constants. - Err(context.inserter.function.dfg.get_value_call_stack(condition)) - } - } - other => unreachable!("Expected loop header to terminate in a JmpIf to the loop body, but found {other:?} instead"), - } -} - /// The context object for each loop iteration. /// Notably each loop iteration maps each loop block to a fresh, unrolled block. struct LoopIteration<'f> { @@ -379,7 +798,8 @@ impl<'f> LoopIteration<'f> { next_blocks.append(&mut blocks); } } - + // After having unrolled all blocks in the loop body, we must know how to get back to the header; + // this is also the block into which we have to unroll into next. let (end_block, induction_value) = self .induction_value .expect("Expected to find the induction variable by end of loop iteration"); @@ -390,6 +810,8 @@ impl<'f> LoopIteration<'f> { /// Unroll a single block in the current iteration of the loop fn unroll_loop_block(&mut self) -> Vec { let mut next_blocks = self.unroll_loop_block_helper(); + // Guarantee that the next blocks we set up to be unrolled, are actually part of the loop, + // which we recorded while inlining the instructions of the blocks already processed. 
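        // For instance, in an assumed body containing an early `break`, the terminating
        // jmpif can produce two successors: the next block inside the loop and the break
        // target outside it. Both may be returned by `unroll_loop_block_helper`, but only
        // the in-loop successor should be queued for this iteration, which is what the
        // filter below enforces.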
next_blocks.retain(|block| { let b = self.get_original_block(*block); self.loop_.blocks.contains(&b) @@ -399,6 +821,7 @@ impl<'f> LoopIteration<'f> { /// Unroll a single block in the current iteration of the loop fn unroll_loop_block_helper(&mut self) -> Vec { + // Copy instructions from the loop body to the unroll destination, replacing the terminator. self.inline_instructions_from_block(); self.visited_blocks.insert(self.source_block); @@ -416,6 +839,7 @@ impl<'f> LoopIteration<'f> { ), TerminatorInstruction::Jmp { destination, arguments, call_stack: _ } => { if self.get_original_block(*destination) == self.loop_.header { + // We found the back-edge of the loop. assert_eq!(arguments.len(), 1); self.induction_value = Some((self.insert_block, arguments[0])); } @@ -427,7 +851,10 @@ impl<'f> LoopIteration<'f> { /// Find the next branch(es) to take from a jmpif terminator and return them. /// If only one block is returned, it means the jmpif condition evaluated to a known - /// constant and we can safely take only the given branch. + /// constant and we can safely take only the given branch. In this case the method + /// also replaces the terminator of the insert block (a.k.a fresh block) to be a `Jmp`, + /// and changes the source block in the context for the next iteration to be the + /// destination indicated by the constant condition (ie. the `then` or the `else`). fn handle_jmpif( &mut self, condition: ValueId, @@ -473,10 +900,13 @@ impl<'f> LoopIteration<'f> { } } + /// Find the original ID of a block that replaced it. fn get_original_block(&self, block: BasicBlockId) -> BasicBlockId { self.original_blocks.get(&block).copied().unwrap_or(block) } + /// Copy over instructions from the source into the insert block, + /// while simplifying instructions and keeping track of original block IDs. fn inline_instructions_from_block(&mut self) { let source_block = &self.dfg()[self.source_block]; let instructions = source_block.instructions().to_vec(); @@ -485,23 +915,31 @@ impl<'f> LoopIteration<'f> { // instances of the induction variable or any values that were changed as a result // of the new induction variable value. for instruction in instructions { - // Skip reference count instructions since they are only used for brillig, and brillig code is not unrolled - if !matches!( - self.dfg()[instruction], - Instruction::IncrementRc { .. } | Instruction::DecrementRc { .. } - ) { - self.inserter.push_instruction(instruction, self.insert_block); + // Reference counting is only used by Brillig, ACIR doesn't need them. + if self.inserter.function.runtime().is_acir() && self.is_refcount(instruction) { + continue; } + self.inserter.push_instruction(instruction, self.insert_block); } let mut terminator = self.dfg()[self.source_block] .unwrap_terminator() .clone() .map_values(|value| self.inserter.resolve(value)); + // Replace the blocks in the terminator with fresh one with the same parameters, + // while remembering which were the original block IDs. terminator.mutate_blocks(|block| self.get_or_insert_block(block)); self.inserter.function.dfg.set_block_terminator(self.insert_block, terminator); } + /// Is the instruction an `Rc`? + fn is_refcount(&self, instruction: InstructionId) -> bool { + matches!( + self.dfg()[instruction], + Instruction::IncrementRc { .. } | Instruction::DecrementRc { .. 
} + ) + } + fn dfg(&self) -> &DataFlowGraph { &self.inserter.function.dfg } @@ -513,22 +951,19 @@ impl<'f> LoopIteration<'f> { #[cfg(test)] mod tests { - use crate::{ - errors::RuntimeError, - ssa::{ - function_builder::FunctionBuilder, - ir::{instruction::BinaryOp, map::Id, types::Type}, - }, - }; + use acvm::FieldElement; - use super::Ssa; + use crate::errors::RuntimeError; + use crate::ssa::{ir::value::ValueId, opt::assert_normalized_ssa_equals, Ssa}; + + use super::{BoilerplateStats, Loops}; /// Tries to unroll all loops in each SSA function. /// If any loop cannot be unrolled, it is left as-is or in a partially unrolled state. - fn try_to_unroll_loops(mut ssa: Ssa) -> (Ssa, Vec) { + fn try_unroll_loops(mut ssa: Ssa) -> (Ssa, Vec) { let mut errors = vec![]; for function in ssa.functions.values_mut() { - errors.extend(function.try_to_unroll_loops()); + errors.extend(function.try_unroll_loops()); } (ssa, errors) } @@ -542,166 +977,406 @@ mod tests { // } // } // } - // - // fn main f0 { - // b0(): - // jmp b1(Field 0) - // b1(v0: Field): // header of outer loop - // v1 = lt v0, Field 3 - // jmpif v1, then: b2, else: b3 - // b2(): - // jmp b4(Field 0) - // b4(v2: Field): // header of inner loop - // v3 = lt v2, Field 4 - // jmpif v3, then: b5, else: b6 - // b5(): - // v4 = add v0, v2 - // v5 = lt Field 10, v4 - // constrain v5 - // v6 = add v2, Field 1 - // jmp b4(v6) - // b6(): // end of inner loop - // v7 = add v0, Field 1 - // jmp b1(v7) - // b3(): // end of outer loop - // return Field 0 - // } - let main_id = Id::test_new(0); - - // Compiling main - let mut builder = FunctionBuilder::new("main".into(), main_id); - - let b1 = builder.insert_block(); - let b2 = builder.insert_block(); - let b3 = builder.insert_block(); - let b4 = builder.insert_block(); - let b5 = builder.insert_block(); - let b6 = builder.insert_block(); - - let v0 = builder.add_block_parameter(b1, Type::field()); - let v2 = builder.add_block_parameter(b4, Type::field()); - - let zero = builder.field_constant(0u128); - let one = builder.field_constant(1u128); - let three = builder.field_constant(3u128); - let four = builder.field_constant(4u128); - let ten = builder.field_constant(10u128); - - builder.terminate_with_jmp(b1, vec![zero]); - - // b1 - builder.switch_to_block(b1); - let v1 = builder.insert_binary(v0, BinaryOp::Lt, three); - builder.terminate_with_jmpif(v1, b2, b3); - - // b2 - builder.switch_to_block(b2); - builder.terminate_with_jmp(b4, vec![zero]); - - // b3 - builder.switch_to_block(b3); - builder.terminate_with_return(vec![zero]); - - // b4 - builder.switch_to_block(b4); - let v3 = builder.insert_binary(v2, BinaryOp::Lt, four); - builder.terminate_with_jmpif(v3, b5, b6); - - // b5 - builder.switch_to_block(b5); - let v4 = builder.insert_binary(v0, BinaryOp::Add, v2); - let v5 = builder.insert_binary(ten, BinaryOp::Lt, v4); - builder.insert_constrain(v5, one, None); - let v6 = builder.insert_binary(v2, BinaryOp::Add, one); - builder.terminate_with_jmp(b4, vec![v6]); - - // b6 - builder.switch_to_block(b6); - let v7 = builder.insert_binary(v0, BinaryOp::Add, one); - builder.terminate_with_jmp(b1, vec![v7]); - - let ssa = builder.finish(); - assert_eq!(ssa.main().reachable_blocks().len(), 7); - - // Expected output: - // - // fn main f0 { - // b0(): - // constrain Field 0 - // constrain Field 0 - // constrain Field 0 - // constrain Field 0 - // jmp b23() - // b23(): - // constrain Field 0 - // constrain Field 0 - // constrain Field 0 - // constrain Field 0 - // jmp b27() - // b27(): - // constrain Field 
0 - // constrain Field 0 - // constrain Field 0 - // constrain Field 0 - // jmp b31() - // b31(): - // jmp b3() - // b3(): - // return Field 0 - // } + let src = " + acir(inline) fn main f0 { + b0(): + jmp b1(Field 0) + b1(v0: Field): // header of outer loop + v1 = lt v0, Field 3 + jmpif v1 then: b2, else: b3 + b2(): + jmp b4(Field 0) + b4(v2: Field): // header of inner loop + v3 = lt v2, Field 4 + jmpif v3 then: b5, else: b6 + b5(): + v4 = add v0, v2 + v5 = lt Field 10, v4 + constrain v5 == Field 1 + v6 = add v2, Field 1 + jmp b4(v6) + b6(): // end of inner loop + v7 = add v0, Field 1 + jmp b1(v7) + b3(): // end of outer loop + return Field 0 + } + "; + let ssa = Ssa::from_str(src).unwrap(); + + let expected = " + acir(inline) fn main f0 { + b0(): + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + jmp b1() + b1(): + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + jmp b2() + b2(): + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + jmp b3() + b3(): + jmp b4() + b4(): + return Field 0 + } + "; + // The final block count is not 1 because unrolling creates some unnecessary jmps. // If a simplify cfg pass is ran afterward, the expected block count will be 1. - let (ssa, errors) = try_to_unroll_loops(ssa); + let (ssa, errors) = try_unroll_loops(ssa); assert_eq!(errors.len(), 0, "All loops should be unrolled"); assert_eq!(ssa.main().reachable_blocks().len(), 5); + + assert_normalized_ssa_equals(ssa, expected); } // Test that the pass can still be run on loops which fail to unroll properly #[test] fn fail_to_unroll_loop() { - // fn main f0 { - // b0(v0: Field): - // jmp b1(v0) - // b1(v1: Field): - // v2 = lt v1, 5 - // jmpif v2, then: b2, else: b3 - // b2(): - // v3 = add v1, Field 1 - // jmp b1(v3) - // b3(): - // return Field 0 - // } - let main_id = Id::test_new(0); - let mut builder = FunctionBuilder::new("main".into(), main_id); + let src = " + acir(inline) fn main f0 { + b0(v0: Field): + jmp b1(v0) + b1(v1: Field): + v2 = lt v1, Field 5 + jmpif v2 then: b2, else: b3 + b2(): + v3 = add v1, Field 1 + jmp b1(v3) + b3(): + return Field 0 + } + "; + let ssa = Ssa::from_str(src).unwrap(); + + // Sanity check + assert_eq!(ssa.main().reachable_blocks().len(), 4); + + // Expected that we failed to unroll the loop + let (_, errors) = try_unroll_loops(ssa); + assert_eq!(errors.len(), 1, "Expected to fail to unroll loop"); + } - let b1 = builder.insert_block(); - let b2 = builder.insert_block(); - let b3 = builder.insert_block(); + #[test] + fn test_get_const_bounds() { + let ssa = brillig_unroll_test_case(); + let function = ssa.main(); + let loops = Loops::find_all(function); + assert_eq!(loops.yet_to_unroll.len(), 1); + + let (lower, upper) = loops.yet_to_unroll[0] + .get_const_bounds(function, &loops.cfg) + .expect("should find bounds") + .expect("bounds are numeric const"); + + assert_eq!(lower, FieldElement::from(0u32)); + assert_eq!(upper, FieldElement::from(4u32)); + } - let v0 = builder.add_parameter(Type::field()); - let v1 = builder.add_block_parameter(b1, Type::field()); + #[test] + fn test_find_pre_header_reference_values() { + let ssa = brillig_unroll_test_case(); + let function = ssa.main(); + let mut loops = Loops::find_all(function); + let loop0 = loops.yet_to_unroll.pop().unwrap(); + + let refs = loop0.find_pre_header_reference_values(function, &loops.cfg).unwrap(); + assert_eq!(refs.len(), 1); + 
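+        // In `brillig_unroll_test_case` below, `v2` is the `allocate -> &mut u32` that backs the
+        // mutable `sum` variable, so it is the single reference expected in the pre-header.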
assert!(refs.contains(&ValueId::new(2))); + + let (loads, stores) = loop0.count_loads_and_stores(function, &refs); + assert_eq!(loads, 1); + assert_eq!(stores, 1); + + let all = loop0.count_all_instructions(function); + assert_eq!(all, 7); + } - builder.terminate_with_jmp(b1, vec![v0]); + #[test] + fn test_boilerplate_stats() { + let ssa = brillig_unroll_test_case(); + let stats = loop0_stats(&ssa); + assert_eq!(stats.iterations, 4); + assert_eq!(stats.all_instructions, 2 + 5); // Instructions in b1 and b3 + assert_eq!(stats.increments, 1); + assert_eq!(stats.loads, 1); + assert_eq!(stats.stores, 1); + assert_eq!(stats.useful_instructions(), 1); // Adding to sum + assert_eq!(stats.baseline_instructions(), 8); + assert!(stats.is_small()); + } - builder.switch_to_block(b1); - let five = builder.field_constant(5u128); - let v2 = builder.insert_binary(v1, BinaryOp::Lt, five); - builder.terminate_with_jmpif(v2, b2, b3); + #[test] + fn test_boilerplate_stats_6470() { + let ssa = brillig_unroll_test_case_6470(3); + let stats = loop0_stats(&ssa); + assert_eq!(stats.iterations, 3); + assert_eq!(stats.all_instructions, 2 + 8); // Instructions in b1 and b3 + assert_eq!(stats.increments, 2); + assert_eq!(stats.loads, 1); + assert_eq!(stats.stores, 1); + assert_eq!(stats.useful_instructions(), 3); // array get, add, array set + assert_eq!(stats.baseline_instructions(), 11); + assert!(stats.is_small()); + } - builder.switch_to_block(b2); - let one = builder.field_constant(1u128); - let v3 = builder.insert_binary(v1, BinaryOp::Add, one); - builder.terminate_with_jmp(b1, vec![v3]); + /// Test that we can unroll a small loop. + #[test] + fn test_brillig_unroll_small_loop() { + let ssa = brillig_unroll_test_case(); + + // Expectation taken by compiling the Noir program as ACIR, + // ie. by removing the `unconstrained` from `main`. + let expected = " + brillig(inline) fn main f0 { + b0(v0: u32): + v1 = allocate -> &mut u32 + store u32 0 at v1 + v3 = load v1 -> u32 + store v3 at v1 + v4 = load v1 -> u32 + v6 = add v4, u32 1 + store v6 at v1 + v7 = load v1 -> u32 + v9 = add v7, u32 2 + store v9 at v1 + v10 = load v1 -> u32 + v12 = add v10, u32 3 + store v12 at v1 + jmp b1() + b1(): + v13 = load v1 -> u32 + v14 = eq v13, v0 + constrain v13 == v0 + return + } + "; - builder.switch_to_block(b3); - let zero = builder.field_constant(0u128); - builder.terminate_with_return(vec![zero]); + let (ssa, errors) = try_unroll_loops(ssa); + assert_eq!(errors.len(), 0, "Unroll should have no errors"); + assert_eq!(ssa.main().reachable_blocks().len(), 2, "The loop should be unrolled"); - let ssa = builder.finish(); - assert_eq!(ssa.main().reachable_blocks().len(), 4); + assert_normalized_ssa_equals(ssa, expected); + } - // Expected that we failed to unroll the loop - let (_, errors) = try_to_unroll_loops(ssa); - assert_eq!(errors.len(), 1, "Expected to fail to unroll loop"); + /// Test that we can unroll the loop in the ticket if we don't have too many iterations. + #[test] + fn test_brillig_unroll_6470_small() { + // Few enough iterations so that we can perform the unroll. + let ssa = brillig_unroll_test_case_6470(3); + let (ssa, errors) = try_unroll_loops(ssa); + assert_eq!(errors.len(), 0, "Unroll should have no errors"); + assert_eq!(ssa.main().reachable_blocks().len(), 2, "The loop should be unrolled"); + + // The IDs are shifted by one compared to what the ACIR version printed. 
+ let expected = " + brillig(inline) fn main f0 { + b0(v0: [u64; 6]): + inc_rc v0 + v2 = make_array [u64 0, u64 0, u64 0, u64 0, u64 0, u64 0] : [u64; 6] + inc_rc v2 + v3 = allocate -> &mut [u64; 6] + store v2 at v3 + v4 = load v3 -> [u64; 6] + v6 = array_get v0, index u32 0 -> u64 + v8 = add v6, u64 1 + v9 = array_set v4, index u32 0, value v8 + store v9 at v3 + v10 = load v3 -> [u64; 6] + v12 = array_get v0, index u32 1 -> u64 + v13 = add v12, u64 1 + v14 = array_set v10, index u32 1, value v13 + store v14 at v3 + v15 = load v3 -> [u64; 6] + v17 = array_get v0, index u32 2 -> u64 + v18 = add v17, u64 1 + v19 = array_set v15, index u32 2, value v18 + store v19 at v3 + jmp b1() + b1(): + v20 = load v3 -> [u64; 6] + dec_rc v0 + return v20 + } + "; + assert_normalized_ssa_equals(ssa, expected); + } + + /// Test that with more iterations it's not unrolled. + #[test] + fn test_brillig_unroll_6470_large() { + // More iterations than it can unroll + let parse_ssa = || brillig_unroll_test_case_6470(6); + let ssa = parse_ssa(); + let stats = loop0_stats(&ssa); + assert!(!stats.is_small(), "the loop should be considered large"); + + let (ssa, errors) = try_unroll_loops(ssa); + assert_eq!(errors.len(), 0, "Unroll should have no errors"); + assert_normalized_ssa_equals(ssa, parse_ssa().to_string().as_str()); + } + + /// Test that `break` and `continue` stop unrolling without any panic. + #[test] + fn test_brillig_unroll_break_and_continue() { + // unconstrained fn main() { + // let mut count = 0; + // for i in 0..10 { + // if i == 2 { + // continue; + // } + // if i == 5 { + // break; + // } + // count += 1; + // } + // assert(count == 4); + // } + let src = " + brillig(inline) fn main f0 { + b0(): + v1 = allocate -> &mut Field + store Field 0 at v1 + jmp b1(u32 0) + b1(v0: u32): + v5 = lt v0, u32 10 + jmpif v5 then: b2, else: b6 + b2(): + v7 = eq v0, u32 2 + jmpif v7 then: b7, else: b3 + b7(): + v18 = add v0, u32 1 + jmp b1(v18) + b3(): + v9 = eq v0, u32 5 + jmpif v9 then: b5, else: b4 + b5(): + jmp b6() + b6(): + v15 = load v1 -> Field + v17 = eq v15, Field 4 + constrain v15 == Field 4 + return + b4(): + v10 = load v1 -> Field + v12 = add v10, Field 1 + store v12 at v1 + v14 = add v0, u32 1 + jmp b1(v14) + } + "; + let ssa = Ssa::from_str(src).unwrap(); + let (ssa, errors) = try_unroll_loops(ssa); + assert_eq!(errors.len(), 0, "Unroll should have no errors"); + assert_normalized_ssa_equals(ssa, src); + } + + /// Simple test loop: + /// ```text + /// unconstrained fn main(sum: u32) { + /// assert(loop(0, 4) == sum); + /// } + /// + /// fn loop(from: u32, to: u32) -> u32 { + /// let mut sum = 0; + /// for i in from..to { + /// sum = sum + i; + /// } + /// sum + /// } + /// ``` + /// We can check what the ACIR unrolling behavior would be by + /// removing the `unconstrained` from the `main` function and + /// compiling the program with `nargo --test-program . compile --show-ssa`. 
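+    /// A usage sketch mirroring the tests above (`try_unroll_loops` is the helper defined at the
+    /// top of this test module):
+    /// ```text
+    /// let ssa = brillig_unroll_test_case();
+    /// let (ssa, errors) = try_unroll_loops(ssa);
+    /// assert_eq!(errors.len(), 0, "Unroll should have no errors");
+    /// ```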
+ fn brillig_unroll_test_case() -> Ssa { + let src = " + // After `static_assert` and `assert_constant`: + brillig(inline) fn main f0 { + b0(v0: u32): + v2 = allocate -> &mut u32 + store u32 0 at v2 + jmp b1(u32 0) + b1(v1: u32): + v5 = lt v1, u32 4 + jmpif v5 then: b3, else: b2 + b3(): + v8 = load v2 -> u32 + v9 = add v8, v1 + store v9 at v2 + v11 = add v1, u32 1 + jmp b1(v11) + b2(): + v6 = load v2 -> u32 + v7 = eq v6, v0 + constrain v6 == v0 + return + } + "; + Ssa::from_str(src).unwrap() + } + + /// Test case from #6470: + /// ```text + /// unconstrained fn __validate_gt_remainder(a_u60: [u64; 6]) -> [u64; 6] { + /// let mut result_u60: [u64; 6] = [0; 6]; + /// + /// for i in 0..6 { + /// result_u60[i] = a_u60[i] + 1; + /// } + /// + /// result_u60 + /// } + /// ``` + /// The `num_iterations` parameter can be used to make it more costly to inline. + fn brillig_unroll_test_case_6470(num_iterations: usize) -> Ssa { + let src = format!( + " + // After `static_assert` and `assert_constant`: + brillig(inline) fn main f0 {{ + b0(v0: [u64; 6]): + inc_rc v0 + v3 = make_array [u64 0, u64 0, u64 0, u64 0, u64 0, u64 0] : [u64; 6] + inc_rc v3 + v4 = allocate -> &mut [u64; 6] + store v3 at v4 + jmp b1(u32 0) + b1(v1: u32): + v7 = lt v1, u32 {num_iterations} + jmpif v7 then: b3, else: b2 + b3(): + v9 = load v4 -> [u64; 6] + v10 = array_get v0, index v1 -> u64 + v12 = add v10, u64 1 + v13 = array_set v9, index v1, value v12 + v15 = add v1, u32 1 + store v13 at v4 + v16 = add v1, u32 1 // duplicate + jmp b1(v16) + b2(): + v8 = load v4 -> [u64; 6] + dec_rc v0 + return v8 + }} + " + ); + Ssa::from_str(&src).unwrap() + } + + // Boilerplate stats of the first loop in the SSA. + fn loop0_stats(ssa: &Ssa) -> BoilerplateStats { + let function = ssa.main(); + let mut loops = Loops::find_all(function); + let loop0 = loops.yet_to_unroll.pop().expect("there should be a loop"); + loop0.boilerplate_stats(function, &loops.cfg).expect("there should be stats") } } diff --git a/compiler/noirc_evaluator/src/ssa/parser/into_ssa.rs b/compiler/noirc_evaluator/src/ssa/parser/into_ssa.rs index 9ca4f52cb14..552ac0781c7 100644 --- a/compiler/noirc_evaluator/src/ssa/parser/into_ssa.rs +++ b/compiler/noirc_evaluator/src/ssa/parser/into_ssa.rs @@ -25,7 +25,11 @@ struct Translator { /// Maps block names to their IDs blocks: HashMap>, - /// Maps variable names to their IDs + /// Maps variable names to their IDs. + /// + /// This is necessary because the SSA we parse might have undergone some + /// passes already which replaced some of the original IDs. The translator + /// will recreate the SSA step by step, which can result in a new ID layout. variables: HashMap>, } @@ -307,7 +311,13 @@ impl Translator { } fn finish(self) -> Ssa { - self.builder.finish() + let mut ssa = self.builder.finish(); + // Normalize the IDs so we have a better chance of matching the SSA we parsed + // after the step-by-step reconstruction done during translation. This assumes + // that the SSA we parsed was printed by the `SsaBuilder`, which normalizes + // before each print. 
+ ssa.normalize_ids(); + ssa } fn current_function_id(&self) -> FunctionId { diff --git a/compiler/noirc_evaluator/src/ssa/parser/tests.rs b/compiler/noirc_evaluator/src/ssa/parser/tests.rs index f318e317473..60d398bf9d5 100644 --- a/compiler/noirc_evaluator/src/ssa/parser/tests.rs +++ b/compiler/noirc_evaluator/src/ssa/parser/tests.rs @@ -106,8 +106,8 @@ fn test_multiple_blocks_and_jmp() { acir(inline) fn main f0 { b0(): jmp b1(Field 1) - b1(v1: Field): - return v1 + b1(v0: Field): + return v0 } "; assert_ssa_roundtrip(src); @@ -118,11 +118,11 @@ fn test_jmpif() { let src = " acir(inline) fn main f0 { b0(v0: Field): - jmpif v0 then: b1, else: b2 - b1(): - return + jmpif v0 then: b2, else: b1 b2(): return + b1(): + return } "; assert_ssa_roundtrip(src); diff --git a/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs b/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs index aac6e33bf5b..38a0565866f 100644 --- a/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs +++ b/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs @@ -466,6 +466,7 @@ impl<'a> FunctionContext<'a> { /// /// For example, the loop `for i in start .. end { body }` is codegen'd as: /// + /// ```text /// v0 = ... codegen start ... /// v1 = ... codegen end ... /// br loop_entry(v0) @@ -478,6 +479,7 @@ impl<'a> FunctionContext<'a> { /// br loop_entry(v4) /// loop_end(): /// ... This is the current insert point after codegen_for finishes ... + /// ``` fn codegen_for(&mut self, for_expr: &ast::For) -> Result { let loop_entry = self.builder.insert_block(); let loop_body = self.builder.insert_block(); @@ -529,6 +531,7 @@ impl<'a> FunctionContext<'a> { /// /// For example, the expression `if cond { a } else { b }` is codegen'd as: /// + /// ```text /// v0 = ... codegen cond ... /// brif v0, then: then_block, else: else_block /// then_block(): @@ -539,16 +542,19 @@ impl<'a> FunctionContext<'a> { /// br end_if(v2) /// end_if(v3: ?): // Type of v3 matches the type of a and b /// ... This is the current insert point after codegen_if finishes ... + /// ``` /// /// As another example, the expression `if cond { a }` is codegen'd as: /// + /// ```text /// v0 = ... codegen cond ... - /// brif v0, then: then_block, else: end_block + /// brif v0, then: then_block, else: end_if /// then_block: /// v1 = ... codegen a ... /// br end_if() /// end_if: // No block parameter is needed. Without an else, the unit value is always returned. /// ... This is the current insert point after codegen_if finishes ... 
+ /// ``` fn codegen_if(&mut self, if_expr: &ast::If) -> Result { let condition = self.codegen_non_tuple_expression(&if_expr.condition)?; From 7dd71c15cbcbf025ba049b506c94924903b32754 Mon Sep 17 00:00:00 2001 From: Aztec Bot <49558828+AztecBot@users.noreply.github.com> Date: Tue, 19 Nov 2024 16:47:03 -0500 Subject: [PATCH 026/109] feat: Sync from aztec-packages (#6557) Co-authored-by: Tom French --- .aztec-sync-commit | 2 +- acvm-repo/acir/README.md | 14 +- acvm-repo/acir/benches/serialization.rs | 1 - acvm-repo/acir/codegen/acir.cpp | 274 +----------------- acvm-repo/acir/src/circuit/directives.rs | 11 - acvm-repo/acir/src/circuit/mod.rs | 20 +- acvm-repo/acir/src/circuit/opcodes.rs | 29 +- acvm-repo/acir/src/lib.rs | 2 - .../acir/tests/test_program_serialization.rs | 163 ++++++----- acvm-repo/acvm/Cargo.toml | 3 +- .../optimizers/constant_backpropagation.rs | 34 +-- .../compiler/optimizers/merge_expressions.rs | 4 - .../compiler/optimizers/redundant_range.rs | 1 - acvm-repo/acvm/src/compiler/simulator.rs | 13 - .../acvm/src/compiler/transformers/mod.rs | 13 +- acvm-repo/acvm/src/pwg/brillig.rs | 24 +- acvm-repo/acvm/src/pwg/directives/mod.rs | 49 ---- acvm-repo/acvm/src/pwg/mod.rs | 106 ++----- acvm-repo/acvm/tests/solver.rs | 106 ++++--- acvm-repo/acvm_js/test/shared/addition.ts | 8 +- .../test/shared/complex_foreign_call.ts | 16 +- acvm-repo/acvm_js/test/shared/foreign_call.ts | 10 +- acvm-repo/acvm_js/test/shared/memory_op.ts | 8 +- .../acvm_js/test/shared/multi_scalar_mul.ts | 6 +- .../acvm_js/test/shared/nested_acir_call.ts | 14 +- .../acvm_js/test/shared/schnorr_verify.ts | 28 +- acvm-repo/brillig/src/lib.rs | 1 - acvm-repo/brillig/src/opcodes.rs | 3 +- acvm-repo/brillig_vm/src/lib.rs | 67 +++-- compiler/integration-tests/package.json | 2 +- .../scripts/codegen-verifiers.sh | 4 +- .../test/browser/recursion.test.ts | 6 +- .../onchain_recursive_verification.test.ts | 8 +- .../test/node/smart_contract_verifier.test.ts | 2 +- compiler/noirc_driver/src/abi_gen.rs | 31 +- compiler/noirc_evaluator/Cargo.toml | 2 +- .../noirc_evaluator/src/acir/acir_variable.rs | 15 +- .../src/acir/brillig_directive.rs | 153 +++++++++- .../src/acir/generated_acir.rs | 96 ++++-- compiler/noirc_evaluator/src/acir/mod.rs | 35 ++- .../src/brillig/brillig_gen/brillig_block.rs | 4 +- .../noirc_evaluator/src/brillig/brillig_ir.rs | 7 +- .../src/brillig/brillig_ir/artifact.rs | 51 ++-- .../src/brillig/brillig_ir/codegen_calls.rs | 2 +- .../brillig_ir/codegen_control_flow.rs | 49 +++- .../src/brillig/brillig_ir/debug_show.rs | 21 +- .../src/brillig/brillig_ir/entry_point.rs | 12 +- .../src/brillig/brillig_ir/instructions.rs | 22 +- .../src/brillig/brillig_ir/procedures/mod.rs | 25 +- .../procedures/revert_with_string.rs | 23 ++ compiler/noirc_evaluator/src/lib.rs | 1 + compiler/noirc_evaluator/src/ssa.rs | 30 +- .../src/ssa/function_builder/mod.rs | 7 +- .../noirc_evaluator/src/ssa/ir/instruction.rs | 47 +-- .../noirc_evaluator/src/ssa/ir/printer.rs | 9 +- compiler/noirc_evaluator/src/ssa/opt/die.rs | 6 +- .../noirc_evaluator/src/ssa/ssa_gen/mod.rs | 31 +- .../tests/arithmetic_generics.txt | 7 + compiler/noirc_frontend/src/ast/function.rs | 4 +- .../noirc_frontend/src/elaborator/lints.rs | 15 +- compiler/noirc_frontend/src/elaborator/mod.rs | 7 +- .../src/hir/resolution/errors.rs | 20 +- compiler/noirc_frontend/src/lexer/token.rs | 3 - .../src/monomorphization/ast.rs | 4 - .../src/monomorphization/mod.rs | 8 +- .../src/parser/parser/attributes.rs | 11 - compiler/noirc_frontend/src/tests.rs | 12 +- 
docs/docs/how_to/how-to-recursion.md | 4 +- docs/docs/noir/standard_library/recursion.mdx | 18 -- .../getting_started/quick_start.md | 3 +- .../recursion/generate_recursive_proof.sh | 12 +- examples/recursion/recurse_leaf/src/main.nr | 3 +- examples/recursion/sum/src/main.nr | 3 +- noir_stdlib/src/collections/bounded_vec.nr | 12 +- scripts/install_bb.sh | 2 +- .../recursive_method/Nargo.toml | 6 - .../recursive_method/src/main.nr | 6 - .../databus_mapping_regression/src/main.nr | 2 +- .../assert_statement_recursive/Nargo.toml | 7 - .../assert_statement_recursive/Prover.toml | 2 - .../assert_statement_recursive/src/main.nr | 11 - .../Nargo.toml | 3 +- .../double_verify_honk_proof/Prover.toml | 5 + .../double_verify_honk_proof/src/main.nr | 28 ++ .../double_verify_proof/src/main.nr | 2 +- .../double_verify_proof_recursive/Prover.toml | 5 - .../double_verify_proof_recursive/src/main.nr | 18 -- .../single_verify_proof/src/main.nr | 2 +- .../verify_honk_proof/Nargo.toml | 6 + .../verify_honk_proof/Prover.toml | 4 + .../verify_honk_proof/src/main.nr | 21 ++ test_programs/gates_report.sh | 2 +- tooling/debugger/src/context.rs | 69 ++--- tooling/fuzzer/src/dictionary/mod.rs | 6 +- tooling/nargo_fmt/src/formatter/attribute.rs | 3 +- tooling/noir_js_types/src/types.ts | 3 +- tooling/noirc_abi/src/lib.rs | 8 + tooling/noirc_abi_wasm/src/lib.rs | 1 + .../test/shared/decode_error.ts | 6 + tooling/profiler/src/opcode_formatter.rs | 11 +- yarn.lock | 10 +- 101 files changed, 954 insertions(+), 1172 deletions(-) delete mode 100644 acvm-repo/acir/src/circuit/directives.rs delete mode 100644 acvm-repo/acvm/src/pwg/directives/mod.rs create mode 100644 compiler/noirc_evaluator/src/brillig/brillig_ir/procedures/revert_with_string.rs create mode 100644 compiler/noirc_frontend/proptest-regressions/tests/arithmetic_generics.txt delete mode 100644 test_programs/compile_success_contract/recursive_method/Nargo.toml delete mode 100644 test_programs/compile_success_contract/recursive_method/src/main.nr delete mode 100644 test_programs/execution_success/assert_statement_recursive/Nargo.toml delete mode 100644 test_programs/execution_success/assert_statement_recursive/Prover.toml delete mode 100644 test_programs/execution_success/assert_statement_recursive/src/main.nr rename test_programs/execution_success/{double_verify_proof_recursive => double_verify_honk_proof}/Nargo.toml (57%) create mode 100644 test_programs/execution_success/double_verify_honk_proof/Prover.toml create mode 100644 test_programs/execution_success/double_verify_honk_proof/src/main.nr delete mode 100644 test_programs/execution_success/double_verify_proof_recursive/Prover.toml delete mode 100644 test_programs/execution_success/double_verify_proof_recursive/src/main.nr create mode 100644 test_programs/execution_success/verify_honk_proof/Nargo.toml create mode 100644 test_programs/execution_success/verify_honk_proof/Prover.toml create mode 100644 test_programs/execution_success/verify_honk_proof/src/main.nr diff --git a/.aztec-sync-commit b/.aztec-sync-commit index 8cfda4ff013..dc4d9c58bda 100644 --- a/.aztec-sync-commit +++ b/.aztec-sync-commit @@ -1 +1 @@ -d9de430e4a01d6908a9b1fe5e6ede9309aa8a10d +58761fcf181d0a26c4e387105b1bc8a3a75b0368 diff --git a/acvm-repo/acir/README.md b/acvm-repo/acir/README.md index 902769e3ad7..a0e7094b4d6 100644 --- a/acvm-repo/acir/README.md +++ b/acvm-repo/acir/README.md @@ -77,7 +77,7 @@ In summary, the workflow is the following: 1. 
user program -> (compilation) ACIR, a list of opcodes which constrain (partial) witnesses 2. user inputs + ACIR -> (execution/solving) assign values to all the - (partial) witnesses + (partial) witnesses 3. witness assignment + ACIR -> (proving system) proof Although the ordering of opcode does not matter in theory, since a system of @@ -121,7 +121,7 @@ proving system and are only used by the solver. Finally, some opcodes will have a predicate, whose value is `0` or `1`. Its purpose is to nullify the opcode when the value is `0`, so that it has no effect. Note that removing the opcode is not a solution because this modifies -the circuit (the circuit being mainly the list of the opcodes). +the circuit (the circuit being mainly the list of the opcodes). *Remark*: Opcodes operate on witnesses, but we will see that some opcode work on expressions of witnesses. We call an expression a linear combination of @@ -265,16 +265,6 @@ without adding any constraint. NOTE: see the [circuit/opcodes.rs](src/circuit/opcodes.rs) file for the most up-to-date documentation on these opcodes. -#### Directive - -This opcode is a specialization of Brillig opcode. Instead of having some generic -assembly code like Brillig, a directive has a hardcoded name which tells the -solver which computation to do: with Brillig, the computation refers to the -compiled bytecode of an unconstrained Noir function, but with a directive, the -computation is hardcoded inside the compiler. - -Directives will be replaced by Brillig opcodes in the future. - #### MemoryOp: memory abstraction for ACIR ACIR is able to address any array of witnesses. Each array is assigned an ID diff --git a/acvm-repo/acir/benches/serialization.rs b/acvm-repo/acir/benches/serialization.rs index 792200c8912..dd6a5c8b1cf 100644 --- a/acvm-repo/acir/benches/serialization.rs +++ b/acvm-repo/acir/benches/serialization.rs @@ -38,7 +38,6 @@ fn sample_program(num_opcodes: usize) -> Program { public_parameters: PublicInputs(BTreeSet::from([Witness(5)])), return_values: PublicInputs(BTreeSet::from([Witness(6)])), assert_messages: Vec::new(), - recursive: false, }], unconstrained_functions: Vec::new(), } diff --git a/acvm-repo/acir/codegen/acir.cpp b/acvm-repo/acir/codegen/acir.cpp index 0880b5a0cbe..2ae9a31d6ca 100644 --- a/acvm-repo/acir/codegen/acir.cpp +++ b/acvm-repo/acir/codegen/acir.cpp @@ -702,8 +702,7 @@ namespace Program { }; struct Stop { - uint64_t return_data_offset; - uint64_t return_data_size; + Program::HeapVector return_data; friend bool operator==(const Stop&, const Stop&); std::vector bincodeSerialize() const; @@ -1086,25 +1085,6 @@ namespace Program { static BrilligOutputs bincodeDeserialize(std::vector); }; - struct Directive { - - struct ToLeRadix { - Program::Expression a; - std::vector b; - uint32_t radix; - - friend bool operator==(const ToLeRadix&, const ToLeRadix&); - std::vector bincodeSerialize() const; - static ToLeRadix bincodeDeserialize(std::vector); - }; - - std::variant value; - - friend bool operator==(const Directive&, const Directive&); - std::vector bincodeSerialize() const; - static Directive bincodeDeserialize(std::vector); - }; - struct MemOp { Program::Expression operation; Program::Expression index; @@ -1133,14 +1113,6 @@ namespace Program { static BlackBoxFuncCall bincodeDeserialize(std::vector); }; - struct Directive { - Program::Directive value; - - friend bool operator==(const Directive&, const Directive&); - std::vector bincodeSerialize() const; - static Directive bincodeDeserialize(std::vector); - }; - struct MemoryOp 
{ Program::BlockId block_id; Program::MemOp op; @@ -1183,7 +1155,7 @@ namespace Program { static Call bincodeDeserialize(std::vector); }; - std::variant value; + std::variant value; friend bool operator==(const Opcode&, const Opcode&); std::vector bincodeSerialize() const; @@ -1216,24 +1188,8 @@ namespace Program { }; struct AssertionPayload { - - struct StaticString { - std::string value; - - friend bool operator==(const StaticString&, const StaticString&); - std::vector bincodeSerialize() const; - static StaticString bincodeDeserialize(std::vector); - }; - - struct Dynamic { - std::tuple> value; - - friend bool operator==(const Dynamic&, const Dynamic&); - std::vector bincodeSerialize() const; - static Dynamic bincodeDeserialize(std::vector); - }; - - std::variant value; + uint64_t error_selector; + std::vector payload; friend bool operator==(const AssertionPayload&, const AssertionPayload&); std::vector bincodeSerialize() const; @@ -1305,7 +1261,6 @@ namespace Program { Program::PublicInputs public_parameters; Program::PublicInputs return_values; std::vector> assert_messages; - bool recursive; friend bool operator==(const Circuit&, const Circuit&); std::vector bincodeSerialize() const; @@ -1335,7 +1290,8 @@ namespace Program { namespace Program { inline bool operator==(const AssertionPayload &lhs, const AssertionPayload &rhs) { - if (!(lhs.value == rhs.value)) { return false; } + if (!(lhs.error_selector == rhs.error_selector)) { return false; } + if (!(lhs.payload == rhs.payload)) { return false; } return true; } @@ -1360,7 +1316,8 @@ template <> template void serde::Serializable::serialize(const Program::AssertionPayload &obj, Serializer &serializer) { serializer.increase_container_depth(); - serde::Serializable::serialize(obj.value, serializer); + serde::Serializable::serialize(obj.error_selector, serializer); + serde::Serializable::serialize(obj.payload, serializer); serializer.decrease_container_depth(); } @@ -1369,87 +1326,12 @@ template Program::AssertionPayload serde::Deserializable::deserialize(Deserializer &deserializer) { deserializer.increase_container_depth(); Program::AssertionPayload obj; - obj.value = serde::Deserializable::deserialize(deserializer); + obj.error_selector = serde::Deserializable::deserialize(deserializer); + obj.payload = serde::Deserializable::deserialize(deserializer); deserializer.decrease_container_depth(); return obj; } -namespace Program { - - inline bool operator==(const AssertionPayload::StaticString &lhs, const AssertionPayload::StaticString &rhs) { - if (!(lhs.value == rhs.value)) { return false; } - return true; - } - - inline std::vector AssertionPayload::StaticString::bincodeSerialize() const { - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); - } - - inline AssertionPayload::StaticString AssertionPayload::StaticString::bincodeDeserialize(std::vector input) { - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw serde::deserialization_error("Some input bytes were not read"); - } - return value; - } - -} // end of namespace Program - -template <> -template -void serde::Serializable::serialize(const Program::AssertionPayload::StaticString &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.value, serializer); -} - -template <> -template -Program::AssertionPayload::StaticString 
serde::Deserializable::deserialize(Deserializer &deserializer) { - Program::AssertionPayload::StaticString obj; - obj.value = serde::Deserializable::deserialize(deserializer); - return obj; -} - -namespace Program { - - inline bool operator==(const AssertionPayload::Dynamic &lhs, const AssertionPayload::Dynamic &rhs) { - if (!(lhs.value == rhs.value)) { return false; } - return true; - } - - inline std::vector AssertionPayload::Dynamic::bincodeSerialize() const { - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); - } - - inline AssertionPayload::Dynamic AssertionPayload::Dynamic::bincodeDeserialize(std::vector input) { - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw serde::deserialization_error("Some input bytes were not read"); - } - return value; - } - -} // end of namespace Program - -template <> -template -void serde::Serializable::serialize(const Program::AssertionPayload::Dynamic &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.value, serializer); -} - -template <> -template -Program::AssertionPayload::Dynamic serde::Deserializable::deserialize(Deserializer &deserializer) { - Program::AssertionPayload::Dynamic obj; - obj.value = serde::Deserializable::deserialize(deserializer); - return obj; -} - namespace Program { inline bool operator==(const BinaryFieldOp &lhs, const BinaryFieldOp &rhs) { @@ -5423,8 +5305,7 @@ Program::BrilligOpcode::Trap serde::Deserializable namespace Program { inline bool operator==(const BrilligOpcode::Stop &lhs, const BrilligOpcode::Stop &rhs) { - if (!(lhs.return_data_offset == rhs.return_data_offset)) { return false; } - if (!(lhs.return_data_size == rhs.return_data_size)) { return false; } + if (!(lhs.return_data == rhs.return_data)) { return false; } return true; } @@ -5448,16 +5329,14 @@ namespace Program { template <> template void serde::Serializable::serialize(const Program::BrilligOpcode::Stop &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.return_data_offset, serializer); - serde::Serializable::serialize(obj.return_data_size, serializer); + serde::Serializable::serialize(obj.return_data, serializer); } template <> template Program::BrilligOpcode::Stop serde::Deserializable::deserialize(Deserializer &deserializer) { Program::BrilligOpcode::Stop obj; - obj.return_data_offset = serde::Deserializable::deserialize(deserializer); - obj.return_data_size = serde::Deserializable::deserialize(deserializer); + obj.return_data = serde::Deserializable::deserialize(deserializer); return obj; } @@ -5589,7 +5468,6 @@ namespace Program { if (!(lhs.public_parameters == rhs.public_parameters)) { return false; } if (!(lhs.return_values == rhs.return_values)) { return false; } if (!(lhs.assert_messages == rhs.assert_messages)) { return false; } - if (!(lhs.recursive == rhs.recursive)) { return false; } return true; } @@ -5621,7 +5499,6 @@ void serde::Serializable::serialize(const Program::Circuit &ob serde::Serializable::serialize(obj.public_parameters, serializer); serde::Serializable::serialize(obj.return_values, serializer); serde::Serializable::serialize(obj.assert_messages, serializer); - serde::Serializable::serialize(obj.recursive, serializer); serializer.decrease_container_depth(); } @@ -5637,7 +5514,6 @@ Program::Circuit serde::Deserializable::deserialize(Deserializ obj.public_parameters = 
serde::Deserializable::deserialize(deserializer); obj.return_values = serde::Deserializable::deserialize(deserializer); obj.assert_messages = serde::Deserializable::deserialize(deserializer); - obj.recursive = serde::Deserializable::deserialize(deserializer); deserializer.decrease_container_depth(); return obj; } @@ -5760,92 +5636,6 @@ Program::ConstantOrWitnessEnum::Witness serde::Deserializable Directive::bincodeSerialize() const { - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); - } - - inline Directive Directive::bincodeDeserialize(std::vector input) { - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw serde::deserialization_error("Some input bytes were not read"); - } - return value; - } - -} // end of namespace Program - -template <> -template -void serde::Serializable::serialize(const Program::Directive &obj, Serializer &serializer) { - serializer.increase_container_depth(); - serde::Serializable::serialize(obj.value, serializer); - serializer.decrease_container_depth(); -} - -template <> -template -Program::Directive serde::Deserializable::deserialize(Deserializer &deserializer) { - deserializer.increase_container_depth(); - Program::Directive obj; - obj.value = serde::Deserializable::deserialize(deserializer); - deserializer.decrease_container_depth(); - return obj; -} - -namespace Program { - - inline bool operator==(const Directive::ToLeRadix &lhs, const Directive::ToLeRadix &rhs) { - if (!(lhs.a == rhs.a)) { return false; } - if (!(lhs.b == rhs.b)) { return false; } - if (!(lhs.radix == rhs.radix)) { return false; } - return true; - } - - inline std::vector Directive::ToLeRadix::bincodeSerialize() const { - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); - } - - inline Directive::ToLeRadix Directive::ToLeRadix::bincodeDeserialize(std::vector input) { - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw serde::deserialization_error("Some input bytes were not read"); - } - return value; - } - -} // end of namespace Program - -template <> -template -void serde::Serializable::serialize(const Program::Directive::ToLeRadix &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.a, serializer); - serde::Serializable::serialize(obj.b, serializer); - serde::Serializable::serialize(obj.radix, serializer); -} - -template <> -template -Program::Directive::ToLeRadix serde::Deserializable::deserialize(Deserializer &deserializer) { - Program::Directive::ToLeRadix obj; - obj.a = serde::Deserializable::deserialize(deserializer); - obj.b = serde::Deserializable::deserialize(deserializer); - obj.radix = serde::Deserializable::deserialize(deserializer); - return obj; -} - namespace Program { inline bool operator==(const Expression &lhs, const Expression &rhs) { @@ -6957,44 +6747,6 @@ Program::Opcode::BlackBoxFuncCall serde::Deserializable Opcode::Directive::bincodeSerialize() const { - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); - } - - inline Opcode::Directive Opcode::Directive::bincodeDeserialize(std::vector input) { - auto deserializer = 
serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw serde::deserialization_error("Some input bytes were not read"); - } - return value; - } - -} // end of namespace Program - -template <> -template -void serde::Serializable::serialize(const Program::Opcode::Directive &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.value, serializer); -} - -template <> -template -Program::Opcode::Directive serde::Deserializable::deserialize(Deserializer &deserializer) { - Program::Opcode::Directive obj; - obj.value = serde::Deserializable::deserialize(deserializer); - return obj; -} - namespace Program { inline bool operator==(const Opcode::MemoryOp &lhs, const Opcode::MemoryOp &rhs) { diff --git a/acvm-repo/acir/src/circuit/directives.rs b/acvm-repo/acir/src/circuit/directives.rs deleted file mode 100644 index 3bc66288590..00000000000 --- a/acvm-repo/acir/src/circuit/directives.rs +++ /dev/null @@ -1,11 +0,0 @@ -use crate::native_types::{Expression, Witness}; -use serde::{Deserialize, Serialize}; - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -/// Directives do not apply any constraints. -/// You can think of them as opcodes that allow one to use non-determinism -/// In the future, this can be replaced with asm non-determinism blocks -pub enum Directive { - //decomposition of a: a=\sum b[i]*radix^i where b is an array of witnesses < radix in little endian form - ToLeRadix { a: Expression, b: Vec, radix: u32 }, -} diff --git a/acvm-repo/acir/src/circuit/mod.rs b/acvm-repo/acir/src/circuit/mod.rs index f700fefe0cd..33982065c2a 100644 --- a/acvm-repo/acir/src/circuit/mod.rs +++ b/acvm-repo/acir/src/circuit/mod.rs @@ -1,6 +1,5 @@ pub mod black_box_functions; pub mod brillig; -pub mod directives; pub mod opcodes; use crate::native_types::{Expression, Witness}; @@ -68,11 +67,6 @@ pub struct Circuit { // c++ code at the moment when it is, due to OpcodeLocation needing a comparison // implementation which is never generated. pub assert_messages: Vec<(OpcodeLocation, AssertionPayload)>, - - /// States whether the backend should use a SNARK recursion friendly prover. - /// If implemented by a backend, this means that proofs generated with this circuit - /// will be friendly for recursively verifying inside of another SNARK. - pub recursive: bool, } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] @@ -82,9 +76,9 @@ pub enum ExpressionOrMemory { } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub enum AssertionPayload { - StaticString(String), - Dynamic(/* error_selector */ u64, Vec>), +pub struct AssertionPayload { + pub error_selector: u64, + pub payload: Vec>, } #[derive(Debug, Copy, PartialEq, Eq, Hash, Clone, PartialOrd, Ord)] @@ -120,12 +114,6 @@ impl<'de> Deserialize<'de> for ErrorSelector { } } -/// This selector indicates that the payload is a string. -/// This is used to parse any error with a string payload directly, -/// to avoid users having to parse the error externally to the ACVM. -/// Only non-string errors need to be parsed externally to the ACVM using the circuit ABI. 
-pub const STRING_ERROR_SELECTOR: ErrorSelector = ErrorSelector(0); - #[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] pub struct RawAssertionPayload { pub selector: ErrorSelector, @@ -445,7 +433,6 @@ mod tests { public_parameters: PublicInputs(BTreeSet::from_iter(vec![Witness(2), Witness(12)])), return_values: PublicInputs(BTreeSet::from_iter(vec![Witness(4), Witness(12)])), assert_messages: Default::default(), - recursive: false, }; let program = Program { functions: vec![circuit], unconstrained_functions: Vec::new() }; @@ -481,7 +468,6 @@ mod tests { public_parameters: PublicInputs(BTreeSet::from_iter(vec![Witness(2)])), return_values: PublicInputs(BTreeSet::from_iter(vec![Witness(2)])), assert_messages: Default::default(), - recursive: false, }; let program = Program { functions: vec![circuit], unconstrained_functions: Vec::new() }; diff --git a/acvm-repo/acir/src/circuit/opcodes.rs b/acvm-repo/acir/src/circuit/opcodes.rs index 848d7bda84b..06effd3c5b6 100644 --- a/acvm-repo/acir/src/circuit/opcodes.rs +++ b/acvm-repo/acir/src/circuit/opcodes.rs @@ -1,7 +1,4 @@ -use super::{ - brillig::{BrilligFunctionId, BrilligInputs, BrilligOutputs}, - directives::Directive, -}; +use super::brillig::{BrilligFunctionId, BrilligInputs, BrilligOutputs}; pub mod function_id; pub use function_id::AcirFunctionId; @@ -60,7 +57,7 @@ pub enum Opcode { /// specialized constraints. /// /// Often used for exposing more efficient implementations of - /// SNARK-unfriendly computations. + /// SNARK-unfriendly computations. /// /// All black box functions take as input a tuple `(witness, num_bits)`, /// where `num_bits` is a constant representing the bit size of the input @@ -75,16 +72,6 @@ pub enum Opcode { /// embedded curve. BlackBoxFuncCall(BlackBoxFuncCall), - /// This opcode is a specialization of a Brillig opcode. Instead of having - /// some generic assembly code like Brillig, a directive has a hardcoded - /// name which tells the solver which computation to do: with Brillig, the - /// computation refers to the compiled bytecode of an unconstrained Noir - /// function, but with a directive, the computation is hardcoded inside the - /// compiler. - /// - /// Directives will be replaced by Brillig opcodes in the future. - Directive(Directive), - /// Atomic operation on a block of memory /// /// ACIR is able to address any array of witnesses. 
Each array is assigned @@ -158,18 +145,6 @@ impl std::fmt::Display for Opcode { } Opcode::BlackBoxFuncCall(g) => write!(f, "{g}"), - Opcode::Directive(Directive::ToLeRadix { a, b, radix: _ }) => { - write!(f, "DIR::TORADIX ")?; - write!( - f, - // TODO (Note): this assumes that the decomposed bits have contiguous witness indices - // This should be the case, however, we can also have a function which checks this - "(_{}, [_{}..._{}] )", - a, - b.first().unwrap().witness_index(), - b.last().unwrap().witness_index(), - ) - } Opcode::MemoryOp { block_id, op, predicate } => { write!(f, "MEM ")?; if let Some(pred) = predicate { diff --git a/acvm-repo/acir/src/lib.rs b/acvm-repo/acir/src/lib.rs index f8a31439127..bb5b50c0daf 100644 --- a/acvm-repo/acir/src/lib.rs +++ b/acvm-repo/acir/src/lib.rs @@ -42,7 +42,6 @@ mod reflection { use crate::{ circuit::{ brillig::{BrilligInputs, BrilligOutputs}, - directives::Directive, opcodes::{BlackBoxFuncCall, BlockType, ConstantOrWitnessEnum, FunctionInput}, AssertionPayload, Circuit, ExpressionOrMemory, ExpressionWidth, Opcode, OpcodeLocation, Program, @@ -77,7 +76,6 @@ mod reflection { tracer.trace_simple_type::>().unwrap(); tracer.trace_simple_type::().unwrap(); tracer.trace_simple_type::().unwrap(); - tracer.trace_simple_type::>().unwrap(); tracer.trace_simple_type::().unwrap(); tracer.trace_simple_type::().unwrap(); tracer.trace_simple_type::>().unwrap(); diff --git a/acvm-repo/acir/tests/test_program_serialization.rs b/acvm-repo/acir/tests/test_program_serialization.rs index a915cb95d07..002bad0e7f3 100644 --- a/acvm-repo/acir/tests/test_program_serialization.rs +++ b/acvm-repo/acir/tests/test_program_serialization.rs @@ -20,7 +20,9 @@ use acir::{ native_types::{Expression, Witness}, }; use acir_field::{AcirField, FieldElement}; -use brillig::{BitSize, HeapArray, HeapValueType, IntegerBitSize, MemoryAddress, ValueOrArray}; +use brillig::{ + BitSize, HeapArray, HeapValueType, HeapVector, IntegerBitSize, MemoryAddress, ValueOrArray, +}; #[test] fn addition_circuit() { @@ -47,11 +49,11 @@ fn addition_circuit() { let expected_serialization: Vec = vec![ 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 173, 144, 65, 14, 128, 32, 12, 4, 65, 124, 80, 75, 91, - 104, 111, 126, 69, 34, 252, 255, 9, 106, 228, 64, 162, 55, 153, 164, 217, 158, 38, 155, - 245, 238, 97, 189, 206, 187, 55, 161, 231, 214, 19, 254, 129, 126, 162, 107, 25, 92, 4, - 137, 185, 230, 88, 145, 112, 135, 104, 69, 5, 88, 74, 82, 84, 20, 149, 35, 42, 81, 85, 214, - 108, 197, 50, 24, 50, 85, 108, 98, 212, 186, 44, 204, 235, 5, 183, 99, 233, 46, 63, 252, - 110, 216, 56, 184, 15, 78, 146, 74, 173, 20, 141, 1, 0, 0, + 104, 111, 126, 69, 34, 252, 255, 9, 106, 228, 64, 194, 81, 38, 105, 182, 167, 201, 102, + 189, 251, 216, 159, 243, 110, 38, 244, 60, 122, 194, 63, 208, 47, 116, 109, 131, 139, 32, + 49, 215, 28, 43, 18, 158, 16, 173, 168, 0, 75, 73, 138, 138, 162, 114, 69, 37, 170, 202, + 154, 173, 88, 6, 67, 166, 138, 77, 140, 90, 151, 133, 117, 189, 224, 117, 108, 221, 229, + 135, 223, 13, 27, 135, 121, 106, 119, 3, 58, 173, 124, 163, 140, 1, 0, 0, ]; assert_eq!(bytes, expected_serialization) @@ -91,10 +93,10 @@ fn multi_scalar_mul_circuit() { let bytes = Program::serialize_program(&program); let expected_serialization: Vec = vec![ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 93, 77, 9, 14, 0, 32, 8, 202, 171, 227, 255, 255, 109, - 217, 162, 141, 114, 99, 2, 162, 74, 57, 53, 18, 2, 46, 208, 70, 122, 99, 162, 43, 113, 35, - 239, 102, 157, 230, 1, 94, 19, 45, 209, 145, 11, 202, 43, 238, 56, 249, 133, 254, 255, 187, - 79, 45, 
204, 84, 220, 246, 193, 0, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 93, 77, 9, 10, 0, 32, 8, 243, 236, 248, 255, 127, 35, + 163, 5, 35, 97, 184, 205, 169, 42, 183, 102, 65, 193, 21, 218, 73, 31, 44, 116, 35, 238, + 228, 189, 108, 208, 60, 193, 91, 161, 23, 6, 114, 73, 121, 195, 157, 32, 95, 232, 255, 191, + 203, 181, 1, 243, 231, 24, 106, 192, 0, 0, 0, ]; assert_eq!(bytes, expected_serialization) @@ -134,24 +136,24 @@ fn schnorr_verify_circuit() { let bytes = Program::serialize_program(&program); let expected_serialization: Vec = vec![ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 85, 211, 103, 78, 2, 81, 24, 70, 225, 193, 130, 96, 239, - 189, 96, 239, 189, 35, 34, 34, 34, 34, 238, 130, 253, 47, 129, 192, 9, 223, 36, 7, 146, - 201, 60, 209, 31, 144, 123, 207, 155, 73, 250, 159, 118, 239, 201, 132, 121, 103, 227, 205, - 211, 137, 247, 144, 60, 220, 123, 114, 225, 17, 121, 84, 206, 202, 99, 114, 78, 206, 203, - 227, 242, 132, 60, 41, 79, 201, 211, 242, 140, 60, 43, 207, 201, 243, 242, 130, 188, 40, - 47, 201, 203, 242, 138, 188, 42, 175, 201, 235, 242, 134, 188, 41, 111, 201, 219, 242, 142, - 92, 144, 119, 229, 61, 121, 95, 62, 144, 15, 229, 35, 249, 88, 62, 145, 79, 229, 51, 249, - 92, 190, 144, 47, 229, 43, 249, 90, 190, 145, 111, 229, 59, 249, 94, 126, 144, 31, 229, 39, - 249, 89, 126, 145, 95, 229, 162, 252, 38, 151, 228, 119, 185, 44, 127, 200, 21, 249, 83, - 174, 134, 233, 52, 137, 191, 125, 233, 255, 53, 249, 91, 174, 203, 63, 114, 67, 254, 149, - 155, 242, 159, 220, 10, 255, 199, 247, 183, 244, 59, 216, 38, 155, 100, 139, 108, 144, 237, - 165, 155, 203, 199, 111, 102, 83, 108, 137, 13, 177, 29, 54, 195, 86, 216, 8, 219, 96, 19, - 108, 129, 13, 208, 62, 205, 211, 58, 141, 211, 54, 77, 211, 50, 13, 211, 46, 205, 22, 146, - 126, 163, 180, 73, 147, 180, 72, 131, 180, 71, 115, 180, 70, 99, 180, 69, 83, 180, 68, 67, - 180, 67, 51, 180, 66, 35, 180, 65, 19, 180, 64, 3, 220, 61, 119, 206, 93, 115, 199, 197, - 184, 211, 82, 220, 97, 57, 238, 172, 18, 119, 84, 141, 187, 168, 197, 217, 215, 227, 172, - 27, 113, 182, 205, 56, 203, 244, 204, 210, 115, 75, 116, 158, 3, 159, 46, 43, 32, 188, 53, - 25, 5, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 77, 211, 103, 78, 2, 81, 24, 70, 225, 193, 130, 96, 239, + 189, 96, 239, 189, 35, 34, 34, 34, 82, 118, 193, 254, 151, 64, 224, 132, 111, 146, 67, 50, + 153, 39, 250, 3, 114, 239, 121, 51, 201, 240, 211, 29, 60, 153, 48, 239, 108, 188, 121, + 122, 241, 30, 145, 71, 7, 79, 46, 60, 38, 143, 203, 89, 121, 66, 206, 201, 121, 121, 82, + 158, 146, 167, 229, 25, 121, 86, 158, 147, 231, 229, 5, 121, 81, 94, 146, 151, 229, 21, + 121, 85, 94, 147, 215, 229, 13, 121, 83, 222, 146, 183, 229, 29, 121, 87, 222, 147, 11, + 242, 190, 124, 32, 31, 202, 71, 242, 177, 124, 34, 159, 202, 103, 242, 185, 124, 33, 95, + 202, 87, 242, 181, 124, 35, 223, 202, 119, 242, 189, 252, 32, 63, 202, 79, 242, 179, 252, + 34, 191, 202, 111, 242, 187, 92, 148, 63, 228, 146, 252, 41, 151, 229, 47, 185, 34, 127, + 203, 213, 48, 157, 38, 241, 183, 31, 253, 191, 38, 255, 202, 117, 249, 79, 110, 200, 255, + 114, 83, 110, 201, 237, 112, 39, 190, 191, 173, 223, 193, 54, 217, 36, 91, 100, 131, 108, + 47, 221, 92, 62, 126, 51, 155, 98, 75, 108, 136, 237, 176, 25, 182, 194, 70, 216, 6, 155, + 96, 11, 108, 128, 246, 105, 158, 214, 105, 156, 182, 105, 154, 150, 105, 152, 118, 105, + 182, 144, 12, 27, 165, 77, 154, 164, 69, 26, 164, 61, 154, 163, 53, 26, 163, 45, 154, 162, + 37, 26, 162, 29, 154, 161, 21, 26, 161, 13, 154, 160, 5, 26, 224, 238, 185, 115, 238, 154, + 59, 46, 198, 157, 150, 226, 14, 203, 113, 103, 
149, 184, 163, 106, 220, 69, 45, 206, 190, + 30, 103, 221, 136, 179, 109, 198, 89, 166, 103, 150, 158, 91, 162, 243, 244, 167, 15, 14, + 161, 226, 6, 24, 5, 0, 0, ]; assert_eq!(bytes, expected_serialization) @@ -162,31 +164,37 @@ fn simple_brillig_foreign_call() { let w_input = Witness(1); let w_inverted = Witness(2); + let value_address = MemoryAddress::direct(0); + let zero_usize = MemoryAddress::direct(1); + let one_usize = MemoryAddress::direct(2); + let brillig_bytecode = BrilligBytecode { bytecode: vec![ brillig::Opcode::Const { - destination: MemoryAddress::direct(0), + destination: zero_usize, bit_size: BitSize::Integer(IntegerBitSize::U32), - value: FieldElement::from(1_usize), + value: FieldElement::from(0_usize), }, brillig::Opcode::Const { - destination: MemoryAddress::direct(1), + destination: one_usize, bit_size: BitSize::Integer(IntegerBitSize::U32), - value: FieldElement::from(0_usize), + value: FieldElement::from(1_usize), }, brillig::Opcode::CalldataCopy { - destination_address: MemoryAddress::direct(0), - size_address: MemoryAddress::direct(0), - offset_address: MemoryAddress::direct(1), + destination_address: value_address, + size_address: one_usize, + offset_address: zero_usize, }, brillig::Opcode::ForeignCall { function: "invert".into(), - destinations: vec![ValueOrArray::MemoryAddress(MemoryAddress::direct(0))], + destinations: vec![ValueOrArray::MemoryAddress(value_address)], destination_value_types: vec![HeapValueType::field()], - inputs: vec![ValueOrArray::MemoryAddress(MemoryAddress::direct(0))], + inputs: vec![ValueOrArray::MemoryAddress(value_address)], input_value_types: vec![HeapValueType::field()], }, - brillig::Opcode::Stop { return_data_offset: 0, return_data_size: 1 }, + brillig::Opcode::Stop { + return_data: HeapVector { pointer: zero_usize, size: one_usize }, + }, ], }; @@ -214,12 +222,12 @@ fn simple_brillig_foreign_call() { let bytes = Program::serialize_program(&program); let expected_serialization: Vec = vec![ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 173, 79, 73, 10, 128, 48, 12, 204, 40, 46, 5, 111, 126, - 36, 254, 192, 207, 120, 240, 226, 65, 196, 247, 91, 48, 129, 80, 218, 122, 48, 3, 33, 147, - 9, 89, 6, 244, 98, 140, 1, 225, 157, 100, 173, 45, 84, 91, 37, 243, 63, 44, 240, 219, 197, - 246, 223, 38, 37, 176, 34, 85, 156, 169, 251, 144, 233, 183, 142, 206, 67, 114, 215, 121, - 63, 15, 84, 135, 222, 157, 98, 244, 194, 247, 227, 222, 206, 11, 31, 19, 165, 186, 164, - 207, 153, 222, 3, 91, 101, 84, 220, 120, 2, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 173, 81, 203, 10, 128, 48, 12, 179, 243, 57, 240, 230, + 143, 108, 127, 224, 207, 120, 240, 226, 65, 196, 239, 119, 98, 11, 101, 100, 94, 214, 64, + 73, 26, 88, 73, 24, 53, 31, 166, 52, 196, 186, 99, 150, 93, 67, 188, 149, 57, 212, 33, 146, + 221, 173, 160, 243, 186, 92, 144, 54, 127, 138, 245, 204, 62, 243, 95, 110, 13, 195, 122, + 144, 207, 240, 126, 28, 65, 71, 7, 250, 206, 105, 6, 214, 251, 113, 111, 231, 133, 190, 93, + 191, 40, 237, 37, 127, 1, 190, 36, 121, 0, 128, 254, 118, 42, 127, 2, 0, 0, ]; assert_eq!(bytes, expected_serialization) @@ -304,7 +312,22 @@ fn complex_brillig_foreign_call() { HeapValueType::field(), ], }, - brillig::Opcode::Stop { return_data_offset: 32, return_data_size: 5 }, + brillig::Opcode::Const { + destination: MemoryAddress::direct(0), + bit_size: BitSize::Integer(IntegerBitSize::U32), + value: FieldElement::from(32_usize), + }, + brillig::Opcode::Const { + destination: MemoryAddress::direct(1), + bit_size: BitSize::Integer(IntegerBitSize::U32), + value: 
FieldElement::from(5_usize), + }, + brillig::Opcode::Stop { + return_data: HeapVector { + pointer: MemoryAddress::direct(0), + size: MemoryAddress::direct(1), + }, + }, ], }; @@ -344,17 +367,17 @@ fn complex_brillig_foreign_call() { let bytes = Program::serialize_program(&program); let expected_serialization: Vec = vec![ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 85, 93, 10, 194, 48, 12, 78, 155, 233, 54, 240, - 205, 11, 8, 122, 128, 76, 47, 176, 187, 136, 111, 138, 62, 122, 124, 45, 75, 88, 23, 139, - 19, 76, 64, 63, 24, 89, 75, 242, 229, 159, 6, 24, 208, 60, 191, 192, 255, 11, 150, 145, - 101, 186, 71, 152, 66, 116, 123, 150, 244, 29, 186, 96, 199, 69, 94, 49, 198, 63, 136, 17, - 29, 98, 132, 172, 255, 63, 216, 111, 203, 190, 152, 214, 15, 11, 251, 83, 193, 176, 95, 75, - 62, 215, 44, 27, 93, 232, 100, 20, 225, 117, 241, 38, 144, 233, 105, 149, 4, 229, 185, 183, - 201, 232, 208, 42, 191, 198, 252, 36, 213, 216, 192, 103, 249, 250, 228, 185, 39, 225, 71, - 23, 126, 234, 132, 191, 114, 234, 83, 173, 234, 149, 231, 146, 251, 93, 193, 56, 129, 199, - 235, 229, 118, 62, 221, 177, 96, 170, 205, 19, 182, 234, 188, 43, 148, 108, 142, 67, 144, - 63, 52, 239, 244, 67, 65, 127, 206, 102, 13, 227, 56, 201, 195, 246, 0, 155, 0, 46, 128, - 245, 6, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 85, 205, 14, 130, 48, 12, 238, 54, 20, 136, 222, + 124, 1, 19, 125, 128, 161, 241, 238, 187, 24, 111, 26, 61, 250, 248, 178, 216, 198, 89, 26, + 56, 216, 18, 248, 146, 165, 12, 218, 175, 255, 193, 193, 7, 85, 123, 28, 62, 23, 40, 61, + 202, 244, 62, 192, 47, 72, 247, 140, 50, 254, 135, 198, 233, 113, 69, 171, 24, 253, 12, 98, + 12, 6, 49, 66, 214, 255, 9, 246, 91, 179, 47, 170, 245, 11, 194, 254, 164, 221, 90, 180, + 103, 137, 247, 18, 101, 197, 11, 157, 140, 60, 116, 23, 47, 7, 13, 207, 10, 101, 45, 124, + 87, 76, 232, 88, 51, 191, 202, 252, 145, 138, 177, 133, 254, 124, 109, 243, 60, 68, 226, + 15, 38, 252, 177, 33, 254, 194, 168, 79, 37, 171, 87, 158, 75, 238, 119, 13, 223, 1, 188, + 60, 238, 207, 219, 245, 21, 4, 83, 110, 158, 176, 99, 247, 189, 80, 178, 33, 14, 66, 254, + 159, 233, 211, 119, 130, 254, 144, 205, 88, 163, 98, 180, 18, 167, 13, 116, 65, 190, 222, + 250, 76, 4, 233, 188, 7, 0, 0, ]; assert_eq!(bytes, expected_serialization) @@ -392,11 +415,11 @@ fn memory_op_circuit() { let bytes = Program::serialize_program(&program); let expected_serialization: Vec = vec![ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 82, 65, 10, 0, 32, 8, 211, 180, 255, 216, 15, 250, - 255, 171, 10, 82, 176, 232, 150, 30, 26, 200, 118, 144, 49, 135, 8, 11, 117, 14, 169, 102, - 229, 162, 140, 78, 219, 206, 137, 174, 44, 111, 104, 217, 190, 24, 236, 75, 113, 94, 146, - 93, 174, 252, 86, 46, 71, 223, 78, 46, 104, 129, 253, 155, 45, 60, 195, 5, 3, 89, 11, 161, - 73, 39, 3, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 144, 75, 10, 0, 32, 8, 68, 253, 117, 31, 187, 65, + 247, 63, 85, 65, 10, 82, 203, 116, 209, 128, 60, 221, 12, 227, 32, 108, 181, 53, 108, 187, + 147, 140, 24, 118, 231, 169, 97, 212, 55, 245, 106, 95, 76, 246, 229, 60, 47, 173, 46, 87, + 127, 43, 87, 178, 127, 231, 16, 148, 194, 29, 195, 11, 220, 154, 119, 139, 115, 25, 38, 3, + 0, 0, ]; assert_eq!(bytes, expected_serialization) @@ -495,15 +518,15 @@ fn nested_acir_call_circuit() { let bytes = Program::serialize_program(&program); let expected_serialization: Vec = vec![ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 205, 146, 97, 10, 195, 32, 12, 133, 163, 66, 207, 147, - 24, 173, 241, 223, 174, 50, 153, 189, 255, 17, 214, 177, 148, 57, 17, 250, 99, 14, 250, - 224, 97, 144, 
16, 146, 143, 231, 224, 45, 167, 126, 105, 217, 109, 118, 91, 248, 200, 168, - 225, 248, 63, 107, 114, 208, 233, 104, 188, 233, 139, 191, 137, 108, 51, 139, 113, 13, 161, - 38, 95, 137, 233, 142, 62, 23, 137, 24, 98, 89, 133, 132, 162, 196, 135, 23, 230, 42, 65, - 82, 46, 57, 97, 166, 192, 149, 182, 152, 121, 211, 97, 110, 222, 94, 8, 13, 132, 182, 54, - 48, 144, 235, 8, 254, 11, 22, 76, 132, 101, 231, 237, 229, 23, 189, 213, 54, 119, 15, 83, - 212, 199, 172, 175, 191, 226, 102, 96, 140, 251, 202, 84, 13, 204, 141, 224, 25, 176, 161, - 158, 53, 121, 144, 73, 14, 4, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 146, 81, 10, 195, 48, 8, 134, 77, 164, 247, 209, + 152, 52, 230, 109, 87, 89, 88, 122, 255, 35, 172, 99, 41, 11, 89, 161, 15, 77, 31, 250, + 193, 143, 34, 34, 250, 35, 194, 23, 172, 250, 48, 173, 50, 171, 44, 252, 48, 85, 176, 213, + 143, 154, 16, 58, 182, 198, 71, 141, 116, 14, 182, 205, 44, 161, 217, 251, 18, 93, 97, 225, + 39, 185, 148, 53, 144, 15, 121, 86, 86, 14, 26, 94, 78, 69, 138, 122, 141, 41, 167, 72, + 137, 189, 20, 94, 66, 146, 165, 14, 195, 113, 123, 17, 52, 38, 180, 185, 129, 127, 176, 51, + 240, 42, 175, 96, 160, 87, 118, 220, 94, 110, 170, 183, 218, 230, 238, 221, 39, 234, 191, + 172, 207, 177, 171, 153, 155, 153, 106, 96, 236, 3, 30, 249, 181, 199, 27, 99, 149, 130, + 253, 11, 4, 0, 0, ]; assert_eq!(bytes, expected_serialization); } diff --git a/acvm-repo/acvm/Cargo.toml b/acvm-repo/acvm/Cargo.toml index 2b143ebe475..722c825dca5 100644 --- a/acvm-repo/acvm/Cargo.toml +++ b/acvm-repo/acvm/Cargo.toml @@ -14,7 +14,6 @@ repository.workspace = true workspace = true [dependencies] -num-bigint.workspace = true thiserror.workspace = true tracing.workspace = true serde.workspace = true @@ -43,3 +42,5 @@ ark-bn254.workspace = true bn254_blackbox_solver.workspace = true proptest.workspace = true zkhash = { version = "^0.2.0", default-features = false } +num-bigint.workspace = true + diff --git a/acvm-repo/acvm/src/compiler/optimizers/constant_backpropagation.rs b/acvm-repo/acvm/src/compiler/optimizers/constant_backpropagation.rs index 5b778f63f07..994cfc84e66 100644 --- a/acvm-repo/acvm/src/compiler/optimizers/constant_backpropagation.rs +++ b/acvm-repo/acvm/src/compiler/optimizers/constant_backpropagation.rs @@ -3,14 +3,13 @@ use std::collections::{BTreeMap, BTreeSet, HashMap}; use crate::{ compiler::optimizers::GeneralOptimizer, pwg::{ - arithmetic::ExpressionSolver, blackbox::solve_range_opcode, directives::solve_directives, - BrilligSolver, BrilligSolverStatus, + arithmetic::ExpressionSolver, blackbox::solve_range_opcode, BrilligSolver, + BrilligSolverStatus, }, }; use acir::{ circuit::{ brillig::{Brillig, BrilligInputs, BrilligOutputs}, - directives::Directive, opcodes::BlackBoxFuncCall, Circuit, Opcode, }, @@ -212,34 +211,6 @@ impl ConstantBackpropagationOptimizer { } } - Opcode::Directive(Directive::ToLeRadix { a, b, radix }) => { - if b.iter().all(|output| known_witnesses.contains_key(output)) { - continue; - } else if b.iter().any(|witness| required_witnesses.contains(witness)) { - // If one of the brillig opcode's outputs is a required witness then we can't remove the opcode. In this case we can't replace - // all of the uses of this witness with the calculated constant so we'll be attempting to use an uninitialized witness. - // - // We then do not attempt execution of this opcode and just simplify the inputs. 
- Opcode::Directive(Directive::ToLeRadix { - a: remap_expression(&known_witnesses, a), - b, - radix, - }) - } else { - let directive = Directive::ToLeRadix { - a: remap_expression(&known_witnesses, a), - b, - radix, - }; - let result = solve_directives(&mut known_witnesses, &directive); - - match result { - Ok(()) => continue, - Err(_) => Opcode::Directive(directive), - } - } - } - Opcode::BlackBoxFuncCall(BlackBoxFuncCall::RANGE { input }) => { if solve_range_opcode(&known_witnesses, &input).is_ok() { continue; @@ -288,7 +259,6 @@ mod tests { public_parameters: PublicInputs::default(), return_values: PublicInputs::default(), assert_messages: Default::default(), - recursive: false, } } diff --git a/acvm-repo/acvm/src/compiler/optimizers/merge_expressions.rs b/acvm-repo/acvm/src/compiler/optimizers/merge_expressions.rs index e2585f64acd..9d0d5b3af60 100644 --- a/acvm-repo/acvm/src/compiler/optimizers/merge_expressions.rs +++ b/acvm-repo/acvm/src/compiler/optimizers/merge_expressions.rs @@ -3,7 +3,6 @@ use std::collections::{BTreeMap, BTreeSet, HashMap}; use acir::{ circuit::{ brillig::{BrilligInputs, BrilligOutputs}, - directives::Directive, opcodes::BlockId, Circuit, Opcode, }, @@ -161,7 +160,6 @@ impl MergeExpressionsOptimizer { witnesses } - Opcode::Directive(Directive::ToLeRadix { a, .. }) => CircuitSimulator::expr_wit(a), Opcode::MemoryOp { block_id: _, op, predicate } => { //index et value, et predicate let mut witnesses = CircuitSimulator::expr_wit(&op.index); @@ -297,7 +295,6 @@ mod tests { public_parameters: PublicInputs::default(), return_values: PublicInputs::default(), assert_messages: Default::default(), - recursive: false, }; check_circuit(circuit); } @@ -350,7 +347,6 @@ mod tests { public_parameters: PublicInputs::default(), return_values: PublicInputs::default(), assert_messages: Default::default(), - recursive: false, }; check_circuit(circuit); } diff --git a/acvm-repo/acvm/src/compiler/optimizers/redundant_range.rs b/acvm-repo/acvm/src/compiler/optimizers/redundant_range.rs index 3570a36a7e7..f9c715a277f 100644 --- a/acvm-repo/acvm/src/compiler/optimizers/redundant_range.rs +++ b/acvm-repo/acvm/src/compiler/optimizers/redundant_range.rs @@ -191,7 +191,6 @@ mod tests { public_parameters: PublicInputs::default(), return_values: PublicInputs::default(), assert_messages: Default::default(), - recursive: false, } } diff --git a/acvm-repo/acvm/src/compiler/simulator.rs b/acvm-repo/acvm/src/compiler/simulator.rs index d89b53aa564..893195f342a 100644 --- a/acvm-repo/acvm/src/compiler/simulator.rs +++ b/acvm-repo/acvm/src/compiler/simulator.rs @@ -1,7 +1,6 @@ use acir::{ circuit::{ brillig::{BrilligInputs, BrilligOutputs}, - directives::Directive, opcodes::{BlockId, FunctionInput}, Circuit, Opcode, }, @@ -84,17 +83,6 @@ impl CircuitSimulator { } true } - Opcode::Directive(directive) => match directive { - Directive::ToLeRadix { a, b, .. 
} => { - if !self.can_solve_expression(a) { - return false; - } - for w in b { - self.mark_solvable(*w); - } - true - } - }, Opcode::MemoryOp { block_id, op, predicate } => { if !self.can_solve_expression(&op.index) { return false; @@ -246,7 +234,6 @@ mod tests { public_parameters, return_values: PublicInputs::default(), assert_messages: Default::default(), - recursive: false, } } diff --git a/acvm-repo/acvm/src/compiler/transformers/mod.rs b/acvm-repo/acvm/src/compiler/transformers/mod.rs index b11d054a57b..c9ce4ac7895 100644 --- a/acvm-repo/acvm/src/compiler/transformers/mod.rs +++ b/acvm-repo/acvm/src/compiler/transformers/mod.rs @@ -1,5 +1,5 @@ use acir::{ - circuit::{brillig::BrilligOutputs, directives::Directive, Circuit, ExpressionWidth, Opcode}, + circuit::{brillig::BrilligOutputs, Circuit, ExpressionWidth, Opcode}, native_types::{Expression, Witness}, AcirField, }; @@ -104,17 +104,6 @@ pub(super) fn transform_internal( new_acir_opcode_positions.push(acir_opcode_positions[index]); transformed_opcodes.push(opcode); } - Opcode::Directive(ref directive) => { - match directive { - Directive::ToLeRadix { b, .. } => { - for witness in b { - transformer.mark_solvable(*witness); - } - } - } - new_acir_opcode_positions.push(acir_opcode_positions[index]); - transformed_opcodes.push(opcode); - } Opcode::MemoryInit { .. } => { // `MemoryInit` does not write values to the `WitnessMap` new_acir_opcode_positions.push(acir_opcode_positions[index]); diff --git a/acvm-repo/acvm/src/pwg/brillig.rs b/acvm-repo/acvm/src/pwg/brillig.rs index dffa45dbd7a..a5f5783478e 100644 --- a/acvm-repo/acvm/src/pwg/brillig.rs +++ b/acvm-repo/acvm/src/pwg/brillig.rs @@ -6,7 +6,6 @@ use acir::{ brillig::{BrilligFunctionId, BrilligInputs, BrilligOutputs}, opcodes::BlockId, ErrorSelector, OpcodeLocation, RawAssertionPayload, ResolvedAssertionPayload, - STRING_ERROR_SELECTOR, }, native_types::WitnessMap, AcirField, @@ -307,25 +306,10 @@ fn extract_failure_payload_from_memory( .expect("Error selector is not u64"), ); - match error_selector { - STRING_ERROR_SELECTOR => { - // If the error selector is 0, it means the error is a string - let string = revert_values_iter - .map(|&memory_value| { - let as_u8: u8 = memory_value.try_into().expect("String item is not u8"); - as_u8 as char - }) - .collect(); - Some(ResolvedAssertionPayload::String(string)) - } - _ => { - // If the error selector is not 0, it means the error is a custom error - Some(ResolvedAssertionPayload::Raw(RawAssertionPayload { - selector: error_selector, - data: revert_values_iter.map(|value| value.to_field()).collect(), - })) - } - } + Some(ResolvedAssertionPayload::Raw(RawAssertionPayload { + selector: error_selector, + data: revert_values_iter.map(|value| value.to_field()).collect(), + })) } } diff --git a/acvm-repo/acvm/src/pwg/directives/mod.rs b/acvm-repo/acvm/src/pwg/directives/mod.rs deleted file mode 100644 index d7bee88c278..00000000000 --- a/acvm-repo/acvm/src/pwg/directives/mod.rs +++ /dev/null @@ -1,49 +0,0 @@ -use acir::{circuit::directives::Directive, native_types::WitnessMap, AcirField}; -use num_bigint::BigUint; - -use crate::OpcodeResolutionError; - -use super::{get_value, insert_value, ErrorLocation}; - -/// Attempts to solve the [`Directive`] opcode `directive`. -/// If successful, `initial_witness` will be mutated to contain the new witness assignment. -/// -/// Returns `Ok(OpcodeResolution)` to signal whether the directive was successful solved. -/// -/// Returns `Err(OpcodeResolutionError)` if a circuit constraint is unsatisfied. 
-pub(crate) fn solve_directives( - initial_witness: &mut WitnessMap, - directive: &Directive, -) -> Result<(), OpcodeResolutionError> { - match directive { - Directive::ToLeRadix { a, b, radix } => { - let value_a = get_value(a, initial_witness)?; - let big_integer = BigUint::from_bytes_be(&value_a.to_be_bytes()); - - // Decompose the integer into its radix digits in little endian form. - let decomposed_integer = big_integer.to_radix_le(*radix); - - if b.len() < decomposed_integer.len() { - return Err(OpcodeResolutionError::UnsatisfiedConstrain { - opcode_location: ErrorLocation::Unresolved, - payload: None, - }); - } - - for (i, witness) in b.iter().enumerate() { - // Fetch the `i'th` digit from the decomposed integer list - // and convert it to a field element. - // If it is not available, which can happen when the decomposed integer - // list is shorter than the witness list, we return 0. - let value = match decomposed_integer.get(i) { - Some(digit) => F::from_be_bytes_reduce(&[*digit]), - None => F::zero(), - }; - - insert_value(witness, value, initial_witness)?; - } - - Ok(()) - } - } -} diff --git a/acvm-repo/acvm/src/pwg/mod.rs b/acvm-repo/acvm/src/pwg/mod.rs index fa3498da613..20c12a72fc0 100644 --- a/acvm-repo/acvm/src/pwg/mod.rs +++ b/acvm-repo/acvm/src/pwg/mod.rs @@ -10,7 +10,7 @@ use acir::{ AcirFunctionId, BlockId, ConstantOrWitnessEnum, FunctionInput, InvalidInputBitSize, }, AssertionPayload, ErrorSelector, ExpressionOrMemory, Opcode, OpcodeLocation, - RawAssertionPayload, ResolvedAssertionPayload, STRING_ERROR_SELECTOR, + RawAssertionPayload, ResolvedAssertionPayload, }, native_types::{Expression, Witness, WitnessMap}, AcirField, BlackBoxFunc, @@ -18,8 +18,7 @@ use acir::{ use acvm_blackbox_solver::BlackBoxResolutionError; use self::{ - arithmetic::ExpressionSolver, blackbox::bigint::AcvmBigIntSolver, directives::solve_directives, - memory_op::MemoryOpSolver, + arithmetic::ExpressionSolver, blackbox::bigint::AcvmBigIntSolver, memory_op::MemoryOpSolver, }; use crate::BlackBoxFunctionSolver; @@ -29,8 +28,6 @@ use thiserror::Error; pub(crate) mod arithmetic; // Brillig bytecode pub(crate) mod brillig; -// Directives -pub(crate) mod directives; // black box functions pub(crate) mod blackbox; mod memory_op; @@ -371,7 +368,6 @@ impl<'a, F: AcirField, B: BlackBoxFunctionSolver> ACVM<'a, F, B> { bb_func, &mut self.bigint_solver, ), - Opcode::Directive(directive) => solve_directives(&mut self.witness_map, directive), Opcode::MemoryInit { block_id, init, .. 
} => { let solver = self.block_solvers.entry(*block_id).or_default(); solver.init(init, &self.witness_map) @@ -445,59 +441,32 @@ impl<'a, F: AcirField, B: BlackBoxFunctionSolver> ACVM<'a, F, B> { &self, location: OpcodeLocation, ) -> Option> { - let (_, found_assertion_payload) = + let (_, assertion_descriptor) = self.assertion_payloads.iter().find(|(loc, _)| location == *loc)?; - match found_assertion_payload { - AssertionPayload::StaticString(string) => { - Some(ResolvedAssertionPayload::String(string.clone())) - } - AssertionPayload::Dynamic(error_selector, expression) => { - let mut fields = vec![]; - for expr in expression { - match expr { - ExpressionOrMemory::Expression(expr) => { - let value = get_value(expr, &self.witness_map).ok()?; - fields.push(value); - } - ExpressionOrMemory::Memory(block_id) => { - let memory_block = self.block_solvers.get(block_id)?; - fields.extend((0..memory_block.block_len).map(|memory_index| { - *memory_block - .block_value - .get(&memory_index) - .expect("All memory is initialized on creation") - })); - } - } + let mut fields = Vec::new(); + for expr in assertion_descriptor.payload.iter() { + match expr { + ExpressionOrMemory::Expression(expr) => { + let value = get_value(expr, &self.witness_map).ok()?; + fields.push(value); + } + ExpressionOrMemory::Memory(block_id) => { + let memory_block = self.block_solvers.get(block_id)?; + fields.extend((0..memory_block.block_len).map(|memory_index| { + *memory_block + .block_value + .get(&memory_index) + .expect("All memory is initialized on creation") + })); } - let error_selector = ErrorSelector::new(*error_selector); - - Some(match error_selector { - STRING_ERROR_SELECTOR => { - // If the error selector is 0, it means the error is a string - let string = fields - .iter() - .map(|field| { - let as_u8: u8 = field - .try_to_u64() - .expect("String character doesn't fit in u64") - .try_into() - .expect("String character doesn't fit in u8"); - as_u8 as char - }) - .collect(); - ResolvedAssertionPayload::String(string) - } - _ => { - // If the error selector is not 0, it means the error is a custom error - ResolvedAssertionPayload::Raw(RawAssertionPayload { - selector: error_selector, - data: fields, - }) - } - }) } } + let error_selector = ErrorSelector::new(assertion_descriptor.error_selector); + + Some(ResolvedAssertionPayload::Raw(RawAssertionPayload { + selector: error_selector, + data: fields, + })) } fn solve_brillig_call_opcode( @@ -530,7 +499,7 @@ impl<'a, F: AcirField, B: BlackBoxFunctionSolver> ACVM<'a, F, B> { )?, }; - let result = solver.solve().map_err(|err| self.map_brillig_error(err))?; + let result = solver.solve()?; match result { BrilligSolverStatus::ForeignCallWait(foreign_call) => { @@ -570,31 +539,6 @@ impl<'a, F: AcirField, B: BlackBoxFunctionSolver> ACVM<'a, F, B> { } } - fn map_brillig_error(&self, mut err: OpcodeResolutionError) -> OpcodeResolutionError { - match &mut err { - OpcodeResolutionError::BrilligFunctionFailed { call_stack, payload, .. 
} => { - // Some brillig errors have static strings as payloads, we can resolve them here - let last_location = - call_stack.last().expect("Call stacks should have at least one item"); - let assertion_descriptor = - self.assertion_payloads.iter().find_map(|(loc, payload)| { - if loc == last_location { - Some(payload) - } else { - None - } - }); - - if let Some(AssertionPayload::StaticString(string)) = assertion_descriptor { - *payload = Some(ResolvedAssertionPayload::String(string.clone())); - } - - err - } - _ => err, - } - } - pub fn step_into_brillig(&mut self) -> StepResult<'a, F, B> { let Opcode::BrilligCall { id, inputs, outputs, predicate } = &self.opcodes[self.instruction_pointer] diff --git a/acvm-repo/acvm/tests/solver.rs b/acvm-repo/acvm/tests/solver.rs index 2828ea3d79e..8b164b7c0f2 100644 --- a/acvm-repo/acvm/tests/solver.rs +++ b/acvm-repo/acvm/tests/solver.rs @@ -77,13 +77,6 @@ fn inversion_brillig_oracle_equivalence() { let w_x_plus_y = Witness(6); let w_equal_res = Witness(7); - let equal_opcode = BrilligOpcode::BinaryFieldOp { - op: BinaryFieldOp::Equals, - lhs: MemoryAddress::direct(0), - rhs: MemoryAddress::direct(1), - destination: MemoryAddress::direct(2), - }; - let opcodes = vec![ Opcode::BrilligCall { id: BrilligFunctionId(0), @@ -122,22 +115,38 @@ fn inversion_brillig_oracle_equivalence() { }), ]; + let equal_opcode = BrilligOpcode::BinaryFieldOp { + op: BinaryFieldOp::Equals, + lhs: MemoryAddress::direct(0), + rhs: MemoryAddress::direct(1), + destination: MemoryAddress::direct(2), + }; + + let zero_usize = MemoryAddress::direct(3); + let two_usize = MemoryAddress::direct(4); + let three_usize = MemoryAddress::direct(5); + let brillig_bytecode = BrilligBytecode { bytecode: vec![ BrilligOpcode::Const { - destination: MemoryAddress::direct(0), + destination: zero_usize, + bit_size: BitSize::Integer(IntegerBitSize::U32), + value: FieldElement::from(0u64), + }, + BrilligOpcode::Const { + destination: two_usize, bit_size: BitSize::Integer(IntegerBitSize::U32), value: FieldElement::from(2u64), }, BrilligOpcode::Const { - destination: MemoryAddress::direct(1), + destination: three_usize, bit_size: BitSize::Integer(IntegerBitSize::U32), - value: FieldElement::from(0u64), + value: FieldElement::from(3u64), }, BrilligOpcode::CalldataCopy { destination_address: MemoryAddress::direct(0), - size_address: MemoryAddress::direct(0), - offset_address: MemoryAddress::direct(1), + size_address: two_usize, + offset_address: zero_usize, }, equal_opcode, // Oracles are named 'foreign calls' in brillig @@ -148,7 +157,9 @@ fn inversion_brillig_oracle_equivalence() { inputs: vec![ValueOrArray::MemoryAddress(MemoryAddress::direct(0))], input_value_types: vec![HeapValueType::field()], }, - BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 3 }, + BrilligOpcode::Stop { + return_data: HeapVector { pointer: zero_usize, size: three_usize }, + }, ], }; @@ -216,13 +227,6 @@ fn double_inversion_brillig_oracle() { let w_ij_oracle = Witness(10); let w_i_plus_j = Witness(11); - let equal_opcode = BrilligOpcode::BinaryFieldOp { - op: BinaryFieldOp::Equals, - lhs: MemoryAddress::direct(0), - rhs: MemoryAddress::direct(1), - destination: MemoryAddress::direct(4), - }; - let opcodes = vec![ Opcode::BrilligCall { id: BrilligFunctionId(0), @@ -268,22 +272,38 @@ fn double_inversion_brillig_oracle() { }), ]; + let zero_usize = MemoryAddress::direct(5); + let three_usize = MemoryAddress::direct(6); + let five_usize = MemoryAddress::direct(7); + + let equal_opcode = BrilligOpcode::BinaryFieldOp { + 
op: BinaryFieldOp::Equals, + lhs: MemoryAddress::direct(0), + rhs: MemoryAddress::direct(1), + destination: MemoryAddress::direct(4), + }; + let brillig_bytecode = BrilligBytecode { bytecode: vec![ BrilligOpcode::Const { - destination: MemoryAddress::direct(0), + destination: zero_usize, + bit_size: BitSize::Integer(IntegerBitSize::U32), + value: FieldElement::from(0u64), + }, + BrilligOpcode::Const { + destination: three_usize, bit_size: BitSize::Integer(IntegerBitSize::U32), value: FieldElement::from(3u64), }, BrilligOpcode::Const { - destination: MemoryAddress::direct(1), + destination: five_usize, bit_size: BitSize::Integer(IntegerBitSize::U32), - value: FieldElement::from(0u64), + value: FieldElement::from(5u64), }, BrilligOpcode::CalldataCopy { destination_address: MemoryAddress::direct(0), - size_address: MemoryAddress::direct(0), - offset_address: MemoryAddress::direct(1), + size_address: three_usize, + offset_address: zero_usize, }, equal_opcode, // Oracles are named 'foreign calls' in brillig @@ -301,7 +321,9 @@ fn double_inversion_brillig_oracle() { inputs: vec![ValueOrArray::MemoryAddress(MemoryAddress::direct(2))], input_value_types: vec![HeapValueType::field()], }, - BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 5 }, + BrilligOpcode::Stop { + return_data: HeapVector { pointer: zero_usize, size: five_usize }, + }, ], }; @@ -386,22 +408,31 @@ fn oracle_dependent_execution() { let w_x_inv = Witness(3); let w_y_inv = Witness(4); + let zero_usize = MemoryAddress::direct(4); + let three_usize = MemoryAddress::direct(5); + let four_usize = MemoryAddress::direct(6); + let brillig_bytecode = BrilligBytecode { bytecode: vec![ BrilligOpcode::Const { - destination: MemoryAddress::direct(0), + destination: zero_usize, + bit_size: BitSize::Integer(IntegerBitSize::U32), + value: FieldElement::from(0u64), + }, + BrilligOpcode::Const { + destination: three_usize, bit_size: BitSize::Integer(IntegerBitSize::U32), value: FieldElement::from(3u64), }, BrilligOpcode::Const { - destination: MemoryAddress::direct(1), + destination: four_usize, bit_size: BitSize::Integer(IntegerBitSize::U32), - value: FieldElement::from(0u64), + value: FieldElement::from(4u64), }, BrilligOpcode::CalldataCopy { destination_address: MemoryAddress::direct(0), - size_address: MemoryAddress::direct(0), - offset_address: MemoryAddress::direct(1), + size_address: three_usize, + offset_address: zero_usize, }, // Oracles are named 'foreign calls' in brillig BrilligOpcode::ForeignCall { function: "invert".into(), @@ -417,7 +448,9 @@ fn oracle_dependent_execution() { inputs: vec![ValueOrArray::MemoryAddress(MemoryAddress::direct(2))], input_value_types: vec![HeapValueType::field()], }, - BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 4 }, + BrilligOpcode::Stop { + return_data: HeapVector { pointer: zero_usize, size: four_usize }, + }, ], }; @@ -662,7 +695,7 @@ fn unsatisfied_opcode_resolved_brillig() { }; // Jump pass the trap if the values are equal, else // jump to the trap - let location_of_stop = 3; + let location_of_stop = 7; let jmp_if_opcode = BrilligOpcode::JumpIf { condition: MemoryAddress::direct(2), location: location_of_stop }; @@ -673,7 +706,12 @@ fn unsatisfied_opcode_resolved_brillig() { size: MemoryAddress::direct(3), }, }; - let stop_opcode = BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 0 }; + let stop_opcode = BrilligOpcode::Stop { + return_data: HeapVector { + pointer: MemoryAddress::direct(0), + size: MemoryAddress::direct(3), + }, + }; let brillig_bytecode = 
BrilligBytecode { bytecode: vec![ diff --git a/acvm-repo/acvm_js/test/shared/addition.ts b/acvm-repo/acvm_js/test/shared/addition.ts index 820a415acf3..2b8124e77d7 100644 --- a/acvm-repo/acvm_js/test/shared/addition.ts +++ b/acvm-repo/acvm_js/test/shared/addition.ts @@ -3,10 +3,10 @@ import { WitnessMap } from '@noir-lang/acvm_js'; // See `addition_circuit` integration test in `acir/tests/test_program_serialization.rs`. export const bytecode = Uint8Array.from([ 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 173, 144, 65, 14, 128, 32, 12, 4, 65, 124, 80, 75, 91, 104, 111, 126, 69, 34, 252, - 255, 9, 106, 228, 64, 162, 55, 153, 164, 217, 158, 38, 155, 245, 238, 97, 189, 206, 187, 55, 161, 231, 214, 19, 254, - 129, 126, 162, 107, 25, 92, 4, 137, 185, 230, 88, 145, 112, 135, 104, 69, 5, 88, 74, 82, 84, 20, 149, 35, 42, 81, 85, - 214, 108, 197, 50, 24, 50, 85, 108, 98, 212, 186, 44, 204, 235, 5, 183, 99, 233, 46, 63, 252, 110, 216, 56, 184, 15, - 78, 146, 74, 173, 20, 141, 1, 0, 0, + 255, 9, 106, 228, 64, 194, 81, 38, 105, 182, 167, 201, 102, 189, 251, 216, 159, 243, 110, 38, 244, 60, 122, 194, 63, + 208, 47, 116, 109, 131, 139, 32, 49, 215, 28, 43, 18, 158, 16, 173, 168, 0, 75, 73, 138, 138, 162, 114, 69, 37, 170, + 202, 154, 173, 88, 6, 67, 166, 138, 77, 140, 90, 151, 133, 117, 189, 224, 117, 108, 221, 229, 135, 223, 13, 27, 135, + 121, 106, 119, 3, 58, 173, 124, 163, 140, 1, 0, 0, ]); export const initialWitnessMap: WitnessMap = new Map([ diff --git a/acvm-repo/acvm_js/test/shared/complex_foreign_call.ts b/acvm-repo/acvm_js/test/shared/complex_foreign_call.ts index 8eb7b7d5059..24fbc1a921a 100644 --- a/acvm-repo/acvm_js/test/shared/complex_foreign_call.ts +++ b/acvm-repo/acvm_js/test/shared/complex_foreign_call.ts @@ -2,14 +2,14 @@ import { WitnessMap } from '@noir-lang/acvm_js'; // See `complex_brillig_foreign_call` integration test in `acir/tests/test_program_serialization.rs`. 
export const bytecode = Uint8Array.from([ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 85, 93, 10, 194, 48, 12, 78, 155, 233, 54, 240, 205, 11, 8, 122, 128, 76, 47, - 176, 187, 136, 111, 138, 62, 122, 124, 45, 75, 88, 23, 139, 19, 76, 64, 63, 24, 89, 75, 242, 229, 159, 6, 24, 208, 60, - 191, 192, 255, 11, 150, 145, 101, 186, 71, 152, 66, 116, 123, 150, 244, 29, 186, 96, 199, 69, 94, 49, 198, 63, 136, - 17, 29, 98, 132, 172, 255, 63, 216, 111, 203, 190, 152, 214, 15, 11, 251, 83, 193, 176, 95, 75, 62, 215, 44, 27, 93, - 232, 100, 20, 225, 117, 241, 38, 144, 233, 105, 149, 4, 229, 185, 183, 201, 232, 208, 42, 191, 198, 252, 36, 213, 216, - 192, 103, 249, 250, 228, 185, 39, 225, 71, 23, 126, 234, 132, 191, 114, 234, 83, 173, 234, 149, 231, 146, 251, 93, - 193, 56, 129, 199, 235, 229, 118, 62, 221, 177, 96, 170, 205, 19, 182, 234, 188, 43, 148, 108, 142, 67, 144, 63, 52, - 239, 244, 67, 65, 127, 206, 102, 13, 227, 56, 201, 195, 246, 0, 155, 0, 46, 128, 245, 6, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 85, 205, 14, 130, 48, 12, 238, 54, 20, 136, 222, 124, 1, 19, 125, 128, 161, + 241, 238, 187, 24, 111, 26, 61, 250, 248, 178, 216, 198, 89, 26, 56, 216, 18, 248, 146, 165, 12, 218, 175, 255, 193, + 193, 7, 85, 123, 28, 62, 23, 40, 61, 202, 244, 62, 192, 47, 72, 247, 140, 50, 254, 135, 198, 233, 113, 69, 171, 24, + 253, 12, 98, 12, 6, 49, 66, 214, 255, 9, 246, 91, 179, 47, 170, 245, 11, 194, 254, 164, 221, 90, 180, 103, 137, 247, + 18, 101, 197, 11, 157, 140, 60, 116, 23, 47, 7, 13, 207, 10, 101, 45, 124, 87, 76, 232, 88, 51, 191, 202, 252, 145, + 138, 177, 133, 254, 124, 109, 243, 60, 68, 226, 15, 38, 252, 177, 33, 254, 194, 168, 79, 37, 171, 87, 158, 75, 238, + 119, 13, 223, 1, 188, 60, 238, 207, 219, 245, 21, 4, 83, 110, 158, 176, 99, 247, 189, 80, 178, 33, 14, 66, 254, 159, + 233, 211, 119, 130, 254, 144, 205, 88, 163, 98, 180, 18, 167, 13, 116, 65, 190, 222, 250, 76, 4, 233, 188, 7, 0, 0, ]); export const initialWitnessMap: WitnessMap = new Map([ [1, '0x0000000000000000000000000000000000000000000000000000000000000001'], diff --git a/acvm-repo/acvm_js/test/shared/foreign_call.ts b/acvm-repo/acvm_js/test/shared/foreign_call.ts index dc3c6f23f6f..da0b9974c61 100644 --- a/acvm-repo/acvm_js/test/shared/foreign_call.ts +++ b/acvm-repo/acvm_js/test/shared/foreign_call.ts @@ -2,11 +2,11 @@ import { WitnessMap } from '@noir-lang/acvm_js'; // See `simple_brillig_foreign_call` integration test in `acir/tests/test_program_serialization.rs`. 
export const bytecode = Uint8Array.from([ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 173, 79, 73, 10, 128, 48, 12, 204, 40, 46, 5, 111, 126, 36, 254, 192, 207, 120, - 240, 226, 65, 196, 247, 91, 48, 129, 80, 218, 122, 48, 3, 33, 147, 9, 89, 6, 244, 98, 140, 1, 225, 157, 100, 173, 45, - 84, 91, 37, 243, 63, 44, 240, 219, 197, 246, 223, 38, 37, 176, 34, 85, 156, 169, 251, 144, 233, 183, 142, 206, 67, - 114, 215, 121, 63, 15, 84, 135, 222, 157, 98, 244, 194, 247, 227, 222, 206, 11, 31, 19, 165, 186, 164, 207, 153, 222, - 3, 91, 101, 84, 220, 120, 2, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 173, 81, 203, 10, 128, 48, 12, 179, 243, 57, 240, 230, 143, 108, 127, 224, 207, + 120, 240, 226, 65, 196, 239, 119, 98, 11, 101, 100, 94, 214, 64, 73, 26, 88, 73, 24, 53, 31, 166, 52, 196, 186, 99, + 150, 93, 67, 188, 149, 57, 212, 33, 146, 221, 173, 160, 243, 186, 92, 144, 54, 127, 138, 245, 204, 62, 243, 95, 110, + 13, 195, 122, 144, 207, 240, 126, 28, 65, 71, 7, 250, 206, 105, 6, 214, 251, 113, 111, 231, 133, 190, 93, 191, 40, + 237, 37, 127, 1, 190, 36, 121, 0, 128, 254, 118, 42, 127, 2, 0, 0, ]); export const initialWitnessMap: WitnessMap = new Map([ [1, '0x0000000000000000000000000000000000000000000000000000000000000005'], diff --git a/acvm-repo/acvm_js/test/shared/memory_op.ts b/acvm-repo/acvm_js/test/shared/memory_op.ts index f7443c2258b..2f0fbfb85f1 100644 --- a/acvm-repo/acvm_js/test/shared/memory_op.ts +++ b/acvm-repo/acvm_js/test/shared/memory_op.ts @@ -1,9 +1,9 @@ // See `memory_op_circuit` integration test in `acir/tests/test_program_serialization.rs`. export const bytecode = Uint8Array.from([ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 82, 65, 10, 0, 32, 8, 211, 180, 255, 216, 15, 250, 255, 171, 10, 82, 176, 232, - 150, 30, 26, 200, 118, 144, 49, 135, 8, 11, 117, 14, 169, 102, 229, 162, 140, 78, 219, 206, 137, 174, 44, 111, 104, - 217, 190, 24, 236, 75, 113, 94, 146, 93, 174, 252, 86, 46, 71, 223, 78, 46, 104, 129, 253, 155, 45, 60, 195, 5, 3, 89, - 11, 161, 73, 39, 3, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 144, 75, 10, 0, 32, 8, 68, 253, 117, 31, 187, 65, 247, 63, 85, 65, 10, 82, + 203, 116, 209, 128, 60, 221, 12, 227, 32, 108, 181, 53, 108, 187, 147, 140, 24, 118, 231, 169, 97, 212, 55, 245, 106, + 95, 76, 246, 229, 60, 47, 173, 46, 87, 127, 43, 87, 178, 127, 231, 16, 148, 194, 29, 195, 11, 220, 154, 119, 139, 115, + 25, 38, 3, 0, 0, ]); export const initialWitnessMap = new Map([ diff --git a/acvm-repo/acvm_js/test/shared/multi_scalar_mul.ts b/acvm-repo/acvm_js/test/shared/multi_scalar_mul.ts index 239d5473606..3ec589dd0c8 100644 --- a/acvm-repo/acvm_js/test/shared/multi_scalar_mul.ts +++ b/acvm-repo/acvm_js/test/shared/multi_scalar_mul.ts @@ -1,8 +1,8 @@ // See `multi_scalar_mul_circuit` integration test in `acir/tests/test_program_serialization.rs`. 
export const bytecode = Uint8Array.from([ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 93, 77, 9, 14, 0, 32, 8, 202, 171, 227, 255, 255, 109, 217, 162, 141, 114, 99, 2, - 162, 74, 57, 53, 18, 2, 46, 208, 70, 122, 99, 162, 43, 113, 35, 239, 102, 157, 230, 1, 94, 19, 45, 209, 145, 11, 202, - 43, 238, 56, 249, 133, 254, 255, 187, 79, 45, 204, 84, 220, 246, 193, 0, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 93, 77, 9, 10, 0, 32, 8, 243, 236, 248, 255, 127, 35, 163, 5, 35, 97, 184, 205, + 169, 42, 183, 102, 65, 193, 21, 218, 73, 31, 44, 116, 35, 238, 228, 189, 108, 208, 60, 193, 91, 161, 23, 6, 114, 73, + 121, 195, 157, 32, 95, 232, 255, 191, 203, 181, 1, 243, 231, 24, 106, 192, 0, 0, 0, ]); export const initialWitnessMap = new Map([ [1, '0x0000000000000000000000000000000000000000000000000000000000000001'], diff --git a/acvm-repo/acvm_js/test/shared/nested_acir_call.ts b/acvm-repo/acvm_js/test/shared/nested_acir_call.ts index 64051dff93f..3464809dfc4 100644 --- a/acvm-repo/acvm_js/test/shared/nested_acir_call.ts +++ b/acvm-repo/acvm_js/test/shared/nested_acir_call.ts @@ -2,13 +2,13 @@ import { WitnessMap, StackItem, WitnessStack } from '@noir-lang/acvm_js'; // See `nested_acir_call_circuit` integration test in `acir/tests/test_program_serialization.rs`. export const bytecode = Uint8Array.from([ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 205, 146, 97, 10, 195, 32, 12, 133, 163, 66, 207, 147, 24, 173, 241, 223, 174, 50, - 153, 189, 255, 17, 214, 177, 148, 57, 17, 250, 99, 14, 250, 224, 97, 144, 16, 146, 143, 231, 224, 45, 167, 126, 105, - 217, 109, 118, 91, 248, 200, 168, 225, 248, 63, 107, 114, 208, 233, 104, 188, 233, 139, 191, 137, 108, 51, 139, 113, - 13, 161, 38, 95, 137, 233, 142, 62, 23, 137, 24, 98, 89, 133, 132, 162, 196, 135, 23, 230, 42, 65, 82, 46, 57, 97, - 166, 192, 149, 182, 152, 121, 211, 97, 110, 222, 94, 8, 13, 132, 182, 54, 48, 144, 235, 8, 254, 11, 22, 76, 132, 101, - 231, 237, 229, 23, 189, 213, 54, 119, 15, 83, 212, 199, 172, 175, 191, 226, 102, 96, 140, 251, 202, 84, 13, 204, 141, - 224, 25, 176, 161, 158, 53, 121, 144, 73, 14, 4, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 146, 81, 10, 195, 48, 8, 134, 77, 164, 247, 209, 152, 52, 230, 109, 87, 89, + 88, 122, 255, 35, 172, 99, 41, 11, 89, 161, 15, 77, 31, 250, 193, 143, 34, 34, 250, 35, 194, 23, 172, 250, 48, 173, + 50, 171, 44, 252, 48, 85, 176, 213, 143, 154, 16, 58, 182, 198, 71, 141, 116, 14, 182, 205, 44, 161, 217, 251, 18, 93, + 97, 225, 39, 185, 148, 53, 144, 15, 121, 86, 86, 14, 26, 94, 78, 69, 138, 122, 141, 41, 167, 72, 137, 189, 20, 94, 66, + 146, 165, 14, 195, 113, 123, 17, 52, 38, 180, 185, 129, 127, 176, 51, 240, 42, 175, 96, 160, 87, 118, 220, 94, 110, + 170, 183, 218, 230, 238, 221, 39, 234, 191, 172, 207, 177, 171, 153, 155, 153, 106, 96, 236, 3, 30, 249, 181, 199, 27, + 99, 149, 130, 253, 11, 4, 0, 0, ]); export const initialWitnessMap: WitnessMap = new Map([ diff --git a/acvm-repo/acvm_js/test/shared/schnorr_verify.ts b/acvm-repo/acvm_js/test/shared/schnorr_verify.ts index 830ca1026d6..d2df63a8ddb 100644 --- a/acvm-repo/acvm_js/test/shared/schnorr_verify.ts +++ b/acvm-repo/acvm_js/test/shared/schnorr_verify.ts @@ -1,19 +1,19 @@ // See `schnorr_verify_circuit` integration test in `acir/tests/test_program_serialization.rs`. 
export const bytecode = Uint8Array.from([ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 85, 211, 103, 78, 2, 81, 24, 70, 225, 193, 130, 96, 239, 189, 96, 239, 189, 35, 34, - 34, 34, 34, 238, 130, 253, 47, 129, 192, 9, 223, 36, 7, 146, 201, 60, 209, 31, 144, 123, 207, 155, 73, 250, 159, 118, - 239, 201, 132, 121, 103, 227, 205, 211, 137, 247, 144, 60, 220, 123, 114, 225, 17, 121, 84, 206, 202, 99, 114, 78, - 206, 203, 227, 242, 132, 60, 41, 79, 201, 211, 242, 140, 60, 43, 207, 201, 243, 242, 130, 188, 40, 47, 201, 203, 242, - 138, 188, 42, 175, 201, 235, 242, 134, 188, 41, 111, 201, 219, 242, 142, 92, 144, 119, 229, 61, 121, 95, 62, 144, 15, - 229, 35, 249, 88, 62, 145, 79, 229, 51, 249, 92, 190, 144, 47, 229, 43, 249, 90, 190, 145, 111, 229, 59, 249, 94, 126, - 144, 31, 229, 39, 249, 89, 126, 145, 95, 229, 162, 252, 38, 151, 228, 119, 185, 44, 127, 200, 21, 249, 83, 174, 134, - 233, 52, 137, 191, 125, 233, 255, 53, 249, 91, 174, 203, 63, 114, 67, 254, 149, 155, 242, 159, 220, 10, 255, 199, 247, - 183, 244, 59, 216, 38, 155, 100, 139, 108, 144, 237, 165, 155, 203, 199, 111, 102, 83, 108, 137, 13, 177, 29, 54, 195, - 86, 216, 8, 219, 96, 19, 108, 129, 13, 208, 62, 205, 211, 58, 141, 211, 54, 77, 211, 50, 13, 211, 46, 205, 22, 146, - 126, 163, 180, 73, 147, 180, 72, 131, 180, 71, 115, 180, 70, 99, 180, 69, 83, 180, 68, 67, 180, 67, 51, 180, 66, 35, - 180, 65, 19, 180, 64, 3, 220, 61, 119, 206, 93, 115, 199, 197, 184, 211, 82, 220, 97, 57, 238, 172, 18, 119, 84, 141, - 187, 168, 197, 217, 215, 227, 172, 27, 113, 182, 205, 56, 203, 244, 204, 210, 115, 75, 116, 158, 3, 159, 46, 43, 32, - 188, 53, 25, 5, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 77, 211, 103, 78, 2, 81, 24, 70, 225, 193, 130, 96, 239, 189, 96, 239, 189, 35, 34, + 34, 34, 82, 118, 193, 254, 151, 64, 224, 132, 111, 146, 67, 50, 153, 39, 250, 3, 114, 239, 121, 51, 201, 240, 211, 29, + 60, 153, 48, 239, 108, 188, 121, 122, 241, 30, 145, 71, 7, 79, 46, 60, 38, 143, 203, 89, 121, 66, 206, 201, 121, 121, + 82, 158, 146, 167, 229, 25, 121, 86, 158, 147, 231, 229, 5, 121, 81, 94, 146, 151, 229, 21, 121, 85, 94, 147, 215, + 229, 13, 121, 83, 222, 146, 183, 229, 29, 121, 87, 222, 147, 11, 242, 190, 124, 32, 31, 202, 71, 242, 177, 124, 34, + 159, 202, 103, 242, 185, 124, 33, 95, 202, 87, 242, 181, 124, 35, 223, 202, 119, 242, 189, 252, 32, 63, 202, 79, 242, + 179, 252, 34, 191, 202, 111, 242, 187, 92, 148, 63, 228, 146, 252, 41, 151, 229, 47, 185, 34, 127, 203, 213, 48, 157, + 38, 241, 183, 31, 253, 191, 38, 255, 202, 117, 249, 79, 110, 200, 255, 114, 83, 110, 201, 237, 112, 39, 190, 191, 173, + 223, 193, 54, 217, 36, 91, 100, 131, 108, 47, 221, 92, 62, 126, 51, 155, 98, 75, 108, 136, 237, 176, 25, 182, 194, 70, + 216, 6, 155, 96, 11, 108, 128, 246, 105, 158, 214, 105, 156, 182, 105, 154, 150, 105, 152, 118, 105, 182, 144, 12, 27, + 165, 77, 154, 164, 69, 26, 164, 61, 154, 163, 53, 26, 163, 45, 154, 162, 37, 26, 162, 29, 154, 161, 21, 26, 161, 13, + 154, 160, 5, 26, 224, 238, 185, 115, 238, 154, 59, 46, 198, 157, 150, 226, 14, 203, 113, 103, 149, 184, 163, 106, 220, + 69, 45, 206, 190, 30, 103, 221, 136, 179, 109, 198, 89, 166, 103, 150, 158, 91, 162, 243, 244, 167, 15, 14, 161, 226, + 6, 24, 5, 0, 0, ]); export const initialWitnessMap = new Map([ diff --git a/acvm-repo/brillig/src/lib.rs b/acvm-repo/brillig/src/lib.rs index 5bd9f898d59..cf31ff79996 100644 --- a/acvm-repo/brillig/src/lib.rs +++ b/acvm-repo/brillig/src/lib.rs @@ -4,7 +4,6 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies, unused_extern_crates))] //! 
The Brillig bytecode is distinct from regular [ACIR][acir] in that it does not generate constraints. -//! This is a generalization over the fixed directives that exists within in the ACVM. //! //! [acir]: https://crates.io/crates/acir //! [acvm]: https://crates.io/crates/acvm diff --git a/acvm-repo/brillig/src/opcodes.rs b/acvm-repo/brillig/src/opcodes.rs index 0d87c5b9410..8b72b5a9b41 100644 --- a/acvm-repo/brillig/src/opcodes.rs +++ b/acvm-repo/brillig/src/opcodes.rs @@ -309,8 +309,7 @@ pub enum BrilligOpcode { }, /// Stop execution, returning data after the offset Stop { - return_data_offset: usize, - return_data_size: usize, + return_data: HeapVector, }, } diff --git a/acvm-repo/brillig_vm/src/lib.rs b/acvm-repo/brillig_vm/src/lib.rs index 89d72c4614b..45025fbb208 100644 --- a/acvm-repo/brillig_vm/src/lib.rs +++ b/acvm-repo/brillig_vm/src/lib.rs @@ -6,7 +6,6 @@ //! The Brillig VM is a specialized VM which allows the [ACVM][acvm] to perform custom non-determinism. //! //! Brillig bytecode is distinct from regular [ACIR][acir] in that it does not generate constraints. -//! This is a generalization over the fixed directives that exists within in the ACVM. //! //! [acir]: https://crates.io/crates/acir //! [acvm]: https://crates.io/crates/acvm @@ -357,8 +356,16 @@ impl<'a, F: AcirField, B: BlackBoxFunctionSolver> VM<'a, F, B> { self.trap(0, 0) } } - Opcode::Stop { return_data_offset, return_data_size } => { - self.finish(*return_data_offset, *return_data_size) + Opcode::Stop { return_data } => { + let return_data_size = self.memory.read(return_data.size).to_usize(); + if return_data_size > 0 { + self.finish( + self.memory.read_ref(return_data.pointer).unwrap_direct(), + return_data_size, + ) + } else { + self.finish(0, 0) + } } Opcode::Load { destination: destination_address, source_pointer } => { // Convert our source_pointer to an address @@ -1005,28 +1012,37 @@ mod tests { fn cast_opcode() { let calldata: Vec = vec![((2_u128.pow(32)) - 1).into()]; + let value_address = MemoryAddress::direct(1); + let one_usize = MemoryAddress::direct(2); + let zero_usize = MemoryAddress::direct(3); + let opcodes = &[ Opcode::Const { - destination: MemoryAddress::direct(0), + destination: one_usize, bit_size: BitSize::Integer(IntegerBitSize::U32), value: FieldElement::from(1u64), }, Opcode::Const { - destination: MemoryAddress::direct(1), + destination: zero_usize, bit_size: BitSize::Integer(IntegerBitSize::U32), value: FieldElement::from(0u64), }, Opcode::CalldataCopy { - destination_address: MemoryAddress::direct(0), - size_address: MemoryAddress::direct(0), - offset_address: MemoryAddress::direct(1), + destination_address: value_address, + size_address: one_usize, + offset_address: zero_usize, }, Opcode::Cast { - destination: MemoryAddress::direct(1), - source: MemoryAddress::direct(0), + destination: value_address, + source: value_address, bit_size: BitSize::Integer(IntegerBitSize::U8), }, - Opcode::Stop { return_data_offset: 1, return_data_size: 1 }, + Opcode::Stop { + return_data: HeapVector { + pointer: one_usize, // Since value_address is direct(1) + size: one_usize, + }, + }, ]; let mut vm = VM::new(calldata, opcodes, vec![], &StubbedBlackBoxSolver, false); @@ -1051,33 +1067,42 @@ mod tests { fn not_opcode() { let calldata: Vec = vec![(1_usize).into()]; + let value_address = MemoryAddress::direct(1); + let one_usize = MemoryAddress::direct(2); + let zero_usize = MemoryAddress::direct(3); + let opcodes = &[ Opcode::Const { - destination: MemoryAddress::direct(0), + destination: one_usize, 
bit_size: BitSize::Integer(IntegerBitSize::U32), value: FieldElement::from(1u64), }, Opcode::Const { - destination: MemoryAddress::direct(1), + destination: zero_usize, bit_size: BitSize::Integer(IntegerBitSize::U32), value: FieldElement::from(0u64), }, Opcode::CalldataCopy { - destination_address: MemoryAddress::direct(0), - size_address: MemoryAddress::direct(0), - offset_address: MemoryAddress::direct(1), + destination_address: value_address, + size_address: one_usize, + offset_address: zero_usize, }, Opcode::Cast { - destination: MemoryAddress::direct(1), - source: MemoryAddress::direct(0), + destination: value_address, + source: value_address, bit_size: BitSize::Integer(IntegerBitSize::U128), }, Opcode::Not { - destination: MemoryAddress::direct(1), - source: MemoryAddress::direct(1), + destination: value_address, + source: value_address, bit_size: IntegerBitSize::U128, }, - Opcode::Stop { return_data_offset: 1, return_data_size: 1 }, + Opcode::Stop { + return_data: HeapVector { + pointer: one_usize, // Since value_address is direct(1) + size: one_usize, + }, + }, ]; let mut vm = VM::new(calldata, opcodes, vec![], &StubbedBlackBoxSolver, false); diff --git a/compiler/integration-tests/package.json b/compiler/integration-tests/package.json index a438c2965c3..a9d437da792 100644 --- a/compiler/integration-tests/package.json +++ b/compiler/integration-tests/package.json @@ -13,7 +13,7 @@ "lint": "NODE_NO_WARNINGS=1 eslint . --ext .ts --ignore-path ./.eslintignore --max-warnings 0" }, "dependencies": { - "@aztec/bb.js": "0.61.0", + "@aztec/bb.js": "0.63.1", "@noir-lang/noir_js": "workspace:*", "@noir-lang/noir_wasm": "workspace:*", "@nomicfoundation/hardhat-chai-matchers": "^2.0.0", diff --git a/compiler/integration-tests/scripts/codegen-verifiers.sh b/compiler/integration-tests/scripts/codegen-verifiers.sh index bec59eb6889..de1f71a4cc0 100755 --- a/compiler/integration-tests/scripts/codegen-verifiers.sh +++ b/compiler/integration-tests/scripts/codegen-verifiers.sh @@ -17,8 +17,8 @@ KEYS=$(mktemp -d) # Codegen verifier contract for 1_mul mul_dir=$repo_root/test_programs/execution_success/1_mul nargo --program-dir $mul_dir compile -$NARGO_BACKEND_PATH write_vk -b $mul_dir/target/1_mul.json -o $KEYS/1_mul -$NARGO_BACKEND_PATH contract -k $KEYS/1_mul -o $contracts_dir/1_mul.sol +$NARGO_BACKEND_PATH write_vk -b $mul_dir/target/1_mul.json -o $KEYS/1_mul +$NARGO_BACKEND_PATH contract -k $KEYS/1_mul -o $contracts_dir/1_mul.sol # Codegen verifier contract for assert_statement assert_statement_dir=$repo_root/test_programs/execution_success/assert_statement diff --git a/compiler/integration-tests/test/browser/recursion.test.ts b/compiler/integration-tests/test/browser/recursion.test.ts index 4ee92d5b795..71d6697f843 100644 --- a/compiler/integration-tests/test/browser/recursion.test.ts +++ b/compiler/integration-tests/test/browser/recursion.test.ts @@ -19,7 +19,7 @@ await newABICoder(); await initACVM(); const base_relative_path = '../../../../..'; -const circuit_main = 'test_programs/execution_success/assert_statement_recursive'; +const circuit_main = 'test_programs/execution_success/assert_statement'; const circuit_recursion = 'compiler/integration-tests/circuits/recursion'; async function getCircuit(projectPath: string) { @@ -45,7 +45,7 @@ describe('It compiles noir program code, receiving circuit bytes and abi object. 
const main_program = await getCircuit(`${base_relative_path}/${circuit_main}`); const main_inputs: InputMap = TOML.parse(circuit_main_toml) as InputMap; - const main_backend = new UltraPlonkBackend(main_program.bytecode); + const main_backend = new UltraPlonkBackend(main_program.bytecode, {}, { recursive: true }); const { witness: main_witnessUint8Array } = await new Noir(main_program).execute(main_inputs); @@ -73,7 +73,7 @@ describe('It compiles noir program code, receiving circuit bytes and abi object. const recursion_program = await getCircuit(`${base_relative_path}/${circuit_recursion}`); - const recursion_backend = new UltraPlonkBackend(recursion_program.bytecode); + const recursion_backend = new UltraPlonkBackend(recursion_program.bytecode, {}, { recursive: false }); const { witness: recursion_witnessUint8Array } = await new Noir(recursion_program).execute(recursion_inputs); diff --git a/compiler/integration-tests/test/node/onchain_recursive_verification.test.ts b/compiler/integration-tests/test/node/onchain_recursive_verification.test.ts index b08d52e1604..1694da28805 100644 --- a/compiler/integration-tests/test/node/onchain_recursive_verification.test.ts +++ b/compiler/integration-tests/test/node/onchain_recursive_verification.test.ts @@ -17,7 +17,7 @@ it.skip(`smart contract can verify a recursive proof`, async () => { const fm = createFileManager(basePath); const innerCompilationResult = await compile( fm, - join(basePath, './test_programs/execution_success/assert_statement_recursive'), + join(basePath, './test_programs/execution_success/assert_statement'), ); if (!('program' in innerCompilationResult)) { throw new Error('Compilation failed'); @@ -35,11 +35,11 @@ it.skip(`smart contract can verify a recursive proof`, async () => { // Intermediate proof - const inner_backend = new UltraPlonkBackend(innerProgram.bytecode); + const inner_backend = new UltraPlonkBackend(innerProgram.bytecode, {}, { recursive: true }); const inner = new Noir(innerProgram); const inner_prover_toml = readFileSync( - join(basePath, `./test_programs/execution_success/assert_statement_recursive/Prover.toml`), + join(basePath, `./test_programs/execution_success/assert_statement/Prover.toml`), ).toString(); const inner_inputs = toml.parse(inner_prover_toml); @@ -67,7 +67,7 @@ it.skip(`smart contract can verify a recursive proof`, async () => { const { witness: recursionWitness } = await recursion.execute(recursion_inputs); - const recursion_backend = new UltraPlonkBackend(recursionProgram.bytecode); + const recursion_backend = new UltraPlonkBackend(recursionProgram.bytecode, {}, { recursive: false }); const recursion_proof = await recursion_backend.generateProof(recursionWitness); expect(await recursion_backend.verifyProof(recursion_proof)).to.be.true; diff --git a/compiler/integration-tests/test/node/smart_contract_verifier.test.ts b/compiler/integration-tests/test/node/smart_contract_verifier.test.ts index c43fba01424..74a36262829 100644 --- a/compiler/integration-tests/test/node/smart_contract_verifier.test.ts +++ b/compiler/integration-tests/test/node/smart_contract_verifier.test.ts @@ -46,7 +46,7 @@ test_cases.forEach((testInfo) => { const inputs = toml.parse(prover_toml); const { witness } = await program.execute(inputs); - const backend = new UltraPlonkBackend(noir_program.bytecode); + const backend = new UltraPlonkBackend(noir_program.bytecode, {}, { recursive: false }); const proofData = await backend.generateProof(witness); // JS verification diff --git a/compiler/noirc_driver/src/abi_gen.rs 
b/compiler/noirc_driver/src/abi_gen.rs index 4cd480b883d..625a35c8d15 100644 --- a/compiler/noirc_driver/src/abi_gen.rs +++ b/compiler/noirc_driver/src/abi_gen.rs @@ -7,6 +7,7 @@ use noirc_abi::{ Abi, AbiErrorType, AbiParameter, AbiReturnType, AbiType, AbiValue, AbiVisibility, Sign, }; use noirc_errors::Span; +use noirc_evaluator::ErrorType; use noirc_frontend::ast::{Signedness, Visibility}; use noirc_frontend::TypeBinding; use noirc_frontend::{ @@ -26,7 +27,7 @@ pub(super) fn gen_abi( context: &Context, func_id: &FuncId, return_visibility: Visibility, - error_types: BTreeMap, + error_types: BTreeMap, ) -> Abi { let (parameters, return_type) = compute_function_abi(context, func_id); let return_type = return_type.map(|typ| AbiReturnType { @@ -35,7 +36,7 @@ pub(super) fn gen_abi( }); let error_types = error_types .into_iter() - .map(|(selector, typ)| (selector, build_abi_error_type(context, &typ))) + .map(|(selector, typ)| (selector, build_abi_error_type(context, typ))) .collect(); Abi { parameters, return_type, error_types } } @@ -49,19 +50,23 @@ fn get_main_function_span(context: &Context) -> Span { } } -fn build_abi_error_type(context: &Context, typ: &Type) -> AbiErrorType { +fn build_abi_error_type(context: &Context, typ: ErrorType) -> AbiErrorType { match typ { - Type::FmtString(len, item_types) => { - let span = get_main_function_span(context); - let length = len.evaluate_to_u32(span).expect("Cannot evaluate fmt length"); - let Type::Tuple(item_types) = item_types.as_ref() else { - unreachable!("FmtString items must be a tuple") - }; - let item_types = - item_types.iter().map(|typ| abi_type_from_hir_type(context, typ)).collect(); - AbiErrorType::FmtString { length, item_types } + ErrorType::Dynamic(typ) => { + if let Type::FmtString(len, item_types) = typ { + let span = get_main_function_span(context); + let length = len.evaluate_to_u32(span).expect("Cannot evaluate fmt length"); + let Type::Tuple(item_types) = item_types.as_ref() else { + unreachable!("FmtString items must be a tuple") + }; + let item_types = + item_types.iter().map(|typ| abi_type_from_hir_type(context, typ)).collect(); + AbiErrorType::FmtString { length, item_types } + } else { + AbiErrorType::Custom(abi_type_from_hir_type(context, &typ)) + } } - _ => AbiErrorType::Custom(abi_type_from_hir_type(context, typ)), + ErrorType::String(string) => AbiErrorType::String { string }, } } diff --git a/compiler/noirc_evaluator/Cargo.toml b/compiler/noirc_evaluator/Cargo.toml index aac07339dd6..e25b5bf855a 100644 --- a/compiler/noirc_evaluator/Cargo.toml +++ b/compiler/noirc_evaluator/Cargo.toml @@ -20,7 +20,6 @@ fxhash.workspace = true iter-extended.workspace = true thiserror.workspace = true num-bigint = "0.4" -num-traits.workspace = true im.workspace = true serde.workspace = true serde_json.workspace = true @@ -33,6 +32,7 @@ cfg-if.workspace = true [dev-dependencies] proptest.workspace = true similar-asserts.workspace = true +num-traits.workspace = true [features] bn254 = ["noirc_frontend/bn254"] diff --git a/compiler/noirc_evaluator/src/acir/acir_variable.rs b/compiler/noirc_evaluator/src/acir/acir_variable.rs index 6aa8bc32975..6ba072f01a4 100644 --- a/compiler/noirc_evaluator/src/acir/acir_variable.rs +++ b/compiler/noirc_evaluator/src/acir/acir_variable.rs @@ -1112,10 +1112,10 @@ impl> AcirContext { let witness = self.var_to_witness(witness_var)?; self.acir_ir.range_constraint(witness, *bit_size)?; if let Some(message) = message { - self.acir_ir.assertion_payloads.insert( - self.acir_ir.last_acir_opcode_location(), - 
AssertionPayload::StaticString(message.clone()), - ); + let payload = self.generate_assertion_message_payload(message.clone()); + self.acir_ir + .assertion_payloads + .insert(self.acir_ir.last_acir_opcode_location(), payload); } } NumericType::NativeField => { @@ -2072,6 +2072,13 @@ impl> AcirContext { self.acir_ir.push_opcode(Opcode::Call { id, inputs, outputs, predicate }); Ok(results) } + + pub(crate) fn generate_assertion_message_payload( + &mut self, + message: String, + ) -> AssertionPayload { + self.acir_ir.generate_assertion_message_payload(message) + } } /// Enum representing the possible values that a diff --git a/compiler/noirc_evaluator/src/acir/brillig_directive.rs b/compiler/noirc_evaluator/src/acir/brillig_directive.rs index 49e259e0ce5..89fc7f1eda5 100644 --- a/compiler/noirc_evaluator/src/acir/brillig_directive.rs +++ b/compiler/noirc_evaluator/src/acir/brillig_directive.rs @@ -1,5 +1,8 @@ use acvm::acir::{ - brillig::{BinaryFieldOp, BitSize, IntegerBitSize, MemoryAddress, Opcode as BrilligOpcode}, + brillig::{ + BinaryFieldOp, BinaryIntOp, BitSize, HeapVector, IntegerBitSize, MemoryAddress, + Opcode as BrilligOpcode, + }, AcirField, }; @@ -18,25 +21,27 @@ pub(crate) fn directive_invert() -> GeneratedBrillig { let one_const = MemoryAddress::direct(1); let zero_const = MemoryAddress::direct(2); let input_is_zero = MemoryAddress::direct(3); + let zero_usize = MemoryAddress::direct(20); + let one_usize = MemoryAddress::direct(21); // Location of the stop opcode let stop_location = 8; GeneratedBrillig { byte_code: vec![ BrilligOpcode::Const { - destination: MemoryAddress::direct(20), + destination: one_usize, bit_size: BitSize::Integer(IntegerBitSize::U32), value: F::from(1_usize), }, BrilligOpcode::Const { - destination: MemoryAddress::direct(21), + destination: zero_usize, bit_size: BitSize::Integer(IntegerBitSize::U32), value: F::from(0_usize), }, BrilligOpcode::CalldataCopy { destination_address: input, - size_address: MemoryAddress::direct(20), - offset_address: MemoryAddress::direct(21), + size_address: one_usize, + offset_address: zero_usize, }, // Put value zero in register (2) BrilligOpcode::Const { @@ -65,7 +70,9 @@ pub(crate) fn directive_invert() -> GeneratedBrillig { rhs: input, destination: input, }, - BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 1 }, + BrilligOpcode::Stop { + return_data: HeapVector { pointer: zero_usize, size: one_usize }, + }, ], name: "directive_invert".to_string(), ..Default::default() @@ -129,9 +136,141 @@ pub(crate) fn directive_quotient() -> GeneratedBrillig { destination: MemoryAddress::direct(0), source: MemoryAddress::direct(2), }, - BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 2 }, + BrilligOpcode::Stop { + return_data: HeapVector { + pointer: MemoryAddress::direct(11), + size: MemoryAddress::direct(10), + }, + }, ], name: "directive_integer_quotient".to_string(), ..Default::default() } } + +/// Generates brillig bytecode which performs a radix-base decomposition of `a` +/// The brillig inputs are 'a', the numbers of limbs and the radix +pub(crate) fn directive_to_radix() -> GeneratedBrillig { + let memory_adr_int_size = IntegerBitSize::U32; + let memory_adr_size = BitSize::Integer(memory_adr_int_size); + + // (0) is the input field `a` to decompose + // (1) contains the number of limbs (second input) + let limbs_nb = MemoryAddress::direct(1); + // (2) contains the radix (third input) + let radix = MemoryAddress::direct(2); + // (3) and (4) are intermediate registers + // (5,6,7) are constants: 
0,1,3 + let zero = MemoryAddress::direct(5); + let one = MemoryAddress::direct(6); + let three = MemoryAddress::direct(7); + // (7) is the iteration bound, it is the same register as three because the latter is only used at the start + let bound = MemoryAddress::direct(7); + // (8) is the register for storing the loop condition + let cond = MemoryAddress::direct(8); + // (9) is the pointer to the result array + let result_pointer = MemoryAddress::direct(9); + // address of the result array + let result_base_adr = 10_usize; + + let result_vector = HeapVector { pointer: result_pointer, size: limbs_nb }; + + let byte_code = vec![ + // Initialize registers + // Constants + // Zero + BrilligOpcode::Const { destination: zero, bit_size: memory_adr_size, value: F::zero() }, + // One + BrilligOpcode::Const { + destination: one, + bit_size: memory_adr_size, + value: F::from(1_usize), + }, + // Three + BrilligOpcode::Const { + destination: three, + bit_size: memory_adr_size, + value: F::from(3_usize), + }, + // Brillig Inputs + BrilligOpcode::CalldataCopy { + destination_address: MemoryAddress::direct(0), + size_address: three, + offset_address: zero, + }, + // The number of limbs needs to be an integer + BrilligOpcode::Cast { destination: limbs_nb, source: limbs_nb, bit_size: memory_adr_size }, + // Result_pointer starts at the base address + BrilligOpcode::Const { + destination: result_pointer, + bit_size: memory_adr_size, + value: F::from(result_base_adr), + }, + // Loop bound + BrilligOpcode::BinaryIntOp { + destination: bound, + op: BinaryIntOp::Add, + bit_size: memory_adr_int_size, + lhs: result_pointer, + rhs: limbs_nb, + }, + // loop label: (3) = a / radix (integer division) + BrilligOpcode::BinaryFieldOp { + op: BinaryFieldOp::IntegerDiv, + lhs: MemoryAddress::direct(0), + rhs: radix, + destination: MemoryAddress::direct(3), + }, + //(4) = (3)*256 + BrilligOpcode::BinaryFieldOp { + op: BinaryFieldOp::Mul, + lhs: MemoryAddress::direct(3), + rhs: radix, + destination: MemoryAddress::direct(4), + }, + //(4) = a-(3)*256 (remainder) + BrilligOpcode::BinaryFieldOp { + op: BinaryFieldOp::Sub, + lhs: MemoryAddress::direct(0), + rhs: MemoryAddress::direct(4), + destination: MemoryAddress::direct(4), + }, + // Store the remainder in the result array + BrilligOpcode::Store { + destination_pointer: result_pointer, + source: MemoryAddress::direct(4), + }, + // Increment the result pointer + BrilligOpcode::BinaryIntOp { + op: BinaryIntOp::Add, + lhs: result_pointer, + rhs: one, + destination: result_pointer, + bit_size: memory_adr_int_size, + }, + //a := quotient + BrilligOpcode::Mov { + destination: MemoryAddress::direct(0), + source: MemoryAddress::direct(3), + }, + // loop condition + BrilligOpcode::BinaryIntOp { + op: BinaryIntOp::LessThan, + lhs: result_pointer, + rhs: bound, + destination: cond, + bit_size: memory_adr_int_size, + }, + // loop back + BrilligOpcode::JumpIf { condition: cond, location: 7 }, + // reset result pointer to the start of the array + BrilligOpcode::Const { + destination: result_pointer, + bit_size: memory_adr_size, + value: F::from(result_base_adr), + }, + BrilligOpcode::Stop { return_data: result_vector }, + ]; + + GeneratedBrillig { byte_code, name: "directive_to_radix".to_string(), ..Default::default() } +} diff --git a/compiler/noirc_evaluator/src/acir/generated_acir.rs b/compiler/noirc_evaluator/src/acir/generated_acir.rs index 09653d5b73b..91206abe732 100644 --- a/compiler/noirc_evaluator/src/acir/generated_acir.rs +++ 
@@ -6,14 +6,10 @@ use acvm::acir::{
     circuit::{
         brillig::{BrilligFunctionId, BrilligInputs, BrilligOutputs},
         opcodes::{BlackBoxFuncCall, FunctionInput, Opcode as AcirOpcode},
-        AssertionPayload, BrilligOpcodeLocation, OpcodeLocation,
+        AssertionPayload, BrilligOpcodeLocation, ErrorSelector, OpcodeLocation,
     },
-    native_types::Witness,
-    BlackBoxFunc,
-};
-use acvm::{
-    acir::AcirField,
-    acir::{circuit::directives::Directive, native_types::Expression},
+    native_types::{Expression, Witness},
+    AcirField, BlackBoxFunc,
 };
 
 use super::brillig_directive;
@@ -21,6 +17,7 @@ use crate::{
     brillig::brillig_ir::artifact::GeneratedBrillig,
     errors::{InternalError, RuntimeError, SsaReport},
     ssa::ir::dfg::CallStack,
+    ErrorType,
 };
 
 use iter_extended::vecmap;
@@ -66,6 +63,9 @@ pub(crate) struct GeneratedAcir<F: AcirField> {
     /// Correspondence between an opcode index and the error message associated with it.
     pub(crate) assertion_payloads: BTreeMap<OpcodeLocation, AssertionPayload<F>>,
 
+    /// Correspondence between error selectors and types associated with them.
+    pub(crate) error_types: BTreeMap<ErrorSelector, ErrorType>,
+
     pub(crate) warnings: Vec<SsaReport>,
 
     /// Name for the corresponding entry point represented by this Acir-gen output.
@@ -94,6 +94,7 @@ pub(crate) type BrilligProcedureRangeMap = BTreeMap
             BrilligStdlibFunc::Inverse => brillig_directive::directive_invert(),
             BrilligStdlibFunc::Quotient => brillig_directive::directive_quotient(),
+            BrilligStdlibFunc::ToLeBytes => brillig_directive::directive_to_radix(),
         }
     }
 }
@@ -379,12 +381,7 @@ impl<F: AcirField> GeneratedAcir<F> {
             "ICE: Radix must be a power of 2"
         );
 
-        let limb_witnesses = vecmap(0..limb_count, |_| self.next_witness_index());
-        self.push_opcode(AcirOpcode::Directive(Directive::ToLeRadix {
-            a: input_expr.clone(),
-            b: limb_witnesses.clone(),
-            radix,
-        }));
+        let limb_witnesses = self.brillig_to_radix(input_expr, radix, limb_count);
 
         let mut composed_limbs = Expression::default();
 
@@ -405,6 +402,54 @@ impl<F: AcirField> GeneratedAcir<F> {
         Ok(limb_witnesses)
     }
 
+    /// Adds a brillig opcode for a to_radix decomposition.
+    ///
+    /// This code decomposes `expr` in a radix base
+    /// and returns `Witness`es which may (or may not, because it does not apply constraints)
+    /// be the limbs resulting from the decomposition.
+    ///
+    /// Safety: It is the caller's responsibility to ensure that the
+    /// resulting `Witness`es are properly constrained.
+    pub(crate) fn brillig_to_radix(
+        &mut self,
+        expr: &Expression<F>,
+        radix: u32,
+        limb_count: u32,
+    ) -> Vec<Witness> {
+        // Create the witnesses for the result
+        let limb_witnesses = vecmap(0..limb_count, |_| self.next_witness_index());
+
+        // Get the decomposition brillig code
+        let le_bytes_code = brillig_directive::directive_to_radix();
+        // Prepare the inputs/outputs
+        let limbs_nb = Expression {
+            mul_terms: Vec::new(),
+            linear_combinations: Vec::new(),
+            q_c: F::from(limb_count as u128),
+        };
+        let radix_expr = Expression {
+            mul_terms: Vec::new(),
+            linear_combinations: Vec::new(),
+            q_c: F::from(radix as u128),
+        };
+        let inputs = vec![
+            BrilligInputs::Single(expr.clone()),
+            BrilligInputs::Single(limbs_nb),
+            BrilligInputs::Single(radix_expr),
+        ];
+        let outputs = vec![BrilligOutputs::Array(limb_witnesses.clone())];
+
+        self.brillig_call(
+            None,
+            &le_bytes_code,
+            inputs,
+            outputs,
+            PLACEHOLDER_BRILLIG_INDEX,
+            Some(BrilligStdlibFunc::ToLeBytes),
+        );
+        limb_witnesses
+    }
+
     /// Adds an inversion brillig opcode.
     ///
     /// This code will invert `expr` without applying constraints
@@ -587,15 +632,8 @@ impl<F: AcirField> GeneratedAcir<F> {
             return;
         }
 
-        // TODO(https://github.com/noir-lang/noir/issues/5792)
-        for (brillig_index, message) in generated_brillig.assert_messages.iter() {
-            self.assertion_payloads.insert(
-                OpcodeLocation::Brillig {
-                    acir_index: self.opcodes.len() - 1,
-                    brillig_index: *brillig_index,
-                },
-                AssertionPayload::StaticString(message.clone()),
-            );
+        for (error_selector, error_type) in generated_brillig.error_types.iter() {
+            self.record_error_type(*error_selector, error_type.clone());
         }
 
         if inserted_func_before {
@@ -638,6 +676,20 @@ impl<F: AcirField> GeneratedAcir<F> {
     pub(crate) fn last_acir_opcode_location(&self) -> OpcodeLocation {
         OpcodeLocation::Acir(self.opcodes.len() - 1)
     }
+
+    pub(crate) fn record_error_type(&mut self, selector: ErrorSelector, typ: ErrorType) {
+        self.error_types.insert(selector, typ);
+    }
+
+    pub(crate) fn generate_assertion_message_payload(
+        &mut self,
+        message: String,
+    ) -> AssertionPayload<F> {
+        let error_type = ErrorType::String(message);
+        let error_selector = error_type.selector();
+        self.record_error_type(error_selector, error_type);
+        AssertionPayload { error_selector: error_selector.as_u64(), payload: Vec::new() }
+    }
 }
 
 /// This function will return the number of inputs that a blackbox function
diff --git a/compiler/noirc_evaluator/src/acir/mod.rs b/compiler/noirc_evaluator/src/acir/mod.rs
index 16b07b40863..5c7899b5035 100644
--- a/compiler/noirc_evaluator/src/acir/mod.rs
+++ b/compiler/noirc_evaluator/src/acir/mod.rs
@@ -39,7 +39,7 @@ use crate::ssa::{
         dfg::{CallStack, DataFlowGraph},
         function::{Function, FunctionId, RuntimeType},
         instruction::{
-            Binary, BinaryOp, ConstrainError, ErrorType, Instruction, InstructionId, Intrinsic,
+            Binary, BinaryOp, ConstrainError, Instruction, InstructionId, Intrinsic,
             TerminatorInstruction,
         },
         map::Id,
@@ -52,6 +52,7 @@ use crate::ssa::{
 use acir_variable::{AcirContext, AcirType, AcirVar};
 use generated_acir::BrilligStdlibFunc;
 pub(crate) use generated_acir::GeneratedAcir;
+use noirc_frontend::hir_def::types::Type as HirType;
 
 #[derive(Default)]
 struct SharedContext {
@@ -295,7 +296,7 @@ pub(crate) type Artifacts = (
     Vec<GeneratedAcir<FieldElement>>,
     Vec<BrilligBytecode<FieldElement>>,
     Vec<SsaReport>,
-    BTreeMap<ErrorSelector, ErrorType>,
+    BTreeMap<ErrorSelector, HirType>,
 );
 
 impl Ssa {
@@ -308,6 +309,7 @@
         let mut acirs = Vec::new();
         // TODO: can we parallelize this?
         let mut shared_context = SharedContext::default();
+
         for function in self.functions.values() {
             let context = Context::new(&mut shared_context, expression_width);
             if let Some(mut generated_acir) =
@@ -687,16 +689,19 @@ impl<'a> Context<'a> {
 
         let assert_payload = if let Some(error) = assert_message {
             match error {
-                ConstrainError::StaticString(string) => {
-                    Some(AssertionPayload::StaticString(string.clone()))
-                }
-                ConstrainError::Dynamic(error_selector, values) => {
+                ConstrainError::StaticString(string) => Some(
+                    self.acir_context.generate_assertion_message_payload(string.clone()),
+                ),
+                ConstrainError::Dynamic(error_selector, is_string_type, values) => {
                     if let Some(constant_string) = try_to_extract_string_from_error_payload(
-                        *error_selector,
+                        *is_string_type,
                         values,
                         dfg,
                     ) {
-                        Some(AssertionPayload::StaticString(constant_string))
+                        Some(
+                            self.acir_context
+                                .generate_assertion_message_payload(constant_string),
+                        )
                     } else {
                         let acir_vars: Vec<_> = values
                             .iter()
@@ -706,10 +711,10 @@ impl<'a> Context<'a> {
                         let expressions_or_memory =
                             self.acir_context.vars_to_expressions_or_memory(&acir_vars)?;
 
-                        Some(AssertionPayload::Dynamic(
-                            error_selector.as_u64(),
-                            expressions_or_memory,
-                        ))
+                        Some(AssertionPayload {
+                            error_selector: error_selector.as_u64(),
+                            payload: expressions_or_memory,
+                        })
                     }
                 }
             }
@@ -1010,7 +1015,7 @@ impl<'a> Context<'a> {
 
         // Link the entry point with all dependencies
        while let Some(unresolved_fn_label) = entry_point.first_unresolved_function_call() {
-            let artifact = &brillig.find_by_label(unresolved_fn_label);
+            let artifact = &brillig.find_by_label(unresolved_fn_label.clone());
             let artifact = match artifact {
                 Some(artifact) => artifact,
                 None => {
@@ -1022,13 +1027,13 @@ impl<'a> Context<'a> {
             };
             entry_point.link_with(artifact);
             // Insert the range of opcode locations occupied by a procedure
-            if let Some(procedure_id) = artifact.procedure {
+            if let Some(procedure_id) = &artifact.procedure {
                 let num_opcodes = entry_point.byte_code.len();
                 let previous_num_opcodes = entry_point.byte_code.len() - artifact.byte_code.len();
                 // We subtract one as to keep the range inclusive on both ends
                 entry_point
                     .procedure_locations
-                    .insert(procedure_id, (previous_num_opcodes, num_opcodes - 1));
+                    .insert(procedure_id.clone(), (previous_num_opcodes, num_opcodes - 1));
             }
         }
         // Generate the final bytecode
diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs
index 9661406abee..36e1ee90e11 100644
--- a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs
+++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs
@@ -256,7 +256,7 @@ impl<'block> BrilligBlock<'block> {
         };
 
         match assert_message {
-            Some(ConstrainError::Dynamic(selector, values)) => {
+            Some(ConstrainError::Dynamic(selector, _, values)) => {
                 let payload_values =
                     vecmap(values, |value| self.convert_ssa_value(*value, dfg));
                 let payload_as_params = vecmap(values, |value| {
@@ -267,7 +267,7 @@ impl<'block> BrilligBlock<'block> {
                     condition,
                     payload_values,
                     payload_as_params,
-                    selector.as_u64(),
+                    *selector,
                 );
             }
             Some(ConstrainError::StaticString(message)) => {
diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir.rs
index 66d51b2accc..b4e10035af6 100644
--- a/compiler/noirc_evaluator/src/brillig/brillig_ir.rs
+++ b/compiler/noirc_evaluator/src/brillig/brillig_ir.rs
@@ -378,12 +378,12 @@ pub(crate) mod tests {
         // We push a JumpIf and Trap opcode directly as the constrain instruction
         // uses unresolved jumps which requires a block to be constructed in SSA and
         // we don't need this for Brillig IR tests
-        context.push_opcode(BrilligOpcode::JumpIf { condition: r_equality, location: 9 });
         context.push_opcode(BrilligOpcode::Const {
             destination: MemoryAddress::direct(0),
             bit_size: BitSize::Integer(IntegerBitSize::U32),
             value: FieldElement::from(0u64),
         });
+        context.push_opcode(BrilligOpcode::JumpIf { condition: r_equality, location: 9 });
         context.push_opcode(BrilligOpcode::Trap {
             revert_data: HeapVector {
                 pointer: MemoryAddress::direct(0),
@@ -391,7 +391,10 @@ pub(crate) mod tests {
             },
         });
 
-        context.stop_instruction();
+        context.stop_instruction(HeapVector {
+            pointer: MemoryAddress::direct(0),
+            size: MemoryAddress::direct(0),
+        });
 
         let bytecode: Vec<BrilligOpcode<FieldElement>> = context.artifact().finish().byte_code;
         let number_sequence: Vec<FieldElement> =
diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir/artifact.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir/artifact.rs
index 276456fc40d..0e5b72c0b85 100644
--- a/compiler/noirc_evaluator/src/brillig/brillig_ir/artifact.rs
+++ b/compiler/noirc_evaluator/src/brillig/brillig_ir/artifact.rs
@@ -1,7 +1,9 @@
 use acvm::acir::brillig::Opcode as BrilligOpcode;
+use acvm::acir::circuit::ErrorSelector;
 use std::collections::{BTreeMap, HashMap};
 
 use crate::ssa::ir::{basic_block::BasicBlockId, dfg::CallStack, function::FunctionId};
+use crate::ErrorType;
 
 use super::procedures::ProcedureId;
 
@@ -23,7 +25,7 @@ pub(crate) enum BrilligParameter {
 pub(crate) struct GeneratedBrillig<F> {
     pub(crate) byte_code: Vec<BrilligOpcode<F>>,
     pub(crate) locations: BTreeMap<OpcodeLocation, CallStack>,
-    pub(crate) assert_messages: BTreeMap<OpcodeLocation, String>,
+    pub(crate) error_types: BTreeMap<ErrorSelector, ErrorType>,
     pub(crate) name: String,
     pub(crate) procedure_locations: HashMap<ProcedureId, (OpcodeLocation, OpcodeLocation)>,
 }
@@ -33,10 +35,7 @@ pub(crate) struct GeneratedBrillig<F> {
 /// It includes the bytecode of the function and all the metadata that allows linking with other functions.
 pub(crate) struct BrilligArtifact<F> {
     pub(crate) byte_code: Vec<BrilligOpcode<F>>,
-    /// A map of bytecode positions to assertion messages.
-    /// Some error messages (compiler intrinsics) are not emitted via revert data,
-    /// instead, they are handled externally so they don't add size to user programs.
-    pub(crate) assert_messages: BTreeMap<OpcodeLocation, String>,
+    pub(crate) error_types: BTreeMap<ErrorSelector, ErrorType>,
     /// The set of jumps that need to have their locations
     /// resolved.
     unresolved_jumps: Vec<(JumpInstructionPosition, UnresolvedJumpLocation)>,
@@ -68,7 +67,7 @@ pub(crate) struct BrilligArtifact<F> {
 /// A pointer to a location in the opcode.
 pub(crate) type OpcodeLocation = usize;
 
-#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
+#[derive(Debug, Clone, Eq, PartialEq, Hash)]
 pub(crate) enum LabelType {
     /// Labels for the entry point bytecode
     Entrypoint,
@@ -98,7 +97,7 @@ impl std::fmt::Display for LabelType {
 ///
 /// It is assumed that an entity will keep a map
 /// of labels to Opcode locations.
-#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
+#[derive(Debug, Clone, Eq, PartialEq, Hash)]
 pub(crate) struct Label {
     pub(crate) label_type: LabelType,
     pub(crate) section: Option<usize>,
 }
 
 impl Label {
     pub(crate) fn add_section(&self, section: usize) -> Self {
-        Label { label_type: self.label_type, section: Some(section) }
+        Label { label_type: self.label_type.clone(), section: Some(section) }
     }
 
     pub(crate) fn function(func_id: FunctionId) -> Self {
@@ -157,7 +156,7 @@ impl BrilligArtifact {
         GeneratedBrillig {
             byte_code: self.byte_code,
             locations: self.locations,
-            assert_messages: self.assert_messages,
+            error_types: self.error_types,
             name: self.name,
             procedure_locations: self.procedure_locations,
         }
@@ -165,7 +164,7 @@ impl BrilligArtifact {
     /// Gets the first unresolved function call of this artifact.
     pub(crate) fn first_unresolved_function_call(&self) -> Option