feat: Sync from noir #10660

Merged · 7 commits · Dec 13, 2024
2 changes: 1 addition & 1 deletion .noir-sync-commit
@@ -1 +1 @@
1b0dd4149d9249f0ea4fb5e2228c688e0135618f
b88db67a4fa92f861329105fb732a7b1309620fe
22 changes: 11 additions & 11 deletions noir/noir-repo/.github/workflows/test-js-packages.yml
@@ -534,7 +534,7 @@ jobs:
- name: Build list of libraries
id: get_critical_libraries
run: |
LIBRARIES=$(grep -Po "^https://github.com/\K.+" ./CRITICAL_NOIR_LIBRARIES | jq -R -s -c 'split("\n") | map(select(. != "")) | map({ repo: ., path: "./"})')
LIBRARIES=$(grep -Po "^https://github.com/\K.+" ./CRITICAL_NOIR_LIBRARIES | jq -R -s -c 'split("\n") | map(select(. != "")) | map({ repo: ., path: ""})')
echo "libraries=$LIBRARIES"
echo "libraries=$LIBRARIES" >> $GITHUB_OUTPUT
env:
@@ -551,15 +551,15 @@ jobs:
matrix:
project: ${{ fromJson( needs.critical-library-list.outputs.libraries )}}
include:
- project: { repo: AztecProtocol/aztec-packages, path: ./noir-projects/aztec-nr }
- project: { repo: AztecProtocol/aztec-packages, path: ./noir-projects/noir-contracts }
- project: { repo: AztecProtocol/aztec-packages, path: ./noir-projects/noir-protocol-circuits/crates/parity-lib }
- project: { repo: AztecProtocol/aztec-packages, path: ./noir-projects/noir-protocol-circuits/crates/private-kernel-lib }
- project: { repo: AztecProtocol/aztec-packages, path: ./noir-projects/noir-protocol-circuits/crates/reset-kernel-lib }
- project: { repo: AztecProtocol/aztec-packages, path: ./noir-projects/noir-protocol-circuits/crates/rollup-lib }
- project: { repo: AztecProtocol/aztec-packages, path: ./noir-projects/noir-protocol-circuits/crates/types }

name: Check external repo - ${{ matrix.project.repo }}
- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/aztec-nr }
- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-contracts }
- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-protocol-circuits/crates/parity-lib }
- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-protocol-circuits/crates/private-kernel-lib }
- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-protocol-circuits/crates/reset-kernel-lib }
- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-protocol-circuits/crates/rollup-lib }
- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-protocol-circuits/crates/types }

name: Check external repo - ${{ matrix.project.repo }}/${{ matrix.project.path }}
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -591,7 +591,7 @@ jobs:

- name: Run nargo test
working-directory: ./test-repo/${{ matrix.project.path }}
run: nargo test --silence-warnings
run: nargo test -q --silence-warnings
env:
NARGO_IGNORE_TEST_FAILURES_FROM_FOREIGN_CALLS: true

9 changes: 5 additions & 4 deletions noir/noir-repo/acvm-repo/acvm/src/compiler/mod.rs
@@ -82,15 +82,16 @@ pub fn compile<F: AcirField>(
acir: Circuit<F>,
expression_width: ExpressionWidth,
) -> (Circuit<F>, AcirTransformationMap) {
let initial_opcode_positions = (0..acir.opcodes.len()).collect::<Vec<_>>();
let acir_opcode_positions = (0..acir.opcodes.len()).collect::<Vec<_>>();

if MAX_OPTIMIZER_PASSES == 0 {
let transformation_map = AcirTransformationMap::new(&initial_opcode_positions);
return (acir, transformation_map);
return (acir, AcirTransformationMap::new(&acir_opcode_positions));
}

let mut pass = 0;
let mut prev_opcodes_hash = fxhash::hash64(&acir.opcodes);
let mut prev_acir = acir;
let mut prev_acir_opcode_positions = initial_opcode_positions;
let mut prev_acir_opcode_positions = acir_opcode_positions;

// For most test programs it would be enough to only loop `transform_internal`,
// but some of them don't stabilize unless we also repeat the backend agnostic optimizations.
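The code above caps the optimizer at MAX_OPTIMIZER_PASSES and reruns the passes until the opcode list stops changing. A minimal sketch of that fixed-point pattern, using the fxhash crate this module already depends on (MAX_PASSES and transform_pass are illustrative stand-ins, not the acvm API):

// Sketch only: repeat a transformation until its output stabilizes,
// detected by hashing the opcodes, with a hard cap on iterations.
const MAX_PASSES: usize = 3; // stand-in for MAX_OPTIMIZER_PASSES

fn transform_pass(opcodes: Vec<u64>) -> Vec<u64> {
    // Stand-in for the real optimization and transformation passes.
    opcodes.into_iter().filter(|op| *op != 0).collect()
}

fn run_to_fixed_point(mut opcodes: Vec<u64>) -> Vec<u64> {
    let mut prev_hash = fxhash::hash64(&opcodes);
    for _ in 0..MAX_PASSES {
        opcodes = transform_pass(opcodes);
        let hash = fxhash::hash64(&opcodes);
        if hash == prev_hash {
            break; // nothing changed this pass, further passes would be no-ops
        }
        prev_hash = hash;
    }
    opcodes
}

fn main() {
    let optimized = run_to_fixed_point(vec![1, 0, 2, 0, 3]);
    assert_eq!(optimized, vec![1, 2, 3]);
}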
@@ -24,6 +24,7 @@ pub fn optimize<F: AcirField>(acir: Circuit<F>) -> (Circuit<F>, AcirTransformati
// Track original acir opcode positions throughout the transformation passes of the compilation
// by applying the modifications done to the circuit opcodes and also to the opcode_positions (delete and insert)
let acir_opcode_positions = (0..acir.opcodes.len()).collect();

let (mut acir, new_opcode_positions) = optimize_internal(acir, acir_opcode_positions);

let transformation_map = AcirTransformationMap::new(&new_opcode_positions);
@@ -256,6 +256,10 @@ impl Binary {
if rhs_is_zero {
return SimplifyResult::SimplifiedTo(self.lhs);
}
if operand_type == NumericType::bool() && (lhs_is_one || rhs_is_one) {
let one = dfg.make_constant(FieldElement::one(), operand_type);
return SimplifyResult::SimplifiedTo(one);
}
if dfg.resolve(self.lhs) == dfg.resolve(self.rhs) {
return SimplifyResult::SimplifiedTo(self.lhs);
}
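The new branch above handles what looks like the boolean OR case given the surrounding rules: if either operand is the constant 1 and the operands are booleans, the whole expression folds to 1. A small sketch of the same folding rules outside the SSA machinery (Operand and simplify_bool_or are illustrative names, not the noirc_evaluator API):

// Sketch only: constant-folding rules for a boolean OR node.
#[derive(Clone, Copy, PartialEq, Debug)]
enum Operand {
    Const(bool),
    Var(u32),
}

// Returns Some(simplified) when a rule applies, None otherwise.
fn simplify_bool_or(lhs: Operand, rhs: Operand) -> Option<Operand> {
    match (lhs, rhs) {
        // x | 1 == 1 and 1 | x == 1: the rule the hunk above adds.
        (Operand::Const(true), _) | (_, Operand::Const(true)) => Some(Operand::Const(true)),
        // x | 0 == x: mirrors the existing rhs_is_zero check.
        (Operand::Const(false), other) | (other, Operand::Const(false)) => Some(other),
        // x | x == x: mirrors the existing lhs == rhs check.
        _ if lhs == rhs => Some(lhs),
        _ => None,
    }
}

fn main() {
    let x = Operand::Var(0);
    assert_eq!(simplify_bool_or(x, Operand::Const(true)), Some(Operand::Const(true)));
    assert_eq!(simplify_bool_or(x, Operand::Const(false)), Some(x));
}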
@@ -480,20 +480,14 @@ fn simplify_slice_push_back(
}

fn simplify_slice_pop_back(
element_type: Type,
slice_type: Type,
arguments: &[ValueId],
dfg: &mut DataFlowGraph,
block: BasicBlockId,
call_stack: CallStack,
) -> SimplifyResult {
let element_types = match element_type.clone() {
Type::Slice(element_types) | Type::Array(element_types, _) => element_types,
_ => {
unreachable!("ICE: Expected slice or array, but got {element_type}");
}
};

let element_count = element_type.element_size();
let element_types = slice_type.element_types();
let element_count = element_types.len();
let mut results = VecDeque::with_capacity(element_count + 1);

let new_slice_length = update_slice_length(arguments[0], dfg, BinaryOp::Sub, block);
@@ -507,14 +501,17 @@ fn simplify_slice_pop_back(
flattened_len = update_slice_length(flattened_len, dfg, BinaryOp::Sub, block);

// We must pop multiple elements in the case of a slice of tuples
for _ in 0..element_count {
// Iterating through element types in reverse here since we're popping from the end
for element_type in element_types.iter().rev() {
let get_last_elem_instr =
Instruction::ArrayGet { array: arguments[1], index: flattened_len };

let element_type = Some(vec![element_type.clone()]);
let get_last_elem = dfg
.insert_instruction_and_results(
get_last_elem_instr,
block,
Some(element_types.to_vec()),
element_type,
call_stack.clone(),
)
.first();
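simplify_slice_pop_back now derives the flattened element types directly from the slice type and pops one value per element type, walking the types in reverse. A rough model of that flattened layout, with plain Rust containers standing in for SSA values (the names here are illustrative only):

// Sketch only: a slice of tuples is stored flattened, so popping one logical
// element removes `element_types.len()` trailing values, read back to front.
fn pop_back_flattened(flattened: &mut Vec<i64>, element_types: &[&str]) -> Vec<i64> {
    let mut popped = Vec::with_capacity(element_types.len());
    // Iterate the element types in reverse, as in the hunk above: the last
    // flattened value corresponds to the last type of the tuple.
    for _ty in element_types.iter().rev() {
        popped.push(flattened.pop().expect("slice must not be empty"));
    }
    popped.reverse(); // restore source order of the popped element
    popped
}

fn main() {
    // A slice of (Field, u32) elements flattened as [f0, u0, f1, u1]:
    let mut flattened = vec![10, 1, 20, 2];
    assert_eq!(pop_back_flattened(&mut flattened, &["Field", "u32"]), vec![20, 2]);
    assert_eq!(flattened, vec![10, 1]);
}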
@@ -173,7 +173,11 @@ pub(super) fn simplify_msm(
var_scalars.push(zero);
let result_x = dfg.make_constant(result_x, NumericType::NativeField);
let result_y = dfg.make_constant(result_y, NumericType::NativeField);

// Pushing a bool here is intentional, multi_scalar_mul takes two arguments:
// `points: [(Field, Field, bool); N]` and `scalars: [(Field, Field); N]`.
let result_is_infinity = dfg.make_constant(result_is_infinity, NumericType::bool());

var_points.push(result_x);
var_points.push(result_y);
var_points.push(result_is_infinity);
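The comment above documents the layout simplify_msm assumes for multi_scalar_mul inputs: three flattened values per point (x, y, is_infinity) and two per scalar. A small illustrative sketch of that points layout in plain Rust (Point and flatten_points are made-up names, not the Noir stdlib):

// Sketch only: each curve point contributes three flattened entries,
// the last of which is the is_infinity flag pushed as a bool constant.
struct Point {
    x: u64,
    y: u64,
    is_infinity: bool,
}

fn flatten_points(points: &[Point]) -> Vec<u64> {
    let mut flat = Vec::with_capacity(points.len() * 3);
    for p in points {
        flat.push(p.x);
        flat.push(p.y);
        flat.push(p.is_infinity as u64); // bool flag, as in the hunk above
    }
    flat
}

fn main() {
    let points = [Point { x: 1, y: 2, is_infinity: false }];
    assert_eq!(flatten_points(&points), vec![1, 2, 0]);
}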
117 changes: 110 additions & 7 deletions noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/loop_invariant.rs
@@ -13,7 +13,7 @@ use fxhash::{FxHashMap as HashMap, FxHashSet as HashSet};
use crate::ssa::{
ir::{
basic_block::BasicBlockId,
function::{Function, RuntimeType},
function::Function,
function_inserter::FunctionInserter,
instruction::{Instruction, InstructionId},
types::Type,
@@ -27,12 +27,7 @@ use super::unrolling::{Loop, Loops};
impl Ssa {
#[tracing::instrument(level = "trace", skip(self))]
pub(crate) fn loop_invariant_code_motion(mut self) -> Ssa {
let brillig_functions = self
.functions
.iter_mut()
.filter(|(_, func)| matches!(func.runtime(), RuntimeType::Brillig(_)));

for (_, function) in brillig_functions {
for function in self.functions.values_mut() {
function.loop_invariant_code_motion();
}

@@ -63,6 +58,7 @@ impl Loops {
}

context.map_dependent_instructions();
context.inserter.map_data_bus_in_place();
}
}

@@ -113,6 +109,22 @@ impl<'f> LoopInvariantContext<'f> {

if hoist_invariant {
self.inserter.push_instruction(instruction_id, pre_header);

// If we are hoisting a MakeArray instruction,
// we need to issue an extra inc_rc in case the resulting array is mutated afterward.
if matches!(
self.inserter.function.dfg[instruction_id],
Instruction::MakeArray { .. }
) {
let result =
self.inserter.function.dfg.instruction_results(instruction_id)[0];
let inc_rc = Instruction::IncrementRc { value: result };
let call_stack = self.inserter.function.dfg.get_call_stack(instruction_id);
self.inserter
.function
.dfg
.insert_instruction_and_results(inc_rc, *block, None, call_stack);
}
} else {
self.inserter.push_instruction(instruction_id, *block);
}
@@ -190,6 +202,7 @@ impl<'f> LoopInvariantContext<'f> {
});

let can_be_deduplicated = instruction.can_be_deduplicated(self.inserter.function, false)
|| matches!(instruction, Instruction::MakeArray { .. })
|| self.can_be_deduplicated_from_upper_bound(&instruction);

is_loop_invariant && can_be_deduplicated
@@ -559,4 +572,94 @@ mod test {
let ssa = ssa.loop_invariant_code_motion();
assert_normalized_ssa_equals(ssa, expected);
}

#[test]
fn insert_inc_rc_when_moving_make_array() {
// SSA for the following program:
//
// unconstrained fn main(x: u32, y: u32) {
// let mut a1 = [1, 2, 3, 4, 5];
// a1[x] = 64;
// for i in 0 .. 5 {
// let mut a2 = [1, 2, 3, 4, 5];
// a2[y + i] = 128;
// foo(a2);
// }
// foo(a1);
// }
//
// We want to make sure that when we move a loop-invariant make_array instruction,
// we account for whether that array has been marked as mutable.
// To do so, we increment the reference counter on the array we are moving.
// In the SSA below, we want to move `v42` out of the loop.
let src = "
brillig(inline) fn main f0 {
b0(v0: u32, v1: u32):
v8 = make_array [Field 1, Field 2, Field 3, Field 4, Field 5] : [Field; 5]
v9 = allocate -> &mut [Field; 5]
v11 = array_set v8, index v0, value Field 64
v13 = add v0, u32 1
store v11 at v9
jmp b1(u32 0)
b1(v2: u32):
v16 = lt v2, u32 5
jmpif v16 then: b3, else: b2
b2():
v17 = load v9 -> [Field; 5]
call f1(v17)
return
b3():
v19 = make_array [Field 1, Field 2, Field 3, Field 4, Field 5] : [Field; 5]
v20 = allocate -> &mut [Field; 5]
v21 = add v1, v2
v23 = array_set v19, index v21, value Field 128
call f1(v23)
v25 = add v2, u32 1
jmp b1(v25)
}
brillig(inline) fn foo f1 {
b0(v0: [Field; 5]):
return
}
";

let ssa = Ssa::from_str(src).unwrap();

// We expect the `make_array` at the top of `b3` to be replaced with an `inc_rc`
// of the newly hoisted `make_array` at the end of `b0`.
let expected = "
brillig(inline) fn main f0 {
b0(v0: u32, v1: u32):
v8 = make_array [Field 1, Field 2, Field 3, Field 4, Field 5] : [Field; 5]
v9 = allocate -> &mut [Field; 5]
v11 = array_set v8, index v0, value Field 64
v13 = add v0, u32 1
store v11 at v9
v14 = make_array [Field 1, Field 2, Field 3, Field 4, Field 5] : [Field; 5]
jmp b1(u32 0)
b1(v2: u32):
v17 = lt v2, u32 5
jmpif v17 then: b3, else: b2
b2():
v18 = load v9 -> [Field; 5]
call f1(v18)
return
b3():
inc_rc v14
v20 = allocate -> &mut [Field; 5]
v21 = add v1, v2
v23 = array_set v14, index v21, value Field 128
call f1(v23)
v25 = add v2, u32 1
jmp b1(v25)
}
brillig(inline) fn foo f1 {
b0(v0: [Field; 5]):
return
}
";

let ssa = ssa.loop_invariant_code_motion();
assert_normalized_ssa_equals(ssa, expected);
}
}
@@ -703,7 +703,9 @@ impl<'a> FunctionContext<'a> {
// Don't mutate the reference count if we're assigning an array literal to a Let:
// `let mut foo = [1, 2, 3];`
// we consider the array to be moved, so we should have an initial rc of just 1.
let should_inc_rc = !let_expr.expression.is_array_or_slice_literal();
//
// TODO: this exception breaks #6763
let should_inc_rc = true; // !let_expr.expression.is_array_or_slice_literal();

values = values.map(|value| {
let value = value.eval(self);
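With should_inc_rc forced to true, binding an array literal with let now bumps the reference count as well, which is why the assert_refcount expectation later in this diff changes from 1 to 2. A loose analogy in plain Rust, using std::rc::Rc as a stand-in for the SSA reference counter (this is not the actual Noir semantics, only an illustration):

use std::rc::Rc;

fn main() {
    let array = Rc::new([0, 1, 2]);
    // Previously the `let` of an array literal was treated as a move, leaving
    // the count at 1; incrementing on the binding as well gives a count of 2.
    let bound = Rc::clone(&array);
    assert_eq!(Rc::strong_count(&array), 2);
    drop(bound);
}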
@@ -93,7 +93,7 @@ step 2: Follow the [Noirup instructions](#installing-noirup).

## Setting up shell completions

Once `nargo` is installed, you can [set up shell completions for it](setting_up_shell_completions).
Once `nargo` is installed, you can [set up shell completions for it](setting_up_shell_completions.md).

## Uninstalling Nargo

2 changes: 1 addition & 1 deletion noir/noir-repo/scripts/check-critical-libraries.sh
@@ -31,7 +31,7 @@ for REPO in ${REPOS_TO_CHECK[@]}; do
TAG=$(getLatestReleaseTagForRepo $REPO)
git clone $REPO -c advice.detachedHead=false --depth 1 --branch $TAG $TMP_DIR

nargo test --program-dir $TMP_DIR
nargo test -q --program-dir $TMP_DIR

rm -rf $TMP_DIR
done
@@ -1,3 +1,3 @@
x = 1
y = 1
return = 5
return = 7
@@ -2,7 +2,7 @@ use std::mem::array_refcount;

fn main() {
let mut array = [0, 1, 2];
assert_refcount(array, 1);
assert_refcount(array, 2);

borrow(array, array_refcount(array));
borrow_mut(&mut array, array_refcount(array));
@@ -0,0 +1,7 @@
[package]
name = "return_twice"
version = "0.1.0"
type = "bin"
authors = [""]

[dependencies]
@@ -0,0 +1,2 @@
return = [100, 100]
in0 = 10
@@ -0,0 +1,5 @@
pub fn main(in0: Field) -> pub (Field, Field) {
let out0 = (in0 * in0);
let out1 = (in0 * in0);
(out0, out1)
}
@@ -8,8 +8,6 @@ excluded_dirs=(
"double_verify_nested_proof"
"overlapping_dep_and_mod"
"comptime_println"
# Takes a very long time to execute as large loops do not get simplified.
"regression_4709"
# bit sizes for bigint operation doesn't match up.
"bigint"
# Expected to fail as test asserts on which runtime it is in.
@@ -0,0 +1,7 @@
[package]
name = "assert_message"
type = "bin"
authors = [""]
compiler_version = ">=0.23.0"

[dependencies]
@@ -0,0 +1,15 @@
// Have to use inputs otherwise the ACIR gets optimized away due to constants.
// With the original ACIR the optimizations can rearrange or merge opcodes,
// which might end up getting out of sync with assertion messages.
#[test(should_fail_with = "first")]
fn test_assert_message_preserved_during_optimization(a: Field, b: Field, c: Field) {
if (a + b) * (a - b) != c {
assert((a + b) * (a - b) == c, "first");
assert((a - b) * (a + b) == c, "second");
assert((a + b) * (a - b) == c, "third");
assert((2 * a + b) * (a - b / 2) == c * c, "fourth");
} else {
// The fuzzer might have generated a passing test.
assert((a + b) * (a - b) != c, "first");
}
}