chore: Do not create new memory block when not needed (#2142)
* Do not create new MemoryBlock when not needed

* code review - handle function call

* code review

* code review

* add missing commit

* code review

* fix failing test case
guipublic authored Aug 4, 2023
1 parent 928b3ad commit bbf7ae2
Showing 5 changed files with 119 additions and 25 deletions.
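
At its core, this change tracks, for every SSA array value, the last instruction that uses it: when an `ArraySet` turns out to be that last use, ACIR generation can write into the array's existing memory block instead of copying it into a brand-new block. A minimal sketch of that check, using hypothetical simplified id types rather than the crate's real `ValueId`/`InstructionId` newtypes:

```rust
use std::collections::HashMap;

// Hypothetical stand-ins for the crate's ValueId / InstructionId newtypes.
type ValueId = u32;
type InstructionId = u32;

/// True when `instruction` is the last recorded use of `array`, i.e. nothing
/// later reads the old contents, so its memory block may be reused in place.
fn can_reuse_block(
    last_array_uses: &HashMap<ValueId, InstructionId>,
    array: ValueId,
    instruction: InstructionId,
) -> bool {
    last_array_uses.get(&array) == Some(&instruction)
}

fn main() {
    let last_uses = HashMap::from([(7, 42)]);
    assert!(can_reuse_block(&last_uses, 7, 42)); // last use: safe to mutate in place
    assert!(!can_reuse_block(&last_uses, 7, 40)); // earlier use: a copy is still needed
}
```

The actual decision in the diff below is the one-line comparison against the `last_array_uses` map computed by the new `find_last_array_uses` pass.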
3 changes: 2 additions & 1 deletion crates/noirc_evaluator/src/ssa.rs
@@ -62,7 +62,8 @@ pub(crate) fn optimize_into_acir(
.dead_instruction_elimination()
.print(print_ssa_passes, "After Dead Instruction Elimination:");
}
ssa.into_acir(brillig, abi_distinctness)
let last_array_uses = ssa.find_last_array_uses();
ssa.into_acir(brillig, abi_distinctness, &last_array_uses)
}

/// Compiles the [`Program`] into [`ACIR`][acvm::acir::circuit::Circuit].
79 changes: 55 additions & 24 deletions crates/noirc_evaluator/src/ssa/acir_gen/mod.rs
@@ -112,9 +112,10 @@ impl Ssa {
self,
brillig: Brillig,
abi_distinctness: AbiDistinctness,
last_array_uses: &HashMap<ValueId, InstructionId>,
) -> Result<GeneratedAcir, RuntimeError> {
let context = Context::new();
let mut generated_acir = context.convert_ssa(self, brillig)?;
let mut generated_acir = context.convert_ssa(self, brillig, last_array_uses)?;

match abi_distinctness {
AbiDistinctness::Distinct => {
@@ -154,10 +155,15 @@ impl Context {
}

/// Converts SSA into ACIR
fn convert_ssa(self, ssa: Ssa, brillig: Brillig) -> Result<GeneratedAcir, RuntimeError> {
fn convert_ssa(
self,
ssa: Ssa,
brillig: Brillig,
last_array_uses: &HashMap<ValueId, InstructionId>,
) -> Result<GeneratedAcir, RuntimeError> {
let main_func = ssa.main();
match main_func.runtime() {
RuntimeType::Acir => self.convert_acir_main(main_func, &ssa, brillig),
RuntimeType::Acir => self.convert_acir_main(main_func, &ssa, brillig, last_array_uses),
RuntimeType::Brillig => self.convert_brillig_main(main_func, brillig),
}
}
@@ -167,13 +173,14 @@
main_func: &Function,
ssa: &Ssa,
brillig: Brillig,
last_array_uses: &HashMap<ValueId, InstructionId>,
) -> Result<GeneratedAcir, RuntimeError> {
let dfg = &main_func.dfg;
let entry_block = &dfg[main_func.entry_block()];
let input_witness = self.convert_ssa_block_params(entry_block.parameters(), dfg)?;

for instruction_id in entry_block.instructions() {
self.convert_ssa_instruction(*instruction_id, dfg, ssa, &brillig)?;
self.convert_ssa_instruction(*instruction_id, dfg, ssa, &brillig, last_array_uses)?;
}

self.convert_ssa_return(entry_block.unwrap_terminator(), dfg)?;
@@ -310,6 +317,7 @@ impl Context {
dfg: &DataFlowGraph,
ssa: &Ssa,
brillig: &Brillig,
last_array_uses: &HashMap<ValueId, InstructionId>,
) -> Result<(), RuntimeError> {
let instruction = &dfg[instruction_id];
self.acir_context.set_location(dfg.get_location(&instruction_id));
@@ -389,10 +397,24 @@
self.current_side_effects_enabled_var = acir_var;
}
Instruction::ArrayGet { array, index } => {
self.handle_array_operation(instruction_id, *array, *index, None, dfg)?;
self.handle_array_operation(
instruction_id,
*array,
*index,
None,
dfg,
last_array_uses,
)?;
}
Instruction::ArraySet { array, index, value } => {
self.handle_array_operation(instruction_id, *array, *index, Some(*value), dfg)?;
self.handle_array_operation(
instruction_id,
*array,
*index,
Some(*value),
dfg,
last_array_uses,
)?;
}
Instruction::Allocate => {
unreachable!("Expected all allocate instructions to be removed before acir_gen")
@@ -447,6 +469,7 @@
index: ValueId,
store_value: Option<ValueId>,
dfg: &DataFlowGraph,
last_array_uses: &HashMap<ValueId, InstructionId>,
) -> Result<(), RuntimeError> {
let index_const = dfg.get_numeric_constant(index);

@@ -504,9 +527,10 @@
}
AcirValue::DynamicArray(_) => (),
}

let resolved_array = dfg.resolve(array);
let map_array = last_array_uses.get(&resolved_array) == Some(&instruction);
if let Some(store) = store_value {
self.array_set(instruction, array, index, store, dfg)?;
self.array_set(instruction, array, index, store, dfg, map_array)?;
} else {
self.array_get(instruction, array, index, dfg)?;
}
@@ -568,6 +592,7 @@
index: ValueId,
store_value: ValueId,
dfg: &DataFlowGraph,
map_array: bool,
) -> Result<(), InternalError> {
// Fetch the internal SSA ID for the array
let array = dfg.resolve(array);
@@ -602,24 +627,30 @@
}

// Since array_set creates a new array, we create a new block ID for this
// array.
// array, unless map_array is true. In that case, we operate directly on block_id
// and we do not create a new block ID.
let result_id = dfg
.instruction_results(instruction)
.first()
.expect("Array set does not have one result");
let result_block_id = self.block_id(result_id);

// Initialize the new array with the values from the old array
let init_values = try_vecmap(0..len, |i| {
let index = AcirValue::Var(
self.acir_context.add_constant(FieldElement::from(i as u128)),
AcirType::NumericType(NumericType::NativeField),
);
let var = index.into_var()?;
let read = self.acir_context.read_from_memory(block_id, &var)?;
Ok(AcirValue::Var(read, AcirType::NumericType(NumericType::NativeField)))
})?;
self.initialize_array(result_block_id, len, Some(&init_values))?;
let result_block_id;
if map_array {
self.memory_blocks.insert(*result_id, block_id);
result_block_id = block_id;
} else {
// Initialize the new array with the values from the old array
result_block_id = self.block_id(result_id);
let init_values = try_vecmap(0..len, |i| {
let index = AcirValue::Var(
self.acir_context.add_constant(FieldElement::from(i as u128)),
AcirType::NumericType(NumericType::NativeField),
);
let var = index.into_var()?;
let read = self.acir_context.read_from_memory(block_id, &var)?;
Ok(AcirValue::Var(read, AcirType::NumericType(NumericType::NativeField)))
})?;
self.initialize_array(result_block_id, len, Some(&init_values))?;
}

// Write the new value into the new array at the specified index
let index_var = self.convert_value(index, dfg).into_var()?;
@@ -1090,7 +1121,7 @@ impl Context {

#[cfg(test)]
mod tests {
use std::rc::Rc;
use std::{rc::Rc, collections::HashMap};

use acvm::{
acir::{
@@ -1130,7 +1161,7 @@ mod tests {
let ssa = builder.finish();

let context = Context::new();
let acir = context.convert_ssa(ssa, Brillig::default()).unwrap();
let acir = context.convert_ssa(ssa, Brillig::default(), &HashMap::new()).unwrap();

let expected_opcodes =
vec![Opcode::Arithmetic(&Expression::one() - &Expression::from(Witness(1)))];
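
To make the new `map_array` branch in `array_set` above more concrete: when this `ArraySet` is the last use of the source array, the result array simply aliases the existing block, and only otherwise are the old contents copied into a fresh block before the write. Below is a deliberately simplified model of that behaviour, a sketch that uses plain `Vec`s as stand-in memory blocks rather than the real `AcirContext`/`BlockId` machinery:

```rust
use std::collections::HashMap;

// Hypothetical stand-ins: block ids index into a pool of memory blocks.
type BlockId = usize;
type ValueId = u32;

struct Memory {
    blocks: Vec<Vec<u128>>,              // each block is a flat array of field values
    block_of: HashMap<ValueId, BlockId>, // which block backs which SSA array value
}

impl Memory {
    /// Models the ArraySet lowering: write `value` at `index` and return the
    /// block that backs the result array.
    fn array_set(
        &mut self,
        source: ValueId,
        result: ValueId,
        index: usize,
        value: u128,
        map_array: bool, // true when `source` has no later uses
    ) -> BlockId {
        let src_block = self.block_of[&source];
        let result_block = if map_array {
            // Last use of `source`: reuse its block in place, no copy.
            src_block
        } else {
            // `source` is still live: copy its contents into a brand-new block.
            let copy = self.blocks[src_block].clone();
            self.blocks.push(copy);
            self.blocks.len() - 1
        };
        self.blocks[result_block][index] = value;
        self.block_of.insert(result, result_block);
        result_block
    }
}

fn main() {
    let mut mem = Memory {
        blocks: vec![vec![1u128, 2, 3]],
        block_of: HashMap::from([(0, 0)]),
    };
    // Not the last use: a new block is allocated and written.
    let b1 = mem.array_set(0, 1, 0, 9, false);
    // Last use: the existing block is mutated in place.
    let b2 = mem.array_set(1, 2, 1, 8, true);
    assert_ne!(b1, 0);
    assert_eq!(b2, b1);
    assert_eq!(mem.blocks[0], vec![1u128, 2, 3]); // original untouched by the copy path
}
```

The assertions show the saving: the aliasing path allocates and copies nothing, while the copying path leaves the original block untouched for any remaining readers.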
4 changes: 4 additions & 0 deletions crates/noirc_evaluator/src/ssa/ir/post_order.rs
@@ -27,6 +27,10 @@ impl PostOrder {
PostOrder(Self::compute_post_order(func))
}

pub(crate) fn into_vec(self) -> Vec<BasicBlockId> {
self.0
}

// Computes the post-order of the function by doing a depth-first traversal of the
// function's entry block's previously unvisited children. Each block is sequenced according
// to when the traversal exits it.
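
The new `into_vec` accessor lets callers consume the computed order directly; reversing a post-order yields a reverse post-order, in which (ignoring back edges) every block appears before its successors, which is close enough to execution order for a last-use scan. A tiny illustration with hypothetical block ids:

```rust
// Hypothetical block ids; in the crate, PostOrder::with_function computes the
// order by a depth-first traversal of the function's control-flow graph.
type BasicBlockId = u32;

fn main() {
    // Post-order of a diamond CFG `entry -> {then, else} -> exit`:
    // every block is sequenced after the blocks reachable from it.
    let post_order: Vec<BasicBlockId> = vec![3, 1, 2, 0]; // exit, then, else, entry

    // Reversing gives a reverse post-order, i.e. roughly execution order,
    // which is what the new last-use pass walks.
    let mut reverse_post_order = post_order;
    reverse_post_order.reverse();
    assert_eq!(reverse_post_order, vec![0, 2, 1, 3]); // entry first, exit last
}
```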
57 changes: 57 additions & 0 deletions crates/noirc_evaluator/src/ssa/opt/array_use.rs
@@ -0,0 +1,57 @@
use std::collections::HashMap;

use crate::ssa::{
ir::{
basic_block::BasicBlockId,
dfg::DataFlowGraph,
instruction::{Instruction, InstructionId},
post_order::PostOrder,
value::{Value, ValueId},
},
ssa_gen::Ssa,
};

impl Ssa {
/// Map arrays to the last instruction that uses them
/// For this we simply process all the instructions in execution order
/// and update the map whenever there is a match
pub(crate) fn find_last_array_uses(&self) -> HashMap<ValueId, InstructionId> {
let mut array_use = HashMap::new();
for func in self.functions.values() {
let mut reverse_post_order = PostOrder::with_function(func).into_vec();
reverse_post_order.reverse();
for block in reverse_post_order {
last_use(block, &func.dfg, &mut array_use);
}
}
array_use
}
}

/// Updates the array_def map when an instruction uses an array
fn last_use(
block_id: BasicBlockId,
dfg: &DataFlowGraph,
array_def: &mut HashMap<ValueId, InstructionId>,
) {
let block = &dfg[block_id];
for instruction_id in block.instructions() {
match &dfg[*instruction_id] {
Instruction::ArrayGet { array, .. } | Instruction::ArraySet { array, .. } => {
let array = dfg.resolve(*array);
array_def.insert(array, *instruction_id);
}
Instruction::Call { arguments, .. } => {
for argument in arguments {
let resolved_arg = dfg.resolve(*argument);
if matches!(dfg[resolved_arg], Value::Array { .. }) {
array_def.insert(resolved_arg, *instruction_id);
}
}
}
_ => {
// Nothing to do
}
}
}
}
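
As a rough sketch of what this pass produces: each array value id ends up mapped to the last instruction id that touches it, later uses in the scan overwriting earlier ones so that only the final use survives. The ids below are hypothetical and not tied to the real IR types:

```rust
use std::collections::HashMap;

// Hypothetical stand-ins for the crate's ValueId / InstructionId newtypes.
type ValueId = u32;
type InstructionId = u32;

fn main() {
    let mut last_use: HashMap<ValueId, InstructionId> = HashMap::new();

    // Instructions visited in (reverse post-order) execution order:
    //   i10: ArrayGet { array: a0, .. }
    //   i11: ArraySet { array: a0, .. }   -> overwrites the entry for a0
    //   i12: Call { arguments: [a0, ..] } -> overwrites it again
    for (array, instruction) in [(0u32, 10u32), (0, 11), (0, 12)] {
        last_use.insert(array, instruction);
    }

    // Only the final use survives, so acir-gen can tell that instruction 12
    // is the one allowed to reuse a0's memory block.
    assert_eq!(last_use.get(&0), Some(&12));
}
```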
1 change: 1 addition & 0 deletions crates/noirc_evaluator/src/ssa/opt/mod.rs
@@ -3,6 +3,7 @@
//! Each pass is generally expected to mutate the SSA IR into a gradually
//! simpler form until the IR only has a single function remaining with 1 block within it.
//! Generally, these passes are also expected to minimize the final amount of instructions.
mod array_use;
mod constant_folding;
mod defunctionalize;
mod die;
