Merge branch 'master' into mm/pg-refactor-new
maramihali committed Feb 20, 2024
2 parents f1934c1 + 4c7f24f commit dc72a99
Showing 117 changed files with 5,059 additions and 4,184 deletions.
6 changes: 3 additions & 3 deletions avm-transpiler/src/opcodes.rs
@@ -150,10 +150,10 @@ impl AvmOpcode {
// World State
AvmOpcode::SLOAD => "SLOAD", // Public Storage
AvmOpcode::SSTORE => "SSTORE", // Public Storage
AvmOpcode::NOTEHASHEXISTS => "NOTEHASHEXISTS", // Notes & Nullifiers
AvmOpcode::EMITNOTEHASH => "EMITNOTEHASH", // Notes & Nullifiers
AvmOpcode::NOTEHASHEXISTS => "NOTEHASHEXISTS", // Notes & Nullifiers
AvmOpcode::EMITNOTEHASH => "EMITNOTEHASH", // Notes & Nullifiers
AvmOpcode::NULLIFIEREXISTS => "NULLIFIEREXISTS", // Notes & Nullifiers
AvmOpcode::EMITNULLIFIER => "EMITNULLIFIER", // Notes & Nullifiers
AvmOpcode::EMITNULLIFIER => "EMITNULLIFIER", // Notes & Nullifiers
AvmOpcode::READL1TOL2MSG => "READL1TOL2MSG", // Messages
AvmOpcode::HEADERMEMBER => "HEADERMEMBER", // Archive tree & Headers

203 changes: 190 additions & 13 deletions avm-transpiler/src/transpile.rs
@@ -1,7 +1,9 @@
use acvm::acir::brillig::Opcode as BrilligOpcode;
use acvm::acir::circuit::brillig::Brillig;

use acvm::brillig_vm::brillig::{BinaryFieldOp, BinaryIntOp, MemoryAddress, Value, ValueOrArray};
use acvm::brillig_vm::brillig::{
BinaryFieldOp, BinaryIntOp, BlackBoxOp, HeapArray, MemoryAddress, Value, ValueOrArray,
};

use crate::instructions::{
AvmInstruction, AvmOperand, AvmTypeTag, ALL_DIRECT, FIRST_OPERAND_INDIRECT,
@@ -93,7 +95,6 @@ pub fn brillig_to_avm(brillig: &Brillig) -> Vec<u8> {
value: destination.to_usize() as u32,
},
],
..Default::default()
});
}
BrilligOpcode::CalldataCopy { destination_address, size, offset } => {
@@ -200,9 +201,13 @@ pub fn brillig_to_avm(brillig: &Brillig) -> Vec<u8> {
..Default::default()
});
},
BrilligOpcode::Cast { destination, source, bit_size } => {
avm_instrs.push(emit_cast(source.to_usize() as u32, destination.to_usize() as u32, tag_from_bit_size(*bit_size)));
}
BrilligOpcode::ForeignCall { function, destinations, inputs, destination_value_types:_, input_value_types:_ } => {
handle_foreign_call(&mut avm_instrs, function, destinations, inputs);
},
BrilligOpcode::BlackBox(operation) => handle_black_box_function(&mut avm_instrs, operation),
_ => panic!(
"Transpiler doesn't know how to process {:?} brillig instruction",
brillig_instr
@@ -228,9 +233,148 @@ fn handle_foreign_call(
function: &String,
destinations: &Vec<ValueOrArray>,
inputs: &Vec<ValueOrArray>,
) {
match function.as_str() {
"keccak256" | "sha256" => {
emit_2_field_hash_instruction(avm_instrs, function, destinations, inputs)
}
"poseidon" => {
emit_single_field_hash_instruction(avm_instrs, function, destinations, inputs)
}
_ => handle_getter_instruction(avm_instrs, function, destinations, inputs),
}
}

/// Two-field hash instructions represent instructions whose outputs are larger than a single field element
///
/// This includes:
/// - keccak
/// - sha256
///
/// In the future the output of these may expand or contract depending on what is most efficient for the circuit
/// to reason about. To reduce user friction we will use two-field outputs for now.
fn emit_2_field_hash_instruction(
avm_instrs: &mut Vec<AvmInstruction>,
function: &String,
destinations: &[ValueOrArray],
inputs: &[ValueOrArray],
) {
// handle field returns differently
let hash_offset_maybe = inputs[0];
println!("hash_offset_maybe: {:?}", hash_offset_maybe);
let (hash_offset, hash_size) = match hash_offset_maybe {
ValueOrArray::HeapArray(HeapArray { pointer, size }) => (pointer.0, size),
_ => panic!("Keccak | Sha256 address inputs destination should be a single value"),
};

assert!(destinations.len() == 1);
let dest_offset_maybe = destinations[0];
let dest_offset = match dest_offset_maybe {
ValueOrArray::HeapArray(HeapArray { pointer, size }) => {
assert!(size == 2);
pointer.0
}
_ => panic!("Keccak | Poseidon address destination should be a single value"),
};

let opcode = match function.as_str() {
"keccak256" => AvmOpcode::KECCAK,
"sha256" => AvmOpcode::SHA256,
_ => panic!(
"Transpiler doesn't know how to process ForeignCall function {:?}",
function
),
};

avm_instrs.push(AvmInstruction {
opcode,
indirect: Some(3), // 11 - addressing mode, indirect for input and output
operands: vec![
AvmOperand::U32 {
value: dest_offset as u32,
},
AvmOperand::U32 {
value: hash_offset as u32,
},
AvmOperand::U32 {
value: hash_size as u32,
},
],
..Default::default()
});
}
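
// Illustrative sketch, not part of this commit: it shows the shapes this handler expects.
// The input is a HeapArray (pointer + length) and the destination is a HeapArray of exactly
// two fields holding the 256-bit digest; the offsets and sizes below are made up.
#[cfg(test)]
mod two_field_hash_sketch {
    use super::*;

    #[test]
    fn transpiles_sha256_foreign_call() {
        let mut avm_instrs = Vec::new();
        let inputs = vec![ValueOrArray::HeapArray(HeapArray { pointer: MemoryAddress(4), size: 10 })];
        let destinations = vec![ValueOrArray::HeapArray(HeapArray { pointer: MemoryAddress(1), size: 2 })];
        emit_2_field_hash_instruction(&mut avm_instrs, &"sha256".to_string(), &destinations, &inputs);
        // Expect a single SHA256 instruction with operands [dest_offset=1, hash_offset=4, hash_size=10]
        // and indirect = Some(3), i.e. both the input and output operands are addressed indirectly.
        assert_eq!(avm_instrs.len(), 1);
        assert!(matches!(avm_instrs[0].opcode, AvmOpcode::SHA256));
    }
}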

/// A single field hash instruction includes hash functions that emit a single field element
/// directly onto the stack.
///
/// This includes (SNARK-friendly functions):
/// - poseidon2
///
/// Pedersen is not implemented this way, as its black box function representation already has the right API.
/// Since the Poseidon BBF only deals with a single permutation, it is not quite suitable for our current AVM
/// representation.
fn emit_single_field_hash_instruction(
avm_instrs: &mut Vec<AvmInstruction>,
function: &String,
destinations: &[ValueOrArray],
inputs: &[ValueOrArray],
) {
// handle field returns differently
let hash_offset_maybe = inputs[0];
let (hash_offset, hash_size) = match hash_offset_maybe {
ValueOrArray::HeapArray(HeapArray { pointer, size }) => (pointer.0, size),
_ => panic!("Poseidon address inputs destination should be a single value"),
};

assert!(destinations.len() == 1);
let dest_offset_maybe = destinations[0];
let dest_offset = match dest_offset_maybe {
ValueOrArray::MemoryAddress(dest_offset) => dest_offset.0,
_ => panic!("Poseidon address destination should be a single value"),
};

let opcode = match function.as_str() {
"poseidon" => AvmOpcode::POSEIDON,
_ => panic!(
"Transpiler doesn't know how to process ForeignCall function {:?}",
function
),
};

avm_instrs.push(AvmInstruction {
opcode,
indirect: Some(1),
operands: vec![
AvmOperand::U32 {
value: dest_offset as u32,
},
AvmOperand::U32 {
value: hash_offset as u32,
},
AvmOperand::U32 {
value: hash_size as u32,
},
],
..Default::default()
});
}
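
// Illustrative sketch, not part of this commit: in contrast to the two-field hashes above,
// "poseidon" returns a single field, so the destination is a plain MemoryAddress rather than
// a HeapArray; the offsets below are made up.
#[cfg(test)]
mod single_field_hash_sketch {
    use super::*;

    #[test]
    fn transpiles_poseidon_foreign_call() {
        let mut avm_instrs = Vec::new();
        let inputs = vec![ValueOrArray::HeapArray(HeapArray { pointer: MemoryAddress(4), size: 3 })];
        let destinations = vec![ValueOrArray::MemoryAddress(MemoryAddress(7))];
        emit_single_field_hash_instruction(&mut avm_instrs, &"poseidon".to_string(), &destinations, &inputs);
        // Expect a single POSEIDON instruction with operands [dest_offset=7, hash_offset=4, hash_size=3].
        assert!(matches!(avm_instrs[0].opcode, AvmOpcode::POSEIDON));
    }
}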

/// Getter instructions take NO inputs and return information
/// from the current execution context.
///
/// This includes:
/// - Global variables
/// - Caller
/// - storage address
/// - ...
fn handle_getter_instruction(
avm_instrs: &mut Vec<AvmInstruction>,
function: &String,
destinations: &Vec<ValueOrArray>,
inputs: &Vec<ValueOrArray>,
) {
// For the foreign calls we want to handle, we do not want inputs, as they are getters
assert!(inputs.len() == 0);
assert!(inputs.is_empty());
assert!(destinations.len() == 1);
let dest_offset_maybe = destinations[0];
let dest_offset = match dest_offset_maybe {
@@ -257,15 +401,14 @@ fn handle_foreign_call(
function
),
};

avm_instrs.push(AvmInstruction {
opcode,
indirect: Some(ALL_DIRECT),
operands: vec![AvmOperand::U32 {
value: dest_offset as u32,
}],
..Default::default()
});
})
}
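
// Illustrative sketch, not part of this commit: a getter foreign call carries no inputs and
// exactly one plain MemoryAddress destination; the name-to-opcode mapping lives in the
// collapsed hunk above. Hypothetical call-site shape ("address" is a placeholder name):
//
//     let destinations = vec![ValueOrArray::MemoryAddress(MemoryAddress(5))];
//     let inputs: Vec<ValueOrArray> = vec![];
//     handle_getter_instruction(&mut avm_instrs, &"address".to_string(), &destinations, &inputs);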

/// Handles Brillig's CONST opcode.
@@ -316,7 +459,7 @@ fn emit_set(tag: AvmTypeTag, dest: u32, value: u128) -> AvmInstruction {
AvmTypeTag::UINT64 => AvmOperand::U64 {
value: value as u64,
},
AvmTypeTag::UINT128 => AvmOperand::U128 { value: value },
AvmTypeTag::UINT128 => AvmOperand::U128 { value },
_ => panic!("Invalid type tag {:?} for set", tag),
},
// dest offset
@@ -342,7 +485,7 @@ fn emit_cast(source: u32, destination: u32, dst_tag: AvmTypeTag) -> AvmInstruction {
fn emit_mov(indirect: Option<u8>, source: u32, dest: u32) -> AvmInstruction {
AvmInstruction {
opcode: AvmOpcode::MOV,
indirect: indirect,
indirect,
operands: vec![
AvmOperand::U32 { value: source },
AvmOperand::U32 { value: dest },
@@ -351,6 +494,44 @@ fn emit_mov(indirect: Option<u8>, source: u32, dest: u32) -> AvmInstruction {
}
}

/// Black box functions. For the time being this only covers Pedersen operations, as the black box function API suits our current needs
/// (an array goes in -> a field element comes out).
fn handle_black_box_function(avm_instrs: &mut Vec<AvmInstruction>, operation: &BlackBoxOp) {
match operation {
BlackBoxOp::PedersenHash {
inputs,
domain_separator: _,
output,
} => {
let hash_offset = inputs.pointer.0;
let hash_size = inputs.size.0;

let dest_offset = output.0;

avm_instrs.push(AvmInstruction {
opcode: AvmOpcode::PEDERSEN,
indirect: Some(1),
operands: vec![
AvmOperand::U32 {
value: dest_offset as u32,
},
AvmOperand::U32 {
value: hash_offset as u32,
},
AvmOperand::U32 {
value: hash_size as u32,
},
],
..Default::default()
});
}
_ => panic!(
"Transpiler doesn't know how to process BlackBoxOp {:?}",
operation
),
}
}
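
// Illustrative sketch, not part of this commit. It assumes acvm's HeapVector (pointer and size
// both MemoryAddresses, as implied by `inputs.size.0` above), its import path, and that
// `domain_separator` is a MemoryAddress; the offsets below are made up.
#[cfg(test)]
mod pedersen_sketch {
    use super::*;
    use acvm::brillig_vm::brillig::HeapVector;

    #[test]
    fn transpiles_pedersen_hash() {
        let mut avm_instrs = Vec::new();
        let op = BlackBoxOp::PedersenHash {
            inputs: HeapVector { pointer: MemoryAddress(2), size: MemoryAddress(3) },
            domain_separator: MemoryAddress(0), // ignored by the transpiler
            output: MemoryAddress(9),
        };
        handle_black_box_function(&mut avm_instrs, &op);
        // Expect a single PEDERSEN instruction with operands [output.0, inputs.pointer.0, inputs.size.0].
        assert_eq!(avm_instrs.len(), 1);
        assert!(matches!(avm_instrs[0].opcode, AvmOpcode::PEDERSEN));
    }
}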

/// Compute an array that maps each Brillig pc to an AVM pc.
/// This must be done before transpiling to properly transpile jump destinations.
/// This is necessary for two reasons:
@@ -367,12 +548,7 @@ fn map_brillig_pcs_to_avm_pcs(initial_offset: usize, brillig: &Brillig) -> Vec<usize> {
pc_map[0] = initial_offset;
for i in 0..brillig.bytecode.len() - 1 {
let num_avm_instrs_for_this_brillig_instr = match &brillig.bytecode[i] {
BrilligOpcode::Load { .. } => 2,
BrilligOpcode::Store { .. } => 2,
BrilligOpcode::Const { bit_size, .. } => match bit_size {
254 => 2, // Field.
_ => 1,
},
BrilligOpcode::Const { bit_size: 254, .. } => 2,
_ => 1,
};
// next Brillig pc will map to an AVM pc offset by the
@@ -384,6 +560,7 @@ fn map_brillig_pcs_to_avm_pcs(initial_offset: usize, brillig: &Brillig) -> Vec<usize> {

fn tag_from_bit_size(bit_size: u32) -> AvmTypeTag {
match bit_size {
1 => AvmTypeTag::UINT8, // temp workaround
8 => AvmTypeTag::UINT8,
16 => AvmTypeTag::UINT16,
32 => AvmTypeTag::UINT32,
4 changes: 2 additions & 2 deletions barretenberg/.gitrepo
@@ -6,7 +6,7 @@
[subrepo]
remote = https://github.com/AztecProtocol/barretenberg
branch = master
commit = c2f1a58aa28097f7f303a469a7499ea845104736
parent = e6d65a7fe9ebe855dcac389775aae2ccc3fa311f
commit = 71c35f6b838ed4dbd14ae6cc329677bcd0391499
parent = 9f67eec73c5d639df16e6b3bf45c4a1fc1c54bad
method = merge
cmdver = 0.4.6
10 changes: 6 additions & 4 deletions barretenberg/acir_tests/README.md
@@ -43,18 +43,20 @@ The `all_cmds` flow tests all the supported commands on the binary. Slower, but
$ FLOW=all_cmds ./run_acir_tests.sh 1_mul
```

## Regenerating witness for `double_verify_proof`
## Regenerating witness for `double_verify_proof` and `double_verify_nested_proof`

`double_verify_proof` has inputs that are proof-system specific, such as the circuit verification key and the proofs themselves that are being recursively verified. Certain proof system changes can render the key or inner proofs invalid.

This means we have to generate the proof-specific inputs using our backend and pass them back into `double_verify_proof` to regenerate an accurate witness. The following is a temporary solution for manually regenerating the inputs for `double_verify_proof` on a specific Noir branch.

First find `acir_tests/gen_inner_proof_inputs.sh`. Change the $BRANCH env var to your working branch and $PROOF_NAME to your first input you want to recursively verify. The script is going to generate the proof system specific verification key output and proof for the `assert_statement` test.
First find `acir_tests/gen_inner_proof_inputs.sh`. Change the $BRANCH env var to your working branch and $PROOF_NAME to your first input you want to recursively verify. The script is going to generate the proof system specific verification key output and proof for the `assert_statement_recursive` test.

To run:
```
./gen_inner_proof_inputs.sh
```
To generate a new input you can run the script again. To generate a new file under `assert_statement/proofs/` be sure to change the $PROOF_NAME inside of the script.
To generate a new input you can run the script again. To generate a new file under `assert_statement_recursive/proofs/` be sure to change the $PROOF_NAME inside of the script.

You can then copy these inputs over to your working branch in Noir and regenerate the witness for `double_verify_proof`. You can then change the branch in `run_acir_tests.sh` to this Noir working branch as well and `double_verify_proof` should pass.
You can then copy these inputs over to your working branch in Noir and regenerate the witness for `double_verify_proof`. You can then change the branch in `run_acir_tests.sh` to this Noir working branch as well and `double_verify_proof` should pass.

The same process should then be repeated, but now `double_verify_proof` will be the circuit for which we generate recursive inputs using `gen_inner_proof_inputs.sh`. The recursive artifacts should then be supplied as inputs to `double_verify_nested_proof`.