diff --git a/ci/scripts/metric_unify/main.py b/ci/scripts/metric_unify/main.py deleted file mode 100644 index 7ac47da96a..0000000000 --- a/ci/scripts/metric_unify/main.py +++ /dev/null @@ -1,271 +0,0 @@ -import json -import argparse -# import sys - -# labels is a list of (key, value) strings -def labels_to_tuple(labels): - return tuple([tuple(pair) for pair in labels]) - -# Helper function to add metric data into the flat dict -def add_to_flat_dict(labels, metric, value, flat_dict): - if labels not in flat_dict: - flat_dict[labels] = [] - flat_dict[labels].append(Metric(metric, value)) - -def custom_sort_label_keys(label_key): - """ - Custom sorting function that ensures 'group' comes first. - Other keys are sorted alphabetically. - """ - # Prioritize 'group' by giving it the lowest possible sort value - if label_key == 'group': - return (0, label_key) # Lowest priority for 'group' - else: - return (1, label_key) # Normal priority for other keys - -class Aggregation: - name = "" - group_by = [] # Label keys to group by - metrics = [] - operation = "" - - def __init__(self, name, group_by, metrics, operation): - self.name = name - self.group_by = group_by - self.metrics = metrics - self.operation = operation - - def __str__(self): - return f"Aggregation(name={self.name}, group_by={self.group_by}, metrics={self.metrics}, operation={self.operation})" - - def __repr__(self): - return self.__str__() - -class Metric: - name = "" - value = 0 - diff_value = None - diff_percent = None - - def __init__(self, name, value): - self.name = name - self.value = value - - def __str__(self): - # Customize the string representation for printing - diff_str = "" - if self.diff_value is not None: - diff_str = f", diff_value={self.diff_value}" - if self.diff_percent is not None: - diff_str += f", diff_percent={self.diff_percent:+.2%}" - return f"Metric(name={self.name}, value={self.value}{diff_str})" - - def __repr__(self): - return self.__str__() - -class MetricDb: - def __init__(self, 
metrics_file): - # Dict[labels => List[Metric]] - self.flat_dict = {} - # Dict label_keys_tuple => Dict[label_values_tuple => List[Metric]] - self.dict_by_label_types = {} - with open(metrics_file, 'r') as f: - data = json.load(f) - - # Process counters - for counter in data.get('counter', []): - labels = labels_to_tuple(counter['labels']) - metric = counter['metric'] - value = int(counter['value']) - if value == 0: - continue - add_to_flat_dict(labels, metric, value, self.flat_dict) - - # Process gauges - for gauge in data.get('gauge', []): - labels = labels_to_tuple(gauge['labels']) - metric = gauge['metric'] - value = float(gauge['value']) - add_to_flat_dict(labels, metric, value, self.flat_dict) - - self.separate_by_label_types() - - def separate_by_label_types(self): - for labels, metrics in self.flat_dict.items(): - label_keys = tuple(sorted([key for key, _ in labels], key=custom_sort_label_keys)) - label_dict = dict(labels) - label_values = tuple([label_dict[key] for key in label_keys]) - if label_keys not in self.dict_by_label_types: - self.dict_by_label_types[label_keys] = {} - if label_values not in self.dict_by_label_types[label_keys]: - self.dict_by_label_types[label_keys][label_values] = [] - self.dict_by_label_types[label_keys][label_values].extend(metrics) - -# mutates db so metric dict has fields "diff_value" and "diff_percent" -def diff_metrics(db: MetricDb, db_old: MetricDb): - for (labels, metrics) in db.flat_dict.items(): - if labels not in db_old.flat_dict: - continue - for metric in metrics: - metric_old = next((m for m in db_old.flat_dict[labels] if m.name == metric.name), None) - if metric_old: - metric.diff_value = metric.value - metric_old.value - if metric_old.value != 0: - metric.diff_percent = (metric.value - metric_old.value) / metric_old.value - db.separate_by_label_types() - -# separated_dict is dict by label types -def generate_markdown_tables(separated_dict, excluded_labels=["cycle_tracker_span"]): - markdown_output = "" - # Loop 
through each set of tuple_keys - for tuple_keys, metrics_dict in separated_dict.items(): - tuple_keys = list(tuple_keys) - exclude = any(excluded_label in tuple_keys for excluded_label in excluded_labels) - if exclude: - continue - - # Get all unique metric names - metric_names = set() - for metric_list in metrics_dict.values(): - metric_names.update([metric.name for metric in metric_list]) - metric_names = sorted(metric_names) - - # Create the table header - header = "| " + " | ".join([f"{key}" for key in list(tuple_keys)] + metric_names) + " |" - separator = "| " + " | ".join(["---"] * (len(tuple_keys) + len(metric_names))) + " |" - markdown_output += header + "\n" + separator + "\n" - - # Fill the table with rows for each tuple_value and associated metrics - for tuple_values, metrics in metrics_dict.items(): - row_values = list(tuple_values) - row_metrics = [] - for metric_name in metric_names: - metric = next((m for m in metrics if m.name == metric_name), None) - metric_str = "" - if metric: - if metric.diff_percent is not None and metric.diff_value != 0: - color = "red" if metric.diff_percent > 0 else "green" - # Format the percentage with the color styling - metric_str += f'({metric.diff_value:+,} [{metric.diff_percent:+.1%}]) ' - metric_str += "
" + f"{metric.value:,}" + "
" - row_metrics.append(metric_str) - markdown_output += "| " + " | ".join(row_values + row_metrics) + " |\n" - markdown_output += "\n" - return markdown_output - -def read_aggregations(aggregation_json): - with open(aggregation_json, 'r') as f: - aggregation_data = json.load(f) - aggregations = [] - for aggregation in aggregation_data['aggregations']: - aggregations.append(Aggregation(aggregation['name'], aggregation['group_by'], aggregation['metrics'], aggregation['operation'])) - return aggregations - -def apply_aggregations(db: MetricDb, aggregations): - for aggregation in aggregations: - # group_by_values => aggregation metric - group_by_dict = {} - if aggregation.operation == "sum" or aggregation.operation == "unique": - for tuple_keys, metrics_dict in db.dict_by_label_types.items(): - if not set(aggregation.group_by).issubset(set(tuple_keys)): - continue - for tuple_values, metrics in metrics_dict.items(): - label_dict = dict(zip(tuple_keys, tuple_values)) - group_by_values = tuple([label_dict[key] for key in aggregation.group_by]) - for metric in metrics: - if metric.name in aggregation.metrics: - if aggregation.operation == "sum": - if group_by_values not in group_by_dict: - group_by_dict[group_by_values] = 0 - group_by_dict[group_by_values] += metric.value - elif aggregation.operation == "unique": - if group_by_values not in group_by_dict: - group_by_dict[group_by_values] = metric.value - else: - assert group_by_dict[group_by_values] == metric.value - - for group_by_values, agg_value in group_by_dict.items(): - aggregation_label = labels_to_tuple([(k,v) for k,v in zip(aggregation.group_by, group_by_values)]) - if aggregation_label not in db.flat_dict: - db.flat_dict[aggregation_label] = [] - overwrite = False - for metric in db.flat_dict[aggregation_label]: - if metric.name == aggregation.name: - if metric.value != agg_value: - print(f"[WARN] Overwriting {metric.name}: previous value = {metric.value}, new value = {sum}") - metric.value = agg_value - 
overwrite = True - break - if not overwrite: - db.flat_dict[aggregation_label].append(Metric(aggregation.name, agg_value)) - else: - raise ValueError(f"Unknown operation: {aggregation.operation}") - db.separate_by_label_types() - -# old_metrics_json is optional -def generate_displayable_metrics( - metrics_json, - old_metrics_json, - excluded_labels=["cycle_tracker_span"], - aggregation_json=None - ): - db = MetricDb(metrics_json) - - aggregations = [] - if aggregation_json: - aggregations = read_aggregations(aggregation_json) - apply_aggregations(db, aggregations) - - if old_metrics_json: - db_old = MetricDb(old_metrics_json) - if aggregation_json: - aggregations = read_aggregations(aggregation_json) - apply_aggregations(db_old, aggregations) - - diff_metrics(db, db_old) - - detailed_markdown_output = generate_markdown_tables(db.dict_by_label_types, excluded_labels) - - # Hacky way to get top level aggregate metrics grouped by "group" label - group_to_metrics = {} - group_tuple = tuple(["group"]) - for (group_name, metrics) in db.dict_by_label_types.get(group_tuple, {}).items(): - agg_metrics = [] - for metric in metrics: - if metric.name in [a.name for a in aggregations]: - agg_metrics.append(metric) - if len(agg_metrics) == 0: - continue - if group_name not in group_to_metrics: - group_to_metrics[group_name] = [] - group_to_metrics[group_name].extend(agg_metrics) - - markdown_output = generate_markdown_tables({ group_tuple: group_to_metrics }) - - markdown_output += "\n" - markdown_output += "
\nDetailed Metrics\n\n" - markdown_output += detailed_markdown_output - markdown_output += "
\n\n" - - return markdown_output - -def main(): - argparser = argparse.ArgumentParser() - argparser.add_argument('metrics_json', type=str, help="Path to the metrics JSON") - argparser.add_argument('--prev', type=str, required=False, help="Path to the previous metrics JSON for diff generation") - argparser.add_argument('--excluded-labels', type=str, required=False, help="Comma-separated list of labels to exclude from the table") - argparser.add_argument('--top-labels', type=str, required=False, help="Comma-separated list of labels to include in summary rows") - argparser.add_argument('--aggregation-json', type=str, required=False, help="Path to a JSON file with metrics to aggregate") - args = argparser.parse_args() - - markdown_output = generate_displayable_metrics( - args.metrics_json, - args.prev, - excluded_labels=args.excluded_labels.split(",") if args.excluded_labels else ["cycle_tracker_span"], - aggregation_json=args.aggregation_json - ) - print(markdown_output) - - -if __name__ == '__main__': - main() diff --git a/crates/circuits/primitives/src/lib.rs b/crates/circuits/primitives/src/lib.rs index 90dd810771..761e6693d6 100644 --- a/crates/circuits/primitives/src/lib.rs +++ b/crates/circuits/primitives/src/lib.rs @@ -1,7 +1,7 @@ //! This crate contains a collection of primitives for use when building circuits. -//! The primitives are separated into two types: standalone [Air](p3_air::Air)s and [SubAir]s. +//! The primitives are separated into two types: standalone [Air](openvm_stark_backend::p3_air::Air)s and [SubAir]s. //! -//! The following modules contain standalone [Air]s: +//! The following modules contain standalone [Air](openvm_stark_backend::p3_air::Air)s: //! - [range] //! - [range_gate] //! 
- [range_tuple] diff --git a/crates/circuits/primitives/src/sub_air.rs b/crates/circuits/primitives/src/sub_air.rs index 0ba83ad604..52966b88bb 100644 --- a/crates/circuits/primitives/src/sub_air.rs +++ b/crates/circuits/primitives/src/sub_air.rs @@ -3,7 +3,7 @@ use openvm_stark_backend::p3_air::AirBuilder; /// Trait with associated types intended to allow re-use of constraint logic /// inside other AIRs. /// -/// A `SubAir` is **not** an [`Air`](p3_air::Air) itself. +/// A `SubAir` is **not** an [Air](openvm_stark_backend::p3_air::Air) itself. /// A `SubAir` is a struct that holds the means to generate a particular set of constraints, /// meant to be re-usable within other AIRs. /// diff --git a/crates/circuits/sha256-air/src/trace.rs b/crates/circuits/sha256-air/src/trace.rs index 4bf76980cb..45477a472c 100644 --- a/crates/circuits/sha256-air/src/trace.rs +++ b/crates/circuits/sha256-air/src/trace.rs @@ -42,7 +42,7 @@ impl Sha256Air { /// and the buffer values that will be put in rows 0..4. /// Will populate the given `trace` with the trace of the block, where the width of the trace is `trace_width` /// and the starting column for the `Sha256Air` is `trace_start_col`. - /// **Note**: this function only generates some of the required trace. Another pass is required, refer to [`generate_missing_cells`] for details. + /// **Note**: this function only generates some of the required trace. Another pass is required, refer to [`Self::generate_missing_cells`] for details. #[allow(clippy::too_many_arguments)] pub fn generate_block_trace( &self, @@ -285,7 +285,7 @@ impl Sha256Air { /// This function will fill in the cells that we couldn't do during the first pass. 
/// This function should be called only after `generate_block_trace` was called for all blocks - /// And [`generate_default_row`] is called for all invalid rows + /// And [`Self::generate_default_row`] is called for all invalid rows /// Will populate the missing values of `trace`, where the width of the trace is `trace_width` /// and the starting column for the `Sha256Air` is `trace_start_col`. /// Note: `trace` needs to be the rows 1..17 of a block and the first row of the next block diff --git a/crates/sdk/src/commit.rs b/crates/sdk/src/commit.rs index 1472e8f46f..b1c56d28e1 100644 --- a/crates/sdk/src/commit.rs +++ b/crates/sdk/src/commit.rs @@ -35,7 +35,7 @@ pub struct AppExecutionCommit { /// ), /// hash(right_pad(pc_start, 0)) /// ) - /// `right_pad` example, if pc_start = 123, right_pad(pc_start, 0) = [123,0,0,0,0,0,0,0] + /// `right_pad` example, if pc_start = 123, right_pad(pc_start, 0) = \[123,0,0,0,0,0,0,0\] pub exe_commit: [T; DIGEST_SIZE], } diff --git a/crates/sdk/src/verifier/root/types.rs b/crates/sdk/src/verifier/root/types.rs index 86f584d1c5..d1b8988276 100644 --- a/crates/sdk/src/verifier/root/types.rs +++ b/crates/sdk/src/verifier/root/types.rs @@ -25,7 +25,7 @@ pub struct RootVmVerifierPvs { /// Input for the root VM verifier. /// Note: Root verifier is proven in Root SC, but it usually verifies proofs in SC. So -/// usually only RootVmVerifierInput is needed. +/// usually only RootVmVerifierInput\ is needed. #[derive(Serialize, Deserialize, Derivative)] #[serde(bound = "")] #[derivative(Clone(bound = "Com: Clone"))] diff --git a/crates/toolchain/instructions/src/program.rs b/crates/toolchain/instructions/src/program.rs index 20299395a6..750a45e463 100644 --- a/crates/toolchain/instructions/src/program.rs +++ b/crates/toolchain/instructions/src/program.rs @@ -18,7 +18,7 @@ pub struct Program { /// A map from program counter to instruction. /// Sometimes the instructions are enumerated as 0, 4, 8, etc. 
/// Maybe at some point we will replace this with a struct that would have a `Vec` under the hood and divide the incoming `pc` by whatever given. - instructions_and_debug_infos: Vec, Option)>>, + pub instructions_and_debug_infos: Vec, Option)>>, pub step: u32, pub pc_base: u32, /// The upper bound of the number of public values the program would publish. @@ -58,6 +58,27 @@ impl Program { } } + pub fn new_without_debug_infos_with_option( + instructions: &[Option>], + step: u32, + pc_base: u32, + max_num_public_values: usize, + ) -> Self { + assert!( + instructions.is_empty() + || pc_base + (instructions.len() as u32 - 1) * step <= MAX_ALLOWED_PC + ); + Self { + instructions_and_debug_infos: instructions + .iter() + .map(|instruction| instruction.clone().map(|instruction| (instruction, None))) + .collect(), + step, + pc_base, + max_num_public_values, + } + } + /// We assume that pc_start = pc_base = 0 everywhere except the RISC-V programs, until we need otherwise /// We use [DEFAULT_PC_STEP] for consistency with RISC-V pub fn from_instructions_and_debug_infos( @@ -105,7 +126,7 @@ impl Program { self.instructions_and_debug_infos.is_empty() } - pub fn instructions(&self) -> Vec> { + pub fn defined_instructions(&self) -> Vec> { self.instructions_and_debug_infos .iter() .flatten() @@ -113,6 +134,11 @@ impl Program { .collect() } + // if this is being called a lot, we may want to optimize this later + pub fn num_defined_instructions(&self) -> usize { + self.defined_instructions().len() + } + pub fn debug_infos(&self) -> Vec> { self.instructions_and_debug_infos .iter() @@ -168,7 +194,7 @@ impl Program { } impl Display for Program { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - for instruction in self.instructions().iter() { + for instruction in self.defined_instructions().iter() { let Instruction { opcode, a, @@ -179,7 +205,7 @@ impl Display for Program { f, g, } = instruction; - write!( + writeln!( formatter, "{:?} {} {} {} {} {} {} {}", opcode, a, 
b, c, d, e, f, g, @@ -190,7 +216,7 @@ impl Display for Program { } pub fn display_program_with_pc(program: &Program) { - for (pc, instruction) in program.instructions().iter().enumerate() { + for (pc, instruction) in program.defined_instructions().iter().enumerate() { let Instruction { opcode, a, diff --git a/crates/toolchain/transpiler/src/extension.rs b/crates/toolchain/transpiler/src/extension.rs index 8163301fff..805c50a33f 100644 --- a/crates/toolchain/transpiler/src/extension.rs +++ b/crates/toolchain/transpiler/src/extension.rs @@ -3,10 +3,10 @@ use openvm_instructions::instruction::Instruction; /// Trait to add custom RISC-V instruction transpilation to OpenVM instruction format. /// RISC-V instructions always come in 32-bit chunks. /// An important feature is that multiple 32-bit RISC-V instructions can be transpiled into a single OpenVM instruction. -/// See `process_custom` for details. +/// See [process_custom](Self::process_custom) for details. pub trait TranspilerExtension { /// The `instruction_stream` provides a view of the remaining RISC-V instructions to be processed, - /// presented as 32-bit chunks. The [`CustomInstructionProcessor`] should determine if it knows how to transpile + /// presented as 32-bit chunks. The [process_custom](Self::process_custom) should determine if it knows how to transpile /// the next contiguous section of RISC-V instructions into an [`Instruction`]. /// It returns `None` if it cannot transpile. Otherwise it returns `(instruction, how_many_u32s)` to indicate that /// `instruction_stream[..how_many_u32s]` should be transpiled into `instruction`. diff --git a/crates/vm/src/arch/execution.rs b/crates/vm/src/arch/execution.rs index 624ca69da8..fc33405688 100644 --- a/crates/vm/src/arch/execution.rs +++ b/crates/vm/src/arch/execution.rs @@ -287,7 +287,7 @@ impl From<(u32, Option)> for PcIncOrSet { } /// Phantom sub-instructions affect the runtime of the VM and the trace matrix values. 
-/// However they all have no AIR constraints besides advancing the pc by [DEFAULT_PC_STEP](super::program::DEFAULT_PC_STEP). +/// However they all have no AIR constraints besides advancing the pc by [DEFAULT_PC_STEP](openvm_instructions::program::DEFAULT_PC_STEP). /// /// They should not mutate memory, but they can mutate the input & hint streams. /// diff --git a/crates/vm/src/arch/integration_api.rs b/crates/vm/src/arch/integration_api.rs index 336a7d5b7f..294b6a4b4c 100644 --- a/crates/vm/src/arch/integration_api.rs +++ b/crates/vm/src/arch/integration_api.rs @@ -94,7 +94,7 @@ pub trait VmAdapterChip { pub trait VmAdapterAir: BaseAir { type Interface: VmAdapterInterface; - /// [Air](p3_air::Air) constraints owned by the adapter. + /// [Air](openvm_stark_backend::p3_air::Air) constraints owned by the adapter. /// The `interface` is given as abstract expressions so it can be directly used in other AIR constraints. /// /// Adapters should document the max constraint degree as a function of the constraint degrees of `reads, writes, instruction`. diff --git a/crates/vm/src/arch/testing/memory/mod.rs b/crates/vm/src/arch/testing/memory/mod.rs index 723446601c..71e4cf9275 100644 --- a/crates/vm/src/arch/testing/memory/mod.rs +++ b/crates/vm/src/arch/testing/memory/mod.rs @@ -40,7 +40,7 @@ impl MemoryTester { } } - /// Returns the cell value at the current timestamp according to [MemoryController]. + /// Returns the cell value at the current timestamp according to `MemoryController`. 
pub fn read_cell(&mut self, address_space: usize, pointer: usize) -> F { let [addr_space, pointer] = [address_space, pointer].map(F::from_canonical_usize); // core::BorrowMut confuses compiler diff --git a/crates/vm/src/system/memory/offline_checker/bridge.rs b/crates/vm/src/system/memory/offline_checker/bridge.rs index 84bef2351b..64031d13c1 100644 --- a/crates/vm/src/system/memory/offline_checker/bridge.rs +++ b/crates/vm/src/system/memory/offline_checker/bridge.rs @@ -115,7 +115,7 @@ pub struct MemoryReadOperation<'a, T, V, const N: usize> { /// The max degree of constraints is: /// eval_timestamps: deg(enabled) + max(1, deg(self.timestamp)) -/// eval_bulk_access: refer to [MemoryOfflineChecker::eval_bulk_access] +/// eval_bulk_access: refer to private function MemoryOfflineChecker::eval_bulk_access impl, const N: usize> MemoryReadOperation<'_, F, V, N> { /// Evaluate constraints and send/receive interactions. pub fn eval(self, builder: &mut AB, enabled: impl Into) @@ -169,7 +169,7 @@ pub struct MemoryReadOrImmediateOperation<'a, T, V> { /// deg(address.address_space) + deg(aux.is_zero_aux)) /// is_immediate check: deg(aux.is_immediate) + max(deg(data), deg(address.pointer)) /// eval_timestamps: deg(enabled) + max(1, deg(self.timestamp)) -/// eval_bulk_access: refer to [MemoryOfflineChecker::eval_bulk_access] +/// eval_bulk_access: refer to private function MemoryOfflineChecker::eval_bulk_access impl> MemoryReadOrImmediateOperation<'_, F, V> { /// Evaluate constraints and send/receive interactions. 
pub fn eval(self, builder: &mut AB, enabled: impl Into) @@ -229,7 +229,7 @@ pub struct MemoryWriteOperation<'a, T, V, const N: usize> { /// The max degree of constraints is: /// eval_timestamps: deg(enabled) + max(1, deg(self.timestamp)) -/// eval_bulk_access: refer to [MemoryOfflineChecker::eval_bulk_access] +/// eval_bulk_access: refer to private function MemoryOfflineChecker::eval_bulk_access impl, const N: usize> MemoryWriteOperation<'_, T, V, N> { /// Evaluate constraints and send/receive interactions. `enabled` must be boolean. pub fn eval(self, builder: &mut AB, enabled: impl Into) diff --git a/crates/vm/src/system/poseidon2/mod.rs b/crates/vm/src/system/poseidon2/mod.rs index 3d8aa84a90..f24dd496ed 100644 --- a/crates/vm/src/system/poseidon2/mod.rs +++ b/crates/vm/src/system/poseidon2/mod.rs @@ -1,5 +1,5 @@ //! Chip to handle **native kernel** instructions for Poseidon2 `compress` and `permute`. -//! This chip is put in [intrinsics](crate::intrinsics) for organizational convenience, but +//! This chip is put in `intrinsics` for organizational convenience, but //! it is used as a system chip for persistent memory and as a native kernel chip for aggregation. //! //! 
Note that neither `compress` nor `permute` on its own diff --git a/crates/vm/src/system/program/mod.rs b/crates/vm/src/system/program/mod.rs index 5f7fe40a36..db83444ce8 100644 --- a/crates/vm/src/system/program/mod.rs +++ b/crates/vm/src/system/program/mod.rs @@ -44,8 +44,10 @@ impl ProgramChip { pub fn set_program(&mut self, mut program: Program) { let true_program_length = program.len(); - while !program.len().is_power_of_two() { + let mut number_actual_instructions = program.num_defined_instructions(); + while !number_actual_instructions.is_power_of_two() { program.push_instruction(padding_instruction()); + number_actual_instructions += 1; } self.true_program_length = true_program_length; self.execution_frequencies = vec![0; program.len()]; diff --git a/crates/vm/src/system/program/tests/mod.rs b/crates/vm/src/system/program/tests/mod.rs index b126a37888..27ffb1a0d4 100644 --- a/crates/vm/src/system/program/tests/mod.rs +++ b/crates/vm/src/system/program/tests/mod.rs @@ -2,7 +2,7 @@ use std::{iter, sync::Arc}; use openvm_instructions::{ instruction::Instruction, - program::{Program, DEFAULT_PC_STEP}, + program::{Program, DEFAULT_MAX_NUM_PUBLIC_VALUES, DEFAULT_PC_STEP}, VmOpcode, }; use openvm_native_compiler::{ @@ -35,10 +35,9 @@ assert_impl_all!(VmCommittedExe: Serialize, Deserialize assert_impl_all!(VmCommittedExe: Serialize, DeserializeOwned); fn interaction_test(program: Program, execution: Vec) { - let instructions = program.instructions(); let bus = ProgramBus(READ_INSTRUCTION_BUS); - let mut chip = ProgramChip::new_with_program(program, bus); - let mut execution_frequencies = vec![0; instructions.len()]; + let mut chip = ProgramChip::new_with_program(program.clone(), bus); + let mut execution_frequencies = vec![0; program.len()]; for pc_idx in execution { execution_frequencies[pc_idx as usize] += 1; chip.get_instruction(pc_idx * DEFAULT_PC_STEP).unwrap(); @@ -47,29 +46,33 @@ fn interaction_test(program: Program, execution: Vec) { let counter_air = 
DummyInteractionAir::new(9, true, bus.0); let mut program_cells = vec![]; - for (pc_idx, instruction) in instructions.iter().enumerate() { - program_cells.extend(vec![ - BabyBear::from_canonical_usize(execution_frequencies[pc_idx]), // hacky: we should switch execution_frequencies into hashmap - BabyBear::from_canonical_usize(pc_idx * (DEFAULT_PC_STEP as usize)), - instruction.opcode.to_field(), - instruction.a, - instruction.b, - instruction.c, - instruction.d, - instruction.e, - instruction.f, - instruction.g, - ]); + for (index, frequency) in execution_frequencies.into_iter().enumerate() { + let option = program.get_instruction_and_debug_info(index); + if let Some((instruction, _)) = option { + program_cells.extend([ + BabyBear::from_canonical_usize(frequency), // hacky: we should switch execution_frequencies into hashmap + BabyBear::from_canonical_usize(index * (DEFAULT_PC_STEP as usize)), + instruction.opcode.to_field(), + instruction.a, + instruction.b, + instruction.c, + instruction.d, + instruction.e, + instruction.f, + instruction.g, + ]); + } } // Pad program cells with zeroes to make height a power of two. 
let width = 10; - let desired_height = instructions.len().next_power_of_two(); - let cells_to_add = (desired_height - instructions.len()) * width; + let original_height = program.num_defined_instructions(); + let desired_height = original_height.next_power_of_two(); + let cells_to_add = (desired_height - original_height) * width; program_cells.extend(iter::repeat(BabyBear::ZERO).take(cells_to_add)); let counter_trace = RowMajorMatrix::new(program_cells, 10); - println!("trace height = {}", instructions.len()); + println!("trace height = {}", original_height); println!("counter trace height = {}", counter_trace.height()); BabyBearPoseidon2Engine::run_test_fast(vec![ @@ -211,3 +214,63 @@ fn test_program_negative() { ]) .expect("Verification failed"); } + +#[test] +fn test_program_with_undefined_instructions() { + let n = 2; + + // see core/tests/mod.rs + let instructions = vec![ + // word[0]_1 <- word[n]_0 + Some(Instruction::large_from_isize( + VmOpcode::with_default_offset(STOREW), + n, + 0, + 0, + 0, + 1, + 0, + 1, + )), + // word[1]_1 <- word[1]_1 + Some(Instruction::large_from_isize( + VmOpcode::with_default_offset(STOREW), + 1, + 1, + 0, + 0, + 1, + 0, + 1, + )), + // if word[0]_1 == n then pc += 3*DEFAULT_PC_STEP + Some(Instruction::from_isize( + VmOpcode::with_default_offset(NativeBranchEqualOpcode(BEQ)), + 0, + n, + 3 * DEFAULT_PC_STEP as isize, + 1, + 0, + )), + None, + None, + // terminate + Some(Instruction::from_isize( + VmOpcode::with_default_offset(TERMINATE), + 0, + 0, + 0, + 0, + 0, + )), + ]; + + let program = Program::new_without_debug_infos_with_option( + &instructions, + DEFAULT_PC_STEP, + 0, + DEFAULT_MAX_NUM_PUBLIC_VALUES, + ); + + interaction_test(program, vec![0, 2, 5]); +} diff --git a/crates/vm/src/system/program/trace.rs b/crates/vm/src/system/program/trace.rs index 70e7e99238..3a0407b50d 100644 --- a/crates/vm/src/system/program/trace.rs +++ b/crates/vm/src/system/program/trace.rs @@ -62,7 +62,10 @@ impl ProgramChip { let common_trace = 
RowMajorMatrix::new_col( self.execution_frequencies .into_iter() - .map(|x| F::from_canonical_usize(x)) + .zip_eq(self.program.instructions_and_debug_infos.iter()) + .filter_map(|(frequency, option)| { + option.as_ref().map(|_| F::from_canonical_usize(frequency)) + }) .collect::>(), ); if let Some(cached_trace) = cached_trace { diff --git a/docs/crates/benchmarks.md b/docs/crates/benchmarks.md index 8ced00c68c..57b5af2fa9 100644 --- a/docs/crates/benchmarks.md +++ b/docs/crates/benchmarks.md @@ -94,6 +94,24 @@ The `OUTPUT_PATH` environmental variable shouuld be set to the file path where y To run a benchmark with the leaf aggregation, add `--features aggregation` to the above command. +### Markdown Output + +To generate a markdown summary of the collected metrics, first install `openvm-prof`: + +```bash +cd /crates/prof +cargo install --force --path . +``` + +Then run the command: + +```bash +openvm-prof --json-paths $OUTPUT_PATH +``` + +This will generate a markdown file to the same path as $OUTPUT_PATH but with a `.md` extension. The `--json-paths` argument can take multiple files, comma separated. +There is also an optional `--prev-json-paths` argument to compare the metrics with a previous run. + ### Circuit Flamegraphs While traditional flamegraphs generated from instrumenting a proving binary run on the host machine are useful, diff --git a/docs/repo/layout.md b/docs/repo/layout.md index 39dd77020e..00e28482bf 100644 --- a/docs/repo/layout.md +++ b/docs/repo/layout.md @@ -6,6 +6,7 @@ The main components of the repository are: - [Documentation](#documentation) - [Benchmarks](#benchmarks) - [CI](#ci) + - [Profiling](#profiling) - [CLI](#cli) - [SDK](#sdk) - [Toolchain](#toolchain) @@ -33,13 +34,17 @@ Benchmark guest programs and benchmark scripts are in [`openvm-benchmarks`](../. Scripts for CI use and metrics post-processing are in [`ci`](../../ci). 
+### Profiling + +- [`openvm-prof`](../../crates/prof): Tools to post-process metrics emitted by the VM for performance profiling. + ### CLI Command-line binary to compile, execute, and prove guest programs is in [`cli`](../../crates/cli). ### SDK -- [`sdk`](../../crates/sdk): The developer SDK for the VM. It includes the OpenVM aggregation programs to support continuations for all VMs in the framework, and well as a local aggregation scheduling implementation. It provides the final interface for proving an arbitrary program for a target VM. The SDK includes functionality to generate the final onchain SNARK verifier contract. +- [`openvm-sdk`](../../crates/sdk): The developer SDK for the VM. It includes the OpenVM aggregation programs to support continuations for all VMs in the framework, as well as a local aggregation scheduling implementation. It provides the final interface for proving an arbitrary program for a target VM. The SDK includes functionality to generate the final onchain SNARK verifier contract. ### Toolchain @@ -122,4 +127,4 @@ The toolchain, ISA, and VM are simultaenously extendable. All non-system functio - [`openvm-pairing-circuit`](../../extensions/pairing/circuit): Circuit extension for optimal Ate pairing on BN254 and BLS12-381 curves. - [`openvm-pairing-transpiler`](../../extensions/pairing/transpiler): Transpiler extension for optimal Ate pairing on BN254 and BLS12-381. - [`openvm-pairing-guest`](../../extensions/pairing/guest): Guest library with optimal Ate pairing on BN254 and BLS12-381 and associated constants. Also includes elliptic curve operations for VM runtime with the `halo2curves` feature gate. -- [`openvm-pairing-tests`](../../extensions/pairing/tests): Integration tests for the pairing extension. \ No newline at end of file +- [`openvm-pairing-tests`](../../extensions/pairing/tests): Integration tests for the pairing extension. 
diff --git a/extensions/keccak256/circuit/src/air.rs b/extensions/keccak256/circuit/src/air.rs index 3939cad3cb..d5ecaa3e6e 100644 --- a/extensions/keccak256/circuit/src/air.rs +++ b/extensions/keccak256/circuit/src/air.rs @@ -164,7 +164,7 @@ impl KeccakVmAir { } /// Keccak follows the 10*1 padding rule. - /// See Section 5.1 of https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf + /// See Section 5.1 of /// Note this is the ONLY difference between Keccak and SHA-3 /// /// Constrains padding constraints and length between rounds and diff --git a/extensions/keccak256/circuit/src/columns.rs b/extensions/keccak256/circuit/src/columns.rs index 03e7ce4f79..edb28fa8ed 100644 --- a/extensions/keccak256/circuit/src/columns.rs +++ b/extensions/keccak256/circuit/src/columns.rs @@ -37,7 +37,7 @@ pub struct KeccakInstructionCols { /// False on dummy rows only used to pad the height. pub is_enabled: T, /// Is enabled and first round of block. Used to lower constraint degree. - /// is_enabled * inner.step_flags[0] + /// is_enabled * inner.step_flags\[0\] pub is_enabled_first_round: T, /// The starting timestamp to use for memory access in this row. /// A single row will do multiple memory accesses. @@ -51,17 +51,17 @@ pub struct KeccakInstructionCols { /// Memory address space pub e: T, // Register values - /// dst <- [dst_ptr:4]_1 + /// dst <- \[dst_ptr:4\]_1 pub dst: [T; RV32_REGISTER_NUM_LIMBS], - /// src <- [src_ptr:4]_1 - /// We store src_limbs[i] = [src_ptr + i + 1]_1 and src = u32([src_ptr:4]_1) from which [src_ptr]_1 + /// src <- \[src_ptr:4\]_1 + /// We store src_limbs\[i\] = \[src_ptr + i + 1\]_1 and src = u32(\[src_ptr:4\]_1) from which \[src_ptr\]_1 /// can be recovered by linear combination. /// We do this because `src` needs to be incremented between keccak-f permutations. 
pub src_limbs: [T; RV32_REGISTER_NUM_LIMBS - 1], pub src: T, - /// len <- [len_ptr:4]_1 - /// We store len_limbs[i] = [len_ptr + i + 1]_1 and remaining_len = u32([len_ptr:4]_1) - /// from which [len_ptr]_1 can be recovered by linear combination. + /// len <- \[len_ptr:4\]_1 + /// We store len_limbs\[i\] = \[len_ptr + i + 1\]_1 and remaining_len = u32(\[len_ptr:4\]_1) + /// from which \[len_ptr\]_1 can be recovered by linear combination. /// We do this because `remaining_len` needs to be decremented between keccak-f permutations. pub len_limbs: [T; RV32_REGISTER_NUM_LIMBS - 1], /// The remaining length of the unpadded input, in bytes. @@ -99,8 +99,8 @@ pub struct KeccakMemoryCols { pub register_aux: [MemoryReadAuxCols; KECCAK_REGISTER_READS], pub absorb_reads: [MemoryReadAuxCols; KECCAK_ABSORB_READS], pub digest_writes: [MemoryWriteAuxCols; KECCAK_DIGEST_WRITES], - /// The input bytes are batch read in blocks of [KECCAK_WORD_SIZE] bytes. However - /// if the input length is not a multiple of [KECCAK_WORD_SIZE], we read into + /// The input bytes are batch read in blocks of private constant KECCAK_WORD_SIZE bytes. However + /// if the input length is not a multiple of KECCAK_WORD_SIZE, we read into /// `partial_block` more bytes than we need. On the other hand `block_bytes` expects /// only the partial block of bytes and then the correctly padded bytes. /// We will select between `partial_block` and `block_bytes` for what to read from memory. diff --git a/extensions/native/compiler/src/ir/bits.rs b/extensions/native/compiler/src/ir/bits.rs index 64449a2f9f..746cff10de 100644 --- a/extensions/native/compiler/src/ir/bits.rs +++ b/extensions/native/compiler/src/ir/bits.rs @@ -120,10 +120,10 @@ impl Builder { /// Reverse a list of bits. /// - /// SAFETY: calling this function with `bit_len` greater [`NUM_BITS`] will result in undefined + /// SAFETY: calling this function with `bit_len` greater `NUM_BITS` will result in undefined /// behavior. 
/// - /// Reference: [p3_util::reverse_bits_len] + /// Reference: [`openvm_stark_backend::p3_util`] pub fn reverse_bits_len( &mut self, index_bits: &Array>, @@ -149,10 +149,10 @@ impl Builder { /// Reverse a list of bits inside a circuit. /// - /// SAFETY: calling this function with `bit_len` greater [`NUM_BITS`] will result in undefined + /// SAFETY: calling this function with `bit_len` greater `NUM_BITS` will result in undefined /// behavior. /// - /// Reference: [p3_util::reverse_bits_len] + /// Reference: [`openvm_stark_backend::p3_util`] pub fn reverse_bits_len_circuit( &mut self, index_bits: Vec>, diff --git a/extensions/native/compiler/src/ir/poseidon.rs b/extensions/native/compiler/src/ir/poseidon.rs index 8ae509606b..c4df01dab7 100644 --- a/extensions/native/compiler/src/ir/poseidon.rs +++ b/extensions/native/compiler/src/ir/poseidon.rs @@ -9,7 +9,7 @@ pub const PERMUTATION_WIDTH: usize = 16; impl Builder { /// Applies the Poseidon2 permutation to the given array. /// - /// Reference: [p3_poseidon2::Poseidon2] + /// [Reference](https://docs.rs/p3-poseidon2/latest/p3_poseidon2/struct.Poseidon2.html) pub fn poseidon2_permute(&mut self, array: &Array>) -> Array> { let output = match array { Array::Fixed(values) => { @@ -27,7 +27,7 @@ impl Builder { /// Applies the Poseidon2 permutation to the given array. /// - /// Reference: [p3_poseidon2::Poseidon2] + /// [Reference](https://docs.rs/p3-poseidon2/latest/p3_poseidon2/struct.Poseidon2.html) pub fn poseidon2_permute_mut(&mut self, array: &Array>) { if let Array::Fixed(_) = array { panic!("Poseidon2 permutation is not allowed on fixed arrays"); @@ -40,7 +40,7 @@ impl Builder { /// Applies the Poseidon2 compression function to the given array. 
/// - /// Reference: [p3_symmetric::TruncatedPermutation] + /// [Reference](https://docs.rs/p3-symmetric/latest/p3_symmetric/struct.TruncatedPermutation.html) pub fn poseidon2_compress( &mut self, left: &Array>, @@ -60,7 +60,7 @@ impl Builder { /// Applies the Poseidon2 compression to the given array. /// - /// Reference: [p3_symmetric::TruncatedPermutation] + /// [Reference](https://docs.rs/p3-symmetric/latest/p3_symmetric/struct.TruncatedPermutation.html) pub fn poseidon2_compress_x( &mut self, result: &Array>, @@ -76,7 +76,7 @@ impl Builder { /// Applies the Poseidon2 permutation to the given array. /// - /// Reference: [p3_symmetric::PaddingFreeSponge] + /// [Reference](https://docs.rs/p3-symmetric/latest/p3_symmetric/struct.PaddingFreeSponge.html) pub fn poseidon2_hash(&mut self, array: &Array>) -> Array> { let perm_width = PERMUTATION_WIDTH; let state: Array> = self.dyn_array(perm_width); diff --git a/extensions/native/compiler/src/ir/utils.rs b/extensions/native/compiler/src/ir/utils.rs index 54d420c68d..165eb33037 100644 --- a/extensions/native/compiler/src/ir/utils.rs +++ b/extensions/native/compiler/src/ir/utils.rs @@ -23,7 +23,7 @@ pub fn prime_field_to_usize(x: F) -> usize { impl Builder { /// The generator for the field. /// - /// Reference: [p3_baby_bear::BabyBear] + /// Reference: [`openvm_stark_sdk::p3_baby_bear::BabyBear`] pub fn generator(&mut self) -> Felt { self.eval(C::F::from_canonical_u32(31)) } @@ -141,7 +141,7 @@ impl Builder { /// Exponentiates a variable to a list of reversed bits with a given length. 
/// - /// Reference: [p3_util::reverse_bits_len] + /// Reference: [`openvm_stark_backend::p3_util::reverse_bits_len`] pub fn exp_reverse_bits_len( &mut self, x: V, diff --git a/extensions/native/recursion/src/challenger/duplex.rs b/extensions/native/recursion/src/challenger/duplex.rs index 317f738ee4..180f2c155b 100644 --- a/extensions/native/recursion/src/challenger/duplex.rs +++ b/extensions/native/recursion/src/challenger/duplex.rs @@ -12,7 +12,7 @@ use crate::{ digest::DigestVariable, }; -/// Reference: [p3_challenger::DuplexChallenger] +/// Reference: [`openvm_stark_backend::p3_challenger::DuplexChallenger`] #[derive(Clone)] pub struct DuplexChallengerVariable { pub sponge_state: Array>, diff --git a/extensions/native/recursion/src/challenger/mod.rs b/extensions/native/recursion/src/challenger/mod.rs index 26eccfc3fb..78488ad498 100644 --- a/extensions/native/recursion/src/challenger/mod.rs +++ b/extensions/native/recursion/src/challenger/mod.rs @@ -8,14 +8,14 @@ use crate::digest::DigestVariable; pub mod duplex; pub mod multi_field32; -/// Reference: [p3_challenger::CanObserve]. +/// Reference: [`openvm_stark_backend::p3_challenger::CanObserve`] pub trait CanObserveVariable { fn observe(&mut self, builder: &mut Builder, value: V); fn observe_slice(&mut self, builder: &mut Builder, values: Array); } -/// Reference: [p3_challenger::CanObserve]. +/// Reference: [`openvm_stark_backend::p3_challenger::CanObserve`] pub trait CanObserveDigest { fn observe_digest(&mut self, builder: &mut Builder, value: DigestVariable); } @@ -25,7 +25,7 @@ pub trait CanSampleVariable { fn sample(&mut self, builder: &mut Builder) -> V; } -/// Reference: [p3_challenger::FieldChallenger]. 
+/// Reference: [`openvm_stark_backend::p3_challenger::FieldChallenger`] pub trait FeltChallenger: CanObserveVariable> + CanSampleVariable> + CanSampleBitsVariable { diff --git a/extensions/native/recursion/src/fri/domain.rs b/extensions/native/recursion/src/fri/domain.rs index cb3d2f69cc..b2a3f8a07e 100644 --- a/extensions/native/recursion/src/fri/domain.rs +++ b/extensions/native/recursion/src/fri/domain.rs @@ -7,7 +7,7 @@ use openvm_stark_backend::{ use super::types::FriConfigVariable; use crate::commit::PolynomialSpaceVariable; -/// Reference: [p3_commit::TwoAdicMultiplicativeCoset] +/// Reference: [`openvm_stark_backend::p3_commit::TwoAdicMultiplicativeCoset`] #[derive(DslVariable, Clone)] pub struct TwoAdicMultiplicativeCosetVariable { pub log_n: Usize, diff --git a/extensions/native/recursion/src/fri/mod.rs b/extensions/native/recursion/src/fri/mod.rs index 3f5411a239..615ac11a60 100644 --- a/extensions/native/recursion/src/fri/mod.rs +++ b/extensions/native/recursion/src/fri/mod.rs @@ -27,7 +27,7 @@ pub mod witness; /// /// Currently assumes the index that is accessed is constant. /// -/// Reference: https://github.com/Plonky3/Plonky3/blob/4809fa7bedd9ba8f6f5d3267b1592618e3776c57/fri/src/verifier.rs#L101 +/// Reference: <https://github.com/Plonky3/Plonky3/blob/4809fa7bedd9ba8f6f5d3267b1592618e3776c57/fri/src/verifier.rs#L101> #[allow(clippy::too_many_arguments)] #[allow(unused_variables)] pub fn verify_query( @@ -138,7 +138,7 @@ pub enum NestedOpenedValues { /// /// Assumes the dimensions have already been sorted by tallest first. /// -/// Reference: https://github.com/Plonky3/Plonky3/blob/4809fa7bedd9ba8f6f5d3267b1592618e3776c57/merkle-tree/src/mmcs.rs#L92 +/// Reference: <https://github.com/Plonky3/Plonky3/blob/4809fa7bedd9ba8f6f5d3267b1592618e3776c57/merkle-tree/src/mmcs.rs#L92> #[allow(clippy::type_complexity)] #[allow(unused_variables)] pub fn verify_batch( @@ -255,7 +255,7 @@ pub fn verify_batch( /// /// Assumes the dimensions have already been sorted by tallest first. 
/// -/// Reference: https://github.com/Plonky3/Plonky3/blob/4809fa7bedd9ba8f6f5d3267b1592618e3776c57/merkle-tree/src/mmcs.rs#L92 +/// Reference: <https://github.com/Plonky3/Plonky3/blob/4809fa7bedd9ba8f6f5d3267b1592618e3776c57/merkle-tree/src/mmcs.rs#L92> #[allow(clippy::type_complexity)] #[allow(unused_variables)] pub fn verify_batch_static( diff --git a/extensions/native/recursion/src/halo2/utils.rs b/extensions/native/recursion/src/halo2/utils.rs index 0cbe28a3a5..703f0bf4bb 100644 --- a/extensions/native/recursion/src/halo2/utils.rs +++ b/extensions/native/recursion/src/halo2/utils.rs @@ -149,7 +149,7 @@ fn read_params(k: u32) -> Arc { /// Sort AIRs by their trace height in descending order. This should not be used outside /// static-verifier because a dynamic verifier should support any AIR order. /// This is related to an implementation detail of FieldMerkleTreeMMCS which is used in most configs. -/// Reference: https://github.com/Plonky3/Plonky3/blob/27b3127dab047e07145c38143379edec2960b3e1/merkle-tree/src/merkle_tree.rs#L53 +/// Reference: <https://github.com/Plonky3/Plonky3/blob/27b3127dab047e07145c38143379edec2960b3e1/merkle-tree/src/merkle_tree.rs#L53> pub fn sort_chips( mut air_proof_inputs: Vec>, ) -> Vec> { diff --git a/extensions/native/recursion/src/stark/mod.rs b/extensions/native/recursion/src/stark/mod.rs index f5344dace8..bb322b4386 100644 --- a/extensions/native/recursion/src/stark/mod.rs +++ b/extensions/native/recursion/src/stark/mod.rs @@ -48,7 +48,7 @@ pub struct VerifierProgram { } impl VerifierProgram { - /// Create a new instance of the program for the [BabyBearPoseidon2] config. + /// Create a new instance of the program for the [`openvm_stark_sdk::config::baby_bear_poseidon2`] pub fn build( constants: MultiStarkVerificationAdvice, fri_params: &FriParameters, @@ -60,7 +60,7 @@ impl VerifierProgram { Self::build_with_options(constants, fri_params, options) } - /// Create a new instance of the program for the [BabyBearPoseidon2] config. 
+ /// Create a new instance of the program for the [`openvm_stark_sdk::config::baby_bear_poseidon2`] pub fn build_with_options( constants: MultiStarkVerificationAdvice, fri_params: &FriParameters, diff --git a/extensions/native/recursion/src/utils.rs b/extensions/native/recursion/src/utils.rs index 85a56c2434..0129994835 100644 --- a/extensions/native/recursion/src/utils.rs +++ b/extensions/native/recursion/src/utils.rs @@ -38,7 +38,7 @@ pub fn const_fri_config( } } -/// Reference: https://github.com/Plonky3/Plonky3/blob/622375885320ac6bf3c338001760ed8f2230e3cb/field/src/helpers.rs#L136 +/// Reference: <https://github.com/Plonky3/Plonky3/blob/622375885320ac6bf3c338001760ed8f2230e3cb/field/src/helpers.rs#L136> pub fn reduce_32(builder: &mut Builder, vals: &[Felt]) -> Var { let mut power = C::N::ONE; let result: Var = builder.eval(C::N::ZERO); @@ -50,7 +50,7 @@ pub fn reduce_32(builder: &mut Builder, vals: &[Felt]) -> Va result } -/// Reference: https://github.com/Plonky3/Plonky3/blob/622375885320ac6bf3c338001760ed8f2230e3cb/field/src/helpers.rs#L149 +/// Reference: <https://github.com/Plonky3/Plonky3/blob/622375885320ac6bf3c338001760ed8f2230e3cb/field/src/helpers.rs#L149> pub fn split_32(builder: &mut Builder, val: Var, n: usize) -> Vec> { let bits = builder.num2bits_v_circuit(val, 256); let mut results = Vec::new(); diff --git a/extensions/pairing/guest/src/pairing/mod.rs b/extensions/pairing/guest/src/pairing/mod.rs index f1dadf93f6..25530fefde 100644 --- a/extensions/pairing/guest/src/pairing/mod.rs +++ b/extensions/pairing/guest/src/pairing/mod.rs @@ -30,7 +30,7 @@ pub trait PairingIntrinsics { /// The sextic extension `Fp12` is `Fp2[X] / (X^6 - \xi)`, where `\xi` is a non-residue. 
const XI: Self::Fp2; /// Multiplication constants for the Frobenius map for coefficients in Fp2 c1..=c5 for powers 0..12 - /// FROBENIUS_COEFFS[i][j] = \xi^{(j + 1) * (p^i - 1)/6} when p = 1 (mod 6) + /// FROBENIUS_COEFFS\[i\]\[j\] = \xi^{(j + 1) * (p^i - 1)/6} when p = 1 (mod 6) const FROBENIUS_COEFFS: [[Self::Fp2; 5]; 12]; } diff --git a/extensions/rv32im/circuit/src/adapters/alu.rs b/extensions/rv32im/circuit/src/adapters/alu.rs index 5be72f418f..5c32e4aae2 100644 --- a/extensions/rv32im/circuit/src/adapters/alu.rs +++ b/extensions/rv32im/circuit/src/adapters/alu.rs @@ -32,7 +32,7 @@ use openvm_stark_backend::{ use super::{RV32_CELL_BITS, RV32_REGISTER_NUM_LIMBS}; -/// Reads instructions of the form OP a, b, c, d, e where [a:4]_d = [b:4]_d op [c:4]_e. +/// Reads instructions of the form OP a, b, c, d, e where \[a:4\]_d = \[b:4\]_d op \[c:4\]_e. /// Operand d can only be 1, and e can be either 1 (for register reads) or 0 (when c /// is an immediate). #[derive(Debug)] diff --git a/extensions/rv32im/circuit/src/adapters/branch.rs b/extensions/rv32im/circuit/src/adapters/branch.rs index c169cab4a7..e478ffde2d 100644 --- a/extensions/rv32im/circuit/src/adapters/branch.rs +++ b/extensions/rv32im/circuit/src/adapters/branch.rs @@ -29,7 +29,7 @@ use openvm_stark_backend::{ use super::RV32_REGISTER_NUM_LIMBS; -/// Reads instructions of the form OP a, b, c, d, e where if([a:4]_d op [b:4]_e) pc += c. +/// Reads instructions of the form OP a, b, c, d, e where if(\[a:4\]_d op \[b:4\]_e) pc += c. /// Operands d and e can only be 1. 
#[derive(Debug)] pub struct Rv32BranchAdapterChip { diff --git a/extensions/rv32im/circuit/src/adapters/mul.rs b/extensions/rv32im/circuit/src/adapters/mul.rs index 6b0aabe44f..cca2d8fcbd 100644 --- a/extensions/rv32im/circuit/src/adapters/mul.rs +++ b/extensions/rv32im/circuit/src/adapters/mul.rs @@ -29,7 +29,7 @@ use openvm_stark_backend::{ use super::RV32_REGISTER_NUM_LIMBS; -/// Reads instructions of the form OP a, b, c, d where [a:4]_d = [b:4]_d op [c:4]_d. +/// Reads instructions of the form OP a, b, c, d where \[a:4\]_d = \[b:4\]_d op \[c:4\]_d. /// Operand d can only be 1, and there is no immediate support. #[derive(Debug)] pub struct Rv32MultAdapterChip { diff --git a/extensions/rv32im/circuit/src/adapters/rdwrite.rs b/extensions/rv32im/circuit/src/adapters/rdwrite.rs index 42c9a76117..e1f49924d5 100644 --- a/extensions/rv32im/circuit/src/adapters/rdwrite.rs +++ b/extensions/rv32im/circuit/src/adapters/rdwrite.rs @@ -30,14 +30,14 @@ use openvm_stark_backend::{ use super::RV32_REGISTER_NUM_LIMBS; -/// This adapter doesn't read anything, and writes to [a:4]_d, where d == 1 +/// This adapter doesn't read anything, and writes to \[a:4\]_d, where d == 1 #[derive(Debug)] pub struct Rv32RdWriteAdapterChip { pub air: Rv32RdWriteAdapterAir, _marker: PhantomData, } -/// This adapter doesn't read anything, and **maybe** writes to [a:4]_d, where d == 1 +/// This adapter doesn't read anything, and **maybe** writes to \[a:4\]_d, where d == 1 #[derive(Debug)] pub struct Rv32CondRdWriteAdapterChip { /// Do not use the inner air directly, use `air` instead.