From a885cb94bb42a2fc7373d1443dd1c1aded300a9a Mon Sep 17 00:00:00 2001 From: cchudant Date: Thu, 31 Oct 2024 09:33:36 +0000 Subject: [PATCH 1/9] refactor: move block production to its crate --- Cargo.lock | 49 +++ Cargo.toml | 2 + crates/client/block_production/Cargo.toml | 80 +++++ .../src/close_block.rs | 0 .../src/finalize_execution_state.rs | 281 +++++++++++++++ .../src/lib.rs} | 324 ++---------------- .../src/metrics.rs} | 0 crates/client/devnet/Cargo.toml | 1 + crates/client/devnet/src/lib.rs | 4 +- .../mempool/src/inner/deployed_contracts.rs | 31 ++ .../mempool/src/{inner.rs => inner/mod.rs} | 240 +------------ .../client/mempool/src/inner/nonce_chain.rs | 143 ++++++++ crates/client/mempool/src/inner/tx.rs | 56 +++ crates/client/mempool/src/l1.rs | 1 + crates/client/mempool/src/lib.rs | 3 - crates/node/Cargo.toml | 1 + crates/node/src/service/block_production.rs | 11 +- 17 files changed, 704 insertions(+), 523 deletions(-) create mode 100644 crates/client/block_production/Cargo.toml rename crates/client/{mempool => block_production}/src/close_block.rs (100%) create mode 100644 crates/client/block_production/src/finalize_execution_state.rs rename crates/client/{mempool/src/block_production.rs => block_production/src/lib.rs} (61%) rename crates/client/{mempool/src/block_production_metrics.rs => block_production/src/metrics.rs} (100%) create mode 100644 crates/client/mempool/src/inner/deployed_contracts.rs rename crates/client/mempool/src/{inner.rs => inner/mod.rs} (72%) create mode 100644 crates/client/mempool/src/inner/nonce_chain.rs create mode 100644 crates/client/mempool/src/inner/tx.rs diff --git a/Cargo.lock b/Cargo.lock index 2ddf62a3d..bc3c1bfa9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5303,6 +5303,7 @@ dependencies = [ "log", "mc-analytics", "mc-block-import", + "mc-block-production", "mc-db", "mc-devnet", "mc-eth", @@ -5428,6 +5429,53 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "mc-block-production" +version = "0.7.0" 
+dependencies = [ + "anyhow", + "assert_matches", + "bitvec", + "blockifier", + "env_logger 0.11.5", + "lazy_static", + "log", + "mc-analytics", + "mc-block-import", + "mc-db", + "mc-exec", + "mc-mempool", + "mockall", + "mp-block", + "mp-chain-config", + "mp-class", + "mp-convert", + "mp-receipt", + "mp-state-update", + "mp-transactions", + "mp-utils", + "once_cell", + "opentelemetry", + "opentelemetry-appender-tracing", + "opentelemetry-otlp", + "opentelemetry-semantic-conventions", + "opentelemetry-stdout", + "opentelemetry_sdk", + "proptest", + "proptest-derive", + "rstest 0.18.2", + "serde_json", + "starknet-core 0.11.0", + "starknet-types-core", + "starknet_api", + "thiserror", + "tokio", + "tracing", + "tracing-core", + "tracing-opentelemetry", + "tracing-subscriber", +] + [[package]] name = "mc-db" version = "0.7.0" @@ -5477,6 +5525,7 @@ dependencies = [ "log", "m-cairo-test-contracts", "mc-block-import", + "mc-block-production", "mc-db", "mc-mempool", "mockall", diff --git a/Cargo.toml b/Cargo.toml index 685308d8c..d159192ec 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,6 +24,7 @@ members = [ "crates/proc-macros", "crates/tests", "crates/cairo-test-contracts", + "crates/client/block_production", ] resolver = "2" # Everything except test-related packages, so that they are not compiled when doing `cargo build`. 
@@ -118,6 +119,7 @@ mc-gateway = { path = "crates/client/gateway" } mc-sync = { path = "crates/client/sync" } mc-eth = { path = "crates/client/eth" } mc-mempool = { path = "crates/client/mempool" } +mc-block-production = { path = "crates/client/block_production" } mc-block-import = { path = "crates/client/block_import" } mc-devnet = { path = "crates/client/devnet" } diff --git a/crates/client/block_production/Cargo.toml b/crates/client/block_production/Cargo.toml new file mode 100644 index 000000000..85c852c86 --- /dev/null +++ b/crates/client/block_production/Cargo.toml @@ -0,0 +1,80 @@ +[package] +name = "mc-block-production" +description = "Madara client block production service" +authors.workspace = true +edition.workspace = true +license.workspace = true +repository.workspace = true +version.workspace = true +homepage.workspace = true + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dev-dependencies] + +rstest = { workspace = true } +mc-db = { workspace = true, features = ["testing"] } +tokio = { workspace = true, features = ["rt-multi-thread"] } +proptest.workspace = true +proptest-derive.workspace = true +bitvec.workspace = true +env_logger.workspace = true +blockifier = { workspace = true, features = ["testing"] } +mockall.workspace = true +assert_matches.workspace = true +lazy_static.workspace = true +serde_json.workspace = true + +[features] +testing = ["blockifier/testing", "mc-db/testing", "mockall"] + +[dependencies] + +# Madara +mc-analytics.workspace = true +mc-block-import.workspace = true +mc-db.workspace = true +mc-exec.workspace = true +mc-mempool.workspace = true +mp-block.workspace = true +mp-chain-config.workspace = true +mp-class.workspace = true +mp-convert.workspace = true +mp-receipt.workspace = true +mp-state-update.workspace = true +mp-transactions.workspace = true +mp-utils.workspace = true + +# Starknet +blockifier.workspace = true +starknet-core.workspace = true 
+starknet-types-core.workspace = true +starknet_api.workspace = true + +# Other +anyhow.workspace = true +log.workspace = true +mockall = { workspace = true, optional = true } +thiserror.workspace = true +tokio.workspace = true + +#Instrumentation +once_cell = { workspace = true } +opentelemetry = { workspace = true, features = ["metrics", "logs"] } +opentelemetry-appender-tracing = { workspace = true, default-features = false } +opentelemetry-otlp = { workspace = true, features = [ + "tonic", + "metrics", + "logs", +] } +opentelemetry-semantic-conventions = { workspace = true } +opentelemetry-stdout = { workspace = true } +opentelemetry_sdk = { workspace = true, features = ["rt-tokio", "logs"] } +tracing = { workspace = true } +tracing-core = { workspace = true, default-features = false } +tracing-opentelemetry = { workspace = true } +tracing-subscriber = { workspace = true, features = ["env-filter"] } diff --git a/crates/client/mempool/src/close_block.rs b/crates/client/block_production/src/close_block.rs similarity index 100% rename from crates/client/mempool/src/close_block.rs rename to crates/client/block_production/src/close_block.rs diff --git a/crates/client/block_production/src/finalize_execution_state.rs b/crates/client/block_production/src/finalize_execution_state.rs new file mode 100644 index 000000000..a3112b9ff --- /dev/null +++ b/crates/client/block_production/src/finalize_execution_state.rs @@ -0,0 +1,281 @@ +use std::collections::{hash_map, HashMap}; + +use blockifier::{ + blockifier::transaction_executor::{TransactionExecutor, VisitedSegmentsMapping}, + bouncer::BouncerWeights, + state::{cached_state::StateMaps, state_api::StateReader}, + transaction::errors::TransactionExecutionError, +}; +use mc_db::{db_block_id::DbBlockId, MadaraBackend}; +use mc_mempool::MempoolTransaction; +use mp_convert::ToFelt; +use mp_state_update::{ + ContractStorageDiffItem, DeclaredClassItem, DeployedContractItem, NonceUpdate, ReplacedClassItem, StateDiff, + 
StorageEntry, +}; +use starknet_api::core::ContractAddress; + +use crate::Error; + +fn state_map_to_state_diff( + backend: &MadaraBackend, + on_top_of: &Option, + diff: StateMaps, +) -> Result { + let mut backing_map = HashMap::::default(); + let mut storage_diffs = Vec::::default(); + for ((address, key), value) in diff.storage { + match backing_map.entry(address) { + hash_map::Entry::Vacant(e) => { + e.insert(storage_diffs.len()); + storage_diffs.push(ContractStorageDiffItem { + address: address.to_felt(), + storage_entries: vec![StorageEntry { key: key.to_felt(), value }], + }); + } + hash_map::Entry::Occupied(e) => { + storage_diffs[*e.get()].storage_entries.push(StorageEntry { key: key.to_felt(), value }); + } + } + } + + let mut deprecated_declared_classes = Vec::default(); + for (class_hash, _) in diff.declared_contracts { + if !diff.compiled_class_hashes.contains_key(&class_hash) { + deprecated_declared_classes.push(class_hash.to_felt()); + } + } + + let declared_classes = diff + .compiled_class_hashes + .iter() + .map(|(class_hash, compiled_class_hash)| DeclaredClassItem { + class_hash: class_hash.to_felt(), + compiled_class_hash: compiled_class_hash.to_felt(), + }) + .collect(); + + let nonces = diff + .nonces + .into_iter() + .map(|(contract_address, nonce)| NonceUpdate { + contract_address: contract_address.to_felt(), + nonce: nonce.to_felt(), + }) + .collect(); + + let mut deployed_contracts = Vec::new(); + let mut replaced_classes = Vec::new(); + for (contract_address, new_class_hash) in diff.class_hashes { + let replaced = if let Some(on_top_of) = on_top_of { + backend.get_contract_class_hash_at(on_top_of, &contract_address.to_felt())?.is_some() + } else { + // Executing genesis block: nothing being redefined here + false + }; + if replaced { + replaced_classes.push(ReplacedClassItem { + contract_address: contract_address.to_felt(), + class_hash: new_class_hash.to_felt(), + }) + } else { + deployed_contracts.push(DeployedContractItem { + address: 
contract_address.to_felt(), + class_hash: new_class_hash.to_felt(), + }) + } + } + + Ok(StateDiff { + storage_diffs, + deprecated_declared_classes, + declared_classes, + nonces, + deployed_contracts, + replaced_classes, + }) +} + +pub const BLOCK_STATE_ACCESS_ERR: &str = "Error: The block state should be `Some`."; +fn get_visited_segments( + tx_executor: &mut TransactionExecutor, +) -> Result { + let visited_segments = tx_executor + .block_state + .as_ref() + .expect(BLOCK_STATE_ACCESS_ERR) + .visited_pcs + .iter() + .map(|(class_hash, class_visited_pcs)| -> Result<_, Error> { + let contract_class = tx_executor + .block_state + .as_ref() + .expect(BLOCK_STATE_ACCESS_ERR) + .get_compiled_contract_class(*class_hash) + .map_err(TransactionExecutionError::StateError)?; + Ok((*class_hash, contract_class.get_visited_segments(class_visited_pcs)?)) + }) + .collect::>()?; + + Ok(visited_segments) +} + +pub(crate) fn finalize_execution_state( + _executed_txs: &[MempoolTransaction], + tx_executor: &mut TransactionExecutor, + backend: &MadaraBackend, + on_top_of: &Option, +) -> Result<(StateDiff, VisitedSegmentsMapping, BouncerWeights), Error> { + let state_map = tx_executor + .block_state + .as_mut() + .expect(BLOCK_STATE_ACCESS_ERR) + .to_state_diff() + .map_err(TransactionExecutionError::StateError)?; + let state_update = state_map_to_state_diff(backend, on_top_of, state_map)?; + + let visited_segments = get_visited_segments(tx_executor)?; + + Ok((state_update, visited_segments, *tx_executor.bouncer.get_accumulated_weights())) +} + +#[cfg(test)] +mod test { + use std::{collections::HashMap, sync::Arc}; + + use blockifier::{compiled_class_hash, nonce, state::cached_state::StateMaps, storage_key}; + use mc_db::MadaraBackend; + use mp_chain_config::ChainConfig; + use mp_convert::ToFelt; + use mp_state_update::{ + ContractStorageDiffItem, DeclaredClassItem, DeployedContractItem, NonceUpdate, StateDiff, StorageEntry, + }; + use starknet_api::{ + class_hash, contract_address, + 
core::{ClassHash, ContractAddress, PatriciaKey}, + felt, patricia_key, + }; + use starknet_core::types::Felt; + + #[test] + fn state_map_to_state_diff() { + let backend = MadaraBackend::open_for_testing(Arc::new(ChainConfig::madara_test())); + + let mut nonces = HashMap::new(); + nonces.insert(contract_address!(1u32), nonce!(1)); + nonces.insert(contract_address!(2u32), nonce!(2)); + nonces.insert(contract_address!(3u32), nonce!(3)); + + let mut class_hashes = HashMap::new(); + class_hashes.insert(contract_address!(1u32), class_hash!("0xc1a551")); + class_hashes.insert(contract_address!(2u32), class_hash!("0xc1a552")); + class_hashes.insert(contract_address!(3u32), class_hash!("0xc1a553")); + + let mut storage = HashMap::new(); + storage.insert((contract_address!(1u32), storage_key!(1u32)), felt!(1u32)); + storage.insert((contract_address!(1u32), storage_key!(2u32)), felt!(2u32)); + storage.insert((contract_address!(1u32), storage_key!(3u32)), felt!(3u32)); + + storage.insert((contract_address!(2u32), storage_key!(1u32)), felt!(1u32)); + storage.insert((contract_address!(2u32), storage_key!(2u32)), felt!(2u32)); + storage.insert((contract_address!(2u32), storage_key!(3u32)), felt!(3u32)); + + storage.insert((contract_address!(3u32), storage_key!(1u32)), felt!(1u32)); + storage.insert((contract_address!(3u32), storage_key!(2u32)), felt!(2u32)); + storage.insert((contract_address!(3u32), storage_key!(3u32)), felt!(3u32)); + + let mut compiled_class_hashes = HashMap::new(); + // "0xc1a553" is marked as deprecated by not having a compiled + // class hashe + compiled_class_hashes.insert(class_hash!("0xc1a551"), compiled_class_hash!(0x1)); + compiled_class_hashes.insert(class_hash!("0xc1a552"), compiled_class_hash!(0x2)); + + let mut declared_contracts = HashMap::new(); + declared_contracts.insert(class_hash!("0xc1a551"), true); + declared_contracts.insert(class_hash!("0xc1a552"), true); + declared_contracts.insert(class_hash!("0xc1a553"), true); + + let state_map = 
StateMaps { nonces, class_hashes, storage, compiled_class_hashes, declared_contracts }; + + let storage_diffs = vec![ + ContractStorageDiffItem { + address: felt!(1u32), + storage_entries: vec![ + StorageEntry { key: felt!(1u32), value: Felt::ONE }, + StorageEntry { key: felt!(2u32), value: Felt::TWO }, + StorageEntry { key: felt!(3u32), value: Felt::THREE }, + ], + }, + ContractStorageDiffItem { + address: felt!(2u32), + storage_entries: vec![ + StorageEntry { key: felt!(1u32), value: Felt::ONE }, + StorageEntry { key: felt!(2u32), value: Felt::TWO }, + StorageEntry { key: felt!(3u32), value: Felt::THREE }, + ], + }, + ContractStorageDiffItem { + address: felt!(3u32), + storage_entries: vec![ + StorageEntry { key: felt!(1u32), value: Felt::ONE }, + StorageEntry { key: felt!(2u32), value: Felt::TWO }, + StorageEntry { key: felt!(3u32), value: Felt::THREE }, + ], + }, + ]; + + let deprecated_declared_classes = vec![class_hash!("0xc1a553").to_felt()]; + + let declared_classes = vec![ + DeclaredClassItem { + class_hash: class_hash!("0xc1a551").to_felt(), + compiled_class_hash: compiled_class_hash!(0x1).to_felt(), + }, + DeclaredClassItem { + class_hash: class_hash!("0xc1a552").to_felt(), + compiled_class_hash: compiled_class_hash!(0x2).to_felt(), + }, + ]; + + let nonces = vec![ + NonceUpdate { contract_address: felt!(1u32), nonce: felt!(1u32) }, + NonceUpdate { contract_address: felt!(2u32), nonce: felt!(2u32) }, + NonceUpdate { contract_address: felt!(3u32), nonce: felt!(3u32) }, + ]; + + let deployed_contracts = vec![ + DeployedContractItem { address: felt!(1u32), class_hash: class_hash!("0xc1a551").to_felt() }, + DeployedContractItem { address: felt!(2u32), class_hash: class_hash!("0xc1a552").to_felt() }, + DeployedContractItem { address: felt!(3u32), class_hash: class_hash!("0xc1a553").to_felt() }, + ]; + + let replaced_classes = vec![]; + + let expected = StateDiff { + storage_diffs, + deprecated_declared_classes, + declared_classes, + nonces, + 
deployed_contracts, + replaced_classes, + }; + + let mut actual = super::state_map_to_state_diff(&backend, &Option::<_>::None, state_map).unwrap(); + + actual.storage_diffs.sort_by(|a, b| a.address.cmp(&b.address)); + actual.storage_diffs.iter_mut().for_each(|s| s.storage_entries.sort_by(|a, b| a.key.cmp(&b.key))); + actual.deprecated_declared_classes.sort(); + actual.declared_classes.sort_by(|a, b| a.class_hash.cmp(&b.class_hash)); + actual.nonces.sort_by(|a, b| a.contract_address.cmp(&b.contract_address)); + actual.deployed_contracts.sort_by(|a, b| a.address.cmp(&b.address)); + actual.replaced_classes.sort_by(|a, b| a.contract_address.cmp(&b.contract_address)); + + assert_eq!( + actual, + expected, + "actual: {}\nexpected: {}", + serde_json::to_string_pretty(&actual).unwrap_or_default(), + serde_json::to_string_pretty(&expected).unwrap_or_default() + ); + } +} diff --git a/crates/client/mempool/src/block_production.rs b/crates/client/block_production/src/lib.rs similarity index 61% rename from crates/client/mempool/src/block_production.rs rename to crates/client/block_production/src/lib.rs index 11949f392..7e7b42cf6 100644 --- a/crates/client/mempool/src/block_production.rs +++ b/crates/client/block_production/src/lib.rs @@ -1,40 +1,48 @@ -// TODO: Move this into its own crate. +//! Block production service. +//! +//! # Testing +//! +//! Testing is done in a few places: +//! - devnet has a few tests for declare transactions and basic transfers as of now. This is proobably +//! the simplest place where we could add more tests about block-time, mempool saving to db and such. +//! - e2e tests test a few transactions, with the rpc/gateway in scope too. +//! - js-tests in the CI, not that much in depth +//! - higher level, block production is more hreavily tested (currently outside of the CI) by running the +//! bootstrapper and the kakarot test suite. This is the only place where L1-L2 messaging is really tested +//! as of now. 
We should make better tests around this area. +//! +//! There are no tests in this crate because they would require a proper genesis block. Devnet provides that, +//! so that's where block-production integration tests are the simplest to add. +//! L1-L2 testing is a bit harder to setup, but we should definitely make the testing more comprehensive here. -use crate::block_production_metrics::BlockProductionMetrics; use crate::close_block::close_block; -use crate::header::make_pending_header; -use crate::{L1DataProvider, MempoolProvider, MempoolTransaction}; -use blockifier::blockifier::transaction_executor::{TransactionExecutor, VisitedSegmentsMapping}; +use crate::metrics::BlockProductionMetrics; +use blockifier::blockifier::transaction_executor::TransactionExecutor; use blockifier::bouncer::{Bouncer, BouncerWeights, BuiltinCount}; -use blockifier::state::cached_state::StateMaps; -use blockifier::state::state_api::StateReader; use blockifier::transaction::errors::TransactionExecutionError; use mc_block_import::{BlockImportError, BlockImporter}; -use mc_db::db_block_id::DbBlockId; use mc_db::{MadaraBackend, MadaraStorageError}; use mc_exec::{BlockifierStateAdapter, ExecutionContext}; +use mc_mempool::header::make_pending_header; +use mc_mempool::{L1DataProvider, MempoolProvider}; use mp_block::{BlockId, BlockTag, MadaraPendingBlock}; use mp_class::ConvertedClass; use mp_convert::ToFelt; use mp_receipt::from_blockifier_execution_info; -use mp_state_update::{ - ContractStorageDiffItem, DeclaredClassItem, DeployedContractItem, NonceUpdate, ReplacedClassItem, StateDiff, - StorageEntry, -}; +use mp_state_update::{ContractStorageDiffItem, StateDiff, StorageEntry}; use mp_transactions::TransactionWithHash; use mp_utils::graceful_shutdown; use opentelemetry::KeyValue; -use starknet_api::core::ContractAddress; use starknet_types_core::felt::Felt; use std::borrow::Cow; -use std::collections::hash_map; -use std::collections::HashMap; use std::collections::VecDeque; use 
std::mem; use std::sync::Arc; use std::time::Instant; -use crate::clone_transaction; +mod close_block; +mod finalize_execution_state; +pub mod metrics; #[derive(Default, Clone)] struct ContinueBlockStats { @@ -61,129 +69,6 @@ pub enum Error { #[error("Unexpected error: {0:#}")] Unexpected(Cow<'static, str>), } - -fn state_map_to_state_diff( - backend: &MadaraBackend, - on_top_of: &Option, - diff: StateMaps, -) -> Result { - let mut backing_map = HashMap::::default(); - let mut storage_diffs = Vec::::default(); - for ((address, key), value) in diff.storage { - match backing_map.entry(address) { - hash_map::Entry::Vacant(e) => { - e.insert(storage_diffs.len()); - storage_diffs.push(ContractStorageDiffItem { - address: address.to_felt(), - storage_entries: vec![StorageEntry { key: key.to_felt(), value }], - }); - } - hash_map::Entry::Occupied(e) => { - storage_diffs[*e.get()].storage_entries.push(StorageEntry { key: key.to_felt(), value }); - } - } - } - - let mut deprecated_declared_classes = Vec::default(); - for (class_hash, _) in diff.declared_contracts { - if !diff.compiled_class_hashes.contains_key(&class_hash) { - deprecated_declared_classes.push(class_hash.to_felt()); - } - } - - let declared_classes = diff - .compiled_class_hashes - .iter() - .map(|(class_hash, compiled_class_hash)| DeclaredClassItem { - class_hash: class_hash.to_felt(), - compiled_class_hash: compiled_class_hash.to_felt(), - }) - .collect(); - - let nonces = diff - .nonces - .into_iter() - .map(|(contract_address, nonce)| NonceUpdate { - contract_address: contract_address.to_felt(), - nonce: nonce.to_felt(), - }) - .collect(); - - let mut deployed_contracts = Vec::new(); - let mut replaced_classes = Vec::new(); - for (contract_address, new_class_hash) in diff.class_hashes { - let replaced = if let Some(on_top_of) = on_top_of { - backend.get_contract_class_hash_at(on_top_of, &contract_address.to_felt())?.is_some() - } else { - // Executing genesis block: nothing being redefined here - false - 
}; - if replaced { - replaced_classes.push(ReplacedClassItem { - contract_address: contract_address.to_felt(), - class_hash: new_class_hash.to_felt(), - }) - } else { - deployed_contracts.push(DeployedContractItem { - address: contract_address.to_felt(), - class_hash: new_class_hash.to_felt(), - }) - } - } - - Ok(StateDiff { - storage_diffs, - deprecated_declared_classes, - declared_classes, - nonces, - deployed_contracts, - replaced_classes, - }) -} - -pub const BLOCK_STATE_ACCESS_ERR: &str = "Error: The block state should be `Some`."; -fn get_visited_segments( - tx_executor: &mut TransactionExecutor, -) -> Result { - let visited_segments = tx_executor - .block_state - .as_ref() - .expect(BLOCK_STATE_ACCESS_ERR) - .visited_pcs - .iter() - .map(|(class_hash, class_visited_pcs)| -> Result<_, Error> { - let contract_class = tx_executor - .block_state - .as_ref() - .expect(BLOCK_STATE_ACCESS_ERR) - .get_compiled_contract_class(*class_hash) - .map_err(TransactionExecutionError::StateError)?; - Ok((*class_hash, contract_class.get_visited_segments(class_visited_pcs)?)) - }) - .collect::>()?; - - Ok(visited_segments) -} - -fn finalize_execution_state( - _executed_txs: &[MempoolTransaction], - tx_executor: &mut TransactionExecutor, - backend: &MadaraBackend, - on_top_of: &Option, -) -> Result<(StateDiff, VisitedSegmentsMapping, BouncerWeights), Error> { - let state_map = tx_executor - .block_state - .as_mut() - .expect(BLOCK_STATE_ACCESS_ERR) - .to_state_diff() - .map_err(TransactionExecutionError::StateError)?; - let state_update = state_map_to_state_diff(backend, on_top_of, state_map)?; - - let visited_segments = get_visited_segments(tx_executor)?; - - Ok((state_update, visited_segments, *tx_executor.bouncer.get_accumulated_weights())) -} - /// The block production task consumes transactions from the mempool in batches. /// This is to allow optimistic concurrency. 
However, the block may get full during batch execution, /// and we need to re-add the transactions back into the mempool. @@ -264,6 +149,10 @@ impl BlockProductionTask { // This does not need to be outside the loop, but that saves an allocation let mut executed_txs = Vec::with_capacity(batch_size); + // Cloning transactions: That's a lot of cloning, but we're kind of forced to do that because blockifier takes + // a `&[Transaction]` slice. In addition, declare transactions have their class behind an Arc. As a result, the + // `Transaction` struct, as of blockifier 0.8.0-rc.3, is only 320 bytes. So we'll consider that cloning is + // fine! loop { // Take transactions from mempool. let to_take = batch_size.saturating_sub(txs_to_process.len()); @@ -271,8 +160,7 @@ impl BlockProductionTask { if to_take > 0 { self.mempool.take_txs_chunk(/* extend */ &mut txs_to_process, batch_size); - txs_to_process_blockifier - .extend(txs_to_process.iter().skip(cur_len).map(|tx| clone_transaction(&tx.tx))); + txs_to_process_blockifier.extend(txs_to_process.iter().skip(cur_len).map(|tx| tx.clone_tx())); } if txs_to_process.is_empty() { @@ -309,8 +197,8 @@ impl BlockProductionTask { self.block .inner .receipts - .push(from_blockifier_execution_info(&execution_info, &clone_transaction(&mempool_tx.tx))); - let converted_tx = TransactionWithHash::from(clone_transaction(&mempool_tx.tx)); // TODO: too many tx clones! + .push(from_blockifier_execution_info(&execution_info, &mempool_tx.clone_tx())); + let converted_tx = TransactionWithHash::from(mempool_tx.clone_tx()); self.block.info.tx_hashes.push(converted_tx.hash); self.block.inner.transactions.push(converted_tx.transaction); } @@ -347,8 +235,13 @@ impl BlockProductionTask { .state .on_top_of_block_id; - let (state_diff, _visited_segments, _weights) = - finalize_execution_state(&executed_txs, &mut self.executor, &self.backend, &on_top_of)?; + // TODO: save visited segments for SNOS. 
+ let (state_diff, _visited_segments, _weights) = finalize_execution_state::finalize_execution_state( + &executed_txs, + &mut self.executor, + &self.backend, + &on_top_of, + )?; tracing::debug!( "Finished tick with {} new transactions, now at {} - re-adding {} txs to mempool", @@ -561,144 +454,3 @@ impl BlockProductionTask { self.executor.block_context.block_info().block_number.0 } } - -#[cfg(test)] -mod test { - use std::{collections::HashMap, sync::Arc}; - - use blockifier::{compiled_class_hash, nonce, state::cached_state::StateMaps, storage_key}; - use mc_db::MadaraBackend; - use mp_chain_config::ChainConfig; - use mp_convert::ToFelt; - use mp_state_update::{ - ContractStorageDiffItem, DeclaredClassItem, DeployedContractItem, NonceUpdate, StateDiff, StorageEntry, - }; - use starknet_api::{ - class_hash, contract_address, - core::{ClassHash, ContractAddress, PatriciaKey}, - felt, patricia_key, - }; - use starknet_core::types::Felt; - - #[test] - fn state_map_to_state_diff() { - let backend = MadaraBackend::open_for_testing(Arc::new(ChainConfig::madara_test())); - - let mut nonces = HashMap::new(); - nonces.insert(contract_address!(1u32), nonce!(1)); - nonces.insert(contract_address!(2u32), nonce!(2)); - nonces.insert(contract_address!(3u32), nonce!(3)); - - let mut class_hashes = HashMap::new(); - class_hashes.insert(contract_address!(1u32), class_hash!("0xc1a551")); - class_hashes.insert(contract_address!(2u32), class_hash!("0xc1a552")); - class_hashes.insert(contract_address!(3u32), class_hash!("0xc1a553")); - - let mut storage = HashMap::new(); - storage.insert((contract_address!(1u32), storage_key!(1u32)), felt!(1u32)); - storage.insert((contract_address!(1u32), storage_key!(2u32)), felt!(2u32)); - storage.insert((contract_address!(1u32), storage_key!(3u32)), felt!(3u32)); - - storage.insert((contract_address!(2u32), storage_key!(1u32)), felt!(1u32)); - storage.insert((contract_address!(2u32), storage_key!(2u32)), felt!(2u32)); - 
storage.insert((contract_address!(2u32), storage_key!(3u32)), felt!(3u32)); - - storage.insert((contract_address!(3u32), storage_key!(1u32)), felt!(1u32)); - storage.insert((contract_address!(3u32), storage_key!(2u32)), felt!(2u32)); - storage.insert((contract_address!(3u32), storage_key!(3u32)), felt!(3u32)); - - let mut compiled_class_hashes = HashMap::new(); - // "0xc1a553" is marked as deprecated by not having a compiled - // class hashe - compiled_class_hashes.insert(class_hash!("0xc1a551"), compiled_class_hash!(0x1)); - compiled_class_hashes.insert(class_hash!("0xc1a552"), compiled_class_hash!(0x2)); - - let mut declared_contracts = HashMap::new(); - declared_contracts.insert(class_hash!("0xc1a551"), true); - declared_contracts.insert(class_hash!("0xc1a552"), true); - declared_contracts.insert(class_hash!("0xc1a553"), true); - - let state_map = StateMaps { nonces, class_hashes, storage, compiled_class_hashes, declared_contracts }; - - let storage_diffs = vec![ - ContractStorageDiffItem { - address: felt!(1u32), - storage_entries: vec![ - StorageEntry { key: felt!(1u32), value: Felt::ONE }, - StorageEntry { key: felt!(2u32), value: Felt::TWO }, - StorageEntry { key: felt!(3u32), value: Felt::THREE }, - ], - }, - ContractStorageDiffItem { - address: felt!(2u32), - storage_entries: vec![ - StorageEntry { key: felt!(1u32), value: Felt::ONE }, - StorageEntry { key: felt!(2u32), value: Felt::TWO }, - StorageEntry { key: felt!(3u32), value: Felt::THREE }, - ], - }, - ContractStorageDiffItem { - address: felt!(3u32), - storage_entries: vec![ - StorageEntry { key: felt!(1u32), value: Felt::ONE }, - StorageEntry { key: felt!(2u32), value: Felt::TWO }, - StorageEntry { key: felt!(3u32), value: Felt::THREE }, - ], - }, - ]; - - let deprecated_declared_classes = vec![class_hash!("0xc1a553").to_felt()]; - - let declared_classes = vec![ - DeclaredClassItem { - class_hash: class_hash!("0xc1a551").to_felt(), - compiled_class_hash: compiled_class_hash!(0x1).to_felt(), - }, - 
DeclaredClassItem { - class_hash: class_hash!("0xc1a552").to_felt(), - compiled_class_hash: compiled_class_hash!(0x2).to_felt(), - }, - ]; - - let nonces = vec![ - NonceUpdate { contract_address: felt!(1u32), nonce: felt!(1u32) }, - NonceUpdate { contract_address: felt!(2u32), nonce: felt!(2u32) }, - NonceUpdate { contract_address: felt!(3u32), nonce: felt!(3u32) }, - ]; - - let deployed_contracts = vec![ - DeployedContractItem { address: felt!(1u32), class_hash: class_hash!("0xc1a551").to_felt() }, - DeployedContractItem { address: felt!(2u32), class_hash: class_hash!("0xc1a552").to_felt() }, - DeployedContractItem { address: felt!(3u32), class_hash: class_hash!("0xc1a553").to_felt() }, - ]; - - let replaced_classes = vec![]; - - let expected = StateDiff { - storage_diffs, - deprecated_declared_classes, - declared_classes, - nonces, - deployed_contracts, - replaced_classes, - }; - - let mut actual = super::state_map_to_state_diff(&backend, &Option::<_>::None, state_map).unwrap(); - - actual.storage_diffs.sort_by(|a, b| a.address.cmp(&b.address)); - actual.storage_diffs.iter_mut().for_each(|s| s.storage_entries.sort_by(|a, b| a.key.cmp(&b.key))); - actual.deprecated_declared_classes.sort(); - actual.declared_classes.sort_by(|a, b| a.class_hash.cmp(&b.class_hash)); - actual.nonces.sort_by(|a, b| a.contract_address.cmp(&b.contract_address)); - actual.deployed_contracts.sort_by(|a, b| a.address.cmp(&b.address)); - actual.replaced_classes.sort_by(|a, b| a.contract_address.cmp(&b.contract_address)); - - assert_eq!( - actual, - expected, - "actual: {}\nexpected: {}", - serde_json::to_string_pretty(&actual).unwrap_or_default(), - serde_json::to_string_pretty(&expected).unwrap_or_default() - ); - } -} diff --git a/crates/client/mempool/src/block_production_metrics.rs b/crates/client/block_production/src/metrics.rs similarity index 100% rename from crates/client/mempool/src/block_production_metrics.rs rename to crates/client/block_production/src/metrics.rs diff --git 
a/crates/client/devnet/Cargo.toml b/crates/client/devnet/Cargo.toml index 744846ab6..c7c724e10 100644 --- a/crates/client/devnet/Cargo.toml +++ b/crates/client/devnet/Cargo.toml @@ -19,6 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] rstest = { workspace = true } mc-db = { workspace = true, features = ["testing"] } mc-mempool = { workspace = true, features = ["testing"] } +mc-block-production = { workspace = true, features = ["testing"] } tokio = { workspace = true, features = ["rt-multi-thread"] } proptest.workspace = true proptest-derive.workspace = true diff --git a/crates/client/devnet/src/lib.rs b/crates/client/devnet/src/lib.rs index 915bc76c7..5490b9978 100644 --- a/crates/client/devnet/src/lib.rs +++ b/crates/client/devnet/src/lib.rs @@ -187,9 +187,9 @@ mod tests { use super::*; use assert_matches::assert_matches; use mc_block_import::{BlockImporter, BlockValidationContext}; + use mc_block_production::metrics::BlockProductionMetrics; + use mc_block_production::BlockProductionTask; use mc_db::MadaraBackend; - use mc_mempool::block_production::BlockProductionTask; - use mc_mempool::block_production_metrics::BlockProductionMetrics; use mc_mempool::MempoolProvider; use mc_mempool::{transaction_hash, L1DataProvider, Mempool, MockL1DataProvider}; diff --git a/crates/client/mempool/src/inner/deployed_contracts.rs b/crates/client/mempool/src/inner/deployed_contracts.rs new file mode 100644 index 000000000..e9f307394 --- /dev/null +++ b/crates/client/mempool/src/inner/deployed_contracts.rs @@ -0,0 +1,31 @@ +use std::collections::{hash_map, HashMap}; + +use starknet_api::core::ContractAddress; + +/// This is used for quickly checking if the contract has been deployed for the same block it is invoked. +/// When force inserting transaction, it may happen that we run into a duplicate deploy_account transaction. Keep a count for that purpose. 
+#[derive(Debug, Clone, Default)] +pub struct DeployedContracts(HashMap); +impl DeployedContracts { + pub fn decrement(&mut self, address: ContractAddress) { + match self.0.entry(address) { + hash_map::Entry::Occupied(mut entry) => { + *entry.get_mut() -= 1; + if entry.get() == &0 { + entry.remove(); + } + } + hash_map::Entry::Vacant(_) => unreachable!("invariant violated"), + } + } + pub fn increment(&mut self, address: ContractAddress) { + *self.0.entry(address).or_insert(0) += 1 + } + #[cfg(test)] + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + pub fn contains(&self, address: &ContractAddress) -> bool { + self.0.contains_key(address) + } +} diff --git a/crates/client/mempool/src/inner.rs b/crates/client/mempool/src/inner/mod.rs similarity index 72% rename from crates/client/mempool/src/inner.rs rename to crates/client/mempool/src/inner/mod.rs index c0cb62698..a081a6531 100644 --- a/crates/client/mempool/src/inner.rs +++ b/crates/client/mempool/src/inner/mod.rs @@ -2,207 +2,21 @@ //! This is the chokepoint for all insertions and popping, as such, we want to make it as fast as possible. //! Insertion and popping should be O(log n). //! We also really don't want to poison the lock by panicking. -//! -//! TODO: mempool size limits -//! TODO(perf): should we box the MempoolTransaction? 
-use crate::{clone_transaction, contract_addr, nonce, tx_hash}; use blockifier::transaction::account_transaction::AccountTransaction; use blockifier::transaction::transaction_execution::Transaction; -use core::fmt; -use mc_exec::execution::TxInfo; -use mp_class::ConvertedClass; -use mp_convert::FeltHexDisplay; -use starknet_api::{ - core::{ContractAddress, Nonce}, - transaction::TransactionHash, -}; +use deployed_contracts::DeployedContracts; +use nonce_chain::{InsertedPosition, NonceChain, NonceChainNewState, ReplacedState}; +use starknet_api::core::ContractAddress; use std::{ cmp, - collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap}, - iter, - time::SystemTime, + collections::{hash_map, BTreeSet, HashMap}, }; -pub type ArrivedAtTimestamp = SystemTime; - -pub struct MempoolTransaction { - pub tx: Transaction, - pub arrived_at: ArrivedAtTimestamp, - pub converted_class: Option, -} - -impl fmt::Debug for MempoolTransaction { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("MempoolTransaction") - .field("tx_hash", &self.tx_hash().hex_display()) - .field("nonce", &self.nonce().hex_display()) - .field("contract_address", &self.contract_address().hex_display()) - .field("tx_type", &self.tx.tx_type()) - .field("arrived_at", &self.arrived_at) - .finish() - } -} - -impl Clone for MempoolTransaction { - fn clone(&self) -> Self { - Self { - tx: clone_transaction(&self.tx), - arrived_at: self.arrived_at, - converted_class: self.converted_class.clone(), - } - } -} - -impl MempoolTransaction { - pub fn nonce(&self) -> Nonce { - nonce(&self.tx) - } - pub fn contract_address(&self) -> ContractAddress { - contract_addr(&self.tx) - } - pub fn tx_hash(&self) -> TransactionHash { - tx_hash(&self.tx) - } -} - -#[derive(Debug)] -struct OrderMempoolTransactionByNonce(MempoolTransaction); - -impl PartialEq for OrderMempoolTransactionByNonce { - fn eq(&self, other: &Self) -> bool { - self.cmp(other).is_eq() - } -} -impl Eq for 
OrderMempoolTransactionByNonce {} -impl Ord for OrderMempoolTransactionByNonce { - fn cmp(&self, other: &Self) -> cmp::Ordering { - self.0.nonce().cmp(&other.0.nonce()) - } -} -impl PartialOrd for OrderMempoolTransactionByNonce { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -/// Invariants: -/// - front_nonce, front_arrived_at and front_tx_hash must match the front transaction timestamp. -/// - No nonce chain should ever be empty in the mempool. -#[derive(Debug)] -pub struct NonceChain { - /// Use a BTreeMap to so that we can use the entry api. - transactions: BTreeMap, - front_arrived_at: ArrivedAtTimestamp, - front_nonce: Nonce, - front_tx_hash: TransactionHash, -} - -#[derive(Eq, PartialEq, Debug)] -pub enum InsertedPosition { - Front { former_head_arrived_at: ArrivedAtTimestamp }, - Other, -} - -#[derive(Eq, PartialEq, Debug)] -pub enum ReplacedState { - Replaced, - NotReplaced, -} - -#[derive(Eq, PartialEq, Debug)] -pub enum NonceChainNewState { - Empty, - NotEmpty, -} - -impl NonceChain { - pub fn new_with_first_tx(tx: MempoolTransaction) -> Self { - Self { - front_arrived_at: tx.arrived_at, - front_tx_hash: tx.tx_hash(), - front_nonce: tx.nonce(), - transactions: iter::once((OrderMempoolTransactionByNonce(tx), ())).collect(), - } - } - - #[cfg(test)] - pub fn check_invariants(&self) { - assert!(!self.transactions.is_empty()); - let (front, _) = self.transactions.first_key_value().unwrap(); - assert_eq!(front.0.tx_hash(), self.front_tx_hash); - assert_eq!(front.0.nonce(), self.front_nonce); - assert_eq!(front.0.arrived_at, self.front_arrived_at); - } - - /// Returns where in the chain it was inserted. - /// When `force` is `true`, this function should never return any error. 
- pub fn insert( - &mut self, - mempool_tx: MempoolTransaction, - force: bool, - ) -> Result<(InsertedPosition, ReplacedState), TxInsersionError> { - let mempool_tx_arrived_at = mempool_tx.arrived_at; - let mempool_tx_nonce = mempool_tx.nonce(); - let mempool_tx_hash = mempool_tx.tx_hash(); - - let replaced = if force { - if self.transactions.insert(OrderMempoolTransactionByNonce(mempool_tx), ()).is_some() { - ReplacedState::Replaced - } else { - ReplacedState::NotReplaced - } - } else { - match self.transactions.entry(OrderMempoolTransactionByNonce(mempool_tx)) { - btree_map::Entry::Occupied(entry) => { - // duplicate nonce, either it's because the hash is duplicated or nonce conflict with another tx. - if entry.key().0.tx_hash() == mempool_tx_hash { - return Err(TxInsersionError::DuplicateTxn); - } else { - return Err(TxInsersionError::NonceConflict); - } - } - btree_map::Entry::Vacant(entry) => *entry.insert(()), - } - - ReplacedState::NotReplaced - }; - - let position = if self.front_nonce >= mempool_tx_nonce { - // We insrted at the front here - let former_head_arrived_at = core::mem::replace(&mut self.front_arrived_at, mempool_tx_arrived_at); - self.front_nonce = mempool_tx_nonce; - self.front_tx_hash = mempool_tx_hash; - InsertedPosition::Front { former_head_arrived_at } - } else { - InsertedPosition::Other - }; - - #[cfg(debug_assertions)] // unknown field `front_tx_hash` in release if debug_assert_eq is used - assert_eq!( - self.transactions.first_key_value().expect("Getting the first tx").0 .0.tx_hash(), - self.front_tx_hash - ); - - Ok((position, replaced)) - } - - pub fn pop(&mut self) -> (MempoolTransaction, NonceChainNewState) { - // TODO(perf): avoid double lookup - let (tx, _) = self.transactions.pop_first().expect("Nonce chain should not be empty"); - if let Some((new_front, _)) = self.transactions.first_key_value() { - self.front_arrived_at = new_front.0.arrived_at; - #[cfg(debug_assertions)] - { - self.front_tx_hash = new_front.0.tx_hash(); - } - 
self.front_nonce = new_front.0.nonce(); - (tx.0, NonceChainNewState::NotEmpty) - } else { - (tx.0, NonceChainNewState::Empty) - } - } -} +mod deployed_contracts; +mod nonce_chain; +mod tx; +pub use tx::*; #[derive(Clone, Debug)] struct AccountOrderedByTimestamp { @@ -229,34 +43,6 @@ impl PartialOrd for AccountOrderedByTimestamp { } } -/// This is used for quickly checking if the contract has been deployed for the same block it is invoked. -/// When force inserting transaction, it may happen that we run into a duplicate deploy_account transaction. Keep a count for that purpose. -#[derive(Debug, Clone, Default)] -struct DeployedContracts(HashMap); -impl DeployedContracts { - fn decrement(&mut self, address: ContractAddress) { - match self.0.entry(address) { - hash_map::Entry::Occupied(mut entry) => { - *entry.get_mut() -= 1; - if entry.get() == &0 { - entry.remove(); - } - } - hash_map::Entry::Vacant(_) => unreachable!("invariant violated"), - } - } - fn increment(&mut self, address: ContractAddress) { - *self.0.entry(address).or_insert(0) += 1 - } - #[cfg(test)] - fn is_empty(&self) -> bool { - self.0.is_empty() - } - fn contains(&self, address: &ContractAddress) -> bool { - self.0.contains_key(address) - } -} - #[derive(Default, Debug)] /// Invariants: /// - Every nonce chain in `nonce_chains` should have a one to one match with `tx_queue`. 
@@ -420,18 +206,22 @@ mod tests { use proptest::prelude::*; use proptest_derive::Arbitrary; use starknet_api::{ - core::{calculate_contract_address, ChainId}, + core::{calculate_contract_address, ChainId, Nonce}, data_availability::DataAvailabilityMode, transaction::{ ContractAddressSalt, DeclareTransactionV3, DeployAccountTransactionV3, InvokeTransactionV3, Resource, - ResourceBounds, ResourceBoundsMapping, TransactionHasher, TransactionVersion, + ResourceBounds, ResourceBoundsMapping, TransactionHash, TransactionHasher, TransactionVersion, }, }; use starknet_types_core::felt::Felt; use blockifier::abi::abi_utils::selector_from_name; use starknet_api::transaction::Fee; - use std::{collections::HashSet, fmt, time::Duration}; + use std::{ + collections::HashSet, + fmt, + time::{Duration, SystemTime}, + }; lazy_static::lazy_static! { static ref DUMMY_CLASS: ClassInfo = { diff --git a/crates/client/mempool/src/inner/nonce_chain.rs b/crates/client/mempool/src/inner/nonce_chain.rs new file mode 100644 index 000000000..a560770d8 --- /dev/null +++ b/crates/client/mempool/src/inner/nonce_chain.rs @@ -0,0 +1,143 @@ +use super::tx::{ArrivedAtTimestamp, MempoolTransaction}; +use crate::TxInsersionError; +use starknet_api::{core::Nonce, transaction::TransactionHash}; +use std::collections::{btree_map, BTreeMap}; +use std::{cmp, iter}; + +#[derive(Debug)] +pub struct OrderMempoolTransactionByNonce(pub MempoolTransaction); + +impl PartialEq for OrderMempoolTransactionByNonce { + fn eq(&self, other: &Self) -> bool { + self.cmp(other).is_eq() + } +} +impl Eq for OrderMempoolTransactionByNonce {} +impl Ord for OrderMempoolTransactionByNonce { + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.0.nonce().cmp(&other.0.nonce()) + } +} +impl PartialOrd for OrderMempoolTransactionByNonce { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +/// Invariants: +/// - front_nonce, front_arrived_at and front_tx_hash must match the front transaction 
timestamp. +/// - No nonce chain should ever be empty in the mempool. +#[derive(Debug)] +pub struct NonceChain { + /// Use a BTreeMap to so that we can use the entry api. + pub(crate) transactions: BTreeMap, + pub(crate) front_arrived_at: ArrivedAtTimestamp, + pub(crate) front_nonce: Nonce, + pub(crate) front_tx_hash: TransactionHash, +} + +#[derive(Eq, PartialEq, Debug)] +pub enum InsertedPosition { + Front { former_head_arrived_at: ArrivedAtTimestamp }, + Other, +} + +#[derive(Eq, PartialEq, Debug)] +pub enum ReplacedState { + Replaced, + NotReplaced, +} + +#[derive(Eq, PartialEq, Debug)] +pub enum NonceChainNewState { + Empty, + NotEmpty, +} + +impl NonceChain { + pub fn new_with_first_tx(tx: MempoolTransaction) -> Self { + Self { + front_arrived_at: tx.arrived_at, + front_tx_hash: tx.tx_hash(), + front_nonce: tx.nonce(), + transactions: iter::once((OrderMempoolTransactionByNonce(tx), ())).collect(), + } + } + + #[cfg(test)] + pub fn check_invariants(&self) { + assert!(!self.transactions.is_empty()); + let (front, _) = self.transactions.first_key_value().unwrap(); + assert_eq!(front.0.tx_hash(), self.front_tx_hash); + assert_eq!(front.0.nonce(), self.front_nonce); + assert_eq!(front.0.arrived_at, self.front_arrived_at); + } + + /// Returns where in the chain it was inserted. + /// When `force` is `true`, this function should never return any error. 
+ pub fn insert( + &mut self, + mempool_tx: MempoolTransaction, + force: bool, + ) -> Result<(InsertedPosition, ReplacedState), TxInsersionError> { + let mempool_tx_arrived_at = mempool_tx.arrived_at; + let mempool_tx_nonce = mempool_tx.nonce(); + let mempool_tx_hash = mempool_tx.tx_hash(); + + let replaced = if force { + if self.transactions.insert(OrderMempoolTransactionByNonce(mempool_tx), ()).is_some() { + ReplacedState::Replaced + } else { + ReplacedState::NotReplaced + } + } else { + match self.transactions.entry(OrderMempoolTransactionByNonce(mempool_tx)) { + btree_map::Entry::Occupied(entry) => { + // duplicate nonce, either it's because the hash is duplicated or nonce conflict with another tx. + if entry.key().0.tx_hash() == mempool_tx_hash { + return Err(TxInsersionError::DuplicateTxn); + } else { + return Err(TxInsersionError::NonceConflict); + } + } + btree_map::Entry::Vacant(entry) => *entry.insert(()), + } + + ReplacedState::NotReplaced + }; + + let position = if self.front_nonce >= mempool_tx_nonce { + // We insrted at the front here + let former_head_arrived_at = core::mem::replace(&mut self.front_arrived_at, mempool_tx_arrived_at); + self.front_nonce = mempool_tx_nonce; + self.front_tx_hash = mempool_tx_hash; + InsertedPosition::Front { former_head_arrived_at } + } else { + InsertedPosition::Other + }; + + #[cfg(debug_assertions)] // unknown field `front_tx_hash` in release if debug_assert_eq is used + assert_eq!( + self.transactions.first_key_value().expect("Getting the first tx").0 .0.tx_hash(), + self.front_tx_hash + ); + + Ok((position, replaced)) + } + + pub fn pop(&mut self) -> (MempoolTransaction, NonceChainNewState) { + // TODO(perf): avoid double lookup + let (tx, _) = self.transactions.pop_first().expect("Nonce chain should not be empty"); + if let Some((new_front, _)) = self.transactions.first_key_value() { + self.front_arrived_at = new_front.0.arrived_at; + #[cfg(debug_assertions)] + { + self.front_tx_hash = new_front.0.tx_hash(); + } + 
self.front_nonce = new_front.0.nonce(); + (tx.0, NonceChainNewState::NotEmpty) + } else { + (tx.0, NonceChainNewState::Empty) + } + } +} diff --git a/crates/client/mempool/src/inner/tx.rs b/crates/client/mempool/src/inner/tx.rs new file mode 100644 index 000000000..a214021dc --- /dev/null +++ b/crates/client/mempool/src/inner/tx.rs @@ -0,0 +1,56 @@ +use blockifier::transaction::transaction_execution::Transaction; +use mc_exec::execution::TxInfo; +use mp_class::ConvertedClass; +use mp_convert::FeltHexDisplay; +use starknet_api::{ + core::{ContractAddress, Nonce}, + transaction::TransactionHash, +}; +use std::{fmt, time::SystemTime}; + +use crate::{clone_transaction, contract_addr, nonce, tx_hash}; + +pub type ArrivedAtTimestamp = SystemTime; + +pub struct MempoolTransaction { + pub tx: Transaction, + pub arrived_at: ArrivedAtTimestamp, + pub converted_class: Option, +} + +impl fmt::Debug for MempoolTransaction { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MempoolTransaction") + .field("tx_hash", &self.tx_hash().hex_display()) + .field("nonce", &self.nonce().hex_display()) + .field("contract_address", &self.contract_address().hex_display()) + .field("tx_type", &self.tx.tx_type()) + .field("arrived_at", &self.arrived_at) + .finish() + } +} + +impl Clone for MempoolTransaction { + fn clone(&self) -> Self { + Self { + tx: clone_transaction(&self.tx), + arrived_at: self.arrived_at, + converted_class: self.converted_class.clone(), + } + } +} + +impl MempoolTransaction { + pub fn clone_tx(&self) -> Transaction { + clone_transaction(&self.tx) + } + pub fn nonce(&self) -> Nonce { + nonce(&self.tx) + } + pub fn contract_address(&self) -> ContractAddress { + contract_addr(&self.tx) + } + pub fn tx_hash(&self) -> TransactionHash { + tx_hash(&self.tx) + } +} diff --git a/crates/client/mempool/src/l1.rs b/crates/client/mempool/src/l1.rs index 3e7c86211..b3bd98c6e 100644 --- a/crates/client/mempool/src/l1.rs +++ b/crates/client/mempool/src/l1.rs 
@@ -1,3 +1,4 @@ +//! TODO: this should be in the backend use mp_block::header::{GasPrices, L1DataAvailabilityMode}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; diff --git a/crates/client/mempool/src/lib.rs b/crates/client/mempool/src/lib.rs index 27f2edc43..76c513544 100644 --- a/crates/client/mempool/src/lib.rs +++ b/crates/client/mempool/src/lib.rs @@ -39,9 +39,6 @@ pub use inner::{ArrivedAtTimestamp, MempoolTransaction}; pub use l1::MockL1DataProvider; pub use l1::{GasPriceProvider, L1DataProvider}; -pub mod block_production; -pub mod block_production_metrics; -mod close_block; pub mod header; mod inner; mod l1; diff --git a/crates/node/Cargo.toml b/crates/node/Cargo.toml index 2e7daf1ff..3084766ad 100644 --- a/crates/node/Cargo.toml +++ b/crates/node/Cargo.toml @@ -23,6 +23,7 @@ name = "madara" # Madara mc-analytics = { workspace = true } mc-block-import = { workspace = true } +mc-block-production = { workspace = true } mc-db = { workspace = true } mc-devnet = { workspace = true } mc-eth = { workspace = true } diff --git a/crates/node/src/service/block_production.rs b/crates/node/src/service/block_production.rs index 87a3127d0..3d9f0dc05 100644 --- a/crates/node/src/service/block_production.rs +++ b/crates/node/src/service/block_production.rs @@ -1,18 +1,15 @@ -use std::{io::Write, sync::Arc}; - +use crate::cli::block_production::BlockProductionParams; use anyhow::Context; use mc_block_import::{BlockImporter, BlockValidationContext}; +use mc_block_production::{metrics::BlockProductionMetrics, BlockProductionTask}; use mc_db::{DatabaseService, MadaraBackend}; use mc_devnet::{ChainGenesisDescription, DevnetKeys}; -use mc_mempool::{ - block_production::BlockProductionTask, block_production_metrics::BlockProductionMetrics, L1DataProvider, Mempool, -}; +use mc_mempool::{L1DataProvider, Mempool}; use mc_telemetry::TelemetryHandle; use mp_utils::service::Service; +use std::{io::Write, sync::Arc}; use tokio::task::JoinSet; -use 
crate::cli::block_production::BlockProductionParams; - struct StartParams { backend: Arc, block_import: Arc, From b5a487d8e298bd64c16f6e5f6853c5f180baace8 Mon Sep 17 00:00:00 2001 From: cchudant Date: Thu, 31 Oct 2024 17:13:09 +0000 Subject: [PATCH 2/9] feat(mempool): mempool transaction limits --- CHANGELOG.md | 1 + Cargo.lock | 2 +- Cargo.toml | 1 + crates/client/block_production/src/lib.rs | 8 +- crates/client/devnet/Cargo.toml | 2 +- crates/client/devnet/src/lib.rs | 262 ++++++++++--- crates/client/eth/src/l1_messaging.rs | 8 +- crates/client/mempool/Cargo.toml | 3 +- crates/client/mempool/src/inner/limits.rs | 148 +++++++ crates/client/mempool/src/inner/mod.rs | 364 ++++-------------- .../client/mempool/src/inner/nonce_chain.rs | 26 +- crates/client/mempool/src/inner/proptest.rs | 269 +++++++++++++ crates/client/mempool/src/inner/tx.rs | 3 +- crates/client/mempool/src/lib.rs | 48 ++- crates/node/src/cli/chain_config_overrides.rs | 10 + crates/node/src/main.rs | 8 +- .../chain_config/src/chain_config.rs | 12 + 17 files changed, 795 insertions(+), 380 deletions(-) create mode 100644 crates/client/mempool/src/inner/limits.rs create mode 100644 crates/client/mempool/src/inner/proptest.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index feb4abb8c..295b2d88c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ## Next release +- feat(mempool): mempool transaction limits - fix(rocksdb): update max open files opt - code: refactor to use otel tracing instead of prometheus (removed mc-metrics, added mc-analytics) - fix(version constants): 0.13.2 was mapped to wrong constants diff --git a/Cargo.lock b/Cargo.lock index bc3c1bfa9..0604a49a6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5689,7 +5689,6 @@ dependencies = [ "assert_matches", "bitvec", "blockifier", - "env_logger 0.11.5", "lazy_static", "log", "mc-analytics", @@ -5725,6 +5724,7 @@ dependencies = [ "tracing-core", "tracing-opentelemetry", "tracing-subscriber", + "tracing-test", ] [[package]] diff --git 
a/Cargo.toml b/Cargo.toml index d159192ec..8ca04a500 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -228,6 +228,7 @@ tracing-subscriber = { version = "0.3.18", features = [ "registry", "std", ] } +tracing-test = "0.2.5" tracing-opentelemetry = "0.26.0" [patch.crates-io] diff --git a/crates/client/block_production/src/lib.rs b/crates/client/block_production/src/lib.rs index 7e7b42cf6..8a8fbd658 100644 --- a/crates/client/block_production/src/lib.rs +++ b/crates/client/block_production/src/lib.rs @@ -223,10 +223,6 @@ impl BlockProductionTask { } } - // Add back the unexecuted transactions to the mempool. - stats.n_re_added_to_mempool = txs_to_process.len(); - self.mempool.re_add_txs(txs_to_process); - let on_top_of = self .executor .block_state @@ -243,6 +239,10 @@ impl BlockProductionTask { &on_top_of, )?; + // Add back the unexecuted transactions to the mempool. + stats.n_re_added_to_mempool = txs_to_process.len(); + self.mempool.re_add_txs(txs_to_process, executed_txs); + tracing::debug!( "Finished tick with {} new transactions, now at {} - re-adding {} txs to mempool", stats.n_added_to_block, diff --git a/crates/client/devnet/Cargo.toml b/crates/client/devnet/Cargo.toml index c7c724e10..30f1f3fdf 100644 --- a/crates/client/devnet/Cargo.toml +++ b/crates/client/devnet/Cargo.toml @@ -20,7 +20,7 @@ rstest = { workspace = true } mc-db = { workspace = true, features = ["testing"] } mc-mempool = { workspace = true, features = ["testing"] } mc-block-production = { workspace = true, features = ["testing"] } -tokio = { workspace = true, features = ["rt-multi-thread"] } +tokio = { workspace = true, features = ["rt-multi-thread", "test-util"] } proptest.workspace = true proptest-derive.workspace = true env_logger.workspace = true diff --git a/crates/client/devnet/src/lib.rs b/crates/client/devnet/src/lib.rs index 5490b9978..737add303 100644 --- a/crates/client/devnet/src/lib.rs +++ b/crates/client/devnet/src/lib.rs @@ -190,8 +190,8 @@ mod tests { use 
mc_block_production::metrics::BlockProductionMetrics; use mc_block_production::BlockProductionTask; use mc_db::MadaraBackend; - use mc_mempool::MempoolProvider; use mc_mempool::{transaction_hash, L1DataProvider, Mempool, MockL1DataProvider}; + use mc_mempool::{MempoolLimits, MempoolProvider}; use mp_block::header::L1DataAvailabilityMode; use mp_block::{BlockId, BlockTag}; @@ -209,6 +209,7 @@ mod tests { FlattenedSierraClass, InvokeTransactionResult, ResourceBounds, ResourceBoundsMapping, }; use std::sync::Arc; + use std::time::Duration; struct DevnetForTesting { backend: Arc, @@ -222,7 +223,7 @@ mod tests { &self, mut tx: BroadcastedInvokeTransaction, contract: &DevnetPredeployedContract, - ) -> InvokeTransactionResult { + ) -> Result { let (blockifier_tx, _classes) = broadcasted_to_blockifier( BroadcastedTransaction::Invoke(tx.clone()), self.backend.chain_config().chain_id.to_felt(), @@ -239,14 +240,14 @@ mod tests { tracing::debug!("tx: {:?}", tx); - self.mempool.accept_invoke_tx(tx).unwrap() + self.mempool.accept_invoke_tx(tx) } pub fn sign_and_add_declare_tx( &self, mut tx: BroadcastedDeclareTransaction, contract: &DevnetPredeployedContract, - ) -> DeclareTransactionResult { + ) -> Result { let (blockifier_tx, _classes) = broadcasted_to_blockifier( BroadcastedTransaction::Declare(tx.clone()), self.backend.chain_config().chain_id.to_felt(), @@ -262,14 +263,14 @@ mod tests { }; *tx_signature = vec![signature.r, signature.s]; - self.mempool.accept_declare_tx(tx).unwrap() + self.mempool.accept_declare_tx(tx) } pub fn sign_and_add_deploy_account_tx( &self, mut tx: BroadcastedDeployAccountTransaction, contract: &DevnetPredeployedContract, - ) -> DeployAccountTransactionResult { + ) -> Result { let (blockifier_tx, _classes) = broadcasted_to_blockifier( BroadcastedTransaction::DeployAccount(tx.clone()), self.backend.chain_config().chain_id.to_felt(), @@ -284,7 +285,7 @@ mod tests { }; *tx_signature = vec![signature.r, signature.s]; - 
self.mempool.accept_deploy_account_tx(tx).unwrap() + self.mempool.accept_deploy_account_tx(tx) } /// (STRK in FRI, ETH in WEI) @@ -295,6 +296,10 @@ mod tests { #[fixture] fn chain() -> DevnetForTesting { + chain_with_mempool_limits(MempoolLimits::for_testing()) + } + + fn chain_with_mempool_limits(mempool_limits: MempoolLimits) -> DevnetForTesting { let _ = env_logger::builder().is_test(true).try_init(); let mut g = ChainGenesisDescription::base_config().unwrap(); @@ -327,7 +332,7 @@ mod tests { strk_l1_data_gas_price: 128, }); let l1_data_provider = Arc::new(l1_data_provider) as Arc; - let mempool = Arc::new(Mempool::new(Arc::clone(&backend), Arc::clone(&l1_data_provider))); + let mempool = Arc::new(Mempool::new(Arc::clone(&backend), Arc::clone(&l1_data_provider), mempool_limits)); let metrics = BlockProductionMetrics::register(); let block_production = BlockProductionTask::new( @@ -375,7 +380,7 @@ mod tests { is_query: false, }); - let res = chain.sign_and_add_declare_tx(declare_txn, sender_address); + let res = chain.sign_and_add_declare_tx(declare_txn, sender_address).unwrap(); let calculated_class_hash = sierra_class.class_hash().unwrap(); @@ -421,32 +426,34 @@ mod tests { // Transferring the funds from pre deployed account into the calculated address let contract_0 = &chain.contracts.0[0]; - let transfer_txn = chain.sign_and_add_invoke_tx( - BroadcastedInvokeTransaction::V3(BroadcastedInvokeTransactionV3 { - sender_address: contract_0.address, - calldata: Multicall::default() - .with(Call { - to: ERC20_STRK_CONTRACT_ADDRESS, - selector: Selector::from("transfer"), - calldata: vec![calculated_address, (9_999u128 * STRK_FRI_DECIMALS).into(), Felt::ZERO], - }) - .flatten() - .collect(), - signature: vec![], // Signature is filled in by `sign_and_add_invoke_tx`. 
- nonce: Felt::ZERO, - resource_bounds: ResourceBoundsMapping { - l1_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, - l2_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, - }, - tip: 0, - paymaster_data: vec![], - account_deployment_data: vec![], - nonce_data_availability_mode: starknet_core::types::DataAvailabilityMode::L1, - fee_data_availability_mode: starknet_core::types::DataAvailabilityMode::L1, - is_query: false, - }), - contract_0, - ); + let transfer_txn = chain + .sign_and_add_invoke_tx( + BroadcastedInvokeTransaction::V3(BroadcastedInvokeTransactionV3 { + sender_address: contract_0.address, + calldata: Multicall::default() + .with(Call { + to: ERC20_STRK_CONTRACT_ADDRESS, + selector: Selector::from("transfer"), + calldata: vec![calculated_address, (9_999u128 * STRK_FRI_DECIMALS).into(), Felt::ZERO], + }) + .flatten() + .collect(), + signature: vec![], // Signature is filled in by `sign_and_add_invoke_tx`. + nonce: Felt::ZERO, + resource_bounds: ResourceBoundsMapping { + l1_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, + l2_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, + }, + tip: 0, + paymaster_data: vec![], + account_deployment_data: vec![], + nonce_data_availability_mode: starknet_core::types::DataAvailabilityMode::L1, + fee_data_availability_mode: starknet_core::types::DataAvailabilityMode::L1, + is_query: false, + }), + contract_0, + ) + .unwrap(); tracing::debug!("tx hash: {:#x}", transfer_txn.transaction_hash); chain.block_production.set_current_pending_tick(chain.backend.chain_config().n_pending_ticks_per_block()); @@ -480,7 +487,7 @@ mod tests { is_query: false, }); - let res = chain.sign_and_add_deploy_account_tx(deploy_account_txn, &account); + let res = chain.sign_and_add_deploy_account_tx(deploy_account_txn, &account).unwrap(); chain.block_production.set_current_pending_tick(chain.backend.chain_config().n_pending_ticks_per_block()); 
chain.block_production.on_pending_time_tick().unwrap(); @@ -513,32 +520,34 @@ mod tests { assert_eq!(chain.get_bal_strk_eth(contract_0.address), (10_000 * STRK_FRI_DECIMALS, 10_000 * ETH_WEI_DECIMALS)); assert_eq!(chain.get_bal_strk_eth(contract_1.address), (10_000 * STRK_FRI_DECIMALS, 10_000 * ETH_WEI_DECIMALS)); - let result = chain.sign_and_add_invoke_tx( - BroadcastedInvokeTransaction::V3(BroadcastedInvokeTransactionV3 { - sender_address: contract_0.address, - calldata: Multicall::default() - .with(Call { - to: ERC20_STRK_CONTRACT_ADDRESS, - selector: Selector::from("transfer"), - calldata: vec![contract_1.address, transfer_amount.into(), Felt::ZERO], - }) - .flatten() - .collect(), - signature: vec![], // Signature is filled in by `sign_and_add_invoke_tx`. - nonce: Felt::ZERO, - resource_bounds: ResourceBoundsMapping { - l1_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, - l2_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, - }, - tip: 0, - paymaster_data: vec![], - account_deployment_data: vec![], - nonce_data_availability_mode: starknet_core::types::DataAvailabilityMode::L1, - fee_data_availability_mode: starknet_core::types::DataAvailabilityMode::L1, - is_query: false, - }), - contract_0, - ); + let result = chain + .sign_and_add_invoke_tx( + BroadcastedInvokeTransaction::V3(BroadcastedInvokeTransactionV3 { + sender_address: contract_0.address, + calldata: Multicall::default() + .with(Call { + to: ERC20_STRK_CONTRACT_ADDRESS, + selector: Selector::from("transfer"), + calldata: vec![contract_1.address, transfer_amount.into(), Felt::ZERO], + }) + .flatten() + .collect(), + signature: vec![], // Signature is filled in by `sign_and_add_invoke_tx`. 
+ nonce: Felt::ZERO, + resource_bounds: ResourceBoundsMapping { + l1_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, + l2_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, + }, + tip: 0, + paymaster_data: vec![], + account_deployment_data: vec![], + nonce_data_availability_mode: starknet_core::types::DataAvailabilityMode::L1, + fee_data_availability_mode: starknet_core::types::DataAvailabilityMode::L1, + is_query: false, + }), + contract_0, + ) + .unwrap(); tracing::info!("tx hash: {:#x}", result.transaction_hash); @@ -605,4 +614,133 @@ mod tests { } } } + + #[rstest] + fn test_mempool_tx_limit() { + let chain = chain_with_mempool_limits(MempoolLimits { + max_age: Duration::from_millis(1000000), + max_declare_transactions: 2, + max_transactions: 5, + }); + println!("{}", chain.contracts); + + let contract_0 = &chain.contracts.0[0]; + let contract_1 = &chain.contracts.0[1]; + + for nonce in 0..5 { + chain + .sign_and_add_invoke_tx( + BroadcastedInvokeTransaction::V3(BroadcastedInvokeTransactionV3 { + sender_address: contract_0.address, + calldata: Multicall::default() + .with(Call { + to: ERC20_STRK_CONTRACT_ADDRESS, + selector: Selector::from("transfer"), + calldata: vec![contract_1.address, 15.into(), Felt::ZERO], + }) + .flatten() + .collect(), + signature: vec![], // Signature is filled in by `sign_and_add_invoke_tx`. 
+ nonce: nonce.into(), + resource_bounds: ResourceBoundsMapping { + l1_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, + l2_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, + }, + tip: 0, + paymaster_data: vec![], + account_deployment_data: vec![], + nonce_data_availability_mode: starknet_core::types::DataAvailabilityMode::L1, + fee_data_availability_mode: starknet_core::types::DataAvailabilityMode::L1, + is_query: false, + }), + contract_0, + ) + .unwrap(); + } + + let result = chain.sign_and_add_invoke_tx( + BroadcastedInvokeTransaction::V3(BroadcastedInvokeTransactionV3 { + sender_address: contract_0.address, + calldata: Multicall::default() + .with(Call { + to: ERC20_STRK_CONTRACT_ADDRESS, + selector: Selector::from("transfer"), + calldata: vec![contract_1.address, 15.into(), Felt::ZERO], + }) + .flatten() + .collect(), + signature: vec![], // Signature is filled in by `sign_and_add_invoke_tx`. + nonce: 5.into(), + resource_bounds: ResourceBoundsMapping { + l1_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, + l2_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, + }, + tip: 0, + paymaster_data: vec![], + account_deployment_data: vec![], + nonce_data_availability_mode: starknet_core::types::DataAvailabilityMode::L1, + fee_data_availability_mode: starknet_core::types::DataAvailabilityMode::L1, + is_query: false, + }), + contract_0, + ); + + assert_matches!( + result, + Err(mc_mempool::Error::InnerMempool(mc_mempool::TxInsersionError::Limit( + mc_mempool::MempoolLimitReached::MaxTransactions { max: 5 } + ))) + ) + } + + #[rstest] + fn test_mempool_age_limit() { + let max_age = Duration::from_millis(1000); + let mut chain = + chain_with_mempool_limits(MempoolLimits { max_age, max_declare_transactions: 2, max_transactions: 5 }); + println!("{}", chain.contracts); + + let contract_0 = &chain.contracts.0[0]; + let contract_1 = &chain.contracts.0[1]; + + chain + .sign_and_add_invoke_tx( 
+ BroadcastedInvokeTransaction::V3(BroadcastedInvokeTransactionV3 { + sender_address: contract_0.address, + calldata: Multicall::default() + .with(Call { + to: ERC20_STRK_CONTRACT_ADDRESS, + selector: Selector::from("transfer"), + calldata: vec![contract_1.address, 15.into(), Felt::ZERO], + }) + .flatten() + .collect(), + signature: vec![], // Signature is filled in by `sign_and_add_invoke_tx`. + nonce: 0.into(), + resource_bounds: ResourceBoundsMapping { + l1_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, + l2_gas: ResourceBounds { max_amount: 60000, max_price_per_unit: 10000 }, + }, + tip: 0, + paymaster_data: vec![], + account_deployment_data: vec![], + nonce_data_availability_mode: starknet_core::types::DataAvailabilityMode::L1, + fee_data_availability_mode: starknet_core::types::DataAvailabilityMode::L1, + is_query: false, + }), + contract_0, + ) + .unwrap(); + + std::thread::sleep(max_age); // max age reached + chain.block_production.set_current_pending_tick(1); + chain.block_production.on_pending_time_tick().unwrap(); + + let block = chain.backend.get_block(&BlockId::Tag(BlockTag::Pending)).unwrap().unwrap(); + + // no transactions :) + assert_eq!(block.inner.transactions, vec![]); + assert_eq!(block.inner.receipts, vec![]); + assert!(chain.mempool.is_empty()); + } } diff --git a/crates/client/eth/src/l1_messaging.rs b/crates/client/eth/src/l1_messaging.rs index 8198fba68..16d14ff56 100644 --- a/crates/client/eth/src/l1_messaging.rs +++ b/crates/client/eth/src/l1_messaging.rs @@ -240,7 +240,7 @@ mod l1_messaging_tests { transports::http::{Client, Http}, }; use mc_db::DatabaseService; - use mc_mempool::{GasPriceProvider, L1DataProvider, Mempool}; + use mc_mempool::{GasPriceProvider, L1DataProvider, Mempool, MempoolLimits}; use mp_chain_config::ChainConfig; use rstest::*; use starknet_api::core::Nonce; @@ -362,7 +362,11 @@ mod l1_messaging_tests { let l1_gas_setter = GasPriceProvider::new(); let l1_data_provider: Arc = 
Arc::new(l1_gas_setter.clone()); - let mempool = Arc::new(Mempool::new(Arc::clone(db.backend()), Arc::clone(&l1_data_provider))); + let mempool = Arc::new(Mempool::new( + Arc::clone(db.backend()), + Arc::clone(&l1_data_provider), + MempoolLimits::for_testing(), + )); // Set up metrics service let l1_block_metrics = L1BlockMetrics::register().unwrap(); diff --git a/crates/client/mempool/Cargo.toml b/crates/client/mempool/Cargo.toml index 6702c097b..84fd11fbe 100644 --- a/crates/client/mempool/Cargo.toml +++ b/crates/client/mempool/Cargo.toml @@ -22,7 +22,8 @@ tokio = { workspace = true, features = ["rt-multi-thread"] } proptest.workspace = true proptest-derive.workspace = true bitvec.workspace = true -env_logger.workspace = true +tracing = { workspace = true, features = ["log"] } +tracing-test.workspace = true blockifier = { workspace = true, features = ["testing"] } mockall.workspace = true assert_matches.workspace = true diff --git a/crates/client/mempool/src/inner/limits.rs b/crates/client/mempool/src/inner/limits.rs new file mode 100644 index 000000000..845439839 --- /dev/null +++ b/crates/client/mempool/src/inner/limits.rs @@ -0,0 +1,148 @@ +use std::time::{Duration, SystemTime}; + +use blockifier::transaction::transaction_types::TransactionType; +use mc_exec::execution::TxInfo; +use mp_chain_config::ChainConfig; + +use crate::MempoolTransaction; + +#[derive(Debug)] +pub struct MempoolLimits { + pub max_transactions: usize, + pub max_declare_transactions: usize, + pub max_age: Duration, +} + +impl MempoolLimits { + pub fn new(chain_config: &ChainConfig) -> Self { + Self { + max_transactions: chain_config.mempool_tx_limit, + max_declare_transactions: chain_config.mempool_declare_tx_limit, + max_age: chain_config.mempool_tx_max_age, + } + } + #[cfg(any(test, feature = "testing"))] + pub fn for_testing() -> Self { + Self { + max_age: Duration::from_secs(10000000), + max_declare_transactions: usize::MAX, + max_transactions: usize::MAX, + } + } +} + +/// Note: when 
a transaction is popped from the mempool by block prod, the limits will not be updated until the full +/// tick has been executed and excess transactions are added back into the mempool. +/// This means that the inner mempool may have fewer transactions than what the limits say at a given time. +#[derive(Debug)] +pub(crate) struct MempoolLimiter { + pub config: MempoolLimits, + current_transactions: usize, + current_declare_transactions: usize, +} + +#[derive(thiserror::Error, Debug, PartialEq, Eq)] +pub enum MempoolLimitReached { + #[error("The mempool has reached the limit of {max} transactions")] + MaxTransactions { max: usize }, + #[error("The mempool has reached the limit of {max} declare transactions")] + MaxDeclareTransactions { max: usize }, + #[error("The transaction age is greater than the limit of {max:?}")] + Age { max: Duration }, +} + +pub(crate) struct TransactionCheckedLimits { + check_tx_limit: bool, + check_declare_limit: bool, + check_age: bool, + tx_arrived_at: SystemTime, +} + +impl TransactionCheckedLimits { + // Returns which limits apply for this transaction. + // This struct is also used to update the limits after insertion, without having to keep a clone of the transaction around.
+ // We can add more limits here as needed :) + pub fn limits_for(tx: &MempoolTransaction) -> Self { + match tx.tx.tx_type() { + TransactionType::Declare => TransactionCheckedLimits { + check_tx_limit: true, + check_declare_limit: true, + check_age: true, + tx_arrived_at: tx.arrived_at, + }, + TransactionType::DeployAccount => TransactionCheckedLimits { + check_tx_limit: true, + check_declare_limit: false, + check_age: true, + tx_arrived_at: tx.arrived_at, + }, + TransactionType::InvokeFunction => TransactionCheckedLimits { + check_tx_limit: true, + check_declare_limit: false, + check_age: true, + tx_arrived_at: tx.arrived_at, + }, + TransactionType::L1Handler => TransactionCheckedLimits { + check_tx_limit: true, + check_declare_limit: false, + check_age: false, + tx_arrived_at: tx.arrived_at, + }, + } + } +} + +impl MempoolLimiter { + pub fn new(limits: MempoolLimits) -> Self { + Self { config: limits, current_transactions: 0, current_declare_transactions: 0 } + } + + pub fn check_insert_limits(&self, to_check: &TransactionCheckedLimits) -> Result<(), MempoolLimitReached> { + // tx limit + if to_check.check_tx_limit && self.current_transactions >= self.config.max_transactions { + return Err(MempoolLimitReached::MaxTransactions { max: self.config.max_transactions }); + } + + // declare tx limit + if to_check.check_declare_limit && self.current_declare_transactions >= self.config.max_declare_transactions { + return Err(MempoolLimitReached::MaxDeclareTransactions { max: self.config.max_declare_transactions }); + } + + // age + if self.tx_age_exceeded(to_check) { + return Err(MempoolLimitReached::Age { max: self.config.max_age }); + } + + Ok(()) + } + + pub fn tx_age_exceeded(&self, to_check: &TransactionCheckedLimits) -> bool { + if to_check.check_age { + let current_time = SystemTime::now(); + if to_check.tx_arrived_at < current_time.checked_sub(self.config.max_age).unwrap_or(SystemTime::UNIX_EPOCH) + { + return true; + } + } + false + } + + pub fn 
update_tx_limits(&mut self, limits: &TransactionCheckedLimits) { + if limits.check_tx_limit { + self.current_transactions += 1; + } + if limits.check_declare_limit { + self.current_declare_transactions += 1; + } + } + + pub fn mark_removed(&mut self, to_update: &TransactionCheckedLimits) { + // These should not overflow unless block prod marks transactions as consumed even though they have not been popped. + if to_update.check_tx_limit { + self.current_transactions -= 1; + } + if to_update.check_declare_limit { + self.current_declare_transactions -= 1; + } + } +} diff --git a/crates/client/mempool/src/inner/mod.rs b/crates/client/mempool/src/inner/mod.rs index a081a6531..b1599a945 100644 --- a/crates/client/mempool/src/inner/mod.rs +++ b/crates/client/mempool/src/inner/mod.rs @@ -14,8 +14,12 @@ use std::{ }; mod deployed_contracts; +mod limits; mod nonce_chain; +mod proptest; mod tx; + +pub use limits::*; pub use tx::*; #[derive(Clone, Debug)] @@ -43,17 +47,18 @@ impl PartialOrd for AccountOrderedByTimestamp { } } -#[derive(Default, Debug)] +#[derive(Debug)] /// Invariants: /// - Every nonce chain in `nonce_chains` should have a one to one match with `tx_queue`. /// - Every [`AccountTransaction::DeployAccount`] transaction should have a one to one match with `deployed_contracts`. /// - See [`NonceChain`] invariants. -pub struct MempoolInner { +pub(crate) struct MempoolInner { /// We have one nonce chain per contract address. nonce_chains: HashMap, /// FCFS queue. 
tx_queue: BTreeSet, deployed_contracts: DeployedContracts, + limiter: MempoolLimiter, } #[derive(thiserror::Error, Debug, PartialEq, Eq)] @@ -62,9 +67,20 @@ pub enum TxInsersionError { NonceConflict, #[error("A transaction with this hash already exists in the transaction pool")] DuplicateTxn, + #[error(transparent)] + Limit(#[from] MempoolLimitReached), } impl MempoolInner { + pub fn new(limits_config: MempoolLimits) -> Self { + Self { + nonce_chains: Default::default(), + tx_queue: Default::default(), + deployed_contracts: Default::default(), + limiter: MempoolLimiter::new(limits_config), + } + } + #[cfg(test)] pub fn check_invariants(&self) { self.nonce_chains.values().for_each(NonceChain::check_invariants); @@ -84,6 +100,16 @@ impl MempoolInner { /// When `force` is `true`, this function should never return any error. pub fn insert_tx(&mut self, mempool_tx: MempoolTransaction, force: bool) -> Result<(), TxInsersionError> { + // delete age-exceeded txs from the mempool + // todo(perf): this may want to limit this check once every few seconds to avoid it being in the hot path? 
+ self.remove_age_exceeded_txs(); + + // check limits + let limits_for_tx = TransactionCheckedLimits::limits_for(&mempool_tx); + if !force { + self.limiter.check_insert_limits(&limits_for_tx)?; + } + let contract_addr = mempool_tx.contract_address(); let arrived_at = mempool_tx.arrived_at; let deployed_contract_address = @@ -99,9 +125,7 @@ impl MempoolInner { let (position, is_replaced) = match entry.get_mut().insert(mempool_tx, force) { Ok(position) => position, Err(nonce_collision_or_duplicate_hash) => { - if force { - panic!("Force add should never error") - } + debug_assert!(!force); // "Force add should never error return Err(nonce_collision_or_duplicate_hash); } }; @@ -136,12 +160,16 @@ impl MempoolInner { } }; - if is_replaced != ReplacedState::Replaced { - if let Some(contract_address) = &deployed_contract_address { - self.deployed_contracts.increment(*contract_address) - } + if let ReplacedState::Replaced { previous } = is_replaced { + // Mark the previous transaction as deleted + self.limiter.mark_removed(&TransactionCheckedLimits::limits_for(&previous)); + } else if let Some(contract_address) = &deployed_contract_address { + self.deployed_contracts.increment(*contract_address) } + // Update transaction limits + self.limiter.update_tx_limits(&limits_for_tx); + Ok(()) } @@ -149,10 +177,7 @@ impl MempoolInner { self.deployed_contracts.contains(addr) } - pub fn pop_next(&mut self) -> Option { - // Pop tx queue. - let tx_queue_account = self.tx_queue.pop_first()?; // Bubble up None if the mempool is empty. - + fn pop_tx_queue_account(&mut self, tx_queue_account: &AccountOrderedByTimestamp) -> MempoolTransaction { // Update nonce chain. 
let nonce_chain = self.nonce_chains.get_mut(&tx_queue_account.contract_addr).expect("Nonce chain does not match tx queue"); @@ -178,288 +203,65 @@ impl MempoolInner { self.deployed_contracts.decrement(tx.contract_address); } - Some(mempool_tx) + mempool_tx } - pub fn pop_next_chunk(&mut self, dest: &mut impl Extend, n: usize) { - dest.extend((0..n).map_while(|_| self.pop_next())) - } - - pub fn re_add_txs(&mut self, txs: impl IntoIterator) { - for tx in txs { - let force = true; - self.insert_tx(tx, force).expect("Force insert tx should not error"); + pub fn remove_age_exceeded_txs(&mut self) { + // Pop tx queue. + // too bad there's no first_entry api, we should check if hashbrown has it to avoid the double lookup. + while let Some(tx_queue_account) = self.tx_queue.first() { + let tx_queue_account = tx_queue_account.clone(); // clone is cheap for this struct + let mempool_tx = self.pop_tx_queue_account(&tx_queue_account); + + if self.limiter.tx_age_exceeded(&TransactionCheckedLimits::limits_for(&mempool_tx)) { + let _res = self.tx_queue.pop_first().expect("cannot be empty, checked just above"); + self.limiter.mark_removed(&TransactionCheckedLimits::limits_for(&mempool_tx)); + } else { + break; + } } } -} -#[cfg(test)] -mod tests { - use super::*; - use blockifier::{ - execution::contract_class::ClassInfo, - test_utils::{contracts::FeatureContract, CairoVersion}, - transaction::{transaction_execution::Transaction, transaction_types::TransactionType}, - }; - use mc_exec::execution::TxInfo; - use mp_convert::ToFelt; - use proptest::prelude::*; - use proptest_derive::Arbitrary; - use starknet_api::{ - core::{calculate_contract_address, ChainId, Nonce}, - data_availability::DataAvailabilityMode, - transaction::{ - ContractAddressSalt, DeclareTransactionV3, DeployAccountTransactionV3, InvokeTransactionV3, Resource, - ResourceBounds, ResourceBoundsMapping, TransactionHash, TransactionHasher, TransactionVersion, - }, - }; - use starknet_types_core::felt::Felt; - - use 
blockifier::abi::abi_utils::selector_from_name; - use starknet_api::transaction::Fee; - use std::{ - collections::HashSet, - fmt, - time::{Duration, SystemTime}, - }; - - lazy_static::lazy_static! { - static ref DUMMY_CLASS: ClassInfo = { - let dummy_contract_class = FeatureContract::TestContract(CairoVersion::Cairo1); - ClassInfo::new(&dummy_contract_class.get_class(), 100, 100).unwrap() - }; - } + pub fn pop_next(&mut self) -> Option { + // Pop tx queue. + let mempool_tx = loop { + let tx_queue_account = self.tx_queue.pop_first()?; // Bubble up None if the mempool is empty. + let mempool_tx = self.pop_tx_queue_account(&tx_queue_account); - struct Insert(MempoolTransaction, /* force */ bool); - impl fmt::Debug for Insert { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "Insert(ty={:?},arrived_at={:?},tx_hash={:?},contract_address={:?},nonce={:?},force={:?})", - self.0.tx.tx_type(), - self.0.arrived_at, - self.0.tx_hash(), - self.0.contract_address(), - self.0.nonce(), - self.1, - ) - } - } - impl Arbitrary for Insert { - type Parameters = (); - type Strategy = BoxedStrategy; - - fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { - #[derive(Debug, Arbitrary)] - enum TxTy { - Declare, - DeployAccount, - InvokeFunction, - L1Handler, + let limits = TransactionCheckedLimits::limits_for(&mempool_tx); + if !self.limiter.tx_age_exceeded(&limits) { + break mempool_tx; } - <(TxTy, u8, u8, u8, bool)>::arbitrary() - .prop_map(|(ty, arrived_at, contract_address, nonce, force)| { - let arrived_at = SystemTime::UNIX_EPOCH + Duration::from_millis(arrived_at.into()); - let contract_addr = ContractAddress::try_from(Felt::from(contract_address)).unwrap(); - let nonce = Nonce(Felt::from(nonce)); - - let resource_bounds = ResourceBoundsMapping( - [ - (Resource::L1Gas, ResourceBounds { max_amount: 5, max_price_per_unit: 5 }), - (Resource::L2Gas, ResourceBounds { max_amount: 5, max_price_per_unit: 5 }), - ] - .into(), - ); - - let tx = match ty 
{ - TxTy::Declare => starknet_api::transaction::Transaction::Declare( - starknet_api::transaction::DeclareTransaction::V3(DeclareTransactionV3 { - resource_bounds, - tip: Default::default(), - signature: Default::default(), - nonce, - class_hash: Default::default(), - compiled_class_hash: Default::default(), - sender_address: contract_addr, - nonce_data_availability_mode: DataAvailabilityMode::L1, - fee_data_availability_mode: DataAvailabilityMode::L1, - paymaster_data: Default::default(), - account_deployment_data: Default::default(), - }), - ), - TxTy::DeployAccount => starknet_api::transaction::Transaction::DeployAccount( - starknet_api::transaction::DeployAccountTransaction::V3(DeployAccountTransactionV3 { - resource_bounds, - tip: Default::default(), - signature: Default::default(), - nonce, - class_hash: Default::default(), - nonce_data_availability_mode: DataAvailabilityMode::L1, - fee_data_availability_mode: DataAvailabilityMode::L1, - paymaster_data: Default::default(), - contract_address_salt: ContractAddressSalt(contract_addr.to_felt()), - constructor_calldata: Default::default(), - }), - ), - TxTy::InvokeFunction => starknet_api::transaction::Transaction::Invoke( - starknet_api::transaction::InvokeTransaction::V3(InvokeTransactionV3 { - resource_bounds, - tip: Default::default(), - signature: Default::default(), - nonce, - sender_address: contract_addr, - calldata: Default::default(), - nonce_data_availability_mode: DataAvailabilityMode::L1, - fee_data_availability_mode: DataAvailabilityMode::L1, - paymaster_data: Default::default(), - account_deployment_data: Default::default(), - }), - ), - // TODO: maybe update the values? 
- TxTy::L1Handler => starknet_api::transaction::Transaction::L1Handler( - starknet_api::transaction::L1HandlerTransaction { - version: TransactionVersion::ZERO, - nonce, - contract_address: contract_addr, - entry_point_selector: selector_from_name("l1_handler_set_value"), - calldata: Default::default(), - }, - ), - }; - - let deployed = if let starknet_api::transaction::Transaction::DeployAccount(tx) = &tx { - Some( - calculate_contract_address( - tx.contract_address_salt(), - Default::default(), - &Default::default(), - Default::default(), - ) - .unwrap(), - ) - } else { - None - }; - - // providing dummy l1 gas for now - let l1_gas_paid = match &tx { - starknet_api::transaction::Transaction::L1Handler(_) => Some(Fee(1)), - _ => None, - }; - - let tx_hash = tx.calculate_transaction_hash(&ChainId::Mainnet, &TransactionVersion::THREE).unwrap(); - - let tx = - Transaction::from_api(tx, tx_hash, Some(DUMMY_CLASS.clone()), l1_gas_paid, deployed, false) - .unwrap(); - - Insert(MempoolTransaction { tx, arrived_at, converted_class: None }, force) - }) - .boxed() - } - } + self.limiter.mark_removed(&limits); + }; - #[derive(Debug, Arbitrary)] - enum Operation { - Insert(Insert), - Pop, + // do not update mempool limits, block prod will update it with re-add txs. 
+ Some(mempool_tx) } - #[derive(Debug, Arbitrary)] - struct MempoolInvariantsProblem(Vec); - impl MempoolInvariantsProblem { - fn check(&self) { - tracing::debug!("\n\n\n\n\nCase: {:#?}", self); - let mut mempool = MempoolInner::default(); - mempool.check_invariants(); - - let mut inserted = HashSet::new(); - let mut inserted_contract_nonce_pairs = HashSet::new(); - let mut new_contracts = HashSet::new(); - - let handle_pop = |res: Option, - inserted: &mut HashSet, - inserted_contract_nonce_pairs: &mut HashSet<(Nonce, ContractAddress)>, - new_contracts: &mut HashSet| { - if let Some(res) = &res { - let removed = inserted.remove(&res.tx_hash()); - assert!(removed); - let removed = inserted_contract_nonce_pairs.remove(&(res.nonce(), res.contract_address())); - assert!(removed); - - if res.tx.tx_type() == TransactionType::DeployAccount { - let _removed = new_contracts.remove(&res.contract_address()); - // there can be multiple deploy_account txs. - // assert!(removed) - } - } else { - assert!(inserted.is_empty()) - } - tracing::trace!("Popped {:?}", res.map(|el| Insert(el, false))); - }; - - for op in &self.0 { - match op { - Operation::Insert(insert) => { - let force = insert.1; - tracing::trace!("Insert {:?}", insert); - let res = mempool.insert_tx(insert.0.clone(), insert.1); - - let expected = if !force - && inserted_contract_nonce_pairs.contains(&(insert.0.nonce(), insert.0.contract_address())) - { - if inserted.contains(&insert.0.tx_hash()) { - Err(TxInsersionError::DuplicateTxn) - } else { - Err(TxInsersionError::NonceConflict) - } - } else { - Ok(()) - }; - - assert_eq!(expected, res); - - if expected.is_ok() { - if insert.0.tx.tx_type() == TransactionType::DeployAccount { - new_contracts.insert(insert.0.contract_address()); - } - inserted.insert(insert.0.tx_hash()); - inserted_contract_nonce_pairs.insert((insert.0.nonce(), insert.0.contract_address())); - } - - tracing::trace!("Result {:?}", res); - } - Operation::Pop => { - tracing::trace!("Pop"); - let res 
= mempool.pop_next(); - handle_pop(res, &mut inserted, &mut inserted_contract_nonce_pairs, &mut new_contracts); - } - } - tracing::trace!("State: {mempool:#?}"); - mempool.check_invariants(); - } + pub fn pop_next_chunk(&mut self, dest: &mut impl Extend, n: usize) { + dest.extend((0..n).map_while(|_| self.pop_next())) + } - loop { - tracing::trace!("Pop"); - let Some(res) = mempool.pop_next() else { break }; - handle_pop(Some(res), &mut inserted, &mut inserted_contract_nonce_pairs, &mut new_contracts); - mempool.check_invariants(); - } - assert!(inserted.is_empty()); - assert!(inserted_contract_nonce_pairs.is_empty()); - assert!(new_contracts.is_empty()); - tracing::trace!("Done :)"); + /// This is called by the block production after a batch of transaction is executed. + /// Mark the consumed txs as consumed, and re-add the transactions that are not consumed in the mempool. + pub fn re_add_txs( + &mut self, + txs: impl IntoIterator, + consumed_txs: impl IntoIterator, + ) { + for tx in consumed_txs { + self.limiter.mark_removed(&TransactionCheckedLimits::limits_for(&tx)) + } + for tx in txs { + let force = true; + self.insert_tx(tx, force).expect("Force insert tx should not error"); } } - proptest::proptest! 
{ - #![proptest_config(ProptestConfig::with_cases(5))] // comment this when developing, this is mostly for faster ci & whole workspace `cargo test` - #[test] - fn proptest_mempool(pb in any::()) { - let _ = env_logger::builder().is_test(true).try_init(); - tracing::log::set_max_level(tracing::log::LevelFilter::Trace); - pb.check(); - } + #[cfg(any(test, feature = "testing"))] + pub fn is_empty(&self) -> bool { + self.tx_queue.is_empty() } } diff --git a/crates/client/mempool/src/inner/nonce_chain.rs b/crates/client/mempool/src/inner/nonce_chain.rs index a560770d8..72bb77222 100644 --- a/crates/client/mempool/src/inner/nonce_chain.rs +++ b/crates/client/mempool/src/inner/nonce_chain.rs @@ -4,7 +4,7 @@ use starknet_api::{core::Nonce, transaction::TransactionHash}; use std::collections::{btree_map, BTreeMap}; use std::{cmp, iter}; -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct OrderMempoolTransactionByNonce(pub MempoolTransaction); impl PartialEq for OrderMempoolTransactionByNonce { @@ -42,9 +42,9 @@ pub enum InsertedPosition { Other, } -#[derive(Eq, PartialEq, Debug)] +#[derive(Debug)] pub enum ReplacedState { - Replaced, + Replaced { previous: MempoolTransaction }, NotReplaced, } @@ -85,9 +85,17 @@ impl NonceChain { let mempool_tx_hash = mempool_tx.tx_hash(); let replaced = if force { - if self.transactions.insert(OrderMempoolTransactionByNonce(mempool_tx), ()).is_some() { - ReplacedState::Replaced + // double lookup here unfortunately.. that's because we're using the keys in a hacky way and can't update the + // entry key using the entry api. 
+ let mempool_tx = OrderMempoolTransactionByNonce(mempool_tx); + if let Some((previous, _)) = self.transactions.get_key_value(&mempool_tx) { + let previous = previous.0.clone(); + let inserted = self.transactions.insert(mempool_tx, ()); + debug_assert!(inserted.is_some()); + ReplacedState::Replaced { previous } } else { + let inserted = self.transactions.insert(mempool_tx, ()); + debug_assert!(inserted.is_none()); ReplacedState::NotReplaced } } else { @@ -116,8 +124,7 @@ impl NonceChain { InsertedPosition::Other }; - #[cfg(debug_assertions)] // unknown field `front_tx_hash` in release if debug_assert_eq is used - assert_eq!( + debug_assert_eq!( self.transactions.first_key_value().expect("Getting the first tx").0 .0.tx_hash(), self.front_tx_hash ); @@ -130,10 +137,7 @@ impl NonceChain { let (tx, _) = self.transactions.pop_first().expect("Nonce chain should not be empty"); if let Some((new_front, _)) = self.transactions.first_key_value() { self.front_arrived_at = new_front.0.arrived_at; - #[cfg(debug_assertions)] - { - self.front_tx_hash = new_front.0.tx_hash(); - } + self.front_tx_hash = new_front.0.tx_hash(); self.front_nonce = new_front.0.nonce(); (tx.0, NonceChainNewState::NotEmpty) } else { diff --git a/crates/client/mempool/src/inner/proptest.rs b/crates/client/mempool/src/inner/proptest.rs new file mode 100644 index 000000000..501f46e08 --- /dev/null +++ b/crates/client/mempool/src/inner/proptest.rs @@ -0,0 +1,269 @@ +#![cfg(test)] + +use super::*; +use ::proptest::prelude::*; +use blockifier::{ + execution::contract_class::ClassInfo, + test_utils::{contracts::FeatureContract, CairoVersion}, + transaction::{transaction_execution::Transaction, transaction_types::TransactionType}, +}; +use mc_exec::execution::TxInfo; +use mp_convert::ToFelt; +use proptest_derive::Arbitrary; +use starknet_api::{ + core::{calculate_contract_address, ChainId, Nonce}, + data_availability::DataAvailabilityMode, + transaction::{ + ContractAddressSalt, DeclareTransactionV3, 
DeployAccountTransactionV3, InvokeTransactionV3, Resource, + ResourceBounds, ResourceBoundsMapping, TransactionHash, TransactionHasher, TransactionVersion, + }, +}; +use starknet_types_core::felt::Felt; + +use blockifier::abi::abi_utils::selector_from_name; +use starknet_api::transaction::Fee; +use std::{ + collections::HashSet, + fmt, + time::{Duration, SystemTime}, +}; + +lazy_static::lazy_static! { + static ref DUMMY_CLASS: ClassInfo = { + let dummy_contract_class = FeatureContract::TestContract(CairoVersion::Cairo1); + ClassInfo::new(&dummy_contract_class.get_class(), 100, 100).unwrap() + }; + + static ref NOW: SystemTime = SystemTime::now(); +} + +struct Insert(MempoolTransaction, /* force */ bool); +impl fmt::Debug for Insert { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "Insert(ty={:?},arrived_at={:?},tx_hash={:?},contract_address={:?},nonce={:?},force={:?})", + self.0.tx.tx_type(), + self.0.arrived_at, + self.0.tx_hash(), + self.0.contract_address(), + self.0.nonce(), + self.1, + ) + } +} +impl Arbitrary for Insert { + type Parameters = (); + type Strategy = BoxedStrategy; + + fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { + #[derive(Debug, Arbitrary)] + enum TxTy { + Declare, + DeployAccount, + InvokeFunction, + L1Handler, + } + + <(TxTy, u8, u8, u8, bool)>::arbitrary() + .prop_map(|(ty, arrived_at, contract_address, nonce, force)| { + let arrived_at = *NOW + Duration::from_millis(arrived_at.into()); + let contract_addr = ContractAddress::try_from(Felt::from(contract_address)).unwrap(); + let nonce = Nonce(Felt::from(nonce)); + + let resource_bounds = ResourceBoundsMapping( + [ + (Resource::L1Gas, ResourceBounds { max_amount: 5, max_price_per_unit: 5 }), + (Resource::L2Gas, ResourceBounds { max_amount: 5, max_price_per_unit: 5 }), + ] + .into(), + ); + + let tx = match ty { + TxTy::Declare => starknet_api::transaction::Transaction::Declare( + 
starknet_api::transaction::DeclareTransaction::V3(DeclareTransactionV3 { + resource_bounds, + tip: Default::default(), + signature: Default::default(), + nonce, + class_hash: Default::default(), + compiled_class_hash: Default::default(), + sender_address: contract_addr, + nonce_data_availability_mode: DataAvailabilityMode::L1, + fee_data_availability_mode: DataAvailabilityMode::L1, + paymaster_data: Default::default(), + account_deployment_data: Default::default(), + }), + ), + TxTy::DeployAccount => starknet_api::transaction::Transaction::DeployAccount( + starknet_api::transaction::DeployAccountTransaction::V3(DeployAccountTransactionV3 { + resource_bounds, + tip: Default::default(), + signature: Default::default(), + nonce, + class_hash: Default::default(), + nonce_data_availability_mode: DataAvailabilityMode::L1, + fee_data_availability_mode: DataAvailabilityMode::L1, + paymaster_data: Default::default(), + contract_address_salt: ContractAddressSalt(contract_addr.to_felt()), + constructor_calldata: Default::default(), + }), + ), + TxTy::InvokeFunction => starknet_api::transaction::Transaction::Invoke( + starknet_api::transaction::InvokeTransaction::V3(InvokeTransactionV3 { + resource_bounds, + tip: Default::default(), + signature: Default::default(), + nonce, + sender_address: contract_addr, + calldata: Default::default(), + nonce_data_availability_mode: DataAvailabilityMode::L1, + fee_data_availability_mode: DataAvailabilityMode::L1, + paymaster_data: Default::default(), + account_deployment_data: Default::default(), + }), + ), + // TODO: maybe update the values? 
+ TxTy::L1Handler => starknet_api::transaction::Transaction::L1Handler( + starknet_api::transaction::L1HandlerTransaction { + version: TransactionVersion::ZERO, + nonce, + contract_address: contract_addr, + entry_point_selector: selector_from_name("l1_handler_set_value"), + calldata: Default::default(), + }, + ), + }; + + let deployed = if let starknet_api::transaction::Transaction::DeployAccount(tx) = &tx { + Some( + calculate_contract_address( + tx.contract_address_salt(), + Default::default(), + &Default::default(), + Default::default(), + ) + .unwrap(), + ) + } else { + None + }; + + // providing dummy l1 gas for now + let l1_gas_paid = match &tx { + starknet_api::transaction::Transaction::L1Handler(_) => Some(Fee(1)), + _ => None, + }; + + let tx_hash = tx.calculate_transaction_hash(&ChainId::Mainnet, &TransactionVersion::THREE).unwrap(); + + let tx = Transaction::from_api(tx, tx_hash, Some(DUMMY_CLASS.clone()), l1_gas_paid, deployed, false) + .unwrap(); + + Insert(MempoolTransaction { tx, arrived_at, converted_class: None }, force) + }) + .boxed() + } +} + +#[derive(Debug, Arbitrary)] +enum Operation { + Insert(Insert), + Pop, +} + +#[derive(Debug, Arbitrary)] +struct MempoolInvariantsProblem(Vec); +impl MempoolInvariantsProblem { + fn check(&self) { + tracing::debug!("\n\n\n\n\nCase: {:#?}", self); + let mut mempool = MempoolInner::new(MempoolLimits::for_testing()); + mempool.check_invariants(); + + let mut inserted = HashSet::new(); + let mut inserted_contract_nonce_pairs = HashSet::new(); + let mut new_contracts = HashSet::new(); + + let handle_pop = |res: Option, + inserted: &mut HashSet, + inserted_contract_nonce_pairs: &mut HashSet<(Nonce, ContractAddress)>, + new_contracts: &mut HashSet| { + if let Some(res) = &res { + let removed = inserted.remove(&res.tx_hash()); + assert!(removed); + let removed = inserted_contract_nonce_pairs.remove(&(res.nonce(), res.contract_address())); + assert!(removed); + + if res.tx.tx_type() == 
TransactionType::DeployAccount { + let _removed = new_contracts.remove(&res.contract_address()); + // there can be multiple deploy_account txs. + // assert!(removed) + } + } else { + assert!(inserted.is_empty()) + } + tracing::trace!("Popped {:?}", res.map(|el| Insert(el, false))); + }; + + for op in &self.0 { + match op { + Operation::Insert(insert) => { + let force = insert.1; + tracing::trace!("Insert {:?}", insert); + let res = mempool.insert_tx(insert.0.clone(), insert.1); + + let expected = if !force + && inserted_contract_nonce_pairs.contains(&(insert.0.nonce(), insert.0.contract_address())) + { + if inserted.contains(&insert.0.tx_hash()) { + Err(TxInsersionError::DuplicateTxn) + } else { + Err(TxInsersionError::NonceConflict) + } + } else { + Ok(()) + }; + + assert_eq!(expected, res); + + if expected.is_ok() { + if insert.0.tx.tx_type() == TransactionType::DeployAccount { + new_contracts.insert(insert.0.contract_address()); + } + inserted.insert(insert.0.tx_hash()); + inserted_contract_nonce_pairs.insert((insert.0.nonce(), insert.0.contract_address())); + } + + tracing::trace!("Result {:?}", res); + } + Operation::Pop => { + tracing::trace!("Pop"); + let res = mempool.pop_next(); + handle_pop(res, &mut inserted, &mut inserted_contract_nonce_pairs, &mut new_contracts); + } + } + tracing::trace!("State: {mempool:#?}"); + mempool.check_invariants(); + } + + loop { + tracing::trace!("Pop"); + let Some(res) = mempool.pop_next() else { break }; + handle_pop(Some(res), &mut inserted, &mut inserted_contract_nonce_pairs, &mut new_contracts); + tracing::trace!("State: {mempool:#?}"); + mempool.check_invariants(); + } + assert!(inserted.is_empty()); + assert!(inserted_contract_nonce_pairs.is_empty()); + assert!(new_contracts.is_empty()); + tracing::trace!("Done :)"); + } +} + +::proptest::proptest! 
{ + #[tracing_test::traced_test] + #[test] + fn proptest_mempool(pb in any::()) { + pb.check(); + } +} diff --git a/crates/client/mempool/src/inner/tx.rs b/crates/client/mempool/src/inner/tx.rs index a214021dc..0106a67df 100644 --- a/crates/client/mempool/src/inner/tx.rs +++ b/crates/client/mempool/src/inner/tx.rs @@ -1,3 +1,4 @@ +use crate::{clone_transaction, contract_addr, nonce, tx_hash}; use blockifier::transaction::transaction_execution::Transaction; use mc_exec::execution::TxInfo; use mp_class::ConvertedClass; @@ -8,8 +9,6 @@ use starknet_api::{ }; use std::{fmt, time::SystemTime}; -use crate::{clone_transaction, contract_addr, nonce, tx_hash}; - pub type ArrivedAtTimestamp = SystemTime; pub struct MempoolTransaction { diff --git a/crates/client/mempool/src/lib.rs b/crates/client/mempool/src/lib.rs index 76c513544..c9330d716 100644 --- a/crates/client/mempool/src/lib.rs +++ b/crates/client/mempool/src/lib.rs @@ -44,6 +44,8 @@ mod inner; mod l1; pub mod metrics; +pub use inner::*; + #[derive(thiserror::Error, Debug)] pub enum Error { #[error("Storage error: {0:#}")] @@ -77,8 +79,14 @@ pub trait MempoolProvider: Send + Sync { where Self: Sized; fn take_tx(&self) -> Option; - fn re_add_txs + 'static>(&self, txs: I) - where + fn re_add_txs< + I: IntoIterator + 'static, + CI: IntoIterator + 'static, + >( + &self, + txs: I, + consumed_txs: CI, + ) where Self: Sized; fn chain_id(&self) -> Felt; } @@ -91,8 +99,13 @@ pub struct Mempool { } impl Mempool { - pub fn new(backend: Arc, l1_data_provider: Arc) -> Self { - Mempool { backend, l1_data_provider, inner: Default::default(), metrics: MempoolMetrics::register() } + pub fn new(backend: Arc, l1_data_provider: Arc, limits: MempoolLimits) -> Self { + Mempool { + backend, + l1_data_provider, + inner: RwLock::new(MempoolInner::new(limits)), + metrics: MempoolMetrics::register(), + } } #[tracing::instrument(skip(self), fields(module = "Mempool"))] @@ -154,6 +167,11 @@ impl Mempool { Ok(()) } + + #[cfg(any(test, feature = 
"testing"))] + pub fn is_empty(&self) -> bool { + self.inner.read().expect("Poisoned lock").is_empty() + } } pub fn transaction_hash(tx: &Transaction) -> Felt { @@ -267,10 +285,16 @@ impl MempoolProvider for Mempool { } /// Warning: A lock is taken while a user-supplied function (iterator stuff) is run - Callers should be careful - #[tracing::instrument(skip(self, txs), fields(module = "Mempool"))] - fn re_add_txs + 'static>(&self, txs: I) { + /// This is called by the block production after a batch of transaction is executed. + /// Mark the consumed txs as consumed, and re-add the transactions that are not consumed in the mempool. + #[tracing::instrument(skip(self, txs, consumed_txs), fields(module = "Mempool"))] + fn re_add_txs, CI: IntoIterator>( + &self, + txs: I, + consumed_txs: CI, + ) { let mut inner = self.inner.write().expect("Poisoned lock"); - inner.re_add_txs(txs) + inner.re_add_txs(txs, consumed_txs) } fn chain_id(&self) -> Felt { @@ -354,11 +378,9 @@ pub(crate) fn clone_transaction(tx: &Transaction) -> Transaction { #[cfg(test)] mod test { - use std::sync::Arc; - + use crate::{inner::MempoolLimits, Mempool, MockL1DataProvider}; use starknet_core::types::Felt; - - use crate::MockL1DataProvider; + use std::sync::Arc; #[rstest::fixture] fn backend() -> Arc { @@ -415,7 +437,7 @@ mod test { l1_data_provider: Arc, tx_account_v0_valid: blockifier::transaction::transaction_execution::Transaction, ) { - let mempool = crate::Mempool::new(backend, l1_data_provider); + let mempool = Mempool::new(backend, l1_data_provider, MempoolLimits::for_testing()); let result = mempool.accept_tx(tx_account_v0_valid, None); assert_matches::assert_matches!(result, Ok(())); } @@ -426,7 +448,7 @@ mod test { l1_data_provider: Arc, tx_account_v1_invalid: blockifier::transaction::transaction_execution::Transaction, ) { - let mempool = crate::Mempool::new(backend, l1_data_provider); + let mempool = Mempool::new(backend, l1_data_provider, MempoolLimits::for_testing()); let result = 
mempool.accept_tx(tx_account_v1_invalid, None); assert_matches::assert_matches!(result, Err(crate::Error::Validation(_))); } diff --git a/crates/node/src/cli/chain_config_overrides.rs b/crates/node/src/cli/chain_config_overrides.rs index adb765836..76450b4a2 100644 --- a/crates/node/src/cli/chain_config_overrides.rs +++ b/crates/node/src/cli/chain_config_overrides.rs @@ -46,6 +46,10 @@ pub struct ChainConfigOverridesInner { #[serde(skip_serializing)] #[serde(deserialize_with = "deserialize_private_key")] pub private_key: ZeroingPrivateKey, + pub mempool_tx_limit: usize, + pub mempool_declare_tx_limit: usize, + #[serde(deserialize_with = "deserialize_duration")] + pub mempool_tx_max_age: Duration, } impl ChainConfigOverrideParams { @@ -66,6 +70,9 @@ impl ChainConfigOverrideParams { eth_core_contract_address: chain_config.eth_core_contract_address, eth_gps_statement_verifier: chain_config.eth_gps_statement_verifier, private_key: chain_config.private_key, + mempool_tx_limit: chain_config.mempool_tx_limit, + mempool_declare_tx_limit: chain_config.mempool_declare_tx_limit, + mempool_tx_max_age: chain_config.mempool_tx_max_age, }) .context("Failed to convert ChainConfig to Value")?; @@ -115,6 +122,9 @@ impl ChainConfigOverrideParams { versioned_constants, eth_gps_statement_verifier: chain_config_overrides.eth_gps_statement_verifier, private_key: chain_config_overrides.private_key, + mempool_tx_limit: chain_config_overrides.mempool_tx_limit, + mempool_declare_tx_limit: chain_config_overrides.mempool_declare_tx_limit, + mempool_tx_max_age: chain_config_overrides.mempool_tx_max_age, }) } } diff --git a/crates/node/src/main.rs b/crates/node/src/main.rs index dd23c0e56..2392f7ba5 100644 --- a/crates/node/src/main.rs +++ b/crates/node/src/main.rs @@ -13,7 +13,7 @@ use cli::{NetworkType, RunCmd}; use mc_analytics::Analytics; use mc_block_import::BlockImporter; use mc_db::DatabaseService; -use mc_mempool::{GasPriceProvider, L1DataProvider, Mempool}; +use 
mc_mempool::{GasPriceProvider, L1DataProvider, Mempool, MempoolLimits}; use mc_rpc::providers::{AddTransactionProvider, ForwardToProvider, MempoolAddTxProvider}; use mc_telemetry::{SysInfo, TelemetryService}; use mp_convert::ToFelt; @@ -111,7 +111,11 @@ async fn main() -> anyhow::Result<()> { let l1_data_provider: Arc = Arc::new(l1_gas_setter.clone()); // declare mempool here so that it can be used to process l1->l2 messages in the l1 service - let mempool = Arc::new(Mempool::new(Arc::clone(db_service.backend()), Arc::clone(&l1_data_provider))); + let mempool = Arc::new(Mempool::new( + Arc::clone(db_service.backend()), + Arc::clone(&l1_data_provider), + MempoolLimits::new(&chain_config), + )); let l1_service = L1SyncService::new( &run_cmd.l1_sync_params, diff --git a/crates/primitives/chain_config/src/chain_config.rs b/crates/primitives/chain_config/src/chain_config.rs index b23298c74..35aa8a391 100644 --- a/crates/primitives/chain_config/src/chain_config.rs +++ b/crates/primitives/chain_config/src/chain_config.rs @@ -130,6 +130,14 @@ pub struct ChainConfig { #[serde(skip_serializing)] #[serde(deserialize_with = "deserialize_private_key")] pub private_key: ZeroingPrivateKey, + + /// Transaction limit in the mempool. + pub mempool_tx_limit: usize, + /// Transaction limit in the mempool, we have an additional limit for declare transactions. + pub mempool_declare_tx_limit: usize, + /// Max age of a transaction in the mempool. + #[serde(deserialize_with = "deserialize_duration")] + pub mempool_tx_max_age: Duration, } impl ChainConfig { @@ -236,6 +244,10 @@ impl ChainConfig { ), private_key: ZeroingPrivateKey::default(), + + mempool_tx_limit: 10_000, + mempool_declare_tx_limit: 20, + mempool_tx_max_age: Duration::from_secs(60 * 60), // an hour? 
} } From 92d0f42642f1ec3eacf51615eb5d2d1980478617 Mon Sep 17 00:00:00 2001 From: cchudant Date: Thu, 31 Oct 2024 17:30:37 +0000 Subject: [PATCH 3/9] fix: rpc errors and chain config for mempool limits --- configs/chain_config.example.yaml | 7 +++++++ configs/presets/devnet.yaml | 3 +++ configs/presets/integration.yaml | 3 +++ configs/presets/mainnet.yaml | 3 +++ configs/presets/sepolia.yaml | 3 +++ crates/client/gateway/src/server/error.rs | 2 +- crates/client/rpc/src/errors.rs | 13 ++++++++----- crates/client/rpc/src/providers/mempool.rs | 13 ++++++++++--- crates/primitives/utils/src/parsers.rs | 3 ++- 9 files changed, 40 insertions(+), 10 deletions(-) diff --git a/configs/chain_config.example.yaml b/configs/chain_config.example.yaml index 52215c183..3338363f9 100644 --- a/configs/chain_config.example.yaml +++ b/configs/chain_config.example.yaml @@ -65,3 +65,10 @@ bouncer_config: # /!\ Only used for block production. # Address of the sequencer (0x0 for a full node). sequencer_address: "0x0" + +# Transaction limit in the mempool. +mempool_tx_limit: 10000 +# Transaction limit in the mempool, additional limit for declare transactions. +mempool_declare_tx_limit: 20 +# Max age of a transaction in the mempool. 
+mempool_tx_max_age: "5h" diff --git a/configs/presets/devnet.yaml b/configs/presets/devnet.yaml index 2ef5ab76d..00a580175 100644 --- a/configs/presets/devnet.yaml +++ b/configs/presets/devnet.yaml @@ -29,3 +29,6 @@ bouncer_config: sequencer_address: "0x123" eth_core_contract_address: "0xe7f1725e7734ce288f8367e1bb143e90bb3f0512" eth_gps_statement_verifier: "0xf294781D719D2F4169cE54469C28908E6FA752C1" +mempool_tx_limit: 10000 +mempool_declare_tx_limit: 20 +mempool_tx_max_age: "5h" diff --git a/configs/presets/integration.yaml b/configs/presets/integration.yaml index 981a5ae14..a1ecbdc42 100644 --- a/configs/presets/integration.yaml +++ b/configs/presets/integration.yaml @@ -29,3 +29,6 @@ bouncer_config: sequencer_address: "0x1176a1bd84444c89232ec27754698e5d2e7e1a7f1539f12027f28b23ec9f3d8" eth_core_contract_address: "0x4737c0c1B4D5b1A687B42610DdabEE781152359c" eth_gps_statement_verifier: "0x2046B966994Adcb88D83f467a41b75d64C2a619F" +mempool_tx_limit: 10000 +mempool_declare_tx_limit: 20 +mempool_tx_max_age: "5h" diff --git a/configs/presets/mainnet.yaml b/configs/presets/mainnet.yaml index 716b6a85a..1d0323598 100644 --- a/configs/presets/mainnet.yaml +++ b/configs/presets/mainnet.yaml @@ -29,3 +29,6 @@ bouncer_config: sequencer_address: "0x1176a1bd84444c89232ec27754698e5d2e7e1a7f1539f12027f28b23ec9f3d8" eth_core_contract_address: "0xc662c410C0ECf747543f5bA90660f6ABeBD9C8c4" eth_gps_statement_verifier: "0x47312450B3Ac8b5b8e247a6bB6d523e7605bDb60" +mempool_tx_limit: 10000 +mempool_declare_tx_limit: 20 +mempool_tx_max_age: "5h" diff --git a/configs/presets/sepolia.yaml b/configs/presets/sepolia.yaml index f4aded9cf..da1b25f0b 100644 --- a/configs/presets/sepolia.yaml +++ b/configs/presets/sepolia.yaml @@ -29,3 +29,6 @@ bouncer_config: sequencer_address: "0x1176a1bd84444c89232ec27754698e5d2e7e1a7f1539f12027f28b23ec9f3d8" eth_core_contract_address: "0xE2Bb56ee936fd6433DC0F6e7e3b8365C906AA057" eth_gps_statement_verifier: "0xf294781D719D2F4169cE54469C28908E6FA752C1" 
+mempool_tx_limit: 10000 +mempool_declare_tx_limit: 20 +mempool_tx_max_age: "5h" diff --git a/crates/client/gateway/src/server/error.rs b/crates/client/gateway/src/server/error.rs index f1708e18f..afe6b2dea 100644 --- a/crates/client/gateway/src/server/error.rs +++ b/crates/client/gateway/src/server/error.rs @@ -88,7 +88,7 @@ impl From for GatewayError { "Insufficient account balance".to_string(), )), StarknetRpcApiError::ValidationFailure { error } => { - GatewayError::StarknetError(StarknetError::new(StarknetErrorCode::ValidateFailure, error)) + GatewayError::StarknetError(StarknetError::new(StarknetErrorCode::ValidateFailure, error.into())) } StarknetRpcApiError::CompilationFailed => GatewayError::StarknetError(StarknetError::new( StarknetErrorCode::CompilationFailed, diff --git a/crates/client/rpc/src/errors.rs b/crates/client/rpc/src/errors.rs index 1e5403ba3..4e2eb432f 100644 --- a/crates/client/rpc/src/errors.rs +++ b/crates/client/rpc/src/errors.rs @@ -1,3 +1,5 @@ +use std::borrow::Cow; + use mc_db::MadaraStorageError; use serde_json::json; use starknet_api::StarknetApiError; @@ -19,7 +21,7 @@ pub enum StarknetTransactionExecutionError { #[derive(thiserror::Error, Debug)] pub enum StarknetRpcApiError { #[error("Failed to write transaction")] - FailedToReceiveTxn, + FailedToReceiveTxn { err: Option> }, #[error("Contract not found")] ContractNotFound, #[error("Block not found")] @@ -59,7 +61,7 @@ pub enum StarknetRpcApiError { #[error("Account balance is smaller than the transaction's max_fee")] InsufficientAccountBalance, #[error("Account validation failed")] - ValidationFailure { error: String }, + ValidationFailure { error: Cow<'static, str> }, #[error("Compilation failed")] CompilationFailed, #[error("Contract class size is too large")] @@ -87,7 +89,7 @@ pub enum StarknetRpcApiError { impl From<&StarknetRpcApiError> for i32 { fn from(err: &StarknetRpcApiError) -> Self { match err { - StarknetRpcApiError::FailedToReceiveTxn => 1, + 
StarknetRpcApiError::FailedToReceiveTxn { .. } => 1, StarknetRpcApiError::ContractNotFound => 20, StarknetRpcApiError::BlockNotFound => 24, StarknetRpcApiError::InvalidTxnHash => 25, @@ -128,6 +130,7 @@ impl StarknetRpcApiError { match self { StarknetRpcApiError::ErrUnexpectedError { data } => Some(json!(data)), StarknetRpcApiError::ValidationFailure { error } => Some(json!(error)), + StarknetRpcApiError::FailedToReceiveTxn { err } => err.as_ref().map(|err| json!(err)), StarknetRpcApiError::TxnExecutionError { tx_index, error } => Some(json!({ "transaction_index": tx_index, "execution_error": error, @@ -164,7 +167,7 @@ impl From for jsonrpsee::types::ErrorObjectOwned { impl From for StarknetRpcApiError { fn from(err: StarknetError) -> Self { match err { - StarknetError::FailedToReceiveTransaction => StarknetRpcApiError::FailedToReceiveTxn, + StarknetError::FailedToReceiveTransaction => StarknetRpcApiError::FailedToReceiveTxn { err: None }, StarknetError::ContractNotFound => StarknetRpcApiError::ContractNotFound, StarknetError::BlockNotFound => StarknetRpcApiError::BlockNotFound, StarknetError::InvalidTransactionIndex => StarknetRpcApiError::InvalidTxnIndex, @@ -179,7 +182,7 @@ impl From for StarknetRpcApiError { StarknetError::InvalidTransactionNonce => StarknetRpcApiError::InvalidTxnNonce, StarknetError::InsufficientMaxFee => StarknetRpcApiError::InsufficientMaxFee, StarknetError::InsufficientAccountBalance => StarknetRpcApiError::InsufficientAccountBalance, - StarknetError::ValidationFailure(error) => StarknetRpcApiError::ValidationFailure { error }, + StarknetError::ValidationFailure(error) => StarknetRpcApiError::ValidationFailure { error: error.into() }, StarknetError::CompilationFailed => StarknetRpcApiError::CompilationFailed, StarknetError::ContractClassSizeIsTooLarge => StarknetRpcApiError::ContractClassSizeTooLarge, StarknetError::NonAccount => StarknetRpcApiError::NonAccount, diff --git a/crates/client/rpc/src/providers/mempool.rs 
b/crates/client/rpc/src/providers/mempool.rs index 58721e082..fb03f1098 100644 --- a/crates/client/rpc/src/providers/mempool.rs +++ b/crates/client/rpc/src/providers/mempool.rs @@ -27,9 +27,16 @@ impl From for StarknetRpcApiError { mc_mempool::Error::InnerMempool(mc_mempool::TxInsersionError::DuplicateTxn) => { StarknetRpcApiError::DuplicateTxn } - mc_mempool::Error::Validation(err) => StarknetRpcApiError::ValidationFailure { error: format!("{err:#}") }, - mc_mempool::Error::InnerMempool(err) => { - StarknetRpcApiError::ValidationFailure { error: format!("{err:#}") } + mc_mempool::Error::InnerMempool(mc_mempool::TxInsersionError::Limit(limit)) => { + StarknetRpcApiError::FailedToReceiveTxn { err: Some(format!("{}", limit).into()) } + } + mc_mempool::Error::InnerMempool(mc_mempool::TxInsersionError::NonceConflict) => { + StarknetRpcApiError::FailedToReceiveTxn { + err: Some("A transaction with this nonce and sender address already exists".into()), + } + } + mc_mempool::Error::Validation(err) => { + StarknetRpcApiError::ValidationFailure { error: format!("{err:#}").into() } } mc_mempool::Error::Exec(err) => { StarknetRpcApiError::TxnExecutionError { tx_index: 0, error: format!("{err:#}") } diff --git a/crates/primitives/utils/src/parsers.rs b/crates/primitives/utils/src/parsers.rs index 10890ffd9..e467f4741 100644 --- a/crates/primitives/utils/src/parsers.rs +++ b/crates/primitives/utils/src/parsers.rs @@ -37,7 +37,8 @@ pub fn parse_duration(s: &str) -> anyhow::Result { "ms" => Ok(Duration::from_millis(value)), "s" => Ok(Duration::from_secs(value)), "min" => Ok(Duration::from_secs(value * 60)), - _ => bail!("Invalid duration suffix: {}. Expected 'ms', 's', or 'min'.", suffix), + "h" => Ok(Duration::from_secs(value * 60 * 60)), + _ => bail!("Invalid duration suffix: {}. 
Expected 'ms', 's', 'min' or 'h'.", suffix), } } From 8c48aab9d4c1007d59e435dc7f10bf7f8a152a6b Mon Sep 17 00:00:00 2001 From: cchudant Date: Tue, 5 Nov 2024 11:21:53 +0000 Subject: [PATCH 4/9] feat(mempool,block_production): save mempool txs to db, continue pending block --- CHANGELOG.md | 2 + Cargo.lock | 4 +- crates/client/analytics/src/lib.rs | 2 +- .../client/block_import/src/pre_validate.rs | 2 + .../src/tests/block_import_utils.rs | 3 + crates/client/block_import/src/types.rs | 6 +- .../client/block_import/src/verify_apply.rs | 12 +- .../block_production/src/close_block.rs | 4 + .../src/finalize_execution_state.rs | 65 +++- crates/client/block_production/src/lib.rs | 150 +++++--- crates/client/db/src/block_db.rs | 25 +- crates/client/db/src/lib.rs | 32 +- crates/client/db/src/mempool_db.rs | 77 ++++ crates/client/db/src/storage_updates.rs | 9 +- crates/client/db/src/tests/test_block.rs | 32 +- crates/client/devnet/src/lib.rs | 38 +- crates/client/eth/src/l1_messaging.rs | 18 +- crates/client/exec/Cargo.toml | 2 + crates/client/exec/src/lib.rs | 1 + crates/client/exec/src/transaction.rs | 79 +++++ crates/client/mempool/src/inner/mod.rs | 25 +- .../client/mempool/src/inner/nonce_chain.rs | 4 +- crates/client/mempool/src/lib.rs | 140 +++++--- crates/client/mempool/src/tx.rs | 129 +++++++ crates/client/rpc/src/errors.rs | 4 +- crates/client/rpc/src/test_utils.rs | 8 + .../methods/read/block_hash_and_number.rs | 4 + .../v0_7_1/methods/read/estimate_fee.rs | 4 +- .../methods/read/get_block_with_receipts.rs | 1 + .../methods/trace/simulate_transactions.rs | 4 +- .../rpc/src/versions/v0_8_0/methods/ws/lib.rs | 1 + crates/client/sync/src/fetch/fetchers.rs | 1 + crates/node/src/cli/chain_config_overrides.rs | 2 +- crates/node/src/cli/mod.rs | 7 +- crates/node/src/main.rs | 6 +- crates/node/src/service/sync.rs | 2 +- crates/primitives/class/src/lib.rs | 2 +- .../src/broadcasted_to_blockifier.rs | 332 +++++++++--------- .../src/from_broadcasted_transaction.rs | 122 
+++---- crates/primitives/transactions/src/lib.rs | 23 +- crates/tests/src/lib.rs | 222 +++++++++++- crates/tests/test_devnet.yaml | 34 ++ scripts/e2e-coverage.sh | 5 +- scripts/e2e-tests.sh | 4 +- 44 files changed, 1183 insertions(+), 466 deletions(-) create mode 100644 crates/client/db/src/mempool_db.rs create mode 100644 crates/client/exec/src/transaction.rs create mode 100644 crates/client/mempool/src/tx.rs create mode 100644 crates/tests/test_devnet.yaml diff --git a/CHANGELOG.md b/CHANGELOG.md index 2973c77d9..7deb81b81 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ ## Next release +- feat(block_production): continue pending block on restart +- feat(mempool): mempool transaction saving on db - feat(mempool): mempool transaction limits - feat(confg): added chain config template and fgw example - feat(v0.8.0-rc0): starknet_subscribeNewHeads diff --git a/Cargo.lock b/Cargo.lock index 409bb85a0..03464c67c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5616,7 +5616,7 @@ dependencies = [ "rstest 0.18.2", "serde_json", "starknet-core 0.11.0", - "starknet-types-core", + "starknet-types-core 0.1.5", "starknet_api", "thiserror", "tokio", @@ -5781,6 +5781,8 @@ dependencies = [ "mp-chain-config", "mp-class", "mp-convert", + "mp-receipt", + "mp-transactions", "opentelemetry", "opentelemetry-appender-tracing", "opentelemetry-otlp", diff --git a/crates/client/analytics/src/lib.rs b/crates/client/analytics/src/lib.rs index f806c4da0..42aadaa03 100644 --- a/crates/client/analytics/src/lib.rs +++ b/crates/client/analytics/src/lib.rs @@ -42,7 +42,7 @@ impl Analytics { let tracing_subscriber = tracing_subscriber::registry() .with(tracing_subscriber::filter::LevelFilter::from_level(self.log_level)) - .with(tracing_subscriber::fmt::layer().event_format(custom_formatter)); + .with(tracing_subscriber::fmt::layer().with_writer(std::io::stderr).event_format(custom_formatter)); if self.collection_endpoint.is_none() { tracing_subscriber.init(); diff --git 
a/crates/client/block_import/src/pre_validate.rs b/crates/client/block_import/src/pre_validate.rs index ef6657476..456a01cb3 100644 --- a/crates/client/block_import/src/pre_validate.rs +++ b/crates/client/block_import/src/pre_validate.rs @@ -77,6 +77,7 @@ pub fn pre_validate_inner( unverified_global_state_root: block.commitments.global_state_root, unverified_block_hash: block.commitments.block_hash, unverified_block_number: block.unverified_block_number, + visited_segments: block.visited_segments, }) } @@ -97,6 +98,7 @@ pub fn pre_validate_pending_inner( state_diff: block.state_diff, receipts: block.receipts, converted_classes, + visited_segments: block.visited_segments, }) } diff --git a/crates/client/block_import/src/tests/block_import_utils.rs b/crates/client/block_import/src/tests/block_import_utils.rs index d1e2a1794..5f80a30b6 100644 --- a/crates/client/block_import/src/tests/block_import_utils.rs +++ b/crates/client/block_import/src/tests/block_import_utils.rs @@ -104,6 +104,7 @@ pub fn create_dummy_block() -> PreValidatedBlock { receipts: vec![], state_diff: StateDiff::default(), converted_classes: Default::default(), + visited_segments: None, } } @@ -128,6 +129,7 @@ pub fn create_dummy_unverified_full_block() -> UnverifiedFullBlock { declared_classes: vec![], commitments: UnverifiedCommitments::default(), trusted_converted_classes: vec![], + visited_segments: None, } } @@ -151,5 +153,6 @@ pub fn create_dummy_pending_block() -> PreValidatedPendingBlock { receipts: vec![], state_diff: StateDiff::default(), converted_classes: Default::default(), + visited_segments: None, } } diff --git a/crates/client/block_import/src/types.rs b/crates/client/block_import/src/types.rs index 30ca85750..28e1e851c 100644 --- a/crates/client/block_import/src/types.rs +++ b/crates/client/block_import/src/types.rs @@ -142,13 +142,14 @@ pub struct UnverifiedCommitments { } /// An unverified pending full block as input for the block import pipeline. 
-#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, Eq, PartialEq, Default, Serialize, Deserialize)] pub struct UnverifiedPendingFullBlock { pub header: UnverifiedHeader, pub state_diff: StateDiff, pub transactions: Vec, pub receipts: Vec, pub declared_classes: Vec, + pub visited_segments: Option)>>, } /// An unverified full block as input for the block import pipeline. @@ -165,6 +166,7 @@ pub struct UnverifiedFullBlock { #[serde(skip)] pub trusted_converted_classes: Vec, pub commitments: UnverifiedCommitments, + pub visited_segments: Option)>>, } // Pre-validate outputs. @@ -192,6 +194,7 @@ pub struct PreValidatedBlock { pub unverified_global_state_root: Option, pub unverified_block_hash: Option, pub unverified_block_number: Option, + pub visited_segments: Option)>>, } /// Output of the [`crate::pre_validate`] step. @@ -202,6 +205,7 @@ pub struct PreValidatedPendingBlock { pub state_diff: StateDiff, pub receipts: Vec, pub converted_classes: Vec, + pub visited_segments: Option)>>, } // Verify-apply output. 
diff --git a/crates/client/block_import/src/verify_apply.rs b/crates/client/block_import/src/verify_apply.rs index abad5fc4c..452093ced 100644 --- a/crates/client/block_import/src/verify_apply.rs +++ b/crates/client/block_import/src/verify_apply.rs @@ -100,6 +100,7 @@ pub fn verify_apply_inner( }, block.state_diff, block.converted_classes, + block.visited_segments, ) .map_err(make_db_error("storing block in db"))?; @@ -143,6 +144,7 @@ pub fn verify_apply_pending_inner( }, block.state_diff, block.converted_classes, + block.visited_segments, ) .map_err(make_db_error("storing block in db"))?; @@ -406,7 +408,7 @@ mod verify_apply_tests { if populate_db { let header = create_dummy_header(); let pending_block = finalized_block_zero(header); - backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![]).unwrap(); + backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![], None).unwrap(); } // Create a validation context with the specified ignore_block_order flag @@ -660,7 +662,7 @@ mod verify_apply_tests { let mut header = create_dummy_header(); header.block_number = 0; let pending_block = finalized_block_zero(header); - backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![]).unwrap(); + backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![], None).unwrap(); assert_eq!(backend.get_latest_block_n().unwrap(), Some(0)); @@ -686,7 +688,7 @@ mod verify_apply_tests { let mut header = create_dummy_header(); header.block_number = 0; let pending_block = finalized_block_zero(header); - backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![]).unwrap(); + backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![], None).unwrap(); assert_eq!(backend.get_latest_block_n().unwrap(), Some(0)); @@ -722,7 +724,7 @@ mod verify_apply_tests { let mut genesis_header = create_dummy_header(); genesis_header.block_number = 0; let genesis_block = 
finalized_block_zero(genesis_header.clone()); - backend.store_block(genesis_block, finalized_state_diff_zero(), vec![]).unwrap(); + backend.store_block(genesis_block, finalized_state_diff_zero(), vec![], None).unwrap(); assert_eq!(backend.get_latest_block_n().unwrap(), Some(0)); @@ -768,7 +770,7 @@ mod verify_apply_tests { let mut genesis_header = create_dummy_header(); genesis_header.block_number = 0; let genesis_block = finalized_block_zero(genesis_header.clone()); - backend.store_block(genesis_block, finalized_state_diff_zero(), vec![]).unwrap(); + backend.store_block(genesis_block, finalized_state_diff_zero(), vec![], None).unwrap(); assert_eq!(backend.get_latest_block_n().unwrap(), Some(0)); diff --git a/crates/client/block_production/src/close_block.rs b/crates/client/block_production/src/close_block.rs index c828c7b86..9a64f8999 100644 --- a/crates/client/block_production/src/close_block.rs +++ b/crates/client/block_production/src/close_block.rs @@ -6,6 +6,8 @@ use mp_class::ConvertedClass; use mp_state_update::StateDiff; use starknet_api::core::ChainId; +use crate::VisitedSegments; + /// Close the block (convert from pending to closed), and store to db. This is delegated to the block import module. 
#[tracing::instrument(skip(importer, state_diff, declared_classes), fields(module = "BlockProductionTask"))] pub async fn close_block( @@ -15,6 +17,7 @@ pub async fn close_block( chain_id: ChainId, block_number: u64, declared_classes: Vec, + visited_segments: VisitedSegments, ) -> Result { let validation = BlockValidationContext::new(chain_id).trust_transaction_hashes(true); @@ -48,6 +51,7 @@ pub async fn close_block( receipts: inner.receipts, trusted_converted_classes: declared_classes, commitments: Default::default(), // the block importer will compute the commitments for us + visited_segments: Some(visited_segments), ..Default::default() }, validation.clone(), diff --git a/crates/client/block_production/src/finalize_execution_state.rs b/crates/client/block_production/src/finalize_execution_state.rs index a3112b9ff..c8cc55081 100644 --- a/crates/client/block_production/src/finalize_execution_state.rs +++ b/crates/client/block_production/src/finalize_execution_state.rs @@ -1,7 +1,7 @@ use std::collections::{hash_map, HashMap}; use blockifier::{ - blockifier::transaction_executor::{TransactionExecutor, VisitedSegmentsMapping}, + blockifier::transaction_executor::{TransactionExecutor, BLOCK_STATE_ACCESS_ERR}, bouncer::BouncerWeights, state::{cached_state::StateMaps, state_api::StateReader}, transaction::errors::TransactionExecutionError, @@ -13,9 +13,59 @@ use mp_state_update::{ ContractStorageDiffItem, DeclaredClassItem, DeployedContractItem, NonceUpdate, ReplacedClassItem, StateDiff, StorageEntry, }; -use starknet_api::core::ContractAddress; +use starknet_api::core::{ClassHash, CompiledClassHash, ContractAddress, Nonce}; -use crate::Error; +use crate::{Error, VisitedSegments}; + +#[derive(Debug, thiserror::Error)] +#[error("Error converting state diff to state map")] +pub struct StateDiffToStateMapError; + +pub fn state_diff_to_state_map(diff: StateDiff) -> Result { + Ok(StateMaps { + nonces: diff + .nonces + .into_iter() + .map(|entry| { + 
Ok((entry.contract_address.try_into().map_err(|_| StateDiffToStateMapError)?, Nonce(entry.nonce))) + }) + .collect::>()?, + class_hashes: diff + .deployed_contracts + .into_iter() + .map(|entry| { + Ok((entry.address.try_into().map_err(|_| StateDiffToStateMapError)?, ClassHash(entry.class_hash))) + }) + .chain(diff.replaced_classes.into_iter().map(|entry| { + Ok(( + entry.contract_address.try_into().map_err(|_| StateDiffToStateMapError)?, + ClassHash(entry.class_hash), + )) + })) + .collect::>()?, + storage: diff + .storage_diffs + .into_iter() + .flat_map(|d| { + d.storage_entries.into_iter().map(move |e| { + Ok(( + ( + d.address.try_into().map_err(|_| StateDiffToStateMapError)?, + e.key.try_into().map_err(|_| StateDiffToStateMapError)?, + ), + e.value, + )) + }) + }) + .collect::>()?, + declared_contracts: diff.declared_classes.iter().map(|d| (ClassHash(d.class_hash), true)).collect(), + compiled_class_hashes: diff + .declared_classes + .into_iter() + .map(|d| (ClassHash(d.class_hash), CompiledClassHash(d.compiled_class_hash))) + .collect(), + }) +} fn state_map_to_state_diff( backend: &MadaraBackend, @@ -96,10 +146,7 @@ fn state_map_to_state_diff( }) } -pub const BLOCK_STATE_ACCESS_ERR: &str = "Error: The block state should be `Some`."; -fn get_visited_segments( - tx_executor: &mut TransactionExecutor, -) -> Result { +fn get_visited_segments(tx_executor: &mut TransactionExecutor) -> Result { let visited_segments = tx_executor .block_state .as_ref() @@ -113,7 +160,7 @@ fn get_visited_segments( .expect(BLOCK_STATE_ACCESS_ERR) .get_compiled_contract_class(*class_hash) .map_err(TransactionExecutionError::StateError)?; - Ok((*class_hash, contract_class.get_visited_segments(class_visited_pcs)?)) + Ok((class_hash.to_felt(), contract_class.get_visited_segments(class_visited_pcs)?)) }) .collect::>()?; @@ -125,7 +172,7 @@ pub(crate) fn finalize_execution_state( tx_executor: &mut TransactionExecutor, backend: &MadaraBackend, on_top_of: &Option, -) -> Result<(StateDiff, 
VisitedSegmentsMapping, BouncerWeights), Error> { +) -> Result<(StateDiff, VisitedSegments, BouncerWeights), Error> { let state_map = tx_executor .block_state .as_mut() diff --git a/crates/client/block_production/src/lib.rs b/crates/client/block_production/src/lib.rs index 8a8fbd658..00b03918d 100644 --- a/crates/client/block_production/src/lib.rs +++ b/crates/client/block_production/src/lib.rs @@ -17,22 +17,27 @@ use crate::close_block::close_block; use crate::metrics::BlockProductionMetrics; -use blockifier::blockifier::transaction_executor::TransactionExecutor; -use blockifier::bouncer::{Bouncer, BouncerWeights, BuiltinCount}; +use blockifier::blockifier::transaction_executor::{TransactionExecutor, BLOCK_STATE_ACCESS_ERR}; +use blockifier::bouncer::{BouncerWeights, BuiltinCount}; +use blockifier::state::state_api::UpdatableState; use blockifier::transaction::errors::TransactionExecutionError; +use finalize_execution_state::{state_diff_to_state_map, StateDiffToStateMapError}; use mc_block_import::{BlockImportError, BlockImporter}; +use mc_db::db_block_id::DbBlockId; use mc_db::{MadaraBackend, MadaraStorageError}; use mc_exec::{BlockifierStateAdapter, ExecutionContext}; use mc_mempool::header::make_pending_header; use mc_mempool::{L1DataProvider, MempoolProvider}; -use mp_block::{BlockId, BlockTag, MadaraPendingBlock}; -use mp_class::ConvertedClass; +use mp_block::{BlockId, BlockTag, MadaraMaybePendingBlockInfo, MadaraPendingBlock}; +use mp_class::compile::ClassCompilationError; +use mp_class::{ConvertedClass, LegacyConvertedClass, SierraConvertedClass}; use mp_convert::ToFelt; use mp_receipt::from_blockifier_execution_info; use mp_state_update::{ContractStorageDiffItem, StateDiff, StorageEntry}; use mp_transactions::TransactionWithHash; use mp_utils::graceful_shutdown; use opentelemetry::KeyValue; +use starknet_api::core::ClassHash; use starknet_types_core::felt::Felt; use std::borrow::Cow; use std::collections::VecDeque; @@ -44,6 +49,8 @@ mod close_block; mod 
finalize_execution_state; pub mod metrics; +pub type VisitedSegments = Vec<(Felt, Vec)>; + #[derive(Default, Clone)] struct ContinueBlockStats { /// Number of batches executed before reaching the bouncer capacity. @@ -68,6 +75,10 @@ pub enum Error { Import(#[from] mc_block_import::BlockImportError), #[error("Unexpected error: {0:#}")] Unexpected(Cow<'static, str>), + #[error("Class compilation error when continuing the pending block: {0:#}")] + PendingClassCompilationError(#[from] ClassCompilationError), + #[error("State diff error when continuing the pending block: {0:#}")] + PendingStateDiff(#[from] StateDiffToStateMapError), } /// The block production task consumes transactions from the mempool in batches. /// This is to allow optimistic concurrency. However, the block may get full during batch execution, @@ -94,10 +105,10 @@ impl BlockProductionTask { self.current_pending_tick = n; } - #[tracing::instrument( - skip(backend, importer, mempool, l1_data_provider, metrics), - fields(module = "BlockProductionTask") - )] + // #[tracing::instrument( + // skip(backend, importer, mempool, l1_data_provider, metrics), + // fields(module = "BlockProductionTask") + // )] pub fn new( backend: Arc, importer: Arc, @@ -105,24 +116,78 @@ impl BlockProductionTask { metrics: BlockProductionMetrics, l1_data_provider: Arc, ) -> Result { - let parent_block_hash = backend - .get_block_hash(&BlockId::Tag(BlockTag::Latest))? - .unwrap_or(/* genesis block's parent hash */ Felt::ZERO); - let pending_block = MadaraPendingBlock::new_empty(make_pending_header( - parent_block_hash, - backend.chain_config(), - l1_data_provider.as_ref(), - )); - // NB: we cannot continue a previously started pending block yet. 
- // let pending_block = backend.get_or_create_pending_block(|| CreatePendingBlockExtraInfo { - // l1_gas_price: l1_data_provider.get_gas_prices(), - // l1_da_mode: l1_data_provider.get_da_mode(), - // })?; + let (pending_block, state_diff, pcs) = match backend.get_block(&DbBlockId::Pending)? { + Some(pending) => { + let MadaraMaybePendingBlockInfo::Pending(info) = pending.info else { + return Err(Error::Unexpected("Get a pending block".into())); + }; + let pending_state_update = backend.get_pending_block_state_update()?; + (MadaraPendingBlock { info, inner: pending.inner }, pending_state_update, Default::default()) + } + None => { + let parent_block_hash = backend + .get_block_hash(&BlockId::Tag(BlockTag::Latest))? + .unwrap_or(/* genesis block's parent hash */ Felt::ZERO); + + ( + MadaraPendingBlock::new_empty(make_pending_header( + parent_block_hash, + backend.chain_config(), + l1_data_provider.as_ref(), + )), + StateDiff::default(), + Default::default(), + ) + } + }; + + let declared_classes: Vec = state_diff + .declared_classes + .iter() + .map(|item| { + let class_info = backend.get_class_info(&DbBlockId::Pending, &item.class_hash)?.ok_or_else(|| { + Error::Unexpected(format!("No class info for declared class {:#x}", item.class_hash).into()) + })?; + let converted_class = match class_info { + mp_class::ClassInfo::Sierra(info) => { + let compiled = + backend.get_sierra_compiled(&DbBlockId::Pending, &item.class_hash)?.ok_or_else(|| { + Error::Unexpected( + format!("No compiled class for declared class {:#x}", item.class_hash).into(), + ) + })?; + let compiled = Arc::new(compiled); + ConvertedClass::Sierra(SierraConvertedClass { class_hash: item.class_hash, info, compiled }) + } + mp_class::ClassInfo::Legacy(info) => { + ConvertedClass::Legacy(LegacyConvertedClass { class_hash: item.class_hash, info }) + } + }; + + Ok(converted_class) + }) + .collect::>()?; + + let class_hash_to_class = declared_classes + .iter() + .map(|c| { + Ok(( + ClassHash(c.class_hash()), 
+ match c { + ConvertedClass::Legacy(class) => class.info.contract_class.to_blockifier_class()?, + ConvertedClass::Sierra(class) => class.compiled.to_blockifier_class()?, + }, + )) + }) + .collect::>()?; + let mut executor = ExecutionContext::new_in_block(Arc::clone(&backend), &pending_block.info.clone().into())?.tx_executor(); + let block_state = + executor.block_state.as_mut().expect("Block state can not be None unless we take ownership of it"); - let bouncer_config = backend.chain_config().bouncer_config.clone(); - executor.bouncer = Bouncer::new(bouncer_config); + // Apply pending state + block_state.apply_writes(&state_diff_to_state_map(state_diff)?, &class_hash_to_class, &pcs); Ok(Self { importer, @@ -131,14 +196,17 @@ impl BlockProductionTask { executor, current_pending_tick: 0, block: pending_block, - declared_classes: vec![], + declared_classes, l1_data_provider, metrics, }) } #[tracing::instrument(skip(self), fields(module = "BlockProductionTask"))] - fn continue_block(&mut self, bouncer_cap: BouncerWeights) -> Result<(StateDiff, ContinueBlockStats), Error> { + fn continue_block( + &mut self, + bouncer_cap: BouncerWeights, + ) -> Result<(StateDiff, VisitedSegments, ContinueBlockStats), Error> { let mut stats = ContinueBlockStats::default(); self.executor.bouncer.bouncer_config.block_max_capacity = bouncer_cap; @@ -150,9 +218,7 @@ impl BlockProductionTask { let mut executed_txs = Vec::with_capacity(batch_size); // Cloning transactions: That's a lot of cloning, but we're kind of forced to do that because blockifier takes - // a `&[Transaction]` slice. In addition, declare transactions have their class behind an Arc. As a result, the - // `Transaction` struct, as of blockifier 0.8.0-rc.3, is only 320 bytes. So we'll consider that cloning is - // fine! + // a `&[Transaction]` slice. In addition, declare transactions have their class behind an Arc. loop { // Take transactions from mempool. 
let to_take = batch_size.saturating_sub(txs_to_process.len()); @@ -180,6 +246,10 @@ impl BlockProductionTask { for exec_result in all_results { let mut mempool_tx = txs_to_process.pop_front().ok_or_else(|| Error::Unexpected("Vector length mismatch".into()))?; + + // Remove tx from mempool + self.backend.remove_mempool_transaction(&mempool_tx.tx_hash().to_felt())?; + match exec_result { Ok(execution_info) => { // Reverted transactions appear here as Ok too. @@ -223,16 +293,10 @@ impl BlockProductionTask { } } - let on_top_of = self - .executor - .block_state - .as_ref() - .expect("Block state can not be None unless we take ownership of it") - .state - .on_top_of_block_id; + let on_top_of = self.executor.block_state.as_ref().expect(BLOCK_STATE_ACCESS_ERR).state.on_top_of_block_id; // TODO: save visited segments for SNOS. - let (state_diff, _visited_segments, _weights) = finalize_execution_state::finalize_execution_state( + let (state_diff, visited_segments, _weights) = finalize_execution_state::finalize_execution_state( &executed_txs, &mut self.executor, &self.backend, @@ -250,7 +314,7 @@ impl BlockProductionTask { stats.n_re_added_to_mempool ); - Ok((state_diff, stats)) + Ok((state_diff, visited_segments, stats)) } /// Each "tick" of the block time updates the pending block but only with the appropriate fraction of the total bouncer capacity. @@ -297,7 +361,7 @@ impl BlockProductionTask { }; let start_time = Instant::now(); - let (state_diff, stats) = self.continue_block(bouncer_cap)?; + let (state_diff, visited_segments, stats) = self.continue_block(bouncer_cap)?; if stats.n_added_to_block > 0 { tracing::info!( "🧮 Executed and added {} transaction(s) to the pending block at height {} - {:?}", @@ -309,7 +373,12 @@ impl BlockProductionTask { // Store pending block // todo, prefer using the block import pipeline? 
- self.backend.store_block(self.block.clone().into(), state_diff, self.declared_classes.clone())?; + self.backend.store_block( + self.block.clone().into(), + state_diff, + self.declared_classes.clone(), + Some(visited_segments), + )?; // do not forget to flush :) self.backend .maybe_flush(true) @@ -326,7 +395,7 @@ impl BlockProductionTask { // Complete the block with full bouncer capacity. let start_time = Instant::now(); - let (mut new_state_diff, _n_executed) = + let (mut new_state_diff, visited_segments, _stats) = self.continue_block(self.backend.chain_config().bouncer_config.block_max_capacity)?; // SNOS requirement: For blocks >= 10, the hash of the block 10 blocks prior @@ -373,6 +442,7 @@ impl BlockProductionTask { self.backend.chain_config().chain_id.clone(), block_n, declared_classes, + visited_segments, ) .await?; self.block.info.header.parent_block_hash = import_result.block_hash; // fix temp parent block hash for new pending :) diff --git a/crates/client/db/src/block_db.rs b/crates/client/db/src/block_db.rs index 44d368140..0414a3365 100644 --- a/crates/client/db/src/block_db.rs +++ b/crates/client/db/src/block_db.rs @@ -23,10 +23,13 @@ struct ChainInfo { const ROW_CHAIN_INFO: &[u8] = b"chain_info"; const ROW_PENDING_INFO: &[u8] = b"pending_info"; const ROW_PENDING_STATE_UPDATE: &[u8] = b"pending_state_update"; +const ROW_PENDING_SEGMENTS: &[u8] = b"pending_segments"; const ROW_PENDING_INNER: &[u8] = b"pending"; const ROW_SYNC_TIP: &[u8] = b"sync_tip"; const ROW_L1_LAST_CONFIRMED_BLOCK: &[u8] = b"l1_last"; +pub type VisitedSegments = Vec<(Felt, Vec)>; + #[derive(Debug, PartialEq, Eq)] pub struct TxIndex(pub u64); @@ -188,6 +191,17 @@ impl MadaraBackend { Ok(res) } + #[tracing::instrument(skip(self), fields(module = "BlockDB"))] + pub fn get_pending_block_segments(&self) -> Result> { + let col = self.db.get_column(Column::BlockStorageMeta); + let Some(res) = self.db.get_cf(&col, ROW_PENDING_SEGMENTS)? 
else { + // See pending block quirk + return Ok(None); + }; + let res = Some(bincode::deserialize(&res)?); + Ok(res) + } + #[tracing::instrument(skip(self), fields(module = "BlockDB"))] pub fn get_l1_last_confirmed_block(&self) -> Result> { let col = self.db.get_column(Column::BlockStorageMeta); @@ -199,12 +213,20 @@ impl MadaraBackend { // DB write #[tracing::instrument(skip(self), fields(module = "BlockDB"))] - pub(crate) fn block_db_store_pending(&self, block: &MadaraPendingBlock, state_update: &StateDiff) -> Result<()> { + pub(crate) fn block_db_store_pending( + &self, + block: &MadaraPendingBlock, + state_update: &StateDiff, + visited_segments: Option)>>, + ) -> Result<()> { let mut tx = WriteBatchWithTransaction::default(); let col = self.db.get_column(Column::BlockStorageMeta); tx.put_cf(&col, ROW_PENDING_INFO, bincode::serialize(&block.info)?); tx.put_cf(&col, ROW_PENDING_INNER, bincode::serialize(&block.inner)?); tx.put_cf(&col, ROW_PENDING_STATE_UPDATE, bincode::serialize(&state_update)?); + if let Some(visited_segments) = visited_segments { + tx.put_cf(&col, ROW_PENDING_SEGMENTS, bincode::serialize(&visited_segments)?); + } let mut writeopts = WriteOptions::new(); writeopts.disable_wal(true); self.db.write_opt(tx, &writeopts)?; @@ -218,6 +240,7 @@ impl MadaraBackend { tx.delete_cf(&col, ROW_PENDING_INFO); tx.delete_cf(&col, ROW_PENDING_INNER); tx.delete_cf(&col, ROW_PENDING_STATE_UPDATE); + tx.delete_cf(&col, ROW_PENDING_SEGMENTS); let mut writeopts = WriteOptions::new(); writeopts.disable_wal(true); self.db.write_opt(tx, &writeopts)?; diff --git a/crates/client/db/src/lib.rs b/crates/client/db/src/lib.rs index 1a592ddf7..6f5dfa1c4 100644 --- a/crates/client/db/src/lib.rs +++ b/crates/client/db/src/lib.rs @@ -10,7 +10,7 @@ use mp_utils::service::Service; use rocksdb::backup::{BackupEngine, BackupEngineOptions}; use rocksdb::{ BoundColumnFamily, ColumnFamilyDescriptor, DBCompressionType, DBWithThreadMode, Env, FlushOptions, MultiThreaded, - Options, 
SliceTransform, + Options, SliceTransform, WriteOptions, }; use starknet_types_core::hash::{Pedersen, Poseidon, StarkHash}; use std::path::{Path, PathBuf}; @@ -28,6 +28,7 @@ pub mod db_metrics; pub mod devnet_db; mod error; pub mod l1_db; +pub mod mempool_db; pub mod storage_updates; pub mod tests; @@ -99,7 +100,7 @@ fn spawn_backup_db_task( if restore_from_latest_backup { tracing::info!("⏳ Restoring latest backup..."); tracing::debug!("restore path is {db_path:?}"); - fs::create_dir_all(db_path).with_context(|| format!("creating directories {:?}", db_path))?; + fs::create_dir_all(db_path).with_context(|| format!("Creating parent directories {:?}", db_path))?; let opts = rocksdb::backup::RestoreOptions::default(); engine.restore_from_latest_backup(db_path, db_path, &opts).context("Restoring database")?; @@ -178,6 +179,8 @@ pub enum Column { /// Devnet: stores the private keys for the devnet predeployed contracts Devnet, + + MempoolTransactions, } impl fmt::Debug for Column { @@ -226,6 +229,7 @@ impl Column { PendingContractToNonces, PendingContractStorage, Devnet, + MempoolTransactions, ] }; pub const NUM_COLUMNS: usize = Self::ALL.len(); @@ -263,6 +267,7 @@ impl Column { PendingContractToNonces => "pending_contract_to_nonces", PendingContractStorage => "pending_contract_storage", Devnet => "devnet", + MempoolTransactions => "mempool_transactions", } } @@ -306,8 +311,13 @@ impl DatabaseExt for DB { } } +fn make_write_opt_no_wal() -> WriteOptions { + let mut opts = WriteOptions::new(); + opts.disable_wal(true); + opts +} + /// Madara client database backend singleton. 
-#[derive(Debug)] pub struct MadaraBackend { backup_handle: Option>, db: Arc, @@ -315,10 +325,24 @@ pub struct MadaraBackend { chain_config: Arc, db_metrics: DbMetrics, sender_block_info: tokio::sync::broadcast::Sender, + write_opt_no_wal: WriteOptions, #[cfg(feature = "testing")] _temp_dir: Option, } +impl fmt::Debug for MadaraBackend { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MadaraBackend") + .field("backup_handle", &self.backup_handle) + .field("db", &self.db) + .field("last_flush_time", &self.last_flush_time) + .field("chain_config", &self.chain_config) + .field("db_metrics", &self.db_metrics) + .field("sender_block_info", &self.sender_block_info) + .finish() + } +} + pub struct DatabaseService { handle: Arc, } @@ -391,6 +415,7 @@ impl MadaraBackend { chain_config, db_metrics: DbMetrics::register().unwrap(), sender_block_info: tokio::sync::broadcast::channel(100).0, + write_opt_no_wal: make_write_opt_no_wal(), _temp_dir: Some(temp_dir), }) } @@ -434,6 +459,7 @@ impl MadaraBackend { last_flush_time: Default::default(), chain_config: Arc::clone(&chain_config), sender_block_info: tokio::sync::broadcast::channel(100).0, + write_opt_no_wal: make_write_opt_no_wal(), #[cfg(feature = "testing")] _temp_dir: None, }); diff --git a/crates/client/db/src/mempool_db.rs b/crates/client/db/src/mempool_db.rs new file mode 100644 index 000000000..d55dbbabd --- /dev/null +++ b/crates/client/db/src/mempool_db.rs @@ -0,0 +1,77 @@ +use crate::DatabaseExt; +use crate::{Column, MadaraBackend, MadaraStorageError}; +use mp_class::ConvertedClass; +use rocksdb::IteratorMode; +use serde::{Deserialize, Serialize}; +use starknet_core::types::Felt; + +type Result = std::result::Result; + +#[derive(Serialize, Deserialize)] +pub struct SavedTransaction { + pub tx: mp_transactions::Transaction, + pub paid_fee_on_l1: Option, + pub contract_address: Option, + pub only_query: bool, + pub arrived_at: u128, +} + +#[derive(Serialize)] +struct 
TransactionWithConvertedClassRef<'a> { + tx: &'a SavedTransaction, + converted_class: &'a Option, +} +#[derive(Serialize, Deserialize)] +struct TransactionWithConvertedClass { + tx: SavedTransaction, + converted_class: Option, +} + +impl MadaraBackend { + #[tracing::instrument(skip(self), fields(module = "MempoolDB"))] + pub fn get_mempool_transactions(&self) -> Result)>> { + let col = self.db.get_column(Column::MempoolTransactions); + let txs = self + .db + .iterator_cf(&col, IteratorMode::Start) + .map(|kv| { + let (k, v) = kv?; + let hash: Felt = bincode::deserialize(&k)?; + let tx: TransactionWithConvertedClass = bincode::deserialize(&v)?; + + Result::<_>::Ok((hash, tx.tx, tx.converted_class)) + }) + .collect::, _>>()?; + tracing::debug!("get_mempool_txs {:?}", txs.iter().map(|(hash, _, _)| hash).collect::>()); + Ok(txs) + } + + #[tracing::instrument(skip(self), fields(module = "MempoolDB"))] + pub fn remove_mempool_transaction(&self, tx_hash: &Felt) -> Result<()> { + // Note: We do not use WAL here, as it will be flushed by saving the block. This is to + // ensure saving the block and removing the tx from the saved mempool are both done at once + // atomically. 
+ + let col = self.db.get_column(Column::MempoolTransactions); + self.db.delete_cf_opt(&col, bincode::serialize(tx_hash)?, &self.write_opt_no_wal)?; + tracing::debug!("remove_mempool_tx {:?}", tx_hash); + Ok(()) + } + + #[tracing::instrument(skip(self, tx), fields(module = "MempoolDB"))] + pub fn save_mempool_transaction( + &self, + tx: &SavedTransaction, + tx_hash: Felt, + converted_class: &Option, + ) -> Result<()> { + // Note: WAL is used here + // This is because we want it to be saved even if the node crashes before the next flush + + let col = self.db.get_column(Column::MempoolTransactions); + let tx_with_class = TransactionWithConvertedClassRef { tx, converted_class }; + self.db.put_cf(&col, bincode::serialize(&tx_hash)?, bincode::serialize(&tx_with_class)?)?; + tracing::debug!("save_mempool_tx {:?}", tx_hash); + Ok(()) + } +} diff --git a/crates/client/db/src/storage_updates.rs b/crates/client/db/src/storage_updates.rs index f74ed0d4d..e34b22b46 100644 --- a/crates/client/db/src/storage_updates.rs +++ b/crates/client/db/src/storage_updates.rs @@ -15,6 +15,7 @@ impl MadaraBackend { block: MadaraMaybePendingBlock, state_diff: StateDiff, converted_classes: Vec, + visited_segments: Option)>>, ) -> Result<(), MadaraStorageError> { let block_n = block.info.block_n(); let state_diff_cpy = state_diff.clone(); @@ -23,9 +24,11 @@ impl MadaraBackend { self.clear_pending_block()?; let task_block_db = || match block.info { - MadaraMaybePendingBlockInfo::Pending(info) => { - self.block_db_store_pending(&MadaraPendingBlock { info, inner: block.inner }, &state_diff_cpy) - } + MadaraMaybePendingBlockInfo::Pending(info) => self.block_db_store_pending( + &MadaraPendingBlock { info, inner: block.inner }, + &state_diff_cpy, + visited_segments, + ), MadaraMaybePendingBlockInfo::NotPending(info) => { self.block_db_store_block(&MadaraBlock { info, inner: block.inner }, &state_diff_cpy) } diff --git a/crates/client/db/src/tests/test_block.rs 
b/crates/client/db/src/tests/test_block.rs index f9531b401..cd1d3a8f5 100644 --- a/crates/client/db/src/tests/test_block.rs +++ b/crates/client/db/src/tests/test_block.rs @@ -25,8 +25,8 @@ mod block_tests { let block_hash = block.info.block_hash().unwrap(); let state_diff = finalized_state_diff_zero(); - backend.store_block(block.clone(), state_diff.clone(), vec![]).unwrap(); - backend.store_block(pending_block_one(), pending_state_diff_one(), vec![]).unwrap(); + backend.store_block(block.clone(), state_diff.clone(), vec![], None).unwrap(); + backend.store_block(pending_block_one(), pending_state_diff_one(), vec![], None).unwrap(); assert_eq!(backend.resolve_block_id(&BlockId::Hash(block_hash)).unwrap().unwrap(), DbBlockId::Number(0)); assert_eq!(backend.resolve_block_id(&BlockId::Number(0)).unwrap().unwrap(), DbBlockId::Number(0)); @@ -53,7 +53,7 @@ mod block_tests { let block = finalized_block_zero(Header::default()); let state_diff = finalized_state_diff_zero(); - backend.store_block(block.clone(), state_diff.clone(), vec![]).unwrap(); + backend.store_block(block.clone(), state_diff.clone(), vec![], None).unwrap(); assert_eq!(backend.get_block_hash(&BLOCK_ID_0).unwrap().unwrap(), block.info.block_hash().unwrap()); assert_eq!(BLOCK_ID_0.resolve_db_block_id(backend).unwrap().unwrap(), BLOCK_ID_0); @@ -76,7 +76,7 @@ mod block_tests { let block = pending_block_one(); let state_diff = pending_state_diff_one(); - backend.store_block(block.clone(), state_diff.clone(), vec![]).unwrap(); + backend.store_block(block.clone(), state_diff.clone(), vec![], None).unwrap(); assert!(backend.get_block_hash(&BLOCK_ID_PENDING).unwrap().is_none()); assert_eq!(backend.get_block_info(&BLOCK_ID_PENDING).unwrap().unwrap(), block.info); @@ -92,8 +92,10 @@ mod block_tests { let db = temp_db().await; let backend = db.backend(); - backend.store_block(finalized_block_zero(Header::default()), finalized_state_diff_zero(), vec![]).unwrap(); - backend.store_block(pending_block_one(), 
pending_state_diff_one(), vec![]).unwrap(); + backend + .store_block(finalized_block_zero(Header::default()), finalized_state_diff_zero(), vec![], None) + .unwrap(); + backend.store_block(pending_block_one(), pending_state_diff_one(), vec![], None).unwrap(); backend.clear_pending_block().unwrap(); assert!(backend.get_block(&BLOCK_ID_PENDING).unwrap().unwrap().inner.transactions.is_empty()); @@ -103,11 +105,11 @@ mod block_tests { "fake pending block parent hash must match with latest block in db" ); - backend.store_block(finalized_block_one(), finalized_state_diff_one(), vec![]).unwrap(); + backend.store_block(finalized_block_one(), finalized_state_diff_one(), vec![], None).unwrap(); let block_pending = pending_block_two(); let state_diff = pending_state_diff_two(); - backend.store_block(block_pending.clone(), state_diff.clone(), vec![]).unwrap(); + backend.store_block(block_pending.clone(), state_diff.clone(), vec![], None).unwrap(); assert!(backend.get_block_hash(&BLOCK_ID_PENDING).unwrap().is_none()); assert_eq!(backend.get_block_info(&BLOCK_ID_PENDING).unwrap().unwrap(), block_pending.info); @@ -121,10 +123,12 @@ mod block_tests { let db = temp_db().await; let backend = db.backend(); - backend.store_block(finalized_block_zero(Header::default()), finalized_state_diff_zero(), vec![]).unwrap(); + backend + .store_block(finalized_block_zero(Header::default()), finalized_state_diff_zero(), vec![], None) + .unwrap(); let latest_block = finalized_block_one(); - backend.store_block(latest_block.clone(), finalized_state_diff_one(), vec![]).unwrap(); + backend.store_block(latest_block.clone(), finalized_state_diff_one(), vec![], None).unwrap(); assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 1); } @@ -149,7 +153,7 @@ mod block_tests { let block = finalized_block_zero(Header::default()); let state_diff = finalized_state_diff_zero(); - backend.store_block(block.clone(), state_diff.clone(), vec![]).unwrap(); + backend.store_block(block.clone(), 
state_diff.clone(), vec![], None).unwrap(); let tx_hash_1 = block.info.tx_hashes()[1]; assert_eq!(backend.find_tx_hash_block_info(&tx_hash_1).unwrap().unwrap(), (block.info.clone(), TxIndex(1))); @@ -161,10 +165,12 @@ mod block_tests { let db = temp_db().await; let backend = db.backend(); - backend.store_block(finalized_block_zero(Header::default()), finalized_state_diff_zero(), vec![]).unwrap(); + backend + .store_block(finalized_block_zero(Header::default()), finalized_state_diff_zero(), vec![], None) + .unwrap(); let block_pending = pending_block_one(); - backend.store_block(block_pending.clone(), pending_state_diff_one(), vec![]).unwrap(); + backend.store_block(block_pending.clone(), pending_state_diff_one(), vec![], None).unwrap(); let tx_hash_1 = block_pending.info.tx_hashes()[1]; assert_eq!( diff --git a/crates/client/devnet/src/lib.rs b/crates/client/devnet/src/lib.rs index 737add303..c9f0a0d05 100644 --- a/crates/client/devnet/src/lib.rs +++ b/crates/client/devnet/src/lib.rs @@ -198,8 +198,8 @@ mod tests { use mp_class::ClassInfo; use mp_convert::felt_to_u128; use mp_receipt::{Event, ExecutionResult, FeePayment, InvokeTransactionReceipt, PriceUnit, TransactionReceipt}; - use mp_transactions::broadcasted_to_blockifier; use mp_transactions::compute_hash::calculate_contract_address; + use mp_transactions::BroadcastedTransactionExt; use rstest::{fixture, rstest}; use starknet_core::types::contract::SierraClass; use starknet_core::types::{ @@ -224,12 +224,12 @@ mod tests { mut tx: BroadcastedInvokeTransaction, contract: &DevnetPredeployedContract, ) -> Result { - let (blockifier_tx, _classes) = broadcasted_to_blockifier( - BroadcastedTransaction::Invoke(tx.clone()), - self.backend.chain_config().chain_id.to_felt(), - self.backend.chain_config().latest_protocol_version, - ) - .unwrap(); + let (blockifier_tx, _classes) = BroadcastedTransaction::Invoke(tx.clone()) + .into_blockifier( + self.backend.chain_config().chain_id.to_felt(), + 
self.backend.chain_config().latest_protocol_version, + ) + .unwrap(); let signature = contract.secret.sign(&transaction_hash(&blockifier_tx)).unwrap(); let tx_signature = match &mut tx { @@ -248,12 +248,12 @@ mod tests { mut tx: BroadcastedDeclareTransaction, contract: &DevnetPredeployedContract, ) -> Result { - let (blockifier_tx, _classes) = broadcasted_to_blockifier( - BroadcastedTransaction::Declare(tx.clone()), - self.backend.chain_config().chain_id.to_felt(), - self.backend.chain_config().latest_protocol_version, - ) - .unwrap(); + let (blockifier_tx, _classes) = BroadcastedTransaction::Declare(tx.clone()) + .into_blockifier( + self.backend.chain_config().chain_id.to_felt(), + self.backend.chain_config().latest_protocol_version, + ) + .unwrap(); let signature = contract.secret.sign(&transaction_hash(&blockifier_tx)).unwrap(); let tx_signature = match &mut tx { @@ -271,12 +271,12 @@ mod tests { mut tx: BroadcastedDeployAccountTransaction, contract: &DevnetPredeployedContract, ) -> Result { - let (blockifier_tx, _classes) = broadcasted_to_blockifier( - BroadcastedTransaction::DeployAccount(tx.clone()), - self.backend.chain_config().chain_id.to_felt(), - self.backend.chain_config().latest_protocol_version, - ) - .unwrap(); + let (blockifier_tx, _classes) = BroadcastedTransaction::DeployAccount(tx.clone()) + .into_blockifier( + self.backend.chain_config().chain_id.to_felt(), + self.backend.chain_config().latest_protocol_version, + ) + .unwrap(); let signature = contract.secret.sign(&transaction_hash(&blockifier_tx)).unwrap(); let tx_signature = match &mut tx { diff --git a/crates/client/eth/src/l1_messaging.rs b/crates/client/eth/src/l1_messaging.rs index 16d14ff56..00c35e878 100644 --- a/crates/client/eth/src/l1_messaging.rs +++ b/crates/client/eth/src/l1_messaging.rs @@ -5,14 +5,12 @@ use alloy::eips::BlockNumberOrTag; use alloy::primitives::{keccak256, FixedBytes, U256}; use alloy::sol_types::SolValue; use anyhow::Context; -use 
blockifier::transaction::transaction_execution::Transaction as BlockifierTransation; use futures::StreamExt; use mc_db::{l1_db::LastSyncedEventBlock, MadaraBackend}; use mc_mempool::{Mempool, MempoolProvider}; use mp_utils::channel_wait_or_graceful_shutdown; use starknet_api::core::{ChainId, ContractAddress, EntryPointSelector, Nonce}; -use starknet_api::transaction::{Calldata, Fee, L1HandlerTransaction, Transaction, TransactionVersion}; -use starknet_api::transaction_hash::get_transaction_hash; +use starknet_api::transaction::{Calldata, L1HandlerTransaction, TransactionVersion}; use starknet_types_core::felt::Felt; use std::sync::Arc; @@ -131,11 +129,12 @@ async fn process_l1_message( event: &LogMessageToL2, l1_block_number: &Option, event_index: &Option, - chain_id: &ChainId, + _chain_id: &ChainId, mempool: Arc, ) -> anyhow::Result> { let transaction = parse_handle_l1_message_transaction(event)?; let tx_nonce = transaction.nonce; + let fees: u128 = event.fee.try_into()?; // Ensure that L1 message has not been executed match backend.has_l1_messaging_nonce(tx_nonce) { @@ -152,16 +151,7 @@ async fn process_l1_message( } }; - let tx_hash = get_transaction_hash(&Transaction::L1Handler(transaction.clone()), chain_id, &transaction.version)?; - let blockifier_transaction = BlockifierTransation::from_api( - Transaction::L1Handler(transaction), - tx_hash, - None, - Some(Fee(event.fee.try_into()?)), - None, - false, - )?; - let res = mempool.accept_l1_handler_tx(blockifier_transaction)?; + let res = mempool.accept_l1_handler_tx(transaction.into(), fees)?; // TODO: remove unwraps // Ques: shall it panic if no block number of event_index? 
diff --git a/crates/client/exec/Cargo.toml b/crates/client/exec/Cargo.toml index a247a2ef3..1de9240fe 100644 --- a/crates/client/exec/Cargo.toml +++ b/crates/client/exec/Cargo.toml @@ -22,6 +22,8 @@ mp-block = { workspace = true } mp-chain-config = { workspace = true } mp-class = { workspace = true } mp-convert = { workspace = true } +mp-receipt = { workspace = true } +mp-transactions = { workspace = true } # Starknet blockifier = { workspace = true } diff --git a/crates/client/exec/src/lib.rs b/crates/client/exec/src/lib.rs index 2bc1880b7..8581156c2 100644 --- a/crates/client/exec/src/lib.rs +++ b/crates/client/exec/src/lib.rs @@ -16,6 +16,7 @@ mod call; pub mod execution; mod fee; mod trace; +// pub mod transaction; pub use block_context::ExecutionContext; pub use blockifier_state_adapter::BlockifierStateAdapter; diff --git a/crates/client/exec/src/transaction.rs b/crates/client/exec/src/transaction.rs new file mode 100644 index 000000000..744921c0d --- /dev/null +++ b/crates/client/exec/src/transaction.rs @@ -0,0 +1,79 @@ +use std::{borrow::Cow, sync::Arc}; + +use blockifier::execution::{contract_class::ClassInfo, errors::ContractClassError}; +use blockifier::transaction::transaction_execution as btx; +use mc_db::{MadaraBackend, MadaraStorageError}; +use mp_block::BlockId; +use mp_class::compile::ClassCompilationError; +use mp_convert::ToFelt; +use starknet_api::transaction::{Transaction, TransactionHash}; + +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error("Class not found")] + ClassNotFound, + #[error(transparent)] + Storage(#[from] MadaraStorageError), + #[error("Class compilation error: {0:#}")] + ClassCompilationError(#[from] ClassCompilationError), + #[error("Contract class error: {0:#}")] + ContractClassError(#[from] ContractClassError), + #[error("{0}")] + Internal(Cow<'static, str>), + TransactionExecutionError(#[from] ) +} + +/// Convert an starknet-api Transaction to a blockifier Transaction +/// +/// **note:** this function does not 
support deploy transaction +/// because it is not supported by blockifier +pub fn to_blockifier_transactions( + backend: Arc, + block_id: BlockId, + transaction: mp_transactions::Transaction, + tx_hash: &TransactionHash, +) -> Result { + let transaction: Transaction = + transaction.try_into().map_err(|_| Error("Converting to starknet api transaction".into()))?; + + let paid_fee_on_l1 = match transaction { + Transaction::L1Handler(_) => Some(starknet_api::transaction::Fee(1_000_000_000_000)), + _ => None, + }; + + let class_info = match &transaction { + Transaction::Declare(declare_tx) => { + let class_hash = declare_tx.class_hash(); + let class_info = backend.get_class_info(&block_id, &class_hash.to_felt())?.ok_or(Error::ClassNotFound)?; + + match class_info { + mp_class::ClassInfo::Sierra(info) => { + let compiled_class = + backend.get_sierra_compiled(&block_id, &info.compiled_class_hash)?.ok_or_else(|| { + Error::Internal( + "Inconsistent state: compiled sierra class from class_hash '{class_hash}' not found" + .into(), + ) + })?; + + let blockifier_class = compiled_class.to_blockifier_class()?; + Some(ClassInfo::new( + &blockifier_class, + info.contract_class.program_length(), + info.contract_class.abi_length(), + )?) + } + mp_class::ClassInfo::Legacy(info) => { + let blockifier_class = info.contract_class.to_blockifier_class()?; + Some(ClassInfo::new(&blockifier_class, 0, 0)?) 
+ } + } + } + _ => None, + }; + + btx::Transaction::from_api(transaction.clone(), *tx_hash, class_info, paid_fee_on_l1, None, false).map_err(|_| { + tracing::error!("Failed to convert transaction to blockifier transaction"); + StarknetRpcApiError::InternalServerError + }) +} diff --git a/crates/client/mempool/src/inner/mod.rs b/crates/client/mempool/src/inner/mod.rs index b1599a945..773ea75fa 100644 --- a/crates/client/mempool/src/inner/mod.rs +++ b/crates/client/mempool/src/inner/mod.rs @@ -6,8 +6,10 @@ use blockifier::transaction::account_transaction::AccountTransaction; use blockifier::transaction::transaction_execution::Transaction; use deployed_contracts::DeployedContracts; +use mp_convert::ToFelt; use nonce_chain::{InsertedPosition, NonceChain, NonceChainNewState, ReplacedState}; use starknet_api::core::ContractAddress; +use starknet_core::types::Felt; use std::{ cmp, collections::{hash_map, BTreeSet, HashMap}, @@ -24,7 +26,7 @@ pub use tx::*; #[derive(Clone, Debug)] struct AccountOrderedByTimestamp { - contract_addr: ContractAddress, + contract_addr: Felt, timestamp: ArrivedAtTimestamp, } @@ -54,7 +56,7 @@ impl PartialOrd for AccountOrderedByTimestamp { /// - See [`NonceChain`] invariants. pub(crate) struct MempoolInner { /// We have one nonce chain per contract address. - nonce_chains: HashMap, + nonce_chains: HashMap, /// FCFS queue. 
tx_queue: BTreeSet, deployed_contracts: DeployedContracts, @@ -88,7 +90,7 @@ impl MempoolInner { for (k, v) in &self.nonce_chains { assert!(tx_queue.remove(&AccountOrderedByTimestamp { contract_addr: *k, timestamp: v.front_arrived_at })) } - assert!(tx_queue.is_empty()); + assert_eq!(tx_queue, Default::default()); let mut deployed_contracts = self.deployed_contracts.clone(); for (contract, _) in self.nonce_chains.values().flat_map(|chain| &chain.transactions) { if let Transaction::AccountTransaction(AccountTransaction::DeployAccount(tx)) = &contract.0.tx { @@ -110,7 +112,7 @@ impl MempoolInner { self.limiter.check_insert_limits(&limits_for_tx)?; } - let contract_addr = mempool_tx.contract_address(); + let contract_addr = mempool_tx.contract_address().to_felt(); let arrived_at = mempool_tx.arrived_at; let deployed_contract_address = if let Transaction::AccountTransaction(AccountTransaction::DeployAccount(tx)) = &mempool_tx.tx { @@ -211,11 +213,16 @@ impl MempoolInner { // too bad there's no first_entry api, we should check if hashbrown has it to avoid the double lookup. 
while let Some(tx_queue_account) = self.tx_queue.first() { let tx_queue_account = tx_queue_account.clone(); // clone is cheap for this struct - let mempool_tx = self.pop_tx_queue_account(&tx_queue_account); - - if self.limiter.tx_age_exceeded(&TransactionCheckedLimits::limits_for(&mempool_tx)) { - let _res = self.tx_queue.pop_first().expect("cannot be empty, checked just above"); - self.limiter.mark_removed(&TransactionCheckedLimits::limits_for(&mempool_tx)); + let nonce_chain = self + .nonce_chains + .get_mut(&tx_queue_account.contract_addr) + .expect("Nonce chain does not match tx queue"); + let (k, _v) = nonce_chain.transactions.first_key_value().expect("Nonce chain without a tx"); + + if self.limiter.tx_age_exceeded(&TransactionCheckedLimits::limits_for(&k.0)) { + let tx = self.pop_tx_queue_account(&tx_queue_account); + let _res = self.tx_queue.pop_first().expect("Cannot be empty, checked just above"); + self.limiter.mark_removed(&TransactionCheckedLimits::limits_for(&tx)); } else { break; } diff --git a/crates/client/mempool/src/inner/nonce_chain.rs b/crates/client/mempool/src/inner/nonce_chain.rs index 72bb77222..4f48b3ec3 100644 --- a/crates/client/mempool/src/inner/nonce_chain.rs +++ b/crates/client/mempool/src/inner/nonce_chain.rs @@ -88,10 +88,10 @@ impl NonceChain { // double lookup here unfortunately.. that's because we're using the keys in a hacky way and can't update the // entry key using the entry api. 
let mempool_tx = OrderMempoolTransactionByNonce(mempool_tx); - if let Some((previous, _)) = self.transactions.get_key_value(&mempool_tx) { + if let Some((previous, _)) = self.transactions.remove_entry(&mempool_tx) { let previous = previous.0.clone(); let inserted = self.transactions.insert(mempool_tx, ()); - debug_assert!(inserted.is_some()); + debug_assert!(inserted.is_none()); ReplacedState::Replaced { previous } } else { let inserted = self.transactions.insert(mempool_tx, ()); diff --git a/crates/client/mempool/src/lib.rs b/crates/client/mempool/src/lib.rs index c9330d716..038927116 100644 --- a/crates/client/mempool/src/lib.rs +++ b/crates/client/mempool/src/lib.rs @@ -1,9 +1,10 @@ +use anyhow::Context; use blockifier::blockifier::stateful_validator::StatefulValidatorError; use blockifier::transaction::account_transaction::AccountTransaction; use blockifier::transaction::transaction_execution::Transaction; use blockifier::transaction::transactions::DeployAccountTransaction; use blockifier::transaction::transactions::InvokeTransaction; -use blockifier::transaction::transactions::{DeclareTransaction, L1HandlerTransaction}; +use blockifier::transaction::transactions::{DeclareTransaction, L1HandlerTransaction as BL1HandlerTransaction}; use header::make_pending_header; use inner::MempoolInner; use mc_db::db_block_id::DbBlockId; @@ -16,10 +17,11 @@ use mp_block::BlockTag; use mp_block::MadaraPendingBlockInfo; use mp_class::ConvertedClass; use mp_convert::ToFelt; -use mp_transactions::{ - broadcasted_declare_v0_to_blockifier, broadcasted_to_blockifier, BroadcastedDeclareTransactionV0, -}; -use mp_transactions::{BroadcastedToBlockifierError, L1HandlerTransactionResult}; +use mp_transactions::BroadcastedDeclareTransactionV0; +use mp_transactions::BroadcastedToBlockifierError; +use mp_transactions::BroadcastedTransactionExt; +use mp_transactions::L1HandlerTransaction; +use mp_transactions::L1HandlerTransactionResult; use starknet_api::core::{ContractAddress, Nonce}; use 
starknet_api::transaction::TransactionHash; use starknet_core::types::BroadcastedDeclareTransaction; @@ -32,6 +34,9 @@ use starknet_core::types::InvokeTransactionResult; use starknet_types_core::felt::Felt; use std::sync::Arc; use std::sync::RwLock; +use std::time::SystemTime; +use tx::blockifier_to_saved_tx; +use tx::saved_to_blockifier_tx; pub use inner::TxInsersionError; pub use inner::{ArrivedAtTimestamp, MempoolTransaction}; @@ -43,6 +48,7 @@ pub mod header; mod inner; mod l1; pub mod metrics; +mod tx; pub use inner::*; @@ -74,7 +80,11 @@ pub trait MempoolProvider: Send + Sync { &self, tx: BroadcastedDeployAccountTransaction, ) -> Result; - fn accept_l1_handler_tx(&self, tx: Transaction) -> Result; + fn accept_l1_handler_tx( + &self, + tx: L1HandlerTransaction, + paid_fees_on_l1: u128, + ) -> Result; fn take_txs_chunk + 'static>(&self, dest: &mut I, n: usize) where Self: Sized; @@ -108,11 +118,30 @@ impl Mempool { } } - #[tracing::instrument(skip(self), fields(module = "Mempool"))] - fn accept_tx(&self, tx: Transaction, converted_class: Option) -> Result<(), Error> { - // The timestamp *does not* take the transaction validation time into account. - let arrived_at = ArrivedAtTimestamp::now(); + pub fn load_txs_from_db(&mut self) -> Result<(), anyhow::Error> { + for (tx_hash, saved_tx, converted_class) in + self.backend.get_mempool_transactions().context("Getting mempool transactions")? + { + let (tx, arrived_at) = saved_to_blockifier_tx(saved_tx, tx_hash, &converted_class) + .context("Converting saved tx to blockifier")?; + + if let Err(err) = self.accept_tx(tx, converted_class, arrived_at) { + match err { + Error::InnerMempool(TxInsersionError::Limit(MempoolLimitReached::Age { .. 
})) => {} // do nothing + err => log::warn!("Could not re-add mempool transaction from db: {err:#}"), + } + } + } + Ok(()) + } + #[tracing::instrument(skip(self), fields(module = "Mempool"))] + fn accept_tx( + &self, + tx: Transaction, + converted_class: Option, + arrived_at: SystemTime, + ) -> Result<(), Error> { // Get pending block. let pending_block_info = if let Some(block) = self.backend.get_block_info(&DbBlockId::Pending)? { block @@ -143,7 +172,8 @@ impl Mempool { None }; - tracing::debug!("Mempool verify tx_hash={:#x}", tx_hash(&tx).to_felt()); + let tx_hash = tx_hash(&tx).to_felt(); + tracing::debug!("Mempool verify tx_hash={:#x}", tx_hash); // Perform validations let exec_context = ExecutionContext::new_in_block(Arc::clone(&self.backend), &pending_block_info)?; @@ -154,16 +184,20 @@ impl Mempool { } if !is_only_query(&tx) { - tracing::debug!("Adding to mempool tx_hash={:#x}", tx_hash(&tx).to_felt()); - // Finally, add it to the nonce chain for the account nonce + tracing::debug!("Adding to inner mempool tx_hash={:#x}", tx_hash); + // Add to db + let saved_tx = blockifier_to_saved_tx(&tx, arrived_at); + self.backend.save_mempool_transaction(&saved_tx, tx_hash, &converted_class)?; + + // Add it to the inner mempool let force = false; self.inner .write() .expect("Poisoned lock") - .insert_tx(MempoolTransaction { tx, arrived_at, converted_class }, force)? 
- } + .insert_tx(MempoolTransaction { tx, arrived_at, converted_class }, force)?; - self.metrics.accepted_transaction_counter.add(1, &[]); + self.metrics.accepted_transaction_counter.add(1, &[]); + } Ok(()) } @@ -202,53 +236,50 @@ fn deployed_contract_address(tx: &Transaction) -> Option { impl MempoolProvider for Mempool { #[tracing::instrument(skip(self), fields(module = "Mempool"))] fn accept_invoke_tx(&self, tx: BroadcastedInvokeTransaction) -> Result { - let (tx, classes) = broadcasted_to_blockifier( - BroadcastedTransaction::Invoke(tx), - self.chain_id(), - self.backend.chain_config().latest_protocol_version, - )?; - - let res = InvokeTransactionResult { transaction_hash: transaction_hash(&tx) }; - self.accept_tx(tx, classes)?; + let tx = BroadcastedTransaction::Invoke(tx); + let (btx, class) = tx.into_blockifier(self.chain_id(), self.backend.chain_config().latest_protocol_version)?; + + let res = InvokeTransactionResult { transaction_hash: transaction_hash(&btx) }; + self.accept_tx(btx, class, ArrivedAtTimestamp::now())?; Ok(res) } #[tracing::instrument(skip(self), fields(module = "Mempool"))] fn accept_declare_v0_tx(&self, tx: BroadcastedDeclareTransactionV0) -> Result { - let (tx, classes) = broadcasted_declare_v0_to_blockifier( - tx, - self.chain_id(), - self.backend.chain_config().latest_protocol_version, - )?; + let (btx, class) = tx.into_blockifier(self.chain_id(), self.backend.chain_config().latest_protocol_version)?; let res = DeclareTransactionResult { - transaction_hash: transaction_hash(&tx), - class_hash: declare_class_hash(&tx).expect("Created transaction should be declare"), + transaction_hash: transaction_hash(&btx), + class_hash: declare_class_hash(&btx).expect("Created transaction should be declare"), }; - - self.accept_tx(tx, classes)?; + self.accept_tx(btx, class, ArrivedAtTimestamp::now())?; Ok(res) } - fn accept_l1_handler_tx(&self, tx: Transaction) -> Result { - let res = L1HandlerTransactionResult { transaction_hash: 
transaction_hash(&tx) }; - self.accept_tx(tx, None)?; + #[tracing::instrument(skip(self), fields(module = "Mempool"))] + fn accept_l1_handler_tx( + &self, + tx: L1HandlerTransaction, + paid_fees_on_l1: u128, + ) -> Result { + let (btx, class) = + tx.into_blockifier(self.chain_id(), self.backend.chain_config().latest_protocol_version, paid_fees_on_l1)?; + + let res = L1HandlerTransactionResult { transaction_hash: transaction_hash(&btx) }; + self.accept_tx(btx, class, ArrivedAtTimestamp::now())?; Ok(res) } #[tracing::instrument(skip(self), fields(module = "Mempool"))] fn accept_declare_tx(&self, tx: BroadcastedDeclareTransaction) -> Result { - let (tx, classes) = broadcasted_to_blockifier( - BroadcastedTransaction::Declare(tx), - self.chain_id(), - self.backend.chain_config().latest_protocol_version, - )?; + let tx = BroadcastedTransaction::Declare(tx); + let (btx, class) = tx.into_blockifier(self.chain_id(), self.backend.chain_config().latest_protocol_version)?; let res = DeclareTransactionResult { - transaction_hash: transaction_hash(&tx), - class_hash: declare_class_hash(&tx).expect("Created transaction should be declare"), + transaction_hash: transaction_hash(&btx), + class_hash: declare_class_hash(&btx).expect("Created transaction should be declare"), }; - self.accept_tx(tx, classes)?; + self.accept_tx(btx, class, ArrivedAtTimestamp::now())?; Ok(res) } @@ -257,17 +288,14 @@ impl MempoolProvider for Mempool { &self, tx: BroadcastedDeployAccountTransaction, ) -> Result { - let (tx, classes) = broadcasted_to_blockifier( - BroadcastedTransaction::DeployAccount(tx), - self.chain_id(), - self.backend.chain_config().latest_protocol_version, - )?; + let tx = BroadcastedTransaction::DeployAccount(tx); + let (btx, class) = tx.into_blockifier(self.chain_id(), self.backend.chain_config().latest_protocol_version)?; let res = DeployAccountTransactionResult { - transaction_hash: transaction_hash(&tx), - contract_address: deployed_contract_address(&tx).expect("Created 
transaction should be deploy account"), + transaction_hash: transaction_hash(&btx), + contract_address: deployed_contract_address(&btx).expect("Created transaction should be deploy account"), }; - self.accept_tx(tx, classes)?; + self.accept_tx(btx, class, ArrivedAtTimestamp::now())?; Ok(res) } @@ -368,7 +396,7 @@ pub(crate) fn clone_transaction(tx: &Transaction) -> Transaction { only_query: tx.only_query, }), }), - Transaction::L1HandlerTransaction(tx) => Transaction::L1HandlerTransaction(L1HandlerTransaction { + Transaction::L1HandlerTransaction(tx) => Transaction::L1HandlerTransaction(BL1HandlerTransaction { tx: tx.tx.clone(), tx_hash: tx.tx_hash, paid_fee_on_l1: tx.paid_fee_on_l1, @@ -378,7 +406,7 @@ pub(crate) fn clone_transaction(tx: &Transaction) -> Transaction { #[cfg(test)] mod test { - use crate::{inner::MempoolLimits, Mempool, MockL1DataProvider}; + use crate::{inner::MempoolLimits, ArrivedAtTimestamp, Mempool, MockL1DataProvider}; use starknet_core::types::Felt; use std::sync::Arc; @@ -438,7 +466,7 @@ mod test { tx_account_v0_valid: blockifier::transaction::transaction_execution::Transaction, ) { let mempool = Mempool::new(backend, l1_data_provider, MempoolLimits::for_testing()); - let result = mempool.accept_tx(tx_account_v0_valid, None); + let result = mempool.accept_tx(tx_account_v0_valid, None, ArrivedAtTimestamp::now()); assert_matches::assert_matches!(result, Ok(())); } @@ -449,7 +477,7 @@ mod test { tx_account_v1_invalid: blockifier::transaction::transaction_execution::Transaction, ) { let mempool = Mempool::new(backend, l1_data_provider, MempoolLimits::for_testing()); - let result = mempool.accept_tx(tx_account_v1_invalid, None); + let result = mempool.accept_tx(tx_account_v1_invalid, None, ArrivedAtTimestamp::now()); assert_matches::assert_matches!(result, Err(crate::Error::Validation(_))); } } diff --git a/crates/client/mempool/src/tx.rs b/crates/client/mempool/src/tx.rs new file mode 100644 index 000000000..420e6cc2c --- /dev/null +++ 
b/crates/client/mempool/src/tx.rs @@ -0,0 +1,129 @@ +use std::time::{Duration, SystemTime}; + +use blockifier::{ + execution::{contract_class::ClassInfo as BClassInfo, errors::ContractClassError}, + transaction::{ + account_transaction::AccountTransaction, + errors::TransactionExecutionError, + transaction_execution::Transaction as BTransaction, + transactions::{DeclareTransaction, DeployAccountTransaction, InvokeTransaction, L1HandlerTransaction}, + }, +}; +use mc_db::mempool_db::SavedTransaction; +use mp_class::{compile::ClassCompilationError, ConvertedClass}; +use mp_convert::ToFelt; +use starknet_api::{ + core::ContractAddress, + transaction::{Fee, Transaction as StarknetApiTransaction, TransactionHash}, +}; +use starknet_core::types::Felt; + +pub fn blockifier_to_saved_tx(tx: &BTransaction, arrived_at: SystemTime) -> SavedTransaction { + let arrived_at = arrived_at.duration_since(SystemTime::UNIX_EPOCH).unwrap_or_default().as_millis(); + match tx { + BTransaction::AccountTransaction(AccountTransaction::Declare(tx)) => SavedTransaction { + only_query: tx.only_query(), + tx: StarknetApiTransaction::Declare(tx.tx.clone()).into(), + paid_fee_on_l1: None, + contract_address: None, + arrived_at, + }, + BTransaction::AccountTransaction(AccountTransaction::DeployAccount(tx)) => SavedTransaction { + only_query: tx.only_query, + tx: StarknetApiTransaction::DeployAccount(tx.tx.clone()).into(), + paid_fee_on_l1: None, + contract_address: Some(tx.contract_address.to_felt()), + arrived_at, + }, + BTransaction::AccountTransaction(AccountTransaction::Invoke(tx)) => SavedTransaction { + only_query: tx.only_query, + tx: StarknetApiTransaction::Invoke(tx.tx.clone()).into(), + paid_fee_on_l1: None, + contract_address: None, + arrived_at, + }, + BTransaction::L1HandlerTransaction(tx) => SavedTransaction { + only_query: false, + tx: StarknetApiTransaction::L1Handler(tx.tx.clone()).into(), + paid_fee_on_l1: Some(*tx.paid_fee_on_l1), + contract_address: None, + arrived_at, + }, + } 
+} + +#[derive(Debug, thiserror::Error)] +pub enum SavedToBlockifierTxError { + #[error(transparent)] + Blockifier(#[from] TransactionExecutionError), + #[error("Missing field {0}")] + MissingField(&'static str), + #[error("Invalid contract address")] + InvalidContractAddress, + #[error("Deploy not supported")] + DeployNotSupported, + #[error("Converting class {0:#}")] + ClassCompilationError(#[from] ClassCompilationError), + #[error("Converting class {0:#}")] + ContractClassError(#[from] ContractClassError), +} + +pub fn saved_to_blockifier_tx( + saved_tx: SavedTransaction, + tx_hash: Felt, + converted_class: &Option, +) -> Result<(BTransaction, SystemTime), SavedToBlockifierTxError> { + let tx_hash = TransactionHash(tx_hash); + let arrived_at = SystemTime::UNIX_EPOCH + Duration::from_millis(saved_tx.arrived_at as u64); + let tx = match saved_tx.tx { + mp_transactions::Transaction::L1Handler(tx) => BTransaction::L1HandlerTransaction(L1HandlerTransaction { + tx: tx.try_into().map_err(|_| SavedToBlockifierTxError::InvalidContractAddress)?, + tx_hash, + paid_fee_on_l1: Fee(saved_tx + .paid_fee_on_l1 + .ok_or(SavedToBlockifierTxError::MissingField("paid_fee_on_l1"))?), + }), + mp_transactions::Transaction::Declare(tx) => { + let converted_class = + converted_class.as_ref().ok_or(SavedToBlockifierTxError::MissingField("class_info"))?; + + let class_info = match converted_class { + ConvertedClass::Legacy(class) => { + BClassInfo::new(&class.info.contract_class.to_blockifier_class()?, 0, 0)? 
+ } + ConvertedClass::Sierra(class) => BClassInfo::new( + &class.compiled.to_blockifier_class()?, + class.info.contract_class.sierra_program.len(), + class.info.contract_class.abi.len(), + )?, + }; + let tx = tx.try_into().map_err(|_| SavedToBlockifierTxError::InvalidContractAddress)?; + let declare_tx = match saved_tx.only_query { + true => DeclareTransaction::new_for_query(tx, tx_hash, class_info)?, + false => DeclareTransaction::new(tx, tx_hash, class_info)?, + }; + BTransaction::AccountTransaction(AccountTransaction::Declare(declare_tx)) + } + mp_transactions::Transaction::DeployAccount(tx) => { + BTransaction::AccountTransaction(AccountTransaction::DeployAccount(DeployAccountTransaction { + tx: tx.try_into().map_err(|_| SavedToBlockifierTxError::InvalidContractAddress)?, + tx_hash, + contract_address: ContractAddress::try_from( + saved_tx.contract_address.ok_or(SavedToBlockifierTxError::MissingField("contract_address"))?, + ) + .map_err(|_| SavedToBlockifierTxError::InvalidContractAddress)?, + only_query: saved_tx.only_query, + })) + } + mp_transactions::Transaction::Invoke(tx) => { + BTransaction::AccountTransaction(AccountTransaction::Invoke(InvokeTransaction { + tx: tx.try_into().map_err(|_| SavedToBlockifierTxError::InvalidContractAddress)?, + tx_hash, + only_query: saved_tx.only_query, + })) + } + mp_transactions::Transaction::Deploy(_) => return Err(SavedToBlockifierTxError::DeployNotSupported), + }; + + Ok((tx, arrived_at)) +} diff --git a/crates/client/rpc/src/errors.rs b/crates/client/rpc/src/errors.rs index c034e42e8..1cd331224 100644 --- a/crates/client/rpc/src/errors.rs +++ b/crates/client/rpc/src/errors.rs @@ -1,9 +1,9 @@ -use std::borrow::Cow; -use std::fmt::Display; use mc_db::MadaraStorageError; use serde_json::json; use starknet_api::StarknetApiError; use starknet_core::types::StarknetError; +use std::borrow::Cow; +use std::fmt::Display; use crate::utils::display_internal_server_error; diff --git a/crates/client/rpc/src/test_utils.rs 
b/crates/client/rpc/src/test_utils.rs index def5ef190..dcb725225 100644 --- a/crates/client/rpc/src/test_utils.rs +++ b/crates/client/rpc/src/test_utils.rs @@ -221,6 +221,7 @@ pub fn make_sample_chain_for_block_getters(backend: &MadaraBackend) -> SampleCha }, StateDiff::default(), vec![], + None, ) .unwrap(); @@ -244,6 +245,7 @@ pub fn make_sample_chain_for_block_getters(backend: &MadaraBackend) -> SampleCha }, StateDiff::default(), vec![], + None, ) .unwrap(); @@ -311,6 +313,7 @@ pub fn make_sample_chain_for_block_getters(backend: &MadaraBackend) -> SampleCha }, StateDiff::default(), vec![], + None, ) .unwrap(); @@ -347,6 +350,7 @@ pub fn make_sample_chain_for_block_getters(backend: &MadaraBackend) -> SampleCha }, StateDiff::default(), vec![], + None, ) .unwrap(); } @@ -512,6 +516,7 @@ pub fn make_sample_chain_for_state_updates(backend: &MadaraBackend) -> SampleCha }, state_diffs[0].clone(), vec![], + None, ) .unwrap(); @@ -534,6 +539,7 @@ pub fn make_sample_chain_for_state_updates(backend: &MadaraBackend) -> SampleCha }, state_diffs[1].clone(), vec![], + None, ) .unwrap(); @@ -556,6 +562,7 @@ pub fn make_sample_chain_for_state_updates(backend: &MadaraBackend) -> SampleCha }, state_diffs[2].clone(), vec![], + None, ) .unwrap(); @@ -575,6 +582,7 @@ pub fn make_sample_chain_for_state_updates(backend: &MadaraBackend) -> SampleCha }, state_diffs[3].clone(), vec![], + None, ) .unwrap(); } diff --git a/crates/client/rpc/src/versions/v0_7_1/methods/read/block_hash_and_number.rs b/crates/client/rpc/src/versions/v0_7_1/methods/read/block_hash_and_number.rs index b616c40f1..fe0dac6c2 100644 --- a/crates/client/rpc/src/versions/v0_7_1/methods/read/block_hash_and_number.rs +++ b/crates/client/rpc/src/versions/v0_7_1/methods/read/block_hash_and_number.rs @@ -52,6 +52,7 @@ mod tests { }, StateDiff::default(), vec![], + None, ) .unwrap(); @@ -69,6 +70,7 @@ mod tests { }, StateDiff::default(), vec![], + None, ) .unwrap(); @@ -89,6 +91,7 @@ mod tests { }, StateDiff::default(), 
vec![], + None, ) .unwrap(); @@ -116,6 +119,7 @@ mod tests { }, StateDiff::default(), vec![], + None, ) .unwrap(); diff --git a/crates/client/rpc/src/versions/v0_7_1/methods/read/estimate_fee.rs b/crates/client/rpc/src/versions/v0_7_1/methods/read/estimate_fee.rs index c981e283b..1b62cd751 100644 --- a/crates/client/rpc/src/versions/v0_7_1/methods/read/estimate_fee.rs +++ b/crates/client/rpc/src/versions/v0_7_1/methods/read/estimate_fee.rs @@ -1,9 +1,9 @@ use std::sync::Arc; +use mp_transactions::BroadcastedTransactionExt; use starknet_core::types::{BlockId, BroadcastedTransaction, FeeEstimate, SimulationFlagForEstimateFee}; use mc_exec::ExecutionContext; -use mp_transactions::broadcasted_to_blockifier; use crate::errors::StarknetRpcApiError; use crate::errors::StarknetRpcResult; @@ -38,7 +38,7 @@ pub async fn estimate_fee( let transactions = request .into_iter() - .map(|tx| broadcasted_to_blockifier(tx, starknet.chain_id(), starknet_version).map(|(tx, _)| tx)) + .map(|tx| tx.into_blockifier(starknet.chain_id(), starknet_version).map(|(tx, _)| tx)) .collect::, _>>() .or_internal_server_error("Failed to convert BroadcastedTransaction to AccountTransaction")?; diff --git a/crates/client/rpc/src/versions/v0_7_1/methods/read/get_block_with_receipts.rs b/crates/client/rpc/src/versions/v0_7_1/methods/read/get_block_with_receipts.rs index dd7bb76a8..62e35e13d 100644 --- a/crates/client/rpc/src/versions/v0_7_1/methods/read/get_block_with_receipts.rs +++ b/crates/client/rpc/src/versions/v0_7_1/methods/read/get_block_with_receipts.rs @@ -231,6 +231,7 @@ mod tests { }, StateDiff::default(), vec![], + None, ) .unwrap(); diff --git a/crates/client/rpc/src/versions/v0_7_1/methods/trace/simulate_transactions.rs b/crates/client/rpc/src/versions/v0_7_1/methods/trace/simulate_transactions.rs index eef9af8b0..4163320c4 100644 --- a/crates/client/rpc/src/versions/v0_7_1/methods/trace/simulate_transactions.rs +++ 
b/crates/client/rpc/src/versions/v0_7_1/methods/trace/simulate_transactions.rs @@ -3,7 +3,7 @@ use crate::errors::{StarknetRpcApiError, StarknetRpcResult}; use crate::utils::ResultExt; use crate::Starknet; use mc_exec::{execution_result_to_tx_trace, ExecutionContext}; -use mp_transactions::broadcasted_to_blockifier; +use mp_transactions::BroadcastedTransactionExt; use starknet_core::types::{BlockId, BroadcastedTransaction, SimulatedTransaction, SimulationFlag}; use std::sync::Arc; @@ -26,7 +26,7 @@ pub async fn simulate_transactions( let user_transactions = transactions .into_iter() - .map(|tx| broadcasted_to_blockifier(tx, starknet.chain_id(), starknet_version).map(|(tx, _)| tx)) + .map(|tx| tx.into_blockifier(starknet.chain_id(), starknet_version).map(|(tx, _)| tx)) .collect::, _>>() .or_internal_server_error("Failed to convert broadcasted transaction to blockifier")?; diff --git a/crates/client/rpc/src/versions/v0_8_0/methods/ws/lib.rs b/crates/client/rpc/src/versions/v0_8_0/methods/ws/lib.rs index a8ead391d..e088b7961 100644 --- a/crates/client/rpc/src/versions/v0_8_0/methods/ws/lib.rs +++ b/crates/client/rpc/src/versions/v0_8_0/methods/ws/lib.rs @@ -167,6 +167,7 @@ mod test { }, mp_state_update::StateDiff::default(), vec![], + None, ) .expect("Storing block"); diff --git a/crates/client/sync/src/fetch/fetchers.rs b/crates/client/sync/src/fetch/fetchers.rs index 3f2e26d6a..cd81b56c1 100644 --- a/crates/client/sync/src/fetch/fetchers.rs +++ b/crates/client/sync/src/fetch/fetchers.rs @@ -291,6 +291,7 @@ fn convert_sequencer_block_pending( .collect(), transactions: block.transactions.into_iter().map(Into::into).collect(), declared_classes: class_update.into_iter().map(Into::into).collect(), + ..Default::default() }) } diff --git a/crates/node/src/cli/chain_config_overrides.rs b/crates/node/src/cli/chain_config_overrides.rs index 76450b4a2..08c946358 100644 --- a/crates/node/src/cli/chain_config_overrides.rs +++ b/crates/node/src/cli/chain_config_overrides.rs @@ 
-48,7 +48,7 @@ pub struct ChainConfigOverridesInner { pub private_key: ZeroingPrivateKey, pub mempool_tx_limit: usize, pub mempool_declare_tx_limit: usize, - #[serde(deserialize_with = "deserialize_duration")] + #[serde(deserialize_with = "deserialize_duration", serialize_with = "serialize_duration")] pub mempool_tx_max_age: Duration, } diff --git a/crates/node/src/cli/mod.rs b/crates/node/src/cli/mod.rs index f08fc99b5..08df9a477 100644 --- a/crates/node/src/cli/mod.rs +++ b/crates/node/src/cli/mod.rs @@ -9,6 +9,7 @@ pub mod sync; pub mod telemetry; use crate::cli::l1::L1SyncParams; use analytics::AnalyticsParams; +use anyhow::Context; pub use block_production::*; pub use chain_config_overrides::*; pub use db::*; @@ -130,10 +131,8 @@ impl RunCmd { // Read from the preset if provided (Some(preset), _, _) => ChainConfig::from(preset), // Read the config path if provided - (_, Some(path), _) => ChainConfig::from_yaml(path).map_err(|err| { - tracing::error!("Failed to load config from YAML at path '{}': {}", path.display(), err); - anyhow::anyhow!("Failed to load chain config from file") - })?, + (_, Some(path), _) => ChainConfig::from_yaml(path) + .with_context(|| format!("Failed to load config from YAML at path '{}'", path.display()))?, // Devnet default preset is Devnet if not provided by CLI (_, _, true) => ChainConfig::from(&ChainPreset::Devnet), _ => { diff --git a/crates/node/src/main.rs b/crates/node/src/main.rs index 2392f7ba5..08b92b9d1 100644 --- a/crates/node/src/main.rs +++ b/crates/node/src/main.rs @@ -111,11 +111,13 @@ async fn main() -> anyhow::Result<()> { let l1_data_provider: Arc = Arc::new(l1_gas_setter.clone()); // declare mempool here so that it can be used to process l1->l2 messages in the l1 service - let mempool = Arc::new(Mempool::new( + let mut mempool = Mempool::new( Arc::clone(db_service.backend()), Arc::clone(&l1_data_provider), MempoolLimits::new(&chain_config), - )); + ); + mempool.load_txs_from_db().context("Loading mempool 
transactions")?; + let mempool = Arc::new(mempool); let l1_service = L1SyncService::new( &run_cmd.l1_sync_params, diff --git a/crates/node/src/service/sync.rs b/crates/node/src/service/sync.rs index de0513968..d86adda9e 100644 --- a/crates/node/src/service/sync.rs +++ b/crates/node/src/service/sync.rs @@ -32,7 +32,7 @@ impl SyncService { ) -> anyhow::Result { let fetch_config = config.block_fetch_config(chain_config.chain_id.clone(), chain_config.clone()); - tracing::info!("🛰️ Using feeder gateway URL: {}", fetch_config.feeder_gateway.as_str()); + tracing::info!("🛰️ Using feeder gateway URL: {}", fetch_config.feeder_gateway.as_str()); Ok(Self { db_backend: Arc::clone(db.backend()), diff --git a/crates/primitives/class/src/lib.rs b/crates/primitives/class/src/lib.rs index d84b73b1f..f403ca4d9 100644 --- a/crates/primitives/class/src/lib.rs +++ b/crates/primitives/class/src/lib.rs @@ -8,7 +8,7 @@ pub mod compile; pub mod convert; mod into_starknet_core; -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] pub enum ConvertedClass { Legacy(LegacyConvertedClass), Sierra(SierraConvertedClass), diff --git a/crates/primitives/transactions/src/broadcasted_to_blockifier.rs b/crates/primitives/transactions/src/broadcasted_to_blockifier.rs index 8049fa4d1..f02b36d6a 100644 --- a/crates/primitives/transactions/src/broadcasted_to_blockifier.rs +++ b/crates/primitives/transactions/src/broadcasted_to_blockifier.rs @@ -1,16 +1,134 @@ -use std::sync::Arc; - -use crate::{ - into_starknet_api::TransactionApiError, BroadcastedDeclareTransactionV0, Transaction, TransactionWithHash, +use crate::{into_starknet_api::TransactionApiError, L1HandlerTransaction, Transaction, TransactionWithHash}; +use crate::{BroadcastedDeclareTransactionV0, DeclareTransaction}; +use blockifier::{ + execution::contract_class::ClassInfo as BClassInfo, transaction::transaction_execution::Transaction as BTransaction, }; +use starknet_core::types::{ 
+ BroadcastedDeclareTransaction, BroadcastedDeployAccountTransaction, BroadcastedInvokeTransaction, + BroadcastedTransaction, +}; + use blockifier::{execution::errors::ContractClassError, transaction::errors::TransactionExecutionError}; use mp_chain_config::StarknetVersion; use mp_class::{ - compile::ClassCompilationError, CompressedLegacyContractClass, ConvertedClass, FlattenedSierraClass, - LegacyClassInfo, LegacyConvertedClass, SierraClassInfo, SierraConvertedClass, + class_hash::ComputeClassHashError, compile::ClassCompilationError, CompressedLegacyContractClass, ConvertedClass, + FlattenedSierraClass, LegacyClassInfo, LegacyConvertedClass, SierraClassInfo, SierraConvertedClass, }; -use starknet_api::transaction::TransactionHash; +use starknet_api::transaction::{Fee, TransactionHash}; use starknet_types_core::felt::Felt; +use std::sync::Arc; + +pub trait BroadcastedTransactionExt { + fn into_blockifier( + self, + chain_id: Felt, + starknet_version: StarknetVersion, + ) -> Result<(BTransaction, Option), BroadcastedToBlockifierError>; +} + +pub fn is_query(tx: &BroadcastedTransaction) -> bool { + match tx { + BroadcastedTransaction::Invoke(tx) => match tx { + BroadcastedInvokeTransaction::V1(tx) => tx.is_query, + BroadcastedInvokeTransaction::V3(tx) => tx.is_query, + }, + BroadcastedTransaction::Declare(tx) => match tx { + BroadcastedDeclareTransaction::V1(tx) => tx.is_query, + BroadcastedDeclareTransaction::V2(tx) => tx.is_query, + BroadcastedDeclareTransaction::V3(tx) => tx.is_query, + }, + BroadcastedTransaction::DeployAccount(tx) => match tx { + BroadcastedDeployAccountTransaction::V1(tx) => tx.is_query, + BroadcastedDeployAccountTransaction::V3(tx) => tx.is_query, + }, + } +} + +impl BroadcastedTransactionExt for BroadcastedTransaction { + fn into_blockifier( + self, + chain_id: Felt, + starknet_version: StarknetVersion, + ) -> Result<(BTransaction, Option), BroadcastedToBlockifierError> { + let (class_info, converted_class, class_hash) = match &self { + 
BroadcastedTransaction::Declare(tx) => match tx { + BroadcastedDeclareTransaction::V1(tx) => { + handle_class_legacy(Arc::new((*tx.contract_class).clone().into()))? + } + BroadcastedDeclareTransaction::V2(tx) => { + handle_class_sierra(Arc::new((*tx.contract_class).clone().into()), tx.compiled_class_hash)? + } + BroadcastedDeclareTransaction::V3(tx) => { + handle_class_sierra(Arc::new((*tx.contract_class).clone().into()), tx.compiled_class_hash)? + } + }, + _ => (None, None, None), + }; + + let is_query = is_query(&self); + let TransactionWithHash { transaction, hash } = + TransactionWithHash::from_broadcasted(self, chain_id, starknet_version, class_hash); + let deployed_address = match &transaction { + Transaction::DeployAccount(tx) => Some(tx.calculate_contract_address()), + _ => None, + }; + let transaction: starknet_api::transaction::Transaction = transaction.try_into()?; + + Ok(( + BTransaction::from_api( + transaction, + TransactionHash(hash), + class_info, + None, + deployed_address.map(|address| address.try_into().expect("Address conversion should never fail")), + is_query, + )?, + converted_class, + )) + } +} + +impl L1HandlerTransaction { + pub fn into_blockifier( + self, + chain_id: Felt, + _starknet_version: StarknetVersion, + paid_fees_on_l1: u128, + ) -> Result<(BTransaction, Option), BroadcastedToBlockifierError> { + let transaction = Transaction::L1Handler(self.clone()); + // TODO: check self.version + let hash = self.compute_hash(chain_id, false, false); + let transaction: starknet_api::transaction::Transaction = transaction.try_into()?; + + Ok(( + BTransaction::from_api(transaction, TransactionHash(hash), None, Some(Fee(paid_fees_on_l1)), None, false)?, + None, + )) + } +} + +impl BroadcastedDeclareTransactionV0 { + pub fn into_blockifier( + self, + chain_id: Felt, + starknet_version: StarknetVersion, + ) -> Result<(BTransaction, Option), BroadcastedToBlockifierError> { + let (class_info, converted_class, class_hash) = 
handle_class_legacy(Arc::clone(&self.contract_class))?; + + let is_query = self.is_query; + let transaction = Transaction::Declare(DeclareTransaction::from_broadcasted_declare_v0( + self, + class_hash.expect("Class hash must be provided for DeclareTransaction"), + )); + let hash = transaction.compute_hash(chain_id, starknet_version, is_query); + let transaction: starknet_api::transaction::Transaction = transaction.try_into()?; + + Ok(( + BTransaction::from_api(transaction, TransactionHash(hash), class_info, None, None, is_query)?, + converted_class, + )) + } +} #[derive(thiserror::Error, Debug)] pub enum BroadcastedToBlockifierError { @@ -19,182 +137,58 @@ pub enum BroadcastedToBlockifierError { #[error("Failed to convert program: {0}")] ProgramError(#[from] cairo_vm::types::errors::program_errors::ProgramError), #[error("Failed to compute legacy class hash: {0}")] - ComputeLegacyClassHashFailed(anyhow::Error), + ComputeLegacyClassHashFailed(#[from] ComputeClassHashError), #[error("Failed to convert transaction to starkneti-api: {0}")] ConvertToTxApiError(#[from] TransactionApiError), #[error("Failed to convert transaction to blockifier: {0}")] ConvertTxBlockifierError(#[from] TransactionExecutionError), #[error("Failed to convert contract class: {0}")] ConvertContractClassError(#[from] ContractClassError), - #[error("Declare legacy contract classes are not supported")] - LegacyContractClassesNotSupported, + // #[error("Declare legacy contract classes are not supported")] + // LegacyContractClassesNotSupported, #[error("Compiled class hash mismatch: expected {expected}, actual {compilation}")] CompiledClassHashMismatch { expected: Felt, compilation: Felt }, } -pub fn broadcasted_declare_v0_to_blockifier( - transaction: BroadcastedDeclareTransactionV0, - chain_id: Felt, - starknet_version: StarknetVersion, -) -> Result< - (blockifier::transaction::transaction_execution::Transaction, Option), - BroadcastedToBlockifierError, -> { - let (class_info, class_hash, 
extra_class_info) = { - let compressed_legacy_class: CompressedLegacyContractClass = (*transaction.contract_class).clone(); - let class_hash = compressed_legacy_class.compute_class_hash().unwrap(); - let compressed_legacy_class: CompressedLegacyContractClass = (*transaction.contract_class).clone(); - let class_blockifier = - compressed_legacy_class.to_blockifier_class().map_err(BroadcastedToBlockifierError::CompilationFailed)?; - - let class_info = LegacyClassInfo { contract_class: Arc::new(compressed_legacy_class) }; - - ( - Some(blockifier::execution::contract_class::ClassInfo::new(&class_blockifier, 0, 0)?), - Some(class_hash), - Some(ConvertedClass::Legacy(LegacyConvertedClass { class_hash, info: class_info })), - ) - }; - - let is_query = transaction.is_query; - let TransactionWithHash { transaction, hash } = - TransactionWithHash::from_broadcasted_declare_v0(transaction, chain_id, starknet_version, class_hash); - - let transaction: starknet_api::transaction::Transaction = transaction.try_into()?; - +#[allow(clippy::type_complexity)] +fn handle_class_legacy( + contract_class: Arc, +) -> Result<(Option, Option, Option), BroadcastedToBlockifierError> { + let class_hash = contract_class.compute_class_hash()?; + tracing::debug!("Computed legacy class hash: {:?}", class_hash); + let class_blockifier = + contract_class.to_blockifier_class().map_err(BroadcastedToBlockifierError::CompilationFailed)?; Ok(( - blockifier::transaction::transaction_execution::Transaction::from_api( - transaction, - TransactionHash(hash), - class_info, - None, - None, - is_query, - )?, - extra_class_info, + Some(BClassInfo::new(&class_blockifier, 0, 0)?), + Some(ConvertedClass::Legacy(LegacyConvertedClass { class_hash, info: LegacyClassInfo { contract_class } })), + Some(class_hash), )) } -pub fn broadcasted_to_blockifier( - transaction: starknet_core::types::BroadcastedTransaction, - chain_id: Felt, - starknet_version: StarknetVersion, -) -> Result< - 
(blockifier::transaction::transaction_execution::Transaction, Option), - BroadcastedToBlockifierError, -> { - let (class_info, class_hash, extra_class_info) = match &transaction { - starknet_core::types::BroadcastedTransaction::Declare(tx) => match tx { - starknet_core::types::BroadcastedDeclareTransaction::V1(tx) => { - let compressed_legacy_class: CompressedLegacyContractClass = (*tx.contract_class).clone().into(); - let class_hash = compressed_legacy_class.compute_class_hash().unwrap(); - tracing::debug!("Computed legacy class hash: {:?}", class_hash); - let compressed_legacy_class: CompressedLegacyContractClass = (*tx.contract_class).clone().into(); - let class_blockifier = compressed_legacy_class - .to_blockifier_class() - .map_err(BroadcastedToBlockifierError::CompilationFailed)?; - let class_info = LegacyClassInfo { contract_class: Arc::new(compressed_legacy_class) }; - - ( - Some(blockifier::execution::contract_class::ClassInfo::new(&class_blockifier, 0, 0)?), - Some(class_hash), - Some(ConvertedClass::Legacy(LegacyConvertedClass { class_hash, info: class_info })), - ) - } - starknet_core::types::BroadcastedDeclareTransaction::V2(tx) => { - let class_hash = tx.contract_class.class_hash(); - let flatten_sierra_class: FlattenedSierraClass = (*tx.contract_class).clone().into(); - let (compiled_class_hash, compiled) = flatten_sierra_class.compile_to_casm()?; - if tx.compiled_class_hash != compiled_class_hash { - return Err(BroadcastedToBlockifierError::CompiledClassHashMismatch { - expected: tx.compiled_class_hash, - compilation: compiled_class_hash, - }); - } - let class_info = - SierraClassInfo { contract_class: Arc::new(flatten_sierra_class), compiled_class_hash }; - - ( - Some(blockifier::execution::contract_class::ClassInfo::new( - &compiled.to_blockifier_class()?, - tx.contract_class.sierra_program.len(), - tx.contract_class.abi.len(), - )?), - Some(class_hash), - Some(ConvertedClass::Sierra(SierraConvertedClass { - class_hash, - info: class_info, - 
compiled: Arc::new(compiled), - })), - ) - } - starknet_core::types::BroadcastedDeclareTransaction::V3(tx) => { - let class_hash = tx.contract_class.class_hash(); - let flatten_sierra_class: FlattenedSierraClass = (*tx.contract_class).clone().into(); - let (compiled_class_hash, compiled) = flatten_sierra_class.compile_to_casm()?; - if tx.compiled_class_hash != compiled_class_hash { - return Err(BroadcastedToBlockifierError::CompiledClassHashMismatch { - expected: tx.compiled_class_hash, - compilation: compiled_class_hash, - }); - } - let class_info = - SierraClassInfo { contract_class: Arc::new(flatten_sierra_class), compiled_class_hash }; - - ( - Some(blockifier::execution::contract_class::ClassInfo::new( - &compiled.to_blockifier_class()?, - tx.contract_class.sierra_program.len(), - tx.contract_class.abi.len(), - )?), - Some(class_hash), - Some(ConvertedClass::Sierra(SierraConvertedClass { - class_hash, - info: class_info, - compiled: Arc::new(compiled), - })), - ) - } - }, - _ => (None, None, None), - }; - - let is_query = is_query(&transaction); - let TransactionWithHash { transaction, hash } = - TransactionWithHash::from_broadcasted(transaction, chain_id, starknet_version, class_hash); - let deployed_address = match &transaction { - Transaction::DeployAccount(tx) => Some(tx.calculate_contract_address()), - _ => None, - }; - let transaction: starknet_api::transaction::Transaction = transaction.try_into()?; - +#[allow(clippy::type_complexity)] +fn handle_class_sierra( + contract_class: Arc, + expected_compiled_class_hash: Felt, +) -> Result<(Option, Option, Option), BroadcastedToBlockifierError> { + let class_hash = contract_class.compute_class_hash()?; + let (compiled_class_hash, compiled) = contract_class.compile_to_casm()?; + if expected_compiled_class_hash != compiled_class_hash { + return Err(BroadcastedToBlockifierError::CompiledClassHashMismatch { + expected: expected_compiled_class_hash, + compilation: compiled_class_hash, + }); + } Ok(( - 
blockifier::transaction::transaction_execution::Transaction::from_api( - transaction, - TransactionHash(hash), - class_info, - None, - deployed_address.map(|address| address.try_into().expect("Address conversion should never fail")), - is_query, - )?, - extra_class_info, + Some(BClassInfo::new( + &compiled.to_blockifier_class()?, + contract_class.sierra_program.len(), + contract_class.abi.len(), + )?), + Some(ConvertedClass::Sierra(SierraConvertedClass { + class_hash, + info: SierraClassInfo { contract_class, compiled_class_hash }, + compiled: Arc::new(compiled), + })), + Some(class_hash), )) } - -fn is_query(transaction: &starknet_core::types::BroadcastedTransaction) -> bool { - match transaction { - starknet_core::types::BroadcastedTransaction::Invoke(tx) => match tx { - starknet_core::types::BroadcastedInvokeTransaction::V1(tx) => tx.is_query, - starknet_core::types::BroadcastedInvokeTransaction::V3(tx) => tx.is_query, - }, - starknet_core::types::BroadcastedTransaction::Declare(tx) => match tx { - starknet_core::types::BroadcastedDeclareTransaction::V1(tx) => tx.is_query, - starknet_core::types::BroadcastedDeclareTransaction::V2(tx) => tx.is_query, - starknet_core::types::BroadcastedDeclareTransaction::V3(tx) => tx.is_query, - }, - starknet_core::types::BroadcastedTransaction::DeployAccount(tx) => match tx { - starknet_core::types::BroadcastedDeployAccountTransaction::V1(tx) => tx.is_query, - starknet_core::types::BroadcastedDeployAccountTransaction::V3(tx) => tx.is_query, - }, - } -} diff --git a/crates/primitives/transactions/src/from_broadcasted_transaction.rs b/crates/primitives/transactions/src/from_broadcasted_transaction.rs index a6f5e54cc..3c5ee71b9 100644 --- a/crates/primitives/transactions/src/from_broadcasted_transaction.rs +++ b/crates/primitives/transactions/src/from_broadcasted_transaction.rs @@ -1,63 +1,51 @@ -use mp_chain_config::StarknetVersion; -use starknet_types_core::felt::Felt; - use crate::{ - BroadcastedDeclareTransactionV0, 
DeclareTransaction, DeclareTransactionV0, DeclareTransactionV1, - DeclareTransactionV2, DeclareTransactionV3, DeployAccountTransaction, DeployAccountTransactionV1, - DeployAccountTransactionV3, InvokeTransaction, InvokeTransactionV1, InvokeTransactionV3, Transaction, - TransactionWithHash, + broadcasted_to_blockifier::is_query, BroadcastedDeclareTransactionV0, DeclareTransaction, DeclareTransactionV0, + DeclareTransactionV1, DeclareTransactionV2, DeclareTransactionV3, DeployAccountTransaction, + DeployAccountTransactionV1, DeployAccountTransactionV3, InvokeTransaction, InvokeTransactionV1, + InvokeTransactionV3, Transaction, TransactionWithHash, }; +use mp_chain_config::StarknetVersion; +use starknet_core::types::{ + BroadcastedDeclareTransaction, BroadcastedDeclareTransactionV1, BroadcastedDeclareTransactionV2, + BroadcastedDeclareTransactionV3, BroadcastedDeployAccountTransaction, BroadcastedDeployAccountTransactionV1, + BroadcastedDeployAccountTransactionV3, BroadcastedInvokeTransaction, BroadcastedInvokeTransactionV1, + BroadcastedInvokeTransactionV3, BroadcastedTransaction, +}; +use starknet_types_core::felt::Felt; // class_hash is required for DeclareTransaction impl TransactionWithHash { pub fn from_broadcasted( - tx: starknet_core::types::BroadcastedTransaction, + tx: BroadcastedTransaction, chain_id: Felt, starknet_version: StarknetVersion, class_hash: Option, ) -> Self { let is_query = is_query(&tx); - let transaction: Transaction = match tx { - starknet_core::types::BroadcastedTransaction::Invoke(tx) => Transaction::Invoke(tx.into()), - starknet_core::types::BroadcastedTransaction::Declare(tx) => { - Transaction::Declare(DeclareTransaction::from_broadcasted( - tx, - class_hash.expect("Class hash must be provided for DeclareTransaction"), - )) - } - starknet_core::types::BroadcastedTransaction::DeployAccount(tx) => Transaction::DeployAccount(tx.into()), + let transaction = match tx { + BroadcastedTransaction::Invoke(tx) => Transaction::Invoke(tx.into()), 
+ BroadcastedTransaction::DeployAccount(tx) => Transaction::DeployAccount(tx.into()), + BroadcastedTransaction::Declare(tx) => Transaction::Declare(DeclareTransaction::from_broadcasted( + tx, + class_hash.expect("Class hash must be provided for DeclareTransaction"), + )), }; let hash = transaction.compute_hash(chain_id, starknet_version, is_query); Self { hash, transaction } } - - pub fn from_broadcasted_declare_v0( - tx: BroadcastedDeclareTransactionV0, - chain_id: Felt, - starknet_version: StarknetVersion, - class_hash: Option, - ) -> Self { - let is_query = tx.is_query; - let transaction: Transaction = Transaction::Declare(DeclareTransaction::from_broadcasted_declare_v0( - tx, - class_hash.expect("Class hash must be provided for DeclareTransactionV0"), - )); - let hash = transaction.compute_hash(chain_id, starknet_version, is_query); - Self { hash, transaction } - } } -impl From for InvokeTransaction { - fn from(tx: starknet_core::types::BroadcastedInvokeTransaction) -> Self { +impl From for InvokeTransaction { + fn from(tx: BroadcastedInvokeTransaction) -> Self { match tx { - starknet_core::types::BroadcastedInvokeTransaction::V1(tx) => InvokeTransaction::V1(tx.into()), - starknet_core::types::BroadcastedInvokeTransaction::V3(tx) => InvokeTransaction::V3(tx.into()), + BroadcastedInvokeTransaction::V1(tx) => InvokeTransaction::V1(tx.into()), + BroadcastedInvokeTransaction::V3(tx) => InvokeTransaction::V3(tx.into()), } } } -impl From for InvokeTransactionV1 { - fn from(tx: starknet_core::types::BroadcastedInvokeTransactionV1) -> Self { +impl From for InvokeTransactionV1 { + fn from(tx: BroadcastedInvokeTransactionV1) -> Self { Self { sender_address: tx.sender_address, calldata: tx.calldata, @@ -68,8 +56,8 @@ impl From for InvokeTransa } } -impl From for InvokeTransactionV3 { - fn from(tx: starknet_core::types::BroadcastedInvokeTransactionV3) -> Self { +impl From for InvokeTransactionV3 { + fn from(tx: BroadcastedInvokeTransactionV3) -> Self { Self { 
sender_address: tx.sender_address, calldata: tx.calldata, @@ -86,21 +74,21 @@ impl From for InvokeTransa } impl DeclareTransaction { - fn from_broadcasted(tx: starknet_core::types::BroadcastedDeclareTransaction, class_hash: Felt) -> Self { + fn from_broadcasted(tx: BroadcastedDeclareTransaction, class_hash: Felt) -> Self { match tx { - starknet_core::types::BroadcastedDeclareTransaction::V1(tx) => { + BroadcastedDeclareTransaction::V1(tx) => { DeclareTransaction::V1(DeclareTransactionV1::from_broadcasted(tx, class_hash)) } - starknet_core::types::BroadcastedDeclareTransaction::V2(tx) => { + BroadcastedDeclareTransaction::V2(tx) => { DeclareTransaction::V2(DeclareTransactionV2::from_broadcasted(tx, class_hash)) } - starknet_core::types::BroadcastedDeclareTransaction::V3(tx) => { + BroadcastedDeclareTransaction::V3(tx) => { DeclareTransaction::V3(DeclareTransactionV3::from_broadcasted(tx, class_hash)) } } } - fn from_broadcasted_declare_v0(tx: BroadcastedDeclareTransactionV0, class_hash: Felt) -> Self { + pub fn from_broadcasted_declare_v0(tx: BroadcastedDeclareTransactionV0, class_hash: Felt) -> Self { DeclareTransaction::V0(DeclareTransactionV0::from_broadcasted_declare_v0(tx, class_hash)) } } @@ -112,7 +100,7 @@ impl DeclareTransactionV0 { } impl DeclareTransactionV1 { - fn from_broadcasted(tx: starknet_core::types::BroadcastedDeclareTransactionV1, class_hash: Felt) -> Self { + fn from_broadcasted(tx: BroadcastedDeclareTransactionV1, class_hash: Felt) -> Self { Self { sender_address: tx.sender_address, max_fee: tx.max_fee, @@ -124,7 +112,7 @@ impl DeclareTransactionV1 { } impl DeclareTransactionV2 { - fn from_broadcasted(tx: starknet_core::types::BroadcastedDeclareTransactionV2, class_hash: Felt) -> Self { + fn from_broadcasted(tx: BroadcastedDeclareTransactionV2, class_hash: Felt) -> Self { Self { sender_address: tx.sender_address, compiled_class_hash: tx.compiled_class_hash, @@ -137,7 +125,7 @@ impl DeclareTransactionV2 { } impl DeclareTransactionV3 { - fn 
from_broadcasted(tx: starknet_core::types::BroadcastedDeclareTransactionV3, class_hash: Felt) -> Self { + fn from_broadcasted(tx: BroadcastedDeclareTransactionV3, class_hash: Felt) -> Self { Self { sender_address: tx.sender_address, compiled_class_hash: tx.compiled_class_hash, @@ -154,21 +142,17 @@ impl DeclareTransactionV3 { } } -impl From for DeployAccountTransaction { - fn from(tx: starknet_core::types::BroadcastedDeployAccountTransaction) -> Self { +impl From for DeployAccountTransaction { + fn from(tx: BroadcastedDeployAccountTransaction) -> Self { match tx { - starknet_core::types::BroadcastedDeployAccountTransaction::V1(tx) => { - DeployAccountTransaction::V1(tx.into()) - } - starknet_core::types::BroadcastedDeployAccountTransaction::V3(tx) => { - DeployAccountTransaction::V3(tx.into()) - } + BroadcastedDeployAccountTransaction::V1(tx) => DeployAccountTransaction::V1(tx.into()), + BroadcastedDeployAccountTransaction::V3(tx) => DeployAccountTransaction::V3(tx.into()), } } } -impl From for DeployAccountTransactionV1 { - fn from(tx: starknet_core::types::BroadcastedDeployAccountTransactionV1) -> Self { +impl From for DeployAccountTransactionV1 { + fn from(tx: BroadcastedDeployAccountTransactionV1) -> Self { Self { max_fee: tx.max_fee, signature: tx.signature, @@ -180,8 +164,8 @@ impl From for Deplo } } -impl From for DeployAccountTransactionV3 { - fn from(tx: starknet_core::types::BroadcastedDeployAccountTransactionV3) -> Self { +impl From for DeployAccountTransactionV3 { + fn from(tx: BroadcastedDeployAccountTransactionV3) -> Self { Self { signature: tx.signature, nonce: tx.nonce, @@ -196,21 +180,3 @@ impl From for Deplo } } } - -fn is_query(tx: &starknet_core::types::BroadcastedTransaction) -> bool { - match tx { - starknet_core::types::BroadcastedTransaction::Invoke(tx) => match tx { - starknet_core::types::BroadcastedInvokeTransaction::V1(tx) => tx.is_query, - starknet_core::types::BroadcastedInvokeTransaction::V3(tx) => tx.is_query, - }, - 
starknet_core::types::BroadcastedTransaction::Declare(tx) => match tx { - starknet_core::types::BroadcastedDeclareTransaction::V1(tx) => tx.is_query, - starknet_core::types::BroadcastedDeclareTransaction::V2(tx) => tx.is_query, - starknet_core::types::BroadcastedDeclareTransaction::V3(tx) => tx.is_query, - }, - starknet_core::types::BroadcastedTransaction::DeployAccount(tx) => match tx { - starknet_core::types::BroadcastedDeployAccountTransaction::V1(tx) => tx.is_query, - starknet_core::types::BroadcastedDeployAccountTransaction::V3(tx) => tx.is_query, - }, - } -} diff --git a/crates/primitives/transactions/src/lib.rs b/crates/primitives/transactions/src/lib.rs index 141461fef..ec141965f 100644 --- a/crates/primitives/transactions/src/lib.rs +++ b/crates/primitives/transactions/src/lib.rs @@ -1,4 +1,8 @@ +use mp_class::CompressedLegacyContractClass; +use mp_convert::{hex_serde::U128AsHex, hex_serde::U64AsHex, ToFelt}; use serde::{Deserialize, Serialize}; +use serde_with::serde_as; +use starknet_api::transaction::TransactionVersion; use starknet_types_core::{felt::Felt, hash::StarkHash}; use std::sync::Arc; @@ -10,17 +14,11 @@ mod from_starknet_provider; mod into_starknet_api; mod to_starknet_core; +// pub mod broadcasted; pub mod compute_hash; pub mod utils; -use mp_convert::{hex_serde::U128AsHex, hex_serde::U64AsHex, ToFelt}; -// pub use from_starknet_provider::TransactionTypeError; -pub use broadcasted_to_blockifier::{ - broadcasted_declare_v0_to_blockifier, broadcasted_to_blockifier, BroadcastedToBlockifierError, -}; -use mp_class::CompressedLegacyContractClass; -use serde_with::serde_as; -use starknet_api::transaction::TransactionVersion; +pub use broadcasted_to_blockifier::{BroadcastedToBlockifierError, BroadcastedTransactionExt}; const SIMULATE_TX_VERSION_OFFSET: Felt = Felt::from_hex_unchecked("0x100000000000000000000000000000000"); @@ -46,17 +44,12 @@ impl TransactionWithHash { } } -#[derive(Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] 
+#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] pub struct BroadcastedDeclareTransactionV0 { - /// The address of the account contract sending the declaration transaction pub sender_address: Felt, - /// The maximal fee that can be charged for including the transaction pub max_fee: Felt, - /// Signature pub signature: Vec, - /// The class to be declared pub contract_class: Arc, - /// If set to `true`, uses a query-only transaction version that's invalid for execution pub is_query: bool, } @@ -725,6 +718,8 @@ impl From for DataAvailabilityMode { #[cfg(test)] mod tests { + use std::sync::Arc; + use super::*; use mp_class::{CompressedLegacyContractClass, LegacyEntryPointsByType}; diff --git a/crates/tests/src/lib.rs b/crates/tests/src/lib.rs index 7c4eef216..9ba0700b0 100644 --- a/crates/tests/src/lib.rs +++ b/crates/tests/src/lib.rs @@ -1,13 +1,18 @@ //! End to end tests for madara. +#![cfg(test)] mod rpc; use anyhow::bail; use rstest::rstest; +use starknet::accounts::{Account, Call, ExecutionEncoding, SingleOwnerAccount}; +use starknet::signers::{LocalWallet, SigningKey}; +use starknet_core::types::{BlockId, BlockTag, Felt}; +use starknet_core::utils::starknet_keccak; use starknet_providers::Provider; use starknet_providers::{jsonrpc::HttpTransport, JsonRpcClient, Url}; -use std::ops::Range; -use std::sync::Mutex; +use std::ops::{Deref, Range}; +use std::sync::{Arc, Mutex}; use std::{ collections::HashMap, env, @@ -38,9 +43,9 @@ async fn wait_for_cond>>(mut cond: pub struct MadaraCmd { process: Option, ready: bool, - json_rpc: Option>, + json_rpc: JsonRpcClient, rpc_url: Url, - tempdir: TempDir, + tempdir: Arc, _port: MadaraPortNum, } @@ -49,8 +54,8 @@ impl MadaraCmd { self.process.take().unwrap().wait_with_output().unwrap() } - pub fn json_rpc(&mut self) -> &JsonRpcClient { - self.json_rpc.get_or_insert_with(|| JsonRpcClient::new(HttpTransport::new(self.rpc_url.clone()))) + pub fn json_rpc(&self) -> &JsonRpcClient { + &self.json_rpc } pub fn 
db_dir(&self) -> &Path { @@ -121,6 +126,7 @@ lazy_static::lazy_static! { static ref AVAILABLE_PORTS: Mutex>> = Mutex::new(AvailablePorts { to_reuse: vec![], next: PORT_RANGE }); } +#[derive(Clone)] pub struct MadaraPortNum(pub u16); impl Drop for MadaraPortNum { fn drop(&mut self) { @@ -138,10 +144,11 @@ pub fn get_port() -> MadaraPortNum { MadaraPortNum(port) } +#[derive(Clone)] pub struct MadaraCmdBuilder { args: Vec, env: HashMap, - tempdir: TempDir, + tempdir: Arc, port: MadaraPortNum, } @@ -156,7 +163,7 @@ impl MadaraCmdBuilder { Self { args: Default::default(), env: Default::default(), - tempdir: TempDir::with_prefix("madara-test").unwrap(), + tempdir: Arc::new(TempDir::with_prefix("madara-test").unwrap()), port: get_port(), } } @@ -188,7 +195,7 @@ impl MadaraCmdBuilder { .into_iter() .chain([ "--base-path".into(), - format!("{}", self.tempdir.as_ref().display()), + format!("{}", self.tempdir.deref().as_ref().display()), "--rpc-port".into(), format!("{}", self.port.0), ]) @@ -198,11 +205,12 @@ impl MadaraCmdBuilder { .spawn() .unwrap(); + let rpc_url = Url::parse(&format!("http://127.0.0.1:{}/", self.port.0)).unwrap(); MadaraCmd { process: Some(process), ready: false, - json_rpc: None, - rpc_url: Url::parse(&format!("http://127.0.0.1:{}/", self.port.0)).unwrap(), + json_rpc: JsonRpcClient::new(HttpTransport::new(rpc_url.clone())), + rpc_url, tempdir: self.tempdir, _port: self.port, } @@ -250,3 +258,195 @@ async fn madara_can_sync_a_few_blocks() { } ); } + +const ERC20_STRK_CONTRACT_ADDRESS: Felt = + Felt::from_hex_unchecked("0x04718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d"); +#[allow(unused)] +const ERC20_ETH_CONTRACT_ADDRESS: Felt = + Felt::from_hex_unchecked("0x049d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7"); + +const ACCOUNT_SECRET: Felt = + Felt::from_hex_unchecked("0x077e56c6dc32d40a67f6f7e6625c8dc5e570abe49c0a24e9202e4ae906abcc07"); +const ACCOUNT_ADDRESS: Felt = + 
Felt::from_hex_unchecked("0x055be462e718c4166d656d11f89e341115b8bc82389c3762a10eade04fcb225d"); + +#[rstest] +#[tokio::test] +async fn madara_devnet_add_transaction() { + let _ = env_logger::builder().is_test(true).try_init(); + + let args = &[ + "--devnet", + "--no-l1-sync", + "--gas-price", + "0", + // only produce blocks no pending txs + "--chain-config-override", + "block_time=1s,pending_block_update_time=1s", + ]; + + let cmd_builder = MadaraCmdBuilder::new().args(*args); + let mut node = cmd_builder.run(); + node.wait_for_ready().await; + + tokio::time::sleep(Duration::from_secs(3)).await; + + let chain_id = node.json_rpc().chain_id().await.unwrap(); + + let signer = LocalWallet::from_signing_key(SigningKey::from_secret_scalar(ACCOUNT_SECRET)); + let mut account = + SingleOwnerAccount::new(node.json_rpc(), signer, ACCOUNT_ADDRESS, chain_id, ExecutionEncoding::New); + account.set_block_id(BlockId::Tag(BlockTag::Latest)); + + let res = account + .execute_v3(vec![Call { + to: ERC20_STRK_CONTRACT_ADDRESS, + selector: starknet_keccak(b"transfer"), + calldata: vec![ACCOUNT_ADDRESS, 15.into(), Felt::ZERO], + }]) + .send() + .await + .unwrap(); + + wait_for_cond( + || async { + let _receipt = node.json_rpc().get_transaction_receipt(res.transaction_hash).await?; + Ok(()) + }, + Duration::from_millis(500), + ) + .await; +} + +#[rstest] +#[tokio::test] +async fn madara_devnet_mempool_saving() { + let _ = env_logger::builder().is_test(true).try_init(); + + let cmd_builder = MadaraCmdBuilder::new().args([ + "--devnet", + "--no-l1-sync", + "--gas-price", + "0", + // never produce blocks & pending txs + "--chain-config-path", + "test_devnet.yaml", + "--chain-config-override", + "block_time=5min,pending_block_update_time=5min", + ]); + let mut node = cmd_builder.clone().run(); + node.wait_for_ready().await; + + let chain_id = node.json_rpc().chain_id().await.unwrap(); + + let signer = LocalWallet::from_signing_key(SigningKey::from_secret_scalar(ACCOUNT_SECRET)); + let mut 
account = + SingleOwnerAccount::new(node.json_rpc(), signer, ACCOUNT_ADDRESS, chain_id, ExecutionEncoding::New); + account.set_block_id(BlockId::Tag(BlockTag::Pending)); + + let res = account + .execute_v3(vec![Call { + to: ERC20_STRK_CONTRACT_ADDRESS, + selector: starknet_keccak(b"transfer"), + calldata: vec![ACCOUNT_ADDRESS, 15.into(), Felt::ZERO], + }]) + .send() + .await + .unwrap(); + + drop(node); + + // tx should be in saved mempool + + let cmd_builder = cmd_builder.args([ + "--devnet", + "--no-l1-sync", + "--gas-price", + "0", + // never produce blocks but produce pending txs + "--chain-config-path", + "test_devnet.yaml", + "--chain-config-override", + "block_time=5min,pending_block_update_time=500ms", + ]); + let mut node = cmd_builder.clone().run(); + node.wait_for_ready().await; + + // tx should be in mempool + + wait_for_cond( + || async { + let _receipt = node.json_rpc().get_transaction_receipt(res.transaction_hash).await?; + Ok(()) + }, + Duration::from_secs(1), + ) + .await; +} + +#[rstest] +#[tokio::test] +async fn madara_devnet_continue_pending() { + let _ = env_logger::builder().is_test(true).try_init(); + + let cmd_builder = MadaraCmdBuilder::new().args([ + "--devnet", + "--no-l1-sync", + "--gas-price", + "0", + // never produce blocks but produce pending txs + "--chain-config-path", + "test_devnet.yaml", + "--chain-config-override", + "block_time=5min,pending_block_update_time=500ms", + ]); + let mut node = cmd_builder.clone().run(); + node.wait_for_ready().await; + + let chain_id = node.json_rpc().chain_id().await.unwrap(); + + let signer = LocalWallet::from_signing_key(SigningKey::from_secret_scalar(ACCOUNT_SECRET)); + let mut account = + SingleOwnerAccount::new(node.json_rpc(), signer, ACCOUNT_ADDRESS, chain_id, ExecutionEncoding::New); + account.set_block_id(BlockId::Tag(BlockTag::Pending)); + + let res = account + .execute_v3(vec![Call { + to: ERC20_STRK_CONTRACT_ADDRESS, + selector: starknet_keccak(b"transfer"), + calldata: 
vec![ACCOUNT_ADDRESS, 15.into(), Felt::ZERO], + }]) + .send() + .await + .unwrap(); + + wait_for_cond( + || async { + let _receipt = node.json_rpc().get_transaction_receipt(res.transaction_hash).await?; + Ok(()) + }, + Duration::from_secs(1), + ) + .await; + + drop(node); + + // tx should appear in saved pending block + + let cmd_builder = cmd_builder.args([ + "--devnet", + "--no-l1-sync", + "--gas-price", + "0", + // never produce blocks never produce pending txs + "--chain-config-path", + "test_devnet.yaml", + "--chain-config-override", + "block_time=5min,pending_block_update_time=5min", + ]); + let mut node = cmd_builder.clone().run(); + node.wait_for_ready().await; + + // should find receipt + let _receipt = node.json_rpc().get_transaction_receipt(res.transaction_hash).await.unwrap(); +} diff --git a/crates/tests/test_devnet.yaml b/crates/tests/test_devnet.yaml new file mode 100644 index 000000000..eb50e6487 --- /dev/null +++ b/crates/tests/test_devnet.yaml @@ -0,0 +1,34 @@ +chain_name: "Test devnet" +chain_id: "MADARA_DEVNET" +feeder_gateway_url: "http://localhost:8080/feeder_gateway/" +gateway_url: "http://localhost:8080/gateway/" +native_fee_token_address: "0x04718f5a0fc34cc1af16a1cdee98ffb20c31f5cd61d6ab07201858f4287c938d" +parent_fee_token_address: "0x049d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7" +latest_protocol_version: "0.13.2" +block_time: "10s" +pending_block_update_time: "500ms" +execution_batch_size: 16 +bouncer_config: + block_max_capacity: + builtin_count: + add_mod: 18446744073709551615 + bitwise: 18446744073709551615 + ecdsa: 18446744073709551615 + ec_op: 18446744073709551615 + keccak: 18446744073709551615 + mul_mod: 18446744073709551615 + pedersen: 18446744073709551615 + poseidon: 18446744073709551615 + range_check: 18446744073709551615 + range_check96: 18446744073709551615 + gas: 18446744073709551615 + n_steps: 18446744073709551615 + message_segment_length: 18446744073709551615 + n_events: 18446744073709551615 + 
state_diff_size: 131072 +sequencer_address: "0x123" +eth_core_contract_address: "0xe7f1725e7734ce288f8367e1bb143e90bb3f0512" +eth_gps_statement_verifier: "0xf294781D719D2F4169cE54469C28908E6FA752C1" +mempool_tx_limit: 10000 +mempool_declare_tx_limit: 20 +mempool_tx_max_age: "5h" diff --git a/scripts/e2e-coverage.sh b/scripts/e2e-coverage.sh index 3022de323..acfed971e 100755 --- a/scripts/e2e-coverage.sh +++ b/scripts/e2e-coverage.sh @@ -19,7 +19,10 @@ subshell() { sleep 1 done - cargo test --profile dev --workspace $@ + + ARGS=$@ + export PROPTEST_CASES=5 + cargo test --profile dev ${ARGS:=--workspace} cargo llvm-cov report --lcov --output-path lcov.info cargo llvm-cov report diff --git a/scripts/e2e-tests.sh b/scripts/e2e-tests.sh index 352fa5652..47bb46805 100755 --- a/scripts/e2e-tests.sh +++ b/scripts/e2e-tests.sh @@ -20,7 +20,9 @@ subshell() { sleep 1 done - cargo test --profile dev --workspace $@ + ARGS=$@ + export PROPTEST_CASES=5 + cargo test --profile dev ${ARGS:=--workspace} } (subshell $@ && r=$?) || r=$? 
From dd38df16283db24968b4c93013d7abd9d4f16c7a Mon Sep 17 00:00:00 2001 From: cchudant Date: Tue, 5 Nov 2024 11:41:22 +0000 Subject: [PATCH 5/9] fix: test, and add bouncer weights to db --- Cargo.lock | 1 + .../client/block_import/src/verify_apply.rs | 12 +++++---- crates/client/block_production/src/lib.rs | 11 ++++---- crates/client/db/Cargo.toml | 1 + crates/client/db/src/block_db.rs | 17 ++++++++++++ crates/client/db/src/storage_updates.rs | 3 +++ crates/client/db/src/tests/test_block.rs | 26 +++++++++---------- crates/client/rpc/src/test_utils.rs | 8 ++++++ .../methods/read/block_hash_and_number.rs | 4 +++ .../methods/read/get_block_with_receipts.rs | 1 + .../rpc/src/versions/v0_8_0/methods/ws/lib.rs | 1 + crates/primitives/utils/src/parsers.rs | 2 +- 12 files changed, 63 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 03464c67c..8f52b55b0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5632,6 +5632,7 @@ version = "0.7.0" dependencies = [ "anyhow", "bincode 1.3.3", + "blockifier", "bonsai-trie", "lazy_static", "log", diff --git a/crates/client/block_import/src/verify_apply.rs b/crates/client/block_import/src/verify_apply.rs index 452093ced..480b46b19 100644 --- a/crates/client/block_import/src/verify_apply.rs +++ b/crates/client/block_import/src/verify_apply.rs @@ -101,6 +101,7 @@ pub fn verify_apply_inner( block.state_diff, block.converted_classes, block.visited_segments, + None, ) .map_err(make_db_error("storing block in db"))?; @@ -145,6 +146,7 @@ pub fn verify_apply_pending_inner( block.state_diff, block.converted_classes, block.visited_segments, + None, ) .map_err(make_db_error("storing block in db"))?; @@ -408,7 +410,7 @@ mod verify_apply_tests { if populate_db { let header = create_dummy_header(); let pending_block = finalized_block_zero(header); - backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![], None).unwrap(); + backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![], None, 
None).unwrap(); } // Create a validation context with the specified ignore_block_order flag @@ -662,7 +664,7 @@ mod verify_apply_tests { let mut header = create_dummy_header(); header.block_number = 0; let pending_block = finalized_block_zero(header); - backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![], None).unwrap(); + backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![], None, None).unwrap(); assert_eq!(backend.get_latest_block_n().unwrap(), Some(0)); @@ -688,7 +690,7 @@ mod verify_apply_tests { let mut header = create_dummy_header(); header.block_number = 0; let pending_block = finalized_block_zero(header); - backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![], None).unwrap(); + backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![], None, None).unwrap(); assert_eq!(backend.get_latest_block_n().unwrap(), Some(0)); @@ -724,7 +726,7 @@ mod verify_apply_tests { let mut genesis_header = create_dummy_header(); genesis_header.block_number = 0; let genesis_block = finalized_block_zero(genesis_header.clone()); - backend.store_block(genesis_block, finalized_state_diff_zero(), vec![], None).unwrap(); + backend.store_block(genesis_block, finalized_state_diff_zero(), vec![], None, None).unwrap(); assert_eq!(backend.get_latest_block_n().unwrap(), Some(0)); @@ -770,7 +772,7 @@ mod verify_apply_tests { let mut genesis_header = create_dummy_header(); genesis_header.block_number = 0; let genesis_block = finalized_block_zero(genesis_header.clone()); - backend.store_block(genesis_block, finalized_state_diff_zero(), vec![], None).unwrap(); + backend.store_block(genesis_block, finalized_state_diff_zero(), vec![], None, None).unwrap(); assert_eq!(backend.get_latest_block_n().unwrap(), Some(0)); diff --git a/crates/client/block_production/src/lib.rs b/crates/client/block_production/src/lib.rs index 00b03918d..34690ce44 100644 --- a/crates/client/block_production/src/lib.rs +++ 
b/crates/client/block_production/src/lib.rs @@ -206,7 +206,7 @@ impl BlockProductionTask { fn continue_block( &mut self, bouncer_cap: BouncerWeights, - ) -> Result<(StateDiff, VisitedSegments, ContinueBlockStats), Error> { + ) -> Result<(StateDiff, VisitedSegments, BouncerWeights, ContinueBlockStats), Error> { let mut stats = ContinueBlockStats::default(); self.executor.bouncer.bouncer_config.block_max_capacity = bouncer_cap; @@ -296,7 +296,7 @@ impl BlockProductionTask { let on_top_of = self.executor.block_state.as_ref().expect(BLOCK_STATE_ACCESS_ERR).state.on_top_of_block_id; // TODO: save visited segments for SNOS. - let (state_diff, visited_segments, _weights) = finalize_execution_state::finalize_execution_state( + let (state_diff, visited_segments, bouncer_weights) = finalize_execution_state::finalize_execution_state( &executed_txs, &mut self.executor, &self.backend, @@ -314,7 +314,7 @@ impl BlockProductionTask { stats.n_re_added_to_mempool ); - Ok((state_diff, visited_segments, stats)) + Ok((state_diff, visited_segments, bouncer_weights, stats)) } /// Each "tick" of the block time updates the pending block but only with the appropriate fraction of the total bouncer capacity. @@ -361,7 +361,7 @@ impl BlockProductionTask { }; let start_time = Instant::now(); - let (state_diff, visited_segments, stats) = self.continue_block(bouncer_cap)?; + let (state_diff, visited_segments, bouncer_weights, stats) = self.continue_block(bouncer_cap)?; if stats.n_added_to_block > 0 { tracing::info!( "🧮 Executed and added {} transaction(s) to the pending block at height {} - {:?}", @@ -378,6 +378,7 @@ impl BlockProductionTask { state_diff, self.declared_classes.clone(), Some(visited_segments), + Some(bouncer_weights), )?; // do not forget to flush :) self.backend @@ -395,7 +396,7 @@ impl BlockProductionTask { // Complete the block with full bouncer capacity. 
let start_time = Instant::now(); - let (mut new_state_diff, visited_segments, _stats) = + let (mut new_state_diff, visited_segments, _weights, _stats) = self.continue_block(self.backend.chain_config().bouncer_config.block_max_capacity)?; // SNOS requirement: For blocks >= 10, the hash of the block 10 blocks prior diff --git a/crates/client/db/Cargo.toml b/crates/client/db/Cargo.toml index 0440e2103..f8042aea7 100644 --- a/crates/client/db/Cargo.toml +++ b/crates/client/db/Cargo.toml @@ -27,6 +27,7 @@ mp-transactions = { workspace = true } mp-utils = { workspace = true } # Starknet +blockifier = { workspace = true } bonsai-trie = { workspace = true } starknet-core = { workspace = true } starknet-types-core = { workspace = true } diff --git a/crates/client/db/src/block_db.rs b/crates/client/db/src/block_db.rs index 0414a3365..ee06f0fba 100644 --- a/crates/client/db/src/block_db.rs +++ b/crates/client/db/src/block_db.rs @@ -2,6 +2,7 @@ use crate::db_block_id::{DbBlockId, DbBlockIdResolvable}; use crate::MadaraStorageError; use crate::{Column, DatabaseExt, MadaraBackend, WriteBatchWithTransaction}; use anyhow::Context; +use blockifier::bouncer::BouncerWeights; use mp_block::header::{GasPrices, PendingHeader}; use mp_block::{ BlockId, BlockTag, MadaraBlock, MadaraBlockInfo, MadaraBlockInner, MadaraMaybePendingBlock, @@ -24,6 +25,7 @@ const ROW_CHAIN_INFO: &[u8] = b"chain_info"; const ROW_PENDING_INFO: &[u8] = b"pending_info"; const ROW_PENDING_STATE_UPDATE: &[u8] = b"pending_state_update"; const ROW_PENDING_SEGMENTS: &[u8] = b"pending_segments"; +const ROW_PENDING_BOUNCER_WEIGHTS: &[u8] = b"pending_bouncer_weights"; const ROW_PENDING_INNER: &[u8] = b"pending"; const ROW_SYNC_TIP: &[u8] = b"sync_tip"; const ROW_L1_LAST_CONFIRMED_BLOCK: &[u8] = b"l1_last"; @@ -202,6 +204,17 @@ impl MadaraBackend { Ok(res) } + #[tracing::instrument(skip(self), fields(module = "BlockDB"))] + pub fn get_pending_block_bouncer_weights(&self) -> Result> { + let col = 
self.db.get_column(Column::BlockStorageMeta); + let Some(res) = self.db.get_cf(&col, ROW_PENDING_BOUNCER_WEIGHTS)? else { + // See pending block quirk + return Ok(None); + }; + let res = Some(bincode::deserialize(&res)?); + Ok(res) + } + #[tracing::instrument(skip(self), fields(module = "BlockDB"))] pub fn get_l1_last_confirmed_block(&self) -> Result> { let col = self.db.get_column(Column::BlockStorageMeta); @@ -218,6 +231,7 @@ impl MadaraBackend { block: &MadaraPendingBlock, state_update: &StateDiff, visited_segments: Option)>>, + bouncer_weights: Option, ) -> Result<()> { let mut tx = WriteBatchWithTransaction::default(); let col = self.db.get_column(Column::BlockStorageMeta); @@ -227,6 +241,9 @@ impl MadaraBackend { if let Some(visited_segments) = visited_segments { tx.put_cf(&col, ROW_PENDING_SEGMENTS, bincode::serialize(&visited_segments)?); } + if let Some(bouncer_weights) = bouncer_weights { + tx.put_cf(&col, ROW_PENDING_BOUNCER_WEIGHTS, bincode::serialize(&bouncer_weights)?); + } let mut writeopts = WriteOptions::new(); writeopts.disable_wal(true); self.db.write_opt(tx, &writeopts)?; diff --git a/crates/client/db/src/storage_updates.rs b/crates/client/db/src/storage_updates.rs index e34b22b46..367c8a9ef 100644 --- a/crates/client/db/src/storage_updates.rs +++ b/crates/client/db/src/storage_updates.rs @@ -1,5 +1,6 @@ use crate::MadaraBackend; use crate::MadaraStorageError; +use blockifier::bouncer::BouncerWeights; use mp_block::{MadaraBlock, MadaraMaybePendingBlock, MadaraMaybePendingBlockInfo, MadaraPendingBlock}; use mp_class::ConvertedClass; use mp_state_update::{ @@ -16,6 +17,7 @@ impl MadaraBackend { state_diff: StateDiff, converted_classes: Vec, visited_segments: Option)>>, + bouncer_weights: Option, ) -> Result<(), MadaraStorageError> { let block_n = block.info.block_n(); let state_diff_cpy = state_diff.clone(); @@ -28,6 +30,7 @@ impl MadaraBackend { &MadaraPendingBlock { info, inner: block.inner }, &state_diff_cpy, visited_segments, + 
bouncer_weights, ), MadaraMaybePendingBlockInfo::NotPending(info) => { self.block_db_store_block(&MadaraBlock { info, inner: block.inner }, &state_diff_cpy) diff --git a/crates/client/db/src/tests/test_block.rs b/crates/client/db/src/tests/test_block.rs index cd1d3a8f5..006c392c6 100644 --- a/crates/client/db/src/tests/test_block.rs +++ b/crates/client/db/src/tests/test_block.rs @@ -25,8 +25,8 @@ mod block_tests { let block_hash = block.info.block_hash().unwrap(); let state_diff = finalized_state_diff_zero(); - backend.store_block(block.clone(), state_diff.clone(), vec![], None).unwrap(); - backend.store_block(pending_block_one(), pending_state_diff_one(), vec![], None).unwrap(); + backend.store_block(block.clone(), state_diff.clone(), vec![], None, None).unwrap(); + backend.store_block(pending_block_one(), pending_state_diff_one(), vec![], None, None).unwrap(); assert_eq!(backend.resolve_block_id(&BlockId::Hash(block_hash)).unwrap().unwrap(), DbBlockId::Number(0)); assert_eq!(backend.resolve_block_id(&BlockId::Number(0)).unwrap().unwrap(), DbBlockId::Number(0)); @@ -53,7 +53,7 @@ mod block_tests { let block = finalized_block_zero(Header::default()); let state_diff = finalized_state_diff_zero(); - backend.store_block(block.clone(), state_diff.clone(), vec![], None).unwrap(); + backend.store_block(block.clone(), state_diff.clone(), vec![], None, None).unwrap(); assert_eq!(backend.get_block_hash(&BLOCK_ID_0).unwrap().unwrap(), block.info.block_hash().unwrap()); assert_eq!(BLOCK_ID_0.resolve_db_block_id(backend).unwrap().unwrap(), BLOCK_ID_0); @@ -76,7 +76,7 @@ mod block_tests { let block = pending_block_one(); let state_diff = pending_state_diff_one(); - backend.store_block(block.clone(), state_diff.clone(), vec![], None).unwrap(); + backend.store_block(block.clone(), state_diff.clone(), vec![], None, None).unwrap(); assert!(backend.get_block_hash(&BLOCK_ID_PENDING).unwrap().is_none()); assert_eq!(backend.get_block_info(&BLOCK_ID_PENDING).unwrap().unwrap(), 
block.info); @@ -93,9 +93,9 @@ mod block_tests { let backend = db.backend(); backend - .store_block(finalized_block_zero(Header::default()), finalized_state_diff_zero(), vec![], None) + .store_block(finalized_block_zero(Header::default()), finalized_state_diff_zero(), vec![], None, None) .unwrap(); - backend.store_block(pending_block_one(), pending_state_diff_one(), vec![], None).unwrap(); + backend.store_block(pending_block_one(), pending_state_diff_one(), vec![], None, None).unwrap(); backend.clear_pending_block().unwrap(); assert!(backend.get_block(&BLOCK_ID_PENDING).unwrap().unwrap().inner.transactions.is_empty()); @@ -105,11 +105,11 @@ mod block_tests { "fake pending block parent hash must match with latest block in db" ); - backend.store_block(finalized_block_one(), finalized_state_diff_one(), vec![], None).unwrap(); + backend.store_block(finalized_block_one(), finalized_state_diff_one(), vec![], None, None).unwrap(); let block_pending = pending_block_two(); let state_diff = pending_state_diff_two(); - backend.store_block(block_pending.clone(), state_diff.clone(), vec![], None).unwrap(); + backend.store_block(block_pending.clone(), state_diff.clone(), vec![], None, None).unwrap(); assert!(backend.get_block_hash(&BLOCK_ID_PENDING).unwrap().is_none()); assert_eq!(backend.get_block_info(&BLOCK_ID_PENDING).unwrap().unwrap(), block_pending.info); @@ -124,11 +124,11 @@ mod block_tests { let backend = db.backend(); backend - .store_block(finalized_block_zero(Header::default()), finalized_state_diff_zero(), vec![], None) + .store_block(finalized_block_zero(Header::default()), finalized_state_diff_zero(), vec![], None, None) .unwrap(); let latest_block = finalized_block_one(); - backend.store_block(latest_block.clone(), finalized_state_diff_one(), vec![], None).unwrap(); + backend.store_block(latest_block.clone(), finalized_state_diff_one(), vec![], None, None).unwrap(); assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 1); } @@ -153,7 +153,7 @@ mod 
block_tests { let block = finalized_block_zero(Header::default()); let state_diff = finalized_state_diff_zero(); - backend.store_block(block.clone(), state_diff.clone(), vec![], None).unwrap(); + backend.store_block(block.clone(), state_diff.clone(), vec![], None, None).unwrap(); let tx_hash_1 = block.info.tx_hashes()[1]; assert_eq!(backend.find_tx_hash_block_info(&tx_hash_1).unwrap().unwrap(), (block.info.clone(), TxIndex(1))); @@ -166,11 +166,11 @@ mod block_tests { let backend = db.backend(); backend - .store_block(finalized_block_zero(Header::default()), finalized_state_diff_zero(), vec![], None) + .store_block(finalized_block_zero(Header::default()), finalized_state_diff_zero(), vec![], None, None) .unwrap(); let block_pending = pending_block_one(); - backend.store_block(block_pending.clone(), pending_state_diff_one(), vec![], None).unwrap(); + backend.store_block(block_pending.clone(), pending_state_diff_one(), vec![], None, None).unwrap(); let tx_hash_1 = block_pending.info.tx_hashes()[1]; assert_eq!( diff --git a/crates/client/rpc/src/test_utils.rs b/crates/client/rpc/src/test_utils.rs index dcb725225..52f2a835b 100644 --- a/crates/client/rpc/src/test_utils.rs +++ b/crates/client/rpc/src/test_utils.rs @@ -222,6 +222,7 @@ pub fn make_sample_chain_for_block_getters(backend: &MadaraBackend) -> SampleCha StateDiff::default(), vec![], None, + None, ) .unwrap(); @@ -246,6 +247,7 @@ pub fn make_sample_chain_for_block_getters(backend: &MadaraBackend) -> SampleCha StateDiff::default(), vec![], None, + None, ) .unwrap(); @@ -314,6 +316,7 @@ pub fn make_sample_chain_for_block_getters(backend: &MadaraBackend) -> SampleCha StateDiff::default(), vec![], None, + None, ) .unwrap(); @@ -351,6 +354,7 @@ pub fn make_sample_chain_for_block_getters(backend: &MadaraBackend) -> SampleCha StateDiff::default(), vec![], None, + None, ) .unwrap(); } @@ -517,6 +521,7 @@ pub fn make_sample_chain_for_state_updates(backend: &MadaraBackend) -> SampleCha state_diffs[0].clone(), vec![], 
None, + None, ) .unwrap(); @@ -540,6 +545,7 @@ pub fn make_sample_chain_for_state_updates(backend: &MadaraBackend) -> SampleCha state_diffs[1].clone(), vec![], None, + None, ) .unwrap(); @@ -563,6 +569,7 @@ pub fn make_sample_chain_for_state_updates(backend: &MadaraBackend) -> SampleCha state_diffs[2].clone(), vec![], None, + None, ) .unwrap(); @@ -583,6 +590,7 @@ pub fn make_sample_chain_for_state_updates(backend: &MadaraBackend) -> SampleCha state_diffs[3].clone(), vec![], None, + None, ) .unwrap(); } diff --git a/crates/client/rpc/src/versions/v0_7_1/methods/read/block_hash_and_number.rs b/crates/client/rpc/src/versions/v0_7_1/methods/read/block_hash_and_number.rs index fe0dac6c2..fd6de817b 100644 --- a/crates/client/rpc/src/versions/v0_7_1/methods/read/block_hash_and_number.rs +++ b/crates/client/rpc/src/versions/v0_7_1/methods/read/block_hash_and_number.rs @@ -53,6 +53,7 @@ mod tests { StateDiff::default(), vec![], None, + None, ) .unwrap(); @@ -71,6 +72,7 @@ mod tests { StateDiff::default(), vec![], None, + None, ) .unwrap(); @@ -92,6 +94,7 @@ mod tests { StateDiff::default(), vec![], None, + None, ) .unwrap(); @@ -120,6 +123,7 @@ mod tests { StateDiff::default(), vec![], None, + None, ) .unwrap(); diff --git a/crates/client/rpc/src/versions/v0_7_1/methods/read/get_block_with_receipts.rs b/crates/client/rpc/src/versions/v0_7_1/methods/read/get_block_with_receipts.rs index 62e35e13d..8c976fe4b 100644 --- a/crates/client/rpc/src/versions/v0_7_1/methods/read/get_block_with_receipts.rs +++ b/crates/client/rpc/src/versions/v0_7_1/methods/read/get_block_with_receipts.rs @@ -232,6 +232,7 @@ mod tests { StateDiff::default(), vec![], None, + None, ) .unwrap(); diff --git a/crates/client/rpc/src/versions/v0_8_0/methods/ws/lib.rs b/crates/client/rpc/src/versions/v0_8_0/methods/ws/lib.rs index e088b7961..0fe760489 100644 --- a/crates/client/rpc/src/versions/v0_8_0/methods/ws/lib.rs +++ b/crates/client/rpc/src/versions/v0_8_0/methods/ws/lib.rs @@ -168,6 +168,7 @@ mod test 
{ mp_state_update::StateDiff::default(), vec![], None, + None, ) .expect("Storing block"); diff --git a/crates/primitives/utils/src/parsers.rs b/crates/primitives/utils/src/parsers.rs index e467f4741..3f7688027 100644 --- a/crates/primitives/utils/src/parsers.rs +++ b/crates/primitives/utils/src/parsers.rs @@ -57,10 +57,10 @@ mod tests { assert_eq!(parse_duration("200ms").unwrap(), Duration::from_millis(200)); assert_eq!(parse_duration("5min").unwrap(), Duration::from_secs(300)); assert_eq!(parse_duration("1 min").unwrap(), Duration::from_secs(60)); + assert_eq!(parse_duration("5h").unwrap(), Duration::from_secs(5 * 60 * 60)); assert_eq!(parse_duration("10 s").unwrap(), Duration::from_secs(10)); assert!(parse_duration("2x").is_err()); assert!(parse_duration("200").is_err()); - assert!(parse_duration("5h").is_err()); assert!(parse_duration("ms200").is_err()); assert!(parse_duration("-5s").is_err()); assert!(parse_duration("5.5s").is_err()); From c08d3afa71bd3f5b80575c751f8e56b485e520ce Mon Sep 17 00:00:00 2001 From: cchudant Date: Tue, 5 Nov 2024 11:46:33 +0000 Subject: [PATCH 6/9] fix(db): clear pending bouncer weights --- crates/client/db/src/block_db.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/client/db/src/block_db.rs b/crates/client/db/src/block_db.rs index ee06f0fba..79081b709 100644 --- a/crates/client/db/src/block_db.rs +++ b/crates/client/db/src/block_db.rs @@ -258,6 +258,7 @@ impl MadaraBackend { tx.delete_cf(&col, ROW_PENDING_INNER); tx.delete_cf(&col, ROW_PENDING_STATE_UPDATE); tx.delete_cf(&col, ROW_PENDING_SEGMENTS); + tx.delete_cf(&col, ROW_PENDING_BOUNCER_WEIGHTS); let mut writeopts = WriteOptions::new(); writeopts.disable_wal(true); self.db.write_opt(tx, &writeopts)?; From 029e0b6c2c24c5e0f45292ffd67742b3a6136a16 Mon Sep 17 00:00:00 2001 From: cchudant Date: Tue, 19 Nov 2024 14:37:31 +0000 Subject: [PATCH 7/9] fix: review comments --- configs/chain_config.example.yaml | 2 +- crates/client/block_import/src/types.rs | 10 +-- 
.../block_production/src/close_block.rs | 4 +- .../src/finalize_execution_state.rs | 86 +++++++++---------- crates/client/block_production/src/lib.rs | 8 +- crates/client/db/src/block_db.rs | 6 +- crates/client/db/src/storage_updates.rs | 3 +- crates/primitives/block/src/lib.rs | 9 ++ .../chain_config/src/chain_config.rs | 2 +- 9 files changed, 62 insertions(+), 68 deletions(-) diff --git a/configs/chain_config.example.yaml b/configs/chain_config.example.yaml index 0235b019d..5e4d35143 100644 --- a/configs/chain_config.example.yaml +++ b/configs/chain_config.example.yaml @@ -72,7 +72,7 @@ sequencer_address: "0x0" # Transaction limit in the mempool. mempool_tx_limit: 10000 -# Transaction limit in the mempool, additionnal limit for declare transactions. +# Transaction limit in the mempool, additional limit for declare transactions. mempool_declare_tx_limit: 20 # Max age of a transaction in the mempool. mempool_tx_max_age: "5h" diff --git a/crates/client/block_import/src/types.rs b/crates/client/block_import/src/types.rs index 28e1e851c..230389906 100644 --- a/crates/client/block_import/src/types.rs +++ b/crates/client/block_import/src/types.rs @@ -3,7 +3,7 @@ use mp_block::{ header::{GasPrices, L1DataAvailabilityMode}, - Header, + Header, VisitedSegments, }; use mp_chain_config::StarknetVersion; use mp_class::{ @@ -149,7 +149,7 @@ pub struct UnverifiedPendingFullBlock { pub transactions: Vec, pub receipts: Vec, pub declared_classes: Vec, - pub visited_segments: Option)>>, + pub visited_segments: Option, } /// An unverified full block as input for the block import pipeline. @@ -166,7 +166,7 @@ pub struct UnverifiedFullBlock { #[serde(skip)] pub trusted_converted_classes: Vec, pub commitments: UnverifiedCommitments, - pub visited_segments: Option)>>, + pub visited_segments: Option, } // Pre-validate outputs. 
@@ -194,7 +194,7 @@ pub struct PreValidatedBlock { pub unverified_global_state_root: Option, pub unverified_block_hash: Option, pub unverified_block_number: Option, - pub visited_segments: Option)>>, + pub visited_segments: Option, } /// Output of the [`crate::pre_validate`] step. @@ -205,7 +205,7 @@ pub struct PreValidatedPendingBlock { pub state_diff: StateDiff, pub receipts: Vec, pub converted_classes: Vec, - pub visited_segments: Option)>>, + pub visited_segments: Option, } // Verify-apply output. diff --git a/crates/client/block_production/src/close_block.rs b/crates/client/block_production/src/close_block.rs index 9a64f8999..ffc520497 100644 --- a/crates/client/block_production/src/close_block.rs +++ b/crates/client/block_production/src/close_block.rs @@ -1,13 +1,11 @@ use mc_block_import::{ BlockImportError, BlockImportResult, BlockImporter, BlockValidationContext, UnverifiedFullBlock, UnverifiedHeader, }; -use mp_block::{header::PendingHeader, MadaraPendingBlock, MadaraPendingBlockInfo}; +use mp_block::{header::PendingHeader, MadaraPendingBlock, MadaraPendingBlockInfo, VisitedSegments}; use mp_class::ConvertedClass; use mp_state_update::StateDiff; use starknet_api::core::ChainId; -use crate::VisitedSegments; - /// Close the block (convert from pending to closed), and store to db. This is delegated to the block import module. 
#[tracing::instrument(skip(importer, state_diff, declared_classes), fields(module = "BlockProductionTask"))] pub async fn close_block( diff --git a/crates/client/block_production/src/finalize_execution_state.rs b/crates/client/block_production/src/finalize_execution_state.rs index c8cc55081..f882b7e99 100644 --- a/crates/client/block_production/src/finalize_execution_state.rs +++ b/crates/client/block_production/src/finalize_execution_state.rs @@ -1,5 +1,4 @@ -use std::collections::{hash_map, HashMap}; - +use crate::Error; use blockifier::{ blockifier::transaction_executor::{TransactionExecutor, BLOCK_STATE_ACCESS_ERR}, bouncer::BouncerWeights, @@ -8,63 +7,55 @@ use blockifier::{ }; use mc_db::{db_block_id::DbBlockId, MadaraBackend}; use mc_mempool::MempoolTransaction; +use mp_block::{VisitedSegmentEntry, VisitedSegments}; use mp_convert::ToFelt; use mp_state_update::{ ContractStorageDiffItem, DeclaredClassItem, DeployedContractItem, NonceUpdate, ReplacedClassItem, StateDiff, StorageEntry, }; use starknet_api::core::{ClassHash, CompiledClassHash, ContractAddress, Nonce}; - -use crate::{Error, VisitedSegments}; +use std::collections::{hash_map, HashMap}; #[derive(Debug, thiserror::Error)] #[error("Error converting state diff to state map")] pub struct StateDiffToStateMapError; pub fn state_diff_to_state_map(diff: StateDiff) -> Result { - Ok(StateMaps { - nonces: diff - .nonces - .into_iter() - .map(|entry| { - Ok((entry.contract_address.try_into().map_err(|_| StateDiffToStateMapError)?, Nonce(entry.nonce))) - }) - .collect::>()?, - class_hashes: diff - .deployed_contracts - .into_iter() - .map(|entry| { - Ok((entry.address.try_into().map_err(|_| StateDiffToStateMapError)?, ClassHash(entry.class_hash))) - }) - .chain(diff.replaced_classes.into_iter().map(|entry| { + let nonces = diff + .nonces + .into_iter() + .map(|entry| Ok((entry.contract_address.try_into().map_err(|_| StateDiffToStateMapError)?, Nonce(entry.nonce)))) + .collect::>()?; + let class_hashes = diff + 
.deployed_contracts + .into_iter() + .map(|entry| Ok((entry.address.try_into().map_err(|_| StateDiffToStateMapError)?, ClassHash(entry.class_hash)))) + .chain(diff.replaced_classes.into_iter().map(|entry| { + Ok((entry.contract_address.try_into().map_err(|_| StateDiffToStateMapError)?, ClassHash(entry.class_hash))) + })) + .collect::>()?; + let storage = diff + .storage_diffs + .into_iter() + .flat_map(|d| { + d.storage_entries.into_iter().map(move |e| { Ok(( - entry.contract_address.try_into().map_err(|_| StateDiffToStateMapError)?, - ClassHash(entry.class_hash), + ( + d.address.try_into().map_err(|_| StateDiffToStateMapError)?, + e.key.try_into().map_err(|_| StateDiffToStateMapError)?, + ), + e.value, )) - })) - .collect::>()?, - storage: diff - .storage_diffs - .into_iter() - .flat_map(|d| { - d.storage_entries.into_iter().map(move |e| { - Ok(( - ( - d.address.try_into().map_err(|_| StateDiffToStateMapError)?, - e.key.try_into().map_err(|_| StateDiffToStateMapError)?, - ), - e.value, - )) - }) }) - .collect::>()?, - declared_contracts: diff.declared_classes.iter().map(|d| (ClassHash(d.class_hash), true)).collect(), - compiled_class_hashes: diff - .declared_classes - .into_iter() - .map(|d| (ClassHash(d.class_hash), CompiledClassHash(d.compiled_class_hash))) - .collect(), - }) + }) + .collect::>()?; + let declared_contracts = diff.declared_classes.iter().map(|d| (ClassHash(d.class_hash), true)).collect(); + let compiled_class_hashes = diff + .declared_classes + .into_iter() + .map(|d| (ClassHash(d.class_hash), CompiledClassHash(d.compiled_class_hash))) + .collect(); + Ok(StateMaps { nonces, class_hashes, storage, declared_contracts, compiled_class_hashes }) } fn state_map_to_state_diff( @@ -160,11 +151,14 @@ fn get_visited_segments(tx_executor: &mut TransactionExecutor .expect(BLOCK_STATE_ACCESS_ERR) .get_compiled_contract_class(*class_hash) .map_err(TransactionExecutionError::StateError)?; - Ok((class_hash.to_felt(), 
contract_class.get_visited_segments(class_visited_pcs)?)) + Ok(VisitedSegmentEntry { + class_hash: class_hash.to_felt(), + segments: contract_class.get_visited_segments(class_visited_pcs)?, + }) }) .collect::>()?; - Ok(visited_segments) + Ok(VisitedSegments(visited_segments)) } pub(crate) fn finalize_execution_state( diff --git a/crates/client/block_production/src/lib.rs b/crates/client/block_production/src/lib.rs index 34690ce44..0271b2506 100644 --- a/crates/client/block_production/src/lib.rs +++ b/crates/client/block_production/src/lib.rs @@ -28,7 +28,7 @@ use mc_db::{MadaraBackend, MadaraStorageError}; use mc_exec::{BlockifierStateAdapter, ExecutionContext}; use mc_mempool::header::make_pending_header; use mc_mempool::{L1DataProvider, MempoolProvider}; -use mp_block::{BlockId, BlockTag, MadaraMaybePendingBlockInfo, MadaraPendingBlock}; +use mp_block::{BlockId, BlockTag, MadaraMaybePendingBlockInfo, MadaraPendingBlock, VisitedSegments}; use mp_class::compile::ClassCompilationError; use mp_class::{ConvertedClass, LegacyConvertedClass, SierraConvertedClass}; use mp_convert::ToFelt; @@ -49,8 +49,6 @@ mod close_block; mod finalize_execution_state; pub mod metrics; -pub type VisitedSegments = Vec<(Felt, Vec)>; - #[derive(Default, Clone)] struct ContinueBlockStats { /// Number of batches executed before reaching the bouncer capacity. 
@@ -105,10 +103,6 @@ impl BlockProductionTask { self.current_pending_tick = n; } - // #[tracing::instrument( - // skip(backend, importer, mempool, l1_data_provider, metrics), - // fields(module = "BlockProductionTask") - // )] pub fn new( backend: Arc, importer: Arc, diff --git a/crates/client/db/src/block_db.rs b/crates/client/db/src/block_db.rs index 79081b709..33bb6a395 100644 --- a/crates/client/db/src/block_db.rs +++ b/crates/client/db/src/block_db.rs @@ -6,7 +6,7 @@ use blockifier::bouncer::BouncerWeights; use mp_block::header::{GasPrices, PendingHeader}; use mp_block::{ BlockId, BlockTag, MadaraBlock, MadaraBlockInfo, MadaraBlockInner, MadaraMaybePendingBlock, - MadaraMaybePendingBlockInfo, MadaraPendingBlock, MadaraPendingBlockInfo, + MadaraMaybePendingBlockInfo, MadaraPendingBlock, MadaraPendingBlockInfo, VisitedSegments, }; use mp_state_update::StateDiff; use rocksdb::WriteOptions; @@ -30,8 +30,6 @@ const ROW_PENDING_INNER: &[u8] = b"pending"; const ROW_SYNC_TIP: &[u8] = b"sync_tip"; const ROW_L1_LAST_CONFIRMED_BLOCK: &[u8] = b"l1_last"; -pub type VisitedSegments = Vec<(Felt, Vec)>; - #[derive(Debug, PartialEq, Eq)] pub struct TxIndex(pub u64); @@ -230,7 +228,7 @@ impl MadaraBackend { &self, block: &MadaraPendingBlock, state_update: &StateDiff, - visited_segments: Option)>>, + visited_segments: Option, bouncer_weights: Option, ) -> Result<()> { let mut tx = WriteBatchWithTransaction::default(); diff --git a/crates/client/db/src/storage_updates.rs b/crates/client/db/src/storage_updates.rs index 367c8a9ef..624a327be 100644 --- a/crates/client/db/src/storage_updates.rs +++ b/crates/client/db/src/storage_updates.rs @@ -1,6 +1,7 @@ use crate::MadaraBackend; use crate::MadaraStorageError; use blockifier::bouncer::BouncerWeights; +use mp_block::VisitedSegments; use mp_block::{MadaraBlock, MadaraMaybePendingBlock, MadaraMaybePendingBlockInfo, MadaraPendingBlock}; use mp_class::ConvertedClass; use mp_state_update::{ @@ -16,7 +17,7 @@ impl MadaraBackend { block: 
MadaraMaybePendingBlock, state_diff: StateDiff, converted_classes: Vec, - visited_segments: Option)>>, + visited_segments: Option, bouncer_weights: Option, ) -> Result<(), MadaraStorageError> { let block_n = block.info.block_n(); diff --git a/crates/primitives/block/src/lib.rs b/crates/primitives/block/src/lib.rs index 354109bab..16317c018 100644 --- a/crates/primitives/block/src/lib.rs +++ b/crates/primitives/block/src/lib.rs @@ -341,6 +341,15 @@ impl From for MadaraMaybePendingBlock { } } +#[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +pub struct VisitedSegments(pub Vec); + +#[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +pub struct VisitedSegmentEntry { + pub class_hash: Felt, + pub segments: Vec, +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/primitives/chain_config/src/chain_config.rs b/crates/primitives/chain_config/src/chain_config.rs index 35aa8a391..043649077 100644 --- a/crates/primitives/chain_config/src/chain_config.rs +++ b/crates/primitives/chain_config/src/chain_config.rs @@ -133,7 +133,7 @@ pub struct ChainConfig { /// Transaction limit in the mempool. pub mempool_tx_limit: usize, - /// Transaction limit in the mempool, we have an additionnal limit for declare transactions. + /// Transaction limit in the mempool, we have an additional limit for declare transactions. pub mempool_declare_tx_limit: usize, /// Max age of a transaction in the mempool. 
#[serde(deserialize_with = "deserialize_duration")] From 002870d51962c75300dd9c6212edb34fe18b0a12 Mon Sep 17 00:00:00 2001 From: cchudant Date: Mon, 2 Dec 2024 12:04:44 +0000 Subject: [PATCH 8/9] fix: revert starknet-types-core patch removal --- .../src/finalize_execution_state.rs | 2 -- crates/client/block_production/src/lib.rs | 9 ++------ crates/client/db/src/mempool_db.rs | 22 ++++++++----------- crates/client/devnet/src/lib.rs | 10 ++++----- crates/client/exec/src/lib.rs | 2 +- crates/client/exec/src/transaction.rs | 11 +++++----- .../mempool/src/inner/deployed_contracts.rs | 1 + crates/client/mempool/src/inner/limits.rs | 15 ++++++------- crates/client/mempool/src/inner/mod.rs | 8 +------ .../client/mempool/src/inner/nonce_chain.rs | 2 ++ crates/client/mempool/src/lib.rs | 5 ++--- crates/client/rpc/src/utils/mod.rs | 2 +- crates/client/rpc/src/utils/transaction.rs | 2 +- .../methods/trace/trace_block_transactions.rs | 4 ++-- .../v0_7_1/methods/trace/trace_transaction.rs | 4 ++-- crates/primitives/block/src/lib.rs | 2 ++ .../src/broadcasted_to_blockifier.rs | 2 -- 17 files changed, 43 insertions(+), 60 deletions(-) diff --git a/crates/client/block_production/src/finalize_execution_state.rs b/crates/client/block_production/src/finalize_execution_state.rs index 0a3ad7e9d..601933e30 100644 --- a/crates/client/block_production/src/finalize_execution_state.rs +++ b/crates/client/block_production/src/finalize_execution_state.rs @@ -6,7 +6,6 @@ use blockifier::{ transaction::errors::TransactionExecutionError, }; use mc_db::{db_block_id::DbBlockId, MadaraBackend}; -use mc_mempool::MempoolTransaction; use mp_block::{VisitedSegmentEntry, VisitedSegments}; use mp_convert::ToFelt; use mp_state_update::{ @@ -162,7 +161,6 @@ fn get_visited_segments(tx_executor: &mut TransactionExecutor } pub(crate) fn finalize_execution_state( - _executed_txs: &[MempoolTransaction], tx_executor: &mut TransactionExecutor, backend: &MadaraBackend, on_top_of: &Option, diff --git 
a/crates/client/block_production/src/lib.rs b/crates/client/block_production/src/lib.rs index f3ecb726f..b68c67770 100644 --- a/crates/client/block_production/src/lib.rs +++ b/crates/client/block_production/src/lib.rs @@ -289,13 +289,8 @@ impl BlockProductionTask { let on_top_of = self.executor.block_state.as_ref().expect(BLOCK_STATE_ACCESS_ERR).state.on_top_of_block_id; - // TODO: save visited segments for SNOS. - let (state_diff, visited_segments, bouncer_weights) = finalize_execution_state::finalize_execution_state( - &executed_txs, - &mut self.executor, - &self.backend, - &on_top_of, - )?; + let (state_diff, visited_segments, bouncer_weights) = + finalize_execution_state::finalize_execution_state(&mut self.executor, &self.backend, &on_top_of)?; // Add back the unexecuted transactions to the mempool. stats.n_re_added_to_mempool = txs_to_process.len(); diff --git a/crates/client/db/src/mempool_db.rs b/crates/client/db/src/mempool_db.rs index d55dbbabd..a94f6a0c0 100644 --- a/crates/client/db/src/mempool_db.rs +++ b/crates/client/db/src/mempool_db.rs @@ -29,21 +29,17 @@ struct TransactionWithConvertedClass { impl MadaraBackend { #[tracing::instrument(skip(self), fields(module = "MempoolDB"))] - pub fn get_mempool_transactions(&self) -> Result)>> { + pub fn get_mempool_transactions( + &self, + ) -> impl Iterator)>> + '_ { let col = self.db.get_column(Column::MempoolTransactions); - let txs = self - .db - .iterator_cf(&col, IteratorMode::Start) - .map(|kv| { - let (k, v) = kv?; - let hash: Felt = bincode::deserialize(&k)?; - let tx: TransactionWithConvertedClass = bincode::deserialize(&v)?; + self.db.iterator_cf(&col, IteratorMode::Start).map(|kv| { + let (k, v) = kv?; + let hash: Felt = bincode::deserialize(&k)?; + let tx: TransactionWithConvertedClass = bincode::deserialize(&v)?; - Result::<_>::Ok((hash, tx.tx, tx.converted_class)) - }) - .collect::, _>>()?; - tracing::debug!("get_mempool_txs {:?}", txs.iter().map(|(hash, _, _)| hash).collect::>()); - Ok(txs) + 
Result::<_>::Ok((hash, tx.tx, tx.converted_class)) + }) } #[tracing::instrument(skip(self), fields(module = "MempoolDB"))] diff --git a/crates/client/devnet/src/lib.rs b/crates/client/devnet/src/lib.rs index e9d868289..3d8560dc8 100644 --- a/crates/client/devnet/src/lib.rs +++ b/crates/client/devnet/src/lib.rs @@ -309,7 +309,7 @@ mod tests { let backend = MadaraBackend::open_for_testing(Arc::clone(&chain_config)); let importer = Arc::new(BlockImporter::new(Arc::clone(&backend), None, true).unwrap()); - println!("{:?}", block.state_diff); + tracing::debug!("{:?}", block.state_diff); tokio::runtime::Runtime::new() .unwrap() .block_on( @@ -349,7 +349,7 @@ mod tests { #[rstest] #[case(m_cairo_test_contracts::TEST_CONTRACT_SIERRA)] fn test_erc_20_declare(mut chain: DevnetForTesting, #[case] contract: &[u8]) { - println!("{}", chain.contracts); + tracing::info!("{}", chain.contracts); let sender_address = &chain.contracts.0[0]; @@ -509,7 +509,7 @@ mod tests { #[case(9_999u128 * STRK_FRI_DECIMALS, false)] #[case(10_001u128 * STRK_FRI_DECIMALS, true)] fn test_basic_transfer(mut chain: DevnetForTesting, #[case] transfer_amount: u128, #[case] expect_reverted: bool) { - println!("{}", chain.contracts); + tracing::info!("{}", chain.contracts); let sequencer_address = chain.backend.chain_config().sequencer_address.to_felt(); let contract_0 = &chain.contracts.0[0]; @@ -645,7 +645,7 @@ mod tests { max_declare_transactions: 2, max_transactions: 5, }); - println!("{}", chain.contracts); + tracing::info!("{}", chain.contracts); let contract_0 = &chain.contracts.0[0]; let contract_1 = &chain.contracts.0[1]; @@ -721,7 +721,7 @@ mod tests { let max_age = Duration::from_millis(1000); let mut chain = chain_with_mempool_limits(MempoolLimits { max_age, max_declare_transactions: 2, max_transactions: 5 }); - println!("{}", chain.contracts); + tracing::info!("{}", chain.contracts); let contract_0 = &chain.contracts.0[0]; let contract_1 = &chain.contracts.0[1]; diff --git 
a/crates/client/exec/src/lib.rs b/crates/client/exec/src/lib.rs index 8581156c2..05b4ba805 100644 --- a/crates/client/exec/src/lib.rs +++ b/crates/client/exec/src/lib.rs @@ -16,7 +16,7 @@ mod call; pub mod execution; mod fee; mod trace; -// pub mod transaction; +pub mod transaction; pub use block_context::ExecutionContext; pub use blockifier_state_adapter::BlockifierStateAdapter; diff --git a/crates/client/exec/src/transaction.rs b/crates/client/exec/src/transaction.rs index 744921c0d..0f3522060 100644 --- a/crates/client/exec/src/transaction.rs +++ b/crates/client/exec/src/transaction.rs @@ -20,7 +20,6 @@ pub enum Error { ContractClassError(#[from] ContractClassError), #[error("{0}")] Internal(Cow<'static, str>), - TransactionExecutionError(#[from] ) } /// Convert an starknet-api Transaction to a blockifier Transaction @@ -33,8 +32,9 @@ pub fn to_blockifier_transactions( transaction: mp_transactions::Transaction, tx_hash: &TransactionHash, ) -> Result { - let transaction: Transaction = - transaction.try_into().map_err(|_| Error("Converting to starknet api transaction".into()))?; + let transaction: Transaction = transaction + .try_into() + .map_err(|err| Error::Internal(format!("Converting to starknet api transaction {:#}", err).into()))?; let paid_fee_on_l1 = match transaction { Transaction::L1Handler(_) => Some(starknet_api::transaction::Fee(1_000_000_000_000)), @@ -72,8 +72,7 @@ pub fn to_blockifier_transactions( _ => None, }; - btx::Transaction::from_api(transaction.clone(), *tx_hash, class_info, paid_fee_on_l1, None, false).map_err(|_| { - tracing::error!("Failed to convert transaction to blockifier transaction"); - StarknetRpcApiError::InternalServerError + btx::Transaction::from_api(transaction.clone(), *tx_hash, class_info, paid_fee_on_l1, None, false).map_err(|err| { + Error::Internal(format!("Failed to convert transaction to blockifier transaction {:#}", err).into()) }) } diff --git a/crates/client/mempool/src/inner/deployed_contracts.rs 
b/crates/client/mempool/src/inner/deployed_contracts.rs index e9f307394..546d94566 100644 --- a/crates/client/mempool/src/inner/deployed_contracts.rs +++ b/crates/client/mempool/src/inner/deployed_contracts.rs @@ -12,6 +12,7 @@ impl DeployedContracts { hash_map::Entry::Occupied(mut entry) => { *entry.get_mut() -= 1; if entry.get() == &0 { + // Count is now 0, we can delete the entry. entry.remove(); } } diff --git a/crates/client/mempool/src/inner/limits.rs b/crates/client/mempool/src/inner/limits.rs index 845439839..4749a0caa 100644 --- a/crates/client/mempool/src/inner/limits.rs +++ b/crates/client/mempool/src/inner/limits.rs @@ -60,7 +60,7 @@ pub(crate) struct TransactionCheckedLimits { impl TransactionCheckedLimits { // Returns which limits apply for this transaction. - // This struct is also used to update the limits after insretion, without having to keep a clone of the transaction around. + // This struct is also used to update the limits after insertion, without having to keep a clone of the transaction around. // We can add more limits here as needed :) pub fn limits_for(tx: &MempoolTransaction) -> Self { match tx.tx.tx_type() { @@ -82,8 +82,10 @@ impl TransactionCheckedLimits { check_age: true, tx_arrived_at: tx.arrived_at, }, + // L1 handler transactions are transactions added into the L1 core contract. We don't want to miss + // any of those if possible. TransactionType::L1Handler => TransactionCheckedLimits { - check_tx_limit: true, + check_tx_limit: false, check_declare_limit: false, check_age: false, tx_arrived_at: tx.arrived_at, @@ -128,9 +130,8 @@ impl MempoolLimiter { } pub fn update_tx_limits(&mut self, limits: &TransactionCheckedLimits) { - if limits.check_tx_limit { - self.current_transactions += 1; - } + // We want all transactions to count toward the limit, not just those where the limit is checked. 
+ self.current_transactions += 1; if limits.check_declare_limit { self.current_declare_transactions += 1; } @@ -138,9 +139,7 @@ impl MempoolLimiter { pub fn mark_removed(&mut self, to_update: &TransactionCheckedLimits) { // These should not overflow unless block prod marks transactions as consumed even though they have not been popped. - if to_update.check_tx_limit { - self.current_transactions -= 1; - } + self.current_transactions -= 1; if to_update.check_declare_limit { self.current_declare_transactions -= 1; } diff --git a/crates/client/mempool/src/inner/mod.rs b/crates/client/mempool/src/inner/mod.rs index 773ea75fa..63d4a3a45 100644 --- a/crates/client/mempool/src/inner/mod.rs +++ b/crates/client/mempool/src/inner/mod.rs @@ -24,18 +24,12 @@ mod tx; pub use limits::*; pub use tx::*; -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq)] struct AccountOrderedByTimestamp { contract_addr: Felt, timestamp: ArrivedAtTimestamp, } -impl PartialEq for AccountOrderedByTimestamp { - fn eq(&self, other: &Self) -> bool { - self.cmp(other).is_eq() - } -} -impl Eq for AccountOrderedByTimestamp {} impl Ord for AccountOrderedByTimestamp { fn cmp(&self, other: &Self) -> cmp::Ordering { // Important: Fallback on contract addr here. diff --git a/crates/client/mempool/src/inner/nonce_chain.rs b/crates/client/mempool/src/inner/nonce_chain.rs index 4f48b3ec3..501d4cfbd 100644 --- a/crates/client/mempool/src/inner/nonce_chain.rs +++ b/crates/client/mempool/src/inner/nonce_chain.rs @@ -30,6 +30,8 @@ impl PartialOrd for OrderMempoolTransactionByNonce { #[derive(Debug)] pub struct NonceChain { /// Use a BTreeMap to so that we can use the entry api. 
+ // TODO(perf): to avoid some double lookups here, we should remove the `OrderMempoolTransactionByNonce` struct + // and make this a BTreeMap pub(crate) transactions: BTreeMap, pub(crate) front_arrived_at: ArrivedAtTimestamp, pub(crate) front_nonce: Nonce, diff --git a/crates/client/mempool/src/lib.rs b/crates/client/mempool/src/lib.rs index 2094c82ae..131075138 100644 --- a/crates/client/mempool/src/lib.rs +++ b/crates/client/mempool/src/lib.rs @@ -109,9 +109,8 @@ impl Mempool { } pub fn load_txs_from_db(&mut self) -> Result<(), anyhow::Error> { - for (tx_hash, saved_tx, converted_class) in - self.backend.get_mempool_transactions().context("Getting mempool transactions")? - { + for res in self.backend.get_mempool_transactions() { + let (tx_hash, saved_tx, converted_class) = res.context("Getting mempool transactions")?; let (tx, arrived_at) = saved_to_blockifier_tx(saved_tx, tx_hash, &converted_class) .context("Converting saved tx to blockifier")?; diff --git a/crates/client/rpc/src/utils/mod.rs b/crates/client/rpc/src/utils/mod.rs index 8e337869d..6326dd7d3 100644 --- a/crates/client/rpc/src/utils/mod.rs +++ b/crates/client/rpc/src/utils/mod.rs @@ -3,7 +3,7 @@ pub(crate) mod transaction; use std::fmt; use crate::StarknetRpcApiError; -pub use transaction::to_blockifier_transactions; +pub use transaction::to_blockifier_transaction; pub fn display_internal_server_error(err: impl fmt::Display) { tracing::error!(target: "rpc_errors", "{:#}", err); diff --git a/crates/client/rpc/src/utils/transaction.rs b/crates/client/rpc/src/utils/transaction.rs index ef4e5ef97..bbc7c114f 100644 --- a/crates/client/rpc/src/utils/transaction.rs +++ b/crates/client/rpc/src/utils/transaction.rs @@ -13,7 +13,7 @@ use crate::errors::{StarknetRpcApiError, StarknetRpcResult}; /// /// **note:** this function does not support deploy transaction /// because it is not supported by blockifier -pub fn to_blockifier_transactions( +pub fn to_blockifier_transaction( backend: Arc, block_id: BlockId, 
transaction: mp_transactions::Transaction, diff --git a/crates/client/rpc/src/versions/v0_7_1/methods/trace/trace_block_transactions.rs b/crates/client/rpc/src/versions/v0_7_1/methods/trace/trace_block_transactions.rs index cae8bc41a..629150d8d 100644 --- a/crates/client/rpc/src/versions/v0_7_1/methods/trace/trace_block_transactions.rs +++ b/crates/client/rpc/src/versions/v0_7_1/methods/trace/trace_block_transactions.rs @@ -1,6 +1,6 @@ use super::trace_transaction::FALLBACK_TO_SEQUENCER_WHEN_VERSION_BELOW; use crate::errors::{StarknetRpcApiError, StarknetRpcResult}; -use crate::utils::transaction::to_blockifier_transactions; +use crate::utils::transaction::to_blockifier_transaction; use crate::utils::ResultExt; use crate::Starknet; use mc_exec::{execution_result_to_tx_trace, ExecutionContext}; @@ -28,7 +28,7 @@ pub async fn trace_block_transactions( .into_iter() .zip(block.info.tx_hashes()) .map(|(tx, hash)| { - to_blockifier_transactions(starknet.clone_backend(), block_id.clone(), tx, &TransactionHash(*hash)) + to_blockifier_transaction(starknet.clone_backend(), block_id.clone(), tx, &TransactionHash(*hash)) }) .collect::>()?; diff --git a/crates/client/rpc/src/versions/v0_7_1/methods/trace/trace_transaction.rs b/crates/client/rpc/src/versions/v0_7_1/methods/trace/trace_transaction.rs index 07a9d9d6d..d134bc9d2 100644 --- a/crates/client/rpc/src/versions/v0_7_1/methods/trace/trace_transaction.rs +++ b/crates/client/rpc/src/versions/v0_7_1/methods/trace/trace_transaction.rs @@ -1,6 +1,6 @@ use crate::errors::StarknetRpcApiError; use crate::errors::StarknetRpcResult; -use crate::utils::transaction::to_blockifier_transactions; +use crate::utils::transaction::to_blockifier_transaction; use crate::utils::{OptionExt, ResultExt}; use crate::Starknet; use mc_exec::execution_result_to_tx_trace; @@ -32,7 +32,7 @@ pub async fn trace_transaction( let mut block_txs = Iterator::zip(block.inner.transactions.into_iter(), block.info.tx_hashes()).map(|(tx, hash)| { - 
to_blockifier_transactions(starknet.clone_backend(), block.info.as_block_id(), tx, &TransactionHash(*hash)) + to_blockifier_transaction(starknet.clone_backend(), block.info.as_block_id(), tx, &TransactionHash(*hash)) }); // takes up until not including last tx diff --git a/crates/primitives/block/src/lib.rs b/crates/primitives/block/src/lib.rs index 828d12ceb..dc64fb828 100644 --- a/crates/primitives/block/src/lib.rs +++ b/crates/primitives/block/src/lib.rs @@ -274,6 +274,8 @@ impl From for MadaraMaybePendingBlock { } } +/// Visited segments are the class segments that are visited during the execution of the block. +/// This info is an input of SNOS and used for proving. #[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] pub struct VisitedSegments(pub Vec); diff --git a/crates/primitives/transactions/src/broadcasted_to_blockifier.rs b/crates/primitives/transactions/src/broadcasted_to_blockifier.rs index f02b36d6a..3b66e78e2 100644 --- a/crates/primitives/transactions/src/broadcasted_to_blockifier.rs +++ b/crates/primitives/transactions/src/broadcasted_to_blockifier.rs @@ -144,8 +144,6 @@ pub enum BroadcastedToBlockifierError { ConvertTxBlockifierError(#[from] TransactionExecutionError), #[error("Failed to convert contract class: {0}")] ConvertContractClassError(#[from] ContractClassError), - // #[error("Declare legacy contract classes are not supported")] - // LegacyContractClassesNotSupported, #[error("Compiled class hash mismatch: expected {expected}, actual {compilation}")] CompiledClassHashMismatch { expected: Felt, compilation: Felt }, } From d0b0ec8538a5914287d53e8fd80cfbb7590b406d Mon Sep 17 00:00:00 2001 From: jbcaron Date: Sat, 7 Dec 2024 23:30:04 +0100 Subject: [PATCH 9/9] fix: CI --- crates/client/eth/src/client.rs | 3 +- crates/client/eth/src/l1_gas_price.rs | 44 ++++----------------------- 2 files changed, 8 insertions(+), 39 deletions(-) diff --git a/crates/client/eth/src/client.rs b/crates/client/eth/src/client.rs index 
8b40c9726..21c20a5b1 100644 --- a/crates/client/eth/src/client.rs +++ b/crates/client/eth/src/client.rs @@ -210,12 +210,13 @@ pub mod eth_client_getter_test { AnvilPortNum(guard.next.next().expect("no more port to use")) } - fn create_anvil_instance() -> AnvilInstance { + pub fn create_anvil_instance() -> AnvilInstance { let port = get_port(); let anvil = Anvil::new() .fork(FORK_URL.clone()) .fork_block_number(L1_BLOCK_NUMBER) .port(port.0) + .timeout(20_000) .try_spawn() .expect("failed to spawn anvil instance"); println!("Anvil started and running at `{}`", anvil.endpoint()); diff --git a/crates/client/eth/src/l1_gas_price.rs b/crates/client/eth/src/l1_gas_price.rs index 33a7f7a9d..e6ca2b321 100644 --- a/crates/client/eth/src/l1_gas_price.rs +++ b/crates/client/eth/src/l1_gas_price.rs @@ -99,30 +99,18 @@ async fn update_l1_block_metrics(eth_client: &EthereumClient, l1_gas_provider: G #[cfg(test)] mod eth_client_gas_price_worker_test { use super::*; - use crate::client::eth_client_getter_test::create_ethereum_client; - use alloy::node_bindings::Anvil; + use crate::client::eth_client_getter_test::{create_anvil_instance, create_ethereum_client}; use httpmock::{MockServer, Regex}; use mc_mempool::GasPriceProvider; use serial_test::serial; use std::time::SystemTime; use tokio::task::JoinHandle; use tokio::time::{timeout, Duration}; - const ANOTHER_ANVIL_PORT: u16 = 8546; - const L1_BLOCK_NUMBER: u64 = 20395662; - - lazy_static::lazy_static! 
{ - static ref FORK_URL: String = std::env::var("ETH_FORK_URL").expect("ETH_FORK_URL not set"); - } #[serial] #[tokio::test] async fn gas_price_worker_when_infinite_loop_true_works() { - let anvil = Anvil::new() - .fork(FORK_URL.clone()) - .fork_block_number(L1_BLOCK_NUMBER) - .port(ANOTHER_ANVIL_PORT) - .try_spawn() - .expect("issue while forking for the anvil"); + let anvil = create_anvil_instance(); let eth_client = create_ethereum_client(Some(anvil.endpoint().as_str())); let l1_gas_provider = GasPriceProvider::new(); @@ -166,12 +154,7 @@ mod eth_client_gas_price_worker_test { #[serial] #[tokio::test] async fn gas_price_worker_when_infinite_loop_false_works() { - let anvil = Anvil::new() - .fork(FORK_URL.clone()) - .fork_block_number(L1_BLOCK_NUMBER) - .port(ANOTHER_ANVIL_PORT) - .try_spawn() - .expect("issue while forking for the anvil"); + let anvil = create_anvil_instance(); let eth_client = create_ethereum_client(Some(anvil.endpoint().as_str())); let l1_gas_provider = GasPriceProvider::new(); @@ -190,12 +173,7 @@ mod eth_client_gas_price_worker_test { #[serial] #[tokio::test] async fn gas_price_worker_when_gas_price_fix_works() { - let anvil = Anvil::new() - .fork(FORK_URL.clone()) - .fork_block_number(L1_BLOCK_NUMBER) - .port(ANOTHER_ANVIL_PORT) - .try_spawn() - .expect("issue while forking for the anvil"); + let anvil = create_anvil_instance(); let eth_client = create_ethereum_client(Some(anvil.endpoint().as_str())); let l1_gas_provider = GasPriceProvider::new(); l1_gas_provider.update_eth_l1_gas_price(20); @@ -216,12 +194,7 @@ mod eth_client_gas_price_worker_test { #[serial] #[tokio::test] async fn gas_price_worker_when_data_gas_price_fix_works() { - let anvil = Anvil::new() - .fork(FORK_URL.clone()) - .fork_block_number(L1_BLOCK_NUMBER) - .port(ANOTHER_ANVIL_PORT) - .try_spawn() - .expect("issue while forking for the anvil"); + let anvil = create_anvil_instance(); let eth_client = create_ethereum_client(Some(anvil.endpoint().as_str())); let 
l1_gas_provider = GasPriceProvider::new(); l1_gas_provider.update_eth_l1_data_gas_price(20); @@ -306,12 +279,7 @@ mod eth_client_gas_price_worker_test { #[serial] #[tokio::test] async fn update_gas_price_works() { - let anvil = Anvil::new() - .fork(FORK_URL.clone()) - .fork_block_number(L1_BLOCK_NUMBER) - .port(ANOTHER_ANVIL_PORT) - .try_spawn() - .expect("issue while forking for the anvil"); + let anvil = create_anvil_instance(); let eth_client = create_ethereum_client(Some(anvil.endpoint().as_str())); let l1_gas_provider = GasPriceProvider::new();