diff --git a/Cargo.lock b/Cargo.lock index 1366316eb2..1530e2a290 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4464,11 +4464,12 @@ dependencies = [ "ics23", "masp_primitives", "namada_core", + "namada_gas", "namada_governance", "namada_parameters", "namada_state", "namada_storage", - "namada_trans_token", + "namada_token", "primitive-types", "proptest", "prost 0.12.3", diff --git a/crates/apps/src/lib/bench_utils.rs b/crates/apps/src/lib/bench_utils.rs index 9a242a920c..d02951e83b 100644 --- a/crates/apps/src/lib/bench_utils.rs +++ b/crates/apps/src/lib/bench_utils.rs @@ -1,6 +1,7 @@ //! Library code for benchmarks provides a wrapper of the ledger's shell //! `BenchShell` and helper functions to generate transactions. +use std::cell::RefCell; use std::collections::BTreeSet; use std::fs::{File, OpenOptions}; use std::io::{Read, Write}; @@ -78,6 +79,7 @@ use namada::{proof_of_stake, tendermint}; use namada_sdk::masp::{ self, ShieldedContext, ShieldedTransfer, ShieldedUtils, }; +use namada_sdk::storage::StorageWrite; pub use namada_sdk::tx::{ TX_BECOME_VALIDATOR_WASM, TX_BOND_WASM, TX_BRIDGE_POOL_WASM, TX_CHANGE_COMMISSION_WASM as TX_CHANGE_VALIDATOR_COMMISSION_WASM, @@ -224,7 +226,7 @@ impl Default for BenchShell { source: Some(defaults::albert_address()), }; let params = - proof_of_stake::storage::read_pos_params(&bench_shell.wl_storage) + proof_of_stake::storage::read_pos_params(&bench_shell.state) .unwrap(); let signed_tx = bench_shell.generate_tx( TX_BOND_WASM, @@ -235,7 +237,7 @@ impl Default for BenchShell { ); bench_shell.execute_tx(&signed_tx); - bench_shell.wl_storage.commit_tx(); + bench_shell.state.commit_tx(); // Initialize governance proposal let content_section = Section::ExtraData(Code::new( @@ -261,7 +263,7 @@ impl Default for BenchShell { ); bench_shell.execute_tx(&signed_tx); - bench_shell.wl_storage.commit_tx(); + bench_shell.state.commit_tx(); bench_shell.commit_block(); // Advance epoch for pos benches @@ -270,7 +272,7 @@ impl Default for BenchShell { } // Must start after current epoch debug_assert_eq!( - bench_shell.wl_storage.get_block_epoch().unwrap().next(), + bench_shell.state.get_block_epoch().unwrap().next(), voting_start_epoch ); @@ -386,10 +388,11 @@ impl BenchShell { } pub fn execute_tx(&mut self, tx: &Tx) { + let gas_meter = + RefCell::new(TxGasMeter::new_from_sub_limit(u64::MAX.into())); run::tx( - &self.inner.wl_storage.storage, - &mut self.inner.wl_storage.write_log, - &mut TxGasMeter::new_from_sub_limit(u64::MAX.into()), + &mut self.inner.state, + &gas_meter, &TxIndex(0), tx, &mut self.inner.vp_wasm_cache, @@ -400,31 +403,29 @@ impl BenchShell { pub fn advance_epoch(&mut self) { let params = - proof_of_stake::storage::read_pos_params(&self.inner.wl_storage) + proof_of_stake::storage::read_pos_params(&self.inner.state) .unwrap(); - self.wl_storage.storage.block.epoch = - self.wl_storage.storage.block.epoch.next(); - let current_epoch = self.wl_storage.storage.block.epoch; + self.state.in_mem_mut().block.epoch = + self.state.in_mem().block.epoch.next(); + let current_epoch = self.state.in_mem().block.epoch; proof_of_stake::validator_set_update::copy_validator_sets_and_positions( - &mut self.wl_storage, + &mut self.state, ¶ms, current_epoch, current_epoch + params.pipeline_len, ) .unwrap(); - namada::token::conversion::update_allowed_conversions( - &mut self.wl_storage, - ) - .unwrap(); + namada::token::conversion::update_allowed_conversions(&mut self.state) + .unwrap(); } pub fn init_ibc_client_state(&mut self, addr_key: Key) -> ClientId { // Set a dummy 
header - self.wl_storage - .storage + self.state + .in_mem_mut() .set_header(get_dummy_header()) .unwrap(); // Set client state @@ -453,8 +454,7 @@ impl BenchShell { .unwrap() .into(); let bytes = >::encode_vec(client_state); - self.wl_storage - .storage + self.state .write(&client_state_key, bytes) .expect("write failed"); @@ -480,10 +480,7 @@ impl BenchShell { let bytes = >::encode_vec(consensus_state); - self.wl_storage - .storage - .write(&consensus_key, bytes) - .unwrap(); + self.state.write(&consensus_key, bytes).unwrap(); client_id } @@ -509,8 +506,7 @@ impl BenchShell { .unwrap(); let connection_key = connection_key(&NamadaConnectionId::new(1)); - self.wl_storage - .storage + self.state .write(&connection_key, connection.encode_vec()) .unwrap(); @@ -519,18 +515,11 @@ impl BenchShell { let index_key = addr_key .join(&Key::from("capabilities/index".to_string().to_db_key())); - self.wl_storage - .storage - .write(&index_key, 1u64.to_be_bytes()) - .unwrap(); - self.wl_storage - .storage - .write(&port_key, 1u64.to_be_bytes()) - .unwrap(); + self.state.write(&index_key, 1u64.to_be_bytes()).unwrap(); + self.state.write(&port_key, 1u64.to_be_bytes()).unwrap(); let cap_key = addr_key.join(&Key::from("capabilities/1".to_string().to_db_key())); - self.wl_storage - .storage + self.state .write(&cap_key, PortId::transfer().as_bytes()) .unwrap(); @@ -555,8 +544,7 @@ impl BenchShell { .unwrap(); let channel_key = channel_key(&NamadaPortId::transfer(), &NamadaChannelId::new(5)); - self.wl_storage - .storage + self.state .write(&channel_key, channel.encode_vec()) .unwrap(); } @@ -564,13 +552,11 @@ impl BenchShell { // Update the block height in state to guarantee a valid response to the // client queries pub fn commit_block(&mut self) { + let last_height = self.inner.state.in_mem().get_last_block_height(); self.inner - .wl_storage - .storage - .begin_block( - Hash::default().into(), - self.inner.wl_storage.storage.get_last_block_height() + 1, - ) + .state + .in_mem_mut() + .begin_block(Hash::default().into(), last_height + 1) .unwrap(); self.inner.commit(); @@ -580,8 +566,8 @@ impl BenchShell { // client queries pub fn commit_masp_tx(&mut self, masp_tx: Tx) { self.last_block_masp_txs - .push((masp_tx, self.wl_storage.write_log.get_keys())); - self.wl_storage.commit_tx(); + .push((masp_tx, self.state.write_log().get_keys())); + self.state.commit_tx(); } } @@ -733,7 +719,7 @@ impl Client for BenchShell { }; let ctx = RequestCtx { - wl_storage: &self.wl_storage, + state: &self.state, event_log: self.event_log(), vp_wasm_cache: self.vp_wasm_cache.read_only(), tx_wasm_cache: self.tx_wasm_cache.read_only(), @@ -774,13 +760,12 @@ impl Client for BenchShell { // Given the way we setup and run benchmarks, the masp transactions can // only present in the last block, we can mock the previous // responses with an empty set of transactions - let last_block_txs = if height - == self.inner.wl_storage.storage.get_last_block_height() - { - self.last_block_masp_txs.clone() - } else { - vec![] - }; + let last_block_txs = + if height == self.inner.state.in_mem().get_last_block_height() { + self.last_block_masp_txs.clone() + } else { + vec![] + }; Ok(tendermint_rpc::endpoint::block::Response { block_id: tendermint::block::Id { hash: tendermint::Hash::None, @@ -839,7 +824,7 @@ impl Client for BenchShell { // We can expect all the masp tranfers to have happened only in the last // block let end_block_events = if height.value() - == self.inner.wl_storage.storage.get_last_block_height().0 + == 
self.inner.state.in_mem().get_last_block_height().0 { Some( self.last_block_masp_txs @@ -991,7 +976,7 @@ impl BenchShieldedCtx { &[], )) .unwrap(); - let native_token = self.shell.wl_storage.storage.native_token.clone(); + let native_token = self.shell.state.in_mem().native_token.clone(); let namada = NamadaImpl::native_new( self.shell, self.wallet, diff --git a/crates/apps/src/lib/node/ledger/mod.rs b/crates/apps/src/lib/node/ledger/mod.rs index 5b531aa11a..5c21f687e5 100644 --- a/crates/apps/src/lib/node/ledger/mod.rs +++ b/crates/apps/src/lib/node/ledger/mod.rs @@ -18,7 +18,8 @@ use namada::core::storage::Key; use namada::core::time::DateTimeUtc; use namada::eth_bridge::ethers::providers::{Http, Provider}; use namada::governance::storage::keys as governance_storage; -use namada_sdk::tendermint::abci::request::CheckTxKind; +use namada::tendermint::abci::request::CheckTxKind; +use namada_sdk::state::StateRead; use once_cell::unsync::Lazy; use sysinfo::{RefreshKind, System, SystemExt}; use tokio::sync::mpsc; @@ -66,16 +67,15 @@ const ENV_VAR_RAYON_THREADS: &str = "NAMADA_RAYON_THREADS"; impl Shell { fn load_proposals(&mut self) { let proposals_key = governance_storage::get_commiting_proposals_prefix( - self.wl_storage.storage.last_epoch.0, + self.state.in_mem().last_epoch.0, ); - let (proposal_iter, _) = - self.wl_storage.storage.iter_prefix(&proposals_key); + let (proposal_iter, _) = self.state.db_iter_prefix(&proposals_key); for (key, _, _) in proposal_iter { let key = Key::from_str(key.as_str()).expect("Key should be parsable"); if governance_storage::get_commit_proposal_epoch(&key).unwrap() - != self.wl_storage.storage.last_epoch.0 + != self.state.in_mem().last_epoch.0 { // NOTE: `iter_prefix` iterate over the matching prefix. In this // case a proposal with grace_epoch 110 will be diff --git a/crates/apps/src/lib/node/ledger/shell/block_alloc.rs b/crates/apps/src/lib/node/ledger/shell/block_alloc.rs index 1fa09fa870..09bb6d7847 100644 --- a/crates/apps/src/lib/node/ledger/shell/block_alloc.rs +++ b/crates/apps/src/lib/node/ledger/shell/block_alloc.rs @@ -56,7 +56,7 @@ pub mod states; use std::marker::PhantomData; use namada::proof_of_stake::pos_queries::PosQueries; -use namada::state::{self, WlStorage}; +use namada::state::{self, WlState}; #[allow(unused_imports)] use crate::facade::tendermint_proto::abci::RequestPrepareProposal; @@ -141,14 +141,14 @@ pub struct BlockAllocator { decrypted_txs: TxBin, } -impl From<&WlStorage> +impl From<&WlState> for BlockAllocator> where D: 'static + state::DB + for<'iter> state::DBIter<'iter>, H: 'static + state::StorageHasher, { #[inline] - fn from(storage: &WlStorage) -> Self { + fn from(storage: &WlState) -> Self { Self::init( storage.pos_queries().get_max_proposal_bytes().get(), namada::parameters::get_max_block_gas(storage).unwrap(), diff --git a/crates/apps/src/lib/node/ledger/shell/finalize_block.rs b/crates/apps/src/lib/node/ledger/shell/finalize_block.rs index 8fc12578ea..2162b0cfbb 100644 --- a/crates/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/crates/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -1,5 +1,7 @@ //! 
Implementation of the `FinalizeBlock` ABCI++ method for the Shell +use std::cell::RefCell; + use data_encoding::HEXUPPER; use masp_primitives::merkle_tree::CommitmentTree; use masp_primitives::sapling::Node; @@ -14,7 +16,6 @@ use namada::proof_of_stake; use namada::proof_of_stake::storage::{ find_validator_by_raw_hash, write_last_block_proposer_address, }; -use namada::state::wl_storage::WriteLogAndStorage; use namada::state::write_log::StorageModification; use namada::state::{ ResultExt, StorageRead, StorageWrite, EPOCH_SWITCH_BLOCKS_DELAY, @@ -62,9 +63,9 @@ where // Begin the new block and check if a new epoch has begun let (height, new_epoch) = self.update_state(req.header, req.hash); - let (current_epoch, _gas) = self.wl_storage.storage.get_current_epoch(); + let (current_epoch, _gas) = self.state.in_mem().get_current_epoch(); let update_for_tendermint = matches!( - self.wl_storage.storage.update_epoch_blocks_delay, + self.state.in_mem().update_epoch_blocks_delay, Some(EPOCH_SWITCH_BLOCKS_DELAY) ); @@ -75,32 +76,33 @@ where tracing::debug!( "New epoch block delay for updating the Tendermint validator set: \ {:?}", - self.wl_storage.storage.update_epoch_blocks_delay + self.state.in_mem().update_epoch_blocks_delay ); // Finalize the transactions' hashes from the previous block - for hash in self.wl_storage.storage.iter_replay_protection() { - self.wl_storage - .write_log + let (write_log, _in_mem, db) = self.state.split_borrow(); + for (raw_key, _, _) in db.iter_replay_protection() { + let hash = raw_key.parse().expect("Failed hash conversion"); + write_log .finalize_tx_hash(hash) .expect("Failed tx hashes finalization") } let emit_events = &mut response.events; // Get the actual votes from cometBFT in the preferred format - let votes = pos_votes_from_abci(&self.wl_storage, &req.votes); + let votes = pos_votes_from_abci(&self.state, &req.votes); let validator_set_update_epoch = self.get_validator_set_update_epoch(current_epoch); // Sub-system updates: // - Token - token::finalize_block(&mut self.wl_storage, emit_events, new_epoch)?; + token::finalize_block(&mut self.state, emit_events, new_epoch)?; // - Governance governance::finalize_block(self, emit_events, new_epoch)?; // - PoS // - Must be applied after governance in case it changes PoS params proof_of_stake::finalize_block( - &mut self.wl_storage, + &mut self.state, emit_events, new_epoch, validator_set_update_epoch, @@ -118,7 +120,7 @@ where let native_block_proposer_address = { let tm_raw_hash_string = tm_raw_hash_to_string(req.proposer_address); - find_validator_by_raw_hash(&self.wl_storage, tm_raw_hash_string) + find_validator_by_raw_hash(&self.state, tm_raw_hash_string) .unwrap() .expect( "Unable to find native validator address of block \ @@ -127,7 +129,7 @@ where }; // Tracks the accepted transactions - self.wl_storage.storage.block.results = BlockResults::default(); + self.state.in_mem_mut().block.results = BlockResults::default(); let mut changed_keys = BTreeSet::new(); for (tx_index, processed_tx) in req.txs.iter().enumerate() { let tx = if let Ok(tx) = Tx::try_from(processed_tx.tx.as_ref()) { @@ -189,8 +191,8 @@ where // if the rejected tx was decrypted, remove it // from the queue of txs to be processed if let TxType::Decrypted(_) = &tx_header.tx_type { - self.wl_storage - .storage + self.state + .in_mem_mut() .tx_queue .pop() .expect("Missing wrapper tx in queue"); @@ -202,7 +204,7 @@ where let ( mut tx_event, embedding_wrapper, - mut tx_gas_meter, + tx_gas_meter, wrapper, mut wrapper_args, ) = match &tx_header.tx_type 
{ @@ -224,8 +226,8 @@ where TxType::Decrypted(inner) => { // We remove the corresponding wrapper tx from the queue let tx_in_queue = self - .wl_storage - .storage + .state + .in_mem_mut() .tx_queue .pop() .expect("Missing wrapper tx in queue"); @@ -318,7 +320,7 @@ where { let this_signer = &( address, - self.wl_storage.storage.get_last_block_height(), + self.state.in_mem().get_last_block_height(), ); for MultiSignedEthEvent { event, signers } in &digest.events @@ -338,8 +340,8 @@ where } }, }; - - let tx_result = protocol::check_tx_allowed(&tx, &self.wl_storage) + let tx_gas_meter = RefCell::new(tx_gas_meter); + let tx_result = protocol::check_tx_allowed(&tx, &self.state) .and_then(|()| { protocol::dispatch_tx( tx, @@ -349,14 +351,15 @@ where .try_into() .expect("transaction index out of bounds"), ), - &mut tx_gas_meter, - &mut self.wl_storage, + &tx_gas_meter, + &mut self.state, &mut self.vp_wasm_cache, &mut self.tx_wasm_cache, wrapper_args.as_mut(), ) }) .map_err(Error::TxApply); + let tx_gas_meter = tx_gas_meter.into_inner(); match tx_result { Ok(result) => { if result.is_accepted() { @@ -373,7 +376,7 @@ where tx_event["is_valid_masp_tx"] = format!("{}", tx_index); } - self.wl_storage.storage.tx_queue.push(TxInQueue { + self.state.in_mem_mut().tx_queue.push(TxInQueue { tx: wrapper.expect("Missing expected wrapper"), gas: tx_gas_meter.get_available_gas(), }); @@ -399,11 +402,11 @@ where self.commit_inner_tx_hash(wrapper); } } - self.wl_storage.commit_tx(); + self.state.commit_tx(); if !tx_event.contains_key("code") { tx_event["code"] = ResultCode::Ok.into(); - self.wl_storage - .storage + self.state + .in_mem_mut() .block .results .accept(tx_index); @@ -447,7 +450,7 @@ where } stats.increment_rejected_txs(); - self.wl_storage.drop_tx(); + self.state.drop_tx(); tx_event["code"] = ResultCode::InvalidTx.into(); } tx_event["gas_used"] = result.gas_used.to_string(); @@ -484,7 +487,7 @@ where // hash. 
A replay of the wrapper is impossible since // the inner tx hash is committed to storage and // we validate the wrapper against that hash too - self.wl_storage + self.state .delete_tx_hash(wrapper.header_hash()) .expect( "Error while deleting tx hash from storage", @@ -493,7 +496,7 @@ where } stats.increment_errored_txs(); - self.wl_storage.drop_tx(); + self.state.drop_tx(); tx_event["gas_used"] = tx_gas_meter.get_tx_consumed_gas().to_string(); @@ -533,14 +536,14 @@ where // Update the MASP commitment tree anchor if the tree was updated let tree_key = token::storage_key::masp_commitment_tree_key(); if let Some(StorageModification::Write { value }) = - self.wl_storage.write_log.read(&tree_key).0 + self.state.write_log().read(&tree_key).0 { let updated_tree = CommitmentTree::::try_from_slice(value) .into_storage_result()?; let anchor_key = token::storage_key::masp_commitment_anchor_key( updated_tree.root(), ); - self.wl_storage.write(&anchor_key, ())?; + self.state.write(&anchor_key, ())?; } if update_for_tendermint { @@ -551,7 +554,7 @@ where } write_last_block_proposer_address( - &mut self.wl_storage, + &mut self.state, native_block_proposer_address, )?; @@ -571,21 +574,21 @@ where header: Header, hash: BlockHash, ) -> (BlockHeight, bool) { - let height = self.wl_storage.storage.get_last_block_height() + 1; + let height = self.state.in_mem().get_last_block_height() + 1; - self.wl_storage - .storage + self.state + .in_mem_mut() .begin_block(hash, height) .expect("Beginning a block shouldn't fail"); let header_time = header.time; - self.wl_storage - .storage + self.state + .in_mem_mut() .set_header(header) .expect("Setting a header shouldn't fail"); let new_epoch = self - .wl_storage + .state .update_epoch(height, header_time) .expect("Must be able to update epoch"); (height, new_epoch) @@ -622,32 +625,29 @@ where let last_epoch = current_epoch.prev(); // Get the number of blocks in the last epoch - let first_block_of_last_epoch = self - .wl_storage - .storage - .block - .pred_epochs - .first_block_heights[last_epoch.0 as usize] - .0; + let first_block_of_last_epoch = + self.state.in_mem().block.pred_epochs.first_block_heights + [last_epoch.0 as usize] + .0; let num_blocks_in_last_epoch = - self.wl_storage.storage.block.height.0 - first_block_of_last_epoch; + self.state.in_mem().block.height.0 - first_block_of_last_epoch; // PoS inflation namada_proof_of_stake::rewards::apply_inflation( - &mut self.wl_storage, + &mut self.state, last_epoch, num_blocks_in_last_epoch, )?; // Pgf inflation pgf_inflation::apply_inflation( - &mut self.wl_storage, + self.state.restrict_writes_to_write_log(), namada::ibc::transfer_over_ibc, )?; - for ibc_event in self.wl_storage.write_log_mut().take_ibc_events() { + for ibc_event in self.state.write_log_mut().take_ibc_events() { let mut event = Event::from(ibc_event.clone()); // Add the height for IBC event query - let height = self.wl_storage.storage.get_last_block_height() + 1; + let height = self.state.in_mem().get_last_block_height() + 1; event["height"] = height.to_string(); response.events.push(event); } @@ -660,11 +660,11 @@ where // the wrapper). Requires the wrapper transaction as argument to recover // both the hashes. 
fn commit_inner_tx_hash(&mut self, wrapper_tx: Tx) { - self.wl_storage + self.state .write_tx_hash(wrapper_tx.raw_header_hash()) .expect("Error while writing tx hash to storage"); - self.wl_storage + self.state .delete_tx_hash(wrapper_tx.header_hash()) .expect("Error while deleting tx hash from storage"); } @@ -790,6 +790,7 @@ mod test_finalize_block { read_consensus_validator_set_addresses, }; use namada_sdk::tendermint::abci::types::MisbehaviorKind; + use namada_sdk::validity_predicate::VpSentinel; use namada_test_utils::tx_data::TxWriteData; use namada_test_utils::TestWasms; use test_log::test; @@ -816,7 +817,7 @@ mod test_finalize_block { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -857,7 +858,7 @@ mod test_finalize_block { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -896,13 +897,12 @@ mod test_finalize_block { // Add unshielded balance for fee paymenty let balance_key = token::storage_key::balance_key( - &shell.wl_storage.storage.native_token, + &shell.state.in_mem().native_token, &Address::from(&keypair.ref_to()), ); shell - .wl_storage - .storage - .write(&balance_key, Amount::native_whole(1000).serialize_to_vec()) + .state + .write(&balance_key, Amount::native_whole(1000)) .unwrap(); // create some wrapper txs @@ -974,7 +974,7 @@ mod test_finalize_block { amount_per_gas_unit: DenominatedAmount::native( Default::default(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1014,7 +1014,7 @@ mod test_finalize_block { assert_eq!(code, &String::from(ResultCode::InvalidTx)); } // check that the corresponding wrapper tx was removed from the queue - assert!(shell.wl_storage.storage.tx_queue.is_empty()); + assert!(shell.state.in_mem().tx_queue.is_empty()); } /// Test that if a tx is undecryptable, it is applied @@ -1028,7 +1028,7 @@ mod test_finalize_block { let wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(0.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1066,7 +1066,7 @@ mod test_finalize_block { assert!(log.contains("Transaction could not be decrypted.")) } // check that the corresponding wrapper tx was removed from the queue - assert!(shell.wl_storage.storage.tx_queue.is_empty()); + assert!(shell.state.in_mem().tx_queue.is_empty()); } /// Test that the wrapper txs are queued in the order they @@ -1081,13 +1081,12 @@ mod test_finalize_block { // Add unshielded balance for fee payment let balance_key = token::storage_key::balance_key( - &shell.wl_storage.storage.native_token, + &shell.state.in_mem().native_token, &Address::from(&keypair.ref_to()), ); shell - .wl_storage - .storage - .write(&balance_key, Amount::native_whole(1000).serialize_to_vec()) + .state + .write(&balance_key, Amount::native_whole(1000)) .unwrap(); // create two decrypted txs @@ -1207,7 +1206,7 @@ mod test_finalize_block { // ---- The protocol tx that includes this event on-chain let ext = ethereum_events::Vext { - 
block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), ethereum_events: vec![event.clone()], validator_addr: address.clone(), } @@ -1218,13 +1217,13 @@ mod test_finalize_block { event, signers: BTreeSet::from([( address.clone(), - shell.wl_storage.storage.get_last_block_height(), + shell.state.in_mem().get_last_block_height(), )]), }; let digest = ethereum_events::VextDigest { signatures: vec![( - (address, shell.wl_storage.storage.get_last_block_height()), + (address, shell.state.in_mem().get_last_block_height()), ext.sig, )] .into_iter() @@ -1286,7 +1285,7 @@ mod test_finalize_block { // ---- The protocol tx that includes this event on-chain let ext = ethereum_events::Vext { - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), ethereum_events: vec![event], validator_addr: address, } @@ -1335,22 +1334,21 @@ mod test_finalize_block { { let (mut shell, _, _, _) = setup_at_height(1u64); namada::eth_bridge::test_utils::commit_bridge_pool_root_at_height( - &mut shell.wl_storage, + &mut shell.state, &KeccakHash([1; 32]), 1.into(), ); let value = BlockHeight(2).serialize_to_vec(); shell - .wl_storage - .storage + .state + .in_mem_mut() .block .tree .update(&get_key_from_hash(&KeccakHash([1; 32])), value) .expect("Test failed"); shell - .wl_storage - .storage - .write(&get_nonce_key(), Uint::from(1).serialize_to_vec()) + .state + .db_write(&get_nonce_key(), Uint::from(1).serialize_to_vec()) .expect("Test failed"); let (tx, action) = craft_tx(&mut shell); let processed_tx = ProcessedTx { @@ -1365,16 +1363,16 @@ mod test_finalize_block { ..Default::default() }; let root = shell - .wl_storage + .state .read_bytes(&get_signed_root_key()) .expect("Reading signed Bridge pool root shouldn't fail."); assert!(root.is_none()); _ = shell.finalize_block(req).expect("Test failed"); - shell.wl_storage.commit_block().unwrap(); + shell.state.commit_block().unwrap(); match action { TestBpAction::VerifySignedRoot => { let (root, _) = shell - .wl_storage + .state .ethbridge_queries() .get_signed_bridge_pool_root() .expect("Test failed"); @@ -1382,10 +1380,8 @@ mod test_finalize_block { assert_eq!(root.data.1, ethUint::from(1)); } TestBpAction::CheckNonceIncremented => { - let nonce = shell - .wl_storage - .ethbridge_queries() - .get_bridge_pool_nonce(); + let nonce = + shell.state.ethbridge_queries().get_bridge_pool_nonce(); assert_eq!(nonce, ethUint::from(2)); } } @@ -1408,24 +1404,18 @@ mod test_finalize_block { ); let supply_key = token::storage_key::minted_balance_key(&token); let amt: Amount = 999_999_u64.into(); - shell - .wl_storage - .write(&owner_key, amt) - .expect("Test failed"); - shell - .wl_storage - .write(&supply_key, amt) - .expect("Test failed"); + shell.state.write(&owner_key, amt).expect("Test failed"); + shell.state.write(&supply_key, amt).expect("Test failed"); } // add bertha's gas fees the pool { let amt: Amount = 999_999_u64.into(); let pool_balance_key = token::storage_key::balance_key( - &shell.wl_storage.storage.native_token, + &shell.state.in_mem().native_token, &bridge_pool::BRIDGE_POOL_ADDRESS, ); shell - .wl_storage + .state .write(&pool_balance_key, amt) .expect("Test failed"); } @@ -1444,14 +1434,14 @@ mod test_finalize_block { sender: bertha.clone(), }, gas_fee: GasFee { - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), amount: 10u64.into(), payer: bertha.clone(), }, }; let 
transfer = (&pending).into(); shell - .wl_storage + .state .write(&bridge_pool::get_pending_key(&pending), pending) .expect("Test failed"); transfer @@ -1466,10 +1456,7 @@ mod test_finalize_block { let ext = { let ext = ethereum_events::Vext { validator_addr, - block_height: shell - .wl_storage - .storage - .get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), ethereum_events: vec![ethereum_event], } .sign(&protocol_key); @@ -1508,23 +1495,22 @@ mod test_finalize_block { min_duration: DurationSecs(0), }; namada::ledger::parameters::update_epoch_parameter( - &mut shell.wl_storage, + &mut shell.state, &epoch_duration, ) .unwrap(); - shell.wl_storage.storage.next_epoch_min_start_height = BlockHeight(5); - shell.wl_storage.storage.next_epoch_min_start_time = DateTimeUtc::now(); + shell.state.in_mem_mut().next_epoch_min_start_height = BlockHeight(5); + shell.state.in_mem_mut().next_epoch_min_start_time = DateTimeUtc::now(); let txs_key = gen_keypair(); // Add unshielded balance for fee payment let balance_key = token::storage_key::balance_key( - &shell.wl_storage.storage.native_token, + &shell.state.in_mem().native_token, &Address::from(&txs_key.ref_to()), ); shell - .wl_storage - .storage - .write(&balance_key, Amount::native_whole(1000).serialize_to_vec()) + .state + .write(&balance_key, Amount::native_whole(1000)) .unwrap(); // Add a proposal to be executed on next epoch change. @@ -1543,7 +1529,7 @@ mod test_finalize_block { }; namada::governance::init_proposal( - &mut shell.wl_storage, + &mut shell.state, proposal, vec![], None, @@ -1558,8 +1544,7 @@ mod test_finalize_block { }; // Vote to accept the proposal (there's only one validator, so its // vote decides) - namada::governance::vote_proposal(&mut shell.wl_storage, vote) - .unwrap(); + namada::governance::vote_proposal(&mut shell.state, vote).unwrap(); }; // Add a proposal to be accepted and one to be rejected. 
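// A minimal sketch of the typed storage pattern these hunks converge on: raw
// `wl_storage.storage.write(&key, value.serialize_to_vec())` calls become
// `StorageWrite::write` on the unified `state` handle, which Borsh-encodes the
// value itself, and in-memory block data is reached through `in_mem()`. The
// helper name and import paths below are assumptions for illustration; only
// `StorageRead`/`StorageWrite`, `in_mem()`, `token::storage_key::balance_key`
// and `Amount` are taken from the diff itself.
use namada::core::address::Address;
use namada::state::testing::TestState;
use namada::state::{StorageRead, StorageWrite};
use namada::token::{self, Amount};

fn credit_native_balance(
    state: &mut TestState,
    owner: &Address,
    amount: Amount,
) {
    // The native token now lives behind `in_mem()` instead of `.storage`.
    let native_token = state.in_mem().native_token.clone();
    let balance_key = token::storage_key::balance_key(&native_token, owner);
    // Typed write: no explicit `serialize_to_vec()` needed any more.
    state.write(&balance_key, amount).expect("balance write failed");
    // Typed read-back through `StorageRead`, mirroring the test assertions.
    let stored: Amount = state.read(&balance_key).unwrap().unwrap_or_default();
    assert_eq!(stored, amount);
}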
@@ -1567,15 +1552,14 @@ mod test_finalize_block { add_proposal(1, ProposalVote::Nay); // Commit the genesis state - shell.wl_storage.commit_block().unwrap(); + shell.state.commit_block().unwrap(); shell.commit(); // Collect all storage key-vals into a sorted map let store_block_state = |shell: &TestShell| -> BTreeMap<_, _> { shell - .wl_storage - .storage - .db + .state + .db() .iter_prefix(None) .map(|(key, val, _gas)| (key, val)) .collect() @@ -1590,20 +1574,20 @@ mod test_finalize_block { // Keep applying finalize block let validator = shell.mode.get_validator_address().unwrap(); let pos_params = - namada_proof_of_stake::storage::read_pos_params(&shell.wl_storage) + namada_proof_of_stake::storage::read_pos_params(&shell.state) .unwrap(); let consensus_key = namada_proof_of_stake::storage::validator_consensus_key_handle( validator, ) - .get(&shell.wl_storage, Epoch::default(), &pos_params) + .get(&shell.state, Epoch::default(), &pos_params) .unwrap() .unwrap(); let proposer_address = HEXUPPER .decode(consensus_key.tm_raw_hash().as_bytes()) .unwrap(); let val_stake = read_validator_stake( - &shell.wl_storage, + &shell.state, &pos_params, validator, Epoch::default(), @@ -1642,12 +1626,12 @@ mod test_finalize_block { ..Default::default() }; // merkle tree root before finalize_block - let root_pre = shell.shell.wl_storage.storage.block.tree.root(); + let root_pre = shell.shell.state.in_mem().block.tree.root(); let _events = shell.finalize_block(req).unwrap(); // the merkle tree root should not change after finalize_block - let root_post = shell.shell.wl_storage.storage.block.tree.root(); + let root_post = shell.shell.state.in_mem().block.tree.root(); assert_eq!(root_pre.0, root_post.0); let new_state = store_block_state(&shell); // The new state must be unchanged @@ -1656,7 +1640,7 @@ mod test_finalize_block { new_state.iter(), ); // Commit the block to move on to the next one - shell.wl_storage.commit_block().unwrap(); + shell.state.commit_block().unwrap(); // Store the state after commit for the next iteration last_storage_state = store_block_state(&shell); @@ -1684,14 +1668,14 @@ mod test_finalize_block { let mut validator_set: BTreeSet = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, Epoch::default(), ) .unwrap() .into_iter() .collect(); - let params = read_pos_params(&shell.wl_storage).unwrap(); + let params = read_pos_params(&shell.state).unwrap(); let val1 = validator_set.pop_first().unwrap(); let val2 = validator_set.pop_first().unwrap(); @@ -1700,7 +1684,7 @@ mod test_finalize_block { let get_pkh = |address, epoch| { let ck = validator_consensus_key_handle(&address) - .get(&shell.wl_storage, epoch, ¶ms) + .get(&shell.state, epoch, ¶ms) .unwrap() .unwrap(); let hash_string = tm_consensus_key_raw_hash(&ck); @@ -1793,11 +1777,7 @@ mod test_finalize_block { // won't receive votes from TM since we receive votes at a 1-block // delay, so votes will be empty here next_block_for_inflation(&mut shell, pkh1.to_vec(), vec![], None); - assert!( - rewards_accumulator_handle() - .is_empty(&shell.wl_storage) - .unwrap() - ); + assert!(rewards_accumulator_handle().is_empty(&shell.state).unwrap()); // FINALIZE BLOCK 2. Tell Namada that val1 is the block proposer. // Include votes that correspond to block 1. 
Make val2 the next block's @@ -1808,20 +1788,16 @@ mod test_finalize_block { votes.clone(), None, ); - assert!(rewards_prod_1.is_empty(&shell.wl_storage).unwrap()); - assert!(rewards_prod_2.is_empty(&shell.wl_storage).unwrap()); - assert!(rewards_prod_3.is_empty(&shell.wl_storage).unwrap()); - assert!(rewards_prod_4.is_empty(&shell.wl_storage).unwrap()); - assert!( - !rewards_accumulator_handle() - .is_empty(&shell.wl_storage) - .unwrap() - ); + assert!(rewards_prod_1.is_empty(&shell.state).unwrap()); + assert!(rewards_prod_2.is_empty(&shell.state).unwrap()); + assert!(rewards_prod_3.is_empty(&shell.state).unwrap()); + assert!(rewards_prod_4.is_empty(&shell.state).unwrap()); + assert!(!rewards_accumulator_handle().is_empty(&shell.state).unwrap()); // Val1 was the proposer, so its reward should be larger than all // others, which should themselves all be equal - let acc_sum = get_rewards_sum(&shell.wl_storage); + let acc_sum = get_rewards_sum(&shell.state); assert!(is_decimal_equal_enough(Dec::one(), acc_sum)); - let acc = get_rewards_acc(&shell.wl_storage); + let acc = get_rewards_acc(&shell.state); assert_eq!(acc.get(&val2.address), acc.get(&val3.address)); assert_eq!(acc.get(&val2.address), acc.get(&val4.address)); assert!( @@ -1831,16 +1807,16 @@ mod test_finalize_block { // FINALIZE BLOCK 3, with val1 as proposer for the next block. next_block_for_inflation(&mut shell, pkh1.to_vec(), votes, None); - assert!(rewards_prod_1.is_empty(&shell.wl_storage).unwrap()); - assert!(rewards_prod_2.is_empty(&shell.wl_storage).unwrap()); - assert!(rewards_prod_3.is_empty(&shell.wl_storage).unwrap()); - assert!(rewards_prod_4.is_empty(&shell.wl_storage).unwrap()); + assert!(rewards_prod_1.is_empty(&shell.state).unwrap()); + assert!(rewards_prod_2.is_empty(&shell.state).unwrap()); + assert!(rewards_prod_3.is_empty(&shell.state).unwrap()); + assert!(rewards_prod_4.is_empty(&shell.state).unwrap()); // Val2 was the proposer for this block, so its rewards accumulator // should be the same as val1 now. Val3 and val4 should be equal as // well. - let acc_sum = get_rewards_sum(&shell.wl_storage); + let acc_sum = get_rewards_sum(&shell.state); assert!(is_decimal_equal_enough(Dec::two(), acc_sum)); - let acc = get_rewards_acc(&shell.wl_storage); + let acc = get_rewards_acc(&shell.state); assert_eq!(acc.get(&val1.address), acc.get(&val2.address)); assert_eq!(acc.get(&val3.address), acc.get(&val4.address)); assert!( @@ -1909,13 +1885,13 @@ mod test_finalize_block { votes.clone(), None, ); - assert!(rewards_prod_1.is_empty(&shell.wl_storage).unwrap()); - assert!(rewards_prod_2.is_empty(&shell.wl_storage).unwrap()); - assert!(rewards_prod_3.is_empty(&shell.wl_storage).unwrap()); - assert!(rewards_prod_4.is_empty(&shell.wl_storage).unwrap()); - let acc_sum = get_rewards_sum(&shell.wl_storage); + assert!(rewards_prod_1.is_empty(&shell.state).unwrap()); + assert!(rewards_prod_2.is_empty(&shell.state).unwrap()); + assert!(rewards_prod_3.is_empty(&shell.state).unwrap()); + assert!(rewards_prod_4.is_empty(&shell.state).unwrap()); + let acc_sum = get_rewards_sum(&shell.state); assert!(is_decimal_equal_enough(Dec::new(3, 0).unwrap(), acc_sum)); - let acc = get_rewards_acc(&shell.wl_storage); + let acc = get_rewards_acc(&shell.state); assert!( acc.get(&val1.address).cloned().unwrap() > acc.get(&val2.address).cloned().unwrap() @@ -1932,15 +1908,12 @@ mod test_finalize_block { // Advance to the start of epoch 1. Val1 is the only block proposer for // the rest of the epoch. Val4 does not vote for the rest of the epoch. 
let height_of_next_epoch = - shell.wl_storage.storage.next_epoch_min_start_height; + shell.state.in_mem().next_epoch_min_start_height; let current_height = 4_u64; - assert_eq!(current_height, shell.wl_storage.storage.block.height.0); + assert_eq!(current_height, shell.state.in_mem().block.height.0); for _ in current_height..height_of_next_epoch.0 + 2 { - dbg!( - get_rewards_acc(&shell.wl_storage), - get_rewards_sum(&shell.wl_storage), - ); + dbg!(get_rewards_acc(&shell.state), get_rewards_sum(&shell.state),); next_block_for_inflation( &mut shell, pkh1.to_vec(), @@ -1948,25 +1921,21 @@ mod test_finalize_block { None, ); } - assert!( - rewards_accumulator_handle() - .is_empty(&shell.wl_storage) - .unwrap() - ); + assert!(rewards_accumulator_handle().is_empty(&shell.state).unwrap()); let rp1 = rewards_prod_1 - .get(&shell.wl_storage, &Epoch::default()) + .get(&shell.state, &Epoch::default()) .unwrap() .unwrap(); let rp2 = rewards_prod_2 - .get(&shell.wl_storage, &Epoch::default()) + .get(&shell.state, &Epoch::default()) .unwrap() .unwrap(); let rp3 = rewards_prod_3 - .get(&shell.wl_storage, &Epoch::default()) + .get(&shell.state, &Epoch::default()) .unwrap() .unwrap(); let rp4 = rewards_prod_4 - .get(&shell.wl_storage, &Epoch::default()) + .get(&shell.state, &Epoch::default()) .unwrap() .unwrap(); assert!(rp1 > rp2); @@ -1985,20 +1954,20 @@ mod test_finalize_block { let mut validator_set: BTreeSet = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, Epoch::default(), ) .unwrap() .into_iter() .collect(); - let params = read_pos_params(&shell.wl_storage).unwrap(); + let params = read_pos_params(&shell.state).unwrap(); let validator = validator_set.pop_first().unwrap(); let get_pkh = |address, epoch| { let ck = validator_consensus_key_handle(&address) - .get(&shell.wl_storage, epoch, ¶ms) + .get(&shell.state, epoch, ¶ms) .unwrap() .unwrap(); let hash_string = tm_consensus_key_raw_hash(&ck); @@ -2040,11 +2009,7 @@ mod test_finalize_block { // won't receive votes from TM since we receive votes at a 1-block // delay, so votes will be empty here next_block_for_inflation(&mut shell, pkh1.to_vec(), vec![], None); - assert!( - rewards_accumulator_handle() - .is_empty(&shell.wl_storage) - .unwrap() - ); + assert!(rewards_accumulator_handle().is_empty(&shell.state).unwrap()); let (current_epoch, inflation) = advance_epoch(&mut shell, &pkh1, &votes, None); @@ -2052,7 +2017,7 @@ mod test_finalize_block { // Query the available rewards let query_rewards = namada_proof_of_stake::query_reward_tokens( - &shell.wl_storage, + &shell.state, None, &validator.address, current_epoch, @@ -2061,7 +2026,7 @@ mod test_finalize_block { // Claim the rewards from the initial epoch let reward_1 = namada_proof_of_stake::claim_reward_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator.address, current_epoch, @@ -2074,7 +2039,7 @@ mod test_finalize_block { // Query the available rewards again and check that it is 0 now after // the claim let query_rewards = namada_proof_of_stake::query_reward_tokens( - &shell.wl_storage, + &shell.state, None, &validator.address, current_epoch, @@ -2090,7 +2055,7 @@ mod test_finalize_block { None, ); let att = namada_proof_of_stake::claim_reward_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator.address, current_epoch, @@ -2106,7 +2071,7 @@ mod test_finalize_block { // Unbond some tokens let unbond_amount = token::Amount::native_whole(50_000); let unbond_res = namada_proof_of_stake::unbond_tokens( - &mut shell.wl_storage, 
+ &mut shell.state, None, &validator.address, unbond_amount, @@ -2118,7 +2083,7 @@ mod test_finalize_block { // Query the available rewards let query_rewards = namada_proof_of_stake::query_reward_tokens( - &shell.wl_storage, + &shell.state, None, &validator.address, current_epoch, @@ -2126,7 +2091,7 @@ mod test_finalize_block { .unwrap(); let rew = namada_proof_of_stake::claim_reward_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator.address, current_epoch, @@ -2140,13 +2105,13 @@ mod test_finalize_block { let withdraw_epoch = current_epoch + params.withdrawable_epoch_offset(); let last_claim_epoch = namada_proof_of_stake::storage::get_last_reward_claim_epoch( - &shell.wl_storage, + &shell.state, &validator.address, &validator.address, ) .unwrap(); let bond_amounts = namada_proof_of_stake::bond_amounts_for_rewards( - &shell.wl_storage, + &shell.state, &bond_id, last_claim_epoch.unwrap_or_default(), withdraw_epoch, @@ -2171,8 +2136,8 @@ mod test_finalize_block { let mut missed_rewards = token::Amount::zero(); while current_epoch < withdraw_epoch { let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (new_epoch, inflation) = advance_epoch(&mut shell, &pkh1, &votes, None); @@ -2186,7 +2151,7 @@ mod test_finalize_block { // Withdraw tokens let withdraw_amount = namada_proof_of_stake::withdraw_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator.address, current_epoch, @@ -2196,7 +2161,7 @@ mod test_finalize_block { // Query the available rewards let query_rewards = namada_proof_of_stake::query_reward_tokens( - &shell.wl_storage, + &shell.state, None, &validator.address, current_epoch, @@ -2205,7 +2170,7 @@ mod test_finalize_block { // Claim tokens let reward_2 = namada_proof_of_stake::claim_reward_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator.address, current_epoch, @@ -2226,7 +2191,7 @@ mod test_finalize_block { // Query the available rewards to check that they are 0 let query_rewards = namada_proof_of_stake::query_reward_tokens( - &shell.wl_storage, + &shell.state, None, &validator.address, current_epoch, @@ -2246,27 +2211,27 @@ mod test_finalize_block { let mut validator_set: BTreeSet = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, Epoch::default(), ) .unwrap() .into_iter() .collect(); - let params = read_pos_params(&shell.wl_storage).unwrap(); + let params = read_pos_params(&shell.state).unwrap(); let validator = validator_set.pop_first().unwrap(); let commission_rate = namada_proof_of_stake::storage::validator_commission_rate_handle( &validator.address, ) - .get(&shell.wl_storage, Epoch(0), ¶ms) + .get(&shell.state, Epoch(0), ¶ms) .unwrap() .unwrap(); let get_pkh = |address, epoch| { let ck = validator_consensus_key_handle(&address) - .get(&shell.wl_storage, epoch, ¶ms) + .get(&shell.state, epoch, ¶ms) .unwrap() .unwrap(); let hash_string = tm_consensus_key_raw_hash(&ck); @@ -2292,26 +2257,22 @@ mod test_finalize_block { // won't receive votes from TM since we receive votes at a 1-block // delay, so votes will be empty here next_block_for_inflation(&mut shell, pkh1.clone(), vec![], None); - assert!( - rewards_accumulator_handle() - .is_empty(&shell.wl_storage) - .unwrap() - ); + assert!(rewards_accumulator_handle().is_empty(&shell.state).unwrap()); // Make an account with balance and delegate some tokens let delegator = address::testing::gen_implicit_address(); let del_amount = 
init_stake; - let staking_token = shell.wl_storage.storage.native_token.clone(); + let staking_token = shell.state.in_mem().native_token.clone(); namada::token::credit_tokens( - &mut shell.wl_storage, + &mut shell.state, &staking_token, &delegator, 2 * init_stake, ) .unwrap(); - let mut current_epoch = shell.wl_storage.storage.block.epoch; + let mut current_epoch = shell.state.in_mem().block.epoch; namada_proof_of_stake::bond_tokens( - &mut shell.wl_storage, + &mut shell.state, Some(&delegator), &validator.address, del_amount, @@ -2323,8 +2284,8 @@ mod test_finalize_block { // Advance to pipeline epoch for _ in 0..params.pipeline_len { let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (new_epoch, inflation) = advance_epoch(&mut shell, &pkh1, &votes, None); @@ -2334,7 +2295,7 @@ mod test_finalize_block { // Claim the rewards for the validator for the first two epochs let val_reward_1 = namada_proof_of_stake::claim_reward_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator.address, current_epoch, @@ -2350,8 +2311,8 @@ mod test_finalize_block { // Go to the next epoch, where now the delegator's stake has been active // for an epoch let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (new_epoch, inflation_3) = advance_epoch(&mut shell, &pkh1, &votes, None); @@ -2360,7 +2321,7 @@ mod test_finalize_block { // Claim again for the validator let val_reward_2 = namada_proof_of_stake::claim_reward_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator.address, current_epoch, @@ -2369,7 +2330,7 @@ mod test_finalize_block { // Claim for the delegator let del_reward_1 = namada_proof_of_stake::claim_reward_tokens( - &mut shell.wl_storage, + &mut shell.state, Some(&delegator), &validator.address, current_epoch, @@ -2409,15 +2370,15 @@ mod test_finalize_block { let mut validators: BTreeSet = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, Epoch::default(), ) .unwrap() .into_iter() .collect(); - let params = read_pos_params(&shell.wl_storage).unwrap(); - let mut current_epoch = shell.wl_storage.storage.block.epoch; + let params = read_pos_params(&shell.state).unwrap(); + let mut current_epoch = shell.state.in_mem().block.epoch; let validator1 = validators.pop_first().unwrap(); let validator2 = validators.pop_first().unwrap(); @@ -2426,23 +2387,23 @@ mod test_finalize_block { let init_stake = validator1.bonded_stake; // Give the validators some tokens for txs - let staking_token = shell.wl_storage.storage.native_token.clone(); + let staking_token = shell.state.in_mem().native_token.clone(); namada::token::credit_tokens( - &mut shell.wl_storage, + &mut shell.state, &staking_token, &validator1.address, init_stake, ) .unwrap(); namada::token::credit_tokens( - &mut shell.wl_storage, + &mut shell.state, &staking_token, &validator2.address, init_stake, ) .unwrap(); namada::token::credit_tokens( - &mut shell.wl_storage, + &mut shell.state, &staking_token, &validator3.address, init_stake, @@ -2451,7 +2412,7 @@ mod test_finalize_block { let get_pkh = |address, epoch| { let ck = validator_consensus_key_handle(&address) - .get(&shell.wl_storage, epoch, ¶ms) + .get(&shell.state, epoch, ¶ms) .unwrap() .unwrap(); let hash_string = tm_consensus_key_raw_hash(&ck); @@ -2463,18 +2424,12 @@ mod test_finalize_block { // won't receive votes from TM 
since we receive votes at a 1-block // delay, so votes will be empty here next_block_for_inflation(&mut shell, pkh1.clone(), vec![], None); - assert!( - rewards_accumulator_handle() - .is_empty(&shell.wl_storage) - .unwrap() - ); + assert!(rewards_accumulator_handle().is_empty(&shell.state).unwrap()); // Check that there's 3 unique consensus keys let consensus_keys = - namada_proof_of_stake::storage::get_consensus_key_set( - &shell.wl_storage, - ) - .unwrap(); + namada_proof_of_stake::storage::get_consensus_key_set(&shell.state) + .unwrap(); assert_eq!(consensus_keys.len(), 3); // let ck1 = validator_consensus_key_handle(&validator) // .get(&storage, current_epoch, ¶ms) @@ -2488,7 +2443,7 @@ mod test_finalize_block { // Validator1 bonds 1 NAM let bond_amount = token::Amount::native_whole(1); namada_proof_of_stake::bond_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator1.address, bond_amount, @@ -2500,7 +2455,7 @@ mod test_finalize_block { // Validator2 changes consensus key let new_ck2 = common_sk_from_simple_seed(1).ref_to(); namada_proof_of_stake::change_consensus_key( - &mut shell.wl_storage, + &mut shell.state, &validator2.address, &new_ck2, current_epoch, @@ -2509,7 +2464,7 @@ mod test_finalize_block { // Validator3 bonds 1 NAM and changes consensus key namada_proof_of_stake::bond_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator3.address, bond_amount, @@ -2519,7 +2474,7 @@ mod test_finalize_block { .unwrap(); let new_ck3 = common_sk_from_simple_seed(2).ref_to(); namada_proof_of_stake::change_consensus_key( - &mut shell.wl_storage, + &mut shell.state, &validator3.address, &new_ck3, current_epoch, @@ -2528,17 +2483,15 @@ mod test_finalize_block { // Check that there's 5 unique consensus keys let consensus_keys = - namada_proof_of_stake::storage::get_consensus_key_set( - &shell.wl_storage, - ) - .unwrap(); + namada_proof_of_stake::storage::get_consensus_key_set(&shell.state) + .unwrap(); assert_eq!(consensus_keys.len(), 5); // Advance to pipeline epoch for _ in 0..params.pipeline_len { let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (new_epoch, _inflation) = advance_epoch(&mut shell, &pkh1, &votes, None); @@ -2546,7 +2499,7 @@ mod test_finalize_block { } let consensus_vals = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, current_epoch, ) .unwrap(); @@ -2571,7 +2524,7 @@ mod test_finalize_block { // Val 1 changes consensus key let new_ck1 = common_sk_from_simple_seed(3).ref_to(); namada_proof_of_stake::change_consensus_key( - &mut shell.wl_storage, + &mut shell.state, &validator1.address, &new_ck1, current_epoch, @@ -2580,7 +2533,7 @@ mod test_finalize_block { // Val 2 is fully unbonded namada_proof_of_stake::unbond_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator2.address, init_stake, @@ -2591,7 +2544,7 @@ mod test_finalize_block { // Val 3 is fully unbonded and changes consensus key namada_proof_of_stake::unbond_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator3.address, init_stake + bond_amount, @@ -2601,7 +2554,7 @@ mod test_finalize_block { .unwrap(); let new2_ck3 = common_sk_from_simple_seed(4).ref_to(); namada_proof_of_stake::change_consensus_key( - &mut shell.wl_storage, + &mut shell.state, &validator1.address, &new2_ck3, current_epoch, @@ -2610,17 +2563,15 @@ mod test_finalize_block { // Check that there's 7 unique consensus keys let consensus_keys = - 
namada_proof_of_stake::storage::get_consensus_key_set( - &shell.wl_storage, - ) - .unwrap(); + namada_proof_of_stake::storage::get_consensus_key_set(&shell.state) + .unwrap(); assert_eq!(consensus_keys.len(), 7); // Advance to pipeline epoch for _ in 0..params.pipeline_len { let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (new_epoch, _inflation) = advance_epoch(&mut shell, &pkh1, &votes, None); @@ -2628,7 +2579,7 @@ mod test_finalize_block { } let consensus_vals = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, current_epoch, ) .unwrap(); @@ -2645,7 +2596,7 @@ mod test_finalize_block { // Val2 bonds 1 NAM and changes consensus key namada_proof_of_stake::bond_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator2.address, bond_amount, @@ -2655,7 +2606,7 @@ mod test_finalize_block { .unwrap(); let new2_ck2 = common_sk_from_simple_seed(5).ref_to(); namada_proof_of_stake::change_consensus_key( - &mut shell.wl_storage, + &mut shell.state, &validator2.address, &new2_ck2, current_epoch, @@ -2664,7 +2615,7 @@ mod test_finalize_block { // Val3 bonds 1 NAM namada_proof_of_stake::bond_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &validator3.address, bond_amount, @@ -2675,17 +2626,15 @@ mod test_finalize_block { // Check that there's 8 unique consensus keys let consensus_keys = - namada_proof_of_stake::storage::get_consensus_key_set( - &shell.wl_storage, - ) - .unwrap(); + namada_proof_of_stake::storage::get_consensus_key_set(&shell.state) + .unwrap(); assert_eq!(consensus_keys.len(), 8); // Advance to pipeline epoch for _ in 0..params.pipeline_len { let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (new_epoch, _inflation) = advance_epoch(&mut shell, &pkh1, &votes, None); @@ -2693,7 +2642,7 @@ mod test_finalize_block { } let consensus_vals = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, current_epoch, ) .unwrap(); @@ -2751,7 +2700,7 @@ mod test_finalize_block { replay_protection::last_key(&wrapper_tx.header_hash()); // merkle tree root before finalize_block - let root_pre = shell.shell.wl_storage.storage.block.tree.root(); + let root_pre = shell.shell.state.in_mem().block.tree.root(); let event = &shell .finalize_block(FinalizeBlock { @@ -2771,15 +2720,15 @@ mod test_finalize_block { assert_eq!(code, String::from(ResultCode::Ok).as_str()); // the merkle tree root should not change after finalize_block - let root_post = shell.shell.wl_storage.storage.block.tree.root(); + let root_post = shell.shell.state.in_mem().block.tree.root(); assert_eq!(root_pre.0, root_post.0); // Check transaction's hash in storage assert!( shell .shell - .wl_storage - .write_log + .state + .write_log() .has_replay_protection_entry(&wrapper_tx.header_hash()) .unwrap_or_default() ); @@ -2787,8 +2736,8 @@ mod test_finalize_block { assert!( !shell .shell - .wl_storage - .storage + .state + .in_mem() .block .tree .has_key(&wrapper_hash_key) @@ -2803,14 +2752,14 @@ mod test_finalize_block { let (mut shell, _, _, _) = setup(); let keypair = gen_keypair(); let keypair_2 = gen_keypair(); - let mut batch = namada::state::testing::TestStorage::batch(); + let mut batch = namada::state::testing::TestState::batch(); let tx_code = TestWasms::TxNoOp.read_bytes(); let mut wrapper = 
Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -2827,7 +2776,7 @@ mod test_finalize_block { new_wrapper.update_header(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair_2.ref_to(), Epoch(0), @@ -2856,8 +2805,7 @@ mod test_finalize_block { for tx in [&wrapper, &new_wrapper] { let hash_subkey = replay_protection::last_key(&tx.header_hash()); shell - .wl_storage - .storage + .state .write_replay_protection_entry(&mut batch, &hash_subkey) .expect("Test failed"); } @@ -2876,7 +2824,7 @@ mod test_finalize_block { shell.enqueue_tx(wrapper.clone(), GAS_LIMIT_MULTIPLIER.into()); shell.enqueue_tx(new_wrapper.clone(), GAS_LIMIT_MULTIPLIER.into()); // merkle tree root before finalize_block - let root_pre = shell.shell.wl_storage.storage.block.tree.root(); + let root_pre = shell.shell.state.in_mem().block.tree.root(); let event = &shell .finalize_block(FinalizeBlock { @@ -2886,7 +2834,7 @@ mod test_finalize_block { .expect("Test failed"); // the merkle tree root should not change after finalize_block - let root_post = shell.shell.wl_storage.storage.block.tree.root(); + let root_post = shell.shell.state.in_mem().block.tree.root(); assert_eq!(root_pre.0, root_post.0); assert_eq!(event[0].event_type.to_string(), String::from("applied")); @@ -2899,15 +2847,15 @@ mod test_finalize_block { for (inner, wrapper) in [(inner, wrapper), (new_inner, new_wrapper)] { assert!( shell - .wl_storage - .write_log + .state + .write_log() .has_replay_protection_entry(&inner.raw_header_hash()) .unwrap_or_default() ); assert!( !shell - .wl_storage - .write_log + .state + .write_log() .has_replay_protection_entry(&wrapper.header_hash()) .unwrap_or_default() ); @@ -2922,7 +2870,7 @@ mod test_finalize_block { fn test_tx_hash_handling() { let (mut shell, _, _, _) = setup(); let keypair = gen_keypair(); - let mut batch = namada::state::testing::TestStorage::batch(); + let mut batch = namada::state::testing::TestState::batch(); let (out_of_gas_wrapper, _) = mk_wrapper_tx(&shell, &keypair); let (undecryptable_wrapper, _) = mk_wrapper_tx(&shell, &keypair); @@ -2937,7 +2885,7 @@ mod test_finalize_block { amount_per_gas_unit: DenominatedAmount::native( Amount::zero(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -2999,8 +2947,7 @@ mod test_finalize_block { let hash_subkey = replay_protection::last_key(&wrapper.header_hash()); shell - .wl_storage - .storage + .state .write_replay_protection_entry(&mut batch, &hash_subkey) .unwrap(); } @@ -3034,7 +2981,7 @@ mod test_finalize_block { ); shell.enqueue_tx(failing_wrapper.clone(), GAS_LIMIT_MULTIPLIER.into()); // merkle tree root before finalize_block - let root_pre = shell.shell.wl_storage.storage.block.tree.root(); + let root_pre = shell.shell.state.in_mem().block.tree.root(); let event = &shell .finalize_block(FinalizeBlock { @@ -3044,7 +2991,7 @@ mod test_finalize_block { .expect("Test failed"); // the merkle tree root should not change after finalize_block - let root_post = shell.shell.wl_storage.storage.block.tree.root(); + let root_post = shell.shell.state.in_mem().block.tree.root(); assert_eq!(root_pre.0, 
root_post.0); assert_eq!(event[0].event_type.to_string(), String::from("applied")); @@ -3071,8 +3018,8 @@ mod test_finalize_block { ] { assert!( !shell - .wl_storage - .write_log + .state + .write_log() .has_replay_protection_entry( &invalid_inner.raw_header_hash() ) @@ -3080,23 +3027,22 @@ mod test_finalize_block { ); assert!( shell - .wl_storage - .storage + .state .has_replay_protection_entry(&valid_wrapper.header_hash()) .unwrap_or_default() ); } assert!( shell - .wl_storage - .write_log + .state + .write_log() .has_replay_protection_entry(&failing_inner.raw_header_hash()) .expect("test failed") ); assert!( !shell - .wl_storage - .write_log + .state + .write_log() .has_replay_protection_entry(&failing_wrapper.header_hash()) .unwrap_or_default() ); @@ -3114,7 +3060,7 @@ mod test_finalize_block { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(0.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -3144,7 +3090,7 @@ mod test_finalize_block { }, }]; // merkle tree root before finalize_block - let root_pre = shell.shell.wl_storage.storage.block.tree.root(); + let root_pre = shell.shell.state.in_mem().block.tree.root(); let event = &shell .finalize_block(FinalizeBlock { @@ -3154,7 +3100,7 @@ mod test_finalize_block { .expect("Test failed"); // the merkle tree root should not change after finalize_block - let root_post = shell.shell.wl_storage.storage.block.tree.root(); + let root_post = shell.shell.state.in_mem().block.tree.root(); assert_eq!(root_pre.0, root_post.0); assert_eq!(event[0].event_type.to_string(), String::from("accepted")); @@ -3167,15 +3113,15 @@ mod test_finalize_block { assert!( shell - .wl_storage - .write_log + .state + .write_log() .has_replay_protection_entry(&wrapper_hash) .unwrap_or_default() ); assert!( !shell - .wl_storage - .write_log + .state + .write_log() .has_replay_protection_entry(&wrapper.raw_header_hash()) .unwrap_or_default() ); @@ -3193,7 +3139,7 @@ mod test_finalize_block { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(100.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -3231,14 +3177,11 @@ mod test_finalize_block { let code = event.attributes.get("code").expect("Testfailed").as_str(); assert_eq!(code, String::from(ResultCode::InvalidTx).as_str()); let balance_key = token::storage_key::balance_key( - &shell.wl_storage.storage.native_token, + &shell.state.in_mem().native_token, &Address::from(&keypair.to_public()), ); - let balance: Amount = shell - .wl_storage - .read(&balance_key) - .unwrap() - .unwrap_or_default(); + let balance: Amount = + shell.state.read(&balance_key).unwrap().unwrap_or_default(); assert_eq!(balance, 0.into()) } @@ -3251,13 +3194,13 @@ mod test_finalize_block { let validator = shell.mode.get_validator_address().unwrap().to_owned(); let pos_params = - namada_proof_of_stake::storage::read_pos_params(&shell.wl_storage) + namada_proof_of_stake::storage::read_pos_params(&shell.state) .unwrap(); let consensus_key = namada_proof_of_stake::storage::validator_consensus_key_handle( &validator, ) - .get(&shell.wl_storage, Epoch::default(), &pos_params) + .get(&shell.state, Epoch::default(), &pos_params) .unwrap() .unwrap(); let proposer_address = HEXUPPER @@ -3265,8 +3208,8 @@ mod test_finalize_block { .unwrap(); let 
proposer_balance = namada::token::read_balance( - &shell.wl_storage, - &shell.wl_storage.storage.native_token, + &shell.state, + &shell.state.in_mem().native_token, &validator, ) .unwrap(); @@ -3279,7 +3222,7 @@ mod test_finalize_block { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -3303,13 +3246,13 @@ mod test_finalize_block { let fee_amount = namada::token::denom_to_amount( fee_amount, &wrapper.header().wrapper().unwrap().fee.token, - &shell.wl_storage, + &shell.state, ) .unwrap(); let signer_balance = namada::token::read_balance( - &shell.wl_storage, - &shell.wl_storage.storage.native_token, + &shell.state, + &shell.state.in_mem().native_token, &wrapper.header().wrapper().unwrap().fee_payer(), ) .unwrap(); @@ -3336,8 +3279,8 @@ mod test_finalize_block { assert_eq!(code, String::from(ResultCode::Ok).as_str()); let new_proposer_balance = namada::token::read_balance( - &shell.wl_storage, - &shell.wl_storage.storage.native_token, + &shell.state, + &shell.state.in_mem().native_token, &validator, ) .unwrap(); @@ -3347,8 +3290,8 @@ mod test_finalize_block { ); let new_signer_balance = namada::token::read_balance( - &shell.wl_storage, - &shell.wl_storage.storage.native_token, + &shell.state, + &shell.state.in_mem().native_token, &wrapper.header().wrapper().unwrap().fee_payer(), ) .unwrap(); @@ -3366,13 +3309,13 @@ mod test_finalize_block { num_validators, ..Default::default() }); - let mut params = read_pos_params(&shell.wl_storage).unwrap(); + let mut params = read_pos_params(&shell.state).unwrap(); params.owned.unbonding_len = 4; - write_pos_params(&mut shell.wl_storage, ¶ms.owned)?; + write_pos_params(&mut shell.state, ¶ms.owned)?; let validator_set: Vec = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, Epoch::default(), ) .unwrap() @@ -3387,7 +3330,7 @@ mod test_finalize_block { let get_pkh = |address, epoch| { let ck = validator_consensus_key_handle(&address) - .get(&shell.wl_storage, epoch, ¶ms) + .get(&shell.state, epoch, ¶ms) .unwrap() .unwrap(); let hash_string = tm_consensus_key_raw_hash(&ck); @@ -3402,7 +3345,7 @@ mod test_finalize_block { // Every validator should be in the consensus set assert_eq!( validator_state_handle(&validator.address) - .get(&shell.wl_storage, Epoch::default(), ¶ms) + .get(&shell.state, Epoch::default(), ¶ms) .unwrap(), Some(ValidatorState::Consensus) ); @@ -3420,8 +3363,8 @@ mod test_finalize_block { next_block_for_inflation(&mut shell, pkh1.to_vec(), vec![], None); let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); assert!(!votes.is_empty()); assert_eq!(votes.len(), 7_usize); @@ -3458,7 +3401,7 @@ mod test_finalize_block { Some(byzantine_validators), ); - let processing_epoch = shell.wl_storage.storage.block.epoch + let processing_epoch = shell.state.in_mem().block.epoch + params.unbonding_len + 1_u64 + params.cubic_slashing_window_length; @@ -3467,60 +3410,60 @@ mod test_finalize_block { // are properly updated assert_eq!( validator_state_handle(&val1.address) - .get(&shell.wl_storage, Epoch::default(), ¶ms) + .get(&shell.state, Epoch::default(), ¶ms) .unwrap(), Some(ValidatorState::Consensus) ); assert_eq!( validator_state_handle(&val2.address) - .get(&shell.wl_storage, 
Epoch::default(), ¶ms) + .get(&shell.state, Epoch::default(), ¶ms) .unwrap(), Some(ValidatorState::Consensus) ); assert!( enqueued_slashes_handle() .at(&Epoch::default()) - .is_empty(&shell.wl_storage)? + .is_empty(&shell.state)? ); assert_eq!( - get_num_consensus_validators(&shell.wl_storage, Epoch::default()) + get_num_consensus_validators(&shell.state, Epoch::default()) .unwrap(), 7_u64 ); for epoch in Epoch::default().next().iter_range(params.pipeline_len) { assert_eq!( validator_state_handle(&val1.address) - .get(&shell.wl_storage, epoch, ¶ms) + .get(&shell.state, epoch, ¶ms) .unwrap(), Some(ValidatorState::Jailed) ); assert_eq!( validator_state_handle(&val2.address) - .get(&shell.wl_storage, epoch, ¶ms) + .get(&shell.state, epoch, ¶ms) .unwrap(), Some(ValidatorState::Jailed) ); assert!( enqueued_slashes_handle() .at(&epoch) - .is_empty(&shell.wl_storage)? + .is_empty(&shell.state)? ); assert_eq!( - get_num_consensus_validators(&shell.wl_storage, epoch).unwrap(), + get_num_consensus_validators(&shell.state, epoch).unwrap(), 5_u64 ); } assert!( !enqueued_slashes_handle() .at(&processing_epoch) - .is_empty(&shell.wl_storage)? + .is_empty(&shell.state)? ); // Advance to the processing epoch loop { let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); next_block_for_inflation( &mut shell, @@ -3530,34 +3473,34 @@ mod test_finalize_block { ); // println!( // "Block {} epoch {}", - // shell.wl_storage.storage.block.height, - // shell.wl_storage.storage.block.epoch + // shell.state.in_mem().block.height, + // shell.state.in_mem().block.epoch // ); - if shell.wl_storage.storage.block.epoch == processing_epoch { + if shell.state.in_mem().block.epoch == processing_epoch { // println!("Reached processing epoch"); break; } else { assert!( enqueued_slashes_handle() - .at(&shell.wl_storage.storage.block.epoch) - .is_empty(&shell.wl_storage)? + .at(&shell.state.in_mem().block.epoch) + .is_empty(&shell.state)? ); let stake1 = read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val1.address, - shell.wl_storage.storage.block.epoch, + shell.state.in_mem().block.epoch, )?; let stake2 = read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val2.address, - shell.wl_storage.storage.block.epoch, + shell.state.in_mem().block.epoch, )?; let total_stake = read_total_stake( - &shell.wl_storage, + &shell.state, ¶ms, - shell.wl_storage.storage.block.epoch, + shell.state.in_mem().block.epoch, )?; assert_eq!(stake1, initial_stake); assert_eq!(stake2, initial_stake); @@ -3565,35 +3508,33 @@ mod test_finalize_block { } } - let num_slashes = namada::state::iter_prefix_bytes( - &shell.wl_storage, - &slashes_prefix(), - )? - .filter(|kv_res| { - let (k, _v) = kv_res.as_ref().unwrap(); - is_validator_slashes_key(k).is_some() - }) - .count(); + let num_slashes = + namada::state::iter_prefix_bytes(&shell.state, &slashes_prefix())? + .filter(|kv_res| { + let (k, _v) = kv_res.as_ref().unwrap(); + is_validator_slashes_key(k).is_some() + }) + .count(); assert_eq!(num_slashes, 2); assert_eq!( validator_slashes_handle(&val1.address) - .len(&shell.wl_storage) + .len(&shell.state) .unwrap(), 1_u64 ); assert_eq!( validator_slashes_handle(&val2.address) - .len(&shell.wl_storage) + .len(&shell.state) .unwrap(), 1_u64 ); let slash1 = validator_slashes_handle(&val1.address) - .get(&shell.wl_storage, 0)? + .get(&shell.state, 0)? 
.unwrap(); let slash2 = validator_slashes_handle(&val2.address) - .get(&shell.wl_storage, 0)? + .get(&shell.state, 0)? .unwrap(); assert_eq!(slash1.r#type, SlashType::DuplicateVote); @@ -3612,47 +3553,47 @@ mod test_finalize_block { // Check that there are still 5 consensus validators and the 2 // misbehaving ones are still jailed for epoch in shell - .wl_storage - .storage + .state + .in_mem() .block .epoch .iter_range(params.pipeline_len + 1) { assert_eq!( validator_state_handle(&val1.address) - .get(&shell.wl_storage, epoch, ¶ms) + .get(&shell.state, epoch, ¶ms) .unwrap(), Some(ValidatorState::Jailed) ); assert_eq!( validator_state_handle(&val2.address) - .get(&shell.wl_storage, epoch, ¶ms) + .get(&shell.state, epoch, ¶ms) .unwrap(), Some(ValidatorState::Jailed) ); assert_eq!( - get_num_consensus_validators(&shell.wl_storage, epoch).unwrap(), + get_num_consensus_validators(&shell.state, epoch).unwrap(), 5_u64 ); } // Check that the deltas at the pipeline epoch are slashed let pipeline_epoch = - shell.wl_storage.storage.block.epoch + params.pipeline_len; + shell.state.in_mem().block.epoch + params.pipeline_len; let stake1 = read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val1.address, pipeline_epoch, )?; let stake2 = read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val2.address, pipeline_epoch, )?; let total_stake = - read_total_stake(&shell.wl_storage, ¶ms, pipeline_epoch)?; + read_total_stake(&shell.state, ¶ms, pipeline_epoch)?; let expected_slashed = initial_stake.mul_ceil(cubic_rate); @@ -3676,51 +3617,50 @@ mod test_finalize_block { assert_eq!(total_stake, total_initial_stake - 2u64 * expected_slashed); // Unjail one of the validators - let current_epoch = shell.wl_storage.storage.block.epoch; - unjail_validator(&mut shell.wl_storage, &val1.address, current_epoch)?; + let current_epoch = shell.state.in_mem().block.epoch; + unjail_validator(&mut shell.state, &val1.address, current_epoch)?; let pipeline_epoch = current_epoch + params.pipeline_len; // Check that the state is the same until the pipeline epoch, at which // point one validator is unjailed for epoch in shell - .wl_storage - .storage + .state + .in_mem() .block .epoch .iter_range(params.pipeline_len) { assert_eq!( validator_state_handle(&val1.address) - .get(&shell.wl_storage, epoch, ¶ms) + .get(&shell.state, epoch, ¶ms) .unwrap(), Some(ValidatorState::Jailed) ); assert_eq!( validator_state_handle(&val2.address) - .get(&shell.wl_storage, epoch, ¶ms) + .get(&shell.state, epoch, ¶ms) .unwrap(), Some(ValidatorState::Jailed) ); assert_eq!( - get_num_consensus_validators(&shell.wl_storage, epoch).unwrap(), + get_num_consensus_validators(&shell.state, epoch).unwrap(), 5_u64 ); } assert_eq!( validator_state_handle(&val1.address) - .get(&shell.wl_storage, pipeline_epoch, ¶ms) + .get(&shell.state, pipeline_epoch, ¶ms) .unwrap(), Some(ValidatorState::Consensus) ); assert_eq!( validator_state_handle(&val2.address) - .get(&shell.wl_storage, pipeline_epoch, ¶ms) + .get(&shell.state, pipeline_epoch, ¶ms) .unwrap(), Some(ValidatorState::Jailed) ); assert_eq!( - get_num_consensus_validators(&shell.wl_storage, pipeline_epoch) - .unwrap(), + get_num_consensus_validators(&shell.state, pipeline_epoch).unwrap(), 6_u64 ); @@ -3758,19 +3698,19 @@ mod test_finalize_block { num_validators, ..Default::default() }); - let mut params = read_pos_params(&shell.wl_storage).unwrap(); + let mut params = read_pos_params(&shell.state).unwrap(); params.owned.unbonding_len = 4; params.owned.max_validator_slots = 50; - 
write_pos_params(&mut shell.wl_storage, ¶ms.owned)?; + write_pos_params(&mut shell.state, ¶ms.owned)?; // Slash pool balance - let nam_address = shell.wl_storage.storage.native_token.clone(); + let nam_address = shell.state.in_mem().native_token.clone(); let slash_balance_key = token::storage_key::balance_key( &nam_address, &namada_proof_of_stake::SLASH_POOL_ADDRESS, ); let slash_pool_balance_init: token::Amount = shell - .wl_storage + .state .read(&slash_balance_key) .expect("must be able to read") .unwrap_or_default(); @@ -3778,7 +3718,7 @@ mod test_finalize_block { let consensus_set: Vec = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, Epoch::default(), ) .unwrap() @@ -3787,7 +3727,7 @@ mod test_finalize_block { let val1 = consensus_set[0].clone(); let pkh1 = get_pkh_from_address( - &shell.wl_storage, + &shell.state, ¶ms, val1.address.clone(), Epoch::default(), @@ -3799,28 +3739,28 @@ mod test_finalize_block { // Finalize block 1 next_block_for_inflation(&mut shell, pkh1.to_vec(), vec![], None); - let votes = get_default_true_votes(&shell.wl_storage, Epoch::default()); + let votes = get_default_true_votes(&shell.state, Epoch::default()); assert!(!votes.is_empty()); // Advance to epoch 1 and // 1. Delegate 67231 NAM to validator // 2. Validator self-unbond 154654 NAM let (current_epoch, _) = advance_epoch(&mut shell, &pkh1, &votes, None); - assert_eq!(shell.wl_storage.storage.block.epoch.0, 1_u64); + assert_eq!(shell.state.in_mem().block.epoch.0, 1_u64); // Make an account with balance and delegate some tokens let delegator = address::testing::gen_implicit_address(); let del_1_amount = token::Amount::native_whole(37_231); - let staking_token = shell.wl_storage.storage.native_token.clone(); + let staking_token = shell.state.in_mem().native_token.clone(); namada::token::credit_tokens( - &mut shell.wl_storage, + &mut shell.state, &staking_token, &delegator, token::Amount::native_whole(200_000), ) .unwrap(); namada_proof_of_stake::bond_tokens( - &mut shell.wl_storage, + &mut shell.state, Some(&delegator), &val1.address, del_1_amount, @@ -3832,7 +3772,7 @@ mod test_finalize_block { // Self-unbond let self_unbond_1_amount = token::Amount::native_whole(84_654); namada_proof_of_stake::unbond_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &val1.address, self_unbond_1_amount, @@ -3842,7 +3782,7 @@ mod test_finalize_block { .unwrap(); let val_stake = namada_proof_of_stake::storage::read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val1.address, current_epoch + params.pipeline_len, @@ -3850,7 +3790,7 @@ mod test_finalize_block { .unwrap(); let total_stake = namada_proof_of_stake::storage::read_total_stake( - &shell.wl_storage, + &shell.state, ¶ms, current_epoch + params.pipeline_len, ) @@ -3868,14 +3808,14 @@ mod test_finalize_block { // Advance to epoch 2 and // 1. 
Unbond 18000 NAM from delegation let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (current_epoch, _) = advance_epoch(&mut shell, &pkh1, &votes, None); tracing::debug!("\nUnbonding in epoch 2"); let del_unbond_1_amount = token::Amount::native_whole(18_000); namada_proof_of_stake::unbond_tokens( - &mut shell.wl_storage, + &mut shell.state, Some(&delegator), &val1.address, del_unbond_1_amount, @@ -3885,14 +3825,14 @@ mod test_finalize_block { .unwrap(); let val_stake = namada_proof_of_stake::storage::read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val1.address, current_epoch + params.pipeline_len, ) .unwrap(); let total_stake = namada_proof_of_stake::storage::read_total_stake( - &shell.wl_storage, + &shell.state, ¶ms, current_epoch + params.pipeline_len, ) @@ -3913,15 +3853,15 @@ mod test_finalize_block { // Advance to epoch 3 and // 1. Validator self-bond 9123 NAM let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (current_epoch, _) = advance_epoch(&mut shell, &pkh1, &votes, None); tracing::debug!("\nBonding in epoch 3"); let self_bond_1_amount = token::Amount::native_whole(9_123); namada_proof_of_stake::bond_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &val1.address, self_bond_1_amount, @@ -3933,15 +3873,15 @@ mod test_finalize_block { // Advance to epoch 4 // 1. Validator self-unbond 15000 NAM let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (current_epoch, _) = advance_epoch(&mut shell, &pkh1, &votes, None); assert_eq!(current_epoch.0, 4_u64); let self_unbond_2_amount = token::Amount::native_whole(15_000); namada_proof_of_stake::unbond_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &val1.address, self_unbond_2_amount, @@ -3953,8 +3893,8 @@ mod test_finalize_block { // Advance to epoch 5 and // Delegate 8144 NAM to validator let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (current_epoch, _) = advance_epoch(&mut shell, &pkh1, &votes, None); assert_eq!(current_epoch.0, 5_u64); @@ -3963,7 +3903,7 @@ mod test_finalize_block { // Delegate let del_2_amount = token::Amount::native_whole(8_144); namada_proof_of_stake::bond_tokens( - &mut shell.wl_storage, + &mut shell.state, Some(&delegator), &val1.address, del_2_amount, @@ -3976,8 +3916,8 @@ mod test_finalize_block { // Advance to epoch 6 let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (current_epoch, _) = advance_epoch(&mut shell, &pkh1, &votes, None); assert_eq!(current_epoch.0, 6_u64); @@ -3986,12 +3926,8 @@ mod test_finalize_block { // NOTE: Only the type, height, and validator address fields from the // Misbehavior struct are used in Namada let misbehavior_epoch = Epoch(3_u64); - let height = shell - .wl_storage - .storage - .block - .pred_epochs - .first_block_heights[misbehavior_epoch.0 as usize]; + let height = shell.state.in_mem().block.pred_epochs.first_block_heights + [misbehavior_epoch.0 as usize]; let misbehaviors = vec![Misbehavior { kind: MisbehaviorKind::DuplicateVote, validator: Validator { @@ -4003,8 +3939,8 @@ mod test_finalize_block { total_voting_power: 
Default::default(), }]; let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); next_block_for_inflation( &mut shell, @@ -4022,7 +3958,7 @@ mod test_finalize_block { let enqueued_slash = enqueued_slashes_handle() .at(&processing_epoch) .at(&val1.address) - .front(&shell.wl_storage) + .front(&shell.state) .unwrap() .unwrap(); assert_eq!(enqueued_slash.epoch, misbehavior_epoch); @@ -4030,7 +3966,7 @@ mod test_finalize_block { assert_eq!(enqueued_slash.rate, Dec::zero()); let last_slash = namada_proof_of_stake::storage::read_validator_last_slash_epoch( - &shell.wl_storage, + &shell.state, &val1.address, ) .unwrap(); @@ -4039,7 +3975,7 @@ mod test_finalize_block { namada_proof_of_stake::storage::validator_slashes_handle( &val1.address ) - .is_empty(&shell.wl_storage) + .is_empty(&shell.state) .unwrap() ); @@ -4050,12 +3986,8 @@ mod test_finalize_block { // Discover two more misbehaviors, one committed in epoch 3, one in // epoch 4 - let height4 = shell - .wl_storage - .storage - .block - .pred_epochs - .first_block_heights[4]; + let height4 = + shell.state.in_mem().block.pred_epochs.first_block_heights[4]; let misbehaviors = vec![ Misbehavior { kind: MisbehaviorKind::DuplicateVote, @@ -4079,8 +4011,8 @@ mod test_finalize_block { }, ]; let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); next_block_for_inflation( &mut shell, @@ -4096,18 +4028,18 @@ mod test_finalize_block { .at(&processing_epoch.next()) .at(&val1.address); - assert_eq!(enqueued_slashes_8.len(&shell.wl_storage).unwrap(), 2_u64); - assert_eq!(enqueued_slashes_9.len(&shell.wl_storage).unwrap(), 1_u64); + assert_eq!(enqueued_slashes_8.len(&shell.state).unwrap(), 2_u64); + assert_eq!(enqueued_slashes_9.len(&shell.state).unwrap(), 1_u64); let last_slash = namada_proof_of_stake::storage::read_validator_last_slash_epoch( - &shell.wl_storage, + &shell.state, &val1.address, ) .unwrap(); assert_eq!(last_slash, Some(Epoch(4))); assert!( namada_proof_of_stake::is_validator_frozen( - &shell.wl_storage, + &shell.state, &val1.address, current_epoch, ¶ms @@ -4118,13 +4050,13 @@ mod test_finalize_block { namada_proof_of_stake::storage::validator_slashes_handle( &val1.address ) - .is_empty(&shell.wl_storage) + .is_empty(&shell.state) .unwrap() ); let pre_stake_10 = namada_proof_of_stake::storage::read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val1.address, Epoch(10), @@ -4145,26 +4077,26 @@ mod test_finalize_block { // Advance to epoch 9, where the infractions committed in epoch 3 will // be processed let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let _ = advance_epoch(&mut shell, &pkh1, &votes, None); let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (current_epoch, _) = advance_epoch(&mut shell, &pkh1, &votes, None); assert_eq!(current_epoch.0, 9_u64); let val_stake_3 = namada_proof_of_stake::storage::read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val1.address, Epoch(3), ) .unwrap(); let val_stake_4 = namada_proof_of_stake::storage::read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val1.address, Epoch(4), @@ -4172,13 +4104,13 @@ mod test_finalize_block { .unwrap(); let tot_stake_3 = 
namada_proof_of_stake::storage::read_total_stake( - &shell.wl_storage, + &shell.state, ¶ms, Epoch(3), ) .unwrap(); let tot_stake_4 = namada_proof_of_stake::storage::read_total_stake( - &shell.wl_storage, + &shell.state, ¶ms, Epoch(4), ) @@ -4204,9 +4136,9 @@ mod test_finalize_block { namada_proof_of_stake::storage::validator_slashes_handle( &val1.address, ); - assert_eq!(val_slashes.len(&shell.wl_storage).unwrap(), 2u64); + assert_eq!(val_slashes.len(&shell.state).unwrap(), 2u64); let is_rate_good = val_slashes - .iter(&shell.wl_storage) + .iter(&shell.state) .unwrap() .all(|s| equal_enough(s.unwrap().rate, cubic_rate)); assert!(is_rate_good); @@ -4214,7 +4146,7 @@ mod test_finalize_block { // Check the amount of stake deducted from the futuremost epoch while // processing the slashes let post_stake_10 = read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val1.address, Epoch(10), @@ -4266,7 +4198,7 @@ mod test_finalize_block { // Check the balance of the Slash Pool // TODO: finish once implemented // let slash_pool_balance: token::Amount = shell - // .wl_storage + // .state // .read(&slash_balance_key) // .expect("must be able to read") // .unwrap_or_default(); @@ -4280,8 +4212,8 @@ mod test_finalize_block { // Advance to epoch 10, where the infraction committed in epoch 4 will // be processed let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let (current_epoch, _) = advance_epoch(&mut shell, &pkh1, &votes, None); assert_eq!(current_epoch.0, 10_u64); @@ -4289,7 +4221,7 @@ mod test_finalize_block { // Check the balance of the Slash Pool // TODO: finish once implemented // let slash_pool_balance: token::Amount = shell - // .wl_storage + // .state // .read(&slash_balance_key) // .expect("must be able to read") // .unwrap_or_default(); @@ -4316,14 +4248,14 @@ mod test_finalize_block { // ); let val_stake = read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val1.address, current_epoch + params.pipeline_len, )?; let post_stake_10 = read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val1.address, Epoch(10), @@ -4371,17 +4303,17 @@ mod test_finalize_block { for _ in 0..2 { let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); let _ = advance_epoch(&mut shell, &pkh1, &votes, None); } - let current_epoch = shell.wl_storage.storage.block.epoch; + let current_epoch = shell.state.in_mem().block.epoch; assert_eq!(current_epoch.0, 12_u64); tracing::debug!("\nCHECK BOND AND UNBOND DETAILS"); let details = namada_proof_of_stake::queries::bonds_and_unbonds( - &shell.wl_storage, + &shell.state, None, None, ) @@ -4499,7 +4431,7 @@ mod test_finalize_block { // Withdraw the delegation unbonds, which total to 18_000. This should // only be affected by the slashes in epoch 3 let del_withdraw = namada_proof_of_stake::withdraw_tokens( - &mut shell.wl_storage, + &mut shell.state, Some(&delegator), &val1.address, current_epoch, @@ -4518,7 +4450,7 @@ mod test_finalize_block { // TODO: finish once implemented // Check the balance of the Slash Pool // let slash_pool_balance: token::Amount = shell - // .wl_storage + // .state // .read(&slash_balance_key) // .expect("must be able to read") // .unwrap_or_default(); @@ -4532,7 +4464,7 @@ mod test_finalize_block { // Withdraw the self unbonds, which total 154_654 + 15_000 - 9_123. Only // the (15_000 - 9_123) tokens are slashable. 
// let self_withdraw = namada_proof_of_stake::withdraw_tokens( - // &mut shell.wl_storage, + // &mut shell.state, // None, // &val1.address, // current_epoch, @@ -4545,7 +4477,7 @@ mod test_finalize_block { // ); // Check the balance of the Slash Pool // let slash_pool_balance: token::Amount = shell - // .wl_storage + // .state // .read(&slash_balance_key) // .expect("must be able to read") // .unwrap_or_default(); @@ -4575,11 +4507,11 @@ mod test_finalize_block { num_validators, ..Default::default() }); - let params = read_pos_params(&shell.wl_storage).unwrap(); + let params = read_pos_params(&shell.state).unwrap(); let initial_consensus_set: Vec
= read_consensus_validator_set_addresses( - &shell.wl_storage, + &shell.state, Epoch::default(), ) .unwrap() @@ -4587,14 +4519,14 @@ mod test_finalize_block { .collect(); let val1 = initial_consensus_set[0].clone(); let pkh1 = get_pkh_from_address( - &shell.wl_storage, + &shell.state, ¶ms, val1.clone(), Epoch::default(), ); let val2 = initial_consensus_set[1].clone(); let pkh2 = get_pkh_from_address( - &shell.wl_storage, + &shell.state, ¶ms, val2.clone(), Epoch::default(), @@ -4602,7 +4534,7 @@ mod test_finalize_block { let validator_stake = namada_proof_of_stake::storage::read_validator_stake( - &shell.wl_storage, + &shell.state, ¶ms, &val2, Epoch::default(), @@ -4619,8 +4551,8 @@ mod test_finalize_block { // Ensure that there is no liveness data yet since there were no votes let missed_votes = liveness_missed_votes_handle(); let sum_missed_votes = liveness_sum_missed_votes_handle(); - assert!(missed_votes.is_empty(&shell.wl_storage)?); - assert!(sum_missed_votes.is_empty(&shell.wl_storage)?); + assert!(missed_votes.is_empty(&shell.state)?); + assert!(sum_missed_votes.is_empty(&shell.state)?); let minimum_unsigned_blocks = ((Dec::one() - params.liveness_threshold) @@ -4631,8 +4563,8 @@ mod test_finalize_block { // Finalize block 2 and ensure that some data has been written let default_all_votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); next_block_for_inflation( &mut shell, @@ -4640,17 +4572,17 @@ mod test_finalize_block { default_all_votes.clone(), None, ); - assert!(missed_votes.is_empty(&shell.wl_storage)?); + assert!(missed_votes.is_empty(&shell.state)?); for val in &initial_consensus_set { - let sum = sum_missed_votes.get(&shell.wl_storage, val)?; + let sum = sum_missed_votes.get(&shell.state, val)?; assert_eq!(sum, Some(0u64)); } // Completely unbond one of the validator to test the pruning at the // pipeline epoch - let mut current_epoch = shell.wl_storage.storage.block.epoch; + let mut current_epoch = shell.state.in_mem().block.epoch; namada_proof_of_stake::unbond_tokens( - &mut shell.wl_storage, + &mut shell.state, None, &val5, validator_stake, @@ -4658,16 +4590,12 @@ mod test_finalize_block { false, )?; let pipeline_vals = read_consensus_validator_set_addresses( - &shell.wl_storage, + &shell.state, current_epoch + params.pipeline_len, )?; assert_eq!(pipeline_vals.len(), initial_consensus_set.len() - 1); let val5_pipeline_state = validator_state_handle(&val5) - .get( - &shell.wl_storage, - current_epoch + params.pipeline_len, - ¶ms, - )? + .get(&shell.state, current_epoch + params.pipeline_len, ¶ms)? .unwrap(); assert_eq!(val5_pipeline_state, ValidatorState::BelowThreshold); @@ -4682,8 +4610,8 @@ mod test_finalize_block { // NOTE: assume the minimum blocks for jailing is larger than remaining // blocks to next epoch! 
let mut votes_no2 = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); votes_no2.retain(|vote| vote.validator.address != pkh2); @@ -4696,25 +4624,25 @@ mod test_finalize_block { votes_no2.clone(), None, ); - current_epoch = shell.wl_storage.storage.block.epoch; + current_epoch = shell.state.in_mem().block.epoch; val2_num_missed_blocks += 1; } // Checks upon the new epoch for val in &initial_consensus_set { let missed_votes = liveness_missed_votes_handle().at(val); - let sum = sum_missed_votes.get(&shell.wl_storage, val)?; + let sum = sum_missed_votes.get(&shell.state, val)?; if val == &val2 { assert_eq!(sum, Some(val2_num_missed_blocks)); for height in first_height_without_vote ..first_height_without_vote + val2_num_missed_blocks { - assert!(missed_votes.contains(&shell.wl_storage, &height)?); + assert!(missed_votes.contains(&shell.state, &height)?); assert!(sum.unwrap() < minimum_unsigned_blocks); } } else { - assert!(missed_votes.is_empty(&shell.wl_storage)?); + assert!(missed_votes.is_empty(&shell.state)?); assert_eq!(sum, Some(0u64)); } } @@ -4727,30 +4655,29 @@ mod test_finalize_block { votes_no2.clone(), None, ); - if shell.wl_storage.storage.update_epoch_blocks_delay == Some(1) { + if shell.state.in_mem().update_epoch_blocks_delay == Some(1) { break; } } - assert_eq!(shell.wl_storage.storage.block.epoch, current_epoch); + assert_eq!(shell.state.in_mem().block.epoch, current_epoch); let pipeline_vals = read_consensus_validator_set_addresses( - &shell.wl_storage, + &shell.state, current_epoch + params.pipeline_len, )?; assert_eq!(pipeline_vals.len(), initial_consensus_set.len() - 1); let val2_sum_missed_votes = - liveness_sum_missed_votes_handle().get(&shell.wl_storage, &val2)?; + liveness_sum_missed_votes_handle().get(&shell.state, &val2)?; assert_eq!( val2_sum_missed_votes, Some( - shell.wl_storage.storage.block.height.0 - - first_height_without_vote + shell.state.in_mem().block.height.0 - first_height_without_vote ) ); for val in &initial_consensus_set { if val == &val2 { continue; } - let sum = sum_missed_votes.get(&shell.wl_storage, val)?; + let sum = sum_missed_votes.get(&shell.state, val)?; assert_eq!(sum, Some(0u64)); } @@ -4762,16 +4689,16 @@ mod test_finalize_block { votes_no2.clone(), None, ); - current_epoch = shell.wl_storage.storage.block.epoch; + current_epoch = shell.state.in_mem().block.epoch; assert_eq!(current_epoch, Epoch(2)); let val2_sum_missed_votes = - liveness_sum_missed_votes_handle().get(&shell.wl_storage, &val2)?; + liveness_sum_missed_votes_handle().get(&shell.state, &val2)?; assert_eq!(val2_sum_missed_votes, Some(minimum_unsigned_blocks)); // Check the validator sets for all epochs up through the pipeline let consensus_vals = read_consensus_validator_set_addresses( - &shell.wl_storage, + &shell.state, current_epoch, )?; assert_eq!( @@ -4785,7 +4712,7 @@ mod test_finalize_block { ); for offset in 1..=params.pipeline_len { let consensus_vals = read_consensus_validator_set_addresses( - &shell.wl_storage, + &shell.state, current_epoch + offset, )?; assert_eq!( @@ -4793,11 +4720,11 @@ mod test_finalize_block { HashSet::from_iter([val1.clone(), val3.clone(), val4.clone()]) ); let val2_state = validator_state_handle(&val2) - .get(&shell.wl_storage, current_epoch + offset, ¶ms)? + .get(&shell.state, current_epoch + offset, ¶ms)? 
.unwrap(); assert_eq!(val2_state, ValidatorState::Jailed); let val5_state = validator_state_handle(&val5) - .get(&shell.wl_storage, current_epoch + offset, ¶ms)? + .get(&shell.state, current_epoch + offset, ¶ms)? .unwrap(); assert_eq!(val5_state, ValidatorState::BelowThreshold); } @@ -4806,26 +4733,26 @@ mod test_finalize_block { // there, 5 should be removed) for val in &initial_consensus_set { let missed_votes = liveness_missed_votes_handle().at(val); - let sum = sum_missed_votes.get(&shell.wl_storage, val)?; + let sum = sum_missed_votes.get(&shell.state, val)?; if val == &val2 { assert_eq!( sum, Some( - shell.wl_storage.storage.block.height.0 + shell.state.in_mem().block.height.0 - first_height_without_vote ) ); for height in first_height_without_vote - ..shell.wl_storage.storage.block.height.0 + ..shell.state.in_mem().block.height.0 { - assert!(missed_votes.contains(&shell.wl_storage, &height)?); + assert!(missed_votes.contains(&shell.state, &height)?); } } else if val == &val5 { - assert!(missed_votes.is_empty(&shell.wl_storage)?); + assert!(missed_votes.is_empty(&shell.state)?); assert!(sum.is_none()); } else { - assert!(missed_votes.is_empty(&shell.wl_storage)?); + assert!(missed_votes.is_empty(&shell.state)?); assert_eq!(sum, Some(0u64)); } } @@ -4835,8 +4762,8 @@ mod test_finalize_block { let next_epoch = current_epoch.next(); loop { let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None).0; if current_epoch == next_epoch { @@ -4847,9 +4774,9 @@ mod test_finalize_block { // Check that the liveness data only contains data for vals 1, 3, and 4 for val in &initial_consensus_set { let missed_votes = liveness_missed_votes_handle().at(val); - let sum = sum_missed_votes.get(&shell.wl_storage, val)?; + let sum = sum_missed_votes.get(&shell.state, val)?; - assert!(missed_votes.is_empty(&shell.wl_storage)?); + assert!(missed_votes.is_empty(&shell.state)?); if val == &val2 || val == &val5 { assert!(sum.is_none()); } else { @@ -4859,13 +4786,13 @@ mod test_finalize_block { // Validator 2 unjail itself namada_proof_of_stake::unjail_validator( - &mut shell.wl_storage, + &mut shell.state, &val2, current_epoch, )?; let pipeline_epoch = current_epoch + params.pipeline_len; let val2_pipeline_state = validator_state_handle(&val2).get( - &shell.wl_storage, + &shell.state, pipeline_epoch, ¶ms, )?; @@ -4874,8 +4801,8 @@ mod test_finalize_block { // Advance to the pipeline epoch loop { let votes = get_default_true_votes( - &shell.wl_storage, - shell.wl_storage.storage.block.epoch, + &shell.state, + shell.state.in_mem().block.epoch, ); current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None).0; if current_epoch == pipeline_epoch { @@ -4883,11 +4810,11 @@ mod test_finalize_block { } } let sum_liveness = liveness_sum_missed_votes_handle(); - assert_eq!(sum_liveness.get(&shell.wl_storage, &val1)?, Some(0u64)); - assert_eq!(sum_liveness.get(&shell.wl_storage, &val2)?, None); - assert_eq!(sum_liveness.get(&shell.wl_storage, &val3)?, Some(0u64)); - assert_eq!(sum_liveness.get(&shell.wl_storage, &val4)?, Some(0u64)); - assert_eq!(sum_liveness.get(&shell.wl_storage, &val5)?, None); + assert_eq!(sum_liveness.get(&shell.state, &val1)?, Some(0u64)); + assert_eq!(sum_liveness.get(&shell.state, &val2)?, None); + assert_eq!(sum_liveness.get(&shell.state, &val3)?, Some(0u64)); + assert_eq!(sum_liveness.get(&shell.state, &val4)?, Some(0u64)); + 
assert_eq!(sum_liveness.get(&shell.state, &val5)?, None); Ok(()) } @@ -4924,15 +4851,15 @@ mod test_finalize_block { consensus_votes: &[VoteInfo], misbehaviors: Option>, ) -> (Epoch, token::Amount) { - let current_epoch = shell.wl_storage.storage.block.epoch; + let current_epoch = shell.state.in_mem().block.epoch; let staking_token = - namada_proof_of_stake::staking_token_address(&shell.wl_storage); + namada_proof_of_stake::staking_token_address(&shell.state); // NOTE: assumed that the only change in pos address balance by // advancing to the next epoch is minted inflation - no change occurs // due to slashing let pos_balance_pre = shell - .wl_storage + .state .read::(&token::storage_key::balance_key( &staking_token, &pos_address, @@ -4946,12 +4873,12 @@ mod test_finalize_block { consensus_votes.to_owned(), misbehaviors.clone(), ); - if shell.wl_storage.storage.block.epoch == current_epoch.next() { + if shell.state.in_mem().block.epoch == current_epoch.next() { break; } } let pos_balance_post = shell - .wl_storage + .state .read::(&token::storage_key::balance_key( &staking_token, &pos_address, @@ -4960,7 +4887,7 @@ mod test_finalize_block { .unwrap_or_default(); ( - shell.wl_storage.storage.block.epoch, + shell.state.in_mem().block.epoch, pos_balance_post - pos_balance_pre, ) } @@ -4972,7 +4899,7 @@ mod test_finalize_block { setup_at_height(3u64); let proposal_execution_key = get_proposal_execution_key(0); shell - .wl_storage + .state .write(&proposal_execution_key, 0u64) .expect("Test failed."); let mut tx = Tx::new(shell.chain_id.clone(), None); @@ -4981,21 +4908,22 @@ mod test_finalize_block { NonZeroU64::new_unchecked(42) }); shell - .wl_storage + .state .write(&min_confirmations_key(), new_min_confirmations) .expect("Test failed"); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), - ); + )); let keys_changed = BTreeSet::from([min_confirmations_key()]); let verifiers = BTreeSet::default(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = namada::ledger::native_vp::Ctx::new( shell.mode.get_validator_address().expect("Test failed"), - &shell.wl_storage.storage, - &shell.wl_storage.write_log, + shell.state.read_only(), &tx, &TxIndex(0), - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -5010,7 +4938,7 @@ mod test_finalize_block { let mut req = FinalizeBlock::default(); req.header.time = namada::core::time::DateTimeUtc::now(); let current_decision_height = shell.get_current_decision_height(); - if let Some(b) = shell.wl_storage.storage.last_block.as_mut() { + if let Some(b) = shell.state.in_mem_mut().last_block.as_mut() { b.height = current_decision_height + 11; } shell.finalize_block(req).expect("Test failed"); @@ -5018,17 +4946,17 @@ mod test_finalize_block { let consensus_set: Vec = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, Epoch::default(), ) .unwrap() .into_iter() .collect(); - let params = read_pos_params(&shell.wl_storage).unwrap(); + let params = read_pos_params(&shell.state).unwrap(); let val1 = consensus_set[0].clone(); let pkh1 = get_pkh_from_address( - &shell.wl_storage, + &shell.state, ¶ms, val1.address.clone(), Epoch::default(), diff --git a/crates/apps/src/lib/node/ledger/shell/governance.rs b/crates/apps/src/lib/node/ledger/shell/governance.rs index 36ff6e7cf4..8ad7b602dc 100644 --- a/crates/apps/src/lib/node/ledger/shell/governance.rs +++ 
b/crates/apps/src/lib/node/ledger/shell/governance.rs @@ -1,3 +1,4 @@ +use std::cell::RefCell; use std::collections::HashMap; use namada::core::address::Address; @@ -68,41 +69,37 @@ where let proposal_author_key = gov_storage::get_author_key(id); let funds: token::Amount = - force_read(&shell.wl_storage, &proposal_funds_key)?; + force_read(&shell.state, &proposal_funds_key)?; let proposal_end_epoch: Epoch = - force_read(&shell.wl_storage, &proposal_end_epoch_key)?; + force_read(&shell.state, &proposal_end_epoch_key)?; let proposal_type: ProposalType = - force_read(&shell.wl_storage, &proposal_type_key)?; + force_read(&shell.state, &proposal_type_key)?; let proposal_author: Address = - force_read(&shell.wl_storage, &proposal_author_key)?; + force_read(&shell.state, &proposal_author_key)?; - let is_steward = pgf::is_steward(&shell.wl_storage, &proposal_author)?; + let is_steward = pgf::is_steward(&shell.state, &proposal_author)?; - let params = read_pos_params(&shell.wl_storage)?; + let params = read_pos_params(&shell.state)?; let total_voting_power = - read_total_stake(&shell.wl_storage, ¶ms, proposal_end_epoch)?; + read_total_stake(&shell.state, ¶ms, proposal_end_epoch)?; let tally_type = TallyType::from(proposal_type.clone(), is_steward); let votes = compute_proposal_votes( - &shell.wl_storage, + &shell.state, ¶ms, id, proposal_end_epoch, )?; let proposal_result = compute_proposal_result(votes, total_voting_power, tally_type); - gov_api::write_proposal_result( - &mut shell.wl_storage, - id, - proposal_result, - )?; + gov_api::write_proposal_result(&mut shell.state, id, proposal_result)?; let transfer_address = match proposal_result.result { TallyResult::Passed => { let proposal_event = match proposal_type { ProposalType::Default(_) => { let proposal_code = - gov_api::get_proposal_code(&shell.wl_storage, id)?; + gov_api::get_proposal_code(&shell.state, id)?; let result = execute_default_proposal( shell, id, @@ -129,7 +126,7 @@ where } ProposalType::PGFSteward(stewards) => { let result = execute_pgf_steward_proposal( - &mut shell.wl_storage, + &mut shell.state, stewards, )?; tracing::info!( @@ -142,10 +139,9 @@ where .into() } ProposalType::PGFPayment(payments) => { - let native_token = - &shell.wl_storage.get_native_token()?; + let native_token = &shell.state.get_native_token()?; let result = execute_pgf_funding_proposal( - &mut shell.wl_storage, + &mut shell.state, native_token, payments, id, @@ -157,15 +153,13 @@ where ); for ibc_event in - shell.wl_storage.write_log_mut().take_ibc_events() + shell.state.write_log_mut().take_ibc_events() { let mut event = Event::from(ibc_event.clone()); // Add the height for IBC event query - let height = shell - .wl_storage - .storage - .get_last_block_height() - + 1; + let height = + shell.state.in_mem().get_last_block_height() + + 1; event["height"] = height.to_string(); events.emit(event); } @@ -177,13 +171,13 @@ where events.emit(proposal_event); proposals_result.passed.push(id); - gov_api::get_proposal_author(&shell.wl_storage, id)? + gov_api::get_proposal_author(&shell.state, id)? 
} TallyResult::Rejected => { if let ProposalType::PGFPayment(_) = proposal_type { if proposal_result.two_thirds_nay_over_two_thirds_total() { pgf::remove_steward( - &mut shell.wl_storage, + &mut shell.state, &proposal_author, )?; @@ -211,10 +205,10 @@ where } }; - let native_token = shell.wl_storage.get_native_token()?; + let native_token = shell.state.get_native_token()?; if let Some(address) = transfer_address { token::transfer( - &mut shell.wl_storage, + &mut shell.state, &native_token, &gov_address, &address, @@ -222,7 +216,7 @@ where )?; } else { token::burn_tokens( - &mut shell.wl_storage, + &mut shell.state, &native_token, &gov_address, funds, @@ -306,7 +300,7 @@ where { if let Some(code) = proposal_code { let pending_execution_key = gov_storage::get_proposal_execution_key(id); - shell.wl_storage.write(&pending_execution_key, ())?; + shell.state.write(&pending_execution_key, ())?; let mut tx = Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted)); tx.header.chain_id = shell.chain_id.clone(); @@ -319,28 +313,27 @@ where * based on the code size. We dont * need it here. */ TxIndex::default(), - &mut TxGasMeter::new_from_sub_limit(u64::MAX.into()), /* No gas limit for governance proposal */ - &mut shell.wl_storage, + &RefCell::new(TxGasMeter::new_from_sub_limit(u64::MAX.into())), /* No gas limit for governance proposal */ + &mut shell.state, &mut shell.vp_wasm_cache, &mut shell.tx_wasm_cache, None, ); shell - .wl_storage - .storage + .state .delete(&pending_execution_key) .expect("Should be able to delete the storage."); match tx_result { Ok(tx_result) => { if tx_result.is_accepted() { - shell.wl_storage.commit_tx(); + shell.state.commit_tx(); Ok(true) } else { Ok(false) } } Err(_) => { - shell.wl_storage.drop_tx(); + shell.state.drop_tx(); Ok(false) } } @@ -379,7 +372,7 @@ where } fn execute_pgf_funding_proposal( - storage: &mut WlStorage, + state: &mut WlState, token: &Address, fundings: BTreeSet, proposal_id: u64, @@ -393,7 +386,7 @@ where PGFAction::Continuous(action) => match action { AddRemove::Add(target) => { pgf_storage::fundings_handle().insert( - storage, + state, target.target().clone(), StoragePgfFunding::new(target.clone(), proposal_id), )?; @@ -407,7 +400,7 @@ where } AddRemove::Remove(target) => { pgf_storage::fundings_handle() - .remove(storage, &target.target())?; + .remove(state, &target.target())?; tracing::info!( "Removed ContinousPgf from proposal id {}: set {} to \ {}.", @@ -420,14 +413,14 @@ where PGFAction::Retro(target) => { let result = match &target { PGFTarget::Internal(target) => token::transfer( - storage, + state, token, &ADDRESS, &target.target, target.amount, ), PGFTarget::Ibc(target) => { - ibc::transfer_over_ibc(storage, token, &ADDRESS, target) + ibc::transfer_over_ibc(state, token, &ADDRESS, target) } }; match result { diff --git a/crates/apps/src/lib/node/ledger/shell/init_chain.rs b/crates/apps/src/lib/node/ledger/shell/init_chain.rs index 6da33496db..c109aca57e 100644 --- a/crates/apps/src/lib/node/ledger/shell/init_chain.rs +++ b/crates/apps/src/lib/node/ledger/shell/init_chain.rs @@ -89,7 +89,7 @@ where #[cfg(any(test, feature = "testing"))] _num_validators: u64, ) -> Result { let mut response = response::InitChain::default(); - let chain_id = self.wl_storage.storage.chain_id.as_str(); + let chain_id = self.state.in_mem().chain_id.as_str(); if chain_id != init.chain_id.as_str() { return Err(Error::ChainId(format!( "Current chain ID: {}, Tendermint chain ID: {}", @@ -122,7 +122,7 @@ where { // update the native token from the genesis file 
let native_token = genesis.get_native_token().clone(); - self.wl_storage.storage.native_token = native_token; + self.state.in_mem_mut().native_token = native_token; } let mut validation = InitChainValidation::new(self, false); validation.run( @@ -140,22 +140,20 @@ where let anchor = empty_commitment_tree.root(); let note_commitment_tree_key = token::storage_key::masp_commitment_tree_key(); - self.wl_storage + self.state .write(¬e_commitment_tree_key, empty_commitment_tree) .unwrap(); let commitment_tree_anchor_key = token::storage_key::masp_commitment_anchor_key(anchor); - self.wl_storage - .write(&commitment_tree_anchor_key, ()) - .unwrap(); + self.state.write(&commitment_tree_anchor_key, ()).unwrap(); // Init masp convert anchor let convert_anchor_key = token::storage_key::masp_convert_anchor_key(); - self.wl_storage.write( + self.state.write( &convert_anchor_key, namada::core::hash::Hash( bls12_381::Scalar::from( - self.wl_storage.storage.conversion_state.tree.root(), + self.state.in_mem().conversion_state.tree.root(), ) .to_bytes(), ), @@ -202,19 +200,19 @@ where // Initialize protocol parameters let parameters = genesis.get_chain_parameters(&self.wasm_dir); self.store_wasms(¶meters)?; - parameters::init_storage(¶meters, &mut self.wl_storage).unwrap(); + parameters::init_storage(¶meters, &mut self.state).unwrap(); // Initialize governance parameters let gov_params = genesis.get_gov_params(); - gov_params.init_storage(&mut self.wl_storage).unwrap(); + gov_params.init_storage(&mut self.state).unwrap(); // configure the Ethereum bridge if the configuration is set. if let Some(config) = genesis.get_eth_bridge_params() { tracing::debug!("Initializing Ethereum bridge storage."); - config.init_storage(&mut self.wl_storage); + config.init_storage(&mut self.state); self.update_eth_oracle(&Default::default()); } else { - self.wl_storage + self.state .write( &namada::eth_bridge::storage::active_key(), EthBridgeStatus::Disabled, @@ -223,16 +221,16 @@ where } // Depends on parameters being initialized - self.wl_storage - .storage + self.state + .in_mem_mut() .init_genesis_epoch(initial_height, genesis_time, ¶meters) .expect("Initializing genesis epoch must not fail"); // PoS system depends on epoch being initialized let pos_params = genesis.get_pos_params(); - let (current_epoch, _gas) = self.wl_storage.storage.get_current_epoch(); + let (current_epoch, _gas) = self.state.in_mem().get_current_epoch(); pos::namada_proof_of_stake::init_genesis( - &mut self.wl_storage, + &mut self.state, &pos_params, current_epoch, ) @@ -241,7 +239,7 @@ where // PGF parameters let pgf_params = genesis.get_pgf_params(); pgf_params - .init_storage(&mut self.wl_storage) + .init_storage(&mut self.state) .expect("Should be able to initialized PGF at genesis"); // Loaded VP code cache to avoid loading the same files multiple times @@ -258,19 +256,19 @@ where self.apply_genesis_txs_bonds(&genesis); pos::namada_proof_of_stake::compute_and_store_total_consensus_stake( - &mut self.wl_storage, + &mut self.state, current_epoch, ) .expect("Could not compute total consensus stake at genesis"); // This has to be done after `apply_genesis_txs_validator_account` pos::namada_proof_of_stake::copy_genesis_validator_sets( - &mut self.wl_storage, + &mut self.state, &pos_params, current_epoch, ) .expect("Must be able to copy PoS genesis validator sets"); - ibc::init_genesis_storage(&mut self.wl_storage); + ibc::init_genesis_storage(&mut self.state); ControlFlow::Continue(()) } @@ -392,15 +390,13 @@ where let hash_key = Key::wasm_hash(name); 
let code_name_key = Key::wasm_code_name(name.to_owned()); - self.wl_storage.write_bytes(&code_key, code).unwrap(); - self.wl_storage.write(&code_len_key, code_len).unwrap(); - self.wl_storage.write_bytes(&hash_key, code_hash).unwrap(); + self.state.write_bytes(&code_key, code).unwrap(); + self.state.write(&code_len_key, code_len).unwrap(); + self.state.write_bytes(&hash_key, code_hash).unwrap(); if &Some(code_hash) == implicit_vp_code_hash { is_implicit_vp_stored = true; } - self.wl_storage - .write_bytes(&code_name_key, code_hash) - .unwrap(); + self.state.write_bytes(&code_name_key, code_hash).unwrap(); } else { tracing::warn!("The wasm {name} isn't allowed."); self.warn(Warning::DisallowedWasm(name.to_string())); @@ -430,10 +426,10 @@ where config: TokenConfig { denom, masp_params }, } = token; // associate a token with its denomination. - write_denom(&mut self.wl_storage, address, *denom).unwrap(); + write_denom(&mut self.state, address, *denom).unwrap(); namada::token::write_params( masp_params, - &mut self.wl_storage, + &mut self.state, address, denom, ) @@ -442,8 +438,8 @@ where // add token addresses to the masp reward conversions lookup // table. let alias = alias.to_string(); - self.wl_storage - .storage + self.state + .in_mem_mut() .conversion_state .tokens .insert(alias, address.clone()); @@ -474,7 +470,7 @@ where for (owner, balance) in balances { if let genesis::GenesisAddress::PublicKey(pk) = owner { namada::account::init_account_storage( - &mut self.wl_storage, + &mut self.state, &owner.address(), std::slice::from_ref(&pk.raw), 1, @@ -488,7 +484,7 @@ where owner, ); credit_tokens( - &mut self.wl_storage, + &mut self.state, token_address, &owner.address(), balance.amount(), @@ -497,7 +493,7 @@ where total_token_balance += balance.amount(); } // Write the total amount of tokens for the ratio - self.wl_storage + self.state .write( &token::storage_key::minted_balance_key(token_address), total_token_balance, @@ -530,14 +526,14 @@ where ); let vp_code = self.lookup_vp(vp, genesis, vp_cache)?; let code_hash = CodeHash::sha256(&vp_code); - self.wl_storage + self.state .write_bytes(&Key::validity_predicate(address), code_hash) .unwrap(); let public_keys: Vec<_> = public_keys.iter().map(|pk| pk.raw.clone()).collect(); namada::account::init_account_storage( - &mut self.wl_storage, + &mut self.state, address, &public_keys, *threshold, @@ -587,18 +583,18 @@ where let vp_code = self.lookup_vp(vp, genesis, vp_cache)?; let code_hash = CodeHash::sha256(&vp_code); - self.wl_storage + self.state .write_bytes(&Key::validity_predicate(address), code_hash) .expect("Unable to write user VP"); - self.wl_storage + self.state .write(&protocol_pk_key(address), &protocol_key.pk.raw) .expect("Unable to set genesis user protocol public key"); // TODO: replace pos::init_genesis validators arg with // init_genesis_validator from here if let Err(err) = pos::namada_proof_of_stake::become_validator( - &mut self.wl_storage, + &mut self.state, BecomeValidator { params, address, @@ -630,7 +626,7 @@ where /// Apply genesis txs to transfer tokens fn apply_genesis_txs_bonds(&mut self, genesis: &genesis::chain::Finalized) { - let (current_epoch, _gas) = self.wl_storage.storage.get_current_epoch(); + let (current_epoch, _gas) = self.state.in_mem().get_current_epoch(); if let Some(txs) = &genesis.transactions.bond { for BondTx { source, @@ -646,7 +642,7 @@ where ); if let Err(err) = pos::namada_proof_of_stake::bond_tokens( - &mut self.wl_storage, + &mut self.state, Some(&source.address()), validator, amount.amount(), 
@@ -957,9 +953,8 @@ mod test { // Collect all storage key-vals into a sorted map let store_block_state = |shell: &TestShell| -> BTreeMap<_, _> { shell - .wl_storage - .storage - .db + .state + .db() .iter_prefix(None) .map(|(key, val, _gas)| (key, val)) .collect() @@ -1136,15 +1131,13 @@ mod test { }; // Initialize governance parameters let gov_params = genesis.get_gov_params(); - gov_params - .init_storage(&mut initializer.wl_storage) - .unwrap(); + gov_params.init_storage(&mut initializer.state).unwrap(); // PoS system depends on epoch being initialized let pos_params = genesis.get_pos_params(); let (current_epoch, _gas) = - initializer.wl_storage.storage.get_current_epoch(); + initializer.state.in_mem().get_current_epoch(); pos::namada_proof_of_stake::init_genesis( - &mut initializer.wl_storage, + &mut initializer.state, &pos_params, current_epoch, ) diff --git a/crates/apps/src/lib/node/ledger/shell/mod.rs b/crates/apps/src/lib/node/ledger/shell/mod.rs index d0d12ac014..4d9d8d2dac 100644 --- a/crates/apps/src/lib/node/ledger/shell/mod.rs +++ b/crates/apps/src/lib/node/ledger/shell/mod.rs @@ -10,8 +10,10 @@ mod finalize_block; mod governance; mod init_chain; pub use init_chain::InitChainValidation; +use namada_sdk::state::StateRead; use namada_sdk::tx::data::GasLimit; pub mod prepare_proposal; +use namada::state::State; pub mod process_proposal; pub(super) mod queries; mod stats; @@ -21,6 +23,7 @@ pub mod testing; pub mod utils; mod vote_extensions; +use std::cell::RefCell; use std::collections::{BTreeSet, HashSet}; use std::convert::{TryFrom, TryInto}; use std::path::{Path, PathBuf}; @@ -54,11 +57,9 @@ use namada::ledger::{parameters, protocol}; use namada::parameters::validate_tx_bytes; use namada::proof_of_stake::storage::read_pos_params; use namada::state::tx_queue::{ExpiredTx, TxInQueue}; -use namada::state::wl_storage::WriteLogAndStorage; -use namada::state::write_log::WriteLog; use namada::state::{ - DBIter, Sha256Hasher, State, StorageHasher, StorageRead, TempWlStorage, - WlStorage, DB, EPOCH_SWITCH_BLOCKS_DELAY, + DBIter, FullAccessState, Sha256Hasher, StorageHasher, StorageRead, + TempWlState, WlState, DB, EPOCH_SWITCH_BLOCKS_DELAY, }; use namada::token; pub use namada::tx::data::ResultCode; @@ -331,7 +332,7 @@ where /// The id of the current chain pub chain_id: ChainId, /// The persistent storage with write log - pub wl_storage: WlStorage, + pub state: FullAccessState, /// Path to the base directory with DB data and configs #[allow(dead_code)] pub(crate) base_dir: PathBuf, @@ -431,20 +432,14 @@ where }; // load last state from storage - let mut storage = State::open( + let state = FullAccessState::open( db_path, + db_cache, chain_id.clone(), native_token, - db_cache, config.shell.storage_read_past_height_limit, is_merklized_storage_key, ); - storage - .load_last_state() - .map_err(|e| { - tracing::error!("Cannot load the last state from the DB {}", e); - }) - .expect("PersistentStorage cannot be initialized"); let vp_wasm_cache_dir = base_dir.join(chain_id.as_str()).join("vp_wasm_cache"); let tx_wasm_cache_dir = @@ -513,13 +508,9 @@ where TendermintMode::Seed => ShellMode::Seed, }; - let wl_storage = WlStorage { - storage, - write_log: WriteLog::default(), - }; let mut shell = Self { chain_id, - wl_storage, + state, base_dir, wasm_dir, mode, @@ -555,7 +546,7 @@ where /// Iterate over the wrapper txs in order #[allow(dead_code)] fn iter_tx_queue(&mut self) -> impl Iterator { - self.wl_storage.storage.tx_queue.iter() + self.state.in_mem().tx_queue.iter() } /// Load the Merkle 
root hash and the height of the last committed block, if @@ -565,7 +556,7 @@ where last_block_height: tendermint::block::Height::from(0_u32), ..Default::default() }; - let result = self.wl_storage.storage.get_state(); + let result = self.state.in_mem().get_state(); match result { Some((root, height)) => { @@ -595,7 +586,7 @@ where where T: Clone + BorshDeserialize, { - let result = self.wl_storage.storage.read(key); + let result = self.state.db_read(key); match result { Ok((bytes, _gas)) => match bytes { @@ -611,7 +602,7 @@ where /// Read the bytes for a storage key dropping any error pub fn read_storage_key_bytes(&self, key: &Key) -> Option> { - let result = self.wl_storage.storage.read(key); + let result = self.state.db_read(key); match result { Ok((bytes, _gas)) => bytes, @@ -624,7 +615,7 @@ where &self, current_epoch: namada_sdk::storage::Epoch, ) -> namada_sdk::storage::Epoch { - if let Some(delay) = self.wl_storage.storage.update_epoch_blocks_delay { + if let Some(delay) = self.state.in_mem().update_epoch_blocks_delay { if delay == EPOCH_SWITCH_BLOCKS_DELAY { // If we're about to update validator sets for the // upcoming epoch, we can still remove the validator @@ -649,18 +640,18 @@ where ..Default::default() }; // commit block's data from write log and store the in DB - self.wl_storage.commit_block().unwrap_or_else(|e| { + self.state.commit_block().unwrap_or_else(|e| { tracing::error!( "Encountered a storage error while committing a block {:?}", e ) }); - let root = self.wl_storage.storage.merkle_root(); + let root = self.state.in_mem().merkle_root(); tracing::info!( "Committed block hash: {}, height: {}", root, - self.wl_storage.storage.get_last_block_height(), + self.state.in_mem().get_last_block_height(), ); response.data = root.0.to_vec().into(); @@ -691,7 +682,7 @@ where block is {}", eth_height ); - self.wl_storage.storage.ethereum_height = Some(eth_height); + self.state.in_mem_mut().ethereum_height = Some(eth_height); } None => tracing::info!( "Ethereum oracle has not yet fully processed any Ethereum \ @@ -737,8 +728,8 @@ where fn broadcast_expired_txs(&mut self) { let eth_events = { let mut events: Vec<_> = self - .wl_storage - .storage + .state + .in_mem_mut() .expired_txs_queue .drain() .map(|expired_tx| match expired_tx { @@ -794,7 +785,7 @@ where // for the first time ever, in which case the chain hasn't been // initialized yet. 
let has_key = self - .wl_storage + .state .has_key(&namada::eth_bridge::storage::active_key()) .expect( "We should always be able to check whether a key exists \ @@ -807,33 +798,32 @@ where ); return; } - let Some(config) = EthereumOracleConfig::read(&self.wl_storage) else { + let Some(config) = EthereumOracleConfig::read(&self.state) else { tracing::info!("Not starting oracle as the Ethereum bridge config couldn't be found in storage"); return; }; - let active = - if !self.wl_storage.ethbridge_queries().is_bridge_active() { - if !changed_keys - .contains(&namada::eth_bridge::storage::active_key()) - { - tracing::info!( - "Not starting oracle as the Ethereum bridge is \ - disabled" - ); - return; - } else { - tracing::info!( - "Disabling oracle as the bridge has been disabled" - ); - false - } + let active = if !self.state.ethbridge_queries().is_bridge_active() { + if !changed_keys + .contains(&namada::eth_bridge::storage::active_key()) + { + tracing::info!( + "Not starting oracle as the Ethereum bridge is \ + disabled" + ); + return; } else { - true - }; + tracing::info!( + "Disabling oracle as the bridge has been disabled" + ); + false + } + } else { + true + }; let start_block = self - .wl_storage - .storage + .state + .in_mem() .ethereum_height .clone() .unwrap_or(config.eth_start_height); @@ -893,7 +883,7 @@ where // // NB: always keep this as the first tx check, // as it is a pretty cheap one - if !validate_tx_bytes(&self.wl_storage, tx_bytes.len()) + if !validate_tx_bytes(&self.state, tx_bytes.len()) .expect("Failed to get max tx bytes param from storage") { response.code = ResultCode::TooLarge.into(); @@ -925,8 +915,7 @@ where // Tx expiration if let Some(exp) = tx.header.expiration { let last_block_timestamp = self - .wl_storage - .storage + .state .get_last_block_timestamp() .expect("Failed to retrieve last block timestamp"); @@ -977,9 +966,9 @@ where ethereum_tx_data_variants::EthEventsVext::try_from(&tx), ); if let Err(err) = validate_eth_events_vext( - &self.wl_storage, + &self.state, &ext.0, - self.wl_storage.storage.get_last_block_height(), + self.state.in_mem().get_last_block_height(), ) { response.code = ResultCode::InvalidVoteExtension.into(); response.log = format!( @@ -999,9 +988,9 @@ where ), ); if let Err(err) = validate_bp_roots_vext( - &self.wl_storage, + &self.state, &ext.0, - self.wl_storage.storage.get_last_block_height(), + self.state.in_mem().get_last_block_height(), ) { response.code = ResultCode::InvalidVoteExtension.into(); response.log = format!( @@ -1021,7 +1010,7 @@ where ), ); if let Err(err) = validate_valset_upd_vext( - &self.wl_storage, + &self.state, &ext, // n.b. only accept validator set updates // issued at the last committed epoch @@ -1031,7 +1020,7 @@ where // committed to storage, so `last_epoch` // reflects the current value of the // epoch. 
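Throughout these hunks the old `wl_storage.storage` accesses split into two explicit views: `in_mem()` for shared reads of the in-memory block state and `in_mem_mut()` for mutations, with DB-backed lookups going through the state handle itself. A minimal sketch of that convention, assuming a shell carrying the new `state` field; `eth_height` stands in for the height reported by the oracle:

// Read-only view of the in-memory block state.
let last_height = shell.state.in_mem().get_last_block_height();
// Mutable view for in-memory updates, e.g. recording the Ethereum oracle height.
shell.state.in_mem_mut().ethereum_height = Some(eth_height);
// DB-backed key lookups go through the state handle directly.
let bridge_initialized = shell
    .state
    .has_key(&namada::eth_bridge::storage::active_key())
    .expect("storage read must not fail");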
- self.wl_storage.storage.last_epoch, + self.state.in_mem().last_epoch, ) { response.code = ResultCode::InvalidVoteExtension.into(); response.log = format!( @@ -1066,8 +1055,7 @@ where // Max block gas let block_gas_limit: Gas = Gas::from_whole_units( - namada::parameters::get_max_block_gas(&self.wl_storage) - .unwrap(), + namada::parameters::get_max_block_gas(&self.state).unwrap(), ); if gas_meter.tx_gas_limit > block_gas_limit { response.code = ResultCode::AllocationError.into(); @@ -1080,8 +1068,7 @@ where // Replay protection check let inner_tx_hash = tx.raw_header_hash(); if self - .wl_storage - .storage + .state .has_replay_protection_entry(&tx.raw_header_hash()) .expect("Error while checking inner tx hash key in storage") { @@ -1097,14 +1084,9 @@ where let tx = Tx::try_from(tx_bytes) .expect("Deserialization shouldn't fail"); let wrapper_hash = &tx.header_hash(); - if self - .wl_storage - .storage - .has_replay_protection_entry(wrapper_hash) - .expect( - "Error while checking wrapper tx hash key in storage", - ) - { + if self.state.has_replay_protection_entry(wrapper_hash).expect( + "Error while checking wrapper tx hash key in storage", + ) { response.code = ResultCode::ReplayTx.into(); response.log = format!( "{INVALID_MSG}: Wrapper transaction hash {} already \ @@ -1118,7 +1100,7 @@ where if let Err(e) = mempool_fee_check( &wrapper, get_fee_unshielding_transaction(&tx, &wrapper), - &mut TempWlStorage::new(&self.wl_storage.storage), + &mut self.state.with_temp_write_log(), &mut self.vp_wasm_cache.clone(), &mut self.tx_wasm_cache.clone(), ) { @@ -1161,9 +1143,9 @@ where { use namada::ledger::pos::namada_proof_of_stake; - let (current_epoch, _gas) = self.wl_storage.storage.get_current_epoch(); + let (current_epoch, _gas) = self.state.in_mem().get_current_epoch(); let pos_params = - namada_proof_of_stake::storage::read_pos_params(&self.wl_storage) + namada_proof_of_stake::storage::read_pos_params(&self.state) .expect("Could not find the PoS parameters"); let validator_set_update_fn = if is_genesis { @@ -1173,7 +1155,7 @@ where }; validator_set_update_fn( - &self.wl_storage, + &self.state, &pos_params, current_epoch, |update| { @@ -1198,23 +1180,22 @@ where /// Retrieves the [`BlockHeight`] that is currently being decided. #[inline] pub fn get_current_decision_height(&self) -> BlockHeight { - self.wl_storage.get_current_decision_height() + self.state.get_current_decision_height() } /// Check if we are at a given [`BlockHeight`] offset, `height_offset`, /// within the current epoch. pub fn is_deciding_offset_within_epoch(&self, height_offset: u64) -> bool { - self.wl_storage - .is_deciding_offset_within_epoch(height_offset) + self.state.is_deciding_offset_within_epoch(height_offset) } } /// Checks that neither the wrapper nor the inner transaction have already -/// been applied. Requires a [`TempWlStorage`] to perform the check during +/// been applied. 
Requires a [`TempWlState`] to perform the check during /// block construction and validation pub fn replay_protection_checks( wrapper: &Tx, - temp_wl_storage: &mut TempWlStorage, + temp_state: &mut TempWlState, ) -> Result<()> where D: DB + for<'iter> DBIter<'iter> + Sync + 'static, @@ -1223,7 +1204,7 @@ where let inner_tx_hash = wrapper.raw_header_hash(); // Check the inner tx hash only against the storage, skip the write // log - if temp_wl_storage + if temp_state .has_committed_replay_protection_entry(&inner_tx_hash) .expect("Error while checking inner tx hash key in storage") { @@ -1234,7 +1215,7 @@ where } let wrapper_hash = wrapper.header_hash(); - if temp_wl_storage + if temp_state .has_replay_protection_entry(&wrapper_hash) .expect("Error while checking wrapper tx hash key in storage") { @@ -1245,7 +1226,7 @@ where } // Write wrapper hash to WAL - temp_wl_storage + temp_state .write_tx_hash(wrapper_hash) .map_err(|e| Error::ReplayAttempt(e.to_string())) } @@ -1254,7 +1235,7 @@ where fn mempool_fee_check( wrapper: &WrapperTx, masp_transaction: Option, - temp_wl_storage: &mut TempWlStorage, + temp_state: &mut TempWlState, vp_wasm_cache: &mut VpCache, tx_wasm_cache: &mut TxCache, ) -> Result<()> @@ -1264,7 +1245,7 @@ where CA: 'static + WasmCacheAccess + Sync, { let minimum_gas_price = namada::ledger::parameters::read_gas_cost( - temp_wl_storage, + temp_state, &wrapper.fee.token, ) .expect("Must be able to read gas cost parameter") @@ -1277,11 +1258,11 @@ where wrapper, masp_transaction, minimum_gas_price, - temp_wl_storage, + temp_state, vp_wasm_cache, tx_wasm_cache, )?; - protocol::check_fees(temp_wl_storage, wrapper).map_err(Error::TxApply) + protocol::check_fees(temp_state, wrapper).map_err(Error::TxApply) } /// Check the validity of the fee payment, including the minimum amounts @@ -1290,7 +1271,7 @@ pub fn wrapper_fee_check( wrapper: &WrapperTx, masp_transaction: Option, minimum_gas_price: token::Amount, - temp_wl_storage: &mut TempWlStorage, + temp_state: &mut TempWlState, vp_wasm_cache: &mut VpCache, tx_wasm_cache: &mut TxCache, ) -> Result<()> @@ -1302,7 +1283,7 @@ where match token::denom_to_amount( wrapper.fee.amount_per_gas_unit, &wrapper.fee.token, - temp_wl_storage, + temp_state, ) { Ok(amount_per_gas_unit) if amount_per_gas_unit < minimum_gas_price => { // The fees do not match the minimum required @@ -1328,7 +1309,7 @@ where fee_unshielding_validation( wrapper, transaction, - temp_wl_storage, + temp_state, vp_wasm_cache, tx_wasm_cache, )?; @@ -1341,7 +1322,7 @@ where fn fee_unshielding_validation( wrapper: &WrapperTx, masp_transaction: Transaction, - temp_wl_storage: &mut TempWlStorage, + temp_state: &mut TempWlState, vp_wasm_cache: &mut VpCache, tx_wasm_cache: &mut TxCache, ) -> Result<()> @@ -1355,9 +1336,9 @@ where // further validation // Validate data and generate unshielding tx - let transfer_code_hash = get_transfer_hash_from_storage(temp_wl_storage); + let transfer_code_hash = get_transfer_hash_from_storage(temp_state); - let descriptions_limit = temp_wl_storage + let descriptions_limit = temp_state .read( ¶meters::storage::get_fee_unshielding_descriptions_limit_key(), ) @@ -1373,7 +1354,7 @@ where ) .map_err(|e| Error::TxApply(protocol::Error::FeeUnshieldingError(e)))?; - let fee_unshielding_gas_limit: GasLimit = temp_wl_storage + let fee_unshielding_gas_limit: GasLimit = temp_state .read(¶meters::storage::get_fee_unshielding_gas_limit_key()) .expect("Error reading from storage") .expect("Missing fee unshielding gas limit in storage"); @@ -1386,14 +1367,14 @@ 
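The hunk below also changes how the gas meter is threaded into the wasm runner during fee unshielding: instead of a `&mut TxGasMeter`, `ShellParams::new` now takes a shared `&RefCell<TxGasMeter>`. A minimal sketch of the new call shape, assuming the surrounding items of this function (`unshield`, `temp_state`, the wasm caches) as set up earlier in the patch:

use std::cell::RefCell;

// Wrap the meter so the runner and the caller can both borrow it.
let gas_meter = RefCell::new(TxGasMeter::new(fee_unshielding_gas_limit));
let result = apply_wasm_tx(
    unshield,
    &TxIndex::default(),
    ShellParams::new(&gas_meter, temp_state, vp_wasm_cache, tx_wasm_cache),
);
// The meter can be recovered afterwards if the consumed gas is needed.
let _meter = gas_meter.into_inner();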
where // from being passed/triggering VPs) but we cannot // commit the tx write log yet cause the tx could still // be invalid. - temp_wl_storage.write_log.precommit_tx(); + temp_state.write_log_mut().precommit_tx(); let result = apply_wasm_tx( unshield, &TxIndex::default(), ShellParams::new( - &mut TxGasMeter::new(fee_unshielding_gas_limit), - temp_wl_storage, + &RefCell::new(TxGasMeter::new(fee_unshielding_gas_limit)), + temp_state, vp_wasm_cache, tx_wasm_cache, ), @@ -1623,7 +1604,7 @@ mod test_utils { vp_wasm_compilation_cache, tx_wasm_compilation_cache, ); - shell.wl_storage.storage.block.height = height.into(); + shell.state.in_mem_mut().block.height = height.into(); (Self { shell }, receiver, eth_sender, control_receiver) } @@ -1731,7 +1712,7 @@ mod test_utils { /// wrapper as parameter. #[cfg(test)] pub fn enqueue_tx(&mut self, tx: Tx, inner_tx_gas: Gas) { - self.shell.wl_storage.storage.tx_queue.push(TxInQueue { + self.shell.state.in_mem_mut().tx_queue.push(TxInQueue { tx, gas: inner_tx_gas, }); @@ -1739,9 +1720,9 @@ mod test_utils { /// Start a counter for the next epoch in `num_blocks`. pub fn start_new_epoch_in(&mut self, num_blocks: u64) { - self.wl_storage.storage.next_epoch_min_start_height = - self.wl_storage.storage.get_last_block_height() + num_blocks; - self.wl_storage.storage.next_epoch_min_start_time = + self.state.in_mem_mut().next_epoch_min_start_height = + self.state.in_mem().get_last_block_height() + num_blocks; + self.state.in_mem_mut().next_epoch_min_start_time = DateTimeUtc::now(); } @@ -1759,9 +1740,9 @@ mod test_utils { self.start_new_epoch_in(1); let next_epoch_min_start_height = - self.wl_storage.storage.next_epoch_min_start_height; + self.state.in_mem().next_epoch_min_start_height; if let Some(LastBlock { height, .. 
}) = - self.wl_storage.storage.last_block.as_mut() + self.state.in_mem_mut().last_block.as_mut() { *height = next_epoch_min_start_height; } else { @@ -1772,7 +1753,7 @@ mod test_utils { for _i in 0..EPOCH_SWITCH_BLOCKS_DELAY { self.finalize_and_commit(req.clone()); } - self.wl_storage.storage.get_current_epoch().0 + self.state.in_mem().get_current_epoch().0 } } @@ -1854,7 +1835,7 @@ mod test_utils { initial_height: 1_u32.into(), }; test.init_chain(req, num_validators); - test.wl_storage.commit_block().expect("Test failed"); + test.state.commit_block().expect("Test failed"); (test, receiver, eth_sender, control_receiver) } @@ -1919,7 +1900,7 @@ mod test_utils { use namada::eth_bridge::storage::active_key; use namada::eth_bridge::storage::eth_bridge_queries::EthBridgeStatus; shell - .wl_storage + .state .write(&active_key(), EthBridgeStatus::Disabled) .expect("Test failed"); } @@ -1958,8 +1939,8 @@ mod test_utils { tx_wasm_compilation_cache, ); shell - .wl_storage - .storage + .state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(1)) .expect("begin_block failed"); let keypair = gen_keypair(); @@ -1979,15 +1960,15 @@ mod test_utils { wrapper.set_code(Code::new("wasm_code".as_bytes().to_owned(), None)); wrapper.set_data(Data::new("transaction data".as_bytes().to_owned())); - shell.wl_storage.storage.tx_queue.push(TxInQueue { + shell.state.in_mem_mut().tx_queue.push(TxInQueue { tx: wrapper, gas: u64::MAX.into(), }); // Artificially increase the block height so that chain // will read the new block when restarted shell - .wl_storage - .storage + .state + .in_mem_mut() .block .pred_epochs .new_epoch(BlockHeight(1)); @@ -2012,12 +1993,12 @@ mod test_utils { fee_unshielding_descriptions_limit: 0, minimum_gas_price: Default::default(), }; - parameters::init_storage(¶ms, &mut shell.wl_storage) + parameters::init_storage(¶ms, &mut shell.state) .expect("Test failed"); - // make wl_storage to update conversion for a new epoch - update_allowed_conversions(&mut shell.wl_storage) + // make state to update conversion for a new epoch + update_allowed_conversions(&mut shell.state) .expect("update conversions failed"); - shell.wl_storage.commit_block().expect("commit failed"); + shell.state.commit_block().expect("commit failed"); // Drop the shell std::mem::drop(shell); @@ -2045,7 +2026,7 @@ mod test_utils { vp_wasm_compilation_cache, tx_wasm_compilation_cache, ); - assert!(!shell.wl_storage.storage.tx_queue.is_empty()); + assert!(!shell.state.in_mem().tx_queue.is_empty()); } pub(super) fn get_pkh_from_address( @@ -2074,11 +2055,7 @@ mod test_utils { ) { // Let the header time be always ahead of the next epoch min start time let header = Header { - time: shell - .wl_storage - .storage - .next_epoch_min_start_time - .next_second(), + time: shell.state.in_mem().next_epoch_min_start_time.next_second(), ..Default::default() }; let mut req = FinalizeBlock { @@ -2173,13 +2150,13 @@ mod shell_tests { transfers: vec![], }; shell - .wl_storage - .storage + .state + .in_mem_mut() .expired_txs_queue .push(ExpiredTx::EthereumEvent(ethereum_event_0.clone())); shell - .wl_storage - .storage + .state + .in_mem_mut() .expired_txs_queue .push(ExpiredTx::EthereumEvent(ethereum_event_1.clone())); @@ -2210,8 +2187,8 @@ mod shell_tests { let (mut shell, _recv, _, _) = test_utils::setup_at_height(LAST_HEIGHT); shell - .wl_storage - .storage + .state + .in_mem_mut() .eth_events_queue // sent transfers to namada nonce to 5 .transfers_to_namada = InnerEthEventsQueue::new_at(5.into()); @@ -2295,7 +2272,7 @@ mod 
shell_tests { let eth_vext = EthereumTxData::EthEventsVext( ethereum_events::Vext { validator_addr: address.clone(), - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), ethereum_events: vec![ethereum_event], } .sign(protocol_key) @@ -2309,7 +2286,7 @@ mod shell_tests { let sig = Signed::<_, SignableEthMessage>::new(hot_key, to_sign).sig; let bp_vext = EthereumTxData::BridgePoolVext( bridge_pool_roots::Vext { - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), validator_addr: address, sig, } @@ -2423,7 +2400,7 @@ mod shell_tests { token::Amount::from_uint(100, 0) .expect("This can't fail"), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -2462,7 +2439,7 @@ mod shell_tests { token::Amount::from_uint(100, 0) .expect("This can't fail"), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -2534,7 +2511,7 @@ mod shell_tests { token::Amount::from_uint(100, 0) .expect("This can't fail"), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -2551,12 +2528,11 @@ mod shell_tests { ))); // Write wrapper hash to storage - let mut batch = namada::state::testing::TestStorage::batch(); + let mut batch = namada::state::testing::TestState::batch(); let wrapper_hash = wrapper.header_hash(); let wrapper_hash_key = replay_protection::last_key(&wrapper_hash); shell - .wl_storage - .storage + .state .write_replay_protection_entry(&mut batch, &wrapper_hash_key) .expect("Test failed"); @@ -2593,8 +2569,7 @@ mod shell_tests { // Write inner hash in storage let inner_hash_key = replay_protection::last_key(&inner_tx_hash); shell - .wl_storage - .storage + .state .write_replay_protection_entry(&mut batch, &inner_hash_key) .expect("Test failed"); @@ -2682,14 +2657,14 @@ mod shell_tests { let (shell, _recv, _, _) = test_utils::setup(); let block_gas_limit = - parameters::get_max_block_gas(&shell.wl_storage).unwrap(); + parameters::get_max_block_gas(&shell.state).unwrap(); let keypair = super::test_utils::gen_keypair(); let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(100.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -2722,7 +2697,7 @@ mod shell_tests { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(100.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -2750,10 +2725,9 @@ mod shell_tests { #[test] fn test_fee_non_whitelisted_token() { let (shell, _recv, _, _) = test_utils::setup(); - let apfel_denom = - read_denom(&shell.wl_storage, &address::testing::apfel()) - .expect("unable to read denomination from storage") - .expect("unable to find denomination of apfels"); + let apfel_denom = read_denom(&shell.state, &address::testing::apfel()) + .expect("unable to read denomination from storage") + .expect("unable to find denomination of apfels"); let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( @@ -2797,7 +2771,7 @@ mod shell_tests { 
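In the replay-protection tests above, seeding a hash now goes through the renamed `TestState::batch()` and the state handle rather than `wl_storage.storage`. A minimal sketch of that test setup, assuming the same imports and `wrapper` transaction as the surrounding module:

// Write the wrapper hash to storage so mempool_validate reports a replay.
let mut batch = namada::state::testing::TestState::batch();
let wrapper_hash_key = replay_protection::last_key(&wrapper.header_hash());
shell
    .state
    .write_replay_protection_entry(&mut batch, &wrapper_hash_key)
    .expect("Test failed");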
Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(0.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -2833,7 +2807,7 @@ mod shell_tests { amount_per_gas_unit: DenominatedAmount::native( 1_000_000_000.into(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -2869,7 +2843,7 @@ mod shell_tests { amount_per_gas_unit: DenominatedAmount::native( token::Amount::max(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -2902,7 +2876,7 @@ mod shell_tests { let max_tx_bytes: u32 = { let key = parameters::storage::get_max_tx_bytes_key(); shell - .wl_storage + .state .read(&key) .expect("Failed to read from storage") .expect("Max tx bytes should have been written to storage") @@ -2916,7 +2890,7 @@ mod shell_tests { amount_per_gas_unit: DenominatedAmount::native( 100.into(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), diff --git a/crates/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/crates/apps/src/lib/node/ledger/shell/prepare_proposal.rs index a8860c1607..e6cabd0566 100644 --- a/crates/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/crates/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -9,7 +9,7 @@ use namada::gas::TxGasMeter; use namada::ledger::protocol; use namada::ledger::storage::tx_queue::TxInQueue; use namada::proof_of_stake::storage::find_validator_by_raw_hash; -use namada::state::{DBIter, StorageHasher, TempWlStorage, DB}; +use namada::state::{DBIter, StorageHasher, TempWlState, DB}; use namada::tx::data::{DecryptedTx, TxType, WrapperTx}; use namada::tx::Tx; use namada::vm::wasm::{TxCache, VpCache}; @@ -54,15 +54,13 @@ where // add encrypted txs let tm_raw_hash_string = tm_raw_hash_to_string(req.proposer_address); - let block_proposer = find_validator_by_raw_hash( - &self.wl_storage, - tm_raw_hash_string, - ) - .unwrap() - .expect( - "Unable to find native validator address of block proposer \ - from tendermint raw hash", - ); + let block_proposer = + find_validator_by_raw_hash(&self.state, tm_raw_hash_string) + .unwrap() + .expect( + "Unable to find native validator address of block \ + proposer from tendermint raw hash", + ); let (encrypted_txs, alloc) = self.build_encrypted_txs( alloc, &req.txs, @@ -113,16 +111,14 @@ where if hints::unlikely(is_2nd_height_off || is_3rd_height_off) { tracing::warn!( proposal_height = - ?self.wl_storage.storage.block.height, + ?self.state.in_mem().block.height, "No mempool txs are being included in the current proposal" ); EncryptedTxBatchAllocator::WithoutEncryptedTxs( - (&self.wl_storage).into(), + (&*self.state).into(), ) } else { - EncryptedTxBatchAllocator::WithEncryptedTxs( - (&self.wl_storage).into(), - ) + EncryptedTxBatchAllocator::WithEncryptedTxs((&*self.state).into()) } } @@ -141,20 +137,20 @@ where // valid because of mempool check TryInto::::try_into(block_time).ok() }); - let mut temp_wl_storage = TempWlStorage::new(&self.wl_storage.storage); + let mut temp_state = self.state.with_temp_write_log(); let mut vp_wasm_cache = self.vp_wasm_cache.clone(); let mut tx_wasm_cache = 
self.tx_wasm_cache.clone(); let txs = txs .iter() .filter_map(|tx_bytes| { - match validate_wrapper_bytes(tx_bytes, block_time, block_proposer, proposer_local_config, &mut temp_wl_storage, &mut vp_wasm_cache, &mut tx_wasm_cache, ) { + match validate_wrapper_bytes(tx_bytes, block_time, block_proposer, proposer_local_config, &mut temp_state, &mut vp_wasm_cache, &mut tx_wasm_cache, ) { Ok(gas) => { - temp_wl_storage.write_log.commit_tx(); + temp_state.write_log_mut().commit_tx(); Some((tx_bytes.to_owned(), gas)) }, Err(()) => { - temp_wl_storage.write_log.drop_tx(); + temp_state.write_log_mut().drop_tx(); None } } @@ -209,8 +205,8 @@ where mut alloc: BlockAllocator, ) -> (Vec, BlockAllocator) { let txs = self - .wl_storage - .storage + .state + .in_mem() .tx_queue .iter() .map( @@ -263,7 +259,7 @@ where mut alloc: BlockAllocator, txs: &[TxBytes], ) -> Vec { - if self.wl_storage.storage.last_block.is_none() { + if self.state.in_mem().last_block.is_none() { // genesis should not contain vote extensions. // // this is because we have not decided any block through @@ -323,7 +319,7 @@ fn validate_wrapper_bytes( block_time: Option, block_proposer: &Address, proposer_local_config: Option<&ValidatorLocalConfig>, - temp_wl_storage: &mut TempWlStorage, + temp_state: &mut TempWlState, vp_wasm_cache: &mut VpCache, tx_wasm_cache: &mut TxCache, ) -> Result @@ -351,8 +347,7 @@ where let mut tx_gas_meter = TxGasMeter::new(wrapper.gas_limit); tx_gas_meter.add_wrapper_gas(tx_bytes).map_err(|_| ())?; - super::replay_protection_checks(&tx, temp_wl_storage) - .map_err(|_| ())?; + super::replay_protection_checks(&tx, temp_state).map_err(|_| ())?; // Check fees and extract the gas limit of this transaction match prepare_proposal_fee_check( @@ -360,7 +355,7 @@ where protocol::get_fee_unshielding_transaction(&tx, &wrapper), block_proposer, proposer_local_config, - temp_wl_storage, + temp_state, vp_wasm_cache, tx_wasm_cache, ) { @@ -377,7 +372,7 @@ fn prepare_proposal_fee_check( masp_transaction: Option, proposer: &Address, proposer_local_config: Option<&ValidatorLocalConfig>, - temp_wl_storage: &mut TempWlStorage, + temp_state: &mut TempWlState, vp_wasm_cache: &mut VpCache, tx_wasm_cache: &mut TxCache, ) -> Result<(), Error> @@ -400,7 +395,7 @@ where ))))? 
.to_owned(), None => namada::ledger::parameters::read_gas_cost( - temp_wl_storage, + temp_state, &wrapper.fee.token, ) .expect("Must be able to read gas cost parameter") @@ -415,12 +410,12 @@ where wrapper, masp_transaction, minimum_gas_price, - temp_wl_storage, + temp_state, vp_wasm_cache, tx_wasm_cache, )?; - protocol::transfer_fee(temp_wl_storage, proposer, wrapper) + protocol::transfer_fee(temp_state, proposer, wrapper) .map_err(Error::TxApply) } @@ -449,6 +444,7 @@ mod test_prepare_proposal { use namada::tx::{Code, Data, Header, Section, Signature, Signed}; use namada::vote_ext::{ethereum_events, ethereum_tx_data_variants}; use namada::{replay_protection, token}; + use namada_sdk::storage::StorageWrite; use super::*; use crate::config::ValidatorLocalConfig; @@ -505,7 +501,7 @@ mod test_prepare_proposal { amount_per_gas_unit: DenominatedAmount::native( Default::default(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -583,10 +579,7 @@ mod test_prepare_proposal { } let (shell, _recv, _, _) = test_utils::setup_at_height(LAST_HEIGHT); - assert_eq!( - shell.wl_storage.storage.get_last_block_height(), - LAST_HEIGHT - ); + assert_eq!(shell.state.in_mem().get_last_block_height(), LAST_HEIGHT); check_invalid(&shell, LAST_HEIGHT + 2); check_invalid(&shell, LAST_HEIGHT + 1); @@ -637,20 +630,20 @@ mod test_prepare_proposal { ..Default::default() }); - let params = shell.wl_storage.pos_queries().get_pos_params(); + let params = shell.state.pos_queries().get_pos_params(); // artificially change the voting power of the default validator to // one, change the block height, and commit a dummy block, // to move to a new epoch let events_epoch = shell - .wl_storage + .state .pos_queries() .get_epoch(FIRST_HEIGHT) .expect("Test failed"); let validators_handle = consensus_validator_set_handle().at(&events_epoch); let consensus_in_mem = validators_handle - .iter(&shell.wl_storage) + .iter(&shell.state) .expect("Test failed") .map(|val| { let ( @@ -666,7 +659,7 @@ mod test_prepare_proposal { let mut consensus_set: BTreeSet = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, Epoch::default(), ) .unwrap() @@ -675,13 +668,13 @@ mod test_prepare_proposal { let val1 = consensus_set.pop_first().unwrap(); let val2 = consensus_set.pop_first().unwrap(); let pkh1 = get_pkh_from_address( - &shell.wl_storage, + &shell.state, ¶ms, val1.address.clone(), Epoch::default(), ); let pkh2 = get_pkh_from_address( - &shell.wl_storage, + &shell.state, ¶ms, val2.address.clone(), Epoch::default(), @@ -691,11 +684,11 @@ mod test_prepare_proposal { if address == wallet::defaults::validator_address() { validators_handle .at(&val_stake) - .remove(&mut shell.wl_storage, &val_position) + .remove(&mut shell.state, &val_position) .expect("Test failed"); validators_handle .at(&1.into()) - .insert(&mut shell.wl_storage, val_position, address) + .insert(&mut shell.state, val_position, address) .expect("Test failed"); } } @@ -726,7 +719,7 @@ mod test_prepare_proposal { shell.start_new_epoch(Some(req)); assert_eq!( shell - .wl_storage + .state .pos_queries() .get_epoch(shell.get_current_decision_height()), Some(Epoch(1)) @@ -785,13 +778,15 @@ mod test_prepare_proposal { // Load some tokens to tx signer to pay fees let balance_key = token::storage_key::balance_key( - &shell.wl_storage.storage.native_token, + &shell.state.in_mem().native_token, &Address::from(&keypair.ref_to()), ); shell - .wl_storage - .storage 
- .write(&balance_key, Amount::native_whole(1_000).serialize_to_vec()) + .state + .db_write( + &balance_key, + Amount::native_whole(1_000).serialize_to_vec(), + ) .unwrap(); let mut req = RequestPrepareProposal { @@ -807,7 +802,7 @@ mod test_prepare_proposal { amount_per_gas_unit: DenominatedAmount::native( 1.into(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -876,7 +871,7 @@ mod test_prepare_proposal { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(0.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -896,9 +891,8 @@ mod test_prepare_proposal { let wrapper_unsigned_hash = wrapper.header_hash(); let hash_key = replay_protection::last_key(&wrapper_unsigned_hash); shell - .wl_storage - .storage - .write(&hash_key, vec![]) + .state + .write_bytes(&hash_key, vec![]) .expect("Test failed"); let req = RequestPrepareProposal { @@ -921,7 +915,7 @@ mod test_prepare_proposal { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -958,7 +952,7 @@ mod test_prepare_proposal { amount_per_gas_unit: DenominatedAmount::native( Amount::zero(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -978,9 +972,8 @@ mod test_prepare_proposal { // Write inner hash to storage let hash_key = replay_protection::last_key(&inner_unsigned_hash); shell - .wl_storage - .storage - .write(&hash_key, vec![]) + .state + .write_bytes(&hash_key, vec![]) .expect("Test failed"); let req = RequestPrepareProposal { @@ -1004,7 +997,7 @@ mod test_prepare_proposal { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1026,7 +1019,7 @@ mod test_prepare_proposal { new_wrapper.update_header(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair_2.ref_to(), Epoch(0), @@ -1056,7 +1049,7 @@ mod test_prepare_proposal { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1098,13 +1091,13 @@ mod test_prepare_proposal { let (shell, _recv, _, _) = test_utils::setup(); let block_gas_limit = - namada::parameters::get_max_block_gas(&shell.wl_storage).unwrap(); + namada::parameters::get_max_block_gas(&shell.state).unwrap(); let keypair = gen_keypair(); let wrapper = WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(100.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1143,7 +1136,7 @@ mod test_prepare_proposal { let wrapper = WrapperTx::new( Fee { amount_per_gas_unit: 
DenominatedAmount::native(100.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1189,7 +1182,7 @@ mod test_prepare_proposal { }); } - let btc_denom = read_denom(&shell.wl_storage, &address::testing::btc()) + let btc_denom = read_denom(&shell.state, &address::testing::btc()) .expect("unable to read denomination from storage") .expect("unable to find denomination of btcs"); @@ -1237,10 +1230,9 @@ mod test_prepare_proposal { fn test_fee_non_whitelisted_token() { let (shell, _recv, _, _) = test_utils::setup(); - let apfel_denom = - read_denom(&shell.wl_storage, &address::testing::apfel()) - .expect("unable to read denomination from storage") - .expect("unable to find denomination of apfels"); + let apfel_denom = read_denom(&shell.state, &address::testing::apfel()) + .expect("unable to read denomination from storage") + .expect("unable to find denomination of apfels"); let wrapper = WrapperTx::new( Fee { @@ -1299,7 +1291,7 @@ mod test_prepare_proposal { let wrapper = WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(10.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -1339,7 +1331,7 @@ mod test_prepare_proposal { let wrapper = WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(0.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -1380,7 +1372,7 @@ mod test_prepare_proposal { amount_per_gas_unit: DenominatedAmount::native( 1_000_000_000.into(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -1421,7 +1413,7 @@ mod test_prepare_proposal { amount_per_gas_unit: DenominatedAmount::native( token::Amount::max(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -1460,8 +1452,8 @@ mod test_prepare_proposal { let (mut shell, _recv, _, _) = test_utils::setup_at_height(LAST_HEIGHT); shell - .wl_storage - .storage + .state + .in_mem_mut() .eth_events_queue // sent transfers to namada nonce to 5 .transfers_to_namada = InnerEthEventsQueue::new_at(5.into()); diff --git a/crates/apps/src/lib/node/ledger/shell/process_proposal.rs b/crates/apps/src/lib/node/ledger/shell/process_proposal.rs index 2414ed4757..4fe2c3b65b 100644 --- a/crates/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/crates/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -11,7 +11,7 @@ use namada::ledger::protocol::get_fee_unshielding_transaction; use namada::ledger::storage::tx_queue::TxInQueue; use namada::parameters::validate_tx_bytes; use namada::proof_of_stake::storage::find_validator_by_raw_hash; -use namada::state::{TempWlStorage, WlStorage}; +use namada::state::{TempWlState, WlState}; use namada::tx::data::protocol::ProtocolTxType; use namada::vote_ext::ethereum_tx_data_variants; @@ -41,16 +41,16 @@ pub struct ValidationMeta { pub has_decrypted_txs: bool, } -impl From<&WlStorage> for ValidationMeta +impl From<&WlState> for ValidationMeta where D: 'static + DB + for<'iter> DBIter<'iter>, H: 'static + StorageHasher, { - fn from(wl_storage: &WlStorage) -> Self { + fn 
from(state: &WlState) -> Self { let max_proposal_bytes = - wl_storage.pos_queries().get_max_proposal_bytes().get(); + state.pos_queries().get_max_proposal_bytes().get(); let max_block_gas = - namada::parameters::get_max_block_gas(wl_storage).unwrap(); + namada::parameters::get_max_block_gas(state).unwrap(); let encrypted_txs_bin = EncryptedTxsBins::new(max_proposal_bytes, max_block_gas); let txs_bin = TxBin::init(max_proposal_bytes); @@ -94,7 +94,7 @@ where let native_block_proposer_address = { let tm_raw_hash_string = tm_raw_hash_to_string(&req.proposer_address); - find_validator_by_raw_hash(&self.wl_storage, tm_raw_hash_string) + find_validator_by_raw_hash(&self.state, tm_raw_hash_string) .unwrap() .expect( "Unable to find native validator address of block \ @@ -166,9 +166,9 @@ where block_time: DateTimeUtc, block_proposer: &Address, ) -> (Vec, ValidationMeta) { - let mut tx_queue_iter = self.wl_storage.storage.tx_queue.iter(); - let mut temp_wl_storage = TempWlStorage::new(&self.wl_storage.storage); - let mut metadata = ValidationMeta::from(&self.wl_storage); + let mut tx_queue_iter = self.state.in_mem().tx_queue.iter(); + let mut temp_state = self.state.with_temp_write_log(); + let mut metadata = ValidationMeta::from(self.state.read_only()); let mut vp_wasm_cache = self.vp_wasm_cache.clone(); let mut tx_wasm_cache = self.tx_wasm_cache.clone(); @@ -179,7 +179,7 @@ where tx_bytes, &mut tx_queue_iter, &mut metadata, - &mut temp_wl_storage, + &mut temp_state, block_time, &mut vp_wasm_cache, &mut tx_wasm_cache, @@ -187,7 +187,7 @@ where ); let error_code = ResultCode::from_u32(result.code).unwrap(); if let ResultCode::Ok = error_code { - temp_wl_storage.write_log.commit_tx(); + temp_state.write_log_mut().commit_tx(); } else { tracing::info!( "Process proposal rejected an invalid tx. Error code: \ @@ -195,13 +195,13 @@ where error_code, result.info ); - temp_wl_storage.write_log.drop_tx(); + temp_state.write_log_mut().drop_tx(); } result }) .collect(); metadata.decrypted_queue_has_remaining_txs = - !self.wl_storage.storage.tx_queue.is_empty() + !self.state.in_mem().tx_queue.is_empty() && tx_queue_iter.next().is_some(); (tx_results, metadata) } @@ -234,7 +234,7 @@ where tx_bytes: &[u8], tx_queue_iter: &mut impl Iterator, metadata: &mut ValidationMeta, - temp_wl_storage: &mut TempWlStorage, + temp_state: &mut TempWlState, block_time: DateTimeUtc, vp_wasm_cache: &mut VpCache, tx_wasm_cache: &mut TxCache, @@ -247,7 +247,7 @@ where // // NB: always keep this as the first tx check, // as it is a pretty cheap one - if !validate_tx_bytes(&self.wl_storage, tx_bytes.len()) + if !validate_tx_bytes(&self.state, tx_bytes.len()) .expect("Failed to get max tx bytes param from storage") { return TxResult { @@ -350,11 +350,9 @@ where .map_err(|err| err.to_string()) .and_then(|ext| { validate_eth_events_vext( - &self.wl_storage, + &self.state, &ext.0, - self.wl_storage - .storage - .get_last_block_height(), + self.state.in_mem().get_last_block_height(), ) .map(|_| TxResult { code: ResultCode::Ok.into(), @@ -378,11 +376,9 @@ where .map_err(|err| err.to_string()) .and_then(|ext| { validate_bp_roots_vext( - &self.wl_storage, + &self.state, &ext.0, - self.wl_storage - .storage - .get_last_block_height(), + self.state.in_mem().get_last_block_height(), ) .map(|_| TxResult { code: ResultCode::Ok.into(), @@ -408,14 +404,14 @@ where .map_err(|err| err.to_string()) .and_then(|ext| { validate_valset_upd_vext( - &self.wl_storage, + &self.state, &ext, // n.b. 
only accept validator set updates // issued at // the current epoch (signing off on the // validators // of the next epoch) - self.wl_storage.storage.get_current_epoch().0, + self.state.in_mem().get_current_epoch().0, ) .map(|_| TxResult { code: ResultCode::Ok.into(), @@ -579,8 +575,7 @@ where } // Replay protection checks - if let Err(e) = - super::replay_protection_checks(&tx, temp_wl_storage) + if let Err(e) = super::replay_protection_checks(&tx, temp_state) { return TxResult { code: ResultCode::ReplayTx.into(), @@ -593,7 +588,7 @@ where &wrapper, get_fee_unshielding_transaction(&tx, &wrapper), block_proposer, - temp_wl_storage, + temp_state, vp_wasm_cache, tx_wasm_cache, ) { @@ -631,7 +626,7 @@ fn process_proposal_fee_check( wrapper: &WrapperTx, masp_transaction: Option, proposer: &Address, - temp_wl_storage: &mut TempWlStorage, + temp_state: &mut TempWlState, vp_wasm_cache: &mut VpCache, tx_wasm_cache: &mut TxCache, ) -> Result<()> @@ -641,7 +636,7 @@ where CA: 'static + WasmCacheAccess + Sync, { let minimum_gas_price = namada::ledger::parameters::read_gas_cost( - temp_wl_storage, + temp_state, &wrapper.fee.token, ) .expect("Must be able to read gas cost parameter") @@ -654,12 +649,12 @@ where wrapper, masp_transaction, minimum_gas_price, - temp_wl_storage, + temp_state, vp_wasm_cache, tx_wasm_cache, )?; - protocol::transfer_fee(temp_wl_storage, proposer, wrapper) + protocol::transfer_fee(temp_state, proposer, wrapper) .map_err(Error::TxApply) } @@ -705,7 +700,7 @@ mod test_process_proposal { }; let ext = ethereum_events::Vext { validator_addr: addr.clone(), - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), ethereum_events: vec![event], } .sign(protocol_key); @@ -743,8 +738,8 @@ mod test_process_proposal { #[test] fn check_rejected_bp_roots_bridge_inactive() { let (mut shell, _a, _b, _c) = test_utils::setup_at_height(1); - shell.wl_storage.storage.block.height = - shell.wl_storage.storage.get_last_block_height(); + shell.state.in_mem_mut().block.height = + shell.state.in_mem().get_last_block_height(); shell.commit(); let protocol_key = shell.mode.get_protocol_key().expect("Test failed"); let addr = shell.mode.get_validator_address().expect("Test failed"); @@ -755,7 +750,7 @@ mod test_process_proposal { ) .sig; let vote_ext = bridge_pool_roots::Vext { - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), validator_addr: addr.clone(), sig, } @@ -915,7 +910,7 @@ mod test_process_proposal { amount_per_gas_unit: DenominatedAmount::native( Default::default(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, public_key, Epoch(0), @@ -967,7 +962,7 @@ mod test_process_proposal { amount_per_gas_unit: DenominatedAmount::native( Amount::from_uint(100, 0).expect("Test failed"), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1027,11 +1022,11 @@ mod test_process_proposal { let keypair = gen_keypair(); // reduce address balance to match the 100 token min fee let balance_key = token::storage_key::balance_key( - &shell.wl_storage.storage.native_token, + &shell.state.in_mem().native_token, &Address::from(&keypair.ref_to()), ); shell - .wl_storage + .state .write(&balance_key, Amount::native_whole(99)) .unwrap(); let keypair = gen_keypair(); @@ -1041,7 +1036,7 @@ mod test_process_proposal { 
amount_per_gas_unit: DenominatedAmount::native( Amount::from_uint(1, 0).expect("Test failed"), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1094,11 +1089,11 @@ mod test_process_proposal { let keypair = crate::wallet::defaults::daewon_keypair(); // reduce address balance to match the 100 token min fee let balance_key = token::storage_key::balance_key( - &shell.wl_storage.storage.native_token, + &shell.state.in_mem().native_token, &Address::from(&keypair.ref_to()), ); shell - .wl_storage + .state .write(&balance_key, Amount::native_whole(99)) .unwrap(); shell.commit(); @@ -1109,7 +1104,7 @@ mod test_process_proposal { amount_per_gas_unit: DenominatedAmount::native( Amount::native_whole(1_000_100), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1167,7 +1162,7 @@ mod test_process_proposal { amount_per_gas_unit: DenominatedAmount::native( Amount::native_whole(i as u64), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1228,7 +1223,7 @@ mod test_process_proposal { amount_per_gas_unit: DenominatedAmount::native( Default::default(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1283,7 +1278,7 @@ mod test_process_proposal { amount_per_gas_unit: DenominatedAmount::native( Default::default(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, pk: keypair.ref_to(), epoch: Epoch(0), @@ -1389,7 +1384,7 @@ mod test_process_proposal { amount_per_gas_unit: DenominatedAmount::native( Amount::zero(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1406,12 +1401,11 @@ mod test_process_proposal { ))); // Write wrapper hash to storage - let mut batch = namada::state::testing::TestStorage::batch(); + let mut batch = namada::state::testing::TestState::batch(); let wrapper_unsigned_hash = wrapper.header_hash(); let hash_key = replay_protection::last_key(&wrapper_unsigned_hash); shell - .wl_storage - .storage + .state .write_replay_protection_entry(&mut batch, &hash_key) .expect("Test failed"); @@ -1448,20 +1442,19 @@ mod test_process_proposal { // Add unshielded balance for fee payment let balance_key = token::storage_key::balance_key( - &shell.wl_storage.storage.native_token, + &shell.state.in_mem().native_token, &Address::from(&keypair.ref_to()), ); shell - .wl_storage - .storage - .write(&balance_key, Amount::native_whole(1000).serialize_to_vec()) + .state + .write(&balance_key, Amount::native_whole(1000)) .unwrap(); let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1515,7 +1508,7 @@ mod test_process_proposal { amount_per_gas_unit: DenominatedAmount::native( Amount::zero(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1532,11 +1525,10 @@ mod test_process_proposal { ))); // Write inner hash to storage - let mut batch = 
namada::state::testing::TestStorage::batch(); + let mut batch = namada::state::testing::TestState::batch(); let hash_key = replay_protection::last_key(&wrapper.raw_header_hash()); shell - .wl_storage - .storage + .state .write_replay_protection_entry(&mut batch, &hash_key) .expect("Test failed"); @@ -1576,7 +1568,7 @@ mod test_process_proposal { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1596,7 +1588,7 @@ mod test_process_proposal { new_wrapper.update_header(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair_2.ref_to(), Epoch(0), @@ -1632,7 +1624,7 @@ mod test_process_proposal { amount_per_gas_unit: DenominatedAmount::native( Amount::zero(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1694,7 +1686,7 @@ mod test_process_proposal { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1737,7 +1729,7 @@ mod test_process_proposal { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1782,14 +1774,14 @@ mod test_process_proposal { let (shell, _recv, _, _) = test_utils::setup(); let block_gas_limit = - namada::parameters::get_max_block_gas(&shell.wl_storage).unwrap(); + namada::parameters::get_max_block_gas(&shell.state).unwrap(); let keypair = super::test_utils::gen_keypair(); let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(100.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1831,7 +1823,7 @@ mod test_process_proposal { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(100.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -1868,10 +1860,9 @@ mod test_process_proposal { fn test_fee_non_whitelisted_token() { let (shell, _recv, _, _) = test_utils::setup(); - let apfel_denom = - read_denom(&shell.wl_storage, &address::testing::apfel()) - .expect("unable to read denomination from storage") - .expect("unable to find denomination of apfels"); + let apfel_denom = read_denom(&shell.state, &address::testing::apfel()) + .expect("unable to read denomination from storage") + .expect("unable to find denomination of apfels"); let mut wrapper = Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( @@ -1923,7 +1914,7 @@ mod test_process_proposal { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(0.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, 
crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -1968,7 +1959,7 @@ mod test_process_proposal { amount_per_gas_unit: DenominatedAmount::native( 1_000_000_000.into(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -2013,7 +2004,7 @@ mod test_process_proposal { amount_per_gas_unit: DenominatedAmount::native( token::Amount::max(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, crate::wallet::defaults::albert_keypair().ref_to(), Epoch(0), @@ -2059,7 +2050,7 @@ mod test_process_proposal { Tx::from_type(TxType::Wrapper(Box::new(WrapperTx::new( Fee { amount_per_gas_unit: DenominatedAmount::native(0.into()), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -2076,7 +2067,7 @@ mod test_process_proposal { ))); let wrapper = wrapper.to_bytes(); for height in [1u64, 2] { - if let Some(b) = shell.wl_storage.storage.last_block.as_mut() { + if let Some(b) = shell.state.in_mem_mut().last_block.as_mut() { b.height = height.into(); } let response = { @@ -2114,7 +2105,7 @@ mod test_process_proposal { let max_tx_bytes: u32 = { let key = get_max_tx_bytes_key(); shell - .wl_storage + .state .read(&key) .expect("Failed to read from storage") .expect("Max tx bytes should have been written to storage") @@ -2128,7 +2119,7 @@ mod test_process_proposal { amount_per_gas_unit: DenominatedAmount::native( 100.into(), ), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, keypair.ref_to(), Epoch(0), @@ -2183,8 +2174,8 @@ mod test_process_proposal { let (mut shell, _recv, _, _) = test_utils::setup_at_height(LAST_HEIGHT); shell - .wl_storage - .storage + .state + .in_mem_mut() .eth_events_queue // sent transfers to namada nonce to 5 .transfers_to_namada = InnerEthEventsQueue::new_at(5.into()); diff --git a/crates/apps/src/lib/node/ledger/shell/queries.rs b/crates/apps/src/lib/node/ledger/shell/queries.rs index b4cc45b069..f88fb303b8 100644 --- a/crates/apps/src/lib/node/ledger/shell/queries.rs +++ b/crates/apps/src/lib/node/ledger/shell/queries.rs @@ -19,7 +19,7 @@ where /// INVARIANT: This method must be stateless. 
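The query entry point below now hands the RPC router a read-only view of the state instead of a reference to the whole `WlStorage`. A minimal sketch of the changed construction, assuming the remaining `RequestCtx` fields stay as in the existing constructor:

let ctx = RequestCtx {
    // Immutable view of the state; query handlers must not touch the write log.
    state: self.state.read_only(),
    event_log: self.event_log(),
    vp_wasm_cache: self.vp_wasm_cache.read_only(),
    tx_wasm_cache: self.tx_wasm_cache.read_only(),
    // ...remaining fields unchanged (assumption, not shown in this hunk)
};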
pub fn query(&self, query: request::Query) -> response::Query { let ctx = RequestCtx { - wl_storage: &self.wl_storage, + state: self.state.read_only(), event_log: self.event_log(), vp_wasm_cache: self.vp_wasm_cache.read_only(), tx_wasm_cache: self.tx_wasm_cache.read_only(), @@ -56,7 +56,7 @@ where ) -> token::Amount { // Storage read must not fail, but there might be no value, in which // case default (0) is returned - token::read_balance(&self.wl_storage, token, owner) + token::read_balance(&self.state, token, owner) .expect("Token balance read in the protocol must not fail") } } @@ -99,7 +99,7 @@ mod test_queries { for (curr_epoch, curr_block_height, can_send) in epoch_assertions { - shell.wl_storage.storage.begin_block( + shell.state.in_mem_mut().begin_block( BlockHash::default(), curr_block_height.into()).unwrap(); if prev_epoch != Some(curr_epoch) { @@ -107,7 +107,7 @@ mod test_queries { shell.start_new_epoch_in(EPOCH_NUM_BLOCKS); } if let Some(b) = - shell.wl_storage.storage.last_block.as_mut() + shell.state.in_mem_mut().last_block.as_mut() { b.height = BlockHeight(curr_block_height - 1); } @@ -119,23 +119,23 @@ mod test_queries { ); assert_eq!( shell - .wl_storage + .state .pos_queries() .get_epoch(curr_block_height.into()), Some(Epoch(curr_epoch)) ); assert_eq!( shell - .wl_storage + .state .ethbridge_queries() .must_send_valset_upd(SendValsetUpd::Now), can_send, ); let params = - shell.wl_storage.pos_queries().get_pos_params(); + shell.state.pos_queries().get_pos_params(); let consensus_set: Vec = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, Epoch::default(), ) .unwrap() @@ -144,7 +144,7 @@ mod test_queries { let val1 = consensus_set[0].clone(); let pkh1 = get_pkh_from_address( - &shell.wl_storage, + &shell.state, ¶ms, val1.address.clone(), Epoch::default(), diff --git a/crates/apps/src/lib/node/ledger/shell/testing/node.rs b/crates/apps/src/lib/node/ledger/shell/testing/node.rs index 18f61463e1..ffc6484ca7 100644 --- a/crates/apps/src/lib/node/ledger/shell/testing/node.rs +++ b/crates/apps/src/lib/node/ledger/shell/testing/node.rs @@ -319,7 +319,7 @@ impl MockNode { } pub fn current_epoch(&self) -> Epoch { - self.shell.lock().unwrap().wl_storage.storage.last_epoch + self.shell.lock().unwrap().state.in_mem().last_epoch } pub fn next_epoch(&mut self) -> Epoch { @@ -327,15 +327,15 @@ impl MockNode { let mut locked = self.shell.lock().unwrap(); let next_epoch_height = - locked.wl_storage.storage.get_last_block_height() + 1; - locked.wl_storage.storage.next_epoch_min_start_height = + locked.state.in_mem().get_last_block_height() + 1; + locked.state.in_mem_mut().next_epoch_min_start_height = next_epoch_height; - locked.wl_storage.storage.next_epoch_min_start_time = + locked.state.in_mem_mut().next_epoch_min_start_time = DateTimeUtc::now(); let next_epoch_min_start_height = - locked.wl_storage.storage.next_epoch_min_start_height; + locked.state.in_mem().next_epoch_min_start_height; if let Some(LastBlock { height, .. 
}) = - locked.wl_storage.storage.last_block.as_mut() + locked.state.in_mem_mut().last_block.as_mut() { *height = next_epoch_min_start_height; } @@ -348,8 +348,8 @@ impl MockNode { self.shell .lock() .unwrap() - .wl_storage - .storage + .state + .in_mem() .get_current_epoch() .0 } @@ -358,11 +358,11 @@ impl MockNode { fn prepare_request(&self) -> (Vec, Vec) { let (val1, ck) = { let locked = self.shell.lock().unwrap(); - let params = locked.wl_storage.pos_queries().get_pos_params(); - let current_epoch = locked.wl_storage.storage.get_current_epoch().0; + let params = locked.state.pos_queries().get_pos_params(); + let current_epoch = locked.state.in_mem().get_current_epoch().0; let consensus_set: Vec = read_consensus_validator_set_addresses_with_stake( - &locked.wl_storage, + &locked.state, current_epoch, ) .unwrap() @@ -371,7 +371,7 @@ impl MockNode { let val1 = consensus_set[0].clone(); let ck = validator_consensus_key_handle(&val1.address) - .get(&locked.wl_storage, current_epoch, ¶ms) + .get(&locked.state, current_epoch, ¶ms) .unwrap() .unwrap(); (val1, ck) @@ -399,11 +399,8 @@ impl MockNode { let (proposer_address, votes) = self.prepare_request(); let mut locked = self.shell.lock().unwrap(); - let height = locked - .wl_storage - .storage - .get_last_block_height() - .next_height(); + let height = + locked.state.in_mem().get_last_block_height().next_height(); // check if we have protocol txs to be included // in the finalize block request @@ -518,11 +515,8 @@ impl MockNode { ..Default::default() }; let mut locked = self.shell.lock().unwrap(); - let height = locked - .wl_storage - .storage - .get_last_block_height() - .next_height(); + let height = + locked.state.in_mem().get_last_block_height().next_height(); let (result, tx_results) = locked.process_proposal(req); let mut errors: Vec<_> = tx_results @@ -672,8 +666,8 @@ impl<'a> Client for &'a MockNode { self.shell .lock() .unwrap() - .wl_storage - .storage + .state + .in_mem() .last_block .as_ref() .map(|b| b.height) @@ -690,7 +684,7 @@ impl<'a> Client for &'a MockNode { }; let borrowed = self.shell.lock().unwrap(); let ctx = RequestCtx { - wl_storage: &borrowed.wl_storage, + state: &borrowed.state, event_log: borrowed.event_log(), vp_wasm_cache: borrowed.vp_wasm_cache.read_only(), tx_wasm_cache: borrowed.tx_wasm_cache.read_only(), @@ -723,16 +717,16 @@ impl<'a> Client for &'a MockNode { version: "test".to_string(), app_version: 0, last_block_height: locked - .wl_storage - .storage + .state + .in_mem() .last_block .as_ref() .map(|b| b.height.0 as u32) .unwrap_or_default() .into(), last_block_app_hash: locked - .wl_storage - .storage + .state + .in_mem() .last_block .as_ref() .map(|b| b.hash.0) diff --git a/crates/apps/src/lib/node/ledger/shell/vote_extensions.rs b/crates/apps/src/lib/node/ledger/shell/vote_extensions.rs index 2f0454bbb4..03261bf3f8 100644 --- a/crates/apps/src/lib/node/ledger/shell/vote_extensions.rs +++ b/crates/apps/src/lib/node/ledger/shell/vote_extensions.rs @@ -64,7 +64,7 @@ where _ => unreachable!("{VALIDATOR_EXPECT_MSG}"), }; sign_ethereum_events( - &self.wl_storage, + &self.state, validator_addr, protocol_key, ethereum_events, @@ -89,7 +89,7 @@ where _ => unreachable!("{VALIDATOR_EXPECT_MSG}"), }; sign_bridge_pool_root( - &self.wl_storage, + &self.state, validator_addr, eth_hot_key, protocol_key, @@ -110,7 +110,7 @@ where .mode .get_eth_bridge_keypair() .expect("{VALIDATOR_EXPECT_MSG}"); - sign_validator_set_update(&self.wl_storage, validator_addr, eth_hot_key) + sign_validator_set_update(&self.state, 
validator_addr, eth_hot_key) } /// Given a slice of [`TxBytes`], return an iterator over the @@ -140,7 +140,7 @@ where .ethereum_events .iter() .any(|event| { - self.wl_storage + self.state .ethbridge_queries() .validate_eth_event_nonce(event) }) @@ -157,7 +157,7 @@ where // will eventually be evicted, getting replaced // by newer txs. (!self - .wl_storage + .state .ethbridge_queries() .valset_upd_seen(ext.data.signing_epoch.next())) .then(|| tx_bytes.clone()) diff --git a/crates/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs b/crates/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs index 6e2b6db25a..1320a9f8f5 100644 --- a/crates/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs +++ b/crates/apps/src/lib/node/ledger/shell/vote_extensions/bridge_pool_vext.rs @@ -29,9 +29,9 @@ where > + 'iter { vote_extensions.into_iter().map(|vote_extension| { validate_bp_roots_vext( - &self.wl_storage, + &self.state, &vote_extension, - self.wl_storage.storage.get_last_block_height(), + self.state.in_mem().get_last_block_height(), )?; Ok(vote_extension) }) @@ -90,15 +90,11 @@ mod test_bp_vote_extensions { validators_handle .at(&1.into()) .at(&token::Amount::native_whole(100)) - .insert( - &mut shell.wl_storage, - ValidatorPosition(1), - bertha_address(), - ) + .insert(&mut shell.state, ValidatorPosition(1), bertha_address()) .expect("Test failed"); // change pipeline length to 1 - let mut params = shell.wl_storage.pos_queries().get_pos_params(); + let mut params = shell.state.pos_queries().get_pos_params(); params.owned.pipeline_len = 1; let consensus_key = gen_keypair(); @@ -107,7 +103,7 @@ mod test_bp_vote_extensions { let cold_key = gen_secp256k1_keypair(); become_validator( - &mut shell.wl_storage, + &mut shell.state, BecomeValidator { params: ¶ms, address: &bertha_address(), @@ -127,7 +123,7 @@ mod test_bp_vote_extensions { // we advance forward to the next epoch let consensus_set: Vec = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, Epoch::default(), ) .unwrap() @@ -136,7 +132,7 @@ mod test_bp_vote_extensions { let val1 = consensus_set[0].clone(); let pkh1 = get_pkh_from_address( - &shell.wl_storage, + &shell.state, ¶ms, val1.address.clone(), Epoch::default(), @@ -159,19 +155,19 @@ mod test_bp_vote_extensions { let to_sign = get_bp_bytes_to_sign(); let sig = Signed::<_, SignableEthMessage>::new(&hot_key, to_sign).sig; let vote_ext = bridge_pool_roots::Vext { - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), validator_addr: bertha_address(), sig, } .sign(&bertha_keypair()); - shell.wl_storage.storage.block.height = - shell.wl_storage.storage.get_last_block_height(); + shell.state.in_mem_mut().block.height = + shell.state.in_mem().get_last_block_height(); shell.commit(); assert!( validate_bp_roots_vext( - &shell.wl_storage, + &shell.state, &vote_ext.0, - shell.wl_storage.storage.get_last_block_height() + shell.state.in_mem().get_last_block_height() ) .is_ok() ); @@ -189,8 +185,8 @@ mod test_bp_vote_extensions { .get_validator_address() .expect("Test failed") .clone(); - shell.wl_storage.storage.block.height = - shell.wl_storage.storage.get_last_block_height(); + shell.state.in_mem_mut().block.height = + shell.state.in_mem().get_last_block_height(); shell.commit(); let to_sign = get_bp_bytes_to_sign(); let sig = Signed::<_, SignableEthMessage>::new( @@ -199,7 +195,7 @@ mod test_bp_vote_extensions { ) .sig; let vote_ext = 
bridge_pool_roots::Vext { - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), validator_addr: address, sig, } @@ -210,9 +206,9 @@ mod test_bp_vote_extensions { ); assert!( validate_bp_roots_vext( - &shell.wl_storage, + &shell.state, &vote_ext.0, - shell.wl_storage.storage.get_last_block_height(), + shell.state.in_mem().get_last_block_height(), ) .is_ok() ) @@ -229,8 +225,8 @@ mod test_bp_vote_extensions { .get_validator_address() .expect("Test failed") .clone(); - shell.wl_storage.storage.block.height = - shell.wl_storage.storage.get_last_block_height(); + shell.state.in_mem_mut().block.height = + shell.state.in_mem().get_last_block_height(); shell.commit(); let to_sign = get_bp_bytes_to_sign(); let sig = Signed::<_, SignableEthMessage>::new( @@ -239,7 +235,7 @@ mod test_bp_vote_extensions { ) .sig; let vote_ext = bridge_pool_roots::Vext { - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), validator_addr: address, sig, } @@ -265,21 +261,21 @@ mod test_bp_vote_extensions { .get_validator_address() .expect("Test failed") .clone(); - shell.wl_storage.storage.block.height = - shell.wl_storage.storage.get_last_block_height(); + shell.state.in_mem_mut().block.height = + shell.state.in_mem().get_last_block_height(); shell.commit(); let to_sign = get_bp_bytes_to_sign(); let sig = Signed::<_, SignableEthMessage>::new(&signing_key, to_sign).sig; let bp_root = bridge_pool_roots::Vext { - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), validator_addr: address, sig, } .sign(shell.mode.get_protocol_key().expect("Test failed")); assert!( validate_bp_roots_vext( - &shell.wl_storage, + &shell.state, &bp_root.0, shell.get_current_decision_height(), ) @@ -306,16 +302,16 @@ mod test_bp_vote_extensions { ) .sig; let bp_root = bridge_pool_roots::Vext { - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), validator_addr: address, sig, } .sign(&bertha_keypair()); assert!( validate_bp_roots_vext( - &shell.wl_storage, + &shell.state, &bp_root.0, - shell.wl_storage.storage.get_last_block_height() + shell.state.in_mem().get_last_block_height() ) .is_err() ) @@ -338,9 +334,9 @@ mod test_bp_vote_extensions { assert!( validate_bp_roots_vext( - &shell.wl_storage, + &shell.state, &bp_root.0, - shell.wl_storage.storage.get_last_block_height() + shell.state.in_mem().get_last_block_height() ) .is_err() ) @@ -352,7 +348,7 @@ mod test_bp_vote_extensions { fn test_block_height_too_high() { let (shell, _, _, _) = setup_at_height(3u64); reject_incorrect_block_number( - shell.wl_storage.storage.get_last_block_height() + 1, + shell.state.in_mem().get_last_block_height() + 1, &shell, ); } @@ -378,16 +374,16 @@ mod test_bp_vote_extensions { ) .sig; let bp_root = bridge_pool_roots::Vext { - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), validator_addr: address, sig, } .sign(shell.mode.get_protocol_key().expect("Test failed")); assert!( validate_bp_roots_vext( - &shell.wl_storage, + &shell.state, &bp_root.0, - shell.wl_storage.storage.get_last_block_height() + shell.state.in_mem().get_last_block_height() ) .is_err() ) @@ -406,16 +402,16 @@ mod test_bp_vote_extensions { ) .sig; let bp_root = bridge_pool_roots::Vext { - block_height: 
shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), validator_addr: address, sig, } .sign(shell.mode.get_protocol_key().expect("Test failed")); assert!( validate_bp_roots_vext( - &shell.wl_storage, + &shell.state, &bp_root.0, - shell.wl_storage.storage.get_last_block_height() + shell.state.in_mem().get_last_block_height() ) .is_err() ) @@ -427,28 +423,28 @@ mod test_bp_vote_extensions { fn test_vext_for_old_height() { let (mut shell, _recv, _, _oracle_control_recv) = setup_at_height(1u64); let address = shell.mode.get_validator_address().unwrap().clone(); - shell.wl_storage.storage.block.height = 2.into(); + shell.state.in_mem_mut().block.height = 2.into(); let key = get_key_from_hash(&KeccakHash([1; 32])); - let height = shell.wl_storage.storage.block.height; - shell.wl_storage.write(&key, height).expect("Test failed"); + let height = shell.state.in_mem().block.height; + shell.state.write(&key, height).expect("Test failed"); shell.commit(); assert_eq!( shell - .wl_storage + .state .ethbridge_queries() .get_bridge_pool_root_at_height(2.into()) .unwrap(), KeccakHash([1; 32]) ); - shell.wl_storage.storage.block.height = 3.into(); - shell.wl_storage.delete(&key).expect("Test failed"); + shell.state.in_mem_mut().block.height = 3.into(); + shell.state.delete(&key).expect("Test failed"); let key = get_key_from_hash(&KeccakHash([2; 32])); - let height = shell.wl_storage.storage.block.height; - shell.wl_storage.write(&key, height).expect("Test failed"); + let height = shell.state.in_mem().block.height; + shell.state.write(&key, height).expect("Test failed"); shell.commit(); assert_eq!( shell - .wl_storage + .state .ethbridge_queries() .get_bridge_pool_root_at_height(3.into()) .unwrap(), @@ -468,7 +464,7 @@ mod test_bp_vote_extensions { .sign(shell.mode.get_protocol_key().expect("Test failed")); assert!( validate_bp_roots_vext( - &shell.wl_storage, + &shell.state, &bp_root.0, shell.get_current_decision_height() ) @@ -488,7 +484,7 @@ mod test_bp_vote_extensions { .sign(shell.mode.get_protocol_key().expect("Test failed")); assert!( validate_bp_roots_vext( - &shell.wl_storage, + &shell.state, &bp_root.0, shell.get_current_decision_height() ) @@ -502,28 +498,28 @@ mod test_bp_vote_extensions { fn test_wrong_height_for_root() { let (mut shell, _recv, _, _oracle_control_recv) = setup_at_height(1u64); let address = shell.mode.get_validator_address().unwrap().clone(); - shell.wl_storage.storage.block.height = 2.into(); + shell.state.in_mem_mut().block.height = 2.into(); let key = get_key_from_hash(&KeccakHash([1; 32])); - let height = shell.wl_storage.storage.block.height; - shell.wl_storage.write(&key, height).expect("Test failed"); + let height = shell.state.in_mem().block.height; + shell.state.write(&key, height).expect("Test failed"); shell.commit(); assert_eq!( shell - .wl_storage + .state .ethbridge_queries() .get_bridge_pool_root_at_height(2.into()) .unwrap(), KeccakHash([1; 32]) ); - shell.wl_storage.storage.block.height = 3.into(); - shell.wl_storage.delete(&key).expect("Test failed"); + shell.state.in_mem_mut().block.height = 3.into(); + shell.state.delete(&key).expect("Test failed"); let key = get_key_from_hash(&KeccakHash([2; 32])); - let height = shell.wl_storage.storage.block.height; - shell.wl_storage.write(&key, height).expect("Test failed"); + let height = shell.state.in_mem().block.height; + shell.state.write(&key, height).expect("Test failed"); shell.commit(); assert_eq!( shell - .wl_storage + .state .ethbridge_queries() 
.get_bridge_pool_root_at_height(3.into()) .unwrap(), @@ -543,7 +539,7 @@ mod test_bp_vote_extensions { .sign(shell.mode.get_protocol_key().expect("Test failed")); assert!( validate_bp_roots_vext( - &shell.wl_storage, + &shell.state, &bp_root.0, shell.get_current_decision_height() ) diff --git a/crates/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs b/crates/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs index 3954404203..e98d000de5 100644 --- a/crates/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs +++ b/crates/apps/src/lib/node/ledger/shell/vote_extensions/eth_events.rs @@ -19,7 +19,7 @@ where /// Checks the channel from the Ethereum oracle monitoring /// the fullnode and retrieves all seen Ethereum events. pub fn new_ethereum_events(&mut self) -> Vec { - let queries = self.wl_storage.ethbridge_queries(); + let queries = self.state.ethbridge_queries(); match &mut self.mode { ShellMode::Validator { eth_oracle: @@ -54,9 +54,9 @@ where > + 'iter { vote_extensions.into_iter().map(|vote_extension| { validate_eth_events_vext( - &self.wl_storage, + &self.state, &vote_extension, - self.wl_storage.storage.get_last_block_height(), + self.state.in_mem().get_last_block_height(), )?; Ok(vote_extension) }) @@ -86,7 +86,7 @@ where vote_extensions: Vec>, ) -> Option { #[allow(clippy::question_mark)] - if self.wl_storage.storage.last_block.is_none() { + if self.state.in_mem().last_block.is_none() { return None; } @@ -161,6 +161,7 @@ mod test_vote_extensions { use namada::state::collections::lazy_map::{NestedSubKey, SubKey}; use namada::tendermint::abci::types::VoteInfo; use namada::vote_ext::ethereum_events; + use namada_sdk::storage::StorageWrite; use super::validate_eth_events_vext; use crate::node::ledger::shell::test_utils::*; @@ -174,22 +175,24 @@ mod test_vote_extensions { // write bp nonce to storage shell - .wl_storage - .storage - .write(&bridge_pool::get_nonce_key(), nonce.serialize_to_vec()) + .state + .write_bytes( + &bridge_pool::get_nonce_key(), + nonce.serialize_to_vec(), + ) .expect("Test failed"); // write nam nonce to the eth events queue shell - .wl_storage - .storage + .state + .in_mem_mut() .eth_events_queue .transfers_to_namada = InnerEthEventsQueue::new_at(nonce); // eth transfers with the same nonce as the bp nonce in storage are // valid shell - .wl_storage + .state .ethbridge_queries() .validate_eth_event_nonce(&EthereumEvent::TransfersToEthereum { nonce, @@ -202,7 +205,7 @@ mod test_vote_extensions { // eth transfers with different nonces are invalid shell - .wl_storage + .state .ethbridge_queries() .validate_eth_event_nonce(&EthereumEvent::TransfersToEthereum { nonce: nonce + 1, @@ -213,7 +216,7 @@ mod test_vote_extensions { .ok_or(()) .expect_err("Test failed"); shell - .wl_storage + .state .ethbridge_queries() .validate_eth_event_nonce(&EthereumEvent::TransfersToEthereum { nonce: nonce - 1, @@ -226,7 +229,7 @@ mod test_vote_extensions { // nam transfers with nonces >= the nonce in storage are valid shell - .wl_storage + .state .ethbridge_queries() .validate_eth_event_nonce(&EthereumEvent::TransfersToNamada { nonce, @@ -236,7 +239,7 @@ mod test_vote_extensions { .ok_or(()) .expect("Test failed"); shell - .wl_storage + .state .ethbridge_queries() .validate_eth_event_nonce(&EthereumEvent::TransfersToNamada { nonce: nonce + 5, @@ -248,7 +251,7 @@ mod test_vote_extensions { // nam transfers with lower nonces are invalid shell - .wl_storage + .state .ethbridge_queries() .validate_eth_event_nonce(&EthereumEvent::TransfersToNamada { nonce: 
nonce - 1, @@ -258,7 +261,7 @@ mod test_vote_extensions { .ok_or(()) .expect_err("Test failed"); shell - .wl_storage + .state .ethbridge_queries() .validate_eth_event_nonce(&EthereumEvent::TransfersToNamada { nonce: nonce - 2, @@ -372,7 +375,7 @@ mod test_vote_extensions { .sign(&signing_key); assert!( validate_eth_events_vext( - &shell.wl_storage, + &shell.state, ðereum_events, shell.get_current_decision_height(), ) @@ -411,11 +414,11 @@ mod test_vote_extensions { } .sign(shell.mode.get_protocol_key().expect("Test failed")); - assert_eq!(shell.wl_storage.storage.get_current_epoch().0.0, 0); + assert_eq!(shell.state.in_mem().get_current_epoch().0.0, 0); // remove all validators of the next epoch let validators_handle = consensus_validator_set_handle().at(&1.into()); let consensus_in_mem = validators_handle - .iter(&shell.wl_storage) + .iter(&shell.state) .expect("Test failed") .map(|val| { let ( @@ -431,23 +434,23 @@ mod test_vote_extensions { for (val_stake, val_position) in consensus_in_mem.into_iter() { validators_handle .at(&val_stake) - .remove(&mut shell.wl_storage, &val_position) + .remove(&mut shell.state, &val_position) .expect("Test failed"); } // we advance forward to the next epoch let consensus_set: Vec = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, Epoch::default(), ) .unwrap() .into_iter() .collect(); - let params = shell.wl_storage.pos_queries().get_pos_params(); + let params = shell.state.pos_queries().get_pos_params(); let val1 = consensus_set[0].clone(); let pkh1 = get_pkh_from_address( - &shell.wl_storage, + &shell.state, ¶ms, val1.address.clone(), Epoch::default(), @@ -467,17 +470,17 @@ mod test_vote_extensions { assert_eq!(shell.start_new_epoch(Some(req)).0, 1); assert!( shell - .wl_storage + .state .pos_queries() .get_validator_from_protocol_pk(&signing_key.ref_to(), None) .is_err() ); let prev_epoch = - Epoch(shell.wl_storage.storage.get_current_epoch().0.0 - 1); + Epoch(shell.state.in_mem().get_current_epoch().0.0 - 1); assert!( shell .shell - .wl_storage + .state .pos_queries() .get_validator_from_protocol_pk( &signing_key.ref_to(), @@ -487,12 +490,8 @@ mod test_vote_extensions { ); assert!( - validate_eth_events_vext( - &shell.wl_storage, - &vote_ext, - signed_height - ) - .is_ok() + validate_eth_events_vext(&shell.state, &vote_ext, signed_height) + .is_ok() ); } @@ -516,19 +515,19 @@ mod test_vote_extensions { }], relayer: gen_established_address(), }], - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), validator_addr: address.clone(), }; ethereum_events.block_height = - shell.wl_storage.storage.get_last_block_height() + 1; + shell.state.in_mem().get_last_block_height() + 1; let signed_vext = ethereum_events .sign(shell.mode.get_protocol_key().expect("Test failed")); assert!( validate_eth_events_vext( - &shell.wl_storage, + &shell.state, &signed_vext, - shell.wl_storage.storage.get_last_block_height() + shell.state.in_mem().get_last_block_height() ) .is_err() ) @@ -552,16 +551,16 @@ mod test_vote_extensions { }], relayer: gen_established_address(), }], - block_height: shell.wl_storage.storage.get_last_block_height(), + block_height: shell.state.in_mem().get_last_block_height(), validator_addr: address.clone(), } .sign(shell.mode.get_protocol_key().expect("Test failed")); assert!( validate_eth_events_vext( - &shell.wl_storage, + &shell.state, &vote_ext, - shell.wl_storage.storage.get_last_block_height() + 
shell.state.in_mem().get_last_block_height() ) .is_err() ) diff --git a/crates/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs b/crates/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs index 5f76445c7a..05b202e46e 100644 --- a/crates/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs +++ b/crates/apps/src/lib/node/ledger/shell/vote_extensions/val_set_update.rs @@ -31,9 +31,9 @@ where > + '_ { vote_extensions.into_iter().map(|vote_extension| { validate_valset_upd_vext( - &self.wl_storage, + &self.state, &vote_extension, - self.wl_storage.storage.get_current_epoch().0, + self.state.in_mem().get_current_epoch().0, )?; Ok(vote_extension) }) @@ -60,7 +60,7 @@ where vote_extensions: Vec, ) -> Option { #[allow(clippy::question_mark)] - if self.wl_storage.storage.last_block.is_none() { + if self.state.in_mem().last_block.is_none() { return None; } @@ -142,12 +142,12 @@ mod test_vote_extensions { let eth_bridge_key = shell.mode.get_eth_bridge_keypair().expect("Test failed"); - let signing_epoch = shell.wl_storage.storage.get_current_epoch().0; + let signing_epoch = shell.state.in_mem().get_current_epoch().0; let next_epoch = signing_epoch.next(); let voting_powers = { shell - .wl_storage + .state .ethbridge_queries() .get_consensus_eth_addresses(Some(next_epoch)) .iter() @@ -168,7 +168,7 @@ mod test_vote_extensions { ); assert!( validate_valset_upd_vext( - &shell.wl_storage, + &shell.state, &validator_set_update.unwrap(), signing_epoch, ) @@ -186,11 +186,11 @@ mod test_vote_extensions { let bertha_addr = wallet::defaults::bertha_address(); (test_utils::gen_secp256k1_keypair(), bertha_key, bertha_addr) }; - let signing_epoch = shell.wl_storage.storage.get_current_epoch().0; + let signing_epoch = shell.state.in_mem().get_current_epoch().0; let voting_powers = { let next_epoch = signing_epoch.next(); shell - .wl_storage + .state .ethbridge_queries() .get_consensus_eth_addresses(Some(next_epoch)) .iter() @@ -210,7 +210,7 @@ mod test_vote_extensions { ); assert!( validate_valset_upd_vext( - &shell.wl_storage, + &shell.state, &validator_set_update.unwrap(), signing_epoch, ) @@ -228,13 +228,13 @@ mod test_vote_extensions { // validators from the current epoch sign over validator // set of the next epoch - let signing_epoch = shell.wl_storage.storage.get_current_epoch().0; + let signing_epoch = shell.state.in_mem().get_current_epoch().0; assert_eq!(signing_epoch.0, 0); // remove all validators of the next epoch let validators_handle = consensus_validator_set_handle().at(&1.into()); let consensus_in_mem = validators_handle - .iter(&shell.wl_storage) + .iter(&shell.state) .expect("Test failed") .map(|val| { let ( @@ -250,7 +250,7 @@ mod test_vote_extensions { for (val_stake, val_position) in consensus_in_mem.into_iter() { validators_handle .at(&val_stake) - .remove(&mut shell.wl_storage, &val_position) + .remove(&mut shell.state, &val_position) .expect("Test failed"); } @@ -270,7 +270,7 @@ mod test_vote_extensions { let voting_powers = { let next_epoch = signing_epoch.next(); shell - .wl_storage + .state .ethbridge_queries() .get_consensus_eth_addresses(Some(next_epoch)) .iter() @@ -288,10 +288,10 @@ mod test_vote_extensions { assert!(vote_ext.data.voting_powers.is_empty()); // we advance forward to the next epoch - let params = shell.wl_storage.pos_queries().get_pos_params(); + let params = shell.state.pos_queries().get_pos_params(); let mut consensus_set: Vec = read_consensus_validator_set_addresses_with_stake( - &shell.wl_storage, + &shell.state, 0.into(), ) 
.unwrap() @@ -300,7 +300,7 @@ mod test_vote_extensions { assert_eq!(consensus_set.len(), 1); let val1 = consensus_set.remove(0); let pkh1 = get_pkh_from_address( - &shell.wl_storage, + &shell.state, ¶ms, val1.address, Epoch::default(), @@ -320,16 +320,16 @@ mod test_vote_extensions { assert_eq!(shell.start_new_epoch(Some(req)).0, 1); assert!( shell - .wl_storage + .state .pos_queries() .get_validator_from_protocol_pk(&protocol_key.ref_to(), None) .is_err() ); - let prev_epoch = shell.wl_storage.storage.get_current_epoch().0 - 1; + let prev_epoch = shell.state.in_mem().get_current_epoch().0 - 1; assert!( shell .shell - .wl_storage + .state .pos_queries() .get_validator_from_protocol_pk( &protocol_key.ref_to(), @@ -340,12 +340,8 @@ mod test_vote_extensions { // check validation of the vext passes assert!( - validate_valset_upd_vext( - &shell.wl_storage, - &vote_ext, - signing_epoch - ) - .is_ok() + validate_valset_upd_vext(&shell.state, &vote_ext, signing_epoch) + .is_ok() ); } @@ -360,13 +356,13 @@ mod test_vote_extensions { let eth_bridge_key = shell.mode.get_eth_bridge_keypair().expect("Test failed"); - let signing_epoch = shell.wl_storage.storage.get_current_epoch().0; + let signing_epoch = shell.state.in_mem().get_current_epoch().0; #[allow(clippy::redundant_clone)] let validator_set_update = { let voting_powers = { let next_epoch = signing_epoch.next(); shell - .wl_storage + .state .ethbridge_queries() .get_consensus_eth_addresses(Some(next_epoch)) .iter() @@ -386,7 +382,7 @@ mod test_vote_extensions { }; assert!( validate_valset_upd_vext( - &shell.wl_storage, + &shell.state, &validator_set_update.unwrap(), signing_epoch, ) diff --git a/crates/apps/src/lib/node/ledger/shims/abcipp_shim.rs b/crates/apps/src/lib/node/ledger/shims/abcipp_shim.rs index b86073834c..26b9ecd8f0 100644 --- a/crates/apps/src/lib/node/ledger/shims/abcipp_shim.rs +++ b/crates/apps/src/lib/node/ledger/shims/abcipp_shim.rs @@ -135,7 +135,7 @@ impl AbcippShim { begin_block_request.header.proposer_address, ); let block_proposer = find_validator_by_raw_hash( - &self.service.wl_storage, + &self.service.state, tm_raw_hash_string, ) .unwrap() diff --git a/crates/apps/src/lib/node/ledger/storage/mod.rs b/crates/apps/src/lib/node/ledger/storage/mod.rs index 551cf41628..7da4d37fc9 100644 --- a/crates/apps/src/lib/node/ledger/storage/mod.rs +++ b/crates/apps/src/lib/node/ledger/storage/mod.rs @@ -9,14 +9,16 @@ use arse_merkle_tree::blake2b::Blake2bHasher; use arse_merkle_tree::traits::Hasher; use arse_merkle_tree::H256; use blake2b_rs::{Blake2b, Blake2bBuilder}; -use namada::state::{State, StorageHasher}; +use namada::state::StorageHasher; +use namada_sdk::state::FullAccessState; #[derive(Default)] pub struct PersistentStorageHasher(Blake2bHasher); pub type PersistentDB = rocksdb::RocksDB; -pub type PersistentStorage = State; +pub type PersistentState = + FullAccessState; impl Hasher for PersistentStorageHasher { fn write_bytes(&mut self, h: &[u8]) { @@ -66,12 +68,10 @@ mod tests { use namada::ledger::gas::STORAGE_ACCESS_GAS_PER_BYTE; use namada::ledger::ibc::storage::ibc_key; use namada::ledger::parameters::{EpochDuration, Parameters}; - use namada::state::write_log::WriteLog; - use namada::state::{ - self, StorageRead, StorageWrite, StoreType, WlStorage, DB, - }; + use namada::state::{self, StorageRead, StorageWrite, StoreType, DB}; use namada::token::conversion::update_allowed_conversions; use namada::{decode, encode, parameters}; + use namada_sdk::state::StateRead; use proptest::collection::vec; use proptest::prelude::*; 
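// [Editorial sketch, not part of this patch] The tests below exercise the new
// `PersistentState` alias (a `FullAccessState` over the RocksDB backend)
// instead of the old bare `State`. A minimal open/write/commit cycle,
// assuming the items already imported by this test module (`TempDir`,
// `ChainId`, `BlockHash`, `BlockHeight`, `Key`, `encode`, `address`,
// `is_merklized_storage_key`); the function name and key string are
// illustrative only:
fn open_write_commit_sketch() {
    let db_path =
        TempDir::new().expect("Unable to create a temporary DB directory");
    let mut state = PersistentState::open(
        db_path.path(),
        None,
        ChainId::default(),
        address::testing::nam(),
        None,
        is_merklized_storage_key,
    );
    // In-memory block data now lives behind `in_mem()` / `in_mem_mut()`.
    state
        .in_mem_mut()
        .begin_block(BlockHash::default(), BlockHeight(1))
        .expect("begin_block failed");
    // Direct DB access is spelled out as `db_write` / `db_read` / `db_delete`.
    let key = Key::parse("sketch/key").expect("cannot parse the key string");
    state.db_write(&key, encode(&1u64)).expect("write failed");
    let (value, _gas) = state.db_read(&key).expect("read failed");
    assert!(value.is_some());
    state.commit_block().expect("commit failed");
}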
use proptest::test_runner::Config; @@ -84,12 +84,12 @@ mod tests { fn test_crud_value() { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let mut storage = PersistentStorage::open( + let mut state = PersistentState::open( db_path.path(), + None, ChainId::default(), address::testing::nam(), None, - None, is_merklized_storage_key, ); let key = Key::parse("key").expect("cannot parse the key string"); @@ -98,21 +98,21 @@ mod tests { let value_bytes_len = value_bytes.len(); // before insertion - let (result, gas) = storage.has_key(&key).expect("has_key failed"); + let (result, gas) = state.db_has_key(&key).expect("has_key failed"); assert!(!result); assert_eq!(gas, key.len() as u64 * STORAGE_ACCESS_GAS_PER_BYTE); - let (result, gas) = storage.read(&key).expect("read failed"); + let (result, gas) = state.db_read(&key).expect("read failed"); assert_eq!(result, None); assert_eq!(gas, key.len() as u64 * STORAGE_ACCESS_GAS_PER_BYTE); // insert - storage.write(&key, value_bytes).expect("write failed"); + state.db_write(&key, value_bytes).expect("write failed"); // read - let (result, gas) = storage.has_key(&key).expect("has_key failed"); + let (result, gas) = state.db_has_key(&key).expect("has_key failed"); assert!(result); assert_eq!(gas, key.len() as u64 * STORAGE_ACCESS_GAS_PER_BYTE); - let (result, gas) = storage.read(&key).expect("read failed"); + let (result, gas) = state.db_read(&key).expect("read failed"); let read_value: u64 = decode(result.expect("value doesn't exist")) .expect("decoding failed"); assert_eq!(read_value, value); @@ -123,12 +123,12 @@ mod tests { ); // delete - storage.delete(&key).expect("delete failed"); + state.db_delete(&key).expect("delete failed"); // read again - let (result, _) = storage.has_key(&key).expect("has_key failed"); + let (result, _) = state.db_has_key(&key).expect("has_key failed"); assert!(!result); - let (result, _) = storage.read(&key).expect("read failed"); + let (result, _) = state.db_read(&key).expect("read failed"); assert_eq!(result, None); } @@ -136,21 +136,21 @@ mod tests { fn test_commit_block() { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let mut storage = PersistentStorage::open( + let mut state = PersistentState::open( db_path.path(), + None, ChainId::default(), address::testing::nam(), None, - None, is_merklized_storage_key, ); - storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(100)) .expect("begin_block failed"); let key = Key::parse("key").expect("cannot parse the key string"); let value: u64 = 1; let value_bytes = encode(&value); - let mut wl_storage = WlStorage::new(WriteLog::default(), storage); // initialize parameter storage let params = Parameters { max_tx_bytes: 1024 * 1024, @@ -172,50 +172,45 @@ mod tests { fee_unshielding_descriptions_limit: 0, minimum_gas_price: Default::default(), }; - parameters::init_storage(¶ms, &mut wl_storage) - .expect("Test failed"); + parameters::init_storage(¶ms, &mut state).expect("Test failed"); // insert and commit - wl_storage - .storage - .write(&key, value_bytes.clone()) - .expect("write failed"); - wl_storage.storage.block.epoch = wl_storage.storage.block.epoch.next(); - wl_storage - .storage + state.db_write(&key, &value_bytes).expect("write failed"); + state.in_mem_mut().block.epoch = state.in_mem().block.epoch.next(); + state + .in_mem_mut() .block .pred_epochs .new_epoch(BlockHeight(100)); - // make wl_storage to update conversion for a new epoch - update_allowed_conversions(&mut 
wl_storage) + // update conversion for a new epoch + update_allowed_conversions(&mut state) .expect("update conversions failed"); - wl_storage.commit_block().expect("commit failed"); + state.commit_block().expect("commit failed"); // save the last state and the storage - let root = wl_storage.storage.merkle_root().0; - let hash = wl_storage.storage.get_block_hash().0; - let address_gen = wl_storage.storage.address_gen.clone(); - drop(wl_storage); + let root = state.in_mem().merkle_root().0; + let hash = state.in_mem().get_block_hash().0; + let address_gen = state.in_mem().address_gen.clone(); + + // Release DB lock + drop(state); - // load the last state - let mut storage = PersistentStorage::open( + // Load the last state + let state = PersistentState::open( db_path.path(), + None, ChainId::default(), address::testing::nam(), None, - None, is_merklized_storage_key, ); - storage - .load_last_state() - .expect("loading the last state failed"); let (loaded_root, height) = - storage.get_state().expect("no block exists"); + state.in_mem().get_state().expect("no block exists"); assert_eq!(loaded_root.0, root); assert_eq!(height, 100); - assert_eq!(storage.get_block_hash().0, hash); - assert_eq!(storage.address_gen, address_gen); - let (val, _) = storage.read(&key).expect("read failed"); + assert_eq!(state.in_mem().get_block_hash().0, hash); + assert_eq!(state.in_mem().address_gen, address_gen); + let (val, _) = state.db_read(&key).expect("read failed"); assert_eq!(val.expect("no value"), value_bytes); } @@ -223,17 +218,14 @@ mod tests { fn test_iter() { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let mut storage = PersistentStorage::open( + let mut state = PersistentState::open( db_path.path(), + None, ChainId::default(), address::testing::nam(), None, - None, is_merklized_storage_key, ); - storage - .begin_block(BlockHash::default(), BlockHeight(100)) - .expect("begin_block failed"); let mut expected = Vec::new(); let prefix = Key::parse("prefix").expect("cannot parse the key string"); @@ -243,15 +235,15 @@ mod tests { .expect("cannot push the key segment"); let value_bytes = encode(&(i as u64)); // insert - storage - .write(&key, value_bytes.clone()) + state + .db_write(&key, value_bytes.clone()) .expect("write failed"); expected.push((key.to_string(), value_bytes)); } - let batch = PersistentStorage::batch(); - storage.commit_block(batch).expect("commit failed"); - let (iter, gas) = storage.iter_prefix(&prefix); + state.commit_block().expect("commit failed"); + + let (iter, gas) = state.db_iter_prefix(&prefix); assert_eq!(gas, (prefix.len() as u64) * STORAGE_ACCESS_GAS_PER_BYTE); for (k, v, gas) in iter { match expected.pop() { @@ -270,34 +262,38 @@ mod tests { fn test_validity_predicate() { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let mut storage = PersistentStorage::open( + let mut state = PersistentState::open( db_path.path(), + None, ChainId::default(), address::testing::nam(), None, - None, is_merklized_storage_key, ); - storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(100)) .expect("begin_block failed"); - let addr = storage.address_gen.generate_address("test".as_bytes()); + let addr = state + .in_mem_mut() + .address_gen + .generate_address("test".as_bytes()); let key = Key::validity_predicate(&addr); // not exist let (vp, gas) = - storage.validity_predicate(&addr).expect("VP load failed"); + state.validity_predicate(&addr).expect("VP load failed"); assert_eq!(vp, None); 
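// [Editorial sketch, not part of this patch] Two access levels coexist on the
// new state type: the `StorageRead`/`StorageWrite` methods (`read`, `write`,
// `delete`) go through the write log, while the `db_*` methods reach the
// database directly. The contrast, assuming a `state` opened as in these
// tests (the function name and key string are illustrative):
fn access_levels_sketch(state: &mut PersistentState) {
    let key = Key::parse("sketch/level").expect("cannot parse the key string");
    // Write-log backed write: immediately visible through `read`...
    state.write(&key, 7u64).unwrap();
    let staged: u64 = state.read(&key).unwrap().unwrap();
    assert_eq!(staged, 7);
    // ...but not through a direct DB read until the block is committed.
    let (on_disk, _gas) = state.db_read(&key).unwrap();
    assert!(on_disk.is_none());
    state.commit_block().unwrap();
    let (on_disk, _gas) = state.db_read(&key).unwrap();
    assert!(on_disk.is_some());
}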
assert_eq!(gas, (key.len() as u64) * STORAGE_ACCESS_GAS_PER_BYTE); // insert let vp1 = Hash::sha256("vp1".as_bytes()); - storage.write(&key, vp1).expect("write failed"); + state.db_write(&key, vp1).expect("write failed"); // check let (vp_code_hash, gas) = - storage.validity_predicate(&addr).expect("VP load failed"); + state.validity_predicate(&addr).expect("VP load failed"); assert_eq!(vp_code_hash.expect("no VP"), vp1); assert_eq!( gas, @@ -338,12 +334,12 @@ mod tests { ) -> namada::state::Result<()> { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let mut storage = PersistentStorage::open( + let mut state = PersistentState::open( db_path.path(), + None, ChainId::default(), address::testing::nam(), None, - None, is_merklized_storage_key, ); @@ -365,27 +361,29 @@ mod tests { let key = Key::parse("key").expect("cannot parse the key string"); for (height, write_value) in blocks_write_value.clone() { let hash = BlockHash::default(); - storage.begin_block(hash, height)?; + state.in_mem_mut().begin_block(hash, height)?; assert_eq!( - height, storage.block.height, + height, + state.in_mem().block.height, "sanity check - height is as expected" ); if write_value { - let value_bytes = encode(&storage.block.height); - storage.write(&key, value_bytes)?; + let value_bytes = encode(&state.in_mem().block.height); + state.db_write(&key, value_bytes)?; } else { - storage.delete(&key)?; + state.db_delete(&key)?; } - let batch = PersistentStorage::batch(); - storage.commit_block(batch)?; + + state.commit_block()?; } // 2. We try to read from these heights to check that we get back // expected value if was written at that block height or // `None` if it was deleted. for (height, write_value) in blocks_write_value.clone() { - let (value_bytes, _gas) = storage.read_with_height(&key, height)?; + let (value_bytes, _gas) = + state.db_read_with_height(&key, height)?; if write_value { let value_bytes = value_bytes.unwrap_or_else(|| { panic!("Couldn't read from height {height}") @@ -405,16 +403,17 @@ mod tests { let is_last_write = blocks_write_value.last().unwrap().1; // The upper bound is arbitrary. 
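// [Editorial sketch, not part of this patch] Historical lookups keep the same
// shape, just routed through the explicit `db_*` surface: a value stored at
// some height is read back with `db_read_with_height`. The helper name is
// illustrative; `decode` and the error type follow this module's imports:
fn read_at_height_sketch(
    state: &PersistentState,
    key: &Key,
    height: BlockHeight,
) -> namada::state::Result<Option<BlockHeight>> {
    // Returns the value as of `height` (plus a gas cost), or `None` if the
    // key was absent or deleted at that height.
    let (value_bytes, _gas) = state.db_read_with_height(key, height)?;
    // These tests store the writing block's height, encoded with `encode`;
    // decode it back the same way.
    Ok(value_bytes.map(|bytes| decode(bytes).unwrap()))
}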
- for height in storage.get_last_block_height().0 - ..storage.get_last_block_height().0 + 10 + for height in state.in_mem().get_last_block_height().0 + ..state.in_mem().get_last_block_height().0 + 10 { let height = BlockHeight::from(height); - let (value_bytes, _gas) = storage.read_with_height(&key, height)?; + let (value_bytes, _gas) = + state.db_read_with_height(&key, height)?; if is_last_write { let value_bytes = value_bytes.expect("Should have been written"); let value: BlockHeight = decode(value_bytes).unwrap(); - assert_eq!(value, storage.get_last_block_height()); + assert_eq!(value, state.in_mem().get_last_block_height()); } else if value_bytes.is_some() { let value: BlockHeight = decode(value_bytes.unwrap()).unwrap(); panic!("Expected no value at height {height}, got {}", value,); @@ -430,12 +429,12 @@ mod tests { ) -> namada::state::Result<()> { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let mut storage = PersistentStorage::open( + let mut state = PersistentState::open( db_path.path(), + None, ChainId::default(), address::testing::nam(), None, - None, is_merklized_storage_key, ); @@ -455,54 +454,59 @@ mod tests { // write values at Height 0 like init_storage for i in 0..num_keys { let key = ibc_key(format!("key{}", i)).unwrap(); - let value_bytes = encode(&storage.block.height); - storage.write(&key, value_bytes)?; + let value_bytes = encode(&state.in_mem().block.height); + state.db_write(&key, value_bytes)?; } let key = bridge_pool::get_signed_root_key(); let root_proof = BridgePoolRootProof::new((KeccakHash::default(), Uint::default())); let bytes = encode(&root_proof); - storage.write(&key, bytes)?; + state.db_write(&key, bytes)?; // Update and commit let hash = BlockHash::default(); let height = BlockHeight(1); - storage.begin_block(hash, height)?; + state.in_mem_mut().begin_block(hash, height)?; // Epoch 0 - storage.block.pred_epochs.new_epoch(height); - let mut batch = PersistentStorage::batch(); + state.in_mem_mut().block.pred_epochs.new_epoch(height); + let mut batch = PersistentState::batch(); for (height, key, write_type) in blocks_write_type.clone() { - if height != storage.block.height { + if height != state.in_mem().block.height { // to check the root later - roots.insert(storage.block.height, storage.merkle_root()); - if storage.block.height.0 % 5 == 0 { + roots.insert( + state.in_mem().block.height, + state.in_mem().merkle_root(), + ); + if state.in_mem().block.height.0 % 5 == 0 { // new epoch every 5 heights - storage.block.epoch = storage.block.epoch.next(); - storage.block.pred_epochs.new_epoch(storage.block.height); + state.in_mem_mut().block.epoch = + state.in_mem().block.epoch.next(); + let height = state.in_mem().block.height; + state.in_mem_mut().block.pred_epochs.new_epoch(height); } - storage.commit_block(batch)?; + state.commit_block_from_batch(batch)?; let hash = BlockHash::default(); - storage - .begin_block(hash, storage.block.height.next_height())?; - batch = PersistentStorage::batch(); + let next_height = state.in_mem().block.height.next_height(); + state.in_mem_mut().begin_block(hash, next_height)?; + batch = PersistentState::batch(); } match write_type { 0 => { // no update } 1 => { - storage.delete(&key)?; + state.db_delete(&key)?; } 2 => { - let value_bytes = encode(&storage.block.height); - storage.write(&key, value_bytes)?; + let value_bytes = encode(&state.in_mem().block.height); + state.db_write(&key, value_bytes)?; } 3 => { - storage.batch_delete_subspace_val(&mut batch, &key)?; + 
state.batch_delete_subspace_val(&mut batch, &key)?; } _ => { - let value_bytes = encode(&storage.block.height); - storage.batch_write_subspace_val( + let value_bytes = encode(&state.in_mem().block.height); + state.batch_write_subspace_val( &mut batch, &key, value_bytes, @@ -510,8 +514,8 @@ mod tests { } } } - roots.insert(storage.block.height, storage.merkle_root()); - storage.commit_block(batch)?; + roots.insert(state.in_mem().block.height, state.in_mem().merkle_root()); + state.commit_block_from_batch(batch)?; let mut current_state = HashMap::new(); for i in 0..num_keys { @@ -520,7 +524,7 @@ mod tests { } // Check a Merkle tree for (height, key, write_type) in blocks_write_type { - let tree = storage.get_merkle_tree(height, Some(StoreType::Ibc))?; + let tree = state.get_merkle_tree(height, Some(StoreType::Ibc))?; assert_eq!(tree.root().0, roots.get(&height).unwrap().0); match write_type { 0 => { @@ -549,11 +553,11 @@ mod tests { fn test_prune_merkle_tree_stores() { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let mut storage = PersistentStorage::open( + let mut state = PersistentState::open( db_path.path(), + None, ChainId::default(), address::testing::nam(), - None, Some(5), is_merklized_storage_key, ); @@ -562,40 +566,51 @@ mod tests { // the first nonce isn't written for a test skipping pruning let nonce = Uint::default(); - storage + state + .in_mem_mut() .begin_block(BlockHash::default(), new_epoch_start) .expect("begin_block failed"); let key = ibc_key("key").unwrap(); let value: u64 = 1; - storage.write(&key, encode(&value)).expect("write failed"); + state.db_write(&key, encode(&value)).expect("write failed"); + + state + .in_mem_mut() + .block + .pred_epochs + .new_epoch(new_epoch_start); - storage.block.pred_epochs.new_epoch(new_epoch_start); - let batch = PersistentStorage::batch(); - storage.commit_block(batch).expect("commit failed"); + state.commit_block().expect("commit failed"); let new_epoch_start = BlockHeight(6); - storage + state + .in_mem_mut() .begin_block(BlockHash::default(), new_epoch_start) .expect("begin_block failed"); let key = ibc_key("key2").unwrap(); let value: u64 = 2; - storage.write(&key, encode(&value)).expect("write failed"); + state.db_write(&key, encode(&value)).expect("write failed"); // the second nonce isn't written for a test skipping pruning let nonce = nonce + 1; - storage.block.epoch = storage.block.epoch.next(); - storage.block.pred_epochs.new_epoch(new_epoch_start); - let batch = PersistentStorage::batch(); - storage.commit_block(batch).expect("commit failed"); + state.in_mem_mut().block.epoch = state.in_mem().block.epoch.next(); + state + .in_mem_mut() + .block + .pred_epochs + .new_epoch(new_epoch_start); + + state.commit_block().expect("commit failed"); - let result = storage.get_merkle_tree(1.into(), Some(StoreType::Ibc)); + let result = state.get_merkle_tree(1.into(), Some(StoreType::Ibc)); assert!(result.is_ok(), "The tree at Height 1 should be restored"); let new_epoch_start = BlockHeight(11); - storage + state + .in_mem_mut() .begin_block(BlockHash::default(), new_epoch_start) .expect("begin_block failed"); @@ -603,27 +618,32 @@ mod tests { let root_proof = BridgePoolRootProof::new((KeccakHash::default(), nonce)); let bytes = encode(&root_proof); - storage.write(&signed_root_key, bytes).unwrap(); + state.db_write(&signed_root_key, bytes).unwrap(); + + state.in_mem_mut().block.epoch = state.in_mem().block.epoch.next(); + state + .in_mem_mut() + .block + .pred_epochs + .new_epoch(new_epoch_start); 
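// [Editorial sketch, not part of this patch] Epoch bookkeeping and batched
// subspace changes follow the same pattern throughout these tests: advance
// the in-memory epoch, record the height it started at in `pred_epochs`,
// stage changes in a batch, and commit the batch together with the block.
// Assuming the module's imports and an open `state`; the function name and
// key string are illustrative:
fn epoch_and_batch_sketch(
    state: &mut PersistentState,
    height: BlockHeight,
) -> namada::state::Result<()> {
    state.in_mem_mut().begin_block(BlockHash::default(), height)?;
    // Advance the epoch in memory and record where it starts.
    state.in_mem_mut().block.epoch = state.in_mem().block.epoch.next();
    state.in_mem_mut().block.pred_epochs.new_epoch(height);
    // Stage a subspace change in a batch (here a delete, mirroring the
    // Merkle-tree test above) and commit the batch with the block.
    let mut batch = PersistentState::batch();
    let key = ibc_key("sketch").unwrap();
    state.batch_delete_subspace_val(&mut batch, &key)?;
    state.commit_block_from_batch(batch)?;
    Ok(())
}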
- storage.block.epoch = storage.block.epoch.next(); - storage.block.pred_epochs.new_epoch(new_epoch_start); - let batch = PersistentStorage::batch(); - storage.commit_block(batch).expect("commit failed"); + state.commit_block().expect("commit failed"); - let result = storage.get_merkle_tree(1.into(), Some(StoreType::Ibc)); + let result = state.get_merkle_tree(1.into(), Some(StoreType::Ibc)); assert!(result.is_err(), "The tree at Height 1 should be pruned"); - let result = storage.get_merkle_tree(5.into(), Some(StoreType::Ibc)); + let result = state.get_merkle_tree(5.into(), Some(StoreType::Ibc)); assert!( result.is_err(), "The tree at Height 5 shouldn't be able to be restored" ); - let result = storage.get_merkle_tree(6.into(), Some(StoreType::Ibc)); + let result = state.get_merkle_tree(6.into(), Some(StoreType::Ibc)); assert!(result.is_ok(), "The ibc tree should be restored"); let result = - storage.get_merkle_tree(6.into(), Some(StoreType::BridgePool)); + state.get_merkle_tree(6.into(), Some(StoreType::BridgePool)); assert!(result.is_ok(), "The bridge pool tree should be restored"); - storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(12)) .expect("begin_block failed"); @@ -631,18 +651,22 @@ mod tests { let root_proof = BridgePoolRootProof::new((KeccakHash::default(), nonce)); let bytes = encode(&root_proof); - storage.write(&signed_root_key, bytes).unwrap(); - storage.block.epoch = storage.block.epoch.next(); - storage.block.pred_epochs.new_epoch(BlockHeight(12)); - let batch = PersistentStorage::batch(); - storage.commit_block(batch).expect("commit failed"); + state.db_write(&signed_root_key, bytes).unwrap(); + state.in_mem_mut().block.epoch = state.in_mem().block.epoch.next(); + state + .in_mem_mut() + .block + .pred_epochs + .new_epoch(BlockHeight(12)); + + state.commit_block().expect("commit failed"); // ibc tree should be able to be restored - let result = storage.get_merkle_tree(6.into(), Some(StoreType::Ibc)); + let result = state.get_merkle_tree(6.into(), Some(StoreType::Ibc)); assert!(result.is_ok(), "The ibc tree should be restored"); // bridge pool tree should be pruned because of the nonce let result = - storage.get_merkle_tree(6.into(), Some(StoreType::BridgePool)); + state.get_merkle_tree(6.into(), Some(StoreType::BridgePool)); assert!(result.is_err(), "The bridge pool tree should be pruned"); } @@ -651,18 +675,14 @@ mod tests { fn test_persistent_storage_prefix_iter() { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let storage = PersistentStorage::open( + let mut state = PersistentState::open( db_path.path(), + None, ChainId::default(), address::testing::nam(), None, - None, is_merklized_storage_key, ); - let mut storage = WlStorage { - storage, - write_log: Default::default(), - }; let prefix = storage::Key::parse("prefix").unwrap(); let mismatched_prefix = storage::Key::parse("different").unwrap(); @@ -671,14 +691,14 @@ mod tests { for i in sub_keys.iter() { let key = prefix.push(i).unwrap(); - storage.write(&key, i).unwrap(); + state.write(&key, i).unwrap(); let key = mismatched_prefix.push(i).unwrap(); - storage.write(&key, i / 2).unwrap(); + state.write(&key, i / 2).unwrap(); } // Then try to iterate over their prefix - let iter = state::iter_prefix(&storage, &prefix) + let iter = state::iter_prefix(&state, &prefix) .unwrap() .map(Result::unwrap); @@ -690,10 +710,10 @@ mod tests { itertools::assert_equal(iter, expected.clone()); // Commit genesis state - storage.commit_block().unwrap(); + 
state.commit_block().unwrap(); // Again, try to iterate over their prefix - let iter = state::iter_prefix(&storage, &prefix) + let iter = state::iter_prefix(&state, &prefix) .unwrap() .map(Result::unwrap); itertools::assert_equal(iter, expected); @@ -705,13 +725,13 @@ mod tests { ); for i in more_sub_keys.iter() { let key = prefix.push(i).unwrap(); - storage.write(&key, i).unwrap(); + state.write(&key, i).unwrap(); let key = mismatched_prefix.push(i).unwrap(); - storage.write(&key, i / 2).unwrap(); + state.write(&key, i / 2).unwrap(); } - let iter = state::iter_prefix(&storage, &prefix) + let iter = state::iter_prefix(&state, &prefix) .unwrap() .map(Result::unwrap); @@ -727,11 +747,11 @@ mod tests { let delete_keys = [2, 0, -10, 123]; for i in delete_keys.iter() { let key = prefix.push(i).unwrap(); - storage.delete(&key).unwrap() + state.delete(&key).unwrap() } // Check that iter_prefix doesn't return deleted keys anymore - let iter = state::iter_prefix(&storage, &prefix) + let iter = state::iter_prefix(&state, &prefix) .unwrap() .map(Result::unwrap); let expected = merged @@ -741,10 +761,10 @@ mod tests { itertools::assert_equal(iter, expected.clone()); // Commit genesis state - storage.commit_block().unwrap(); + state.commit_block().unwrap(); // And check again - let iter = state::iter_prefix(&storage, &prefix) + let iter = state::iter_prefix(&state, &prefix) .unwrap() .map(Result::unwrap); itertools::assert_equal(iter, expected); @@ -766,21 +786,17 @@ mod tests { fn test_persistent_storage_writing_without_merklizing_or_diffs() { let db_path = TempDir::new().expect("Unable to create a temporary DB directory"); - let storage = PersistentStorage::open( + let mut state = PersistentState::open( db_path.path(), + None, ChainId::default(), address::testing::nam(), None, - None, merkle_tree_key_filter, ); - let mut wls = WlStorage { - storage, - write_log: Default::default(), - }; // Start the first block let first_height = BlockHeight::first(); - wls.storage.block.height = first_height; + state.in_mem_mut().block.height = first_height; let key1 = test_key_1(); let val1 = 1u64; @@ -788,61 +804,60 @@ mod tests { let val2 = 2u64; // Standard write of key-val-1 - wls.write(&key1, val1).unwrap(); + state.write(&key1, val1).unwrap(); - // Read from WlStorage should return val1 - let res = wls.read::(&key1).unwrap().unwrap(); + // Read from TestState should return val1 + let res = state.read::(&key1).unwrap().unwrap(); assert_eq!(res, val1); // Read from Storage shouldn't return val1 because the block hasn't been // committed - let (res, _) = wls.storage.read(&key1).unwrap(); + let (res, _) = state.db_read(&key1).unwrap(); assert!(res.is_none()); // Write key-val-2 without merklizing or diffs - wls.write(&key2, val2).unwrap(); + state.write(&key2, val2).unwrap(); - // Read from WlStorage should return val2 - let res = wls.read::(&key2).unwrap().unwrap(); + // Read from TestState should return val2 + let res = state.read::(&key2).unwrap().unwrap(); assert_eq!(res, val2); // Commit block and storage changes - wls.commit_block().unwrap(); - wls.storage.block.height = wls.storage.block.height.next_height(); - let second_height = wls.storage.block.height; + state.commit_block().unwrap(); + state.in_mem_mut().block.height = + state.in_mem_mut().block.height.next_height(); + let second_height = state.in_mem().block.height; // Read key1 from Storage should return val1 - let (res1, _) = wls.storage.read(&key1).unwrap(); + let (res1, _) = state.db_read(&key1).unwrap(); let res1 = 
u64::try_from_slice(&res1.unwrap()).unwrap(); assert_eq!(res1, val1); // Check merkle tree inclusion of key-val-1 explicitly - let is_merklized1 = wls.storage.block.tree.has_key(&key1).unwrap(); + let is_merklized1 = state.in_mem().block.tree.has_key(&key1).unwrap(); assert!(is_merklized1); // Key2 should be in storage. Confirm by reading from - // WlStorage and also by reading Storage subspace directly - let res2 = wls.read::(&key2).unwrap().unwrap(); + // TestState and also by reading Storage subspace directly + let res2 = state.read::(&key2).unwrap().unwrap(); assert_eq!(res2, val2); - let res2 = wls.storage.db.read_subspace_val(&key2).unwrap().unwrap(); + let res2 = state.db().read_subspace_val(&key2).unwrap().unwrap(); let res2 = u64::try_from_slice(&res2).unwrap(); assert_eq!(res2, val2); // Check explicitly that key-val-2 is not in merkle tree - let is_merklized2 = wls.storage.block.tree.has_key(&key2).unwrap(); + let is_merklized2 = state.in_mem().block.tree.has_key(&key2).unwrap(); assert!(!is_merklized2); // Check that the proper diffs exist for key-val-1 - let res1 = wls - .storage - .db + let res1 = state + .db() .read_diffs_val(&key1, first_height, true) .unwrap(); assert!(res1.is_none()); - let res1 = wls - .storage - .db + let res1 = state + .db() .read_diffs_val(&key1, first_height, false) .unwrap() .unwrap(); @@ -851,15 +866,13 @@ mod tests { // Check that there are diffs for key-val-2 in block 0, since all keys // need to have diffs for at least 1 block for rollback purposes - let res2 = wls - .storage - .db + let res2 = state + .db() .read_diffs_val(&key2, first_height, true) .unwrap(); assert!(res2.is_none()); - let res2 = wls - .storage - .db + let res2 = state + .db() .read_diffs_val(&key2, first_height, false) .unwrap() .unwrap(); @@ -867,84 +880,77 @@ mod tests { assert_eq!(res2, val2); // Delete the data then commit the block - wls.delete(&key1).unwrap(); - wls.delete(&key2).unwrap(); - wls.commit_block().unwrap(); - wls.storage.block.height = wls.storage.block.height.next_height(); + state.delete(&key1).unwrap(); + state.delete(&key2).unwrap(); + state.commit_block().unwrap(); + state.in_mem_mut().block.height = + state.in_mem().block.height.next_height(); // Check the key-vals are removed from the storage subspace - let res1 = wls.read::(&key1).unwrap(); - let res2 = wls.read::(&key2).unwrap(); + let res1 = state.read::(&key1).unwrap(); + let res2 = state.read::(&key2).unwrap(); assert!(res1.is_none() && res2.is_none()); - let res1 = wls.storage.db.read_subspace_val(&key1).unwrap(); - let res2 = wls.storage.db.read_subspace_val(&key2).unwrap(); + let res1 = state.db().read_subspace_val(&key1).unwrap(); + let res2 = state.db().read_subspace_val(&key2).unwrap(); assert!(res1.is_none() && res2.is_none()); // Check that the key-vals don't exist in the merkle tree anymore - let is_merklized1 = wls.storage.block.tree.has_key(&key1).unwrap(); - let is_merklized2 = wls.storage.block.tree.has_key(&key2).unwrap(); + let is_merklized1 = state.in_mem().block.tree.has_key(&key1).unwrap(); + let is_merklized2 = state.in_mem().block.tree.has_key(&key2).unwrap(); assert!(!is_merklized1 && !is_merklized2); // Check that key-val-1 diffs are properly updated for blocks 0 and 1 - let res1 = wls - .storage - .db + let res1 = state + .db() .read_diffs_val(&key1, first_height, true) .unwrap(); assert!(res1.is_none()); - let res1 = wls - .storage - .db + let res1 = state + .db() .read_diffs_val(&key1, first_height, false) .unwrap() .unwrap(); let res1 = 
u64::try_from_slice(&res1).unwrap(); assert_eq!(res1, val1); - let res1 = wls - .storage - .db + let res1 = state + .db() .read_diffs_val(&key1, second_height, true) .unwrap() .unwrap(); let res1 = u64::try_from_slice(&res1).unwrap(); assert_eq!(res1, val1); - let res1 = wls - .storage - .db + let res1 = state + .db() .read_diffs_val(&key1, second_height, false) .unwrap(); assert!(res1.is_none()); // Check that key-val-2 diffs don't exist for block 0 anymore - let res2 = wls - .storage - .db + let res2 = state + .db() .read_diffs_val(&key2, first_height, true) .unwrap(); assert!(res2.is_none()); - let res2 = wls - .storage - .db + let res2 = state + .db() .read_diffs_val(&key2, first_height, false) .unwrap(); assert!(res2.is_none()); // Check that the block 1 diffs for key-val-2 include an "old" value of // val2 and no "new" value - let res2 = wls - .storage - .db + let res2 = state + .db() .read_diffs_val(&key2, second_height, true) .unwrap() .unwrap(); let res2 = u64::try_from_slice(&res2).unwrap(); assert_eq!(res2, val2); - let res2 = wls - .storage - .db + let res2 = state + .db() .read_diffs_val(&key2, second_height, false) .unwrap(); assert!(res2.is_none()); diff --git a/crates/benches/host_env.rs b/crates/benches/host_env.rs index 1e97a5936d..a3955d44c4 100644 --- a/crates/benches/host_env.rs +++ b/crates/benches/host_env.rs @@ -194,13 +194,13 @@ fn write_log_read(c: &mut Criterion) { group.throughput(criterion::Throughput::Bytes(throughput_len)); // Generate random bytes for the value and write it to storage let value: Vec = (0..value_len).map(|_| rand::random()).collect(); - shell.wl_storage.write_log.write(&key, value).unwrap(); + shell.state.write_log_mut().write(&key, value).unwrap(); group.bench_function( format!("key: {key}, bytes: {throughput_len}"), |b| { b.iter_with_large_drop(|| { - shell.wl_storage.write_log.read(&key).0.unwrap() + shell.state.write_log().read(&key).0.unwrap() }) }, ); @@ -228,19 +228,13 @@ fn storage_read(c: &mut Criterion) { // NOTE: just like for storage writes, we don't have control on when // data is actually flushed to disk, so just benchmark the read function // without caring if data is actually in memory or on disk - shell.wl_storage.storage.write(&key, &value).unwrap(); + shell.state.db_write(&key, &value).unwrap(); group.bench_function( format!("key: {key}, bytes: {throughput_len}"), |b| { b.iter_with_large_drop(|| { - shell - .wl_storage - .storage - .db - .read_subspace_val(&key) - .unwrap() - .unwrap() + shell.state.db().read_subspace_val(&key).unwrap().unwrap() }) }, ); @@ -273,7 +267,7 @@ fn write_log_write(c: &mut Criterion) { (0..value_len).map(|_| rand::random()).collect() }, |value| { - shell.wl_storage.write_log.write(&key, value).unwrap() + shell.state.write_log_mut().write(&key, value).unwrap() }, criterion::BatchSize::SmallInput, ) @@ -298,7 +292,7 @@ fn storage_write(c: &mut Criterion) { // so we set this as the throughput parameter let throughput_len = value_len + key.len() as u64; group.throughput(criterion::Throughput::Bytes(throughput_len)); - let block_height = shell.wl_storage.storage.block.height; + let block_height = shell.state.in_mem().block.height; group.bench_function( format!("key: {key}, bytes: {throughput_len}"), @@ -315,9 +309,8 @@ fn storage_write(c: &mut Criterion) { // just benchmark the write operation here without // focusing on the hardware write shell - .wl_storage - .storage - .db + .state + .db_mut() .write_subspace_val(block_height, &key, value, true) .unwrap(); }, diff --git 
a/crates/benches/native_vps.rs b/crates/benches/native_vps.rs index 3806d4f7c7..e9c9708b50 100644 --- a/crates/benches/native_vps.rs +++ b/crates/benches/native_vps.rs @@ -46,6 +46,7 @@ use namada::sdk::masp_primitives::transaction::Transaction; use namada::state::{Epoch, StorageRead, StorageWrite, TxIndex}; use namada::token::{Amount, Transfer}; use namada::tx::{Code, Section, Tx}; +use namada::validity_predicate::VpSentinel; use namada_apps::bench_utils::{ generate_foreign_key_tx, BenchShell, BenchShieldedCtx, ALBERT_PAYMENT_ADDRESS, ALBERT_SPENDING_KEY, BERTHA_PAYMENT_ADDRESS, @@ -106,13 +107,13 @@ fn governance(c: &mut Criterion) { let content_section = Section::ExtraData(Code::new(vec![], None)); let params = - proof_of_stake::storage::read_pos_params(&shell.wl_storage) + proof_of_stake::storage::read_pos_params(&shell.state) .unwrap(); let voting_start_epoch = Epoch(2 + params.pipeline_len + params.unbonding_len); // Must start after current epoch debug_assert_eq!( - shell.wl_storage.get_block_epoch().unwrap().next(), + shell.state.get_block_epoch().unwrap().next(), voting_start_epoch ); shell.generate_tx( @@ -137,12 +138,12 @@ fn governance(c: &mut Criterion) { let max_proposal_content_key = namada::governance::storage::keys::get_max_proposal_content_key(); let max_code_size: u64 = shell - .wl_storage + .state .read(&max_code_size_key) .expect("Error while reading from storage") .expect("Missing max_code_size parameter in storage"); let max_proposal_content_size: u64 = shell - .wl_storage + .state .read(&max_proposal_content_key) .expect("Error while reading from storage") .expect( @@ -158,13 +159,13 @@ fn governance(c: &mut Criterion) { )); let params = - proof_of_stake::storage::read_pos_params(&shell.wl_storage) + proof_of_stake::storage::read_pos_params(&shell.state) .unwrap(); let voting_start_epoch = Epoch(2 + params.pipeline_len + params.unbonding_len); // Must start after current epoch debug_assert_eq!( - shell.wl_storage.get_block_epoch().unwrap().next(), + shell.state.get_block_epoch().unwrap().next(), voting_start_epoch ); shell.generate_tx( @@ -192,20 +193,22 @@ fn governance(c: &mut Criterion) { shell.execute_tx(&signed_tx); let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let governance = GovernanceVp { ctx: Ctx::new( &Address::Internal(InternalAddress::Governance), - &shell.wl_storage.storage, - &shell.wl_storage.write_log, + &shell.state, &signed_tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -265,15 +268,15 @@ fn governance(c: &mut Criterion) { // shell.execute_tx(&tx); // let (verifiers, keys_changed) = shell -// .wl_storage +// .state // .write_log // .verifiers_and_changed_keys(&BTreeSet::default()); // let slash_fund = SlashFundVp { // ctx: Ctx::new( // &Address::Internal(InternalAddress::SlashFund), -// &shell.wl_storage.storage, -// &shell.wl_storage.write_log, +// &shell.state.storage, +// &shell.state.write_log, // &tx, // &TxIndex(0), // @@ -366,20 +369,22 @@ fn ibc(c: &mut Criterion) { shell.execute_tx(signed_tx); let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); + 
let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let ibc = Ibc { ctx: Ctx::new( &Address::Internal(InternalAddress::Ibc), - &shell.wl_storage.storage, - &shell.wl_storage.write_log, + &shell.state, signed_tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -432,20 +437,22 @@ fn vp_multitoken(c: &mut Criterion) { let mut shell = BenchShell::default(); shell.execute_tx(signed_tx); let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let multitoken = MultitokenVp { ctx: Ctx::new( &Address::Internal(InternalAddress::Multitoken), - &shell.wl_storage.storage, - &shell.wl_storage.write_log, + &shell.state, signed_tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -502,20 +509,12 @@ fn setup_storage_for_masp_verification( // Update the anchor in storage let tree_key = namada::token::storage_key::masp_commitment_tree_key(); - let updated_tree: CommitmentTree = shielded_ctx - .shell - .wl_storage - .read(&tree_key) - .unwrap() - .unwrap(); + let updated_tree: CommitmentTree = + shielded_ctx.shell.state.read(&tree_key).unwrap().unwrap(); let anchor_key = namada::token::storage_key::masp_commitment_anchor_key( updated_tree.root(), ); - shielded_ctx - .shell - .wl_storage - .write(&anchor_key, ()) - .unwrap(); + shielded_ctx.shell.state.write(&anchor_key, ()).unwrap(); shielded_ctx.shell.commit_block(); let (mut shielded_ctx, signed_tx) = match bench_name { @@ -550,20 +549,22 @@ fn masp(c: &mut Criterion) { setup_storage_for_masp_verification(bench_name); let (verifiers, keys_changed) = shielded_ctx .shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let masp = MaspVp { ctx: Ctx::new( &Address::Internal(InternalAddress::Masp), - &shielded_ctx.shell.wl_storage.storage, - &shielded_ctx.shell.wl_storage.write_log, + &shielded_ctx.shell.state, &signed_tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter( - &TxGasMeter::new_from_sub_limit(u64::MAX.into()), - ), + &gas_meter, + &sentinel, &keys_changed, &verifiers, shielded_ctx.shell.vp_wasm_cache.clone(), @@ -625,7 +626,7 @@ fn pgf(c: &mut Criterion) { let mut shell = BenchShell::default(); namada::governance::pgf::storage::keys::stewards_handle() .insert( - &mut shell.wl_storage, + &mut shell.state, defaults::albert_address(), StewardDetail::base(defaults::albert_address()), ) @@ -665,20 +666,22 @@ fn pgf(c: &mut Criterion) { shell.execute_tx(&signed_tx); let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let pgf = PgfVp 
{ ctx: Ctx::new( &Address::Internal(InternalAddress::Pgf), - &shell.wl_storage.storage, - &shell.wl_storage.write_log, + &shell.state, &signed_tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -704,8 +707,7 @@ fn pgf(c: &mut Criterion) { fn eth_bridge_nut(c: &mut Criterion) { let mut shell = BenchShell::default(); - let native_erc20_addres = - read_native_erc20_address(&shell.wl_storage).unwrap(); + let native_erc20_addres = read_native_erc20_address(&shell.state).unwrap(); let signed_tx = { let data = PendingTransfer { @@ -720,7 +722,7 @@ fn eth_bridge_nut(c: &mut Criterion) { gas_fee: GasFee { amount: Amount::from(100), payer: defaults::albert_address(), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, }; shell.generate_tx( @@ -736,22 +738,24 @@ fn eth_bridge_nut(c: &mut Criterion) { shell.execute_tx(&signed_tx); let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); let vp_address = Address::Internal(InternalAddress::Nut(native_erc20_addres)); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let nut = NonUsableTokens { ctx: Ctx::new( &vp_address, - &shell.wl_storage.storage, - &shell.wl_storage.write_log, + &shell.state, &signed_tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -774,8 +778,7 @@ fn eth_bridge_nut(c: &mut Criterion) { fn eth_bridge(c: &mut Criterion) { let mut shell = BenchShell::default(); - let native_erc20_addres = - read_native_erc20_address(&shell.wl_storage).unwrap(); + let native_erc20_addres = read_native_erc20_address(&shell.state).unwrap(); let signed_tx = { let data = PendingTransfer { @@ -790,7 +793,7 @@ fn eth_bridge(c: &mut Criterion) { gas_fee: GasFee { amount: Amount::from(100), payer: defaults::albert_address(), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, }; shell.generate_tx( @@ -806,21 +809,23 @@ fn eth_bridge(c: &mut Criterion) { shell.execute_tx(&signed_tx); let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); let vp_address = Address::Internal(InternalAddress::EthBridge); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let eth_bridge = EthBridge { ctx: Ctx::new( &vp_address, - &shell.wl_storage.storage, - &shell.wl_storage.write_log, + &shell.state, &signed_tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -847,8 +852,7 @@ fn eth_bridge_pool(c: &mut Criterion) { // numerous accesses to storage that we already account for, so no need to // benchmark specific sections of it like for the ibc native vp let mut shell = BenchShell::default(); - let native_erc20_addres = - read_native_erc20_address(&shell.wl_storage).unwrap(); + let native_erc20_addres = 
read_native_erc20_address(&shell.state).unwrap(); // Whitelist NAM token let cap_key = whitelist::Key { @@ -856,24 +860,21 @@ fn eth_bridge_pool(c: &mut Criterion) { suffix: whitelist::KeyType::Cap, } .into(); - shell - .wl_storage - .write(&cap_key, Amount::from(1_000)) - .unwrap(); + shell.state.write(&cap_key, Amount::from(1_000)).unwrap(); let whitelisted_key = whitelist::Key { asset: native_erc20_addres, suffix: whitelist::KeyType::Whitelisted, } .into(); - shell.wl_storage.write(&whitelisted_key, true).unwrap(); + shell.state.write(&whitelisted_key, true).unwrap(); let denom_key = whitelist::Key { asset: native_erc20_addres, suffix: whitelist::KeyType::Denomination, } .into(); - shell.wl_storage.write(&denom_key, 0).unwrap(); + shell.state.write(&denom_key, 0).unwrap(); let signed_tx = { let data = PendingTransfer { @@ -888,7 +889,7 @@ fn eth_bridge_pool(c: &mut Criterion) { gas_fee: GasFee { amount: Amount::from(100), payer: defaults::albert_address(), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, }; shell.generate_tx( @@ -904,21 +905,23 @@ fn eth_bridge_pool(c: &mut Criterion) { shell.execute_tx(&signed_tx); let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); let vp_address = Address::Internal(InternalAddress::EthBridgePool); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let bridge_pool = BridgePoolVp { ctx: Ctx::new( &vp_address, - &shell.wl_storage.storage, - &shell.wl_storage.write_log, + &shell.state, &signed_tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -957,13 +960,10 @@ fn parameters(c: &mut Criterion) { // Simulate governance proposal to modify a parameter let min_proposal_fund_key = namada::governance::storage::keys::get_min_proposal_fund_key(); - shell - .wl_storage - .write(&min_proposal_fund_key, 1_000) - .unwrap(); + shell.state.write(&min_proposal_fund_key, 1_000).unwrap(); let proposal_key = namada::governance::storage::keys::get_proposal_execution_key(0); - shell.wl_storage.write(&proposal_key, 0).unwrap(); + shell.state.write(&proposal_key, 0).unwrap(); // Return a dummy tx for validation let mut tx = @@ -977,21 +977,23 @@ fn parameters(c: &mut Criterion) { }; let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); let vp_address = Address::Internal(InternalAddress::Parameters); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let parameters = ParametersVp { ctx: Ctx::new( &vp_address, - &shell.wl_storage.storage, - &shell.wl_storage.write_log, + &shell.state, &signed_tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -1033,13 +1035,10 @@ fn pos(c: &mut Criterion) { // Simulate governance proposal to modify a parameter let min_proposal_fund_key = namada::governance::storage::keys::get_min_proposal_fund_key(); - shell - .wl_storage - .write(&min_proposal_fund_key, 1_000) - .unwrap(); + 
shell.state.write(&min_proposal_fund_key, 1_000).unwrap(); let proposal_key = namada::governance::storage::keys::get_proposal_execution_key(0); - shell.wl_storage.write(&proposal_key, 0).unwrap(); + shell.state.write(&proposal_key, 0).unwrap(); // Return a dummy tx for validation let mut tx = @@ -1053,21 +1052,23 @@ fn pos(c: &mut Criterion) { }; let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); let vp_address = Address::Internal(InternalAddress::PoS); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let pos = PosVP { ctx: Ctx::new( &vp_address, - &shell.wl_storage.storage, - &shell.wl_storage.write_log, + &shell.state, &signed_tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -1154,20 +1155,22 @@ fn ibc_vp_validate_action(c: &mut Criterion) { shell.execute_tx(signed_tx); let tx_data = signed_tx.data().unwrap(); let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let ibc = Ibc { ctx: Ctx::new( &Address::Internal(InternalAddress::Ibc), - &shell.wl_storage.storage, - &shell.wl_storage.write_log, + &shell.state, signed_tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -1252,20 +1255,22 @@ fn ibc_vp_execute_action(c: &mut Criterion) { shell.execute_tx(signed_tx); let tx_data = signed_tx.data().unwrap(); let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let ibc = Ibc { ctx: Ctx::new( &Address::Internal(InternalAddress::Ibc), - &shell.wl_storage.storage, - &shell.wl_storage.write_log, + &shell.state, signed_tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), diff --git a/crates/benches/process_wrapper.rs b/crates/benches/process_wrapper.rs index 4dad8ae840..da26796e78 100644 --- a/crates/benches/process_wrapper.rs +++ b/crates/benches/process_wrapper.rs @@ -3,7 +3,6 @@ use namada::core::address; use namada::core::key::RefTo; use namada::core::storage::BlockHeight; use namada::core::time::DateTimeUtc; -use namada::ledger::storage::TempWlStorage; use namada::token::{Amount, DenominatedAmount, Transfer}; use namada::tx::data::{Fee, WrapperTx}; use namada::tx::Signature; @@ -15,7 +14,7 @@ fn process_tx(c: &mut Criterion) { let mut shell = BenchShell::default(); // Advance chain height to allow the inclusion of wrapper txs by the block // space allocator - shell.wl_storage.storage.last_block.as_mut().unwrap().height = + shell.state.in_mem_mut().last_block.as_mut().unwrap().height = BlockHeight(2); let mut tx = shell.generate_tx( @@ -60,10 +59,10 @@ fn 
process_tx(c: &mut Criterion) { b.iter_batched( || { ( - shell.wl_storage.storage.tx_queue.clone(), + shell.state.in_mem().tx_queue.clone(), // Prevent block out of gas and replay protection - TempWlStorage::new(&shell.wl_storage.storage), - ValidationMeta::from(&shell.wl_storage), + shell.state.with_temp_write_log(), + ValidationMeta::from(shell.state.read_only()), shell.vp_wasm_cache.clone(), shell.tx_wasm_cache.clone(), defaults::daewon_address(), @@ -71,7 +70,7 @@ fn process_tx(c: &mut Criterion) { }, |( tx_queue, - mut temp_wl_storage, + mut temp_state, mut validation_meta, mut vp_wasm_cache, mut tx_wasm_cache, @@ -84,7 +83,7 @@ fn process_tx(c: &mut Criterion) { &wrapper, &mut tx_queue.iter(), &mut validation_meta, - &mut temp_wl_storage, + &mut temp_state, datetime, &mut vp_wasm_cache, &mut tx_wasm_cache, diff --git a/crates/benches/txs.rs b/crates/benches/txs.rs index 22119c3c5c..04a40f74b7 100644 --- a/crates/benches/txs.rs +++ b/crates/benches/txs.rs @@ -279,13 +279,12 @@ fn withdraw(c: &mut Criterion) { }; shell.execute_tx(&unbond_tx); - shell.wl_storage.commit_tx(); + shell.state.commit_tx(); // Advance Epoch for pipeline and unbonding length - let params = proof_of_stake::storage::read_pos_params( - &shell.wl_storage, - ) - .unwrap(); + let params = + proof_of_stake::storage::read_pos_params(&shell.state) + .unwrap(); let advance_epochs = params.pipeline_len + params.unbonding_len; @@ -327,8 +326,8 @@ fn redelegate(c: &mut Criterion) { || { let shell = BenchShell::default(); // Find the other genesis validator - let current_epoch = shell.wl_storage.get_block_epoch().unwrap(); - let validators = namada::proof_of_stake::storage::read_consensus_validator_set_addresses(&shell.inner.wl_storage, current_epoch).unwrap(); + let current_epoch = shell.state.get_block_epoch().unwrap(); + let validators = namada::proof_of_stake::storage::read_consensus_validator_set_addresses(&shell.inner.state, current_epoch).unwrap(); let validator_2 = validators.into_iter().find(|addr| addr != &defaults::validator_address()).expect("There must be another validator to redelegate to"); // Prepare the redelegation tx (shell, redelegation(validator_2)) @@ -479,7 +478,7 @@ fn init_proposal(c: &mut Criterion) { let max_proposal_content_key = namada::governance::storage::keys::get_max_proposal_content_key(); let max_code_size: u64 = shell - .wl_storage + .state .read(&max_code_size_key) .expect("Error while reading from storage") .expect( @@ -487,7 +486,7 @@ fn init_proposal(c: &mut Criterion) { storage", ); let max_proposal_content_size: u64 = shell - .wl_storage + .state .read(&max_proposal_content_key) .expect("Error while reading from storage") .expect( @@ -645,7 +644,7 @@ fn become_validator(c: &mut Criterion) { let mut shell = BenchShell::default(); // Initialize the account to be able to use it shell - .wl_storage + .state .write_bytes( &namada::core::storage::Key::validity_predicate( &address, @@ -832,11 +831,11 @@ fn unjail_validator(c: &mut Criterion) { let mut shell = BenchShell::default(); // Jail the validator - let pos_params = read_pos_params(&shell.wl_storage).unwrap(); - let current_epoch = shell.wl_storage.storage.block.epoch; + let pos_params = read_pos_params(&shell.state).unwrap(); + let current_epoch = shell.state.in_mem().block.epoch; let evidence_epoch = current_epoch.prev(); proof_of_stake::slashing::slash( - &mut shell.wl_storage, + &mut shell.state, &pos_params, current_epoch, evidence_epoch, @@ -847,7 +846,7 @@ fn unjail_validator(c: &mut Criterion) { ) .unwrap(); - 
shell.wl_storage.commit_tx(); + shell.state.commit_tx(); shell.commit_block(); // Advance by slash epoch offset epochs for _ in 0..=pos_params.slash_processing_epoch_offset() { @@ -868,7 +867,7 @@ fn tx_bridge_pool(c: &mut Criterion) { let data = PendingTransfer { transfer: namada::core::eth_bridge_pool::TransferToEthereum { kind: namada::core::eth_bridge_pool::TransferToEthereumKind::Erc20, - asset: read_native_erc20_address(&shell.wl_storage).unwrap(), + asset: read_native_erc20_address(&shell.state).unwrap(), recipient: namada::core::ethereum_events::EthAddress([1u8; 20]), sender: defaults::albert_address(), amount: Amount::from(1), @@ -876,7 +875,7 @@ fn tx_bridge_pool(c: &mut Criterion) { gas_fee: GasFee { amount: Amount::from(100), payer: defaults::albert_address(), - token: shell.wl_storage.storage.native_token.clone(), + token: shell.state.in_mem().native_token.clone(), }, }; let tx = shell.generate_tx( @@ -902,7 +901,7 @@ fn resign_steward(c: &mut Criterion) { let mut shell = BenchShell::default(); namada::governance::pgf::storage::keys::stewards_handle() .insert( - &mut shell.wl_storage, + &mut shell.state, defaults::albert_address(), StewardDetail::base(defaults::albert_address()), ) @@ -931,7 +930,7 @@ fn update_steward_commission(c: &mut Criterion) { let mut shell = BenchShell::default(); namada::governance::pgf::storage::keys::stewards_handle() .insert( - &mut shell.wl_storage, + &mut shell.state, defaults::albert_address(), StewardDetail::base(defaults::albert_address()), ) @@ -995,16 +994,16 @@ fn reactivate_validator(c: &mut Criterion) { let mut shell = BenchShell::default(); // Deactivate the validator - let pos_params = read_pos_params(&shell.wl_storage).unwrap(); - let current_epoch = shell.wl_storage.storage.block.epoch; + let pos_params = read_pos_params(&shell.state).unwrap(); + let current_epoch = shell.state.in_mem().block.epoch; proof_of_stake::deactivate_validator( - &mut shell.wl_storage, + &mut shell.state, &defaults::validator_address(), current_epoch, ) .unwrap(); - shell.wl_storage.commit_tx(); + shell.state.commit_tx(); shell.commit_block(); // Advance by slash epoch offset epochs for _ in 0..=pos_params.pipeline_len { @@ -1054,10 +1053,9 @@ fn claim_rewards(c: &mut Criterion) { let mut shell = BenchShell::default(); // Advance Epoch for pipeline and unbonding length - let params = proof_of_stake::storage::read_pos_params( - &shell.wl_storage, - ) - .unwrap(); + let params = + proof_of_stake::storage::read_pos_params(&shell.state) + .unwrap(); let advance_epochs = params.pipeline_len + params.unbonding_len; diff --git a/crates/benches/vps.rs b/crates/benches/vps.rs index d552c32a9a..87d1d49094 100644 --- a/crates/benches/vps.rs +++ b/crates/benches/vps.rs @@ -1,3 +1,4 @@ +use std::cell::RefCell; use std::collections::BTreeSet; use criterion::{criterion_group, criterion_main, Criterion}; @@ -137,12 +138,15 @@ fn vp_user(c: &mut Criterion) { let mut shell = BenchShell::default(); shell.execute_tx(signed_tx); let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); group.bench_function(bench_name, |b| { b.iter(|| { + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); assert!( // NOTE: the wasm code is always in cache so we don't // include here the cost to read and compile the vp code @@ -151,11 +155,8 @@ fn vp_user(c: &mut Criterion) { signed_tx, &TxIndex(0), &defaults::albert_address(), - 
&shell.wl_storage.storage, - &shell.wl_storage.write_log, - &mut VpGasMeter::new_from_tx_meter( - &TxGasMeter::new_from_sub_limit(u64::MAX.into()) - ), + &shell.state, + &gas_meter, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -271,37 +272,37 @@ fn vp_implicit(c: &mut Criterion) { if bench_name != "reveal_pk" { // Reveal public key shell.execute_tx(&reveal_pk); - shell.wl_storage.commit_tx(); + shell.state.commit_tx(); shell.commit_block(); } if bench_name == "transfer" || bench_name == "pos" { // Transfer some tokens to the implicit address shell.execute_tx(&received_transfer); - shell.wl_storage.commit_tx(); + shell.state.commit_tx(); shell.commit_block(); } // Run the tx to validate shell.execute_tx(tx); let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); group.bench_function(bench_name, |b| { b.iter(|| { + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); assert!( run::vp( vp_code_hash, tx, &TxIndex(0), &Address::from(&implicit_account.to_public()), - &shell.wl_storage.storage, - &shell.wl_storage.write_log, - &mut VpGasMeter::new_from_tx_meter( - &TxGasMeter::new_from_sub_limit(u64::MAX.into()) - ), + &shell.state, + &gas_meter, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), @@ -437,23 +438,23 @@ fn vp_validator(c: &mut Criterion) { shell.execute_tx(signed_tx); let (verifiers, keys_changed) = shell - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&BTreeSet::default()); group.bench_function(bench_name, |b| { b.iter(|| { + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); assert!( run::vp( vp_code_hash, signed_tx, &TxIndex(0), &defaults::validator_address(), - &shell.wl_storage.storage, - &shell.wl_storage.write_log, - &mut VpGasMeter::new_from_tx_meter( - &TxGasMeter::new_from_sub_limit(u64::MAX.into()) - ), + &shell.state, + &gas_meter, &keys_changed, &verifiers, shell.vp_wasm_cache.clone(), diff --git a/crates/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs b/crates/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs index 47679b74da..e9fcc3caef 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs @@ -7,7 +7,7 @@ use namada_core::key::{common, SignableEthMessage}; use namada_core::storage::BlockHeight; use namada_core::token::Amount; use namada_proof_of_stake::pos_queries::PosQueries; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_state::{DBIter, StorageHasher, WlState, DB}; use namada_storage::{StorageRead, StorageWrite}; use namada_tx::data::TxResult; use namada_tx::Signed; @@ -25,7 +25,7 @@ use crate::storage::vote_tallies::{self, BridgePoolRoot}; /// Sign the latest Bridge pool root, and return the associated /// vote extension protocol transaction. 
pub fn sign_bridge_pool_root( - wl_storage: &WlStorage, + state: &WlState, validator_addr: &Address, eth_hot_key: &common::SecretKey, protocol_key: &common::SecretKey, @@ -34,18 +34,15 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - if !wl_storage.ethbridge_queries().is_bridge_active() { + if !state.ethbridge_queries().is_bridge_active() { return None; } - let bp_root = wl_storage.ethbridge_queries().get_bridge_pool_root().0; - let nonce = wl_storage - .ethbridge_queries() - .get_bridge_pool_nonce() - .to_bytes(); + let bp_root = state.ethbridge_queries().get_bridge_pool_root().0; + let nonce = state.ethbridge_queries().get_bridge_pool_nonce().to_bytes(); let to_sign = keccak_hash([bp_root.as_slice(), nonce.as_slice()].concat()); let signed = Signed::<_, SignableEthMessage>::new(eth_hot_key, to_sign); let ext = bridge_pool_roots::Vext { - block_height: wl_storage.storage.get_last_block_height(), + block_height: state.in_mem().get_last_block_height(), validator_addr: validator_addr.clone(), sig: signed.sig, }; @@ -61,7 +58,7 @@ where /// validators, the signature is made available for bridge /// pool proofs. pub fn apply_derived_tx( - wl_storage: &mut WlStorage, + state: &mut WlState, vext: MultiSignedVext, ) -> Result where @@ -76,14 +73,14 @@ where "Applying state updates derived from signatures of the Ethereum \ bridge pool root and nonce." ); - let voting_powers = utils::get_voting_powers(wl_storage, &vext)?; + let voting_powers = utils::get_voting_powers(state, &vext)?; let root_height = vext.iter().next().unwrap().data.block_height; - let (partial_proof, seen_by) = parse_vexts(wl_storage, vext); + let (partial_proof, seen_by) = parse_vexts(state, vext); // return immediately if a complete proof has already been acquired let bp_key = vote_tallies::Keys::from((&partial_proof, root_height)); let seen = - votes::storage::maybe_read_seen(wl_storage, &bp_key)?.unwrap_or(false); + votes::storage::maybe_read_seen(state, &bp_key)?.unwrap_or(false); if seen { tracing::debug!( ?root_height, @@ -94,19 +91,14 @@ where } // apply updates to the bridge pool root. - let (mut changed, confirmed_update) = apply_update( - wl_storage, - bp_key, - partial_proof, - seen_by, - &voting_powers, - )?; + let (mut changed, confirmed_update) = + apply_update(state, bp_key, partial_proof, seen_by, &voting_powers)?; // if the root is confirmed, update storage and add // relevant key to changed. if let Some(proof) = confirmed_update { let signed_root_key = get_signed_root_key(); - let should_write_root = wl_storage + let should_write_root = state .read::<(BridgePoolRoot, BlockHeight)>(&signed_root_key) .expect( "Reading a signed Bridge pool root from storage should not \ @@ -127,12 +119,9 @@ where ?root_height, "New Bridge pool root proof acquired" ); - wl_storage - .write(&signed_root_key, (proof, root_height)) - .expect( - "Writing a signed Bridge pool root to storage should not \ - fail.", - ); + state.write(&signed_root_key, (proof, root_height)).expect( + "Writing a signed Bridge pool root to storage should not fail.", + ); changed.insert(get_signed_root_key()); } else { tracing::debug!( @@ -161,7 +150,7 @@ impl GetVoters for &MultiSignedVext { /// Convert a set of signatures over bridge pool roots and nonces (at a certain /// height) into a partial proof and a new set of votes. 
fn parse_vexts( - wl_storage: &WlStorage, + state: &WlState, multisigned: MultiSignedVext, ) -> (BridgePoolRoot, Votes) where @@ -169,19 +158,19 @@ where H: 'static + StorageHasher + Sync, { let height = multisigned.iter().next().unwrap().data.block_height; - let epoch = wl_storage.pos_queries().get_epoch(height); - let root = wl_storage + let epoch = state.pos_queries().get_epoch(height); + let root = state .ethbridge_queries() .get_bridge_pool_root_at_height(height) .expect("A BP root should be available at the given height"); - let nonce = wl_storage + let nonce = state .ethbridge_queries() .get_bridge_pool_nonce_at_height(height); let mut partial_proof = BridgePoolRootProof::new((root, nonce)); partial_proof.attach_signature_batch(multisigned.clone().into_iter().map( |SignedVext(signed)| { ( - wl_storage + state .ethbridge_queries() .get_eth_addr_book(&signed.data.validator_addr, epoch) .unwrap(), @@ -206,7 +195,7 @@ where /// /// In all instances, the changed storage keys are returned. fn apply_update( - wl_storage: &mut WlStorage, + state: &mut WlState, bp_key: vote_tallies::Keys, mut update: BridgePoolRoot, seen_by: Votes, @@ -216,7 +205,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let partial_proof = votes::storage::read_body(wl_storage, &bp_key); + let partial_proof = votes::storage::read_body(state, &bp_key); let (vote_tracking, changed, confirmed, already_present) = if let Ok( partial, ) = @@ -229,7 +218,7 @@ where update.0.attach_signature_batch(partial.0.signatures); let new_votes = NewVotes::new(seen_by, voting_powers)?; let (vote_tracking, changed) = - votes::update::calculate(wl_storage, &bp_key, new_votes)?; + votes::update::calculate(state, &bp_key, new_votes)?; if changed.is_empty() { return Ok((changed, None)); } @@ -237,14 +226,14 @@ where (vote_tracking, changed, confirmed, true) } else { tracing::debug!(%bp_key.prefix, "No validator has signed this bridge pool update before."); - let vote_tracking = calculate_new(wl_storage, seen_by, voting_powers)?; + let vote_tracking = calculate_new(state, seen_by, voting_powers)?; let changed = bp_key.into_iter().collect(); let confirmed = vote_tracking.seen; (vote_tracking, changed, confirmed, false) }; votes::storage::write( - wl_storage, + state, &bp_key, &update, &vote_tracking, @@ -266,7 +255,7 @@ mod test_apply_bp_roots_to_storage { use namada_core::voting_power::FractionalVotingPower; use namada_proof_of_stake::parameters::OwnedPosParams; use namada_proof_of_stake::storage::write_pos_params; - use namada_state::testing::TestWlStorage; + use namada_state::testing::TestState; use namada_storage::StorageRead; use namada_vote_ext::bridge_pool_roots; @@ -285,7 +274,7 @@ mod test_apply_bp_roots_to_storage { /// The validator keys. keys: HashMap, /// Storage. - wl_storage: TestWlStorage, + state: TestState, } /// Setup storage for tests. 
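For orientation between these hunks, a minimal hedged sketch of the state-handling convention the test code below migrates to, built only from names visible in this patch (`TestState`, `in_mem_mut()`, `write` via `StorageWrite`, `commit_block()`); the helper name and the written value are illustrative, not part of the patch:

    use namada_core::storage::{BlockHeight, Key};
    use namada_state::testing::TestState;
    use namada_storage::StorageWrite;

    // Illustrative helper mirroring how `setup()` drives the state: mutate the
    // in-memory block metadata, write a value through `StorageWrite` on the
    // state itself (no explicit `.storage` indirection any more), then commit.
    fn write_and_commit(state: &mut TestState, key: &Key) {
        state.in_mem_mut().block.height = BlockHeight(1);
        state.write(key, 42u64).expect("Test failed");
        state.commit_block().unwrap();
    }
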
@@ -297,7 +286,7 @@ mod test_apply_bp_roots_to_storage { let validator_a = address::testing::established_address_2(); let validator_b = address::testing::established_address_3(); let validator_c = address::testing::established_address_4(); - let (mut wl_storage, keys) = test_utils::setup_storage_with_validators( + let (mut state, keys) = test_utils::setup_storage_with_validators( HashMap::from_iter(vec![ (validator_a.clone(), Amount::native_whole(100)), (validator_b.clone(), Amount::native_whole(100)), @@ -305,30 +294,31 @@ mod test_apply_bp_roots_to_storage { ]), ); // First commit - wl_storage.storage.block.height = 1.into(); - wl_storage.commit_block().unwrap(); + state.in_mem_mut().block.height = 1.into(); + state.commit_block().unwrap(); - vp::bridge_pool::init_storage(&mut wl_storage); + vp::bridge_pool::init_storage(&mut state); test_utils::commit_bridge_pool_root_at_height( - &mut wl_storage, + &mut state, &KeccakHash([1; 32]), 99.into(), ); test_utils::commit_bridge_pool_root_at_height( - &mut wl_storage, + &mut state, &KeccakHash([1; 32]), 100.into(), ); - wl_storage + state .write(&get_key_from_hash(&KeccakHash([1; 32])), BlockHeight(101)) .expect("Test failed"); - wl_storage + state .write(&get_nonce_key(), Uint::from(42)) .expect("Test failed"); + state.commit_block().unwrap(); TestPackage { validators: [validator_a, validator_b, validator_c], keys, - wl_storage, + state, } } @@ -342,10 +332,10 @@ mod test_apply_bp_roots_to_storage { let TestPackage { validators, keys, - mut wl_storage, + mut state, } = setup(); - let root = wl_storage.ethbridge_queries().get_bridge_pool_root(); - let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + let root = state.ethbridge_queries().get_bridge_pool_root(); + let nonce = state.ethbridge_queries().get_bridge_pool_nonce(); let to_sign = keccak_hash([root.0, nonce.to_bytes()].concat()); let hot_key = &keys[&validators[0]].eth_bridge; let vext = bridge_pool_roots::Vext { @@ -356,8 +346,7 @@ mod test_apply_bp_roots_to_storage { } .sign(&keys[&validators[0]].protocol); let TxResult { changed_keys, .. } = - apply_derived_tx(&mut wl_storage, vext.into()) - .expect("Test failed"); + apply_derived_tx(&mut state, vext.into()).expect("Test failed"); let bp_root_key = vote_tallies::Keys::from(( &BridgePoolRoot(BridgePoolRootProof::new((root, nonce))), 100.into(), @@ -374,8 +363,7 @@ mod test_apply_bp_roots_to_storage { .sign(&keys[&validators[2]].protocol); let TxResult { changed_keys, .. } = - apply_derived_tx(&mut wl_storage, vext.into()) - .expect("Test failed"); + apply_derived_tx(&mut state, vext.into()).expect("Test failed"); let expected: BTreeSet = [bp_root_key.seen_by(), bp_root_key.voting_power()] @@ -392,10 +380,10 @@ mod test_apply_bp_roots_to_storage { let TestPackage { validators, keys, - mut wl_storage, + mut state, } = setup(); - let root = wl_storage.ethbridge_queries().get_bridge_pool_root(); - let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + let root = state.ethbridge_queries().get_bridge_pool_root(); + let nonce = state.ethbridge_queries().get_bridge_pool_nonce(); let to_sign = keccak_hash([root.0, nonce.to_bytes()].concat()); let hot_key = &keys[&validators[0]].eth_bridge; let mut vexts: MultiSignedVext = bridge_pool_roots::Vext { @@ -415,7 +403,7 @@ mod test_apply_bp_roots_to_storage { .sign(&keys[&validators[1]].protocol); vexts.insert(vext); let TxResult { changed_keys, .. 
} = - apply_derived_tx(&mut wl_storage, vexts).expect("Test failed"); + apply_derived_tx(&mut state, vexts).expect("Test failed"); let bp_root_key = vote_tallies::Keys::from(( &BridgePoolRoot(BridgePoolRootProof::new((root, nonce))), 100.into(), @@ -434,10 +422,10 @@ mod test_apply_bp_roots_to_storage { let TestPackage { validators, keys, - mut wl_storage, + mut state, } = setup(); - let root = wl_storage.ethbridge_queries().get_bridge_pool_root(); - let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + let root = state.ethbridge_queries().get_bridge_pool_root(); + let nonce = state.ethbridge_queries().get_bridge_pool_nonce(); let to_sign = keccak_hash([root.0, nonce.to_bytes()].concat()); let hot_key = &keys[&validators[0]].eth_bridge; let vext = bridge_pool_roots::Vext { @@ -447,8 +435,7 @@ mod test_apply_bp_roots_to_storage { .sig, } .sign(&keys[&validators[0]].protocol); - _ = apply_derived_tx(&mut wl_storage, vext.into()) - .expect("Test failed"); + _ = apply_derived_tx(&mut state, vext.into()).expect("Test failed"); let hot_key = &keys[&validators[1]].eth_bridge; let vext = bridge_pool_roots::Vext { @@ -458,8 +445,7 @@ mod test_apply_bp_roots_to_storage { } .sign(&keys[&validators[1]].protocol); let TxResult { changed_keys, .. } = - apply_derived_tx(&mut wl_storage, vext.into()) - .expect("Test failed"); + apply_derived_tx(&mut state, vext.into()).expect("Test failed"); let bp_root_key = vote_tallies::Keys::from(( &BridgePoolRoot(BridgePoolRootProof::new((root, nonce))), 100.into(), @@ -481,10 +467,10 @@ mod test_apply_bp_roots_to_storage { let TestPackage { validators, keys, - mut wl_storage, + mut state, } = setup(); - let root = wl_storage.ethbridge_queries().get_bridge_pool_root(); - let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + let root = state.ethbridge_queries().get_bridge_pool_root(); + let nonce = state.ethbridge_queries().get_bridge_pool_nonce(); let to_sign = keccak_hash([root.0, nonce.to_bytes()].concat()); let bp_root_key = vote_tallies::Keys::from(( &BridgePoolRoot(BridgePoolRootProof::new((root, nonce))), @@ -499,13 +485,12 @@ mod test_apply_bp_roots_to_storage { .sig, } .sign(&keys[&validators[0]].protocol); - _ = apply_derived_tx(&mut wl_storage, vext.into()) - .expect("Test failed"); - let voting_power = wl_storage + _ = apply_derived_tx(&mut state, vext.into()).expect("Test failed"); + let voting_power = state .read::(&bp_root_key.voting_power()) .expect("Test failed") .expect("Test failed") - .fractional_stake(&wl_storage); + .fractional_stake(&state); assert_eq!( voting_power, FractionalVotingPower::new_u64(5, 12).unwrap() @@ -518,13 +503,12 @@ mod test_apply_bp_roots_to_storage { sig: Signed::<_, SignableEthMessage>::new(hot_key, to_sign).sig, } .sign(&keys[&validators[1]].protocol); - _ = apply_derived_tx(&mut wl_storage, vext.into()) - .expect("Test failed"); - let voting_power = wl_storage + _ = apply_derived_tx(&mut state, vext.into()).expect("Test failed"); + let voting_power = state .read::(&bp_root_key.voting_power()) .expect("Test failed") .expect("Test failed") - .fractional_stake(&wl_storage); + .fractional_stake(&state); assert_eq!(voting_power, FractionalVotingPower::new_u64(5, 6).unwrap()); } @@ -534,10 +518,10 @@ mod test_apply_bp_roots_to_storage { let TestPackage { validators, keys, - mut wl_storage, + mut state, } = setup(); - let root = wl_storage.ethbridge_queries().get_bridge_pool_root(); - let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + let root = 
state.ethbridge_queries().get_bridge_pool_root(); + let nonce = state.ethbridge_queries().get_bridge_pool_nonce(); let to_sign = keccak_hash([root.0, nonce.to_bytes()].concat()); let hot_key = &keys[&validators[0]].eth_bridge; @@ -553,11 +537,10 @@ mod test_apply_bp_roots_to_storage { .sig, } .sign(&keys[&validators[0]].protocol); - _ = apply_derived_tx(&mut wl_storage, vext.into()) - .expect("Test failed"); + _ = apply_derived_tx(&mut state, vext.into()).expect("Test failed"); let seen: bool = BorshDeserialize::try_from_slice( - wl_storage + state .read_bytes(&bp_root_key.seen()) .expect("Test failed") .expect("Test failed") @@ -573,11 +556,10 @@ mod test_apply_bp_roots_to_storage { sig: Signed::<_, SignableEthMessage>::new(hot_key, to_sign).sig, } .sign(&keys[&validators[1]].protocol); - _ = apply_derived_tx(&mut wl_storage, vext.into()) - .expect("Test failed"); + _ = apply_derived_tx(&mut state, vext.into()).expect("Test failed"); let seen: bool = BorshDeserialize::try_from_slice( - wl_storage + state .read_bytes(&bp_root_key.seen()) .expect("Test failed") .expect("Test failed") @@ -593,10 +575,10 @@ mod test_apply_bp_roots_to_storage { let TestPackage { validators, keys, - mut wl_storage, + mut state, } = setup(); - let root = wl_storage.ethbridge_queries().get_bridge_pool_root(); - let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + let root = state.ethbridge_queries().get_bridge_pool_root(); + let nonce = state.ethbridge_queries().get_bridge_pool_nonce(); let to_sign = keccak_hash([root.0, nonce.to_bytes()].concat()); let hot_key = &keys[&validators[0]].eth_bridge; @@ -612,12 +594,11 @@ mod test_apply_bp_roots_to_storage { .sig, } .sign(&keys[&validators[0]].protocol); - _ = apply_derived_tx(&mut wl_storage, vext.into()) - .expect("Test failed"); + _ = apply_derived_tx(&mut state, vext.into()).expect("Test failed"); let expected = Votes::from([(validators[0].clone(), 100.into())]); let seen_by: Votes = BorshDeserialize::try_from_slice( - wl_storage + state .read_bytes(&bp_root_key.seen_by()) .expect("Test failed") .expect("Test failed") @@ -633,15 +614,14 @@ mod test_apply_bp_roots_to_storage { sig: Signed::<_, SignableEthMessage>::new(hot_key, to_sign).sig, } .sign(&keys[&validators[1]].protocol); - _ = apply_derived_tx(&mut wl_storage, vext.into()) - .expect("Test failed"); + _ = apply_derived_tx(&mut state, vext.into()).expect("Test failed"); let expected = Votes::from([ (validators[0].clone(), 100.into()), (validators[1].clone(), 100.into()), ]); let seen_by: Votes = BorshDeserialize::try_from_slice( - wl_storage + state .read_bytes(&bp_root_key.seen_by()) .expect("Test failed") .expect("Test failed") @@ -657,10 +637,10 @@ mod test_apply_bp_roots_to_storage { let TestPackage { validators, keys, - mut wl_storage, + mut state, } = setup(); - let root = wl_storage.ethbridge_queries().get_bridge_pool_root(); - let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + let root = state.ethbridge_queries().get_bridge_pool_root(); + let nonce = state.ethbridge_queries().get_bridge_pool_nonce(); let to_sign = keccak_hash([root.0, nonce.to_bytes()].concat()); let hot_key = &keys[&validators[0]].eth_bridge; let mut expected = @@ -673,21 +653,20 @@ mod test_apply_bp_roots_to_storage { sig: Signed::<_, SignableEthMessage>::new(hot_key, to_sign).sig, }; expected.0.attach_signature( - wl_storage + state .ethbridge_queries() .get_eth_addr_book( &validators[0], - wl_storage.pos_queries().get_epoch(100.into()), + state.pos_queries().get_epoch(100.into()), ) 
.expect("Test failed"), vext.sig.clone(), ); let vext = vext.sign(&keys[&validators[0]].protocol); - _ = apply_derived_tx(&mut wl_storage, vext.into()) - .expect("Test failed"); + _ = apply_derived_tx(&mut state, vext.into()).expect("Test failed"); let proof: BridgePoolRootProof = BorshDeserialize::try_from_slice( - wl_storage + state .read_bytes(&bp_root_key.body()) .expect("Test failed") .expect("Test failed") @@ -705,14 +684,14 @@ mod test_apply_bp_roots_to_storage { let TestPackage { validators, keys, - mut wl_storage, + mut state, } = setup(); - let root = wl_storage.ethbridge_queries().get_bridge_pool_root(); - let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + let root = state.ethbridge_queries().get_bridge_pool_root(); + let nonce = state.ethbridge_queries().get_bridge_pool_nonce(); let to_sign = keccak_hash([root.0, nonce.to_bytes()].concat()); assert!( - wl_storage + state .read_bytes(&get_signed_root_key()) .expect("Test failed") .is_none() @@ -737,12 +716,12 @@ mod test_apply_bp_roots_to_storage { .sign(&keys[&validators[1]].protocol); vexts.insert(vext); - let epoch = wl_storage.pos_queries().get_epoch(100.into()); + let epoch = state.pos_queries().get_epoch(100.into()); let sigs: Vec<_> = vexts .iter() .map(|s| { ( - wl_storage + state .ethbridge_queries() .get_eth_addr_book(&s.data.validator_addr, epoch) .expect("Test failed"), @@ -751,10 +730,10 @@ mod test_apply_bp_roots_to_storage { }) .collect(); - _ = apply_derived_tx(&mut wl_storage, vexts).expect("Test failed"); + _ = apply_derived_tx(&mut state, vexts).expect("Test failed"); let (proof, _): (BridgePoolRootProof, BlockHeight) = BorshDeserialize::try_from_slice( - wl_storage + state .read_bytes(&get_signed_root_key()) .expect("Test failed") .expect("Test failed") @@ -783,7 +762,7 @@ mod test_apply_bp_roots_to_storage { let validator_3_stake = Amount::native_whole(100); // start epoch 0 with validator 1 - let (mut wl_storage, keys) = test_utils::setup_storage_with_validators( + let (mut state, keys) = test_utils::setup_storage_with_validators( HashMap::from([(validator_1.clone(), validator_1_stake)]), ); @@ -792,11 +771,11 @@ mod test_apply_bp_roots_to_storage { pipeline_len: 1, ..Default::default() }; - write_pos_params(&mut wl_storage, ¶ms).expect("Test failed"); + write_pos_params(&mut state, ¶ms).expect("Test failed"); // insert validators 2 and 3 at epoch 1 test_utils::append_validators_to_storage( - &mut wl_storage, + &mut state, HashMap::from([ (validator_2.clone(), validator_2_stake), (validator_3.clone(), validator_3_stake), @@ -807,7 +786,7 @@ mod test_apply_bp_roots_to_storage { macro_rules! 
query_validators { () => { |epoch: u64| { - wl_storage + state .pos_queries() .get_consensus_validators(Some(epoch.into())) .iter() @@ -827,9 +806,7 @@ mod test_apply_bp_roots_to_storage { HashMap::from([(validator_1.clone(), validator_1_stake)]) ); assert_eq!( - wl_storage - .pos_queries() - .get_total_voting_power(Some(0.into())), + state.pos_queries().get_total_voting_power(Some(0.into())), validator_1_stake, ); assert_eq!( @@ -841,23 +818,21 @@ mod test_apply_bp_roots_to_storage { ]) ); assert_eq!( - wl_storage - .pos_queries() - .get_total_voting_power(Some(1.into())), + state.pos_queries().get_total_voting_power(Some(1.into())), validator_1_stake + validator_2_stake + validator_3_stake, ); // set up the bridge pool's storage - vp::bridge_pool::init_storage(&mut wl_storage); + vp::bridge_pool::init_storage(&mut state); test_utils::commit_bridge_pool_root_at_height( - &mut wl_storage, + &mut state, &KeccakHash([1; 32]), 3.into(), ); // construct proof - let root = wl_storage.ethbridge_queries().get_bridge_pool_root(); - let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + let root = state.ethbridge_queries().get_bridge_pool_root(); + let nonce = state.ethbridge_queries().get_bridge_pool_nonce(); let to_sign = keccak_hash([root.0, nonce.to_bytes()].concat()); let hot_key = &keys[&validator_1].eth_bridge; let vext = bridge_pool_roots::Vext { @@ -867,16 +842,15 @@ mod test_apply_bp_roots_to_storage { } .sign(&keys[&validator_1].protocol); - _ = apply_derived_tx(&mut wl_storage, vext.into()) - .expect("Test failed"); + _ = apply_derived_tx(&mut state, vext.into()).expect("Test failed"); // query validator set of the proof // (should be the one from epoch 0) - let (_, root_height) = wl_storage + let (_, root_height) = state .ethbridge_queries() .get_signed_bridge_pool_root() .expect("Test failed"); - let root_epoch = wl_storage + let root_epoch = state .pos_queries() .get_epoch(root_height) .expect("Test failed"); @@ -894,11 +868,11 @@ mod test_apply_bp_roots_to_storage { let TestPackage { validators, keys, - mut wl_storage, + mut state, } = setup(); - let root = wl_storage.ethbridge_queries().get_bridge_pool_root(); - let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + let root = state.ethbridge_queries().get_bridge_pool_root(); + let nonce = state.ethbridge_queries().get_bridge_pool_nonce(); let to_sign = keccak_hash([root.0, nonce.to_bytes()].concat()); macro_rules! 
decide_at_height { @@ -914,7 +888,7 @@ mod test_apply_bp_roots_to_storage { .sig, } .sign(&keys[&validators[0]].protocol); - _ = apply_derived_tx(&mut wl_storage, vext.into()) + _ = apply_derived_tx(&mut state, vext.into()) .expect("Test failed"); let hot_key = &keys[&validators[1]].eth_bridge; let vext = bridge_pool_roots::Vext { @@ -927,7 +901,7 @@ mod test_apply_bp_roots_to_storage { .sig, } .sign(&keys[&validators[1]].protocol); - _ = apply_derived_tx(&mut wl_storage, vext.into()) + _ = apply_derived_tx(&mut state, vext.into()) .expect("Test failed"); }; } @@ -936,7 +910,7 @@ mod test_apply_bp_roots_to_storage { decide_at_height!(100); // check the signed root in storage - let root_in_storage = wl_storage + let root_in_storage = state .read::<(BridgePoolRoot, BlockHeight)>(&get_signed_root_key()) .expect("Test failed - storage read failed") .expect("Test failed - no signed root in storage"); @@ -950,7 +924,7 @@ mod test_apply_bp_roots_to_storage { decide_at_height!(99); // check the signed root in storage is unchanged - let root_in_storage = wl_storage + let root_in_storage = state .read::<(BridgePoolRoot, BlockHeight)>(&get_signed_root_key()) .expect("Test failed - storage read failed") .expect("Test failed - no signed root in storage"); diff --git a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs index b1f3c7ad00..02849dd184 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs @@ -19,7 +19,7 @@ use namada_core::ethereum_structs::EthBridgeEvent; use namada_core::hints; use namada_core::storage::{BlockHeight, Key, KeySeg}; use namada_parameters::read_epoch_duration_parameter; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_state::{DBIter, StorageHasher, WlState, DB}; use namada_storage::{StorageRead, StorageWrite}; use namada_trans_token::storage_key::{balance_key, minted_balance_key}; @@ -36,7 +36,7 @@ use crate::{token, ADDRESS as BRIDGE_ADDRESS}; /// confirmed [`EthereumEvent::TransfersToNamada`], mint the corresponding /// transferred assets to the appropriate receiver addresses. pub(super) fn act_on( - wl_storage: &mut WlStorage, + state: &mut WlState, event: EthereumEvent, ) -> Result<(BTreeSet, BTreeSet)> where @@ -46,7 +46,7 @@ where match event { EthereumEvent::TransfersToNamada { transfers, nonce } => { act_on_transfers_to_namada( - wl_storage, + state, TransfersToNamada { transfers, nonce }, ) } @@ -54,7 +54,7 @@ where ref transfers, ref relayer, .. - } => act_on_transfers_to_eth(wl_storage, transfers, relayer), + } => act_on_transfers_to_eth(state, transfers, relayer), _ => { tracing::debug!(?event, "No actions taken for Ethereum event"); Ok(Default::default()) @@ -63,7 +63,7 @@ where } fn act_on_transfers_to_namada<'tx, D, H>( - wl_storage: &mut WlStorage, + state: &mut WlState, transfer_event: TransfersToNamada, ) -> Result<(BTreeSet, BTreeSet)> where @@ -74,15 +74,15 @@ where let mut changed_keys = BTreeSet::new(); // we need to collect the events into a separate // buffer because of rust's borrowing rules :| - let confirmed_events: Vec<_> = wl_storage - .storage + let confirmed_events: Vec<_> = state + .in_mem_mut() .eth_events_queue .transfers_to_namada .push_and_iter(transfer_event) .collect(); for TransfersToNamada { transfers, .. 
} in confirmed_events { update_transfers_to_namada_state( - wl_storage, + state, &mut changed_keys, transfers.iter(), )?; @@ -95,7 +95,7 @@ where } fn update_transfers_to_namada_state<'tx, D, H>( - wl_storage: &mut WlStorage, + state: &mut WlState, changed_keys: &mut BTreeSet, transfers: impl IntoIterator, ) -> Result<()> @@ -103,7 +103,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let wrapped_native_erc20 = read_native_erc20_address(wl_storage)?; + let wrapped_native_erc20 = read_native_erc20_address(state)?; for transfer in transfers { tracing::debug!( ?transfer, @@ -116,7 +116,7 @@ where } = transfer; let mut changed = if asset != &wrapped_native_erc20 { let (asset_count, changed) = - mint_eth_assets(wl_storage, asset, receiver, amount)?; + mint_eth_assets(state, asset, receiver, amount)?; // TODO: query denomination of the whitelisted token from storage, // and print this amount with the proper formatting; for now, use // NAM's formatting @@ -136,12 +136,7 @@ where } changed } else { - redeem_native_token( - wl_storage, - &wrapped_native_erc20, - receiver, - amount, - )? + redeem_native_token(state, &wrapped_native_erc20, receiver, amount)? }; changed_keys.append(&mut changed) } @@ -150,7 +145,7 @@ where /// Redeems `amount` of the native token for `receiver` from escrow. fn redeem_native_token( - wl_storage: &mut WlStorage, + state: &mut WlState, native_erc20: &EthAddress, receiver: &Address, amount: &token::Amount, @@ -160,49 +155,41 @@ where H: 'static + StorageHasher + Sync, { let eth_bridge_native_token_balance_key = - balance_key(&wl_storage.storage.native_token, &BRIDGE_ADDRESS); + balance_key(&state.in_mem().native_token, &BRIDGE_ADDRESS); let receiver_native_token_balance_key = - balance_key(&wl_storage.storage.native_token, receiver); + balance_key(&state.in_mem().native_token, receiver); let native_werc20_supply_key = minted_balance_key(&erc20_token_address(native_erc20)); - update::amount( - wl_storage, - ð_bridge_native_token_balance_key, - |balance| { - tracing::debug!( - %eth_bridge_native_token_balance_key, - ?balance, - "Existing value found", - ); - balance.spend(amount)?; - tracing::debug!( - %eth_bridge_native_token_balance_key, - ?balance, - "New value calculated", - ); - Ok(()) - }, - )?; - update::amount( - wl_storage, - &receiver_native_token_balance_key, - |balance| { - tracing::debug!( - %receiver_native_token_balance_key, - ?balance, - "Existing value found", - ); - balance.receive(amount)?; - tracing::debug!( - %receiver_native_token_balance_key, - ?balance, - "New value calculated", - ); - Ok(()) - }, - )?; - update::amount(wl_storage, &native_werc20_supply_key, |balance| { + update::amount(state, ð_bridge_native_token_balance_key, |balance| { + tracing::debug!( + %eth_bridge_native_token_balance_key, + ?balance, + "Existing value found", + ); + balance.spend(amount)?; + tracing::debug!( + %eth_bridge_native_token_balance_key, + ?balance, + "New value calculated", + ); + Ok(()) + })?; + update::amount(state, &receiver_native_token_balance_key, |balance| { + tracing::debug!( + %receiver_native_token_balance_key, + ?balance, + "Existing value found", + ); + balance.receive(amount)?; + tracing::debug!( + %receiver_native_token_balance_key, + ?balance, + "New value calculated", + ); + Ok(()) + })?; + update::amount(state, &native_werc20_supply_key, |balance| { tracing::debug!( %native_werc20_supply_key, ?balance, @@ -236,7 +223,7 @@ where /// If the given asset is not whitelisted or has exceeded the /// 
token caps, mint NUTs, too. fn mint_eth_assets( - wl_storage: &mut WlStorage, + state: &mut WlState, asset: &EthAddress, receiver: &Address, &amount: &token::Amount, @@ -247,7 +234,7 @@ where { let mut changed_keys = BTreeSet::default(); - let asset_count = wl_storage + let asset_count = state .ethbridge_queries() .get_eth_assets_to_mint(asset, amount); @@ -268,7 +255,7 @@ where for (token, ref amount) in assets_to_mint { let balance_key = balance_key(&token, receiver); - update::amount(wl_storage, &balance_key, |balance| { + update::amount(state, &balance_key, |balance| { tracing::debug!( %balance_key, ?balance, @@ -285,7 +272,7 @@ where _ = changed_keys.insert(balance_key); let supply_key = minted_balance_key(&token); - update::amount(wl_storage, &supply_key, |supply| { + update::amount(state, &supply_key, |supply| { tracing::debug!( %supply_key, ?supply, @@ -306,7 +293,7 @@ where } fn act_on_transfers_to_eth( - wl_storage: &mut WlStorage, + state: &mut WlState, transfers: &[TransferToEthereum], relayer: &Address, ) -> Result<(BTreeSet, BTreeSet)> @@ -323,12 +310,12 @@ where // halts the Ethereum bridge, since nonces will fall out // of sync between Namada and Ethereum let nonce_key = get_nonce_key(); - increment_bp_nonce(&nonce_key, wl_storage)?; + increment_bp_nonce(&nonce_key, state)?; changed_keys.insert(nonce_key); // all keys of pending transfers let prefix = BRIDGE_POOL_ADDRESS.to_db_key().into(); - let mut pending_keys: HashSet = wl_storage + let mut pending_keys: HashSet = state .iter_prefix(&prefix) .context("Failed to iterate over storage")? .map(|(k, _, _)| { @@ -339,7 +326,7 @@ where // Remove the completed transfers from the bridge pool for event in transfers { let (pending_transfer, key) = if let Some((pending, key)) = - wl_storage.ethbridge_queries().lookup_transfer_to_eth(event) + state.ethbridge_queries().lookup_transfer_to_eth(event) { (pending, key) } else { @@ -352,7 +339,7 @@ where and burning any Ethereum assets in Namada" ); changed_keys.append(&mut update_transferred_asset_balances( - wl_storage, + state, &pending_transfer, )?); let pool_balance_key = @@ -360,14 +347,14 @@ where let relayer_rewards_key = balance_key(&pending_transfer.gas_fee.token, relayer); // give the relayer the gas fee for this transfer. - update::amount(wl_storage, &relayer_rewards_key, |balance| { + update::amount(state, &relayer_rewards_key, |balance| { balance.receive(&pending_transfer.gas_fee.amount) })?; // the gas fee is removed from escrow. 
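// NB: judging from its call sites in this module, `update::amount(state, &key,
// |amount| ...)` is a read-modify-write helper: it loads the `token::Amount`
// stored under `key`, lets the closure `receive`/`spend` on it, and persists
// the result. The call above credited the relayer's balance; the call below
// debits the same fee from the Bridge pool's escrow balance.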
- update::amount(wl_storage, &pool_balance_key, |balance| { + update::amount(state, &pool_balance_key, |balance| { balance.spend(&pending_transfer.gas_fee.amount) })?; - wl_storage.delete(&key)?; + state.delete(&key)?; _ = pending_keys.remove(&key); _ = changed_keys.insert(key); _ = changed_keys.insert(pool_balance_key); @@ -382,21 +369,21 @@ where } // TODO the timeout height is min_num_blocks of an epoch for now - let epoch_duration = read_epoch_duration_parameter(wl_storage)?; + let epoch_duration = read_epoch_duration_parameter(state)?; let timeout_offset = epoch_duration.min_num_of_blocks; // Check time out and refund - if wl_storage.storage.block.height.0 > timeout_offset { + if state.in_mem().block.height.0 > timeout_offset { let timeout_height = - BlockHeight(wl_storage.storage.block.height.0 - timeout_offset); + BlockHeight(state.in_mem().block.height.0 - timeout_offset); for key in pending_keys { let inserted_height = BlockHeight::try_from_slice( - &wl_storage.storage.block.tree.get(&key)?, + &state.in_mem().block.tree.get(&key)?, ) .expect("BlockHeight should be decoded"); if inserted_height <= timeout_height { let (mut keys, mut new_tx_events) = - refund_transfer(wl_storage, key)?; + refund_transfer(state, key)?; changed_keys.append(&mut keys); tx_events.append(&mut new_tx_events); } @@ -408,23 +395,23 @@ where fn increment_bp_nonce( nonce_key: &Key, - wl_storage: &mut WlStorage, + state: &mut WlState, ) -> Result<()> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let next_nonce = wl_storage + let next_nonce = state .ethbridge_queries() .get_bridge_pool_nonce() .checked_increment() .expect("Bridge pool nonce has overflowed"); - wl_storage.write(nonce_key, next_nonce)?; + state.write(nonce_key, next_nonce)?; Ok(()) } fn refund_transfer( - wl_storage: &mut WlStorage, + state: &mut WlState, key: Key, ) -> Result<(BTreeSet, BTreeSet)> where @@ -434,15 +421,15 @@ where let mut changed_keys = BTreeSet::default(); let mut tx_events = BTreeSet::default(); - let transfer = match wl_storage.read_bytes(&key)? { + let transfer = match state.read_bytes(&key)? 
{ Some(v) => PendingTransfer::try_from_slice(&v[..])?, None => unreachable!(), }; - changed_keys.append(&mut refund_transfer_fees(wl_storage, &transfer)?); - changed_keys.append(&mut refund_transferred_assets(wl_storage, &transfer)?); + changed_keys.append(&mut refund_transfer_fees(state, &transfer)?); + changed_keys.append(&mut refund_transferred_assets(state, &transfer)?); // Delete the key from the bridge pool - wl_storage.delete(&key)?; + state.delete(&key)?; _ = changed_keys.insert(key); // Emit expiration event @@ -454,7 +441,7 @@ where } fn refund_transfer_fees( - wl_storage: &mut WlStorage, + state: &mut WlState, transfer: &PendingTransfer, ) -> Result> where @@ -467,10 +454,10 @@ where balance_key(&transfer.gas_fee.token, &transfer.gas_fee.payer); let pool_balance_key = balance_key(&transfer.gas_fee.token, &BRIDGE_POOL_ADDRESS); - update::amount(wl_storage, &payer_balance_key, |balance| { + update::amount(state, &payer_balance_key, |balance| { balance.receive(&transfer.gas_fee.amount) })?; - update::amount(wl_storage, &pool_balance_key, |balance| { + update::amount(state, &pool_balance_key, |balance| { balance.spend(&transfer.gas_fee.amount) })?; @@ -481,7 +468,7 @@ where } fn refund_transferred_assets( - wl_storage: &mut WlStorage, + state: &mut WlState, transfer: &PendingTransfer, ) -> Result> where @@ -490,7 +477,7 @@ where { let mut changed_keys = BTreeSet::default(); - let native_erc20_addr = match wl_storage + let native_erc20_addr = match state .read_bytes(&bridge_storage::native_erc20_key())? { Some(v) => EthAddress::try_from_slice(&v[..])?, @@ -500,9 +487,9 @@ where }; let (source, target) = if transfer.transfer.asset == native_erc20_addr { let escrow_balance_key = - balance_key(&wl_storage.storage.native_token, &BRIDGE_ADDRESS); + balance_key(&state.in_mem().native_token, &BRIDGE_ADDRESS); let sender_balance_key = balance_key( - &wl_storage.storage.native_token, + &state.in_mem().native_token, &transfer.transfer.sender, ); (escrow_balance_key, sender_balance_key) @@ -512,10 +499,10 @@ where let sender_balance_key = balance_key(&token, &transfer.transfer.sender); (escrow_balance_key, sender_balance_key) }; - update::amount(wl_storage, &source, |balance| { + update::amount(state, &source, |balance| { balance.spend(&transfer.transfer.amount) })?; - update::amount(wl_storage, &target, |balance| { + update::amount(state, &target, |balance| { balance.receive(&transfer.transfer.amount) })?; @@ -528,7 +515,7 @@ where /// Burns any transferred ERC20s other than wNAM. If NAM is transferred, /// update the wNAM supply key. 
fn update_transferred_asset_balances( - wl_storage: &mut WlStorage, + state: &mut WlState, transfer: &PendingTransfer, ) -> Result> where @@ -537,7 +524,7 @@ where { let mut changed_keys = BTreeSet::default(); - let maybe_addr = wl_storage.read(&bridge_storage::native_erc20_key())?; + let maybe_addr = state.read(&bridge_storage::native_erc20_key())?; let Some(native_erc20_addr) = maybe_addr else { return Err(eyre::eyre!("Could not read wNam key from storage")); }; @@ -553,7 +540,7 @@ where unreachable!("Attempted to mint wNAM NUTs!"); } let supply_key = minted_balance_key(&token); - update::amount(wl_storage, &supply_key, |supply| { + update::amount(state, &supply_key, |supply| { supply.receive(&transfer.transfer.amount) })?; _ = changed_keys.insert(supply_key); @@ -564,13 +551,13 @@ where // other asset kinds must be burned let escrow_balance_key = balance_key(&token, &BRIDGE_POOL_ADDRESS); - update::amount(wl_storage, &escrow_balance_key, |balance| { + update::amount(state, &escrow_balance_key, |balance| { balance.spend(&transfer.transfer.amount) })?; _ = changed_keys.insert(escrow_balance_key); let supply_key = minted_balance_key(&token); - update::amount(wl_storage, &supply_key, |supply| { + update::amount(state, &supply_key, |supply| { supply.spend(&transfer.transfer.amount) })?; _ = changed_keys.insert(supply_key); @@ -587,7 +574,6 @@ mod tests { use eyre::Result; use namada_core::address::gen_established_address; use namada_core::address::testing::{gen_implicit_address, nam, wnam}; - use namada_core::borsh::BorshSerializeExt; use namada_core::eth_bridge_pool::GasFee; use namada_core::ethereum_events::testing::{ arbitrary_keccak_hash, arbitrary_nonce, DAI_ERC20_ETH_ADDRESS, @@ -596,25 +582,23 @@ mod tests { use namada_core::token::Amount; use namada_core::{address, eth_bridge_pool}; use namada_parameters::{update_epoch_parameter, EpochDuration}; - use namada_state::testing::TestWlStorage; - use namada_storage::mockdb::MockDBWriteBatch; + use namada_state::testing::TestState; use super::*; use crate::storage::bridge_pool::get_pending_key; use crate::storage::wrapped_erc20s; use crate::test_utils::{self, stored_keys_count}; - fn init_storage(wl_storage: &mut TestWlStorage) { + fn init_storage(state: &mut TestState) { // set the timeout height offset let timeout_offset = 10; let epoch_duration = EpochDuration { min_num_of_blocks: timeout_offset, min_duration: DurationSecs(5), }; - update_epoch_parameter(wl_storage, &epoch_duration) - .expect("Test failed"); + update_epoch_parameter(state, &epoch_duration).expect("Test failed"); // set native ERC20 token - wl_storage + state .write(&bridge_storage::native_erc20_key(), wnam()) .expect("Test failed"); } @@ -689,7 +673,7 @@ mod tests { } fn init_bridge_pool_transfers( - wl_storage: &mut TestWlStorage, + state: &mut TestState, assets_transferred: A, ) -> Vec where @@ -718,10 +702,7 @@ mod tests { }, }; let key = get_pending_key(&transfer); - wl_storage - .storage - .write(&key, transfer.serialize_to_vec()) - .expect("Test failed"); + state.write(&key, &transfer).expect("Test failed"); pending_transfers.push(transfer); } @@ -729,11 +710,9 @@ mod tests { } #[inline] - fn init_bridge_pool( - wl_storage: &mut TestWlStorage, - ) -> Vec { + fn init_bridge_pool(state: &mut TestState) -> Vec { init_bridge_pool_transfers( - wl_storage, + state, (0..2) .map(|i| { ( @@ -752,7 +731,7 @@ mod tests { } fn init_balance( - wl_storage: &mut TestWlStorage, + state: &mut TestState, pending_transfers: &Vec, ) { for transfer in pending_transfers { @@ -760,12 
+739,10 @@ mod tests { let payer = address::testing::established_address_2(); let payer_key = balance_key(&transfer.gas_fee.token, &payer); let payer_balance = Amount::from(0); - wl_storage - .write(&payer_key, payer_balance) - .expect("Test failed"); + state.write(&payer_key, payer_balance).expect("Test failed"); let escrow_key = balance_key(&transfer.gas_fee.token, &BRIDGE_POOL_ADDRESS); - update::amount(wl_storage, &escrow_key, |balance| { + update::amount(state, &escrow_key, |balance| { let gas_fee = Amount::from_u64(1); balance.receive(&gas_fee) }) @@ -775,43 +752,41 @@ mod tests { // native ERC20 let sender_key = balance_key(&nam(), &transfer.transfer.sender); let sender_balance = Amount::from(0); - wl_storage + state .write(&sender_key, sender_balance) .expect("Test failed"); let escrow_key = balance_key(&nam(), &BRIDGE_ADDRESS); let escrow_balance = Amount::from(10); - wl_storage + state .write(&escrow_key, escrow_balance) .expect("Test failed"); } else { let token = transfer.token_address(); let sender_key = balance_key(&token, &transfer.transfer.sender); let sender_balance = Amount::from(0); - wl_storage + state .write(&sender_key, sender_balance) .expect("Test failed"); let escrow_key = balance_key(&token, &BRIDGE_POOL_ADDRESS); let escrow_balance = Amount::from(10); - wl_storage + state .write(&escrow_key, escrow_balance) .expect("Test failed"); - update::amount( - wl_storage, - &minted_balance_key(&token), - |supply| supply.receive(&transfer.transfer.amount), - ) + update::amount(state, &minted_balance_key(&token), |supply| { + supply.receive(&transfer.transfer.amount) + }) .expect("Test failed"); }; } } #[test] - /// Test that we do not make any changes to wl_storage when acting on most + /// Test that we do not make any changes to state when acting on most /// events fn test_act_on_does_nothing_for_other_events() { - let mut wl_storage = TestWlStorage::default(); - test_utils::bootstrap_ethereum_bridge(&mut wl_storage); - let initial_stored_keys_count = stored_keys_count(&wl_storage); + let mut state = TestState::default(); + test_utils::bootstrap_ethereum_bridge(&mut state); + let initial_stored_keys_count = stored_keys_count(&state); let events = vec![EthereumEvent::ValidatorSetUpdate { nonce: arbitrary_nonce(), bridge_validator_hash: arbitrary_keccak_hash(), @@ -819,9 +794,9 @@ mod tests { }]; for event in events { - act_on(&mut wl_storage, event.clone()).unwrap(); + act_on(&mut state, event.clone()).unwrap(); assert_eq!( - stored_keys_count(&wl_storage), + stored_keys_count(&state), initial_stored_keys_count, "storage changed unexpectedly while acting on event: {:#?}", event @@ -830,13 +805,13 @@ mod tests { } #[test] - /// Test that wl_storage is indeed changed when we act on a non-empty + /// Test that state is indeed changed when we act on a non-empty /// TransfersToNamada batch fn test_act_on_changes_storage_for_transfers_to_namada() { - let mut wl_storage = TestWlStorage::default(); - test_utils::bootstrap_ethereum_bridge(&mut wl_storage); - wl_storage.commit_block().expect("Test failed"); - let initial_stored_keys_count = stored_keys_count(&wl_storage); + let mut state = TestState::default(); + test_utils::bootstrap_ethereum_bridge(&mut state); + state.commit_block().expect("Test failed"); + let initial_stored_keys_count = stored_keys_count(&state); let amount = Amount::from(100); let receiver = address::testing::established_address_1(); let transfers = vec![TransferToNamada { @@ -849,12 +824,9 @@ mod tests { transfers, }; - act_on(&mut wl_storage, event).unwrap(); 
+ act_on(&mut state, event).unwrap(); - assert_eq!( - stored_keys_count(&wl_storage), - initial_stored_keys_count + 2 - ); + assert_eq!(stored_keys_count(&state), initial_stored_keys_count + 2); } /// Parameters to test minting DAI in Namada. @@ -881,11 +853,11 @@ mod tests { }; assert_eq!(self.transferred_amount, nut_amount + erc20_amount); - let mut wl_storage = TestWlStorage::default(); - test_utils::bootstrap_ethereum_bridge(&mut wl_storage); + let mut state = TestState::default(); + test_utils::bootstrap_ethereum_bridge(&mut state); if !dai_token_cap.is_zero() { test_utils::whitelist_tokens( - &mut wl_storage, + &mut state, [( DAI_ERC20_ETH_ADDRESS, test_utils::WhitelistMeta { @@ -904,7 +876,7 @@ mod tests { }]; update_transfers_to_namada_state( - &mut wl_storage, + &mut state, &mut BTreeSet::new(), &transfers, ) @@ -924,7 +896,7 @@ mod tests { for key in vec![receiver_balance_key, wdai_supply_key] { let value: Option = - wl_storage.read(&key).unwrap(); + state.read(&key).unwrap(); if expected_amount.is_zero() { assert_matches!(value, None); } else { @@ -972,12 +944,12 @@ mod tests { /// that pending transfers are deleted from the Bridge pool, the /// Bridge pool nonce is updated and escrowed assets are burned. fn test_act_on_changes_storage_for_transfers_to_eth() { - let mut wl_storage = TestWlStorage::default(); - test_utils::bootstrap_ethereum_bridge(&mut wl_storage); - wl_storage.commit_block().expect("Test failed"); - init_storage(&mut wl_storage); + let mut state = TestState::default(); + test_utils::bootstrap_ethereum_bridge(&mut state); + state.commit_block().expect("Test failed"); + init_storage(&mut state); let native_erc20 = - read_native_erc20_address(&wl_storage).expect("Test failed"); + read_native_erc20_address(&state).expect("Test failed"); let random_erc20 = EthAddress([0xff; 20]); let random_erc20_token = wrapped_erc20s::nut(&random_erc20); let random_erc20_2 = EthAddress([0xee; 20]); @@ -991,7 +963,7 @@ mod tests { 19, ]); let pending_transfers = init_bridge_pool_transfers( - &mut wl_storage, + &mut state, [ (native_erc20, TransferData::default()), (random_erc20, TransferDataBuilder::new().kind_nut().build()), @@ -1015,7 +987,7 @@ mod tests { ), ], ); - init_balance(&mut wl_storage, &pending_transfers); + init_balance(&mut state, &pending_transfers); let pending_keys: HashSet = pending_transfers.iter().map(get_pending_key).collect(); let relayer = gen_established_address("random"); @@ -1037,20 +1009,20 @@ mod tests { &BRIDGE_POOL_ADDRESS, ); let mut bp_nam_balance_pre = Amount::try_from_slice( - &wl_storage + &state .read_bytes(&pool_nam_balance_key) .expect("Test failed") .expect("Test failed"), ) .expect("Test failed"); let mut bp_erc_balance_pre = Amount::try_from_slice( - &wl_storage + &state .read_bytes(&pool_erc_balance_key) .expect("Test failed") .expect("Test failed"), ) .expect("Test failed"); - let (mut changed_keys, _) = act_on(&mut wl_storage, event).unwrap(); + let (mut changed_keys, _) = act_on(&mut state, event).unwrap(); for erc20 in [ random_erc20_token, @@ -1080,15 +1052,12 @@ mod tests { let prefix = BRIDGE_POOL_ADDRESS.to_db_key().into(); assert_eq!( - wl_storage - .iter_prefix(&prefix) - .expect("Test failed") - .count(), + state.iter_prefix(&prefix).expect("Test failed").count(), // NOTE: we should have one write -- the bridge pool nonce update 1 ); let relayer_nam_balance = Amount::try_from_slice( - &wl_storage + &state .read_bytes(&payer_nam_balance_key) .expect("Test failed: read error") .expect("Test failed: no value in storage"), @@ 
-1096,7 +1065,7 @@ mod tests { .expect("Test failed"); assert_eq!(relayer_nam_balance, Amount::from(3)); let relayer_erc_balance = Amount::try_from_slice( - &wl_storage + &state .read_bytes(&payer_erc_balance_key) .expect("Test failed: read error") .expect("Test failed: no value in storage"), @@ -1105,14 +1074,14 @@ mod tests { assert_eq!(relayer_erc_balance, Amount::from(2)); let bp_nam_balance_post = Amount::try_from_slice( - &wl_storage + &state .read_bytes(&pool_nam_balance_key) .expect("Test failed: read error") .expect("Test failed: no value in storage"), ) .expect("Test failed"); let bp_erc_balance_post = Amount::try_from_slice( - &wl_storage + &state .read_bytes(&pool_erc_balance_key) .expect("Test failed: read error") .expect("Test failed: no value in storage"), @@ -1132,19 +1101,16 @@ mod tests { /// Test that the transfers time out in the bridge pool then the refund when /// we act on a TransfersToEthereum fn test_act_on_timeout_for_transfers_to_eth() { - let mut wl_storage = TestWlStorage::default(); - test_utils::bootstrap_ethereum_bridge(&mut wl_storage); - wl_storage.commit_block().expect("Test failed"); - init_storage(&mut wl_storage); + let mut state = TestState::default(); + test_utils::bootstrap_ethereum_bridge(&mut state); + state.commit_block().expect("Test failed"); + init_storage(&mut state); // Height 0 - let pending_transfers = init_bridge_pool(&mut wl_storage); - init_balance(&mut wl_storage, &pending_transfers); - wl_storage - .storage - .commit_block(MockDBWriteBatch) - .expect("Test failed"); + let pending_transfers = init_bridge_pool(&mut state); + init_balance(&mut state, &pending_transfers); + state.commit_block().expect("Test failed"); // pending transfers time out - wl_storage.storage.block.height += 10 + 1; + state.in_mem_mut().block.height += 10 + 1; // new pending transfer let transfer = PendingTransfer { transfer: eth_bridge_pool::TransferToEthereum { @@ -1161,15 +1127,9 @@ mod tests { }, }; let key = get_pending_key(&transfer); - wl_storage - .storage - .write(&key, transfer.serialize_to_vec()) - .expect("Test failed"); - wl_storage - .storage - .commit_block(MockDBWriteBatch) - .expect("Test failed"); - wl_storage.storage.block.height += 1; + state.write(&key, transfer).expect("Test failed"); + state.commit_block().expect("Test failed"); + state.in_mem_mut().block.height += 1; // This should only refund let event = EthereumEvent::TransfersToEthereum { @@ -1177,15 +1137,12 @@ mod tests { transfers: vec![], relayer: gen_implicit_address(), }; - let _ = act_on(&mut wl_storage, event).unwrap(); + let _ = act_on(&mut state, event).unwrap(); // The latest transfer is still pending let prefix = BRIDGE_POOL_ADDRESS.to_db_key().into(); assert_eq!( - wl_storage - .iter_prefix(&prefix) - .expect("Test failed") - .count(), + state.iter_prefix(&prefix).expect("Test failed").count(), // NOTE: we should have two writes -- one of them being // the bridge pool nonce update 2 @@ -1197,13 +1154,13 @@ mod tests { .fold(Amount::from(0), |acc, t| acc + t.gas_fee.amount); let payer = address::testing::established_address_2(); let payer_key = balance_key(&nam(), &payer); - let value = wl_storage.read_bytes(&payer_key).expect("Test failed"); + let value = state.read_bytes(&payer_key).expect("Test failed"); let payer_balance = Amount::try_from_slice(&value.expect("Test failed")) .expect("Test failed"); assert_eq!(payer_balance, expected); let pool_key = balance_key(&nam(), &BRIDGE_POOL_ADDRESS); - let value = wl_storage.read_bytes(&pool_key).expect("Test failed"); + let value 
= state.read_bytes(&pool_key).expect("Test failed"); let pool_balance = Amount::try_from_slice(&value.expect("Test failed")) .expect("Test failed"); assert_eq!(pool_balance, Amount::from(0)); @@ -1212,15 +1169,13 @@ mod tests { for transfer in pending_transfers { if transfer.transfer.asset == wnam() { let sender_key = balance_key(&nam(), &transfer.transfer.sender); - let value = - wl_storage.read_bytes(&sender_key).expect("Test failed"); + let value = state.read_bytes(&sender_key).expect("Test failed"); let sender_balance = Amount::try_from_slice(&value.expect("Test failed")) .expect("Test failed"); assert_eq!(sender_balance, transfer.transfer.amount); let escrow_key = balance_key(&nam(), &BRIDGE_ADDRESS); - let value = - wl_storage.read_bytes(&escrow_key).expect("Test failed"); + let value = state.read_bytes(&escrow_key).expect("Test failed"); let escrow_balance = Amount::try_from_slice(&value.expect("Test failed")) .expect("Test failed"); @@ -1228,15 +1183,13 @@ mod tests { } else { let token = transfer.token_address(); let sender_key = balance_key(&token, &transfer.transfer.sender); - let value = - wl_storage.read_bytes(&sender_key).expect("Test failed"); + let value = state.read_bytes(&sender_key).expect("Test failed"); let sender_balance = Amount::try_from_slice(&value.expect("Test failed")) .expect("Test failed"); assert_eq!(sender_balance, transfer.transfer.amount); let escrow_key = balance_key(&token, &BRIDGE_POOL_ADDRESS); - let value = - wl_storage.read_bytes(&escrow_key).expect("Test failed"); + let value = state.read_bytes(&escrow_key).expect("Test failed"); let escrow_balance = Amount::try_from_slice(&value.expect("Test failed")) .expect("Test failed"); @@ -1247,8 +1200,8 @@ mod tests { #[test] fn test_redeem_native_token() -> Result<()> { - let mut wl_storage = TestWlStorage::default(); - test_utils::bootstrap_ethereum_bridge(&mut wl_storage); + let mut state = TestState::default(); + test_utils::bootstrap_ethereum_bridge(&mut state); let receiver = address::testing::established_address_1(); let amount = Amount::from(100); @@ -1258,7 +1211,7 @@ mod tests { &receiver, ); assert!( - wl_storage + state .read_bytes(&receiver_wnam_balance_key) .unwrap() .is_none() @@ -1267,28 +1220,28 @@ mod tests { let bridge_pool_initial_balance = Amount::from(100_000_000); let bridge_pool_native_token_balance_key = token::storage_key::balance_key( - &wl_storage.storage.native_token, + &state.in_mem().native_token, &BRIDGE_ADDRESS, ); let bridge_pool_native_erc20_supply_key = minted_balance_key(&wrapped_erc20s::token(&wnam())); StorageWrite::write( - &mut wl_storage, + &mut state, &bridge_pool_native_token_balance_key, bridge_pool_initial_balance, )?; StorageWrite::write( - &mut wl_storage, + &mut state, &bridge_pool_native_erc20_supply_key, amount, )?; let receiver_native_token_balance_key = token::storage_key::balance_key( - &wl_storage.storage.native_token, + &state.in_mem().native_token, &receiver, ); let changed_keys = - redeem_native_token(&mut wl_storage, &wnam(), &receiver, &amount)?; + redeem_native_token(&mut state, &wnam(), &receiver, &amount)?; assert_eq!( changed_keys, @@ -1299,21 +1252,15 @@ mod tests { ]) ); assert_eq!( - StorageRead::read( - &wl_storage, - &bridge_pool_native_token_balance_key - )?, + StorageRead::read(&state, &bridge_pool_native_token_balance_key)?, Some(bridge_pool_initial_balance - amount) ); assert_eq!( - StorageRead::read(&wl_storage, &receiver_native_token_balance_key)?, + StorageRead::read(&state, &receiver_native_token_balance_key)?, Some(amount) ); 
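// Illustrative sketch, not part of the diff: the typed storage round-trip the
// assertions above rely on. `TestState` implements `StorageRead` and
// `StorageWrite`, so Borsh-encoded values can be written and read back without
// manual (de)serialization. The storage key below is hypothetical.
fn typed_round_trip() {
    let mut state = TestState::default();
    let key =
        storage::Key::parse("example/balance").expect("valid storage key");
    StorageWrite::write(&mut state, &key, Amount::from(100))
        .expect("write failed");
    let stored: Option<Amount> =
        StorageRead::read(&state, &key).expect("read failed");
    assert_eq!(stored, Some(Amount::from(100)));
}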
assert_eq!( - StorageRead::read( - &wl_storage, - &bridge_pool_native_erc20_supply_key - )?, + StorageRead::read(&state, &bridge_pool_native_erc20_supply_key)?, Some(Amount::zero()) ); @@ -1321,7 +1268,7 @@ mod tests { // // wNAM is never minted, it's converted back to NAM assert!( - wl_storage + state .read_bytes(&receiver_wnam_balance_key) .unwrap() .is_none() @@ -1333,16 +1280,16 @@ mod tests { /// Auxiliary function to test wrapped Ethereum ERC20s functionality. fn test_wrapped_erc20s_aux(mut f: F) where - F: FnMut(&mut TestWlStorage, EthereumEvent), + F: FnMut(&mut TestState, EthereumEvent), { - let mut wl_storage = TestWlStorage::default(); - test_utils::bootstrap_ethereum_bridge(&mut wl_storage); - wl_storage.commit_block().expect("Test failed"); - init_storage(&mut wl_storage); + let mut state = TestState::default(); + test_utils::bootstrap_ethereum_bridge(&mut state); + state.commit_block().expect("Test failed"); + init_storage(&mut state); let native_erc20 = - read_native_erc20_address(&wl_storage).expect("Test failed"); + read_native_erc20_address(&state).expect("Test failed"); let pending_transfers = init_bridge_pool_transfers( - &mut wl_storage, + &mut state, [ (native_erc20, TransferData::default()), ( @@ -1371,7 +1318,7 @@ mod tests { ), ], ); - init_balance(&mut wl_storage, &pending_transfers); + init_balance(&mut state, &pending_transfers); let transfers = pending_transfers .into_iter() .map(|ref transfer| { @@ -1385,7 +1332,7 @@ mod tests { transfers, relayer, }; - f(&mut wl_storage, event) + f(&mut state, event) } #[test] @@ -1400,7 +1347,7 @@ mod tests { kind: eth_bridge_pool::TransferToEthereumKind, } - test_wrapped_erc20s_aux(|wl_storage, event| { + test_wrapped_erc20s_aux(|state, event| { let transfers = match &event { EthereumEvent::TransfersToEthereum { transfers, .. } => { transfers.iter() @@ -1408,7 +1355,7 @@ mod tests { _ => panic!("Test failed"), }; let native_erc20 = - read_native_erc20_address(wl_storage).expect("Test failed"); + read_native_erc20_address(state).expect("Test failed"); let deltas = transfers .filter_map( |event @ TransferToEthereum { asset, amount, .. }| { @@ -1416,7 +1363,7 @@ mod tests { return None; } let kind = { - let (pending, _) = wl_storage + let (pending, _) = state .ethbridge_queries() .lookup_transfer_to_eth(event) .expect("Test failed"); @@ -1430,13 +1377,13 @@ mod tests { wrapped_erc20s::nut(asset) } }; - let prev_balance = wl_storage + let prev_balance = state .read(&balance_key( &erc20_token, &BRIDGE_POOL_ADDRESS, )) .expect("Test failed"); - let prev_supply = wl_storage + let prev_supply = state .read(&minted_balance_key(&erc20_token)) .expect("Test failed"); Some(Delta { @@ -1450,7 +1397,7 @@ mod tests { ) .collect::>(); - _ = act_on(wl_storage, event).unwrap(); + _ = act_on(state, event).unwrap(); for Delta { kind, @@ -1478,11 +1425,11 @@ mod tests { } }; - let balance: token::Amount = wl_storage + let balance: token::Amount = state .read(&balance_key(&erc20_token, &BRIDGE_POOL_ADDRESS)) .expect("Read must succeed") .expect("Balance must exist"); - let supply: token::Amount = wl_storage + let supply: token::Amount = state .read(&minted_balance_key(&erc20_token)) .expect("Read must succeed") .expect("Balance must exist"); @@ -1499,44 +1446,44 @@ mod tests { /// Namada and instead are kept in escrow, under the Ethereum bridge /// account. 
fn test_wrapped_nam_not_burned() { - test_wrapped_erc20s_aux(|wl_storage, event| { + test_wrapped_erc20s_aux(|state, event| { let native_erc20 = - read_native_erc20_address(wl_storage).expect("Test failed"); + read_native_erc20_address(state).expect("Test failed"); let wnam = wrapped_erc20s::token(&native_erc20); let escrow_balance_key = balance_key(&nam(), &BRIDGE_ADDRESS); // check pre supply assert!( - wl_storage + state .read_bytes(&balance_key(&wnam, &BRIDGE_POOL_ADDRESS)) .expect("Test failed") .is_none() ); assert!( - wl_storage + state .read_bytes(&minted_balance_key(&wnam)) .expect("Test failed") .is_none() ); // check pre balance - let pre_escrowed_balance: token::Amount = wl_storage + let pre_escrowed_balance: token::Amount = state .read(&escrow_balance_key) .expect("Read must succeed") .expect("Balance must exist"); - _ = act_on(wl_storage, event).unwrap(); + _ = act_on(state, event).unwrap(); // check post supply - the wNAM minted supply should increase // by the transferred amount assert!( - wl_storage + state .read_bytes(&balance_key(&wnam, &BRIDGE_POOL_ADDRESS)) .expect("Test failed") .is_none() ); assert_eq!( - wl_storage + state .read::(&minted_balance_key(&wnam)) .expect("Reading from storage should not fail") .expect("The wNAM supply should have been updated"), @@ -1544,7 +1491,7 @@ mod tests { ); // check post balance - let post_escrowed_balance: token::Amount = wl_storage + let post_escrowed_balance: token::Amount = state .read(&escrow_balance_key) .expect("Read must succeed") .expect("Balance must exist"); @@ -1559,8 +1506,8 @@ mod tests { #[test] #[should_panic(expected = "Attempted to mint wNAM NUTs!")] fn test_wnam_doesnt_mint_nuts() { - let mut wl_storage = TestWlStorage::default(); - test_utils::bootstrap_ethereum_bridge(&mut wl_storage); + let mut state = TestState::default(); + test_utils::bootstrap_ethereum_bridge(&mut state); let transfer = PendingTransfer { transfer: eth_bridge_pool::TransferToEthereum { @@ -1577,6 +1524,6 @@ mod tests { }, }; - _ = update_transferred_asset_balances(&mut wl_storage, &transfer); + _ = update_transferred_asset_balances(&mut state, &transfer); } } diff --git a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs index afd596c8a4..063815a35e 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs @@ -16,7 +16,7 @@ use namada_core::storage::{BlockHeight, Epoch, Key}; use namada_core::token::Amount; use namada_proof_of_stake::pos_queries::PosQueries; use namada_state::tx_queue::ExpiredTx; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_state::{DBIter, StorageHasher, WlState, DB}; use namada_tx::data::TxResult; use namada_vote_ext::ethereum_events::{MultiSignedEthEvent, SignedVext, Vext}; @@ -43,7 +43,7 @@ impl utils::GetVoters for &HashSet { /// __INVARIANT__: Assume `ethereum_events` are sorted in ascending /// order. 
pub fn sign_ethereum_events( - wl_storage: &WlStorage, + state: &WlState, validator_addr: &Address, protocol_key: &common::SecretKey, ethereum_events: Vec, @@ -52,12 +52,12 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - if !wl_storage.ethbridge_queries().is_bridge_active() { + if !state.ethbridge_queries().is_bridge_active() { return None; } let ext = Vext { - block_height: wl_storage.storage.get_last_block_height(), + block_height: state.in_mem().get_last_block_height(), validator_addr: validator_addr.clone(), ethereum_events, }; @@ -81,14 +81,14 @@ where /// This function is deterministic based on some existing blockchain state and /// the passed `events`. pub fn apply_derived_tx( - wl_storage: &mut WlStorage, + state: &mut WlState, events: Vec, ) -> Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let mut changed_keys = timeout_events(wl_storage)?; + let mut changed_keys = timeout_events(state)?; if events.is_empty() { return Ok(TxResult { changed_keys, @@ -105,17 +105,17 @@ where .into_iter() .filter_map(|multisigned| { // NB: discard events with outdated nonces - wl_storage + state .ethbridge_queries() .validate_eth_event_nonce(&multisigned.event) .then(|| EthMsgUpdate::from(multisigned)) }) .collect(); - let voting_powers = utils::get_voting_powers(wl_storage, &updates)?; + let voting_powers = utils::get_voting_powers(state, &updates)?; let (mut apply_updates_keys, eth_bridge_events) = - apply_updates(wl_storage, updates, voting_powers)?; + apply_updates(state, updates, voting_powers)?; changed_keys.append(&mut apply_updates_keys); Ok(TxResult { @@ -131,7 +131,7 @@ where /// The `voting_powers` map must contain a voting power for all /// `(Address, BlockHeight)`s that occur in any of the `updates`. pub(super) fn apply_updates( - wl_storage: &mut WlStorage, + state: &mut WlState, updates: HashSet, voting_powers: HashMap<(Address, BlockHeight), Amount>, ) -> Result<(ChangedKeys, BTreeSet)> @@ -152,7 +152,7 @@ where // The order in which updates are applied to storage does not matter. // The final storage state will be the same regardless. let (mut changed, newly_confirmed) = - apply_update(wl_storage, update.clone(), &voting_powers)?; + apply_update(state, update.clone(), &voting_powers)?; changed_keys.append(&mut changed); if newly_confirmed { confirmed.push(update.body); @@ -167,8 +167,7 @@ where // Right now, the order in which events are acted on does not matter. // For `TransfersToNamada` events, they can happen in any order. for event in confirmed { - let (mut changed, mut new_tx_events) = - events::act_on(wl_storage, event)?; + let (mut changed, mut new_tx_events) = events::act_on(state, event)?; changed_keys.append(&mut changed); tx_events.append(&mut new_tx_events); } @@ -181,7 +180,7 @@ where /// The `voting_powers` map must contain a voting power for all /// `(Address, BlockHeight)`s that occur in `update`. fn apply_update( - wl_storage: &mut WlStorage, + state: &mut WlState, update: EthMsgUpdate, voting_powers: &HashMap<(Address, BlockHeight), Amount>, ) -> Result<(ChangedKeys, bool)> @@ -191,7 +190,7 @@ where { let eth_msg_keys = vote_tallies::Keys::from(&update.body); let exists_in_storage = if let Some(seen) = - votes::storage::maybe_read_seen(wl_storage, ð_msg_keys)? + votes::storage::maybe_read_seen(state, ð_msg_keys)? 
{ if seen { tracing::debug!(?update, "Ethereum event is already seen"); @@ -206,7 +205,7 @@ where if !exists_in_storage { tracing::debug!(%eth_msg_keys.prefix, "Ethereum event not seen before by any validator"); let vote_tracking = - calculate_new(wl_storage, update.seen_by, voting_powers)?; + calculate_new(state, update.seen_by, voting_powers)?; let changed = eth_msg_keys.into_iter().collect(); let confirmed = vote_tracking.seen; (vote_tracking, changed, confirmed, false) @@ -218,7 +217,7 @@ where let new_votes = NewVotes::new(update.seen_by.clone(), voting_powers)?; let (vote_tracking, changed) = - votes::update::calculate(wl_storage, ð_msg_keys, new_votes)?; + votes::update::calculate(state, ð_msg_keys, new_votes)?; if changed.is_empty() { return Ok((changed, false)); } @@ -228,7 +227,7 @@ where }; votes::storage::write( - wl_storage, + state, ð_msg_keys, &update.body, &vote_tracking, @@ -238,18 +237,18 @@ where Ok((changed, confirmed)) } -fn timeout_events(wl_storage: &mut WlStorage) -> Result +fn timeout_events(state: &mut WlState) -> Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { let mut changed = ChangedKeys::new(); - for keys in get_timed_out_eth_events(wl_storage)? { + for keys in get_timed_out_eth_events(state)? { tracing::debug!( %keys.prefix, "Ethereum event timed out", ); - if let Some(event) = votes::storage::delete(wl_storage, &keys)? { + if let Some(event) = votes::storage::delete(state, &keys)? { tracing::debug!( %keys.prefix, "Queueing Ethereum event for retransmission", @@ -260,8 +259,8 @@ where // replaying ethereum events has no effect on the ledger. // however, we may need to revisit this code if we ever // implement slashing on double voting of ethereum events. - wl_storage - .storage + state + .in_mem_mut() .expired_txs_queue .push(ExpiredTx::EthereumEvent(event)); } @@ -272,14 +271,14 @@ where } fn get_timed_out_eth_events( - wl_storage: &mut WlStorage, + state: &mut WlState, ) -> Result>> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let unbonding_len = wl_storage.pos_queries().get_pos_params().unbonding_len; - let current_epoch = wl_storage.storage.last_epoch; + let unbonding_len = state.pos_queries().get_pos_params().unbonding_len; + let current_epoch = state.in_mem().last_epoch; if current_epoch.0 <= unbonding_len { return Ok(Vec::new()); } @@ -290,7 +289,7 @@ where let mut is_timed_out = false; let mut is_seen = false; let mut results = Vec::new(); - for (key, val, _) in votes::storage::iter_prefix(wl_storage, &prefix)? { + for (key, val, _) in votes::storage::iter_prefix(state, &prefix)? 
{ let key = Key::parse(key).expect("The key should be parsable"); if let Some(keys) = vote_tallies::eth_event_keys(&key) { match &cur_keys { @@ -344,8 +343,7 @@ mod tests { }; use namada_core::ethereum_events::{EthereumEvent, TransferToNamada}; use namada_core::voting_power::FractionalVotingPower; - use namada_state::testing::TestWlStorage; - use namada_storage::mockdb::MockDBWriteBatch; + use namada_state::testing::TestState; use namada_storage::StorageRead; use super::*; @@ -391,9 +389,9 @@ mod tests { (sole_validator.clone(), BlockHeight(100)), validator_stake, )]); - let (mut wl_storage, _) = test_utils::setup_default_storage(); + let (mut state, _) = test_utils::setup_default_storage(); test_utils::whitelist_tokens( - &mut wl_storage, + &mut state, [( DAI_ERC20_ETH_ADDRESS, test_utils::WhitelistMeta { @@ -404,7 +402,7 @@ mod tests { ); let (changed_keys, _) = - apply_updates(&mut wl_storage, updates, voting_powers)?; + apply_updates(&mut state, updates, voting_powers)?; let eth_msg_keys: vote_tallies::Keys = (&body).into(); let wrapped_erc20_token = wrapped_erc20s::token(&asset); @@ -421,34 +419,34 @@ mod tests { changed_keys ); - let body_bytes = wl_storage.read_bytes(ð_msg_keys.body())?; + let body_bytes = state.read_bytes(ð_msg_keys.body())?; let body_bytes = body_bytes.unwrap(); assert_eq!(EthereumEvent::try_from_slice(&body_bytes)?, body); - let seen_bytes = wl_storage.read_bytes(ð_msg_keys.seen())?; + let seen_bytes = state.read_bytes(ð_msg_keys.seen())?; let seen_bytes = seen_bytes.unwrap(); assert!(bool::try_from_slice(&seen_bytes)?); - let seen_by_bytes = wl_storage.read_bytes(ð_msg_keys.seen_by())?; + let seen_by_bytes = state.read_bytes(ð_msg_keys.seen_by())?; let seen_by_bytes = seen_by_bytes.unwrap(); assert_eq!( Votes::try_from_slice(&seen_by_bytes)?, Votes::from([(sole_validator, BlockHeight(100))]) ); - let voting_power = wl_storage + let voting_power = state .read::(ð_msg_keys.voting_power())? 
.expect("Test failed") - .fractional_stake(&wl_storage); + .fractional_stake(&state); assert_eq!(voting_power, FractionalVotingPower::WHOLE); let epoch_bytes = - wl_storage.read_bytes(ð_msg_keys.voting_started_epoch())?; + state.read_bytes(ð_msg_keys.voting_started_epoch())?; let epoch_bytes = epoch_bytes.unwrap(); assert_eq!(Epoch::try_from_slice(&epoch_bytes)?, Epoch(0)); - let wrapped_erc20_balance_bytes = wl_storage - .read_bytes(&balance_key(&wrapped_erc20_token, &receiver))?; + let wrapped_erc20_balance_bytes = + state.read_bytes(&balance_key(&wrapped_erc20_token, &receiver))?; let wrapped_erc20_balance_bytes = wrapped_erc20_balance_bytes.unwrap(); assert_eq!( Amount::try_from_slice(&wrapped_erc20_balance_bytes)?, @@ -456,7 +454,7 @@ mod tests { ); let wrapped_erc20_supply_bytes = - wl_storage.read_bytes(&minted_balance_key(&wrapped_erc20_token))?; + state.read_bytes(&minted_balance_key(&wrapped_erc20_token))?; let wrapped_erc20_supply_bytes = wrapped_erc20_supply_bytes.unwrap(); assert_eq!( Amount::try_from_slice(&wrapped_erc20_supply_bytes)?, @@ -472,12 +470,12 @@ mod tests { /// that it is recorded in storage fn test_apply_derived_tx_new_event_mint_immediately() { let sole_validator = address::testing::established_address_2(); - let (mut wl_storage, _) = + let (mut state, _) = test_utils::setup_storage_with_validators(HashMap::from_iter( vec![(sole_validator.clone(), Amount::native_whole(100))], )); test_utils::whitelist_tokens( - &mut wl_storage, + &mut state, [( DAI_ERC20_ETH_ADDRESS, test_utils::WhitelistMeta { @@ -498,7 +496,7 @@ mod tests { }; let result = apply_derived_tx( - &mut wl_storage, + &mut state, vec![MultiSignedEthEvent { event: event.clone(), signers: BTreeSet::from([(sole_validator, BlockHeight(100))]), @@ -542,7 +540,7 @@ mod tests { fn test_apply_derived_tx_new_event_dont_mint() { let validator_a = address::testing::established_address_2(); let validator_b = address::testing::established_address_3(); - let (mut wl_storage, _) = test_utils::setup_storage_with_validators( + let (mut state, _) = test_utils::setup_storage_with_validators( HashMap::from_iter(vec![ (validator_a.clone(), Amount::native_whole(100)), (validator_b, Amount::native_whole(100)), @@ -560,7 +558,7 @@ mod tests { }; let result = apply_derived_tx( - &mut wl_storage, + &mut state, vec![MultiSignedEthEvent { event: event.clone(), signers: BTreeSet::from([(validator_a, BlockHeight(100))]), @@ -594,7 +592,7 @@ mod tests { pub fn test_apply_derived_tx_duplicates() -> Result<()> { let validator_a = address::testing::established_address_2(); let validator_b = address::testing::established_address_3(); - let (mut wl_storage, _) = test_utils::setup_storage_with_validators( + let (mut state, _) = test_utils::setup_storage_with_validators( HashMap::from_iter(vec![ (validator_a.clone(), Amount::native_whole(100)), (validator_b, Amount::native_whole(100)), @@ -618,7 +616,7 @@ mod tests { let multisigneds = vec![multisigned.clone(), multisigned]; - let result = apply_derived_tx(&mut wl_storage, multisigneds); + let result = apply_derived_tx(&mut state, multisigneds); let tx_result = match result { Ok(tx_result) => tx_result, Err(err) => panic!("unexpected error: {:#?}", err), @@ -637,17 +635,17 @@ mod tests { "One vote for the Ethereum event should have been recorded", ); - let seen_by_bytes = wl_storage.read_bytes(ð_msg_keys.seen_by())?; + let seen_by_bytes = state.read_bytes(ð_msg_keys.seen_by())?; let seen_by_bytes = seen_by_bytes.unwrap(); assert_eq!( Votes::try_from_slice(&seen_by_bytes)?, 
Votes::from([(validator_a, BlockHeight(100))]) ); - let voting_power = wl_storage + let voting_power = state .read::(ð_msg_keys.voting_power())? .expect("Test failed") - .fractional_stake(&wl_storage); + .fractional_stake(&state); assert_eq!(voting_power, FractionalVotingPower::HALF); Ok(()) @@ -715,7 +713,7 @@ mod tests { pub fn test_timeout_events() { let validator_a = address::testing::established_address_2(); let validator_b = address::testing::established_address_3(); - let (mut wl_storage, _) = test_utils::setup_storage_with_validators( + let (mut state, _) = test_utils::setup_storage_with_validators( HashMap::from_iter(vec![ (validator_a.clone(), Amount::native_whole(100)), (validator_b, Amount::native_whole(100)), @@ -732,7 +730,7 @@ mod tests { }], }; let _result = apply_derived_tx( - &mut wl_storage, + &mut state, vec![MultiSignedEthEvent { event: event.clone(), signers: BTreeSet::from([( @@ -744,15 +742,15 @@ mod tests { let prev_keys = vote_tallies::Keys::from(&event); // commit then update the epoch - wl_storage.storage.commit_block(MockDBWriteBatch).unwrap(); + state.commit_block().unwrap(); let unbonding_len = - namada_proof_of_stake::storage::read_pos_params(&wl_storage) + namada_proof_of_stake::storage::read_pos_params(&state) .expect("Test failed") .unbonding_len + 1; - wl_storage.storage.last_epoch = - wl_storage.storage.last_epoch + unbonding_len; - wl_storage.storage.block.epoch = wl_storage.storage.last_epoch + 1_u64; + state.in_mem_mut().last_epoch = + state.in_mem().last_epoch + unbonding_len; + state.in_mem_mut().block.epoch = state.in_mem().last_epoch + 1_u64; let new_event = EthereumEvent::TransfersToNamada { nonce: 1.into(), @@ -763,7 +761,7 @@ mod tests { }], }; let result = apply_derived_tx( - &mut wl_storage, + &mut state, vec![MultiSignedEthEvent { event: new_event.clone(), signers: BTreeSet::from([(validator_a, BlockHeight(100))]), @@ -792,14 +790,14 @@ mod tests { "New event should be inserted and the previous one should be \ deleted", ); - assert!(wl_storage.read_bytes(&prev_keys.body()).unwrap().is_none()); - assert!(wl_storage.read_bytes(&new_keys.body()).unwrap().is_some()); + assert!(state.read_bytes(&prev_keys.body()).unwrap().is_none()); + assert!(state.read_bytes(&new_keys.body()).unwrap().is_some()); } /// Helper fn to [`test_timeout_events_before_state_upds`]. 
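// Illustrative sketch, not part of the diff: the commit-then-advance pattern the
// timeout tests above use with the new API. `advance_past_unbonding` is a
// hypothetical helper; `commit_block`, `in_mem`/`in_mem_mut` and the fields
// touched here are the ones appearing in the surrounding hunks.
fn advance_past_unbonding(state: &mut TestState, unbonding_len: u64) {
    // Persist the current block before mutating the in-memory state.
    state.commit_block().expect("commit failed");
    // Move the last committed epoch past the unbonding length, then set the
    // current block epoch just past it, mirroring the test setup above.
    state.in_mem_mut().last_epoch = state.in_mem().last_epoch + unbonding_len;
    state.in_mem_mut().block.epoch = state.in_mem().last_epoch + 1_u64;
}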
fn check_event_keys( keys: &Keys, - wl_storage: &TestWlStorage, + state: &TestState, result: Result, mut assert: F, ) where @@ -809,19 +807,16 @@ mod tests { Ok(tx_result) => tx_result, Err(err) => panic!("unexpected error: {:#?}", err), }; - assert(KeyKind::Body, wl_storage.read_bytes(&keys.body()).unwrap()); - assert(KeyKind::Seen, wl_storage.read_bytes(&keys.seen()).unwrap()); - assert( - KeyKind::SeenBy, - wl_storage.read_bytes(&keys.seen_by()).unwrap(), - ); + assert(KeyKind::Body, state.read_bytes(&keys.body()).unwrap()); + assert(KeyKind::Seen, state.read_bytes(&keys.seen()).unwrap()); + assert(KeyKind::SeenBy, state.read_bytes(&keys.seen_by()).unwrap()); assert( KeyKind::VotingPower, - wl_storage.read_bytes(&keys.voting_power()).unwrap(), + state.read_bytes(&keys.voting_power()).unwrap(), ); assert( KeyKind::Epoch, - wl_storage.read_bytes(&keys.voting_started_epoch()).unwrap(), + state.read_bytes(&keys.voting_started_epoch()).unwrap(), ); assert_eq!( tx_result.changed_keys, @@ -842,7 +837,7 @@ mod tests { fn test_timeout_events_before_state_upds() { let validator_a = address::testing::established_address_2(); let validator_b = address::testing::established_address_3(); - let (mut wl_storage, _) = test_utils::setup_storage_with_validators( + let (mut state, _) = test_utils::setup_storage_with_validators( HashMap::from_iter(vec![ (validator_a.clone(), Amount::native_whole(100)), (validator_b.clone(), Amount::native_whole(100)), @@ -861,54 +856,54 @@ mod tests { let keys = vote_tallies::Keys::from(&event); let result = apply_derived_tx( - &mut wl_storage, + &mut state, vec![MultiSignedEthEvent { event: event.clone(), signers: BTreeSet::from([(validator_a, BlockHeight(100))]), }], ); - check_event_keys(&keys, &wl_storage, result, |key_kind, value| match ( - key_kind, value, - ) { - (_, None) => panic!("Test failed"), - (KeyKind::VotingPower, Some(power)) => { - let power = EpochedVotingPower::try_from_slice(&power) - .expect("Test failed") - .fractional_stake(&wl_storage); - assert_eq!(power, FractionalVotingPower::HALF); + check_event_keys(&keys, &state, result, |key_kind, value| { + match (key_kind, value) { + (_, None) => panic!("Test failed"), + (KeyKind::VotingPower, Some(power)) => { + let power = EpochedVotingPower::try_from_slice(&power) + .expect("Test failed") + .fractional_stake(&state); + assert_eq!(power, FractionalVotingPower::HALF); + } + (_, Some(_)) => {} } - (_, Some(_)) => {} }); // commit then update the epoch - wl_storage.storage.commit_block(MockDBWriteBatch).unwrap(); + state.commit_block().unwrap(); let unbonding_len = - namada_proof_of_stake::storage::read_pos_params(&wl_storage) + namada_proof_of_stake::storage::read_pos_params(&state) .expect("Test failed") .unbonding_len + 1; - wl_storage.storage.last_epoch = - wl_storage.storage.last_epoch + unbonding_len; - wl_storage.storage.block.epoch = wl_storage.storage.last_epoch + 1_u64; + state.in_mem_mut().last_epoch = + state.in_mem().last_epoch + unbonding_len; + state.in_mem_mut().block.epoch = state.in_mem().last_epoch + 1_u64; let result = apply_derived_tx( - &mut wl_storage, + &mut state, vec![MultiSignedEthEvent { event, signers: BTreeSet::from([(validator_b, BlockHeight(100))]), }], ); - check_event_keys(&keys, &wl_storage, result, |key_kind, value| match ( - key_kind, value, - ) { - (_, None) => panic!("Test failed"), - (KeyKind::VotingPower, Some(power)) => { - let power = EpochedVotingPower::try_from_slice(&power) - .expect("Test failed") - .fractional_stake(&wl_storage); - assert_eq!(power, 
FractionalVotingPower::HALF); + check_event_keys(&keys, &state, result, |key_kind, value| { + match (key_kind, value) { + (_, None) => panic!("Test failed"), + (KeyKind::VotingPower, Some(power)) => { + let power = EpochedVotingPower::try_from_slice(&power) + .expect("Test failed") + .fractional_stake(&state); + assert_eq!(power, FractionalVotingPower::HALF); + } + (_, Some(_)) => {} } - (_, Some(_)) => {} }); } @@ -916,7 +911,7 @@ mod tests { /// not result in votes in storage. #[test] fn test_apply_derived_tx_outdated_nonce() -> Result<()> { - let (mut wl_storage, _) = test_utils::setup_default_storage(); + let (mut state, _) = test_utils::setup_default_storage(); let new_multisigned = |nonce: u64| { let (validator, _) = test_utils::default_validator(); @@ -941,7 +936,7 @@ mod tests { ($nonce:expr) => { let (multisigned, event) = new_multisigned($nonce); let tx_result = - apply_derived_tx(&mut wl_storage, vec![multisigned])?; + apply_derived_tx(&mut state, vec![multisigned])?; let eth_msg_keys = vote_tallies::Keys::from(&event); assert!( @@ -949,9 +944,7 @@ mod tests { "The Ethereum event should have been seen", ); assert_eq!( - wl_storage - .ethbridge_queries() - .get_next_nam_transfers_nonce(), + state.ethbridge_queries().get_next_nam_transfers_nonce(), ($nonce + 1).into(), "The transfers to Namada nonce should have been \ incremented", @@ -962,7 +955,7 @@ mod tests { ($nonce:expr) => { let (multisigned, event) = new_multisigned($nonce); let tx_result = - apply_derived_tx(&mut wl_storage, vec![multisigned])?; + apply_derived_tx(&mut state, vec![multisigned])?; let eth_msg_keys = vote_tallies::Keys::from(&event); assert!( @@ -970,9 +963,7 @@ mod tests { "The Ethereum event should have been ignored", ); assert_eq!( - wl_storage - .ethbridge_queries() - .get_next_nam_transfers_nonce(), + state.ethbridge_queries().get_next_nam_transfers_nonce(), NEXT_NONCE_TO_PROCESS.into(), "The transfers to Namada nonce should not have changed", ); diff --git a/crates/ethereum_bridge/src/protocol/transactions/read.rs b/crates/ethereum_bridge/src/protocol/transactions/read.rs index 63663a4eb0..9d45d2cbe4 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/read.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/read.rs @@ -3,46 +3,45 @@ use borsh::BorshDeserialize; use eyre::{eyre, Result}; use namada_core::storage; use namada_core::token::Amount; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_state::{DBIter, StorageHasher, WlState, DB}; use namada_storage::StorageRead; /// Returns the stored Amount, or 0 if not stored pub(super) fn amount_or_default( - wl_storage: &WlStorage, + state: &WlState, key: &storage::Key, ) -> Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - Ok(maybe_value(wl_storage, key)?.unwrap_or_default()) + Ok(maybe_value(state, key)?.unwrap_or_default()) } /// Read some arbitrary value from storage, erroring if it's not found pub(super) fn value( - wl_storage: &WlStorage, + state: &WlState, key: &storage::Key, ) -> Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - maybe_value(wl_storage, key)? - .ok_or_else(|| eyre!("no value found at {}", key)) + maybe_value(state, key)?.ok_or_else(|| eyre!("no value found at {}", key)) } /// Try to read some arbitrary value from storage, returning `None` if nothing /// is read. This will still error if there is data stored at `key` but it is /// not deserializable to `T`. 
pub(super) fn maybe_value( - wl_storage: &WlStorage, + state: &WlState, key: &storage::Key, ) -> Result> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let maybe_val = wl_storage.read_bytes(key)?; + let maybe_val = state.read_bytes(key)?; let bytes = match maybe_val { Some(bytes) => bytes, None => return Ok(None), @@ -56,14 +55,14 @@ mod tests { use assert_matches::assert_matches; use namada_core::storage; use namada_core::token::Amount; - use namada_state::testing::TestWlStorage; + use namada_state::testing::TestState; use namada_storage::StorageWrite; use crate::protocol::transactions::read; #[test] fn test_amount_returns_zero_for_uninitialized_storage() { - let fake_storage = TestWlStorage::default(); + let fake_storage = TestState::default(); let amt = read::amount_or_default( &fake_storage, &storage::Key::parse("some arbitrary key with no stored value") @@ -77,7 +76,7 @@ mod tests { fn test_amount_returns_stored_amount() { let key = storage::Key::parse("some arbitrary key").unwrap(); let amount = Amount::from(1_000_000); - let mut fake_storage = TestWlStorage::default(); + let mut fake_storage = TestState::default(); fake_storage.write(&key, amount).unwrap(); let amt = read::amount_or_default(&fake_storage, &key).unwrap(); @@ -88,7 +87,7 @@ mod tests { fn test_amount_errors_if_not_amount() { let key = storage::Key::parse("some arbitrary key").unwrap(); let amount = "not an Amount type"; - let mut fake_storage = TestWlStorage::default(); + let mut fake_storage = TestState::default(); fake_storage.write(&key, amount).unwrap(); assert_matches!(read::amount_or_default(&fake_storage, &key), Err(_)); diff --git a/crates/ethereum_bridge/src/protocol/transactions/update.rs b/crates/ethereum_bridge/src/protocol/transactions/update.rs index 7d3d5d2e8f..752ec4cef9 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/update.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/update.rs @@ -4,12 +4,12 @@ use namada_core::borsh::{BorshDeserialize, BorshSerialize}; use namada_core::hash::StorageHasher; use namada_core::storage; use namada_core::token::{Amount, AmountError}; -use namada_state::{DBIter, WlStorage, DB}; +use namada_state::{DBIter, WlState, DB}; use namada_storage::StorageWrite; /// Reads the `Amount` from key, applies update then writes it back pub fn amount( - wl_storage: &mut WlStorage, + state: &mut WlState, key: &storage::Key, update: impl FnOnce(&mut Amount) -> Result<(), AmountError>, ) -> Result @@ -17,16 +17,16 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let mut amount = super::read::amount_or_default(wl_storage, key)?; + let mut amount = super::read::amount_or_default(state, key)?; update(&mut amount)?; - wl_storage.write(key, amount)?; + state.write(key, amount)?; Ok(amount) } #[allow(dead_code)] /// Reads an arbitrary value, applies update then writes it back pub fn value( - wl_storage: &mut WlStorage, + state: &mut WlState, key: &storage::Key, update: impl FnOnce(&mut T), ) -> Result @@ -34,9 +34,9 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let mut value = super::read::value(wl_storage, key)?; + let mut value = super::read::value(state, key)?; update(&mut value); - wl_storage.write(key, &value)?; + state.write(key, &value)?; Ok(value) } @@ -44,7 +44,7 @@ where mod tests { use eyre::{eyre, Result}; use namada_core::storage; - use namada_state::testing::TestWlStorage; + use 
namada_state::testing::TestState; use namada_storage::{StorageRead, StorageWrite}; use super::*; @@ -55,14 +55,12 @@ mod tests { let key = storage::Key::parse("some arbitrary key") .expect("could not set up test"); let value = 21i32; - let mut wl_storage = TestWlStorage::default(); - wl_storage - .write(&key, value) - .expect("could not set up test"); + let mut state = TestState::default(); + state.write(&key, value).expect("could not set up test"); - super::value(&mut wl_storage, &key, |v: &mut i32| *v *= 2)?; + super::value(&mut state, &key, |v: &mut i32| *v *= 2)?; - let new_val = wl_storage.read_bytes(&key)?; + let new_val = state.read_bytes(&key)?; let new_val = match new_val { Some(new_val) => ::try_from_slice(&new_val)?, None => return Err(eyre!("no value found")), diff --git a/crates/ethereum_bridge/src/protocol/transactions/utils.rs b/crates/ethereum_bridge/src/protocol/transactions/utils.rs index 6694cd6f27..f1ec5ccafc 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/utils.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/utils.rs @@ -7,7 +7,7 @@ use namada_core::storage::BlockHeight; use namada_core::token; use namada_proof_of_stake::pos_queries::PosQueries; use namada_proof_of_stake::types::WeightedValidator; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_state::{DBIter, StorageHasher, WlState, DB}; /// Proof of some arbitrary tally whose voters can be queried. pub(super) trait GetVoters { @@ -22,7 +22,7 @@ pub(super) trait GetVoters { /// which they signed some arbitrary object, and whose values are the voting /// powers of these validators at the key's given block height. pub(super) fn get_voting_powers( - wl_storage: &WlStorage, + state: &WlState, proof: P, ) -> eyre::Result> where @@ -34,7 +34,7 @@ where tracing::debug!(?voters, "Got validators who voted on at least one event"); let consensus_validators = get_consensus_validators( - wl_storage, + state, voters.iter().map(|(_, h)| h.to_owned()).collect(), ); tracing::debug!( @@ -55,7 +55,7 @@ where // TODO: we might be able to remove allocation here pub(super) fn get_consensus_validators( - wl_storage: &WlStorage, + state: &WlState, block_heights: HashSet, ) -> BTreeMap> where @@ -64,12 +64,12 @@ where { let mut consensus_validators = BTreeMap::default(); for height in block_heights.into_iter() { - let epoch = wl_storage.pos_queries().get_epoch(height).expect( + let epoch = state.pos_queries().get_epoch(height).expect( "The epoch of the last block height should always be known", ); _ = consensus_validators.insert( height, - wl_storage + state .pos_queries() .get_consensus_validators(Some(epoch)) .iter() diff --git a/crates/ethereum_bridge/src/protocol/transactions/validator_set_update/mod.rs b/crates/ethereum_bridge/src/protocol/transactions/validator_set_update/mod.rs index 86ecaf0d88..bc775da79d 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/validator_set_update/mod.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/validator_set_update/mod.rs @@ -7,7 +7,7 @@ use namada_core::address::Address; use namada_core::key::common; use namada_core::storage::{BlockHeight, Epoch}; use namada_core::token::Amount; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_state::{DBIter, StorageHasher, WlState, DB}; use namada_tx::data::TxResult; use namada_vote_ext::validator_set_update; @@ -35,7 +35,7 @@ impl utils::GetVoters for (&validator_set_update::VextDigest, BlockHeight) { /// Sign the next set of validators, and return the associated /// vote 
extension protocol transaction. pub fn sign_validator_set_update( - wl_storage: &WlStorage, + state: &WlState, validator_addr: &Address, eth_hot_key: &common::SecretKey, ) -> Option @@ -43,13 +43,13 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - wl_storage + state .ethbridge_queries() .must_send_valset_upd(SendValsetUpd::Now) .then(|| { - let next_epoch = wl_storage.storage.get_current_epoch().0.next(); + let next_epoch = state.in_mem().get_current_epoch().0.next(); - let voting_powers = wl_storage + let voting_powers = state .ethbridge_queries() .get_consensus_eth_addresses(Some(next_epoch)) .iter() @@ -61,7 +61,7 @@ where let ext = validator_set_update::Vext { voting_powers, validator_addr: validator_addr.clone(), - signing_epoch: wl_storage.storage.get_current_epoch().0, + signing_epoch: state.in_mem().get_current_epoch().0, }; ext.sign(eth_hot_key) @@ -69,7 +69,7 @@ where } pub fn aggregate_votes( - wl_storage: &mut WlStorage, + state: &mut WlState, ext: validator_set_update::VextDigest, signing_epoch: Epoch, ) -> Result @@ -87,8 +87,8 @@ where "Aggregating new votes for validator set update" ); - let epoch_2nd_height = wl_storage - .storage + let epoch_2nd_height = state + .in_mem() .block .pred_epochs .get_start_height_of_epoch(signing_epoch) @@ -100,9 +100,9 @@ where .expect("The first block height of the signing epoch should be known") + 1; let voting_powers = - utils::get_voting_powers(wl_storage, (&ext, epoch_2nd_height))?; + utils::get_voting_powers(state, (&ext, epoch_2nd_height))?; let changed_keys = apply_update( - wl_storage, + state, ext, signing_epoch, epoch_2nd_height, @@ -116,7 +116,7 @@ where } fn apply_update( - wl_storage: &mut WlStorage, + state: &mut WlState, ext: validator_set_update::VextDigest, signing_epoch: Epoch, epoch_2nd_height: BlockHeight, @@ -134,14 +134,14 @@ where }; let valset_upd_keys = vote_tallies::Keys::from(&next_epoch); let maybe_proof = 'check_storage: { - let Some(seen) = votes::storage::maybe_read_seen(wl_storage, &valset_upd_keys)? else { + let Some(seen) = votes::storage::maybe_read_seen(state, &valset_upd_keys)? 
else { break 'check_storage None; }; if seen { tracing::debug!("Validator set update tally is already seen"); return Ok(ChangedKeys::default()); } - let proof = votes::storage::read_body(wl_storage, &valset_upd_keys)?; + let proof = votes::storage::read_body(state, &valset_upd_keys)?; Some(proof) }; @@ -161,11 +161,8 @@ where "Validator set update votes already in storage", ); let new_votes = NewVotes::new(seen_by, &voting_powers)?; - let (tally, changed) = votes::update::calculate( - wl_storage, - &valset_upd_keys, - new_votes, - )?; + let (tally, changed) = + votes::update::calculate(state, &valset_upd_keys, new_votes)?; if changed.is_empty() { return Ok(changed); } @@ -174,7 +171,7 @@ where proof.attach_signature_batch(ext.signatures.into_iter().map( |(addr, sig)| { ( - wl_storage + state .ethbridge_queries() .get_eth_addr_book(&addr, Some(signing_epoch)) .expect("All validators should have eth keys"), @@ -189,13 +186,12 @@ where ?ext.voting_powers, "New validator set update vote aggregation started" ); - let tally = - votes::calculate_new(wl_storage, seen_by, &voting_powers)?; + let tally = votes::calculate_new(state, seen_by, &voting_powers)?; let mut proof = EthereumProof::new(ext.voting_powers); proof.attach_signature_batch(ext.signatures.into_iter().map( |(addr, sig)| { ( - wl_storage + state .ethbridge_queries() .get_eth_addr_book(&addr, Some(signing_epoch)) .expect("All validators should have eth keys"), @@ -214,7 +210,7 @@ where "Applying validator set update state changes" ); votes::storage::write( - wl_storage, + state, &valset_upd_keys, &proof, &tally, @@ -245,16 +241,16 @@ mod test_valset_upd_state_changes { /// it should have a complete proof backing it up in storage. #[test] fn test_seen_has_complete_proof() { - let (mut wl_storage, keys) = test_utils::setup_default_storage(); + let (mut state, keys) = test_utils::setup_default_storage(); - let last_height = wl_storage.storage.get_last_block_height(); - let signing_epoch = wl_storage + let last_height = state.in_mem().get_last_block_height(); + let signing_epoch = state .pos_queries() .get_epoch(last_height) .expect("The epoch of the last block height should be known"); let tx_result = aggregate_votes( - &mut wl_storage, + &mut state, validator_set_update::VextDigest::singleton( validator_set_update::Vext { voting_powers: VotingPowersMap::new(), @@ -287,13 +283,13 @@ mod test_valset_upd_state_changes { ); // check if the valset upd is marked as "seen" - let tally = votes::storage::read(&wl_storage, &valset_upd_keys) + let tally = votes::storage::read(&state, &valset_upd_keys) .expect("Test failed"); assert!(tally.seen); // read the proof in storage and make sure its signature is // from the configured validator - let proof = votes::storage::read_body(&wl_storage, &valset_upd_keys) + let proof = votes::storage::read_body(&state, &valset_upd_keys) .expect("Test failed"); assert_eq!(proof.data, VotingPowersMap::new()); @@ -303,7 +299,7 @@ mod test_valset_upd_state_changes { let addr_book = proof_sigs.pop().expect("Test failed"); assert_eq!( addr_book, - wl_storage + state .ethbridge_queries() .get_eth_addr_book( &address::testing::established_address_1(), @@ -314,10 +310,10 @@ mod test_valset_upd_state_changes { // since only one validator is configured, we should // have reached a complete proof - let total_voting_power = wl_storage + let total_voting_power = state .pos_queries() .get_total_voting_power(Some(signing_epoch)); - let validator_voting_power = wl_storage + let validator_voting_power = state .pos_queries() 
.get_validator_from_address( &address::testing::established_address_1(), @@ -338,7 +334,7 @@ mod test_valset_upd_state_changes { /// it should never have a complete proof backing it up in storage. #[test] fn test_not_seen_has_incomplete_proof() { - let (mut wl_storage, keys) = + let (mut state, keys) = test_utils::setup_storage_with_validators(HashMap::from_iter([ // the first validator has exactly 2/3 of the total stake ( @@ -351,14 +347,14 @@ mod test_valset_upd_state_changes { ), ])); - let last_height = wl_storage.storage.get_last_block_height(); - let signing_epoch = wl_storage + let last_height = state.in_mem().get_last_block_height(); + let signing_epoch = state .pos_queries() .get_epoch(last_height) .expect("The epoch of the last block height should be known"); let tx_result = aggregate_votes( - &mut wl_storage, + &mut state, validator_set_update::VextDigest::singleton( validator_set_update::Vext { voting_powers: VotingPowersMap::new(), @@ -391,13 +387,13 @@ mod test_valset_upd_state_changes { ); // assert the validator set update is not "seen" yet - let tally = votes::storage::read(&wl_storage, &valset_upd_keys) + let tally = votes::storage::read(&state, &valset_upd_keys) .expect("Test failed"); assert!(!tally.seen); // read the proof in storage and make sure its signature is // from the configured validator - let proof = votes::storage::read_body(&wl_storage, &valset_upd_keys) + let proof = votes::storage::read_body(&state, &valset_upd_keys) .expect("Test failed"); assert_eq!(proof.data, VotingPowersMap::new()); @@ -407,7 +403,7 @@ mod test_valset_upd_state_changes { let addr_book = proof_sigs.pop().expect("Test failed"); assert_eq!( addr_book, - wl_storage + state .ethbridge_queries() .get_eth_addr_book( &address::testing::established_address_1(), @@ -417,10 +413,10 @@ mod test_valset_upd_state_changes { ); // make sure we do not have a complete proof yet - let total_voting_power = wl_storage + let total_voting_power = state .pos_queries() .get_total_voting_power(Some(signing_epoch)); - let validator_voting_power = wl_storage + let validator_voting_power = state .pos_queries() .get_validator_from_address( &address::testing::established_address_1(), diff --git a/crates/ethereum_bridge/src/protocol/transactions/votes.rs b/crates/ethereum_bridge/src/protocol/transactions/votes.rs index ef361f6a09..719dd91647 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/votes.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/votes.rs @@ -10,7 +10,7 @@ use namada_core::storage::{BlockHeight, Epoch}; use namada_core::token; use namada_core::voting_power::FractionalVotingPower; use namada_proof_of_stake::pos_queries::PosQueries; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_state::{DBIter, StorageHasher, WlState, DB}; use super::{read, ChangedKeys}; @@ -35,7 +35,7 @@ pub trait EpochedVotingPowerExt { /// the most staked tokens. 
fn epoch_max_voting_power( &self, - wl_storage: &WlStorage, + state: &WlState, ) -> Option where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -51,13 +51,13 @@ pub trait EpochedVotingPowerExt { #[inline] fn fractional_stake( &self, - wl_storage: &WlStorage, + state: &WlState, ) -> FractionalVotingPower where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let Some(max_voting_power) = self.epoch_max_voting_power(wl_storage) else { + let Some(max_voting_power) = self.epoch_max_voting_power(state) else { return FractionalVotingPower::NULL; }; FractionalVotingPower::new( @@ -70,12 +70,12 @@ pub trait EpochedVotingPowerExt { /// Check if the [`Tally`] associated with an [`EpochedVotingPower`] /// can be considered `seen`. #[inline] - fn has_majority_quorum(&self, wl_storage: &WlStorage) -> bool + fn has_majority_quorum(&self, state: &WlState) -> bool where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let Some(max_voting_power) = self.epoch_max_voting_power(wl_storage) else { + let Some(max_voting_power) = self.epoch_max_voting_power(state) else { return false; }; // NB: Preserve the safety property of the Tendermint protocol across @@ -96,7 +96,7 @@ pub trait EpochedVotingPowerExt { impl EpochedVotingPowerExt for EpochedVotingPower { fn epoch_max_voting_power( &self, - wl_storage: &WlStorage, + state: &WlState, ) -> Option where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -105,7 +105,7 @@ impl EpochedVotingPowerExt for EpochedVotingPower { self.keys() .copied() .map(|epoch| { - wl_storage.pos_queries().get_total_voting_power(Some(epoch)) + state.pos_queries().get_total_voting_power(Some(epoch)) }) .max() } @@ -136,7 +136,7 @@ pub struct Tally { /// Calculate a new [`Tally`] based on some validators' fractional voting powers /// as specific block heights pub fn calculate_new( - wl_storage: &WlStorage, + state: &WlState, seen_by: Votes, voting_powers: &HashMap<(Address, BlockHeight), token::Amount>, ) -> Result @@ -150,7 +150,7 @@ where .get(&(validator.to_owned(), block_height.to_owned())) { Some(&voting_power) => { - let epoch = wl_storage + let epoch = state .pos_queries() .get_epoch(*block_height) .expect("The queried epoch should be known"); @@ -168,7 +168,7 @@ where }; } - let newly_confirmed = seen_by_voting_power.has_majority_quorum(wl_storage); + let newly_confirmed = seen_by_voting_power.has_majority_quorum(state); Ok(Tally { voting_power: seen_by_voting_power, seen_by, @@ -316,7 +316,7 @@ mod tests { validator_1_stake + validator_2_stake + validator_3_stake; // start epoch 0 with validator 1 - let (mut wl_storage, _) = test_utils::setup_storage_with_validators( + let (mut state, _) = test_utils::setup_storage_with_validators( HashMap::from([(validator_1.clone(), validator_1_stake)]), ); @@ -325,11 +325,11 @@ mod tests { pipeline_len: 1, ..Default::default() }; - write_pos_params(&mut wl_storage, ¶ms).expect("Test failed"); + write_pos_params(&mut state, ¶ms).expect("Test failed"); // insert validators 2 and 3 at epoch 1 test_utils::append_validators_to_storage( - &mut wl_storage, + &mut state, HashMap::from([ (validator_2.clone(), validator_2_stake), (validator_3.clone(), validator_3_stake), @@ -338,7 +338,7 @@ mod tests { // query validators to make sure they were inserted correctly let query_validators = |epoch: u64| { - wl_storage + state .pos_queries() .get_consensus_validators(Some(epoch.into())) .iter() @@ -352,9 +352,7 @@ mod tests { HashMap::from([(validator_1.clone(), 
validator_1_stake)]) ); assert_eq!( - wl_storage - .pos_queries() - .get_total_voting_power(Some(0.into())), + state.pos_queries().get_total_voting_power(Some(0.into())), validator_1_stake, ); assert_eq!( @@ -366,9 +364,7 @@ mod tests { ]) ); assert_eq!( - wl_storage - .pos_queries() - .get_total_voting_power(Some(1.into())), + state.pos_queries().get_total_voting_power(Some(1.into())), total_stake, ); @@ -378,7 +374,7 @@ mod tests { (1.into(), FractionalVotingPower::ONE_THIRD * total_stake), ]); assert_eq!( - aggregated.fractional_stake(&wl_storage), + aggregated.fractional_stake(&state), FractionalVotingPower::TWO_THIRDS ); } diff --git a/crates/ethereum_bridge/src/protocol/transactions/votes/storage.rs b/crates/ethereum_bridge/src/protocol/transactions/votes/storage.rs index 494d3da5e7..86c479472c 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/votes/storage.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/votes/storage.rs @@ -3,14 +3,14 @@ use namada_core::borsh::{BorshDeserialize, BorshSerialize}; use namada_core::hints; use namada_core::storage::Key; use namada_core::voting_power::FractionalVotingPower; -use namada_state::{DBIter, PrefixIter, StorageHasher, WlStorage, DB}; +use namada_state::{DBIter, PrefixIter, StorageHasher, WlState, DB}; use namada_storage::{StorageRead, StorageWrite}; use super::{EpochedVotingPower, EpochedVotingPowerExt, Tally, Votes}; use crate::storage::vote_tallies; pub fn write( - wl_storage: &mut WlStorage, + state: &mut WlState, keys: &vote_tallies::Keys, body: &T, tally: &Tally, @@ -21,17 +21,17 @@ where H: 'static + StorageHasher + Sync, T: BorshSerialize, { - wl_storage.write(&keys.body(), body)?; - wl_storage.write(&keys.seen(), tally.seen)?; - wl_storage.write(&keys.seen_by(), tally.seen_by.clone())?; - wl_storage.write(&keys.voting_power(), tally.voting_power.clone())?; + state.write(&keys.body(), body)?; + state.write(&keys.seen(), tally.seen)?; + state.write(&keys.seen_by(), tally.seen_by.clone())?; + state.write(&keys.voting_power(), tally.voting_power.clone())?; if !already_present { // add the current epoch for the inserted event - wl_storage.write( + state.write( &keys.voting_started_epoch(), - wl_storage.storage.get_current_epoch().0, + state.in_mem().get_current_epoch().0, )?; } Ok(()) } @@ -40,7 +40,7 @@ where /// of fractional voting power behind it.
#[must_use = "The storage value returned by this function must be used"] pub fn delete( - wl_storage: &mut WlStorage, + state: &mut WlState, keys: &vote_tallies::Keys, ) -> Result> where @@ -50,38 +52,38 @@ where { let opt_body = { let voting_power: EpochedVotingPower = - super::read::value(wl_storage, &keys.voting_power())?; + super::read::value(state, &keys.voting_power())?; if hints::unlikely( - voting_power.fractional_stake(wl_storage) + voting_power.fractional_stake(state) > FractionalVotingPower::ONE_THIRD, ) { - let body: T = super::read::value(wl_storage, &keys.body())?; + let body: T = super::read::value(state, &keys.body())?; Some(body) } else { None } }; - wl_storage.delete(&keys.body())?; - wl_storage.delete(&keys.seen())?; - wl_storage.delete(&keys.seen_by())?; - wl_storage.delete(&keys.voting_power())?; - wl_storage.delete(&keys.voting_started_epoch())?; + state.delete(&keys.body())?; + state.delete(&keys.seen())?; + state.delete(&keys.seen_by())?; + state.delete(&keys.voting_power())?; + state.delete(&keys.voting_started_epoch())?; Ok(opt_body) } pub fn read( - wl_storage: &WlStorage, + state: &WlState, keys: &vote_tallies::Keys, ) -> Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let seen: bool = super::read::value(wl_storage, &keys.seen())?; - let seen_by: Votes = super::read::value(wl_storage, &keys.seen_by())?; + let seen: bool = super::read::value(state, &keys.seen())?; + let seen_by: Votes = super::read::value(state, &keys.seen_by())?; let voting_power: EpochedVotingPower = - super::read::value(wl_storage, &keys.voting_power())?; + super::read::value(state, &keys.voting_power())?; Ok(Tally { voting_power, @@ -91,21 +93,21 @@ where } pub fn iter_prefix<'a, D, H>( - wl_storage: &'a WlStorage, + state: &'a WlState, prefix: &Key, ) -> Result> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - wl_storage + state .iter_prefix(prefix) .context("Failed to iterate over the given storage prefix") } #[inline] pub fn read_body( - wl_storage: &WlStorage, + state: &WlState, keys: &vote_tallies::Keys, ) -> Result where @@ -113,12 +115,12 @@ where H: 'static + StorageHasher + Sync, T: BorshDeserialize, { - super::read::value(wl_storage, &keys.body()) + super::read::value(state, &keys.body()) } #[inline] pub fn maybe_read_seen( - wl_storage: &WlStorage, + state: &WlState, keys: &vote_tallies::Keys, ) -> Result> where @@ -126,7 +128,7 @@ where H: 'static + StorageHasher + Sync, T: BorshDeserialize, { - super::read::maybe_value(wl_storage, &keys.seen()) + super::read::maybe_value(state, &keys.seen()) } #[cfg(test)] @@ -142,7 +144,7 @@ mod tests { #[test] fn test_delete_expired_tally() { - let (mut wl_storage, _) = test_utils::setup_default_storage(); + let (mut state, _) = test_utils::setup_default_storage(); let (validator, validator_voting_power) = test_utils::default_validator(); @@ -164,25 +166,25 @@ mod tests { seen_by: BTreeMap::from([(validator, 1.into())]), seen: false, }; - assert!(write(&mut wl_storage, &keys, &event, &tally, false).is_ok()); + assert!(write(&mut state, &keys, &event, &tally, false).is_ok()); // delete the tally and check that the body is returned - let opt_body = delete(&mut wl_storage, &keys).unwrap(); + let opt_body = delete(&mut state, &keys).unwrap(); assert_matches!(opt_body, Some(e) if e == event); // now, we write another tally, with <=1/3 voting power tally.voting_power = EpochedVotingPower::from([(0.into(), 1u64.into())]); - assert!(write(&mut 
wl_storage, &keys, &event, &tally, false).is_ok()); + assert!(write(&mut state, &keys, &event, &tally, false).is_ok()); // delete the tally and check that no body is returned - let opt_body = delete(&mut wl_storage, &keys).unwrap(); + let opt_body = delete(&mut state, &keys).unwrap(); assert_matches!(opt_body, None); } #[test] fn test_write_tally() { - let (mut wl_storage, _) = test_utils::setup_default_storage(); + let (mut state, _) = test_utils::setup_default_storage(); let (validator, validator_voting_power) = test_utils::default_validator(); let event = EthereumEvent::TransfersToNamada { @@ -199,28 +201,27 @@ mod tests { seen: false, }; - let result = write(&mut wl_storage, &keys, &event, &tally, false); + let result = write(&mut state, &keys, &event, &tally, false); assert!(result.is_ok()); - let body = wl_storage.read_bytes(&keys.body()).unwrap(); + let body = state.read_bytes(&keys.body()).unwrap(); assert_eq!(body, Some(event.serialize_to_vec())); - let seen = wl_storage.read_bytes(&keys.seen()).unwrap(); + let seen = state.read_bytes(&keys.seen()).unwrap(); assert_eq!(seen, Some(tally.seen.serialize_to_vec())); - let seen_by = wl_storage.read_bytes(&keys.seen_by()).unwrap(); + let seen_by = state.read_bytes(&keys.seen_by()).unwrap(); assert_eq!(seen_by, Some(tally.seen_by.serialize_to_vec())); - let voting_power = wl_storage.read_bytes(&keys.voting_power()).unwrap(); + let voting_power = state.read_bytes(&keys.voting_power()).unwrap(); assert_eq!(voting_power, Some(tally.voting_power.serialize_to_vec())); - let epoch = - wl_storage.read_bytes(&keys.voting_started_epoch()).unwrap(); + let epoch = state.read_bytes(&keys.voting_started_epoch()).unwrap(); assert_eq!( epoch, - Some(wl_storage.storage.get_current_epoch().0.serialize_to_vec()) + Some(state.in_mem().get_current_epoch().0.serialize_to_vec()) ); } #[test] fn test_read_tally() { - let (mut wl_storage, _) = test_utils::setup_default_storage(); + let (mut state, _) = test_utils::setup_default_storage(); let (validator, validator_voting_power) = test_utils::default_validator(); let event = EthereumEvent::TransfersToNamada { @@ -236,20 +237,20 @@ mod tests { seen_by: BTreeMap::from([(validator, 10.into())]), seen: false, }; - wl_storage.write(&keys.body(), &event).unwrap(); - wl_storage.write(&keys.seen(), tally.seen).unwrap(); - wl_storage.write(&keys.seen_by(), &tally.seen_by).unwrap(); - wl_storage + state.write(&keys.body(), &event).unwrap(); + state.write(&keys.seen(), tally.seen).unwrap(); + state.write(&keys.seen_by(), &tally.seen_by).unwrap(); + state .write(&keys.voting_power(), &tally.voting_power) .unwrap(); - wl_storage + state .write( &keys.voting_started_epoch(), - wl_storage.storage.get_block_height().0, + state.in_mem().get_block_height().0, ) .unwrap(); - let result = read(&wl_storage, &keys); + let result = read(&state, &keys); assert!(result.is_ok()); assert_eq!(result.unwrap(), tally); diff --git a/crates/ethereum_bridge/src/protocol/transactions/votes/update.rs b/crates/ethereum_bridge/src/protocol/transactions/votes/update.rs index 7f8ff356af..d2dc8c9dc5 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/votes/update.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/votes/update.rs @@ -6,7 +6,7 @@ use namada_core::address::Address; use namada_core::storage::BlockHeight; use namada_core::token; use namada_proof_of_stake::pos_queries::PosQueries; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_state::{DBIter, StorageHasher, WlState, DB}; use super::{ChangedKeys, 
EpochedVotingPowerExt, Tally, Votes}; use crate::storage::vote_tallies; @@ -90,7 +90,7 @@ impl IntoIterator for NewVotes { /// votes from `vote_info` should be applied, and the returned changed keys will /// be empty. pub(in super::super) fn calculate( - wl_storage: &mut WlStorage, + state: &mut WlState, keys: &vote_tallies::Keys, vote_info: NewVotes, ) -> Result<(Tally, ChangedKeys)> @@ -104,7 +104,7 @@ where validators = ?vote_info.voters(), "Calculating validators' votes applied to an existing tally" ); - let tally_pre = super::storage::read(wl_storage, keys)?; + let tally_pre = super::storage::read(state, keys)?; if tally_pre.seen { return Ok((tally_pre, ChangedKeys::default())); } @@ -118,7 +118,7 @@ where "Ignoring duplicate voter" ); } - let tally_post = apply(wl_storage, &tally_pre, vote_info) + let tally_post = apply(state, &tally_pre, vote_info) .expect("We deduplicated voters already, so this should never error"); let changed_keys = keys_changed(keys, &tally_pre, &tally_post); @@ -147,7 +147,7 @@ where /// voters from `vote_info`. An error is returned if any validator which /// previously voted is present in `vote_info`. fn apply( - wl_storage: &WlStorage, + state: &WlState, tally: &Tally, vote_info: NewVotes, ) -> Result @@ -167,7 +167,7 @@ where {already_voted_height}", )); }; - let epoch = wl_storage + let epoch = state .pos_queries() .get_epoch(vote_height) .expect("The queried epoch should be known"); @@ -177,7 +177,7 @@ where *aggregated += voting_power; } - let seen_post = voting_power_post.has_majority_quorum(wl_storage); + let seen_post = voting_power_post.has_majority_quorum(state); Ok(Tally { voting_power: voting_power_post, @@ -212,7 +212,7 @@ mod tests { use namada_core::address; use namada_core::ethereum_events::EthereumEvent; use namada_core::voting_power::FractionalVotingPower; - use namada_state::testing::TestWlStorage; + use namada_state::testing::TestState; use self::helpers::{default_event, default_total_stake, TallyParams}; use super::*; @@ -242,7 +242,7 @@ mod tests { /// Parameters to construct a test [`Tally`]. pub(super) struct TallyParams<'a> { /// Handle to storage. - pub wl_storage: &'a mut TestWlStorage, + pub state: &'a mut TestState, /// The event to be voted on. pub event: &'a EthereumEvent, /// Votes from the given validators at the given block height. @@ -258,7 +258,7 @@ mod tests { /// Write an initial [`Tally`] to storage. 
pub(super) fn setup(self) -> Result { let Self { - wl_storage, + state, event, votes, total_stake, @@ -278,9 +278,9 @@ mod tests { seen: seen_voting_power > FractionalVotingPower::TWO_THIRDS * total_stake, }; - votes::storage::write(wl_storage, &keys, event, &tally, false)?; + votes::storage::write(state, &keys, event, &tally, false)?; total_consensus_stake_handle().set( - wl_storage, + state, total_stake, 0u64.into(), 0, @@ -376,8 +376,8 @@ mod tests { #[test] fn test_apply_duplicate_votes() -> Result<()> { - let mut wl_storage = TestWlStorage::default(); - test_utils::init_default_storage(&mut wl_storage); + let mut state = TestState::default(); + test_utils::init_default_storage(&mut state); let validator = address::testing::established_address_1(); let already_voted_height = BlockHeight(100); @@ -385,7 +385,7 @@ mod tests { let event = default_event(); let tally_pre = TallyParams { total_stake: default_total_stake(), - wl_storage: &mut wl_storage, + state: &mut state, event: &event, votes: HashSet::from([( validator.clone(), @@ -402,7 +402,7 @@ mod tests { )]); let vote_info = NewVotes::new(votes, &voting_powers)?; - let result = apply(&wl_storage, &tally_pre, vote_info); + let result = apply(&state, &tally_pre, vote_info); assert!(result.is_err()); Ok(()) @@ -412,13 +412,13 @@ mod tests { /// already recorded as having been seen. #[test] fn test_calculate_already_seen() -> Result<()> { - let mut wl_storage = TestWlStorage::default(); - test_utils::init_default_storage(&mut wl_storage); + let mut state = TestState::default(); + test_utils::init_default_storage(&mut state); let event = default_event(); let keys = vote_tallies::Keys::from(&event); let tally_pre = TallyParams { total_stake: default_total_stake(), - wl_storage: &mut wl_storage, + state: &mut state, event: &event, votes: HashSet::from([( address::testing::established_address_1(), @@ -439,7 +439,7 @@ mod tests { let vote_info = NewVotes::new(votes, &voting_powers)?; let (tally_post, changed_keys) = - calculate(&mut wl_storage, &keys, vote_info)?; + calculate(&mut state, &keys, vote_info)?; assert_eq!(tally_post, tally_pre); assert!(changed_keys.is_empty()); @@ -449,12 +449,12 @@ mod tests { /// Tests that an unchanged tally is returned if no votes are passed. #[test] fn test_calculate_empty() -> Result<()> { - let (mut wl_storage, _) = test_utils::setup_default_storage(); + let (mut state, _) = test_utils::setup_default_storage(); let event = default_event(); let keys = vote_tallies::Keys::from(&event); let tally_pre = TallyParams { total_stake: default_total_stake(), - wl_storage: &mut wl_storage, + state: &mut state, event: &event, votes: HashSet::from([( address::testing::established_address_1(), @@ -466,7 +466,7 @@ mod tests { let vote_info = NewVotes::new(Votes::default(), &HashMap::default())?; let (tally_post, changed_keys) = - calculate(&mut wl_storage, &keys, vote_info)?; + calculate(&mut state, &keys, vote_info)?; assert_eq!(tally_post, tally_pre); assert!(changed_keys.is_empty()); @@ -477,13 +477,13 @@ mod tests { /// not yet seen. 
#[test] fn test_calculate_one_vote_not_seen() -> Result<()> { - let (mut wl_storage, _) = test_utils::setup_default_storage(); + let (mut state, _) = test_utils::setup_default_storage(); let event = default_event(); let keys = vote_tallies::Keys::from(&event); let _tally_pre = TallyParams { total_stake: default_total_stake(), - wl_storage: &mut wl_storage, + state: &mut state, event: &event, votes: HashSet::from([( address::testing::established_address_1(), @@ -503,7 +503,7 @@ mod tests { let vote_info = NewVotes::new(votes, &voting_powers)?; let (tally_post, changed_keys) = - calculate(&mut wl_storage, &keys, vote_info)?; + calculate(&mut state, &keys, vote_info)?; assert_eq!( tally_post, @@ -529,7 +529,7 @@ mod tests { /// seen. #[test] fn test_calculate_one_vote_seen() -> Result<()> { - let (mut wl_storage, _) = test_utils::setup_default_storage(); + let (mut state, _) = test_utils::setup_default_storage(); let first_vote_stake = FractionalVotingPower::ONE_THIRD * default_total_stake(); @@ -541,7 +541,7 @@ mod tests { let keys = vote_tallies::Keys::from(&event); let _tally_pre = TallyParams { total_stake, - wl_storage: &mut wl_storage, + state: &mut state, event: &event, votes: HashSet::from([( address::testing::established_address_1(), @@ -559,7 +559,7 @@ mod tests { let vote_info = NewVotes::new(votes, &voting_powers)?; let (tally_post, changed_keys) = - calculate(&mut wl_storage, &keys, vote_info)?; + calculate(&mut state, &keys, vote_info)?; assert_eq!( tally_post, diff --git a/crates/ethereum_bridge/src/protocol/validation/bridge_pool_roots.rs b/crates/ethereum_bridge/src/protocol/validation/bridge_pool_roots.rs index 74adde0a07..da54631656 100644 --- a/crates/ethereum_bridge/src/protocol/validation/bridge_pool_roots.rs +++ b/crates/ethereum_bridge/src/protocol/validation/bridge_pool_roots.rs @@ -3,7 +3,7 @@ use namada_core::keccak::keccak_hash; use namada_core::storage::BlockHeight; use namada_proof_of_stake::pos_queries::PosQueries; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_state::{DBIter, StorageHasher, WlState, DB}; use namada_tx::{SignableEthMessage, Signed}; use namada_vote_ext::bridge_pool_roots; @@ -21,7 +21,7 @@ use crate::storage::eth_bridge_queries::EthBridgeQueries; /// * The validator signed over the correct height inside of the extension. /// * Check that the inner signature is valid. 
pub fn validate_bp_roots_vext( - wl_storage: &WlStorage, + state: &WlState, ext: &Signed, last_height: BlockHeight, ) -> Result<(), VoteExtensionError> @@ -32,7 +32,7 @@ where // NOTE: for ABCI++, we should pass // `last_height` here, instead of `ext.data.block_height` let ext_height_epoch = - match wl_storage.pos_queries().get_epoch(ext.data.block_height) { + match state.pos_queries().get_epoch(ext.data.block_height) { Some(epoch) => epoch, _ => { tracing::debug!( @@ -43,7 +43,7 @@ where return Err(VoteExtensionError::UnexpectedEpoch); } }; - if !wl_storage + if !state .ethbridge_queries() .is_bridge_active_at(ext_height_epoch) { @@ -71,7 +71,7 @@ where // get the public key associated with this validator let validator = &ext.data.validator_addr; - let (_, pk) = wl_storage + let (_, pk) = state .pos_queries() .get_validator_from_address(validator, Some(ext_height_epoch)) .map_err(|err| { @@ -96,12 +96,12 @@ where VoteExtensionError::VerifySigFailed })?; - let bp_root = wl_storage + let bp_root = state .ethbridge_queries() .get_bridge_pool_root_at_height(ext.data.block_height) .expect("We asserted that the queried height is correct") .0; - let nonce = wl_storage + let nonce = state .ethbridge_queries() .get_bridge_pool_nonce_at_height(ext.data.block_height) .to_bytes(); @@ -109,7 +109,7 @@ where keccak_hash([bp_root, nonce].concat()), ext.data.sig.clone(), ); - let pk = wl_storage + let pk = state .pos_queries() .read_validator_eth_hot_key(validator, Some(ext_height_epoch)) .expect("A validator should have an Ethereum hot key in storage."); diff --git a/crates/ethereum_bridge/src/protocol/validation/ethereum_events.rs b/crates/ethereum_bridge/src/protocol/validation/ethereum_events.rs index 7c21d4746e..1ad96a2876 100644 --- a/crates/ethereum_bridge/src/protocol/validation/ethereum_events.rs +++ b/crates/ethereum_bridge/src/protocol/validation/ethereum_events.rs @@ -2,7 +2,7 @@ use namada_core::storage::BlockHeight; use namada_proof_of_stake::pos_queries::PosQueries; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_state::{DBIter, StorageHasher, WlState, DB}; use namada_tx::Signed; use namada_vote_ext::ethereum_events; @@ -19,7 +19,7 @@ use crate::storage::eth_bridge_queries::EthBridgeQueries; /// * There are no duplicate Ethereum events in this vote extension, and the /// events are sorted in ascending order. 
pub fn validate_eth_events_vext( - wl_storage: &WlStorage, + state: &WlState, ext: &Signed, last_height: BlockHeight, ) -> Result<(), VoteExtensionError> @@ -30,7 +30,7 @@ where // NOTE: for ABCI++, we should pass // `last_height` here, instead of `ext.data.block_height` let ext_height_epoch = - match wl_storage.pos_queries().get_epoch(ext.data.block_height) { + match state.pos_queries().get_epoch(ext.data.block_height) { Some(epoch) => epoch, _ => { tracing::debug!( @@ -41,7 +41,7 @@ where return Err(VoteExtensionError::UnexpectedEpoch); } }; - if !wl_storage + if !state .ethbridge_queries() .is_bridge_active_at(ext_height_epoch) { @@ -65,10 +65,10 @@ where tracing::debug!("Dropping vote extension issued at genesis"); return Err(VoteExtensionError::UnexpectedBlockHeight); } - validate_eth_events(wl_storage, &ext.data)?; + validate_eth_events(state, &ext.data)?; // get the public key associated with this validator let validator = &ext.data.validator_addr; - let (_, pk) = wl_storage + let (_, pk) = state .pos_queries() .get_validator_from_address(validator, Some(ext_height_epoch)) .map_err(|err| { @@ -102,7 +102,7 @@ where /// ascending ordering, must not contain any dupes /// and must have valid nonces. fn validate_eth_events( - wl_storage: &WlStorage, + state: &WlState, ext: ðereum_events::Vext, ) -> Result<(), VoteExtensionError> where @@ -128,11 +128,11 @@ where } // for the proposal to be valid, at least one of the // event's nonces must be valid - if ext.ethereum_events.iter().any(|event| { - wl_storage - .ethbridge_queries() - .validate_eth_event_nonce(event) - }) { + if ext + .ethereum_events + .iter() + .any(|event| state.ethbridge_queries().validate_eth_event_nonce(event)) + { Ok(()) } else { Err(VoteExtensionError::InvalidEthEventNonce) diff --git a/crates/ethereum_bridge/src/protocol/validation/validator_set_update.rs b/crates/ethereum_bridge/src/protocol/validation/validator_set_update.rs index 508a878910..a63326ab4c 100644 --- a/crates/ethereum_bridge/src/protocol/validation/validator_set_update.rs +++ b/crates/ethereum_bridge/src/protocol/validation/validator_set_update.rs @@ -2,7 +2,7 @@ use namada_core::storage::Epoch; use namada_proof_of_stake::pos_queries::PosQueries; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_state::{DBIter, StorageHasher, WlState, DB}; use namada_vote_ext::validator_set_update; use super::VoteExtensionError; @@ -27,7 +27,7 @@ use crate::storage::eth_bridge_queries::EthBridgeQueries; /// * The voting powers signed over were Ethereum ABI encoded, normalized to /// `2^32`, and sorted in descending order. 
pub fn validate_valset_upd_vext( - wl_storage: &WlStorage, + state: &WlState, ext: &validator_set_update::SignedVext, last_epoch: Epoch, ) -> Result<(), VoteExtensionError> @@ -35,7 +35,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter>, H: 'static + StorageHasher, { - if wl_storage.storage.last_block.is_none() { + if state.in_mem().last_block.is_none() { tracing::debug!( "Dropping validator set update vote extension issued at genesis" ); @@ -51,7 +51,7 @@ where ); return Err(VoteExtensionError::UnexpectedEpoch); } - if wl_storage + if state .ethbridge_queries() .valset_upd_seen(signing_epoch.next()) { @@ -65,7 +65,7 @@ where // verify if the new epoch validators' voting powers in storage match // the voting powers in the vote extension let mut no_local_consensus_eth_addresses = 0; - for (eth_addr_book, namada_addr, namada_power) in wl_storage + for (eth_addr_book, namada_addr, namada_power) in state .ethbridge_queries() .get_consensus_eth_addresses(Some(signing_epoch.next())) .iter() @@ -103,7 +103,7 @@ where } // get the public key associated with this validator let validator = &ext.data.validator_addr; - let pk = wl_storage + let pk = state .pos_queries() .read_validator_eth_hot_key(validator, Some(signing_epoch)) .ok_or_else(|| { @@ -143,7 +143,7 @@ mod tests { /// next validator set in storage. #[test] fn test_superset_valsetupd_rejected() { - let (wl_storage, keys) = test_utils::setup_default_storage(); + let (state, keys) = test_utils::setup_default_storage(); let (validator, validator_stake) = test_utils::default_validator(); let hot_key_addr = { @@ -194,7 +194,7 @@ mod tests { } .sign(&keys.get(&validator).expect("Test failed").eth_bridge); - let result = validate_valset_upd_vext(&wl_storage, &ext, 0.into()); + let result = validate_valset_upd_vext(&state, &ext, 0.into()); assert_matches!( result, Err(VoteExtensionError::ExtraValidatorsInExtension) diff --git a/crates/ethereum_bridge/src/storage/eth_bridge_queries.rs b/crates/ethereum_bridge/src/storage/eth_bridge_queries.rs index 1d4f56f117..d7fcb89a78 100644 --- a/crates/ethereum_bridge/src/storage/eth_bridge_queries.rs +++ b/crates/ethereum_bridge/src/storage/eth_bridge_queries.rs @@ -13,7 +13,7 @@ use namada_proof_of_stake::pos_queries::{ConsensusValidators, PosQueries}; use namada_proof_of_stake::storage::{ validator_eth_cold_key_handle, validator_eth_hot_key_handle, }; -use namada_state::{DBIter, StorageHasher, StoreType, WlStorage, DB}; +use namada_state::{DBIter, StorageHasher, StoreType, WlState, DB}; use namada_storage::StorageRead; use namada_vote_ext::validator_set_update::{ EthAddrBook, ValidatorSetArgs, VotingPowersMap, VotingPowersMapExt, @@ -82,7 +82,7 @@ pub trait EthBridgeQueries { fn ethbridge_queries(&self) -> EthBridgeQueriesHook<'_, Self::Storage>; } -impl EthBridgeQueries for WlStorage +impl EthBridgeQueries for WlState where D: 'static + DB + for<'iter> DBIter<'iter>, H: 'static + StorageHasher, @@ -91,39 +91,37 @@ where #[inline] fn ethbridge_queries(&self) -> EthBridgeQueriesHook<'_, Self> { - EthBridgeQueriesHook { wl_storage: self } + EthBridgeQueriesHook { state: self } } } /// A handle to [`EthBridgeQueries`]. /// -/// This type is a wrapper around a pointer to a -/// [`WlStorage`]. +/// This type is a wrapper around a pointer to a [`WlState`]. 
#[derive(Debug)] #[repr(transparent)] -pub struct EthBridgeQueriesHook<'db, DB> { - wl_storage: &'db DB, +pub struct EthBridgeQueriesHook<'db, S> { + state: &'db S, } -impl<'db, DB> Clone for EthBridgeQueriesHook<'db, DB> { +impl<'db, S> Clone for EthBridgeQueriesHook<'db, S> { fn clone(&self) -> Self { - Self { - wl_storage: self.wl_storage, - } + Self { state: self.state } } } -impl<'db, DB> Copy for EthBridgeQueriesHook<'db, DB> {} +impl<'db, S> Copy for EthBridgeQueriesHook<'db, S> {} -impl<'db, D, H> EthBridgeQueriesHook<'db, WlStorage> +impl<'db, D, H> EthBridgeQueriesHook<'db, WlState> where D: 'static + DB + for<'iter> DBIter<'iter>, H: 'static + StorageHasher, { - /// Return a handle to the inner [`WlStorage`]. + /// Return a handle to the inner [`WlState`]. #[inline] - pub fn storage(self) -> &'db WlStorage { - self.wl_storage + pub fn state(self) -> &'db WlState { + self.state } /// Check if a validator set update proof is available for @@ -135,7 +133,7 @@ ); } let valset_upd_keys = vote_tallies::Keys::from(&epoch); - self.wl_storage + self.state .read(&valset_upd_keys.seen()) .expect("Reading a value from storage should not fail") .unwrap_or(false) @@ -145,7 +143,7 @@ /// scheduled to be enabled at a specified epoch. pub fn check_bridge_status(self) -> EthBridgeStatus { BorshDeserialize::try_from_slice( - self.wl_storage + self.state .read_bytes(&active_key()) .expect( "Reading the Ethereum bridge active key shouldn't fail.", ) @@ -160,7 +158,7 @@ /// currently active. #[inline] pub fn is_bridge_active(self) -> bool { - self.is_bridge_active_at(self.wl_storage.storage.get_current_epoch().0) + self.is_bridge_active_at(self.state.in_mem().get_current_epoch().0) } /// Behaves exactly like [`Self::is_bridge_active`], but performs @@ -177,8 +175,8 @@ /// Get the nonce of the next transfers to Namada event to be processed. pub fn get_next_nam_transfers_nonce(self) -> Uint { - self.wl_storage - .storage + self.state + .in_mem() .eth_events_queue .transfers_to_namada .get_event_nonce() @@ -189,12 +187,10 @@ pub fn get_bridge_pool_nonce(self) -> Uint { Uint::try_from_slice( &self - .wl_storage - .storage - .read(&bridge_pool::get_nonce_key()) + .state + .read_bytes(&bridge_pool::get_nonce_key()) .expect("Reading Bridge pool nonce shouldn't fail.") - .0 - .expect("Reading Bridge pool nonce shouldn't fail."), + .expect("Bridge pool nonce must be present."), ) .expect("Deserializing the nonce from storage should not fail.") } @@ -203,13 +199,12 @@ pub fn get_bridge_pool_nonce_at_height(self, height: BlockHeight) -> Uint { Uint::try_from_slice( &self - .wl_storage - .storage - .db + .state + .db() .read_subspace_val_with_height( &bridge_pool::get_nonce_key(), height, - self.wl_storage.storage.get_last_block_height(), + self.state.in_mem().get_last_block_height(), ) .expect("Reading signed Bridge pool nonce shouldn't fail.") .expect("Reading signed Bridge pool nonce shouldn't fail."), ) @@ -220,8 +215,8 @@ /// Get the latest root of the Ethereum bridge /// pool Merkle tree.
pub fn get_bridge_pool_root(self) -> KeccakHash { - self.wl_storage - .storage + self.state + .in_mem() .block .tree .sub_root(&StoreType::BridgePool) @@ -240,7 +235,7 @@ where pub fn get_signed_bridge_pool_root( self, ) -> Option<(BridgePoolRootProof, BlockHeight)> { - self.wl_storage + self.state .read_bytes(&bridge_pool::get_signed_root_key()) .expect("Reading signed Bridge pool root shouldn't fail.") .map(|bytes| { @@ -258,8 +253,7 @@ where height: BlockHeight, ) -> Option { let base_tree = self - .wl_storage - .storage + .state .get_merkle_tree(height, Some(StoreType::BridgePool)) .ok()?; Some(base_tree.sub_root(&StoreType::BridgePool).into()) @@ -276,7 +270,7 @@ where } else { // offset of 1 => are we at the 2nd // block within the epoch? - self.wl_storage.is_deciding_offset_within_epoch(1) + self.state.is_deciding_offset_within_epoch(1) } } @@ -288,11 +282,11 @@ where validator: &Address, epoch: Option, ) -> Option { - let epoch = epoch - .unwrap_or_else(|| self.wl_storage.storage.get_current_epoch().0); - let params = self.wl_storage.pos_queries().get_pos_params(); + let epoch = + epoch.unwrap_or_else(|| self.state.in_mem().get_current_epoch().0); + let params = self.state.pos_queries().get_pos_params(); validator_eth_hot_key_handle(validator) - .get(self.wl_storage, epoch, ¶ms) + .get(self.state, epoch, ¶ms) .expect("Should be able to read eth hot key from storage") .and_then(|ref pk| pk.try_into().ok()) } @@ -305,11 +299,11 @@ where validator: &Address, epoch: Option, ) -> Option { - let epoch = epoch - .unwrap_or_else(|| self.wl_storage.storage.get_current_epoch().0); - let params = self.wl_storage.pos_queries().get_pos_params(); + let epoch = + epoch.unwrap_or_else(|| self.state.in_mem().get_current_epoch().0); + let params = self.state.pos_queries().get_pos_params(); validator_eth_cold_key_handle(validator) - .get(self.wl_storage, epoch, ¶ms) + .get(self.state, epoch, ¶ms) .expect("Should be able to read eth cold key from storage") .and_then(|ref pk| pk.try_into().ok()) } @@ -338,14 +332,14 @@ where self, epoch: Option, ) -> ConsensusEthAddresses<'db, D, H> { - let epoch = epoch - .unwrap_or_else(|| self.wl_storage.storage.get_current_epoch().0); + let epoch = + epoch.unwrap_or_else(|| self.state.in_mem().get_current_epoch().0); let consensus_validators = self - .wl_storage + .state .pos_queries() .get_consensus_validators(Some(epoch)); ConsensusEthAddresses { - wl_storage: self.wl_storage, + state: self.state, consensus_validators, epoch, } @@ -361,8 +355,8 @@ where where F: FnMut(&EthAddrBook) -> EthAddress, { - let epoch = epoch - .unwrap_or_else(|| self.wl_storage.storage.get_current_epoch().0); + let epoch = + epoch.unwrap_or_else(|| self.state.in_mem().get_current_epoch().0); let voting_powers_map: VotingPowersMap = self .get_consensus_eth_addresses(Some(epoch)) @@ -371,7 +365,7 @@ where .collect(); let total_power = self - .wl_storage + .state .pos_queries() .get_total_voting_power(Some(epoch)) .into(); @@ -431,7 +425,7 @@ where } .into(); - self.wl_storage + self.state .read(&key) .expect("Reading from storage should not fail") .unwrap_or(false) @@ -448,7 +442,7 @@ where } .into(); - self.wl_storage + self.state .read(&key) .expect("Reading from storage should not fail") } @@ -467,7 +461,7 @@ where } .into(); - self.wl_storage + self.state .read(&key) .expect("Reading from storage should not fail") } @@ -526,7 +520,7 @@ where transfer: &TransferToEthereum, ) -> Option<(PendingTransfer, StorageKey)> { let pending_key = 
bridge_pool::get_key_from_hash(&transfer.keccak256()); - self.wl_storage + self.state .read(&pending_key) .expect("Reading from storage should not fail") .zip(Some(pending_key)) @@ -612,8 +606,8 @@ where H: 'static + StorageHasher, { epoch: Epoch, - wl_storage: &'db WlStorage, - consensus_validators: ConsensusValidators<'db, WlStorage>, + state: &'db WlState, + consensus_validators: ConsensusValidators<'db, WlState>, } impl<'db, D, H> ConsensusEthAddresses<'db, D, H> @@ -628,7 +622,7 @@ where ) -> impl Iterator + 'db { self.consensus_validators.iter().map(move |validator| { let eth_addr_book = self - .wl_storage + .state .ethbridge_queries() .get_eth_addr_book(&validator.address, Some(self.epoch)) .expect("All Namada validators should have Ethereum keys"); diff --git a/crates/ethereum_bridge/src/storage/parameters.rs b/crates/ethereum_bridge/src/storage/parameters.rs index f075662313..e03563b286 100644 --- a/crates/ethereum_bridge/src/storage/parameters.rs +++ b/crates/ethereum_bridge/src/storage/parameters.rs @@ -7,7 +7,7 @@ use namada_core::ethereum_events::EthAddress; use namada_core::ethereum_structs; use namada_core::storage::Key; use namada_core::token::{DenominatedAmount, NATIVE_MAX_DECIMAL_PLACES}; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_state::{DBIter, StorageHasher, WlState, DB}; use namada_storage::{StorageRead, StorageWrite}; use serde::{Deserialize, Serialize}; @@ -167,7 +167,7 @@ impl EthereumBridgeParams { /// /// If these parameters are initialized, the storage subspaces /// for the Ethereum bridge VPs are also initialized. - pub fn init_storage(&self, wl_storage: &mut WlStorage) + pub fn init_storage(&self, state: &mut WlState) where D: 'static + DB + for<'iter> DBIter<'iter>, H: 'static + StorageHasher, @@ -187,18 +187,18 @@ impl EthereumBridgeParams { let native_erc20_key = bridge_storage::native_erc20_key(); let bridge_contract_key = bridge_storage::bridge_contract_key(); let eth_start_height_key = bridge_storage::eth_start_height_key(); - wl_storage + state .write( &active_key, EthBridgeStatus::Enabled(EthBridgeEnabled::AtGenesis), ) .unwrap(); - wl_storage + state .write(&min_confirmations_key, min_confirmations) .unwrap(); - wl_storage.write(&native_erc20_key, native_erc20).unwrap(); - wl_storage.write(&bridge_contract_key, bridge).unwrap(); - wl_storage + state.write(&native_erc20_key, native_erc20).unwrap(); + state.write(&bridge_contract_key, bridge).unwrap(); + state .write(ð_start_height_key, eth_start_height) .unwrap(); for Erc20WhitelistEntry { @@ -221,26 +221,26 @@ impl EthereumBridgeParams { suffix: whitelist::KeyType::Whitelisted, } .into(); - wl_storage.write(&key, true).unwrap(); + state.write(&key, true).unwrap(); let key = whitelist::Key { asset: *addr, suffix: whitelist::KeyType::Cap, } .into(); - wl_storage.write(&key, cap).unwrap(); + state.write(&key, cap).unwrap(); let key = whitelist::Key { asset: *addr, suffix: whitelist::KeyType::Denomination, } .into(); - wl_storage.write(&key, denom).unwrap(); + state.write(&key, denom).unwrap(); } // Initialize the storage for the Ethereum Bridge VP. - vp::ethereum_bridge::init_storage(wl_storage); + vp::ethereum_bridge::init_storage(state); // Initialize the storage for the Bridge Pool VP. - vp::bridge_pool::init_storage(wl_storage); + vp::bridge_pool::init_storage(state); } } @@ -279,7 +279,7 @@ impl EthereumOracleConfig { /// present, `None` will be returned - this could be the case if the bridge /// has not been bootstrapped yet. 
Panics if the storage appears to be /// corrupt. - pub fn read(wl_storage: &WlStorage) -> Option + pub fn read(state: &WlState) -> Option where D: 'static + DB + for<'iter> DBIter<'iter>, H: 'static + StorageHasher, @@ -288,10 +288,9 @@ impl EthereumOracleConfig { // should not panic, when the active status key has not been // written to; simply return bridge disabled instead let has_active_key = - wl_storage.has_key(&bridge_storage::active_key()).unwrap(); + state.has_key(&bridge_storage::active_key()).unwrap(); - if !has_active_key || !wl_storage.ethbridge_queries().is_bridge_active() - { + if !has_active_key || !state.ethbridge_queries().is_bridge_active() { return None; } @@ -302,11 +301,10 @@ impl EthereumOracleConfig { // These reads must succeed otherwise the storage is corrupt or a // read failed - let min_confirmations = - must_read_key(wl_storage, &min_confirmations_key); - let native_erc20 = must_read_key(wl_storage, &native_erc20_key); - let bridge_contract = must_read_key(wl_storage, &bridge_contract_key); - let eth_start_height = must_read_key(wl_storage, ð_start_height_key); + let min_confirmations = must_read_key(state, &min_confirmations_key); + let native_erc20 = must_read_key(state, &native_erc20_key); + let bridge_contract = must_read_key(state, &bridge_contract_key); + let eth_start_height = must_read_key(state, ð_start_height_key); Some(Self { eth_start_height, @@ -341,14 +339,14 @@ where /// Reads the value of `key` from `storage` and deserializes it, or panics /// otherwise. fn must_read_key( - wl_storage: &WlStorage, + state: &WlState, key: &Key, ) -> T where D: 'static + DB + for<'iter> DBIter<'iter>, H: 'static + StorageHasher, { - StorageRead::read::(wl_storage, key).map_or_else( + StorageRead::read::(state, key).map_or_else( |err| panic!("Could not read {key}: {err:?}"), |value| { value.unwrap_or_else(|| { @@ -365,7 +363,7 @@ where mod tests { use eyre::Result; use namada_core::ethereum_events::EthAddress; - use namada_state::testing::TestWlStorage; + use namada_state::testing::TestState; use super::*; @@ -395,7 +393,7 @@ mod tests { #[test] fn test_ethereum_bridge_config_read_write_storage() { - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); let config = EthereumBridgeParams { erc20_whitelist: vec![], eth_start_height: Default::default(), @@ -408,9 +406,9 @@ mod tests { }, }, }; - config.init_storage(&mut wl_storage); + config.init_storage(&mut state); - let read = EthereumOracleConfig::read(&wl_storage).unwrap(); + let read = EthereumOracleConfig::read(&state).unwrap(); let config = EthereumOracleConfig::from(config); assert_eq!(config, read); @@ -418,8 +416,8 @@ mod tests { #[test] fn test_ethereum_bridge_config_uninitialized() { - let wl_storage = TestWlStorage::default(); - let read = EthereumOracleConfig::read(&wl_storage); + let state = TestState::default(); + let read = EthereumOracleConfig::read(&state); assert!(read.is_none()); } @@ -427,7 +425,7 @@ mod tests { #[test] #[should_panic(expected = "Could not read")] fn test_ethereum_bridge_config_storage_corrupt() { - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); let config = EthereumBridgeParams { erc20_whitelist: vec![], eth_start_height: Default::default(), @@ -440,14 +438,14 @@ mod tests { }, }, }; - config.init_storage(&mut wl_storage); + config.init_storage(&mut state); let min_confirmations_key = bridge_storage::min_confirmations_key(); - wl_storage + state .write_bytes(&min_confirmations_key, vec![42, 1, 2, 3, 4]) 
.unwrap(); // This should panic because the min_confirmations value is not valid - EthereumOracleConfig::read(&wl_storage); + EthereumOracleConfig::read(&state); } #[test] @@ -455,15 +453,15 @@ mod tests { expected = "Ethereum bridge appears to be only partially configured!" )] fn test_ethereum_bridge_config_storage_partially_configured() { - let mut wl_storage = TestWlStorage::default(); - wl_storage + let mut state = TestState::default(); + state .write( &bridge_storage::active_key(), EthBridgeStatus::Enabled(EthBridgeEnabled::AtGenesis), ) .unwrap(); // Write a valid min_confirmations value - wl_storage + state .write( &bridge_storage::min_confirmations_key(), MinimumConfirmations::default(), @@ -471,6 +469,6 @@ mod tests { .unwrap(); // This should panic as the other config values are not written - EthereumOracleConfig::read(&wl_storage); + EthereumOracleConfig::read(&state); } } diff --git a/crates/ethereum_bridge/src/storage/vp/bridge_pool.rs b/crates/ethereum_bridge/src/storage/vp/bridge_pool.rs index 572ef84590..f09fd9ebe3 100644 --- a/crates/ethereum_bridge/src/storage/vp/bridge_pool.rs +++ b/crates/ethereum_bridge/src/storage/vp/bridge_pool.rs @@ -1,6 +1,5 @@ use namada_core::ethereum_events::Uint; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; -use namada_storage::StorageWrite; +use namada_storage::{StorageRead, StorageWrite}; use namada_trans_token::storage_key::balance_key; use namada_trans_token::Amount; @@ -10,17 +9,16 @@ use crate::storage::bridge_pool::{get_nonce_key, BRIDGE_POOL_ADDRESS}; /// /// This means that the amount of escrowed gas fees is /// initialized to 0. -pub fn init_storage(wl_storage: &mut WlStorage) +pub fn init_storage(storage: &mut S) where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, + S: StorageRead + StorageWrite, { let escrow_key = - balance_key(&wl_storage.storage.native_token, &BRIDGE_POOL_ADDRESS); - wl_storage.write(&escrow_key, Amount::default()).expect( + balance_key(&storage.get_native_token().unwrap(), &BRIDGE_POOL_ADDRESS); + storage.write(&escrow_key, Amount::default()).expect( "Initializing the escrow balance of the Bridge pool VP shouldn't fail.", ); - wl_storage + storage .write(&get_nonce_key(), Uint::from(0)) .expect("Initializing the Bridge pool nonce shouldn't fail."); } diff --git a/crates/ethereum_bridge/src/storage/vp/ethereum_bridge.rs b/crates/ethereum_bridge/src/storage/vp/ethereum_bridge.rs index 9e79024991..c1a8dd8e16 100644 --- a/crates/ethereum_bridge/src/storage/vp/ethereum_bridge.rs +++ b/crates/ethereum_bridge/src/storage/vp/ethereum_bridge.rs @@ -1,6 +1,4 @@ -use namada_core::hash::StorageHasher; -use namada_state::{DBIter, WlStorage, DB}; -use namada_storage::StorageWrite; +use namada_storage::{StorageRead, StorageWrite}; use namada_trans_token::storage_key::balance_key; use namada_trans_token::Amount; @@ -10,13 +8,13 @@ use crate::ADDRESS; /// /// This means that the amount of escrowed Nam is /// initialized to 0. 
-pub fn init_storage(wl_storage: &mut WlStorage) +pub fn init_storage(storage: &mut S) where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, + S: StorageRead + StorageWrite, { - let escrow_key = balance_key(&wl_storage.storage.native_token, &ADDRESS); - wl_storage.write(&escrow_key, Amount::default()).expect( + let escrow_key = + balance_key(&storage.get_native_token().unwrap(), &ADDRESS); + storage.write(&escrow_key, Amount::default()).expect( "Initializing the escrow balance of the Ethereum Bridge VP shouldn't \ fail.", ); diff --git a/crates/ethereum_bridge/src/test_utils.rs b/crates/ethereum_bridge/src/test_utils.rs index e6ab489c41..28d42921a6 100644 --- a/crates/ethereum_bridge/src/test_utils.rs +++ b/crates/ethereum_bridge/src/test_utils.rs @@ -19,7 +19,7 @@ use namada_proof_of_stake::{ become_validator, bond_tokens, compute_and_store_total_consensus_stake, staking_token_address, BecomeValidator, }; -use namada_state::testing::TestWlStorage; +use namada_state::testing::TestState; use namada_storage::{StorageRead, StorageWrite}; use namada_trans_token::credit_tokens; @@ -63,26 +63,26 @@ impl TestValidatorKeys { } } -/// Set up a [`TestWlStorage`] initialized at genesis with a single +/// Set up a [`TestState`] initialized at genesis with a single /// validator. /// /// The validator's address is [`address::testing::established_address_1`]. #[inline] pub fn setup_default_storage() --> (TestWlStorage, HashMap) { - let mut wl_storage = TestWlStorage::default(); - let all_keys = init_default_storage(&mut wl_storage); - (wl_storage, all_keys) +-> (TestState, HashMap) { + let mut state = TestState::default(); + let all_keys = init_default_storage(&mut state); + (state, all_keys) } -/// Set up a [`TestWlStorage`] initialized at genesis with +/// Set up a [`TestState`] initialized at genesis with /// [`default_validator`]. #[inline] pub fn init_default_storage( - wl_storage: &mut TestWlStorage, + state: &mut TestState, ) -> HashMap { init_storage_with_validators( - wl_storage, + state, HashMap::from_iter([default_validator()]), ) } @@ -98,10 +98,10 @@ pub fn default_validator() -> (Address, token::Amount) { (addr, voting_power) } -/// Writes a dummy [`EthereumBridgeParams`] to the given [`TestWlStorage`], and +/// Writes a dummy [`EthereumBridgeParams`] to the given [`TestState`], and /// returns it. pub fn bootstrap_ethereum_bridge( - wl_storage: &mut TestWlStorage, + state: &mut TestState, ) -> EthereumBridgeParams { let config = EthereumBridgeParams { // start with empty erc20 whitelist @@ -121,7 +121,7 @@ pub fn bootstrap_ethereum_bridge( }, }, }; - config.init_storage(wl_storage); + config.init_storage(state); config } @@ -134,7 +134,7 @@ pub struct WhitelistMeta { } /// Whitelist the given Ethereum tokens. 
-pub fn whitelist_tokens(wl_storage: &mut TestWlStorage, token_list: L) +pub fn whitelist_tokens(state: &mut TestState, token_list: L) where L: Into>, { @@ -144,52 +144,50 @@ where suffix: whitelist::KeyType::Cap, } .into(); - wl_storage.write(&cap_key, cap).expect("Test failed"); + state.write(&cap_key, cap).expect("Test failed"); let whitelisted_key = whitelist::Key { asset, suffix: whitelist::KeyType::Whitelisted, } .into(); - wl_storage - .write(&whitelisted_key, true) - .expect("Test failed"); + state.write(&whitelisted_key, true).expect("Test failed"); let denom_key = whitelist::Key { asset, suffix: whitelist::KeyType::Denomination, } .into(); - wl_storage.write(&denom_key, denom).expect("Test failed"); + state.write(&denom_key, denom).expect("Test failed"); } } /// Returns the number of keys in `storage` which have values present. -pub fn stored_keys_count(wl_storage: &TestWlStorage) -> usize { +pub fn stored_keys_count(state: &TestState) -> usize { let root = Key { segments: vec![] }; - wl_storage.iter_prefix(&root).expect("Test failed").count() + state.iter_prefix(&root).expect("Test failed").count() } -/// Set up a [`TestWlStorage`] initialized at genesis with the given +/// Set up a [`TestState`] initialized at genesis with the given /// validators. pub fn setup_storage_with_validators( consensus_validators: HashMap, -) -> (TestWlStorage, HashMap) { - let mut wl_storage = TestWlStorage::default(); +) -> (TestState, HashMap) { + let mut state = TestState::default(); let all_keys = - init_storage_with_validators(&mut wl_storage, consensus_validators); - (wl_storage, all_keys) + init_storage_with_validators(&mut state, consensus_validators); + (state, all_keys) } -/// Set up a [`TestWlStorage`] initialized at genesis with the given +/// Set up a [`TestState`] initialized at genesis with the given /// validators. pub fn init_storage_with_validators( - wl_storage: &mut TestWlStorage, + state: &mut TestState, consensus_validators: HashMap, ) -> HashMap { // set last height to a reasonable value; // it should allow vote extensions to be cast - wl_storage.storage.block.height = 1.into(); + state.in_mem_mut().block.height = 1.into(); let mut all_keys = HashMap::new(); let validators: Vec<_> = consensus_validators @@ -216,28 +214,25 @@ pub fn init_storage_with_validators( .collect(); namada_proof_of_stake::test_utils::test_init_genesis( - wl_storage, + state, OwnedPosParams::default(), validators.into_iter(), 0.into(), ) .expect("Test failed"); - bootstrap_ethereum_bridge(wl_storage); + bootstrap_ethereum_bridge(state); for (validator, keys) in all_keys.iter() { let protocol_key = keys.protocol.ref_to(); - wl_storage + state .write(&protocol_pk_key(validator), protocol_key) .expect("Test failed"); } // Initialize pred_epochs to the current height - wl_storage - .storage - .block - .pred_epochs - .new_epoch(wl_storage.storage.block.height); - wl_storage.commit_block().expect("Test failed"); - wl_storage.storage.block.height += 1; + let height = state.in_mem().block.height; + state.in_mem_mut().block.pred_epochs.new_epoch(height); + state.commit_block().expect("Test failed"); + state.in_mem_mut().block.height += 1; all_keys } @@ -247,28 +242,28 @@ pub fn init_storage_with_validators( /// /// N.B. assumes the bridge pool is empty. 
pub fn commit_bridge_pool_root_at_height( - wl_storage: &mut TestWlStorage, + state: &mut TestState, root: &KeccakHash, height: BlockHeight, ) { - wl_storage.storage.block.height = height; - wl_storage.write(&get_key_from_hash(root), height).unwrap(); - wl_storage.commit_block().unwrap(); - wl_storage.delete(&get_key_from_hash(root)).unwrap(); + state.in_mem_mut().block.height = height; + state.write(&get_key_from_hash(root), height).unwrap(); + state.commit_block().unwrap(); + state.delete(&get_key_from_hash(root)).unwrap(); } /// Append validators to storage at the current epoch /// offset by pipeline length. pub fn append_validators_to_storage( - wl_storage: &mut TestWlStorage, + state: &mut TestState, consensus_validators: HashMap, ) -> HashMap { - let current_epoch = wl_storage.storage.get_current_epoch().0; + let current_epoch = state.in_mem().get_current_epoch().0; let mut all_keys = HashMap::new(); - let params = wl_storage.pos_queries().get_pos_params(); + let params = state.pos_queries().get_pos_params(); - let staking_token = staking_token_address(wl_storage); + let staking_token = staking_token_address(state); for (validator, stake) in consensus_validators { let keys = TestValidatorKeys::generate(); @@ -279,7 +274,7 @@ pub fn append_validators_to_storage( let eth_hot_key = &keys.eth_bridge.ref_to(); become_validator( - wl_storage, + state, BecomeValidator { params: ¶ms, address: &validator, @@ -295,27 +290,27 @@ pub fn append_validators_to_storage( }, ) .expect("Test failed"); - credit_tokens(wl_storage, &staking_token, &validator, stake) + credit_tokens(state, &staking_token, &validator, stake) .expect("Test failed"); - bond_tokens(wl_storage, None, &validator, stake, current_epoch, None) + bond_tokens(state, None, &validator, stake, current_epoch, None) .expect("Test failed"); all_keys.insert(validator, keys); } compute_and_store_total_consensus_stake( - wl_storage, + state, current_epoch + params.pipeline_len, ) .expect("Test failed"); for (validator, keys) in all_keys.iter() { let protocol_key = keys.protocol.ref_to(); - wl_storage + state .write(&protocol_pk_key(validator), protocol_key) .expect("Test failed"); } - wl_storage.commit_block().expect("Test failed"); + state.commit_block().expect("Test failed"); all_keys } diff --git a/crates/ibc/Cargo.toml b/crates/ibc/Cargo.toml index cb54e54832..da368475fe 100644 --- a/crates/ibc/Cargo.toml +++ b/crates/ibc/Cargo.toml @@ -18,11 +18,12 @@ testing = ["namada_core/testing", "ibc-testkit", "proptest"] [dependencies] namada_core = { path = "../core" } +namada_gas = { path = "../gas" } namada_governance = { path = "../governance" } namada_parameters = { path = "../parameters" } namada_state = { path = "../state" } namada_storage = { path = "../storage" } -namada_trans_token = { path = "../trans_token" } +namada_token = { path = "../token" } borsh.workspace = true ibc.workspace = true diff --git a/crates/ibc/src/actions.rs b/crates/ibc/src/actions.rs index 9a999e6151..e77dd3dd8e 100644 --- a/crates/ibc/src/actions.rs +++ b/crates/ibc/src/actions.rs @@ -4,77 +4,244 @@ use std::cell::RefCell; use std::rc::Rc; use namada_core::address::{Address, InternalAddress}; -use namada_core::hash::Hash; use namada_core::ibc::apps::transfer::types::msgs::transfer::MsgTransfer; use namada_core::ibc::apps::transfer::types::packet::PacketData; use namada_core::ibc::apps::transfer::types::PrefixedCoin; use namada_core::ibc::core::channel::types::timeout::TimeoutHeight; use namada_core::ibc::primitives::Msg; use namada_core::ibc::IbcEvent; -use 
namada_core::storage::Epochs; use namada_core::tendermint::Time as TmTime; use namada_core::time::DateTimeUtc; use namada_core::token::DenominatedAmount; use namada_governance::storage::proposal::PGFIbcTarget; use namada_parameters::read_epoch_duration_parameter; -use namada_state::wl_storage::{PrefixIter, WriteLogAndStorage}; -use namada_state::write_log::{self, WriteLog}; use namada_state::{ - self as storage, iter_prefix_post, DBIter, ResultExt, State, StorageError, - StorageHasher, StorageResult, StorageWrite, WlStorage, DB, + DBIter, Epochs, ResultExt, State, StateRead, StorageError, StorageHasher, + StorageRead, StorageResult, StorageWrite, TxHostEnvState, WlState, DB, }; -use namada_storage::StorageRead; -use namada_trans_token as token; +use namada_token as token; use crate::{IbcActions, IbcCommonContext, IbcStorageContext}; /// IBC protocol context #[derive(Debug)] -pub struct IbcProtocolContext<'a, D, H> +pub struct IbcProtocolContext<'a, S> where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, + S: State, { - wl_storage: &'a mut WlStorage, + state: &'a mut S, } -impl WriteLogAndStorage for IbcProtocolContext<'_, D, H> +impl StorageRead for IbcProtocolContext<'_, S> where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, + S: State, { - type D = D; - type H = H; + type PrefixIter<'iter> = ::PrefixIter<'iter> where Self: 'iter; - fn write_log(&self) -> &WriteLog { - self.wl_storage.write_log() + fn read_bytes( + &self, + key: &namada_storage::Key, + ) -> StorageResult>> { + self.state.read_bytes(key) + } + + fn has_key(&self, key: &namada_storage::Key) -> StorageResult { + self.state.has_key(key) } - fn write_log_mut(&mut self) -> &mut WriteLog { - self.wl_storage.write_log_mut() + fn iter_prefix<'iter>( + &'iter self, + prefix: &namada_storage::Key, + ) -> StorageResult> { + self.state.iter_prefix(prefix) } - fn storage(&self) -> &State { - self.wl_storage.storage() + fn iter_next<'iter>( + &'iter self, + iter: &mut Self::PrefixIter<'iter>, + ) -> StorageResult)>> { + self.state.iter_next(iter) } - fn split_borrow(&mut self) -> (&mut WriteLog, &State) { - self.wl_storage.split_borrow() + fn get_chain_id(&self) -> StorageResult { + self.state.get_chain_id() } - fn write_tx_hash(&mut self, hash: Hash) -> write_log::Result<()> { - self.wl_storage.write_tx_hash(hash) + fn get_block_height(&self) -> StorageResult { + self.state.get_block_height() + } + + fn get_block_header( + &self, + height: namada_storage::BlockHeight, + ) -> StorageResult> { + StorageRead::get_block_header(self.state, height) + } + + fn get_block_hash(&self) -> StorageResult { + self.state.get_block_hash() + } + + fn get_block_epoch(&self) -> StorageResult { + self.state.get_block_epoch() + } + + fn get_pred_epochs(&self) -> StorageResult { + self.state.get_pred_epochs() + } + + fn get_tx_index(&self) -> StorageResult { + self.state.get_tx_index() + } + + fn get_native_token(&self) -> StorageResult
{ + self.state.get_native_token() } } -namada_state::impl_storage_traits!(IbcProtocolContext<'_, D, H>); -impl IbcStorageContext for IbcProtocolContext<'_, D, H> +impl StorageWrite for IbcProtocolContext<'_, S> where - D: DB + for<'iter> DBIter<'iter> + 'static, - H: StorageHasher + 'static, + S: State, +{ + fn write_bytes( + &mut self, + key: &namada_storage::Key, + val: impl AsRef<[u8]>, + ) -> StorageResult<()> { + self.state.write_bytes(key, val) + } + + fn delete(&mut self, key: &namada_storage::Key) -> StorageResult<()> { + self.state.delete(key) + } +} + +// impl StateRead for IbcProtocolContext<'_, S> +// where +// S: StateRead, +// { +// type D = ::D; +// type H = ::H; + +// fn write_log(&self) -> &namada_state::write_log::WriteLog { +// self.state.write_log() +// } + +// fn db(&self) -> &Self::D { +// self.state.db() +// } + +// fn in_mem(&self) -> &namada_state::InMemory { +// self.state.in_mem() +// } + +// fn charge_gas(&self, gas: u64) -> namada_state::Result<()> { +// self.state.charge_gas(gas) +// } +// } + +// impl State for IbcProtocolContext<'_, S> +// where +// S: State +// { +// fn write_log_mut(&mut self) -> &mut write_log::WriteLog { +// self.state.write_log_mut() +// } + +// fn split_borrow( +// &mut self, +// ) -> ( +// &mut write_log::WriteLog, +// &storage::InMemory, +// &Self::D, +// ) { +// self.state.split_borrow() +// } +// } + +// impl_storage_read!(IbcProtocolContext<'_, D, H>); +// impl_storage_write!(IbcProtocolContext<'_, D, H>); + +impl IbcStorageContext for TxHostEnvState<'_, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + fn emit_ibc_event(&mut self, event: IbcEvent) -> Result<(), StorageError> { + self.write_log_mut().emit_ibc_event(event); + Ok(()) + } + + fn get_ibc_events( + &self, + event_type: impl AsRef, + ) -> Result, StorageError> { + Ok(self + .write_log() + .get_ibc_events() + .iter() + .filter(|event| event.event_type == event_type.as_ref()) + .cloned() + .collect()) + } + + fn transfer_token( + &mut self, + src: &Address, + dest: &Address, + token: &Address, + amount: DenominatedAmount, + ) -> Result<(), StorageError> { + token::transfer(self, token, src, dest, amount.amount()) + } + + fn handle_masp_tx( + &mut self, + shielded: &masp_primitives::transaction::Transaction, + pin_key: Option<&str>, + ) -> Result<(), StorageError> { + namada_token::utils::handle_masp_tx(self, shielded, pin_key)?; + namada_token::utils::update_note_commitment_tree(self, shielded) + } + + fn mint_token( + &mut self, + target: &Address, + token: &Address, + amount: DenominatedAmount, + ) -> Result<(), StorageError> { + token::credit_tokens(self, token, target, amount.amount())?; + let minter_key = token::storage_key::minter_key(token); + self.write(&minter_key, Address::Internal(InternalAddress::Ibc)) + } + + fn burn_token( + &mut self, + target: &Address, + token: &Address, + amount: DenominatedAmount, + ) -> Result<(), StorageError> { + token::burn_tokens(self, token, target, amount.amount()) + } + + fn log_string(&self, message: String) { + tracing::trace!(message); + } +} + +impl IbcCommonContext for TxHostEnvState<'_, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ +} + +impl IbcStorageContext for IbcProtocolContext<'_, S> +where + S: State, { fn emit_ibc_event(&mut self, event: IbcEvent) -> Result<(), StorageError> { - self.wl_storage.write_log.emit_ibc_event(event); + self.state.write_log_mut().emit_ibc_event(event); Ok(()) } @@ -84,8 +251,8 @@ where event_type: 
impl AsRef, ) -> Result, StorageError> { Ok(self - .wl_storage - .write_log + .state + .write_log() .get_ibc_events() .iter() .filter(|event| event.event_type == event_type.as_ref()) @@ -101,7 +268,7 @@ where token: &Address, amount: DenominatedAmount, ) -> Result<(), StorageError> { - token::transfer(self, token, src, dest, amount.amount()) + token::transfer(self.state, token, src, dest, amount.amount()) } /// Handle masp tx @@ -120,9 +287,9 @@ where token: &Address, amount: DenominatedAmount, ) -> Result<(), StorageError> { - token::credit_tokens(self.wl_storage, token, target, amount.amount())?; + token::credit_tokens(self.state, token, target, amount.amount())?; let minter_key = token::storage_key::minter_key(token); - self.wl_storage + self.state .write(&minter_key, Address::Internal(InternalAddress::Ibc)) } @@ -133,7 +300,7 @@ where token: &Address, amount: DenominatedAmount, ) -> Result<(), StorageError> { - token::burn_tokens(self.wl_storage, token, target, amount.amount()) + token::burn_tokens(self.state, token, target, amount.amount()) } fn log_string(&self, message: String) { @@ -141,16 +308,11 @@ where } } -impl IbcCommonContext for IbcProtocolContext<'_, D, H> -where - D: DB + for<'iter> DBIter<'iter> + 'static, - H: StorageHasher + 'static, -{ -} +impl IbcCommonContext for IbcProtocolContext<'_, S> where S: State {} /// Transfer tokens over IBC pub fn transfer_over_ibc( - wl_storage: &mut WlStorage, + state: &mut WlState, token: &Address, source: &Address, target: &PGFIbcTarget, @@ -169,8 +331,8 @@ where receiver: target.target.clone().into(), memo: String::default().into(), }; - let timeout_timestamp = DateTimeUtc::now() - + read_epoch_duration_parameter(wl_storage)?.min_duration; + let timeout_timestamp = + DateTimeUtc::now() + read_epoch_duration_parameter(state)?.min_duration; let timeout_timestamp = TmTime::try_from(timeout_timestamp).into_storage_result()?; let ibc_message = MsgTransfer { @@ -184,7 +346,7 @@ where let mut data = vec![]; prost::Message::encode(&any_msg, &mut data).into_storage_result()?; - let ctx = IbcProtocolContext { wl_storage }; + let ctx = IbcProtocolContext { state }; let mut actions = IbcActions::new(Rc::new(RefCell::new(ctx))); actions.execute(&data).into_storage_result() } diff --git a/crates/ibc/src/context/token_transfer.rs b/crates/ibc/src/context/token_transfer.rs index caaa945015..29f4fd7bc1 100644 --- a/crates/ibc/src/context/token_transfer.rs +++ b/crates/ibc/src/context/token_transfer.rs @@ -14,7 +14,7 @@ use namada_core::ibc::core::handler::types::error::ContextError; use namada_core::ibc::core::host::types::identifiers::{ChannelId, PortId}; use namada_core::token; use namada_core::uint::Uint; -use namada_trans_token::read_denom; +use namada_token::read_denom; use super::common::IbcCommonContext; use crate::storage; diff --git a/crates/namada/src/ledger/governance/mod.rs b/crates/namada/src/ledger/governance/mod.rs index c694f035d4..043558fd4c 100644 --- a/crates/namada/src/ledger/governance/mod.rs +++ b/crates/namada/src/ledger/governance/mod.rs @@ -13,7 +13,7 @@ use namada_governance::utils::is_valid_validator_voting_period; use namada_governance::ProposalVote; use namada_proof_of_stake::is_validator; use namada_proof_of_stake::queries::find_delegations; -use namada_state::StorageRead; +use namada_state::{StateRead, StorageRead}; use namada_tx::Tx; use namada_vp_env::VpEnv; use thiserror::Error; @@ -47,20 +47,18 @@ pub enum Error { } /// Governance VP -pub struct GovernanceVp<'a, DB, H, CA> +pub struct GovernanceVp<'a, S, CA> where - 
DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: namada_state::StorageHasher, + S: StateRead, CA: WasmCacheAccess, { /// Context to interact with the host structures. - pub ctx: Ctx<'a, DB, H, CA>, + pub ctx: Ctx<'a, S, CA>, } -impl<'a, DB, H, CA> NativeVp for GovernanceVp<'a, DB, H, CA> +impl<'a, S, CA> NativeVp for GovernanceVp<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + namada_state::StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { type Error = Error; @@ -134,10 +132,9 @@ where } } -impl<'a, DB, H, CA> GovernanceVp<'a, DB, H, CA> +impl<'a, S, CA> GovernanceVp<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + namada_state::StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { fn is_valid_init_proposal_key_set( @@ -711,8 +708,7 @@ where delegation_address: &Address, ) -> Result where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + namada_state::StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { if !address.eq(delegation_address) { diff --git a/crates/namada/src/ledger/ibc/mod.rs b/crates/namada/src/ledger/ibc/mod.rs index 45f987e7a8..d8f89952fb 100644 --- a/crates/namada/src/ledger/ibc/mod.rs +++ b/crates/namada/src/ledger/ibc/mod.rs @@ -4,13 +4,12 @@ pub use namada_ibc::storage; use namada_ibc::storage::{ channel_counter_key, client_counter_key, connection_counter_key, }; -use namada_state::{StorageHasher, StorageWrite, WlStorage}; +use namada_state::State; /// Initialize storage in the genesis block. -pub fn init_genesis_storage(storage: &mut WlStorage) +pub fn init_genesis_storage(storage: &mut S) where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: State, { // In ibc-go, u64 like a counter is encoded with big-endian: // https://github.com/cosmos/ibc-go/blob/89ffaafb5956a5ea606e1f1bf249c880bea802ed/modules/core/04-channel/keeper/keeper.go#L115 diff --git a/crates/namada/src/ledger/mod.rs b/crates/namada/src/ledger/mod.rs index f73b730945..f68a0d2e39 100644 --- a/crates/namada/src/ledger/mod.rs +++ b/crates/namada/src/ledger/mod.rs @@ -21,6 +21,8 @@ pub use { #[cfg(feature = "wasm-runtime")] mod dry_run_tx { + use std::cell::RefCell; + use namada_sdk::queries::{EncodedResponseQuery, RequestCtx, RequestQuery}; use namada_state::{DBIter, ResultExt, StorageHasher, DB}; use namada_tx::data::GasLimit; @@ -30,8 +32,8 @@ mod dry_run_tx { use crate::vm::WasmCacheAccess; /// Dry run a transaction - pub fn dry_run_tx( - mut ctx: RequestCtx<'_, D, H, VpCache, TxCache>, + pub fn dry_run_tx<'a, D, H, CA>( + mut ctx: RequestCtx<'a, D, H, VpCache, TxCache>, request: &RequestQuery, ) -> namada_state::StorageResult where @@ -41,32 +43,31 @@ mod dry_run_tx { { use borsh_ext::BorshSerializeExt; use namada_gas::{Gas, GasMetering, TxGasMeter}; - use namada_state::TempWlStorage; use namada_tx::data::{DecryptedTx, TxType}; use namada_tx::Tx; use crate::ledger::protocol::ShellParams; use crate::storage::TxIndex; + let mut temp_state = ctx.state.with_temp_write_log(); let mut tx = Tx::try_from(&request.data[..]).into_storage_result()?; tx.validate_tx().into_storage_result()?; - let mut temp_wl_storage = TempWlStorage::new(&ctx.wl_storage.storage); let mut cumulated_gas = Gas::default(); // Wrapper dry run to allow estimating the gas cost of a transaction - let mut tx_gas_meter = match tx.header().tx_type { + let tx_gas_meter = match 
tx.header().tx_type { TxType::Wrapper(wrapper) => { - let mut tx_gas_meter = - TxGasMeter::new(wrapper.gas_limit.to_owned()); + let tx_gas_meter = + RefCell::new(TxGasMeter::new(wrapper.gas_limit.to_owned())); protocol::apply_wrapper_tx( tx.clone(), &wrapper, None, &request.data, ShellParams::new( - &mut tx_gas_meter, - &mut temp_wl_storage, + &tx_gas_meter, + &mut temp_state, &mut ctx.vp_wasm_cache, &mut ctx.tx_wasm_cache, ), @@ -74,53 +75,53 @@ mod dry_run_tx { ) .into_storage_result()?; - temp_wl_storage.write_log.commit_tx(); - cumulated_gas = tx_gas_meter.get_tx_consumed_gas(); + temp_state.write_log_mut().commit_tx(); + cumulated_gas = tx_gas_meter.borrow_mut().get_tx_consumed_gas(); tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); - TxGasMeter::new_from_sub_limit(tx_gas_meter.get_available_gas()) + let available_gas = tx_gas_meter.borrow().get_available_gas(); + TxGasMeter::new_from_sub_limit(available_gas) } TxType::Protocol(_) | TxType::Decrypted(_) => { - // If dry run only the inner tx, use the max block gas as the - // gas limit + // If dry run only the inner tx, use the max block gas as + // the gas limit TxGasMeter::new(GasLimit::from( - namada_parameters::get_max_block_gas(ctx.wl_storage) - .unwrap(), + namada_parameters::get_max_block_gas(ctx.state).unwrap(), )) } TxType::Raw => { // Cast tx to a decrypted for execution tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); - // If dry run only the inner tx, use the max block gas as the - // gas limit + // If dry run only the inner tx, use the max block gas as + // the gas limit TxGasMeter::new(GasLimit::from( - namada_parameters::get_max_block_gas(ctx.wl_storage) - .unwrap(), + namada_parameters::get_max_block_gas(ctx.state).unwrap(), )) } }; + let tx_gas_meter = RefCell::new(tx_gas_meter); let mut data = protocol::apply_wasm_tx( tx, &TxIndex(0), ShellParams::new( - &mut tx_gas_meter, - &mut temp_wl_storage, + &tx_gas_meter, + &mut temp_state, &mut ctx.vp_wasm_cache, &mut ctx.tx_wasm_cache, ), ) .into_storage_result()?; cumulated_gas = cumulated_gas - .checked_add(tx_gas_meter.get_tx_consumed_gas()) + .checked_add(tx_gas_meter.borrow().get_tx_consumed_gas()) .ok_or(namada_state::StorageError::SimpleMessage( "Overflow in gas", ))?; // Account gas for both inner and wrapper (if available) data.gas_used = cumulated_gas; - // NOTE: the keys changed by the wrapper transaction (if any) are not - // returned from this function + // NOTE: the keys changed by the wrapper transaction (if any) are + // not returned from this function let data = data.serialize_to_vec(); Ok(EncodedResponseQuery { data, @@ -141,7 +142,7 @@ mod test { EncodedResponseQuery, RequestCtx, RequestQuery, Router, RPC, }; use namada_sdk::tendermint_rpc::{self, Error as RpcError, Response}; - use namada_state::testing::TestWlStorage; + use namada_state::testing::TestState; use namada_state::StorageWrite; use namada_test_utils::TestWasms; use namada_tx::data::decrypted::DecryptedTx; @@ -162,8 +163,8 @@ mod test { { /// RPC router pub rpc: RPC, - /// storage - pub wl_storage: TestWlStorage, + /// state + pub state: TestState, /// event log pub event_log: EventLog, /// VP wasm compilation cache @@ -184,17 +185,14 @@ mod test { /// Initialize a test client for the given root RPC router pub fn new(rpc: RPC) -> Self { // Initialize the `TestClient` - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); // Initialize mock gas limit let max_block_gas_key = namada_parameters::storage::get_max_block_gas_key(); - 
wl_storage - .storage - .write(&max_block_gas_key, namada_core::encode(&20_000_000_u64)) - .expect( - "Max block gas parameter must be initialized in storage", - ); + state.write(&max_block_gas_key, 20_000_000_u64).expect( + "Max block gas parameter must be initialized in storage", + ); let event_log = EventLog::default(); let (vp_wasm_cache, vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -202,7 +200,7 @@ mod test { wasm::compilation_cache::common::testing::cache(); Self { rpc, - wl_storage, + state, event_log, vp_wasm_cache: vp_wasm_cache.read_only(), tx_wasm_cache: tx_wasm_cache.read_only(), @@ -238,7 +236,7 @@ mod test { prove, }; let ctx = RequestCtx { - wl_storage: &self.wl_storage, + state: &self.state, event_log: &self.event_log, vp_wasm_cache: self.vp_wasm_cache.clone(), tx_wasm_cache: self.tx_wasm_cache.clone(), @@ -274,22 +272,21 @@ mod test { let tx_hash = Hash::sha256(&tx_no_op); let key = Key::wasm_code(&tx_hash); let len_key = Key::wasm_code_len(&tx_hash); - client.wl_storage.storage.write(&key, &tx_no_op).unwrap(); + client.state.db_write(&key, &tx_no_op).unwrap(); client - .wl_storage - .storage - .write(&len_key, (tx_no_op.len() as u64).serialize_to_vec()) + .state + .db_write(&len_key, (tx_no_op.len() as u64).serialize_to_vec()) .unwrap(); // Request last committed epoch let read_epoch = RPC.shell().epoch(&client).await.unwrap(); - let current_epoch = client.wl_storage.storage.last_epoch; + let current_epoch = client.state.in_mem().last_epoch; assert_eq!(current_epoch, read_epoch); // Request dry run tx let mut outer_tx = Tx::from_type(TxType::Decrypted(DecryptedTx::Decrypted)); - outer_tx.header.chain_id = client.wl_storage.storage.chain_id.clone(); + outer_tx.header.chain_id = client.state.in_mem().chain_id.clone(); outer_tx.set_code(Code::from_hash(tx_hash, None)); outer_tx.set_data(Data::new(vec![])); let tx_bytes = outer_tx.to_bytes(); @@ -331,10 +328,10 @@ mod test { // Then write some balance ... let balance = token::Amount::native_whole(1000); - StorageWrite::write(&mut client.wl_storage, &balance_key, balance)?; + StorageWrite::write(&mut client.state, &balance_key, balance)?; // It has to be committed to be visible in a query - client.wl_storage.commit_tx(); - client.wl_storage.commit_block().unwrap(); + client.state.commit_tx(); + client.state.commit_block().unwrap(); // ... 
there should be the same value now let read_balance = RPC .shell() diff --git a/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs b/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs index 60e846fb44..b982abf143 100644 --- a/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs +++ b/crates/namada/src/ledger/native_vp/ethereum_bridge/bridge_pool_vp.rs @@ -13,6 +13,7 @@ use std::borrow::Cow; use std::collections::BTreeSet; +use std::fmt::Debug; use std::marker::PhantomData; use borsh::BorshDeserialize; @@ -25,7 +26,7 @@ use namada_ethereum_bridge::storage::bridge_pool::{ use namada_ethereum_bridge::storage::parameters::read_native_erc20_address; use namada_ethereum_bridge::storage::whitelist; use namada_ethereum_bridge::ADDRESS as BRIDGE_ADDRESS; -use namada_state::{DBIter, StorageHasher, DB}; +use namada_state::StateRead; use namada_tx::Tx; use crate::address::{Address, InternalAddress}; @@ -70,20 +71,18 @@ impl AmountDelta { } /// Validity predicate for the Ethereum bridge -pub struct BridgePoolVp<'ctx, D, H, CA> +pub struct BridgePoolVp<'ctx, S, CA> where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Context to interact with the host structures. - pub ctx: Ctx<'ctx, D, H, CA>, + pub ctx: Ctx<'ctx, S, CA>, } -impl<'a, D, H, CA> BridgePoolVp<'a, D, H, CA> +impl<'a, S, CA> BridgePoolVp<'a, S, CA> where - D: 'static + DB + for<'iter> DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Get the change in the balance of an account @@ -334,7 +333,7 @@ where let same_sender_and_fee_payer = transfer.gas_fee.payer == transfer.transfer.sender; let gas_is_native_asset = - transfer.gas_fee.token == self.ctx.storage.native_token; + transfer.gas_fee.token == self.ctx.state.in_mem().native_token; let gas_and_token_is_native_asset = gas_is_native_asset && tok_is_native_asset; let same_token_and_gas_asset = @@ -366,7 +365,7 @@ where { // when minting wrapped NAM on Ethereum, escrow to the Ethereum // bridge address, and draw from NAM token accounts - let token = Cow::Borrowed(&self.ctx.storage.native_token); + let token = Cow::Borrowed(&self.ctx.state.in_mem().native_token); let escrow_account = &BRIDGE_ADDRESS; (token, escrow_account) } else { @@ -518,10 +517,9 @@ fn sum_gas_and_token_amounts( }) } -impl<'a, D, H, CA> NativeVp for BridgePoolVp<'a, D, H, CA> +impl<'a, S, CA> NativeVp for BridgePoolVp<'a, S, CA> where - D: 'static + DB + for<'iter> DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { type Error = Error; @@ -639,29 +637,29 @@ where #[cfg(test)] mod test_bridge_pool_vp { + use std::cell::RefCell; use std::env::temp_dir; use borsh::BorshDeserialize; use namada_core::borsh::BorshSerializeExt; + use namada_core::validity_predicate::VpSentinel; use namada_ethereum_bridge::storage::bridge_pool::get_signed_root_key; use namada_ethereum_bridge::storage::parameters::{ Contracts, EthereumBridgeParams, UpgradeableContract, }; use namada_ethereum_bridge::storage::wrapped_erc20s; use namada_gas::TxGasMeter; + use namada_state::testing::TestState; use namada_state::StorageWrite; use namada_tx::data::TxType; use super::*; use crate::address::testing::{nam, wnam}; use crate::address::InternalAddress; - use crate::chain::ChainId; use crate::eth_bridge_pool::{GasFee, TransferToEthereum}; use crate::hash::Hash; use crate::ledger::gas::VpGasMeter; - use crate::state::mockdb::MockDB; use 
crate::state::write_log::WriteLog; - use crate::state::{Sha256Hasher, State, WlStorage}; use crate::storage::TxIndex; use crate::vm::wasm::VpCache; use crate::vm::WasmCacheRwAccess; @@ -751,16 +749,16 @@ mod test_bridge_pool_vp { } } - /// Create a writelog representing storage before a transfer is added to the - /// pool. - fn new_writelog() -> WriteLog { - let mut writelog = WriteLog::default(); + /// Create a write-log representing storage before a transfer is added to + /// the pool. + fn new_write_log(write_log: &mut WriteLog) { + *write_log = WriteLog::default(); // setup the initial bridge pool storage - writelog + write_log .write(&get_signed_root_key(), Hash([0; 32]).serialize_to_vec()) .expect("Test failed"); let transfer = initial_pool(); - writelog + write_log .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .expect("Test failed"); // whitelist wnam @@ -769,7 +767,7 @@ mod test_bridge_pool_vp { suffix: whitelist::KeyType::Whitelisted, } .into(); - writelog + write_log .write(&key, true.serialize_to_vec()) .expect("Test failed"); let key = whitelist::Key { @@ -777,45 +775,44 @@ mod test_bridge_pool_vp { suffix: whitelist::KeyType::Cap, } .into(); - writelog + write_log .write(&key, Amount::max().serialize_to_vec()) .expect("Test failed"); // set up users with ERC20 and NUT balances update_balances( - &mut writelog, + write_log, Balance::new(TransferToEthereumKind::Erc20, bertha_address()), SignedAmount::Positive(BERTHA_WEALTH.into()), SignedAmount::Positive(BERTHA_TOKENS.into()), ); update_balances( - &mut writelog, + write_log, Balance::new(TransferToEthereumKind::Nut, daewon_address()), SignedAmount::Positive(DAEWONS_GAS.into()), SignedAmount::Positive(DAES_NUTS.into()), ); // set up the initial balances of the bridge pool update_balances( - &mut writelog, + write_log, Balance::new(TransferToEthereumKind::Erc20, BRIDGE_POOL_ADDRESS), SignedAmount::Positive(ESCROWED_AMOUNT.into()), SignedAmount::Positive(ESCROWED_TOKENS.into()), ); update_balances( - &mut writelog, + write_log, Balance::new(TransferToEthereumKind::Nut, BRIDGE_POOL_ADDRESS), SignedAmount::Positive(ESCROWED_AMOUNT.into()), SignedAmount::Positive(ESCROWED_NUTS.into()), ); // set up the initial balances of the ethereum bridge account update_balances( - &mut writelog, + write_log, Balance::new(TransferToEthereumKind::Erc20, BRIDGE_ADDRESS), SignedAmount::Positive(ESCROWED_AMOUNT.into()), // we only care about escrowing NAM SignedAmount::Positive(0.into()), ); - writelog.commit_tx(); - writelog + write_log.commit_tx(); } /// Update gas and token balances of an address and @@ -899,7 +896,7 @@ mod test_bridge_pool_vp { } /// Initialize some dummy storage for testing - fn setup_storage() -> WlStorage { + fn setup_storage() -> TestState { // a dummy config for testing let config = EthereumBridgeParams { erc20_whitelist: vec![], @@ -913,41 +910,30 @@ mod test_bridge_pool_vp { }, }, }; - let mut wl_storage = WlStorage { - storage: State::::open( - std::path::Path::new(""), - ChainId::default(), - nam(), - None, - None, - namada_sdk::state::merklize_all_keys, - ), - write_log: Default::default(), - }; - config.init_storage(&mut wl_storage); - wl_storage.commit_block().expect("Test failed"); - wl_storage.write_log = new_writelog(); - wl_storage.commit_block().expect("Test failed"); - wl_storage + let mut state = TestState::default(); + config.init_storage(&mut state); + state.commit_block().expect("Test failed"); + new_write_log(state.write_log_mut()); + state.commit_block().expect("Test failed"); + state } 
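Reviewer note: the dominant refactor in these hunks is replacing the concrete `WlStorage<D, H>` (with its separate `DB`/`StorageHasher` type parameters) by a single generic bound such as `S: StorageRead + StorageWrite`, `S: State`, or `S: StateRead` — see `init_storage`, `init_genesis_storage`, `GovernanceVp`, `BridgePoolVp`, and the test modules above, which now take `TestState`. The sketch below only illustrates that style of generalization with hypothetical `KvRead`/`KvWrite` traits and a `TestKv` backend; it is not the `namada_storage` API.

use std::collections::BTreeMap;

// Hypothetical stand-ins for the `StorageRead`/`StorageWrite` traits.
trait KvRead {
    fn read(&self, key: &str) -> Option<Vec<u8>>;
}
trait KvWrite {
    fn write(&mut self, key: &str, value: Vec<u8>);
}

// In-memory test backend, playing the role of `TestState` in the diff.
#[derive(Default)]
struct TestKv(BTreeMap<String, Vec<u8>>);

impl KvRead for TestKv {
    fn read(&self, key: &str) -> Option<Vec<u8>> {
        self.0.get(key).cloned()
    }
}
impl KvWrite for TestKv {
    fn write(&mut self, key: &str, value: Vec<u8>) {
        self.0.insert(key.to_owned(), value);
    }
}

// Before: `fn init_storage<D: DB, H: StorageHasher>(storage: &mut WlStorage<D, H>)`.
// After: a single bound over the storage traits, usable by any backend.
fn init_storage<S>(storage: &mut S)
where
    S: KvRead + KvWrite,
{
    // Write a default escrow balance, mirroring the Ethereum bridge change.
    storage.write("escrow/balance", 0u64.to_be_bytes().to_vec());
}

fn main() {
    let mut state = TestKv::default();
    init_storage(&mut state);
    assert!(state.read("escrow/balance").is_some());
}

The payoff of this shape is visible throughout the diff: callers pass `&mut state` whether `state` is the real ledger state, a temporary write-log wrapper, or a test state, and the VPs no longer need to name `DB`/`StorageHasher` at every use site.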
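Reviewer note: the other recurring change is threading gas meters through shared `RefCell`s instead of `&mut` references, so the wrapper run, the inner tx run, and the VP contexts can all charge the same meter while the caller can still read the consumed gas back afterwards (see `execute_tx`, `dry_run_tx`, and the `VpGasMeter`/`VpSentinel` cells in the VP tests). A minimal, self-contained sketch of that interior-mutability pattern follows; `StandInGasMeter` and its methods are hypothetical and are not the `namada_gas::TxGasMeter` API.

use std::cell::RefCell;

// Hypothetical stand-in for a transaction gas meter: a limit plus a counter.
struct StandInGasMeter {
    limit: u64,
    consumed: u64,
}

impl StandInGasMeter {
    fn new(limit: u64) -> Self {
        Self { limit, consumed: 0 }
    }

    // Charge some gas, failing when the limit is exceeded.
    fn charge(&mut self, gas: u64) -> Result<(), String> {
        let new_total = self.consumed.checked_add(gas).ok_or("gas overflow")?;
        if new_total > self.limit {
            return Err("gas limit exceeded".into());
        }
        self.consumed = new_total;
        Ok(())
    }

    fn consumed(&self) -> u64 {
        self.consumed
    }

    fn available(&self) -> u64 {
        self.limit - self.consumed
    }
}

// A component that only holds a shared reference to the meter, mirroring how
// `ShellParams` and the VP contexts borrow `&RefCell<TxGasMeter>` in the diff.
fn run_step(meter: &RefCell<StandInGasMeter>, cost: u64) -> Result<(), String> {
    meter.borrow_mut().charge(cost)
}

fn main() -> Result<(), String> {
    // Wrapper phase: its own meter; consumed gas is read back afterwards.
    let wrapper_meter = RefCell::new(StandInGasMeter::new(50_000));
    run_step(&wrapper_meter, 12_000)?;
    let mut cumulated = wrapper_meter.borrow().consumed();

    // Inner tx phase: a fresh meter bounded by what the wrapper left over,
    // echoing `new_from_sub_limit(get_available_gas())` in `dry_run_tx`.
    let inner_meter =
        RefCell::new(StandInGasMeter::new(wrapper_meter.borrow().available()));
    run_step(&inner_meter, 20_000)?;
    cumulated += inner_meter.borrow().consumed();

    println!("total gas used: {cumulated}");
    Ok(())
}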
/// Setup a ctx for running native vps fn setup_ctx<'a>( tx: &'a Tx, - storage: &'a State, - write_log: &'a WriteLog, + state: &'a TestState, + gas_meter: &'a RefCell, + sentinel: &'a RefCell, keys_changed: &'a BTreeSet, verifiers: &'a BTreeSet
, - ) -> Ctx<'a, MockDB, Sha256Hasher, WasmCacheRwAccess> { + ) -> Ctx<'a, TestState, WasmCacheRwAccess> { Ctx::new( &BRIDGE_POOL_ADDRESS, - storage, - write_log, + state, tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + gas_meter, + sentinel, keys_changed, verifiers, VpCache::new(temp_dir(), 100usize), @@ -973,7 +959,7 @@ mod test_bridge_pool_vp { F: FnOnce(&mut PendingTransfer, &mut WriteLog) -> BTreeSet, { // setup - let mut wl_storage = setup_storage(); + let mut state = setup_storage(); let tx = Tx::from_type(TxType::Raw); // the transfer to be added to the pool @@ -993,11 +979,11 @@ mod test_bridge_pool_vp { }; // add transfer to pool let mut keys_changed = - insert_transfer(&mut transfer, &mut wl_storage.write_log); + insert_transfer(&mut transfer, state.write_log_mut()); // change Bertha's balances let mut new_keys_changed = update_balances( - &mut wl_storage.write_log, + state.write_log_mut(), Balance { asset: transfer.transfer.asset, kind: TransferToEthereumKind::Erc20, @@ -1012,7 +998,7 @@ mod test_bridge_pool_vp { // change the bridge pool balances let mut new_keys_changed = update_balances( - &mut wl_storage.write_log, + state.write_log_mut(), Balance { asset: transfer.transfer.asset, kind: TransferToEthereumKind::Erc20, @@ -1026,17 +1012,22 @@ mod test_bridge_pool_vp { keys_changed.append(&mut new_keys_changed); let verifiers = BTreeSet::default(); // create the data to be given to the vp + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp = BridgePoolVp { ctx: setup_ctx( &tx, - &wl_storage.storage, - &wl_storage.write_log, + &state, + &gas_meter, + &sentinel, &keys_changed, &verifiers, ), }; - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_data(transfer); let res = vp.validate_tx(&tx, &keys_changed, &verifiers); @@ -1322,7 +1313,7 @@ mod test_bridge_pool_vp { #[test] fn test_adding_transfer_twice_fails() { // setup - let mut wl_storage = setup_storage(); + let mut state = setup_storage(); let tx = Tx::from_type(TxType::Raw); // the transfer to be added to the pool @@ -1330,8 +1321,8 @@ mod test_bridge_pool_vp { // add transfer to pool let mut keys_changed = { - wl_storage - .write_log + state + .write_log_mut() .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(&transfer)]) @@ -1339,7 +1330,7 @@ mod test_bridge_pool_vp { // update Bertha's balances let mut new_keys_changed = update_balances( - &mut wl_storage.write_log, + state.write_log_mut(), Balance { asset: ASSET, kind: TransferToEthereumKind::Erc20, @@ -1354,7 +1345,7 @@ mod test_bridge_pool_vp { // update the bridge pool balances let mut new_keys_changed = update_balances( - &mut wl_storage.write_log, + state.write_log_mut(), Balance { asset: ASSET, kind: TransferToEthereumKind::Erc20, @@ -1369,17 +1360,22 @@ mod test_bridge_pool_vp { let verifiers = BTreeSet::default(); // create the data to be given to the vp + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp = BridgePoolVp { ctx: setup_ctx( &tx, - &wl_storage.storage, - &wl_storage.write_log, + &state, + &gas_meter, + &sentinel, &keys_changed, &verifiers, ), }; - let mut tx = 
Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_data(transfer); let res = vp.validate_tx(&tx, &keys_changed, &verifiers); @@ -1391,7 +1387,7 @@ mod test_bridge_pool_vp { #[test] fn test_zero_gas_fees_rejected() { // setup - let mut wl_storage = setup_storage(); + let mut state = setup_storage(); let tx = Tx::from_type(TxType::Raw); // the transfer to be added to the pool @@ -1412,8 +1408,8 @@ mod test_bridge_pool_vp { // add transfer to pool let mut keys_changed = { - wl_storage - .write_log + state + .write_log_mut() .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(&transfer)]) @@ -1430,17 +1426,22 @@ mod test_bridge_pool_vp { let verifiers = BTreeSet::default(); // create the data to be given to the vp + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp = BridgePoolVp { ctx: setup_ctx( &tx, - &wl_storage.storage, - &wl_storage.write_log, + &state, + &gas_meter, + &sentinel, &keys_changed, &verifiers, ), }; - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_data(transfer); let res = vp @@ -1454,7 +1455,7 @@ mod test_bridge_pool_vp { #[test] fn test_minting_wnam() { // setup - let mut wl_storage = setup_storage(); + let mut state = setup_storage(); let eb_account_key = balance_key(&nam(), &Address::Internal(InternalAddress::EthBridge)); let tx = Tx::from_type(TxType::Raw); @@ -1477,8 +1478,8 @@ mod test_bridge_pool_vp { // add transfer to pool let mut keys_changed = { - wl_storage - .write_log + state + .write_log_mut() .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(&transfer)]) @@ -1486,8 +1487,8 @@ mod test_bridge_pool_vp { // We escrow 100 Nam into the bridge pool VP // and 100 Nam in the Eth bridge VP let account_key = balance_key(&nam(), &bertha_address()); - wl_storage - .write_log + state + .write_log_mut() .write( &account_key, Amount::from(BERTHA_WEALTH - 200).serialize_to_vec(), @@ -1495,16 +1496,16 @@ mod test_bridge_pool_vp { .expect("Test failed"); assert!(keys_changed.insert(account_key)); let bp_account_key = balance_key(&nam(), &BRIDGE_POOL_ADDRESS); - wl_storage - .write_log + state + .write_log_mut() .write( &bp_account_key, Amount::from(ESCROWED_AMOUNT + 100).serialize_to_vec(), ) .expect("Test failed"); assert!(keys_changed.insert(bp_account_key)); - wl_storage - .write_log + state + .write_log_mut() .write( &eb_account_key, Amount::from(ESCROWED_AMOUNT + 100).serialize_to_vec(), @@ -1514,17 +1515,22 @@ mod test_bridge_pool_vp { let verifiers = BTreeSet::default(); // create the data to be given to the vp + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp = BridgePoolVp { ctx: setup_ctx( &tx, - &wl_storage.storage, - &wl_storage.write_log, + &state, + &gas_meter, + &sentinel, &keys_changed, &verifiers, ), }; - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_data(transfer); let res = vp @@ -1539,7 +1545,7 @@ mod test_bridge_pool_vp { #[test] fn test_reject_mint_wnam() { // setup - let mut wl_storage = setup_storage(); + let mut state = setup_storage(); let 
tx = Tx::from_type(TxType::Raw); let eb_account_key = balance_key(&nam(), &Address::Internal(InternalAddress::EthBridge)); @@ -1562,8 +1568,8 @@ mod test_bridge_pool_vp { // add transfer to pool let keys_changed = { - wl_storage - .write_log + state + .write_log_mut() .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(&transfer)]) @@ -1571,39 +1577,44 @@ mod test_bridge_pool_vp { // We escrow 100 Nam into the bridge pool VP // and 100 Nam in the Eth bridge VP let account_key = balance_key(&nam(), &bertha_address()); - wl_storage - .write_log + state + .write_log_mut() .write( &account_key, Amount::from(BERTHA_WEALTH - 200).serialize_to_vec(), ) .expect("Test failed"); let bp_account_key = balance_key(&nam(), &BRIDGE_POOL_ADDRESS); - wl_storage - .write_log + state + .write_log_mut() .write( &bp_account_key, Amount::from(ESCROWED_AMOUNT + 100).serialize_to_vec(), ) .expect("Test failed"); - wl_storage - .write_log + state + .write_log_mut() .write(&eb_account_key, Amount::from(10).serialize_to_vec()) .expect("Test failed"); let verifiers = BTreeSet::default(); // create the data to be given to the vp + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp = BridgePoolVp { ctx: setup_ctx( &tx, - &wl_storage.storage, - &wl_storage.write_log, + &state, + &gas_meter, + &sentinel, &keys_changed, &verifiers, ), }; - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_data(transfer); let res = vp @@ -1618,20 +1629,20 @@ mod test_bridge_pool_vp { #[test] fn test_mint_wnam_separate_gas_payer() { // setup - let mut wl_storage = setup_storage(); + let mut state = setup_storage(); // initialize the eth bridge balance to 0 let eb_account_key = balance_key(&nam(), &Address::Internal(InternalAddress::EthBridge)); - wl_storage + state .write(&eb_account_key, Amount::default()) .expect("Test failed"); // initialize the gas payers account let gas_payer_balance_key = balance_key(&nam(), &established_address_1()); - wl_storage + state .write(&gas_payer_balance_key, Amount::from(BERTHA_WEALTH)) .expect("Test failed"); - wl_storage.write_log.commit_tx(); + state.write_log_mut().commit_tx(); let tx = Tx::from_type(TxType::Raw); // the transfer to be added to the pool @@ -1652,8 +1663,8 @@ mod test_bridge_pool_vp { // add transfer to pool let keys_changed = { - wl_storage - .write_log + state + .write_log_mut() .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(&transfer)]) @@ -1661,45 +1672,50 @@ mod test_bridge_pool_vp { // We escrow 100 Nam into the bridge pool VP // and 100 Nam in the Eth bridge VP let account_key = balance_key(&nam(), &bertha_address()); - wl_storage - .write_log + state + .write_log_mut() .write( &account_key, Amount::from(BERTHA_WEALTH - 100).serialize_to_vec(), ) .expect("Test failed"); - wl_storage - .write_log + state + .write_log_mut() .write( &gas_payer_balance_key, Amount::from(BERTHA_WEALTH - 100).serialize_to_vec(), ) .expect("Test failed"); let bp_account_key = balance_key(&nam(), &BRIDGE_POOL_ADDRESS); - wl_storage - .write_log + state + .write_log_mut() .write( &bp_account_key, Amount::from(ESCROWED_AMOUNT + 100).serialize_to_vec(), ) .expect("Test failed"); - wl_storage - .write_log + state + .write_log_mut() .write(&eb_account_key, 
Amount::from(10).serialize_to_vec()) .expect("Test failed"); let verifiers = BTreeSet::default(); // create the data to be given to the vp + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp = BridgePoolVp { ctx: setup_ctx( &tx, - &wl_storage.storage, - &wl_storage.write_log, + &state, + &gas_meter, + &sentinel, &keys_changed, &verifiers, ), }; - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_data(transfer); let res = vp @@ -1711,7 +1727,7 @@ mod test_bridge_pool_vp { /// Auxiliary function to test NUT functionality. fn test_nut_aux(kind: TransferToEthereumKind, expect: Expect) { // setup - let mut wl_storage = setup_storage(); + let mut state = setup_storage(); let tx = Tx::from_type(TxType::Raw); // the transfer to be added to the pool @@ -1732,8 +1748,8 @@ mod test_bridge_pool_vp { // add transfer to pool let mut keys_changed = { - wl_storage - .write_log + state + .write_log_mut() .write(&get_pending_key(&transfer), transfer.serialize_to_vec()) .unwrap(); BTreeSet::from([get_pending_key(&transfer)]) @@ -1741,7 +1757,7 @@ mod test_bridge_pool_vp { // update Daewon's balances let mut new_keys_changed = update_balances( - &mut wl_storage.write_log, + state.write_log_mut(), Balance { kind, asset: ASSET, @@ -1756,7 +1772,7 @@ mod test_bridge_pool_vp { // change the bridge pool balances let mut new_keys_changed = update_balances( - &mut wl_storage.write_log, + state.write_log_mut(), Balance { kind, asset: ASSET, @@ -1771,11 +1787,16 @@ mod test_bridge_pool_vp { // create the data to be given to the vp let verifiers = BTreeSet::default(); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp = BridgePoolVp { ctx: setup_ctx( &tx, - &wl_storage.storage, - &wl_storage.write_log, + &state, + &gas_meter, + &sentinel, &keys_changed, &verifiers, ), diff --git a/crates/namada/src/ledger/native_vp/ethereum_bridge/nut.rs b/crates/namada/src/ledger/native_vp/ethereum_bridge/nut.rs index 545d549bf8..acabe77b2d 100644 --- a/crates/namada/src/ledger/native_vp/ethereum_bridge/nut.rs +++ b/crates/namada/src/ledger/native_vp/ethereum_bridge/nut.rs @@ -5,7 +5,7 @@ use std::collections::BTreeSet; use eyre::WrapErr; use namada_core::address::{Address, InternalAddress}; use namada_core::storage::Key; -use namada_state::StorageHasher; +use namada_state::StateRead; use namada_tx::Tx; use namada_vp_env::VpEnv; @@ -23,20 +23,18 @@ pub struct Error(#[from] eyre::Report); /// /// All this VP does is reject NUT transfers whose destination /// address is not the Bridge pool escrow address. -pub struct NonUsableTokens<'ctx, DB, H, CA> +pub struct NonUsableTokens<'ctx, S, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Context to interact with the host structures. 
- pub ctx: Ctx<'ctx, DB, H, CA>, + pub ctx: Ctx<'ctx, S, CA>, } -impl<'a, DB, H, CA> NativeVp for NonUsableTokens<'a, DB, H, CA> +impl<'a, S, CA> NativeVp for NonUsableTokens<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { type Error = Error; @@ -118,6 +116,7 @@ where #[cfg(test)] mod test_nuts { + use std::cell::RefCell; use std::env::temp_dir; use assert_matches::assert_matches; @@ -125,8 +124,9 @@ mod test_nuts { use namada_core::borsh::BorshSerializeExt; use namada_core::ethereum_events::testing::DAI_ERC20_ETH_ADDRESS; use namada_core::storage::TxIndex; + use namada_core::validity_predicate::VpSentinel; use namada_ethereum_bridge::storage::wrapped_erc20s; - use namada_state::testing::TestWlStorage; + use namada_state::testing::TestState; use namada_state::StorageWrite; use namada_tx::data::TxType; use proptest::prelude::*; @@ -143,31 +143,35 @@ mod test_nuts { let src_balance_key = balance_key(&nut, &src); let dst_balance_key = balance_key(&nut, &dst); - let wl_storage = { - let mut wl = TestWlStorage::default(); + let state = { + let mut state = TestState::default(); // write initial balances - wl.write(&src_balance_key, Amount::from(200_u64)) + state + .write(&src_balance_key, Amount::from(200_u64)) .expect("Test failed"); - wl.write(&dst_balance_key, Amount::from(100_u64)) + state + .write(&dst_balance_key, Amount::from(100_u64)) .expect("Test failed"); - wl.commit_block().expect("Test failed"); + state.commit_block().expect("Test failed"); // write the updated balances - wl.write_log + state + .write_log_mut() .write( &src_balance_key, Amount::from(100_u64).serialize_to_vec(), ) .expect("Test failed"); - wl.write_log + state + .write_log_mut() .write( &dst_balance_key, Amount::from(200_u64).serialize_to_vec(), ) .expect("Test failed"); - wl + state }; let keys_changed = { @@ -183,15 +187,17 @@ mod test_nuts { }; let tx = Tx::from_type(TxType::Raw); - let ctx = Ctx::<_, _, WasmCacheRwAccess>::new( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); + let ctx = Ctx::<_, WasmCacheRwAccess>::new( &Address::Internal(InternalAddress::Nut(DAI_ERC20_ETH_ADDRESS)), - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, VpCache::new(temp_dir(), 100usize), diff --git a/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs b/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs index 3f72e60d5c..68f75f89e2 100644 --- a/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs +++ b/crates/namada/src/ledger/native_vp/ethereum_bridge/vp.rs @@ -3,7 +3,6 @@ use std::collections::{BTreeSet, HashSet}; use eyre::{eyre, Result}; use namada_core::address::Address; -use namada_core::hash::StorageHasher; use namada_core::storage::Key; use namada_ethereum_bridge; use namada_ethereum_bridge::storage; @@ -11,6 +10,7 @@ use namada_ethereum_bridge::storage::escrow_key; use namada_tx::Tx; use crate::ledger::native_vp::{Ctx, NativeVp, StorageReader}; +use crate::state::StateRead; use crate::token::storage_key::{balance_key, is_balance_key}; use crate::token::Amount; use crate::vm::WasmCacheAccess; @@ -21,20 +21,18 @@ use crate::vm::WasmCacheAccess; pub struct Error(#[from] eyre::Error); /// Validity 
predicate for the Ethereum bridge -pub struct EthBridge<'ctx, DB, H, CA> +pub struct EthBridge<'ctx, S, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Context to interact with the host structures. - pub ctx: Ctx<'ctx, DB, H, CA>, + pub ctx: Ctx<'ctx, S, CA>, } -impl<'ctx, DB, H, CA> EthBridge<'ctx, DB, H, CA> +impl<'ctx, S, CA> EthBridge<'ctx, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// If the Ethereum bridge's escrow key was written to, we check @@ -45,7 +43,7 @@ where verifiers: &BTreeSet
, ) -> Result { let escrow_key = balance_key( - &self.ctx.storage.native_token, + &self.ctx.state.in_mem().native_token, &crate::ethereum_bridge::ADDRESS, ); @@ -85,10 +83,9 @@ where } } -impl<'a, DB, H, CA> NativeVp for EthBridge<'a, DB, H, CA> +impl<'a, S, CA> NativeVp for EthBridge<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { type Error = Error; @@ -115,8 +112,10 @@ where "Ethereum Bridge VP triggered", ); - if !validate_changed_keys(&self.ctx.storage.native_token, keys_changed)? - { + if !validate_changed_keys( + &self.ctx.state.in_mem().native_token, + keys_changed, + )? { return Ok(false); } @@ -165,11 +164,14 @@ fn validate_changed_keys( #[cfg(test)] mod tests { + use std::cell::RefCell; use std::default::Default; use std::env::temp_dir; use namada_core::borsh::BorshSerializeExt; + use namada_core::validity_predicate::VpSentinel; use namada_gas::TxGasMeter; + use namada_state::testing::TestState; use namada_state::StorageWrite; use namada_tx::data::TxType; use namada_tx::Tx; @@ -185,9 +187,6 @@ mod tests { use crate::ethereum_events; use crate::ethereum_events::EthAddress; use crate::ledger::gas::VpGasMeter; - use crate::state::mockdb::MockDB; - use crate::state::write_log::WriteLog; - use crate::state::{Sha256Hasher, State, WlStorage}; use crate::storage::TxIndex; use crate::token::storage_key::minted_balance_key; use crate::vm::wasm::VpCache; @@ -209,15 +208,15 @@ mod tests { } /// Initialize some dummy storage for testing - fn setup_storage() -> WlStorage { - let mut wl_storage = WlStorage::::default(); + fn setup_storage() -> TestState { + let mut state = TestState::default(); // setup a user with a balance let balance_key = balance_key( &nam(), &Address::decode(ARBITRARY_OWNER_A_ADDRESS).expect("Test failed"), ); - wl_storage + state .write( &balance_key, Amount::from(ARBITRARY_OWNER_A_INITIAL_BALANCE), @@ -237,28 +236,27 @@ mod tests { }, }, }; - config.init_storage(&mut wl_storage); - wl_storage.commit_block().expect("Test failed"); - wl_storage + config.init_storage(&mut state); + state.commit_block().expect("Test failed"); + state } /// Setup a ctx for running native vps fn setup_ctx<'a>( tx: &'a Tx, - storage: &'a State, - write_log: &'a WriteLog, + state: &'a TestState, + gas_meter: &'a RefCell, + sentinel: &'a RefCell, keys_changed: &'a BTreeSet, verifiers: &'a BTreeSet
, - ) -> Ctx<'a, MockDB, Sha256Hasher, WasmCacheRwAccess> { + ) -> Ctx<'a, TestState, WasmCacheRwAccess> { Ctx::new( &crate::ethereum_bridge::ADDRESS, - storage, - write_log, + state, tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - u64::MAX.into(), - )), + gas_meter, + sentinel, keys_changed, verifiers, VpCache::new(temp_dir(), 100usize), @@ -353,14 +351,14 @@ mod tests { /// Test that escrowing Nam is accepted. #[test] fn test_escrow_nam_accepted() { - let mut wl_storage = setup_storage(); + let mut state = setup_storage(); // debit the user's balance let account_key = balance_key( &nam(), &Address::decode(ARBITRARY_OWNER_A_ADDRESS).expect("Test failed"), ); - wl_storage - .write_log + state + .write_log_mut() .write( &account_key, Amount::from(ARBITRARY_OWNER_A_INITIAL_BALANCE - ESCROW_AMOUNT) @@ -370,8 +368,8 @@ mod tests { // credit the balance to the escrow let escrow_key = balance_key(&nam(), &crate::ethereum_bridge::ADDRESS); - wl_storage - .write_log + state + .write_log_mut() .write( &escrow_key, Amount::from( @@ -386,11 +384,16 @@ mod tests { // set up the VP let tx = Tx::from_type(TxType::Raw); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp = EthBridge { ctx: setup_ctx( &tx, - &wl_storage.storage, - &wl_storage.write_log, + &state, + &gas_meter, + &sentinel, &keys_changed, &verifiers, ), @@ -403,14 +406,14 @@ mod tests { /// Test that escrowing must increase the balance #[test] fn test_escrowed_nam_must_increase() { - let mut wl_storage = setup_storage(); + let mut state = setup_storage(); // debit the user's balance let account_key = balance_key( &nam(), &Address::decode(ARBITRARY_OWNER_A_ADDRESS).expect("Test failed"), ); - wl_storage - .write_log + state + .write_log_mut() .write( &account_key, Amount::from(ARBITRARY_OWNER_A_INITIAL_BALANCE - ESCROW_AMOUNT) @@ -420,8 +423,8 @@ mod tests { // do not credit the balance to the escrow let escrow_key = balance_key(&nam(), &crate::ethereum_bridge::ADDRESS); - wl_storage - .write_log + state + .write_log_mut() .write( &escrow_key, Amount::from(BRIDGE_POOL_ESCROW_INITIAL_BALANCE) @@ -434,11 +437,16 @@ mod tests { // set up the VP let tx = Tx::from_type(TxType::Raw); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp = EthBridge { ctx: setup_ctx( &tx, - &wl_storage.storage, - &wl_storage.write_log, + &state, + &gas_meter, + &sentinel, &keys_changed, &verifiers, ), @@ -452,14 +460,14 @@ mod tests { /// be triggered if escrowing occurs. 
#[test] fn test_escrowing_must_trigger_bridge_pool_vp() { - let mut wl_storage = setup_storage(); + let mut state = setup_storage(); // debit the user's balance let account_key = balance_key( &nam(), &Address::decode(ARBITRARY_OWNER_A_ADDRESS).expect("Test failed"), ); - wl_storage - .write_log + state + .write_log_mut() .write( &account_key, Amount::from(ARBITRARY_OWNER_A_INITIAL_BALANCE - ESCROW_AMOUNT) @@ -469,8 +477,8 @@ mod tests { // credit the balance to the escrow let escrow_key = balance_key(&nam(), &crate::ethereum_bridge::ADDRESS); - wl_storage - .write_log + state + .write_log_mut() .write( &escrow_key, Amount::from( @@ -485,11 +493,16 @@ mod tests { // set up the VP let tx = Tx::from_type(TxType::Raw); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(u64::MAX.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp = EthBridge { ctx: setup_ctx( &tx, - &wl_storage.storage, - &wl_storage.write_log, + &state, + &gas_meter, + &sentinel, &keys_changed, &verifiers, ), diff --git a/crates/namada/src/ledger/native_vp/ibc/context.rs b/crates/namada/src/ledger/native_vp/ibc/context.rs index 3a57ffc126..a4efe4e853 100644 --- a/crates/namada/src/ledger/native_vp/ibc/context.rs +++ b/crates/namada/src/ledger/native_vp/ibc/context.rs @@ -5,14 +5,14 @@ use std::collections::{BTreeSet, HashMap, HashSet}; use borsh_ext::BorshSerializeExt; use namada_core::storage::Epochs; use namada_ibc::{IbcCommonContext, IbcStorageContext}; -use namada_state::{StorageError, StorageRead, StorageWrite}; +use namada_state::{StateRead, StorageError, StorageRead, StorageWrite}; use crate::address::{Address, InternalAddress}; use crate::ibc::IbcEvent; use crate::ledger::ibc::storage::is_ibc_key; use crate::ledger::native_vp::CtxPreStorageRead; use crate::state::write_log::StorageModification; -use crate::state::{self as ledger_storage, ResultExt, StorageHasher}; +use crate::state::{PrefixIter, ResultExt}; use crate::storage::{BlockHash, BlockHeight, Epoch, Header, Key, TxIndex}; use crate::token::{self as token, Amount, DenominatedAmount}; use crate::vm::WasmCacheAccess; @@ -22,28 +22,26 @@ pub type Result = std::result::Result; /// Pseudo execution environment context for ibc native vp #[derive(Debug)] -pub struct PseudoExecutionContext<'view, 'a, DB, H, CA> +pub struct PseudoExecutionContext<'view, 'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Temporary store for pseudo execution store: HashMap, /// Context to read the previous value - ctx: CtxPreStorageRead<'view, 'a, DB, H, CA>, + ctx: CtxPreStorageRead<'view, 'a, S, CA>, /// IBC event pub event: BTreeSet, } -impl<'view, 'a, DB, H, CA> PseudoExecutionContext<'view, 'a, DB, H, CA> +impl<'view, 'a, S, CA> PseudoExecutionContext<'view, 'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Generate new pseudo execution context - pub fn new(ctx: CtxPreStorageRead<'view, 'a, DB, H, CA>) -> Self { + pub fn new(ctx: CtxPreStorageRead<'view, 'a, S, CA>) -> Self { Self { store: HashMap::new(), ctx, @@ -65,14 +63,12 @@ where } } -impl<'view, 'a, DB, H, CA> StorageRead - for PseudoExecutionContext<'view, 'a, DB, H, CA> +impl<'view, 'a, S, CA> StorageRead for PseudoExecutionContext<'view, 'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> 
namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { - type PrefixIter<'iter> = ledger_storage::PrefixIter<'iter, DB> where Self: 'iter; + type PrefixIter<'iter> = PrefixIter<'iter, ::D> where Self: 'iter; fn read_bytes(&self, key: &Key) -> Result>> { match self.store.get(key) { @@ -145,11 +141,9 @@ where } } -impl<'view, 'a, DB, H, CA> StorageWrite - for PseudoExecutionContext<'view, 'a, DB, H, CA> +impl<'view, 'a, S, CA> StorageWrite for PseudoExecutionContext<'view, 'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { fn write_bytes( @@ -172,11 +166,10 @@ where } } -impl<'view, 'a, DB, H, CA> IbcStorageContext - for PseudoExecutionContext<'view, 'a, DB, H, CA> +impl<'view, 'a, S, CA> IbcStorageContext + for PseudoExecutionContext<'view, 'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { fn emit_ibc_event(&mut self, event: IbcEvent) -> Result<()> { @@ -281,47 +274,42 @@ where } } -impl<'view, 'a, DB, H, CA> IbcCommonContext - for PseudoExecutionContext<'view, 'a, DB, H, CA> +impl<'view, 'a, S, CA> IbcCommonContext + for PseudoExecutionContext<'view, 'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { } /// Ibc native vp validation context #[derive(Debug)] -pub struct VpValidationContext<'view, 'a, DB, H, CA> +pub struct VpValidationContext<'view, 'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Context to read the post value - ctx: CtxPreStorageRead<'view, 'a, DB, H, CA>, + ctx: CtxPreStorageRead<'view, 'a, S, CA>, } -impl<'view, 'a, DB, H, CA> VpValidationContext<'view, 'a, DB, H, CA> +impl<'view, 'a, S, CA> VpValidationContext<'view, 'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Generate a new ibc vp validation context - pub fn new(ctx: CtxPreStorageRead<'view, 'a, DB, H, CA>) -> Self { + pub fn new(ctx: CtxPreStorageRead<'view, 'a, S, CA>) -> Self { Self { ctx } } } -impl<'view, 'a, DB, H, CA> StorageRead - for VpValidationContext<'view, 'a, DB, H, CA> +impl<'view, 'a, S, CA> StorageRead for VpValidationContext<'view, 'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { - type PrefixIter<'iter> = ledger_storage::PrefixIter<'iter, DB> where Self: 'iter; + type PrefixIter<'iter> = PrefixIter<'iter, ::D> where Self: 'iter; fn read_bytes(&self, key: &Key) -> Result>> { self.ctx.read_bytes(key) @@ -378,11 +366,10 @@ where } } -impl<'view, 'a, DB, H, CA> StorageWrite - for VpValidationContext<'view, 'a, DB, H, CA> +// TODO: This shouldn't be impl'd if possible +impl<'view, 'a, S, CA> StorageWrite for VpValidationContext<'view, 'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { fn write_bytes( @@ -398,11 +385,10 @@ where } } -impl<'view, 'a, DB, H, CA> IbcStorageContext - for VpValidationContext<'view, 
'a, DB, H, CA> +impl<'view, 'a, S, CA> IbcStorageContext + for VpValidationContext<'view, 'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { fn emit_ibc_event(&mut self, _event: IbcEvent) -> Result<()> { @@ -458,11 +444,10 @@ where } } -impl<'view, 'a, DB, H, CA> IbcCommonContext - for VpValidationContext<'view, 'a, DB, H, CA> +impl<'view, 'a, S, CA> IbcCommonContext + for VpValidationContext<'view, 'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { } diff --git a/crates/namada/src/ledger/native_vp/ibc/mod.rs b/crates/namada/src/ledger/native_vp/ibc/mod.rs index bf54ed1088..76c3e04e05 100644 --- a/crates/namada/src/ledger/native_vp/ibc/mod.rs +++ b/crates/namada/src/ledger/native_vp/ibc/mod.rs @@ -16,7 +16,7 @@ use namada_ibc::{ }; use namada_proof_of_stake::storage::read_pos_params; use namada_state::write_log::StorageModification; -use namada_state::StorageHasher; +use namada_state::StateRead; use namada_tx::Tx; use namada_vp_env::VpEnv; use thiserror::Error; @@ -48,20 +48,18 @@ pub enum Error { pub type VpResult = std::result::Result; /// IBC VP -pub struct Ibc<'a, DB, H, CA> +pub struct Ibc<'a, S, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Context to interact with the host structures. - pub ctx: Ctx<'a, DB, H, CA>, + pub ctx: Ctx<'a, S, CA>, } -impl<'a, DB, H, CA> NativeVp for Ibc<'a, DB, H, CA> +impl<'a, S, CA> NativeVp for Ibc<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { type Error = Error; @@ -88,10 +86,9 @@ where } } -impl<'a, DB, H, CA> Ibc<'a, DB, H, CA> +impl<'a, S, CA> Ibc<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { fn validate_state( @@ -130,7 +127,7 @@ where } // check the event - let actual = self.ctx.write_log.get_ibc_events(); + let actual = self.ctx.state.write_log().get_ibc_events(); if *actual != ctx.borrow().event { return Err(Error::IbcEvent(format!( "The IBC event is invalid: Actual {:?}, Expected {:?}", @@ -162,7 +159,8 @@ where pub fn validation_params(&self) -> VpResult { use std::str::FromStr; let chain_id = self.ctx.get_chain_id().map_err(Error::NativeVpError)?; - let proof_specs = namada_state::ics23_specs::ibc_proof_specs::(); + let proof_specs = + namada_state::ics23_specs::ibc_proof_specs::<::H>(); let pos_params = read_pos_params(&self.ctx.post()).map_err(Error::NativeVpError)?; let pipeline_len = pos_params.pipeline_len; @@ -317,9 +315,10 @@ mod tests { }; use ibc_testkit::testapp::ibc::clients::mock::consensus_state::MockConsensusState; use ibc_testkit::testapp::ibc::clients::mock::header::MockHeader; + use namada_core::validity_predicate::VpSentinel; use namada_gas::TxGasMeter; use namada_governance::parameters::GovernanceParameters; - use namada_state::testing::TestWlStorage; + use namada_state::testing::TestState; use namada_state::StorageRead; use namada_tx::data::TxType; use namada_tx::{Code, Data, Section, Signature, Tx}; @@ -421,15 +420,15 @@ mod tests { ClientId::from_str(&id).expect("Creating a client ID failed") } - fn init_storage() -> TestWlStorage { - let 
mut wl_storage = TestWlStorage::default(); + fn init_storage() -> TestState { + let mut state = TestState::default(); // initialize the storage - ibc::init_genesis_storage(&mut wl_storage); + ibc::init_genesis_storage(&mut state); let gov_params = GovernanceParameters::default(); - gov_params.init_storage(&mut wl_storage).unwrap(); + gov_params.init_storage(&mut state).unwrap(); pos::test_utils::test_init_genesis( - &mut wl_storage, + &mut state, namada_proof_of_stake::OwnedPosParams::default(), vec![get_dummy_genesis_validator()].into_iter(), Epoch(1), @@ -441,31 +440,31 @@ mod tests { min_num_of_blocks: 10, min_duration: DurationSecs(100), }; - wl_storage - .write_log + state + .write_log_mut() .write(&epoch_duration_key, epoch_duration.serialize_to_vec()) .expect("write failed"); // max_expected_time_per_block let time = DurationSecs::from(Duration::new(60, 0)); let time_key = get_max_expected_time_per_block_key(); - wl_storage - .write_log + state + .write_log_mut() .write(&time_key, namada_core::encode(&time)) .expect("write failed"); // set a dummy header - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(1)) .unwrap(); - wl_storage + state } - fn insert_init_client(wl_storage: &mut TestWlStorage) { + fn insert_init_client(state: &mut TestState) { // insert a mock client type let client_id = get_client_id(); // insert a mock client state @@ -477,41 +476,39 @@ mod tests { }; let client_state = MockClientState::new(header); let bytes = Protobuf::::encode_vec(client_state); - wl_storage - .write_log + state + .write_log_mut() .write(&client_state_key, bytes) .expect("write failed"); // insert a mock consensus state let consensus_key = consensus_state_key(&client_id, height); let consensus_state = MockConsensusState::new(header); let bytes = Protobuf::::encode_vec(consensus_state); - wl_storage - .write_log + state + .write_log_mut() .write(&consensus_key, bytes) .expect("write failed"); // insert update time and height let client_update_time_key = client_update_timestamp_key(&client_id); - let time = wl_storage - .storage - .get_block_header(None) + let time = StateRead::get_block_header(state, None) .unwrap() .0 .unwrap() .time; let bytes = TmTime::try_from(time).unwrap().encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&client_update_time_key, bytes) .expect("write failed"); let client_update_height_key = client_update_height_key(&client_id); - let host_height = wl_storage.storage.get_block_height().0; + let host_height = state.in_mem().get_block_height().0; let host_height = Height::new(0, host_height.0).expect("invalid height"); - wl_storage - .write_log + state + .write_log_mut() .write(&client_update_height_key, host_height.encode_vec()) .expect("write failed"); - wl_storage.write_log.commit_tx(); + state.write_log_mut().commit_tx(); } fn get_connection_id() -> ConnectionId { @@ -567,8 +564,8 @@ mod tests { ChanCounterparty::new(counterpart_port_id, Some(counterpart_channel_id)) } - fn get_next_seq(wl_storage: &TestWlStorage, key: &Key) -> Sequence { - let (val, _) = wl_storage.storage.read(key).expect("read failed"); + fn get_next_seq(state: &TestState, key: &Key) -> Sequence { + let (val, _) = state.db_read(key).expect("read failed"); match val { Some(v) => { // IBC related data is encoded without borsh @@ -581,8 +578,8 @@ mod tests { } } - fn increment_sequence(wl_storage: &mut 
TestWlStorage, key: &Key) { - let count = match wl_storage.read_bytes(key).expect("read failed") { + fn increment_sequence(state: &mut TestState, key: &Key) { + let count = match state.read_bytes(key).expect("read failed") { Some(value) => { let count: [u8; 8] = value.try_into().expect("decoding a count failed"); @@ -590,21 +587,21 @@ mod tests { } None => 0, }; - wl_storage - .write_log + state + .write_log_mut() .write(key, (count + 1).to_be_bytes().to_vec()) .expect("write failed"); } - fn increment_counter(wl_storage: &mut TestWlStorage, key: &Key) { - let count = match wl_storage.read_bytes(key).expect("read failed") { + fn increment_counter(state: &mut TestState, key: &Key) { + let count = match state.read_bytes(key).expect("read failed") { Some(value) => { u64::try_from_slice(&value).expect("invalid counter value") } None => unreachable!("The counter should be initialized"), }; - wl_storage - .write_log + state + .write_log_mut() .write(key, (count + 1).serialize_to_vec()) .expect("write failed"); } @@ -659,7 +656,7 @@ mod tests { #[test] fn test_create_client() { - let mut wl_storage = init_storage(); + let mut state = init_storage(); let mut keys_changed = BTreeSet::new(); let height = Height::new(0, 1).unwrap(); @@ -679,22 +676,22 @@ mod tests { // client state let client_state_key = client_state_key(&get_client_id()); let bytes = Protobuf::::encode_vec(client_state); - wl_storage - .write_log + state + .write_log_mut() .write(&client_state_key, bytes) .expect("write failed"); keys_changed.insert(client_state_key); // client consensus let consensus_key = consensus_state_key(&client_id, height); let bytes = Protobuf::::encode_vec(consensus_state); - wl_storage - .write_log + state + .write_log_mut() .write(&consensus_key, bytes) .expect("write failed"); keys_changed.insert(consensus_key); // client counter let client_counter_key = client_counter_key(); - increment_counter(&mut wl_storage, &client_counter_key); + increment_counter(&mut state, &client_counter_key); keys_changed.insert(client_counter_key); let event = RawIbcEvent::CreateClient(CreateClient::new( @@ -703,26 +700,26 @@ mod tests { client_state.latest_height(), )); let message_event = RawIbcEvent::Message(MessageEvent::Client); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); let mut outer_tx = Tx::from_type(TxType::Raw); - outer_tx.header.chain_id = wl_storage.storage.chain_id.clone(); + outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_code(Code::new(tx_code, None)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( @@ -730,13 +727,14 @@ mod tests { [(0, keypair_1())].into_iter().collect(), None, ))); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &outer_tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -752,19 +750,19 @@ mod 
tests { #[test] fn test_create_client_fail() { - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); let mut keys_changed = BTreeSet::new(); // initialize the storage - ibc::init_genesis_storage(&mut wl_storage); + ibc::init_genesis_storage(&mut state); // set a dummy header - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(1)) .unwrap(); @@ -777,8 +775,8 @@ mod tests { let client_state = MockClientState::new(header); let client_state_key = client_state_key(&get_client_id()); let bytes = Protobuf::::encode_vec(client_state); - wl_storage - .write_log + state + .write_log_mut() .write(&client_state_key, bytes) .expect("write failed"); keys_changed.insert(client_state_key); @@ -796,25 +794,26 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(tx_code, None) .add_serialized_data(tx_data) .sign_wrapper(keypair_1()); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -830,18 +829,18 @@ mod tests { #[test] fn test_update_client() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + let mut state = init_storage(); + insert_init_client(&mut state); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); @@ -863,8 +862,8 @@ mod tests { // client state let client_state = MockClientState::new(header); let bytes = Protobuf::::encode_vec(client_state); - wl_storage - .write_log + state + .write_log_mut() .write(&client_state_key, bytes) .expect("write failed"); keys_changed.insert(client_state_key); @@ -872,33 +871,31 @@ mod tests { let consensus_key = consensus_state_key(&client_id, height); let consensus_state = MockConsensusState::new(header); let bytes = Protobuf::::encode_vec(consensus_state); - wl_storage - .write_log + state + .write_log_mut() .write(&consensus_key, bytes) .expect("write failed"); keys_changed.insert(consensus_key); // client update time let client_update_time_key = client_update_timestamp_key(&client_id); - let time = wl_storage - .storage - .get_block_header(None) + let time = StateRead::get_block_header(&state, None) .unwrap() .0 .unwrap() .time; let bytes = TmTime::try_from(time).unwrap().encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&client_update_time_key, bytes) .expect("write failed"); keys_changed.insert(client_update_time_key); // 
client update height let client_update_height_key = client_update_height_key(&client_id); - let host_height = wl_storage.storage.get_block_height().0; + let host_height = state.in_mem().get_block_height().0; let host_height = Height::new(0, host_height.0).expect("invalid height"); - wl_storage - .write_log + state + .write_log_mut() .write(&client_update_height_key, host_height.encode_vec()) .expect("write failed"); keys_changed.insert(client_update_height_key); @@ -912,11 +909,11 @@ mod tests { Protobuf::::encode_vec(header), )); let message_event = RawIbcEvent::Message(MessageEvent::Client); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); @@ -924,25 +921,26 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(tx_code, None) .add_serialized_data(tx_data) .sign_wrapper(keypair_1()); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -958,17 +956,17 @@ mod tests { #[test] fn test_init_connection() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + let mut state = init_storage(); + insert_init_client(&mut state); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); @@ -995,8 +993,8 @@ mod tests { ) .expect("invalid connection"); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); keys_changed.insert(conn_key); @@ -1004,14 +1002,14 @@ mod tests { let client_conn_key = client_connections_key(&msg.client_id_on_a); let conn_list = conn_id.to_string(); let bytes = conn_list.serialize_to_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&client_conn_key, bytes) .expect("write failed"); keys_changed.insert(client_conn_key); // connection counter let conn_counter_key = connection_counter_key(); - increment_counter(&mut wl_storage, &conn_counter_key); + increment_counter(&mut state, &conn_counter_key); keys_changed.insert(conn_counter_key); // event let event = RawIbcEvent::OpenInitConnection(ConnOpenInit::new( @@ -1020,11 +1018,11 @@ mod tests { msg.counterparty.client_id().clone(), )); let message_event = RawIbcEvent::Message(MessageEvent::Connection); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() 
.emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); @@ -1032,7 +1030,7 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let mut outer_tx = Tx::from_type(TxType::Raw); - outer_tx.header.chain_id = wl_storage.storage.chain_id.clone(); + outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_code(Code::new(tx_code, None)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( @@ -1040,20 +1038,21 @@ mod tests { [(0, keypair_1())].into_iter().collect(), None, ))); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &outer_tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -1068,18 +1067,18 @@ mod tests { #[test] fn test_init_connection_fail() { - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); let mut keys_changed = BTreeSet::new(); // initialize the storage - ibc::init_genesis_storage(&mut wl_storage); + ibc::init_genesis_storage(&mut state); // set a dummy header - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(1)) .unwrap(); @@ -1106,8 +1105,8 @@ mod tests { ) .expect("invalid connection"); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); keys_changed.insert(conn_key); @@ -1115,14 +1114,14 @@ mod tests { let client_conn_key = client_connections_key(&msg.client_id_on_a); let conn_list = conn_id.to_string(); let bytes = conn_list.serialize_to_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&client_conn_key, bytes) .expect("write failed"); keys_changed.insert(client_conn_key); // connection counter let conn_counter_key = connection_counter_key(); - increment_counter(&mut wl_storage, &conn_counter_key); + increment_counter(&mut state, &conn_counter_key); keys_changed.insert(conn_counter_key); // No event @@ -1131,25 +1130,26 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(tx_code, None) .add_serialized_data(tx_data) .sign_wrapper(keypair_1()); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -1164,17 +1164,17 @@ mod tests { #[test] fn test_try_connection() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - 
insert_init_client(&mut wl_storage); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + let mut state = init_storage(); + insert_init_client(&mut state); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); @@ -1215,8 +1215,8 @@ mod tests { ) .expect("invalid connection"); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); keys_changed.insert(conn_key); @@ -1224,14 +1224,14 @@ mod tests { let client_conn_key = client_connections_key(&msg.client_id_on_b); let conn_list = conn_id.to_string(); let bytes = conn_list.serialize_to_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&client_conn_key, bytes) .expect("write failed"); keys_changed.insert(client_conn_key); // connection counter let conn_counter_key = connection_counter_key(); - increment_counter(&mut wl_storage, &conn_counter_key); + increment_counter(&mut state, &conn_counter_key); keys_changed.insert(conn_counter_key); // event let event = RawIbcEvent::OpenTryConnection(ConnOpenTry::new( @@ -1241,36 +1241,37 @@ mod tests { msg.counterparty.client_id().clone(), )); let message_event = RawIbcEvent::Message(MessageEvent::Connection); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(tx_code, None) .add_serialized_data(tx_data) .sign_wrapper(keypair_1()); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -1286,34 +1287,34 @@ mod tests { #[test] fn test_ack_connection() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); + let mut state = init_storage(); + insert_init_client(&mut state); // insert an Init connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Init); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), 
BlockHeight(2)) .unwrap(); // update the connection to Open let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); keys_changed.insert(conn_key); @@ -1349,11 +1350,11 @@ mod tests { counterparty.client_id().clone(), )); let message_event = RawIbcEvent::Message(MessageEvent::Connection); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_code = vec![]; @@ -1361,7 +1362,7 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let mut outer_tx = Tx::from_type(TxType::Raw); - outer_tx.header.chain_id = wl_storage.storage.chain_id.clone(); + outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_code(Code::new(tx_code, None)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( @@ -1369,20 +1370,21 @@ mod tests { [(0, keypair_1())].into_iter().collect(), None, ))); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &outer_tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -1397,34 +1399,34 @@ mod tests { #[test] fn test_confirm_connection() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); + let mut state = init_storage(); + insert_init_client(&mut state); // insert a TryOpen connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::TryOpen); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); // update the connection to Open let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); keys_changed.insert(conn_key); @@ -1446,11 +1448,11 @@ mod tests { counterparty.client_id().clone(), )); let message_event = RawIbcEvent::Message(MessageEvent::Connection); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_code = vec![]; @@ -1458,7 +1460,7 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let mut outer_tx = Tx::from_type(TxType::Raw); - outer_tx.header.chain_id = wl_storage.storage.chain_id.clone(); + outer_tx.header.chain_id = 
state.in_mem().chain_id.clone(); outer_tx.set_code(Code::new(tx_code, None)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( @@ -1466,20 +1468,21 @@ mod tests { [(0, keypair_1())].into_iter().collect(), None, ))); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &outer_tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -1494,27 +1497,27 @@ mod tests { #[test] fn test_init_channel() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); + let mut state = init_storage(); + insert_init_client(&mut state); // insert an opened connection let conn_id = get_connection_id(); let conn_key = connection_key(&conn_id); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); @@ -1541,26 +1544,26 @@ mod tests { ) .unwrap(); let bytes = channel.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); keys_changed.insert(channel_key); // channel counter let chan_counter_key = channel_counter_key(); - increment_counter(&mut wl_storage, &chan_counter_key); + increment_counter(&mut state, &chan_counter_key); keys_changed.insert(chan_counter_key); // sequences let channel_id = get_channel_id(); let port_id = msg.port_id_on_a.clone(); let send_key = next_sequence_send_key(&port_id, &channel_id); - increment_sequence(&mut wl_storage, &send_key); + increment_sequence(&mut state, &send_key); keys_changed.insert(send_key); let recv_key = next_sequence_recv_key(&port_id, &channel_id); - increment_sequence(&mut wl_storage, &recv_key); + increment_sequence(&mut state, &recv_key); keys_changed.insert(recv_key); let ack_key = next_sequence_ack_key(&port_id, &channel_id); - increment_sequence(&mut wl_storage, &ack_key); + increment_sequence(&mut state, &ack_key); keys_changed.insert(ack_key); // event let event = RawIbcEvent::OpenInitChannel(ChanOpenInit::new( @@ -1571,11 +1574,11 @@ mod tests { msg.version_proposal.clone(), )); let message_event = RawIbcEvent::Message(MessageEvent::Channel); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); @@ -1583,7 +1586,7 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let mut outer_tx = Tx::from_type(TxType::Raw); - outer_tx.header.chain_id = wl_storage.storage.chain_id.clone(); + 
outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_code(Code::new(tx_code, None)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( @@ -1591,20 +1594,21 @@ mod tests { [(0, keypair_1())].into_iter().collect(), None, ))); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &outer_tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -1619,26 +1623,26 @@ mod tests { #[test] fn test_try_channel() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); + let mut state = init_storage(); + insert_init_client(&mut state); // insert an open connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); @@ -1664,26 +1668,26 @@ mod tests { let channel_key = channel_key(&get_port_id(), &get_channel_id()); let channel = get_channel(ChanState::TryOpen, Order::Unordered); let bytes = channel.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); keys_changed.insert(channel_key); // channel counter let chan_counter_key = channel_counter_key(); - increment_counter(&mut wl_storage, &chan_counter_key); + increment_counter(&mut state, &chan_counter_key); keys_changed.insert(chan_counter_key); // sequences let channel_id = get_channel_id(); let port_id = msg.port_id_on_a.clone(); let send_key = next_sequence_send_key(&port_id, &channel_id); - increment_sequence(&mut wl_storage, &send_key); + increment_sequence(&mut state, &send_key); keys_changed.insert(send_key); let recv_key = next_sequence_recv_key(&port_id, &channel_id); - increment_sequence(&mut wl_storage, &recv_key); + increment_sequence(&mut state, &recv_key); keys_changed.insert(recv_key); let ack_key = next_sequence_ack_key(&port_id, &channel_id); - increment_sequence(&mut wl_storage, &ack_key); + increment_sequence(&mut state, &ack_key); keys_changed.insert(ack_key); // event let event = RawIbcEvent::OpenTryChannel(ChanOpenTry::new( @@ -1695,11 +1699,11 @@ mod tests { msg.version_supported_on_a.clone(), )); let message_event = RawIbcEvent::Message(MessageEvent::Channel); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); @@ -1707,7 +1711,7 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let mut 
outer_tx = Tx::from_type(TxType::Raw); - outer_tx.header.chain_id = wl_storage.storage.chain_id.clone(); + outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_code(Code::new(tx_code, None)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( @@ -1715,20 +1719,21 @@ mod tests { [(0, keypair_1())].into_iter().collect(), None, ))); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &outer_tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -1743,34 +1748,34 @@ mod tests { #[test] fn test_ack_channel() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); + let mut state = init_storage(); + insert_init_client(&mut state); // insert an open connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); // insert an Init channel let channel_key = channel_key(&get_port_id(), &get_channel_id()); let channel = get_channel(ChanState::Init, Order::Unordered); let bytes = channel.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); @@ -1790,8 +1795,8 @@ mod tests { // update the channel to Open let channel = get_channel(ChanState::Open, Order::Unordered); let bytes = channel.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); keys_changed.insert(channel_key); @@ -1804,11 +1809,11 @@ mod tests { get_connection_id(), )); let message_event = RawIbcEvent::Message(MessageEvent::Channel); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); @@ -1816,7 +1821,7 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); let mut outer_tx = Tx::from_type(TxType::Raw); - outer_tx.header.chain_id = wl_storage.storage.chain_id.clone(); + outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_code(Code::new(tx_code, None)); outer_tx.set_data(Data::new(tx_data)); outer_tx.add_section(Section::Signature(Signature::new( @@ -1824,20 +1829,21 @@ mod tests { [(0, keypair_1())].into_iter().collect(), None, ))); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let 
(vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &outer_tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -1852,34 +1858,34 @@ mod tests { #[test] fn test_confirm_channel() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); + let mut state = init_storage(); + insert_init_client(&mut state); // insert an open connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); // insert a TryOpen channel let channel_key = channel_key(&get_port_id(), &get_channel_id()); let channel = get_channel(ChanState::TryOpen, Order::Ordered); let bytes = channel.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); @@ -1896,8 +1902,8 @@ mod tests { // update the channel to Open let channel = get_channel(ChanState::Open, Order::Ordered); let bytes = channel.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); keys_changed.insert(channel_key); @@ -1911,11 +1917,11 @@ mod tests { get_connection_id(), )); let message_event = RawIbcEvent::Message(MessageEvent::Channel); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); @@ -1923,25 +1929,26 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(tx_code, None) .add_serialized_data(tx_data) .sign_wrapper(keypair_1()); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -1959,42 +1966,42 @@ mod tests { #[test] fn test_send_packet() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); + let mut state = init_storage(); + insert_init_client(&mut state); // insert an open connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = 
conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); // insert an Open channel let channel_key = channel_key(&get_port_id(), &get_channel_id()); let channel = get_channel(ChanState::Open, Order::Unordered); let bytes = channel.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); // init balance let sender = established_address_1(); let balance_key = balance_key(&nam(), &sender); let amount = Amount::native_whole(100); - wl_storage - .write_log + state + .write_log_mut() .write(&balance_key, amount.serialize_to_vec()) .expect("write failed"); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); @@ -2017,9 +2024,9 @@ mod tests { // the sequence send let seq_key = next_sequence_send_key(&get_port_id(), &get_channel_id()); - let sequence = get_next_seq(&wl_storage, &seq_key); - wl_storage - .write_log + let sequence = get_next_seq(&state, &seq_key); + state + .write_log_mut() .write(&seq_key, (u64::from(sequence) + 1).to_be_bytes().to_vec()) .expect("write failed"); keys_changed.insert(seq_key); @@ -2030,8 +2037,8 @@ mod tests { commitment_key(&msg.port_id_on_a, &msg.chan_id_on_a, sequence); let commitment = commitment(&packet); let bytes = commitment.into_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&commitment_key, bytes) .expect("write failed"); keys_changed.insert(commitment_key); @@ -2044,8 +2051,8 @@ mod tests { memo: msg.packet_data.memo.clone(), }; let event = RawIbcEvent::Module(ModuleEvent::from(transfer_event)); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let event = RawIbcEvent::SendPacket(SendPacket::new( packet, @@ -2053,11 +2060,11 @@ mod tests { get_connection_id(), )); let message_event = RawIbcEvent::Message(MessageEvent::Channel); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); @@ -2065,25 +2072,26 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(tx_code, None) .add_serialized_data(tx_data) .sign_wrapper(keypair_1()); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -2098,34 +2106,34 @@ mod tests { #[test] fn test_recv_packet() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut 
wl_storage); + let mut state = init_storage(); + insert_init_client(&mut state); // insert an open connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); // insert an open channel let channel_key = channel_key(&get_port_id(), &get_channel_id()); let channel = get_channel(ChanState::Open, Order::Unordered); let bytes = channel.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); @@ -2168,8 +2176,8 @@ mod tests { msg.packet.seq_on_a, ); let bytes = [1_u8].to_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&receipt_key, bytes) .expect("write failed"); keys_changed.insert(receipt_key); @@ -2182,8 +2190,8 @@ mod tests { let transfer_ack = AcknowledgementStatus::success(ack_success_b64()); let acknowledgement: Acknowledgement = transfer_ack.into(); let bytes = sha2::Sha256::digest(acknowledgement.as_bytes()).to_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&ack_key, bytes) .expect("write failed"); keys_changed.insert(ack_key); @@ -2196,15 +2204,15 @@ mod tests { let trace_hash = calc_hash(coin.denom.to_string()); let denom_key = ibc_denom_key(receiver.to_string(), &trace_hash); let bytes = coin.denom.to_string().serialize_to_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&denom_key, bytes) .expect("write failed"); keys_changed.insert(denom_key); let denom_key = ibc_denom_key(nam().to_string(), &trace_hash); let bytes = coin.denom.to_string().serialize_to_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&denom_key, bytes) .expect("write failed"); keys_changed.insert(denom_key); @@ -2218,16 +2226,16 @@ mod tests { success: true, }; let event = RawIbcEvent::Module(ModuleEvent::from(recv_event)); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let denom_trace_event = DenomTraceEvent { trace_hash: Some(trace_hash), denom: coin.denom, }; let event = RawIbcEvent::Module(ModuleEvent::from(denom_trace_event)); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let event = RawIbcEvent::ReceivePacket(ReceivePacket::new( msg.packet.clone(), @@ -2235,11 +2243,11 @@ mod tests { get_connection_id(), )); let message_event = RawIbcEvent::Message(MessageEvent::Channel); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let event = RawIbcEvent::WriteAcknowledgement(WriteAcknowledgement::new( @@ -2248,11 +2256,11 @@ mod tests { get_connection_id(), )); let message_event = RawIbcEvent::Message(MessageEvent::Channel); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let 
tx_index = TxIndex::default(); @@ -2260,25 +2268,26 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(tx_code, None) .add_serialized_data(tx_data) .sign_wrapper(keypair_1()); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -2293,23 +2302,23 @@ mod tests { #[test] fn test_ack_packet() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); + let mut state = init_storage(); + insert_init_client(&mut state); // insert an open connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); // insert an Open channel let channel_key = channel_key(&get_port_id(), &get_channel_id()); let channel = get_channel(ChanState::Open, Order::Unordered); let bytes = channel.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); // commitment @@ -2342,19 +2351,19 @@ mod tests { ); let commitment = commitment(&packet); let bytes = commitment.into_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&commitment_key, bytes) .expect("write failed"); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); @@ -2369,8 +2378,8 @@ mod tests { }; // delete the commitment - wl_storage - .write_log + state + .write_log_mut() .delete(&commitment_key) .expect("delete failed"); keys_changed.insert(commitment_key); @@ -2386,8 +2395,8 @@ mod tests { acknowledgement: transfer_ack, }; let event = RawIbcEvent::Module(ModuleEvent::from(ack_event)); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let event = RawIbcEvent::AcknowledgePacket(AcknowledgePacket::new( packet, @@ -2395,11 +2404,11 @@ mod tests { get_connection_id(), )); let message_event = RawIbcEvent::Message(MessageEvent::Channel); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); @@ -2407,25 +2416,26 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(tx_code, None) 
.add_serialized_data(tx_data) .sign_wrapper(keypair_1()); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -2440,31 +2450,31 @@ mod tests { #[test] fn test_timeout_packet() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); + let mut state = init_storage(); + insert_init_client(&mut state); // insert an open connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); // insert an Open channel let channel_key = channel_key(&get_port_id(), &get_channel_id()); let channel = get_channel(ChanState::Open, Order::Unordered); let bytes = channel.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); // init the escrow balance let balance_key = balance_key(&nam(), &Address::Internal(InternalAddress::Ibc)); let amount = Amount::native_whole(100); - wl_storage - .write_log + state + .write_log_mut() .write(&balance_key, amount.serialize_to_vec()) .expect("write failed"); // commitment @@ -2497,19 +2507,19 @@ mod tests { ); let commitment = commitment(&packet); let bytes = commitment.into_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&commitment_key, bytes) .expect("write failed"); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); @@ -2523,8 +2533,8 @@ mod tests { }; // delete the commitment - wl_storage - .write_log + state + .write_log_mut() .delete(&commitment_key) .expect("delete failed"); keys_changed.insert(commitment_key); @@ -2538,19 +2548,19 @@ mod tests { memo: data.memo, }; let event = RawIbcEvent::Module(ModuleEvent::from(timeout_event)); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let event = RawIbcEvent::TimeoutPacket(TimeoutPacket::new( packet, Order::Unordered, )); let message_event = RawIbcEvent::Message(MessageEvent::Channel); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); @@ -2558,25 +2568,26 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(tx_code, None) .add_serialized_data(tx_data) .sign_wrapper(keypair_1()); - let gas_meter = VpGasMeter::new_from_tx_meter( + let 
gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -2591,31 +2602,31 @@ mod tests { #[test] fn test_timeout_on_close_packet() { let mut keys_changed = BTreeSet::new(); - let mut wl_storage = init_storage(); - insert_init_client(&mut wl_storage); + let mut state = init_storage(); + insert_init_client(&mut state); // insert an open connection let conn_key = connection_key(&get_connection_id()); let conn = get_connection(ConnState::Open); let bytes = conn.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&conn_key, bytes) .expect("write failed"); // insert an Open channel let channel_key = channel_key(&get_port_id(), &get_channel_id()); let channel = get_channel(ChanState::Open, Order::Unordered); let bytes = channel.encode_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&channel_key, bytes) .expect("write failed"); // init the escrow balance let balance_key = balance_key(&nam(), &Address::Internal(InternalAddress::Ibc)); let amount = Amount::native_whole(100); - wl_storage - .write_log + state + .write_log_mut() .write(&balance_key, amount.serialize_to_vec()) .expect("write failed"); // commitment @@ -2648,19 +2659,19 @@ mod tests { ); let commitment = commitment(&packet); let bytes = commitment.into_vec(); - wl_storage - .write_log + state + .write_log_mut() .write(&commitment_key, bytes) .expect("write failed"); - wl_storage.write_log.commit_tx(); - wl_storage.commit_block().expect("commit failed"); + state.write_log_mut().commit_tx(); + state.commit_block().expect("commit failed"); // for next block - wl_storage - .storage + state + .in_mem_mut() .set_header(get_dummy_header()) .expect("Setting a dummy header shouldn't fail"); - wl_storage - .storage + state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); @@ -2675,8 +2686,8 @@ mod tests { }; // delete the commitment - wl_storage - .write_log + state + .write_log_mut() .delete(&commitment_key) .expect("delete failed"); keys_changed.insert(commitment_key); @@ -2690,19 +2701,19 @@ mod tests { memo: data.memo, }; let event = RawIbcEvent::Module(ModuleEvent::from(timeout_event)); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let event = RawIbcEvent::TimeoutPacket(TimeoutPacket::new( packet, Order::Unordered, )); let message_event = RawIbcEvent::Message(MessageEvent::Channel); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(message_event.try_into().unwrap()); - wl_storage - .write_log + state + .write_log_mut() .emit_ibc_event(event.try_into().unwrap()); let tx_index = TxIndex::default(); @@ -2710,25 +2721,26 @@ mod tests { let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let mut tx = Tx::new(wl_storage.storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(tx_code, None) .add_serialized_data(tx_data) .sign_wrapper(keypair_1()); - let gas_meter = VpGasMeter::new_from_tx_meter( + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( 
&TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, diff --git a/crates/namada/src/ledger/native_vp/masp.rs b/crates/namada/src/ledger/native_vp/masp.rs index c76b2995b7..a49ae67c74 100644 --- a/crates/namada/src/ledger/native_vp/masp.rs +++ b/crates/namada/src/ledger/native_vp/masp.rs @@ -15,7 +15,7 @@ use namada_core::masp::encode_asset_type; use namada_core::storage::{IndexedTx, Key}; use namada_gas::MASP_VERIFY_SHIELDED_TX_GAS; use namada_sdk::masp::verify_shielded_tx; -use namada_state::{OptionExt, ResultExt}; +use namada_state::{OptionExt, ResultExt, StateRead}; use namada_token::read_denom; use namada_tx::Tx; use namada_vp_env::VpEnv; @@ -48,14 +48,13 @@ pub enum Error { pub type Result = std::result::Result; /// MASP VP -pub struct MaspVp<'a, DB, H, CA> +pub struct MaspVp<'a, S, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: namada_state::StorageHasher, + S: StateRead, CA: WasmCacheAccess, { /// Context to interact with the host structures. - pub ctx: Ctx<'a, DB, H, CA>, + pub ctx: Ctx<'a, S, CA>, } struct TransparentTransferData { @@ -65,10 +64,9 @@ struct TransparentTransferData { amount: Amount, } -impl<'a, DB, H, CA> MaspVp<'a, DB, H, CA> +impl<'a, S, CA> MaspVp<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + namada_state::StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { // Check that the transaction correctly revealed the nullifiers @@ -397,10 +395,9 @@ fn unepoched_tokens( Ok(unepoched_tokens) } -impl<'a, DB, H, CA> NativeVp for MaspVp<'a, DB, H, CA> +impl<'a, S, CA> NativeVp for MaspVp<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + namada_state::StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { type Error = Error; @@ -412,7 +409,7 @@ where _verifiers: &BTreeSet
, ) -> Result { let epoch = self.ctx.get_block_epoch()?; - let conversion_state = self.ctx.storage.get_conversion_state(); + let conversion_state = self.ctx.state.in_mem().get_conversion_state(); let shielded_tx = self.ctx.get_shielded_action(tx_data)?; if u64::from(self.ctx.get_block_height()?) diff --git a/crates/namada/src/ledger/native_vp/mod.rs b/crates/namada/src/ledger/native_vp/mod.rs index cbfe3b225f..d0cb02c430 100644 --- a/crates/namada/src/ledger/native_vp/mod.rs +++ b/crates/namada/src/ledger/native_vp/mod.rs @@ -9,6 +9,7 @@ pub mod parameters; use std::cell::RefCell; use std::collections::BTreeSet; +use std::fmt::Debug; use borsh::BorshDeserialize; use eyre::WrapErr; @@ -18,6 +19,7 @@ use namada_core::validity_predicate::VpSentinel; use namada_gas::GasMetering; use namada_tx::Tx; pub use namada_vp_env::VpEnv; +use state::StateRead; use super::vp_host_fns; use crate::address::Address; @@ -25,8 +27,7 @@ use crate::hash::Hash; use crate::ibc::IbcEvent; use crate::ledger::gas::VpGasMeter; use crate::state; -use crate::state::write_log::WriteLog; -use crate::state::{ResultExt, State, StorageHasher, StorageRead}; +use crate::state::{ResultExt, StorageRead}; use crate::storage::{BlockHash, BlockHeight, Epoch, Header, Key, TxIndex}; use crate::vm::prefix_iter::PrefixIterators; use crate::vm::WasmCacheAccess; @@ -56,24 +57,21 @@ pub trait NativeVp { /// wrapper types and `eval_runner` field. The references must not be changed /// when [`Ctx`] is mutable. #[derive(Debug)] -pub struct Ctx<'a, DB, H, CA> +pub struct Ctx<'a, S, CA> where - DB: state::DB + for<'iter> state::DBIter<'iter>, - H: StorageHasher, + S: StateRead, CA: WasmCacheAccess, { /// The address of the account that owns the VP pub address: &'a Address, /// Storage prefix iterators. - pub iterators: RefCell>, + pub iterators: RefCell::D>>, /// VP gas meter. - pub gas_meter: RefCell, + pub gas_meter: &'a RefCell, /// Errors sentinel - pub sentinel: RefCell, - /// Read-only access to the storage. - pub storage: &'a State, - /// Read-only access to the write log. - pub write_log: &'a WriteLog, + pub sentinel: &'a RefCell, + /// Read-only state access. + pub state: &'a S, /// The transaction code is used for signature verification pub tx: &'a Tx, /// The transaction index is used to obtain the shielded transaction's @@ -95,42 +93,39 @@ where /// Read access to the prior storage (state before tx execution) via /// [`trait@StorageRead`]. #[derive(Debug)] -pub struct CtxPreStorageRead<'view, 'a: 'view, DB, H, CA> +pub struct CtxPreStorageRead<'view, 'a: 'view, S, CA> where - DB: state::DB + for<'iter> state::DBIter<'iter>, - H: StorageHasher, + S: StateRead, CA: WasmCacheAccess, { - ctx: &'view Ctx<'a, DB, H, CA>, + ctx: &'view Ctx<'a, S, CA>, } /// Read access to the posterior storage (state after tx execution) via /// [`trait@StorageRead`]. 
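Reviewer note on the native VP context changes above: `Ctx` no longer owns its gas meter and sentinel. It now borrows `&RefCell<VpGasMeter>` and `&RefCell<VpSentinel>` created by the caller, so the protocol layer can keep charging the same meter after a VP returns and can inspect the sentinel without tearing the context apart. A minimal, self-contained sketch of that ownership pattern (the `GasMeter`, `Sentinel` and `Ctx` types below are simplified stand-ins, not the namada APIs):

use std::cell::RefCell;

// Simplified stand-ins for VpGasMeter / VpSentinel.
struct GasMeter { consumed: u64, limit: u64 }
struct Sentinel { out_of_gas: bool }

impl GasMeter {
    fn consume(&mut self, gas: u64) -> Result<(), ()> {
        self.consumed += gas;
        if self.consumed > self.limit { Err(()) } else { Ok(()) }
    }
}

// The context only borrows the shared cells, mirroring
// `gas_meter: &'a RefCell<VpGasMeter>, sentinel: &'a RefCell<VpSentinel>`.
struct Ctx<'a> {
    gas_meter: &'a RefCell<GasMeter>,
    sentinel: &'a RefCell<Sentinel>,
}

impl<'a> Ctx<'a> {
    // A `&self` host function can still charge gas via interior mutability.
    fn charge(&self, gas: u64) {
        if self.gas_meter.borrow_mut().consume(gas).is_err() {
            self.sentinel.borrow_mut().out_of_gas = true;
        }
    }
}

fn main() {
    let gas_meter = RefCell::new(GasMeter { consumed: 0, limit: 100 });
    let sentinel = RefCell::new(Sentinel { out_of_gas: false });
    {
        let ctx = Ctx { gas_meter: &gas_meter, sentinel: &sentinel };
        ctx.charge(60);
        ctx.charge(60); // exceeds the limit
    }
    // The caller still owns both cells and can inspect them after the VP ran.
    assert_eq!(gas_meter.borrow().consumed, 120);
    assert!(sentinel.borrow().out_of_gas);
}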
#[derive(Debug)] -pub struct CtxPostStorageRead<'view, 'a: 'view, DB, H, CA> +pub struct CtxPostStorageRead<'view, 'a: 'view, S, CA> where - DB: state::DB + for<'iter> state::DBIter<'iter>, - H: StorageHasher, + S: StateRead, CA: WasmCacheAccess, { - ctx: &'view Ctx<'a, DB, H, CA>, + ctx: &'view Ctx<'a, S, CA>, } -impl<'a, DB, H, CA> Ctx<'a, DB, H, CA> +impl<'a, S, CA> Ctx<'a, S, CA> where - DB: 'static + state::DB + for<'iter> state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Initialize a new context for native VP call #[allow(clippy::too_many_arguments)] pub fn new( address: &'a Address, - storage: &'a State, - write_log: &'a WriteLog, + state: &'a S, tx: &'a Tx, tx_index: &'a TxIndex, - gas_meter: VpGasMeter, + gas_meter: &'a RefCell, + sentinel: &'a RefCell, keys_changed: &'a BTreeSet, verifiers: &'a BTreeSet
, #[cfg(feature = "wasm-runtime")] @@ -138,11 +133,10 @@ where ) -> Self { Self { address, + state, iterators: RefCell::new(PrefixIterators::default()), - gas_meter: RefCell::new(gas_meter), - sentinel: RefCell::new(VpSentinel::default()), - storage, - write_log, + gas_meter, + sentinel, tx, tx_index, keys_changed, @@ -156,27 +150,24 @@ where /// Read access to the prior storage (state before tx execution) /// via [`trait@StorageRead`]. - pub fn pre<'view>(&'view self) -> CtxPreStorageRead<'view, 'a, DB, H, CA> { + pub fn pre<'view>(&'view self) -> CtxPreStorageRead<'view, 'a, S, CA> { CtxPreStorageRead { ctx: self } } /// Read access to the posterior storage (state after tx execution) /// via [`trait@StorageRead`]. - pub fn post<'view>( - &'view self, - ) -> CtxPostStorageRead<'view, 'a, DB, H, CA> { + pub fn post<'view>(&'view self) -> CtxPostStorageRead<'view, 'a, S, CA> { CtxPostStorageRead { ctx: self } } } -impl<'view, 'a: 'view, DB, H, CA> StorageRead - for CtxPreStorageRead<'view, 'a, DB, H, CA> +impl<'view, 'a: 'view, S, CA> StorageRead + for CtxPreStorageRead<'view, 'a, S, CA> where - DB: 'static + state::DB + for<'iter> state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { - type PrefixIter<'iter> = state::PrefixIter<'iter, DB> where Self: 'iter; + type PrefixIter<'iter> = state::PrefixIter<'iter,:: D> where Self: 'iter; fn read_bytes( &self, @@ -184,8 +175,7 @@ where ) -> Result>, state::StorageError> { vp_host_fns::read_pre( &mut self.ctx.gas_meter.borrow_mut(), - self.ctx.storage, - self.ctx.write_log, + self.ctx.state, key, &mut self.ctx.sentinel.borrow_mut(), ) @@ -195,8 +185,7 @@ where fn has_key(&self, key: &storage::Key) -> Result { vp_host_fns::has_key_pre( &mut self.ctx.gas_meter.borrow_mut(), - self.ctx.storage, - self.ctx.write_log, + self.ctx.state, key, &mut self.ctx.sentinel.borrow_mut(), ) @@ -209,8 +198,8 @@ where ) -> Result, state::StorageError> { vp_host_fns::iter_prefix_pre( &mut self.ctx.gas_meter.borrow_mut(), - self.ctx.write_log, - self.ctx.storage, + self.ctx.state.write_log(), + self.ctx.state.db(), prefix, &mut self.ctx.sentinel.borrow_mut(), ) @@ -224,7 +213,7 @@ where &'iter self, iter: &mut Self::PrefixIter<'iter>, ) -> Result)>, state::StorageError> { - vp_host_fns::iter_next::( + vp_host_fns::iter_next::<::D>( &mut self.ctx.gas_meter.borrow_mut(), iter, &mut self.ctx.sentinel.borrow_mut(), @@ -268,14 +257,13 @@ where } } -impl<'view, 'a: 'view, DB, H, CA> StorageRead - for CtxPostStorageRead<'view, 'a, DB, H, CA> +impl<'view, 'a: 'view, S, CA> StorageRead + for CtxPostStorageRead<'view, 'a, S, CA> where - DB: 'static + state::DB + for<'iter> state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { - type PrefixIter<'iter> = state::PrefixIter<'iter, DB> where Self: 'iter; + type PrefixIter<'iter> = state::PrefixIter<'iter, ::D> where Self: 'iter; fn read_bytes( &self, @@ -283,8 +271,7 @@ where ) -> Result>, state::StorageError> { vp_host_fns::read_post( &mut self.ctx.gas_meter.borrow_mut(), - self.ctx.storage, - self.ctx.write_log, + self.ctx.state, key, &mut self.ctx.sentinel.borrow_mut(), ) @@ -294,8 +281,7 @@ where fn has_key(&self, key: &storage::Key) -> Result { vp_host_fns::has_key_post( &mut self.ctx.gas_meter.borrow_mut(), - self.ctx.storage, - self.ctx.write_log, + self.ctx.state, key, &mut self.ctx.sentinel.borrow_mut(), ) @@ -308,8 +294,8 @@ where ) -> Result, state::StorageError> { vp_host_fns::iter_prefix_post( &mut 
self.ctx.gas_meter.borrow_mut(), - self.ctx.write_log, - self.ctx.storage, + self.ctx.state.write_log(), + self.ctx.state.db(), prefix, &mut self.ctx.sentinel.borrow_mut(), ) @@ -323,7 +309,7 @@ where &'iter self, iter: &mut Self::PrefixIter<'iter>, ) -> Result)>, state::StorageError> { - vp_host_fns::iter_next::( + vp_host_fns::iter_next::<::D>( &mut self.ctx.gas_meter.borrow_mut(), iter, &mut self.ctx.sentinel.borrow_mut(), @@ -359,7 +345,7 @@ where } fn get_native_token(&self) -> Result { - Ok(self.ctx.storage.native_token.clone()) + Ok(self.ctx.state.in_mem().native_token.clone()) } fn get_pred_epochs(&self) -> state::StorageResult { @@ -367,15 +353,14 @@ where } } -impl<'view, 'a: 'view, DB, H, CA> VpEnv<'view> for Ctx<'a, DB, H, CA> +impl<'view, 'a: 'view, S, CA> VpEnv<'view> for Ctx<'a, S, CA> where - DB: 'static + state::DB + for<'iter> state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { - type Post = CtxPostStorageRead<'view, 'a, DB, H, CA>; - type Pre = CtxPreStorageRead<'view, 'a, DB, H, CA>; - type PrefixIter<'iter> = state::PrefixIter<'iter, DB> where Self: 'iter; + type Post = CtxPostStorageRead<'view, 'a, S, CA>; + type Pre = CtxPreStorageRead<'view, 'a, S, CA>; + type PrefixIter<'iter> = state::PrefixIter<'iter, ::D> where Self: 'iter; fn pre(&'view self) -> Self::Pre { CtxPreStorageRead { ctx: self } @@ -391,7 +376,7 @@ where ) -> Result, state::StorageError> { vp_host_fns::read_temp( &mut self.gas_meter.borrow_mut(), - self.write_log, + self.state, key, &mut self.sentinel.borrow_mut(), ) @@ -405,7 +390,7 @@ where ) -> Result>, state::StorageError> { vp_host_fns::read_temp( &mut self.gas_meter.borrow_mut(), - self.write_log, + self.state, key, &mut self.sentinel.borrow_mut(), ) @@ -415,7 +400,7 @@ where fn get_chain_id(&self) -> Result { vp_host_fns::get_chain_id( &mut self.gas_meter.borrow_mut(), - self.storage, + self.state, &mut self.sentinel.borrow_mut(), ) .into_storage_result() @@ -424,7 +409,7 @@ where fn get_block_height(&self) -> Result { vp_host_fns::get_block_height( &mut self.gas_meter.borrow_mut(), - self.storage, + self.state, &mut self.sentinel.borrow_mut(), ) .into_storage_result() @@ -436,7 +421,7 @@ where ) -> Result, state::StorageError> { vp_host_fns::get_block_header( &mut self.gas_meter.borrow_mut(), - self.storage, + self.state, height, &mut self.sentinel.borrow_mut(), ) @@ -446,7 +431,7 @@ where fn get_block_hash(&self) -> Result { vp_host_fns::get_block_hash( &mut self.gas_meter.borrow_mut(), - self.storage, + self.state, &mut self.sentinel.borrow_mut(), ) .into_storage_result() @@ -455,7 +440,7 @@ where fn get_block_epoch(&self) -> Result { vp_host_fns::get_block_epoch( &mut self.gas_meter.borrow_mut(), - self.storage, + self.state, &mut self.sentinel.borrow_mut(), ) .into_storage_result() @@ -473,7 +458,7 @@ where fn get_native_token(&self) -> Result { vp_host_fns::get_native_token( &mut self.gas_meter.borrow_mut(), - self.storage, + self.state, &mut self.sentinel.borrow_mut(), ) .into_storage_result() @@ -482,7 +467,7 @@ where fn get_pred_epochs(&self) -> state::StorageResult { vp_host_fns::get_pred_epochs( &mut self.gas_meter.borrow_mut(), - self.storage, + self.state, &mut self.sentinel.borrow_mut(), ) .into_storage_result() @@ -494,7 +479,7 @@ where ) -> Result, state::StorageError> { vp_host_fns::get_ibc_events( &mut self.gas_meter.borrow_mut(), - self.write_log, + self.state, event_type, ) .into_storage_result() @@ -506,8 +491,8 @@ where ) -> Result, state::StorageError> { 
vp_host_fns::iter_prefix_pre( &mut self.gas_meter.borrow_mut(), - self.write_log, - self.storage, + self.state.write_log(), + self.state.db(), prefix, &mut self.sentinel.borrow_mut(), ) @@ -526,22 +511,24 @@ where use crate::vm::host_env::VpCtx; use crate::vm::wasm::run::VpEvalWasm; - let eval_runner = VpEvalWasm { - db: PhantomData, - hasher: PhantomData, - cache_access: PhantomData, - }; - let mut iterators: PrefixIterators<'_, DB> = + let eval_runner = + VpEvalWasm::<::D, ::H, CA> { + db: PhantomData, + hasher: PhantomData, + cache_access: PhantomData, + }; + let mut iterators: PrefixIterators<'_, ::D> = PrefixIterators::default(); let mut result_buffer: Option> = None; let mut vp_wasm_cache = self.vp_wasm_cache.clone(); let ctx = VpCtx::new( self.address, - self.storage, - self.write_log, - &mut self.gas_meter.borrow_mut(), - &mut self.sentinel.borrow_mut(), + self.state.write_log(), + self.state.in_mem(), + self.state.db(), + self.gas_meter, + self.sentinel, self.tx, self.tx_index, &mut iterators, @@ -659,10 +646,9 @@ pub trait StorageReader { ) -> eyre::Result>; } -impl<'a, DB, H, CA> StorageReader for &Ctx<'a, DB, H, CA> +impl<'a, S, CA> StorageReader for &Ctx<'a, S, CA> where - DB: 'static + state::DB + for<'iter> state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Helper function. After reading posterior state, diff --git a/crates/namada/src/ledger/native_vp/multitoken.rs b/crates/namada/src/ledger/native_vp/multitoken.rs index 9e3783a17a..d07777126f 100644 --- a/crates/namada/src/ledger/native_vp/multitoken.rs +++ b/crates/namada/src/ledger/native_vp/multitoken.rs @@ -3,6 +3,7 @@ use std::collections::{BTreeSet, HashMap}; use namada_governance::is_proposal_accepted; +use namada_state::StateRead; use namada_token::storage_key::is_any_token_parameter_key; use namada_tx::Tx; use namada_vp_env::VpEnv; @@ -29,20 +30,18 @@ pub enum Error { pub type Result = std::result::Result; /// Multitoken VP -pub struct MultitokenVp<'a, DB, H, CA> +pub struct MultitokenVp<'a, S, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: namada_state::StorageHasher, + S: StateRead, CA: WasmCacheAccess, { /// Context to interact with the host structures. 
- pub ctx: Ctx<'a, DB, H, CA>, + pub ctx: Ctx<'a, S, CA>, } -impl<'a, DB, H, CA> NativeVp for MultitokenVp<'a, DB, H, CA> +impl<'a, S, CA> NativeVp for MultitokenVp<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + namada_state::StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { type Error = Error; @@ -168,10 +167,9 @@ where } } -impl<'a, DB, H, CA> MultitokenVp<'a, DB, H, CA> +impl<'a, S, CA> MultitokenVp<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + namada_state::StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Return the minter if the minter is valid and the minter VP exists @@ -214,11 +212,13 @@ where #[cfg(test)] mod tests { + use std::cell::RefCell; use std::collections::BTreeSet; use borsh_ext::BorshSerializeExt; + use namada_core::validity_predicate::VpSentinel; use namada_gas::TxGasMeter; - use namada_state::testing::TestWlStorage; + use namada_state::testing::TestState; use namada_tx::data::TxType; use namada_tx::{Code, Data, Section, Signature, Tx}; @@ -239,11 +239,11 @@ mod tests { const ADDRESS: Address = Address::Internal(InternalAddress::Multitoken); - fn dummy_tx(wl_storage: &TestWlStorage) -> Tx { + fn dummy_tx(state: &TestState) -> Tx { let tx_code = vec![]; let tx_data = vec![]; let mut tx = Tx::from_type(TxType::Raw); - tx.header.chain_id = wl_storage.storage.chain_id.clone(); + tx.header.chain_id = state.in_mem().chain_id.clone(); tx.set_code(Code::new(tx_code, None)); tx.set_data(Data::new(tx_data)); tx.add_section(Section::Signature(Signature::new( @@ -256,48 +256,48 @@ mod tests { #[test] fn test_valid_transfer() { - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); let mut keys_changed = BTreeSet::new(); let sender = established_address_1(); let sender_key = balance_key(&nam(), &sender); let amount = Amount::native_whole(100); - wl_storage - .storage - .write(&sender_key, amount.serialize_to_vec()) + state + .db_write(&sender_key, amount.serialize_to_vec()) .expect("write failed"); // transfer 10 let amount = Amount::native_whole(90); - wl_storage - .write_log + state + .write_log_mut() .write(&sender_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(sender_key); let receiver = established_address_2(); let receiver_key = balance_key(&nam(), &receiver); let amount = Amount::native_whole(10); - wl_storage - .write_log + state + .write_log_mut() .write(&receiver_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(receiver_key); let tx_index = TxIndex::default(); - let tx = dummy_tx(&wl_storage); - let gas_meter = VpGasMeter::new_from_tx_meter( + let tx = dummy_tx(&state); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm_cache(); let mut verifiers = BTreeSet::new(); verifiers.insert(sender); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -312,21 +312,20 @@ mod tests { #[test] fn test_invalid_transfer() { - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); let mut keys_changed = BTreeSet::new(); let sender = established_address_1(); let sender_key = balance_key(&nam(), &sender); let amount = 
Amount::native_whole(100); - wl_storage - .storage - .write(&sender_key, amount.serialize_to_vec()) + state + .db_write(&sender_key, amount.serialize_to_vec()) .expect("write failed"); // transfer 10 let amount = Amount::native_whole(90); - wl_storage - .write_log + state + .write_log_mut() .write(&sender_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(sender_key); @@ -334,26 +333,27 @@ mod tests { let receiver_key = balance_key(&nam(), &receiver); // receive more than 10 let amount = Amount::native_whole(100); - wl_storage - .write_log + state + .write_log_mut() .write(&receiver_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(receiver_key); let tx_index = TxIndex::default(); - let tx = dummy_tx(&wl_storage); - let gas_meter = VpGasMeter::new_from_tx_meter( + let tx = dummy_tx(&state); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm_cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -368,7 +368,7 @@ mod tests { #[test] fn test_valid_mint() { - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); let mut keys_changed = BTreeSet::new(); // IBC token @@ -378,15 +378,15 @@ mod tests { let target = established_address_1(); let target_key = balance_key(&token, &target); let amount = Amount::native_whole(100); - wl_storage - .write_log + state + .write_log_mut() .write(&target_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(target_key); let minted_key = minted_balance_key(&token); let amount = Amount::native_whole(100); - wl_storage - .write_log + state + .write_log_mut() .write(&minted_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minted_key); @@ -394,28 +394,29 @@ mod tests { // minter let minter = Address::Internal(InternalAddress::Ibc); let minter_key = minter_key(&token); - wl_storage - .write_log + state + .write_log_mut() .write(&minter_key, minter.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minter_key); let tx_index = TxIndex::default(); - let tx = dummy_tx(&wl_storage); - let gas_meter = VpGasMeter::new_from_tx_meter( + let tx = dummy_tx(&state); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm_cache(); let mut verifiers = BTreeSet::new(); // for the minter verifiers.insert(minter); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -430,7 +431,7 @@ mod tests { #[test] fn test_invalid_mint() { - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); let mut keys_changed = BTreeSet::new(); // mint 100 @@ -438,15 +439,15 @@ mod tests { let target_key = balance_key(&nam(), &target); // mint more than 100 let amount = Amount::native_whole(1000); - wl_storage - .write_log + state + .write_log_mut() .write(&target_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(target_key); let minted_key = minted_balance_key(&nam()); 
let amount = Amount::native_whole(100); - wl_storage - .write_log + state + .write_log_mut() .write(&minted_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minted_key); @@ -454,28 +455,29 @@ mod tests { // minter let minter = nam(); let minter_key = minter_key(&nam()); - wl_storage - .write_log + state + .write_log_mut() .write(&minter_key, minter.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minter_key); let tx_index = TxIndex::default(); - let tx = dummy_tx(&wl_storage); - let gas_meter = VpGasMeter::new_from_tx_meter( + let tx = dummy_tx(&state); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm_cache(); let mut verifiers = BTreeSet::new(); // for the minter verifiers.insert(minter); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -490,7 +492,7 @@ mod tests { #[test] fn test_no_minter() { - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); let mut keys_changed = BTreeSet::new(); // IBC token @@ -500,15 +502,15 @@ mod tests { let target = established_address_1(); let target_key = balance_key(&token, &target); let amount = Amount::native_whole(100); - wl_storage - .write_log + state + .write_log_mut() .write(&target_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(target_key); let minted_key = minted_balance_key(&token); let amount = Amount::native_whole(100); - wl_storage - .write_log + state + .write_log_mut() .write(&minted_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minted_key); @@ -516,19 +518,20 @@ mod tests { // no minter is set let tx_index = TxIndex::default(); - let tx = dummy_tx(&wl_storage); - let gas_meter = VpGasMeter::new_from_tx_meter( + let tx = dummy_tx(&state); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm_cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -543,7 +546,7 @@ mod tests { #[test] fn test_invalid_minter() { - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); let mut keys_changed = BTreeSet::new(); // IBC token @@ -553,15 +556,15 @@ mod tests { let target = established_address_1(); let target_key = balance_key(&token, &target); let amount = Amount::native_whole(100); - wl_storage - .write_log + state + .write_log_mut() .write(&target_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(target_key); let minted_key = minted_balance_key(&token); let amount = Amount::native_whole(100); - wl_storage - .write_log + state + .write_log_mut() .write(&minted_key, amount.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minted_key); @@ -569,28 +572,29 @@ mod tests { // invalid minter let minter = established_address_1(); let minter_key = minter_key(&token); - wl_storage - .write_log + state + .write_log_mut() .write(&minter_key, minter.serialize_to_vec()) .expect("write failed"); 
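The multitoken test rewrites above all follow one recipe: committed (prior) balances are seeded directly into storage with `state.db_write(..)`, while the transfer being validated is staged in the write log with `state.write_log_mut().write(..)`; the VP then compares the pre and post views. A self-contained sketch of that layering, using plain `HashMap`s as stand-ins for committed storage and the write log (keys and the balance check are illustrative only):

use std::collections::HashMap;

// Toy model of committed storage plus the in-flight write log
// (the real `TestState` wraps both behind `StateRead`).
struct State {
    committed: HashMap<String, u64>, // what `state.db_write(..)` populates
    write_log: HashMap<String, u64>, // what `state.write_log_mut().write(..)` populates
}

impl State {
    // Prior value: only committed storage is visible.
    fn read_pre(&self, key: &str) -> Option<u64> {
        self.committed.get(key).copied()
    }
    // Posterior value: the write log shadows committed storage.
    fn read_post(&self, key: &str) -> Option<u64> {
        self.write_log.get(key).or_else(|| self.committed.get(key)).copied()
    }
}

fn main() {
    let mut state = State { committed: HashMap::new(), write_log: HashMap::new() };
    state.committed.insert("balance/sender".into(), 100); // pre: 100
    state.write_log.insert("balance/sender".into(), 90);  // post: 90 (sent 10)
    state.write_log.insert("balance/receiver".into(), 10); // post: 10 (received 10)

    // A multitoken-style check: total debited equals total credited.
    let debit = state.read_pre("balance/sender").unwrap()
        - state.read_post("balance/sender").unwrap();
    let credit = state.read_post("balance/receiver").unwrap()
        - state.read_pre("balance/receiver").unwrap_or(0);
    assert_eq!(debit, credit);
}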
keys_changed.insert(minter_key); let tx_index = TxIndex::default(); - let tx = dummy_tx(&wl_storage); - let gas_meter = VpGasMeter::new_from_tx_meter( + let tx = dummy_tx(&state); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm_cache(); let mut verifiers = BTreeSet::new(); // for the minter verifiers.insert(minter); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -605,34 +609,35 @@ mod tests { #[test] fn test_invalid_minter_update() { - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); let mut keys_changed = BTreeSet::new(); let minter_key = minter_key(&nam()); let minter = established_address_1(); - wl_storage - .write_log + state + .write_log_mut() .write(&minter_key, minter.serialize_to_vec()) .expect("write failed"); keys_changed.insert(minter_key); let tx_index = TxIndex::default(); - let tx = dummy_tx(&wl_storage); - let gas_meter = VpGasMeter::new_from_tx_meter( + let tx = dummy_tx(&state); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm_cache(); let mut verifiers = BTreeSet::new(); // for the minter verifiers.insert(minter); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -647,7 +652,7 @@ mod tests { #[test] fn test_invalid_key_update() { - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); let mut keys_changed = BTreeSet::new(); let key = Key::from( @@ -655,27 +660,28 @@ mod tests { ) .push(&"invalid_segment".to_string()) .unwrap(); - wl_storage - .write_log + state + .write_log_mut() .write(&key, 0.serialize_to_vec()) .expect("write failed"); keys_changed.insert(key); let tx_index = TxIndex::default(); - let tx = dummy_tx(&wl_storage); - let gas_meter = VpGasMeter::new_from_tx_meter( + let tx = dummy_tx(&state); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(u64::MAX.into()), - ); + )); let (vp_wasm_cache, _vp_cache_dir) = wasm_cache(); let verifiers = BTreeSet::new(); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &wl_storage.storage, - &wl_storage.write_log, + &state, &tx, &tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, diff --git a/crates/namada/src/ledger/native_vp/parameters.rs b/crates/namada/src/ledger/native_vp/parameters.rs index a64667d9f8..4fe0f11f76 100644 --- a/crates/namada/src/ledger/native_vp/parameters.rs +++ b/crates/namada/src/ledger/native_vp/parameters.rs @@ -4,6 +4,7 @@ use std::collections::BTreeSet; use namada_core::address::Address; use namada_core::storage::Key; +use namada_state::StateRead; use namada_tx::Tx; use thiserror::Error; @@ -21,20 +22,18 @@ pub enum Error { pub type Result = std::result::Result; /// Parameters VP -pub struct ParametersVp<'a, DB, H, CA> +pub struct ParametersVp<'a, S, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: namada_state::StorageHasher, + S: StateRead, CA: WasmCacheAccess, { 
/// Context to interact with the host structures. - pub ctx: Ctx<'a, DB, H, CA>, + pub ctx: Ctx<'a, S, CA>, } -impl<'a, DB, H, CA> NativeVp for ParametersVp<'a, DB, H, CA> +impl<'a, S, CA> NativeVp for ParametersVp<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + namada_state::StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { type Error = Error; diff --git a/crates/namada/src/ledger/pgf/mod.rs b/crates/namada/src/ledger/pgf/mod.rs index 23ff47903d..264f96691b 100644 --- a/crates/namada/src/ledger/pgf/mod.rs +++ b/crates/namada/src/ledger/pgf/mod.rs @@ -7,6 +7,7 @@ use std::collections::BTreeSet; use namada_governance::pgf::storage::keys as pgf_storage; use namada_governance::{is_proposal_accepted, pgf}; +use namada_state::StateRead; use namada_tx::Tx; use thiserror::Error; @@ -30,20 +31,18 @@ pub enum Error { } /// Pgf VP -pub struct PgfVp<'a, DB, H, CA> +pub struct PgfVp<'a, S, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: namada_state::StorageHasher, + S: StateRead, CA: WasmCacheAccess, { /// Context to interact with the host structures. - pub ctx: Ctx<'a, DB, H, CA>, + pub ctx: Ctx<'a, S, CA>, } -impl<'a, DB, H, CA> NativeVp for PgfVp<'a, DB, H, CA> +impl<'a, S, CA> NativeVp for PgfVp<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + namada_state::StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { type Error = Error; @@ -111,10 +110,9 @@ where } } -impl<'a, DB, H, CA> PgfVp<'a, DB, H, CA> +impl<'a, S, CA> PgfVp<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + namada_state::StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Validate a governance parameter diff --git a/crates/namada/src/ledger/pos/vp.rs b/crates/namada/src/ledger/pos/vp.rs index dfb1c32c25..bbc4d502ee 100644 --- a/crates/namada/src/ledger/pos/vp.rs +++ b/crates/namada/src/ledger/pos/vp.rs @@ -13,7 +13,7 @@ pub use namada_proof_of_stake::types; // is_validator_address_raw_hash_key, // is_validator_max_commission_rate_change_key, // }; -use namada_state::StorageHasher; +use namada_state::StateRead; use namada_state::StorageRead; use namada_tx::Tx; use thiserror::Error; @@ -34,32 +34,29 @@ pub enum Error { pub type Result = std::result::Result; /// Proof-of-Stake validity predicate -pub struct PosVP<'a, DB, H, CA> +pub struct PosVP<'a, S, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead, CA: WasmCacheAccess, { /// Context to interact with the host structures. - pub ctx: Ctx<'a, DB, H, CA>, + pub ctx: Ctx<'a, S, CA>, } -impl<'a, DB, H, CA> PosVP<'a, DB, H, CA> +impl<'a, S, CA> PosVP<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { /// Instantiate a `PosVP`. 
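The VP signatures in these files all shrink the same way: the `DB: namada_state::DB + DBIter` plus `H: StorageHasher` pair collapses into a single `S: StateRead` bound, with the database type recovered as an associated type where prefix iterators still need it. A compilable toy version of that generics change (the `StateRead` trait, `SomeVp` and `TestState` below are simplified stand-ins, not the real `namada_state` items):

// One `S` carries the database and hasher as associated types,
// instead of threading `DB` and `H` through every struct and impl.
trait StateRead {
    type D;
    type H;
    fn read(&self, key: &str) -> Option<Vec<u8>>;
}

struct SomeVp<'a, S: StateRead> {
    // Read-only state access, mirroring `pub state: &'a S` in `Ctx`.
    state: &'a S,
}

impl<'a, S: StateRead> SomeVp<'a, S> {
    fn validate(&self, key: &str) -> bool {
        self.state.read(key).is_some()
    }
}

// A test-only state with trivial associated types.
struct TestState;
impl StateRead for TestState {
    type D = ();
    type H = ();
    fn read(&self, _key: &str) -> Option<Vec<u8>> {
        Some(vec![])
    }
}

fn main() {
    let state = TestState;
    let vp = SomeVp { state: &state };
    assert!(vp.validate("any/key"));
}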
- pub fn new(ctx: Ctx<'a, DB, H, CA>) -> Self { + pub fn new(ctx: Ctx<'a, S, CA>) -> Self { Self { ctx } } } -impl<'a, DB, H, CA> NativeVp for PosVP<'a, DB, H, CA> +impl<'a, S, CA> NativeVp for PosVP<'a, S, CA> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { type Error = Error; diff --git a/crates/namada/src/ledger/protocol/mod.rs b/crates/namada/src/ledger/protocol/mod.rs index 33c930a5c6..0c96c9e495 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -1,15 +1,17 @@ //! The ledger's protocol +use std::cell::RefCell; use std::collections::BTreeSet; +use std::fmt::Debug; use borsh_ext::BorshSerializeExt; use eyre::{eyre, WrapErr}; use masp_primitives::transaction::Transaction; use namada_core::hash::Hash; use namada_core::storage::Key; +use namada_core::validity_predicate::VpSentinel; use namada_gas::TxGasMeter; use namada_sdk::tx::TX_TRANSFER_WASM; -use namada_state::wl_storage::WriteLogAndStorage; -use namada_state::StorageRead; +use namada_state::StorageWrite; use namada_tx::data::protocol::ProtocolTxType; use namada_tx::data::{ DecryptedTx, GasLimit, TxResult, TxType, VpsResult, WrapperTx, @@ -32,8 +34,7 @@ use crate::ledger::native_vp::parameters::{self, ParametersVp}; use crate::ledger::native_vp::{self, NativeVp}; use crate::ledger::pgf::PgfVp; use crate::ledger::pos::{self, PosVP}; -use crate::state::write_log::WriteLog; -use crate::state::{DBIter, State, StorageHasher, WlStorage, DB}; +use crate::state::{DBIter, State, StorageHasher, StorageRead, WlState, DB}; use crate::storage; use crate::storage::TxIndex; use crate::token::Amount; @@ -101,32 +102,37 @@ pub enum Error { /// Shell parameters for running wasm transactions. 
#[allow(missing_docs)] -pub struct ShellParams<'a, CA, WLS> +#[derive(Debug)] +pub struct ShellParams<'a, S, D, H, CA> where + S: State + Sync, + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, - WLS: WriteLogAndStorage + StorageRead, { - tx_gas_meter: &'a mut TxGasMeter, - wl_storage: &'a mut WLS, - vp_wasm_cache: &'a mut VpCache, - tx_wasm_cache: &'a mut TxCache, + pub tx_gas_meter: &'a RefCell, + pub state: &'a mut S, + pub vp_wasm_cache: &'a mut VpCache, + pub tx_wasm_cache: &'a mut TxCache, } -impl<'a, CA, WLS> ShellParams<'a, CA, WLS> +impl<'a, S, D, H, CA> ShellParams<'a, S, D, H, CA> where + S: State + Sync, + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, - WLS: WriteLogAndStorage + StorageRead, { /// Create a new instance of `ShellParams` pub fn new( - tx_gas_meter: &'a mut TxGasMeter, - wl_storage: &'a mut WLS, + tx_gas_meter: &'a RefCell, + state: &'a mut S, vp_wasm_cache: &'a mut VpCache, tx_wasm_cache: &'a mut TxCache, ) -> Self { Self { tx_gas_meter, - wl_storage, + state, vp_wasm_cache, tx_wasm_cache, } @@ -156,8 +162,8 @@ pub fn dispatch_tx<'a, D, H, CA>( tx: Tx, tx_bytes: &'a [u8], tx_index: TxIndex, - tx_gas_meter: &'a mut TxGasMeter, - wl_storage: &'a mut WlStorage, + tx_gas_meter: &'a RefCell, + state: &'a mut WlState, vp_wasm_cache: &'a mut VpCache, tx_wasm_cache: &'a mut TxCache, wrapper_args: Option<&mut WrapperArgs>, @@ -174,13 +180,13 @@ where &tx_index, ShellParams { tx_gas_meter, - wl_storage, + state, vp_wasm_cache, tx_wasm_cache, }, ), TxType::Protocol(protocol_tx) => { - apply_protocol_tx(protocol_tx.tx, tx.data(), wl_storage) + apply_protocol_tx(protocol_tx.tx, tx.data(), state) } TxType::Wrapper(ref wrapper) => { let fee_unshielding_transaction = @@ -192,14 +198,14 @@ where tx_bytes, ShellParams { tx_gas_meter, - wl_storage, + state, vp_wasm_cache, tx_wasm_cache, }, wrapper_args, )?; Ok(TxResult { - gas_used: tx_gas_meter.get_tx_consumed_gas(), + gas_used: tx_gas_meter.borrow().get_tx_consumed_gas(), changed_keys, vps_result: VpsResult::default(), initialized_accounts: vec![], @@ -235,25 +241,26 @@ where /// - gas accounting /// /// Returns the set of changed storage keys. 
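`ShellParams` now carries the transaction gas meter as `&RefCell<TxGasMeter>` instead of `&mut TxGasMeter`, so the same handle can be re-borrowed to charge wrapper gas, handed on to the wasm runner, and read back for `gas_used`, all while `state` stays independently `&mut`. A small stand-alone sketch of that flow (types, field types and method bodies are simplified stand-ins):

use std::cell::RefCell;

// Simplified stand-in for TxGasMeter.
struct TxGasMeter { consumed: u64 }
impl TxGasMeter {
    fn add_wrapper_gas(&mut self, tx_bytes: &[u8]) { self.consumed += tx_bytes.len() as u64; }
    fn get_tx_consumed_gas(&self) -> u64 { self.consumed }
}

// Mirrors `ShellParams { tx_gas_meter: &RefCell<TxGasMeter>, state: &mut S, .. }`.
struct ShellParams<'a> {
    tx_gas_meter: &'a RefCell<TxGasMeter>,
    state: &'a mut Vec<String>, // toy stand-in for `&mut S`
}

fn apply_wrapper_tx(params: &mut ShellParams<'_>, tx_bytes: &[u8]) {
    // Charge wrapper gas through a temporary mutable borrow of the shared cell...
    params.tx_gas_meter.borrow_mut().add_wrapper_gas(tx_bytes);
    // ...while the state remains mutable for e.g. recording the tx hash.
    params.state.push("wrapper tx hash".to_string());
}

fn main() {
    let tx_gas_meter = RefCell::new(TxGasMeter { consumed: 0 });
    let mut state = Vec::new();
    let mut params = ShellParams { tx_gas_meter: &tx_gas_meter, state: &mut state };
    apply_wrapper_tx(&mut params, b"some wrapper bytes");
    // The caller reads the consumed gas back from the same shared handle.
    assert_eq!(tx_gas_meter.borrow().get_tx_consumed_gas(), 18);
}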
-pub(crate) fn apply_wrapper_tx<'a, D, H, CA, WLS>( +pub(crate) fn apply_wrapper_tx( tx: Tx, wrapper: &WrapperTx, fee_unshield_transaction: Option, tx_bytes: &[u8], - mut shell_params: ShellParams<'a, CA, WLS>, + mut shell_params: ShellParams<'_, S, D, H, CA>, wrapper_args: Option<&mut WrapperArgs>, ) -> Result> where - CA: 'static + WasmCacheAccess + Sync, + S: State + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, - WLS: WriteLogAndStorage + StorageRead, + CA: 'static + WasmCacheAccess + Sync, { let mut changed_keys = BTreeSet::default(); // Write wrapper tx hash to storage shell_params - .wl_storage + .state + .write_log_mut() .write_tx_hash(tx.header_hash()) .expect("Error while writing tx hash to storage"); @@ -269,6 +276,7 @@ where // Account for gas shell_params .tx_gas_meter + .borrow_mut() .add_wrapper_gas(tx_bytes) .map_err(|err| Error::GasError(err.to_string()))?; @@ -299,22 +307,22 @@ pub fn get_fee_unshielding_transaction( /// - Fee amount overflows /// - Not enough funds are available to pay the entire amount of the fee /// - The accumulated fee amount to be credited to the block proposer overflows -fn charge_fee<'a, D, H, CA, WLS>( +fn charge_fee<'a, S, D, H, CA>( wrapper: &WrapperTx, masp_transaction: Option, - shell_params: &mut ShellParams<'a, CA, WLS>, + shell_params: &mut ShellParams<'a, S, D, H, CA>, changed_keys: &mut BTreeSet, wrapper_args: Option<&mut WrapperArgs>, ) -> Result<()> where - CA: 'static + WasmCacheAccess + Sync, + S: State + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, - WLS: WriteLogAndStorage + StorageRead, + CA: 'static + WasmCacheAccess + Sync, { let ShellParams { tx_gas_meter: _, - wl_storage, + state, vp_wasm_cache, tx_wasm_cache, } = shell_params; @@ -323,22 +331,22 @@ where let requires_fee_unshield = if let Some(transaction) = masp_transaction { // The unshielding tx does not charge gas, instantiate a // custom gas meter for this step - let mut tx_gas_meter = - TxGasMeter::new(GasLimit::from( - wl_storage + let tx_gas_meter = + RefCell::new(TxGasMeter::new(GasLimit::from( + state .read::( &namada_parameters::storage::get_fee_unshielding_gas_limit_key( ), ) .expect("Error reading the storage") .expect("Missing fee unshielding gas limit in storage")), - ); + )); // If it fails, do not return early // from this function but try to take the funds from the unshielded // balance match wrapper.generate_fee_unshielding( - get_transfer_hash_from_storage(*wl_storage), + get_transfer_hash_from_storage(*state), Some(TX_TRANSFER_WASM.to_string()), transaction, ) { @@ -346,13 +354,13 @@ where // NOTE: A clean tx write log must be provided to this call // for a correct vp validation. 
Block write log, instead, // should contain any prior changes (if any) - wl_storage.write_log_mut().precommit_tx(); + state.write_log_mut().precommit_tx(); match apply_wasm_tx( fee_unshielding_tx, &TxIndex::default(), ShellParams { - tx_gas_meter: &mut tx_gas_meter, - wl_storage: *wl_storage, + tx_gas_meter: &tx_gas_meter, + state: *state, vp_wasm_cache, tx_wasm_cache, }, @@ -361,7 +369,7 @@ where // NOTE: do not commit yet cause this could be // exploited to get free unshieldings if !result.is_accepted() { - wl_storage.write_log_mut().drop_tx_keep_precommit(); + state.write_log_mut().drop_tx_keep_precommit(); tracing::error!( "The unshielding tx is invalid, some VPs \ rejected it: {:#?}", @@ -370,7 +378,7 @@ where } } Err(e) => { - wl_storage.write_log_mut().drop_tx_keep_precommit(); + state.write_log_mut().drop_tx_keep_precommit(); tracing::error!( "The unshielding tx is invalid, wasm run failed: \ {}", @@ -392,14 +400,14 @@ where Some(WrapperArgs { block_proposer, is_committed_fee_unshield: _, - }) => transfer_fee(*wl_storage, block_proposer, wrapper)?, - None => check_fees(*wl_storage, wrapper)?, + }) => transfer_fee(*state, block_proposer, wrapper)?, + None => check_fees(*state, wrapper)?, } - changed_keys.extend(wl_storage.write_log_mut().get_keys_with_precommit()); + changed_keys.extend(state.write_log_mut().get_keys_with_precommit()); // Commit tx write log even in case of subsequent errors - wl_storage.write_log_mut().commit_tx(); + state.write_log_mut().commit_tx(); // Update the flag only after the fee payment has been committed if let Some(args) = wrapper_args { args.is_committed_fee_unshield = requires_fee_unshield; @@ -410,16 +418,16 @@ where /// Perform the actual transfer of fess from the fee payer to the block /// proposer. -pub fn transfer_fee( - wl_storage: &mut WLS, +pub fn transfer_fee( + state: &mut S, block_proposer: &Address, wrapper: &WrapperTx, ) -> Result<()> where - WLS: WriteLogAndStorage + StorageRead, + S: State + StorageRead + StorageWrite, { let balance = crate::token::read_balance( - wl_storage, + state, &wrapper.fee.token, &wrapper.fee_payer(), ) @@ -427,15 +435,12 @@ where match wrapper.get_tx_fee() { Ok(fees) => { - let fees = crate::token::denom_to_amount( - fees, - &wrapper.fee.token, - wl_storage, - ) - .map_err(|e| Error::FeeError(e.to_string()))?; + let fees = + crate::token::denom_to_amount(fees, &wrapper.fee.token, state) + .map_err(|e| Error::FeeError(e.to_string()))?; if balance.checked_sub(fees).is_some() { token_transfer( - wl_storage, + state, &wrapper.fee.token, &wrapper.fee_payer(), block_proposer, @@ -454,7 +459,7 @@ where shouldn't happen." ); token_transfer( - wl_storage, + state, &wrapper.fee.token, &wrapper.fee_payer(), block_proposer, @@ -489,17 +494,17 @@ where /// `crate::token::transfer` this function updates the tx write log and /// not the block write log. 
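The `transfer_fee` hunk above keeps the pre-existing policy: if `balance.checked_sub(fees)` succeeds the full fee goes to the block proposer, otherwise (a case the comment flags as one that should not happen past validation) the remaining balance is transferred instead and an error is surfaced. A minimal sketch of that branch with checked integer arithmetic (plain `u64` stands in for `token::Amount`; the write-log and error plumbing is elided):

// Toy fee settlement mirroring the checked_sub branch in `transfer_fee`.
fn settle_fee(balance: u64, fees: u64) -> (u64 /* paid */, Result<(), String>) {
    match balance.checked_sub(fees) {
        // Enough funds: pay the full fee.
        Some(_remaining) => (fees, Ok(())),
        // Not enough funds: drain what is there and report an error,
        // as the protocol does for a wrapper that slipped through validation.
        None => (
            balance,
            Err("transfer fee: insufficient balance, draining source".to_string()),
        ),
    }
}

fn main() {
    assert_eq!(settle_fee(100, 30), (30, Ok(())));
    let (paid, res) = settle_fee(10, 30);
    assert_eq!(paid, 10);
    assert!(res.is_err());
}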
fn token_transfer( - wl_storage: &mut WLS, + state: &mut WLS, token: &Address, src: &Address, dest: &Address, amount: Amount, ) -> Result<()> where - WLS: WriteLogAndStorage + StorageRead, + WLS: State + StorageRead, { let src_key = crate::token::storage_key::balance_key(token, src); - let src_balance = crate::token::read_balance(wl_storage, token, src) + let src_balance = crate::token::read_balance(state, token, src) .expect("Token balance read in protocol must not fail"); match src_balance.checked_sub(amount) { Some(new_src_balance) => { @@ -507,16 +512,15 @@ where return Ok(()); } let dest_key = crate::token::storage_key::balance_key(token, dest); - let dest_balance = - crate::token::read_balance(wl_storage, token, dest) - .expect("Token balance read in protocol must not fail"); + let dest_balance = crate::token::read_balance(state, token, dest) + .expect("Token balance read in protocol must not fail"); match dest_balance.checked_add(amount) { Some(new_dest_balance) => { - wl_storage + state .write_log_mut() .write(&src_key, new_src_balance.serialize_to_vec()) .map_err(|e| Error::FeeError(e.to_string()))?; - match wl_storage + match state .write_log_mut() .write(&dest_key, new_dest_balance.serialize_to_vec()) { @@ -535,12 +539,12 @@ where } /// Check if the fee payer has enough transparent balance to pay fees -pub fn check_fees(wl_storage: &WLS, wrapper: &WrapperTx) -> Result<()> +pub fn check_fees(state: &S, wrapper: &WrapperTx) -> Result<()> where - WLS: WriteLogAndStorage + StorageRead, + S: State + StorageRead, { let balance = crate::token::read_balance( - wl_storage, + state, &wrapper.fee.token, &wrapper.fee_payer(), ) @@ -550,9 +554,8 @@ where .get_tx_fee() .map_err(|e| Error::FeeError(e.to_string()))?; - let fees = - crate::token::denom_to_amount(fees, &wrapper.fee.token, wl_storage) - .map_err(|e| Error::FeeError(e.to_string()))?; + let fees = crate::token::denom_to_amount(fees, &wrapper.fee.token, state) + .map_err(|e| Error::FeeError(e.to_string()))?; if balance.checked_sub(fees).is_some() { Ok(()) } else { @@ -564,37 +567,30 @@ where /// Apply a transaction going via the wasm environment. Gas will be metered and /// validity predicates will be triggered in the normal way. 
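In `token_transfer` both legs of the fee movement are staged in the tx write log (not the block write log) and both use checked arithmetic: `checked_sub` on the source and `checked_add` on the destination. A self-contained sketch of that double-entry update against a write-log map (map and key names are illustrative, and the ordering is simplified: both sides are validated before staging, whereas the diff stages the source write first):

use std::collections::HashMap;

// The write log is modeled as a key -> balance map; real keys are storage `Key`s.
fn token_transfer(
    write_log: &mut HashMap<String, u64>,
    src: &str,
    dest: &str,
    amount: u64,
) -> Result<(), String> {
    let src_balance = *write_log.get(src).unwrap_or(&0);
    let dest_balance = *write_log.get(dest).unwrap_or(&0);
    let new_src = src_balance
        .checked_sub(amount)
        .ok_or_else(|| "transfer fee: insufficient source balance".to_string())?;
    let new_dest = dest_balance
        .checked_add(amount)
        .ok_or_else(|| "transfer fee: destination balance overflow".to_string())?;
    // Only stage the writes once both sides are known to be valid.
    write_log.insert(src.to_string(), new_src);
    write_log.insert(dest.to_string(), new_dest);
    Ok(())
}

fn main() {
    let mut write_log = HashMap::new();
    write_log.insert("balance/nam/payer".to_string(), 100u64);
    token_transfer(&mut write_log, "balance/nam/payer", "balance/nam/proposer", 30).unwrap();
    assert_eq!(write_log["balance/nam/payer"], 70);
    assert_eq!(write_log["balance/nam/proposer"], 30);
}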
-pub fn apply_wasm_tx<'a, D, H, CA, WLS>( +pub fn apply_wasm_tx<'a, S, D, H, CA>( tx: Tx, tx_index: &TxIndex, - shell_params: ShellParams<'a, CA, WLS>, + shell_params: ShellParams<'a, S, D, H, CA>, ) -> Result where - CA: 'static + WasmCacheAccess + Sync, + S: State + Sync, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, - WLS: WriteLogAndStorage + StorageRead, + CA: 'static + WasmCacheAccess + Sync, { let ShellParams { tx_gas_meter, - wl_storage, + state, vp_wasm_cache, tx_wasm_cache, } = shell_params; - let (tx_gas_meter, storage, write_log, vp_wasm_cache, tx_wasm_cache) = { - let (write_log, storage) = wl_storage.split_borrow(); - ( - tx_gas_meter, - storage, - write_log, - vp_wasm_cache, - tx_wasm_cache, - ) - }; + let (tx_gas_meter, vp_wasm_cache, tx_wasm_cache) = + { (tx_gas_meter, vp_wasm_cache, tx_wasm_cache) }; let tx_hash = tx.raw_header_hash(); - if let Some(true) = write_log.has_replay_protection_entry(&tx_hash) { + if let Some(true) = state.write_log().has_replay_protection_entry(&tx_hash) + { // If the same transaction has already been applied in this block, skip // execution and return return Err(Error::ReplayAttempt(tx_hash)); @@ -603,9 +599,8 @@ where let verifiers = execute_tx( &tx, tx_index, - storage, + state, tx_gas_meter, - write_log, vp_wasm_cache, tx_wasm_cache, )?; @@ -613,17 +608,16 @@ where let vps_result = check_vps(CheckVps { tx: &tx, tx_index, - storage, - tx_gas_meter, - write_log, + state, + tx_gas_meter: &mut tx_gas_meter.borrow_mut(), verifiers_from_tx: &verifiers, vp_wasm_cache, })?; - let gas_used = tx_gas_meter.get_tx_consumed_gas(); - let initialized_accounts = write_log.get_initialized_accounts(); - let changed_keys = write_log.get_keys(); - let ibc_events = write_log.take_ibc_events(); + let gas_used = tx_gas_meter.borrow().get_tx_consumed_gas(); + let initialized_accounts = state.write_log().get_initialized_accounts(); + let changed_keys = state.write_log().get_keys(); + let ibc_events = state.write_log_mut().take_ibc_events(); Ok(TxResult { gas_used, @@ -637,10 +631,7 @@ where /// Returns [`Error::DisallowedTx`] when the given tx is inner (decrypted) tx /// and its code `Hash` is not included in the `tx_allowlist` parameter. -pub fn check_tx_allowed( - tx: &Tx, - wl_storage: &WlStorage, -) -> Result<()> +pub fn check_tx_allowed(tx: &Tx, state: &WlState) -> Result<()> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -650,11 +641,8 @@ where .get_section(tx.code_sechash()) .and_then(|x| Section::code_sec(&x)) { - if crate::parameters::is_tx_allowed( - wl_storage, - &code_sec.code.hash(), - ) - .map_err(Error::StorageError)? + if crate::parameters::is_tx_allowed(state, &code_sec.code.hash()) + .map_err(Error::StorageError)? { return Ok(()); } @@ -673,7 +661,7 @@ where pub(crate) fn apply_protocol_tx( tx: ProtocolTxType, data: Option>, - storage: &mut WlStorage, + state: &mut WlState, ) -> Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, @@ -702,15 +690,12 @@ where ) => { let ethereum_events::VextDigest { events, .. 
} = ethereum_events::VextDigest::singleton(ext); - transactions::ethereum_events::apply_derived_tx(storage, events) + transactions::ethereum_events::apply_derived_tx(state, events) .map_err(Error::ProtocolTxError) } EthereumTxData::BridgePoolVext(ext) => { - transactions::bridge_pool_roots::apply_derived_tx( - storage, - ext.into(), - ) - .map_err(Error::ProtocolTxError) + transactions::bridge_pool_roots::apply_derived_tx(state, ext.into()) + .map_err(Error::ProtocolTxError) } EthereumTxData::ValSetUpdateVext(ext) => { // NOTE(feature = "abcipp"): with ABCI++, we can write the @@ -720,7 +705,7 @@ where // to reach a complete proof. let signing_epoch = ext.data.signing_epoch; transactions::validator_set_update::aggregate_votes( - storage, + state, validator_set_update::VextDigest::singleton(ext), signing_epoch, ) @@ -741,23 +726,22 @@ where /// Execute a transaction code. Returns verifiers requested by the transaction. #[allow(clippy::too_many_arguments)] -fn execute_tx( +fn execute_tx( tx: &Tx, tx_index: &TxIndex, - storage: &State, - tx_gas_meter: &mut TxGasMeter, - write_log: &mut WriteLog, + state: &mut S, + tx_gas_meter: &RefCell, vp_wasm_cache: &mut VpCache, tx_wasm_cache: &mut TxCache, ) -> Result> where + S: State, D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, { wasm::run::tx( - storage, - write_log, + state, tx_gas_meter, tx_index, tx, @@ -772,48 +756,44 @@ where } /// Arguments to [`check_vps`]. -struct CheckVps<'a, D, H, CA> +struct CheckVps<'a, S, CA> where - D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync, + S: State, CA: 'static + WasmCacheAccess + Sync, { tx: &'a Tx, tx_index: &'a TxIndex, - storage: &'a State, + state: &'a S, tx_gas_meter: &'a mut TxGasMeter, - write_log: &'a WriteLog, verifiers_from_tx: &'a BTreeSet
, vp_wasm_cache: &'a mut VpCache, } /// Check the acceptance of a transaction by validity predicates -fn check_vps( +fn check_vps( CheckVps { tx, tx_index, - storage, + state, tx_gas_meter, - write_log, verifiers_from_tx, vp_wasm_cache, - }: CheckVps<'_, D, H, CA>, + }: CheckVps<'_, S, CA>, ) -> Result where - D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync, + S: State + Sync, CA: 'static + WasmCacheAccess + Sync, { - let (verifiers, keys_changed) = - write_log.verifiers_and_changed_keys(verifiers_from_tx); + let (verifiers, keys_changed) = state + .write_log() + .verifiers_and_changed_keys(verifiers_from_tx); let vps_result = execute_vps( verifiers, keys_changed, tx, tx_index, - storage, - write_log, + state, tx_gas_meter, vp_wasm_cache, )?; @@ -828,31 +808,31 @@ where /// Execute verifiers' validity predicates #[allow(clippy::too_many_arguments)] -fn execute_vps( +fn execute_vps( verifiers: BTreeSet
, keys_changed: BTreeSet, tx: &Tx, tx_index: &TxIndex, - storage: &State, - write_log: &WriteLog, + state: &S, tx_gas_meter: &TxGasMeter, vp_wasm_cache: &mut VpCache, ) -> Result where - D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync, + S: State + Sync, CA: 'static + WasmCacheAccess + Sync, { let vps_result = verifiers .par_iter() .try_fold(VpsResult::default, |mut result, addr| { - let mut gas_meter = VpGasMeter::new_from_tx_meter(tx_gas_meter); + let gas_meter = + RefCell::new(VpGasMeter::new_from_tx_meter(tx_gas_meter)); let accept = match &addr { Address::Implicit(_) | Address::Established(_) => { - let (vp_hash, gas) = storage + let (vp_hash, gas) = state .validity_predicate(addr) .map_err(Error::StateError)?; gas_meter + .borrow_mut() .consume(gas) .map_err(|err| Error::GasError(err.to_string()))?; let Some(vp_code_hash) = vp_hash else { @@ -869,9 +849,8 @@ where tx, tx_index, addr, - storage, - write_log, - &mut gas_meter, + state, + &gas_meter, &keys_changed, &verifiers, vp_wasm_cache.clone(), @@ -885,185 +864,94 @@ where }) } Address::Internal(internal_addr) => { + let sentinel = RefCell::new(VpSentinel::default()); let ctx = native_vp::Ctx::new( addr, - storage, - write_log, + state, tx, tx_index, - gas_meter, + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache.clone(), ); - let (accepted, sentinel): (Result, _) = - match internal_addr { - InternalAddress::PoS => { - let pos = PosVP { ctx }; - let verifiers_addr_ref = &verifiers; - let pos_ref = &pos; - // TODO this is temporarily ran in a new thread - // to - // avoid crashing the ledger (required - // `UnwindSafe` - // and `RefUnwindSafe` in - // namada/src/ledger/pos/vp.rs) - let keys_changed_ref = &keys_changed; - let result = pos_ref - .validate_tx( - tx, - keys_changed_ref, - verifiers_addr_ref, - ) - .map_err(Error::PosNativeVpError); - // Take the gas meter and sentinel - // back - // out of the context - gas_meter = pos.ctx.gas_meter.into_inner(); - (result, pos.ctx.sentinel.into_inner()) - } - InternalAddress::Ibc => { - let ibc = Ibc { ctx }; - let result = ibc - .validate_tx(tx, &keys_changed, &verifiers) - .map_err(Error::IbcNativeVpError); - // Take the gas meter and the sentinel - // back - // out of the context - gas_meter = ibc.ctx.gas_meter.into_inner(); - (result, ibc.ctx.sentinel.into_inner()) - } - InternalAddress::Parameters => { - let parameters = ParametersVp { ctx }; - let result = parameters - .validate_tx(tx, &keys_changed, &verifiers) - .map_err(Error::ParametersNativeVpError); - // Take the gas meter and the sentinel - // back - // out of the context - gas_meter = - parameters.ctx.gas_meter.into_inner(); - (result, parameters.ctx.sentinel.into_inner()) - } - InternalAddress::PosSlashPool => { - // Take the gas meter and the sentinel - // back - // out of the context - gas_meter = ctx.gas_meter.into_inner(); - ( - Err(Error::AccessForbidden( - (*internal_addr).clone(), - )), - ctx.sentinel.into_inner(), - ) - } - InternalAddress::Governance => { - let governance = GovernanceVp { ctx }; - let result = governance - .validate_tx(tx, &keys_changed, &verifiers) - .map_err(Error::GovernanceNativeVpError); - // Take the gas meter and the sentinel - // back - // out of the context - gas_meter = - governance.ctx.gas_meter.into_inner(); - (result, governance.ctx.sentinel.into_inner()) - } - InternalAddress::Multitoken => { - let multitoken = MultitokenVp { ctx }; - let result = multitoken - .validate_tx(tx, &keys_changed, &verifiers) - 
.map_err(Error::MultitokenNativeVpError); - // Take the gas meter and the sentinel - // back - // out of the context - gas_meter = - multitoken.ctx.gas_meter.into_inner(); - (result, multitoken.ctx.sentinel.into_inner()) - } - InternalAddress::EthBridge => { - let bridge = EthBridge { ctx }; - let result = bridge - .validate_tx(tx, &keys_changed, &verifiers) - .map_err(Error::EthBridgeNativeVpError); - // Take the gas meter and the sentinel - // back - // out of the context - gas_meter = bridge.ctx.gas_meter.into_inner(); - (result, bridge.ctx.sentinel.into_inner()) - } - InternalAddress::EthBridgePool => { - let bridge_pool = BridgePoolVp { ctx }; - let result = bridge_pool - .validate_tx(tx, &keys_changed, &verifiers) - .map_err(Error::BridgePoolNativeVpError); - // Take the gas meter and the sentinel - // back - // out of the context - gas_meter = - bridge_pool.ctx.gas_meter.into_inner(); - (result, bridge_pool.ctx.sentinel.into_inner()) - } - InternalAddress::Pgf => { - let pgf_vp = PgfVp { ctx }; - let result = pgf_vp - .validate_tx(tx, &keys_changed, &verifiers) - .map_err(Error::PgfNativeVpError); - // Take the gas meter and the sentinel - // back - // out of the context - gas_meter = pgf_vp.ctx.gas_meter.into_inner(); - (result, pgf_vp.ctx.sentinel.into_inner()) - } - InternalAddress::Nut(_) => { - let non_usable_tokens = NonUsableTokens { ctx }; - let result = non_usable_tokens - .validate_tx(tx, &keys_changed, &verifiers) - .map_err(Error::NutNativeVpError); - // Take the gas meter and the sentinel - // back - // out of the context - gas_meter = non_usable_tokens - .ctx - .gas_meter - .into_inner(); - ( - result, - non_usable_tokens.ctx.sentinel.into_inner(), - ) - } - InternalAddress::IbcToken(_) - | InternalAddress::Erc20(_) => { - // The address should be a part of a multitoken - // key - // Take the gas meter and the sentinel - // back - // out of the context - gas_meter = ctx.gas_meter.into_inner(); - ( - Ok(verifiers.contains(&Address::Internal( - InternalAddress::Multitoken, - ))), - ctx.sentinel.into_inner(), - ) - } - InternalAddress::Masp => { - let masp = MaspVp { ctx }; - let result = masp - .validate_tx(tx, &keys_changed, &verifiers) - .map_err(Error::MaspNativeVpError); - // Take the gas meter and the sentinel back out - // of the context - gas_meter = masp.ctx.gas_meter.into_inner(); - (result, masp.ctx.sentinel.into_inner()) - } - }; + let accepted: Result = match internal_addr { + InternalAddress::PoS => { + let pos = PosVP { ctx }; + pos.validate_tx(tx, &keys_changed, &verifiers) + .map_err(Error::PosNativeVpError) + } + InternalAddress::Ibc => { + let ibc = Ibc { ctx }; + ibc.validate_tx(tx, &keys_changed, &verifiers) + .map_err(Error::IbcNativeVpError) + } + InternalAddress::Parameters => { + let parameters = ParametersVp { ctx }; + parameters + .validate_tx(tx, &keys_changed, &verifiers) + .map_err(Error::ParametersNativeVpError) + } + InternalAddress::PosSlashPool => Err( + Error::AccessForbidden((*internal_addr).clone()), + ), + InternalAddress::Governance => { + let governance = GovernanceVp { ctx }; + governance + .validate_tx(tx, &keys_changed, &verifiers) + .map_err(Error::GovernanceNativeVpError) + } + InternalAddress::Multitoken => { + let multitoken = MultitokenVp { ctx }; + multitoken + .validate_tx(tx, &keys_changed, &verifiers) + .map_err(Error::MultitokenNativeVpError) + } + InternalAddress::EthBridge => { + let bridge = EthBridge { ctx }; + bridge + .validate_tx(tx, &keys_changed, &verifiers) + .map_err(Error::EthBridgeNativeVpError) + } + 
InternalAddress::EthBridgePool => { + let bridge_pool = BridgePoolVp { ctx }; + bridge_pool + .validate_tx(tx, &keys_changed, &verifiers) + .map_err(Error::BridgePoolNativeVpError) + } + InternalAddress::Pgf => { + let pgf_vp = PgfVp { ctx }; + pgf_vp + .validate_tx(tx, &keys_changed, &verifiers) + .map_err(Error::PgfNativeVpError) + } + InternalAddress::Nut(_) => { + let non_usable_tokens = NonUsableTokens { ctx }; + non_usable_tokens + .validate_tx(tx, &keys_changed, &verifiers) + .map_err(Error::NutNativeVpError) + } + InternalAddress::IbcToken(_) + | InternalAddress::Erc20(_) => { + // The address should be a part of a multitoken + // key + Ok(verifiers.contains(&Address::Internal( + InternalAddress::Multitoken, + ))) + } + InternalAddress::Masp => { + let masp = MaspVp { ctx }; + masp.validate_tx(tx, &keys_changed, &verifiers) + .map_err(Error::MaspNativeVpError) + } + }; accepted.map_err(|err| { // No need to check invalid sig because internal vps // don't check the signature - if sentinel.is_out_of_gas() { + if sentinel.borrow().is_out_of_gas() { Error::GasError(err.to_string()) } else { err @@ -1105,7 +993,7 @@ where result .gas_used - .set(gas_meter) + .set(gas_meter.into_inner()) .map_err(|err| Error::GasError(err.to_string()))?; Ok(result) @@ -1175,14 +1063,14 @@ mod tests { fn apply_eth_tx( tx: EthereumTxData, - wl_storage: &mut WlStorage, + state: &mut WlState, ) -> Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { let (data, tx) = tx.serialize(); - let tx_result = apply_protocol_tx(tx, Some(data), wl_storage)?; + let tx_result = apply_protocol_tx(tx, Some(data), state)?; Ok(tx_result) } @@ -1196,7 +1084,7 @@ mod tests { let validator_a_stake = Amount::native_whole(100); let validator_b_stake = Amount::native_whole(100); let total_stake = validator_a_stake + validator_b_stake; - let (mut wl_storage, _) = test_utils::setup_storage_with_validators( + let (mut state, _) = test_utils::setup_storage_with_validators( HashMap::from_iter(vec![ (validator_a.clone(), validator_a_stake), (validator_b, validator_b_stake), @@ -1221,11 +1109,11 @@ mod tests { namada_vote_ext::ethereum_events::SignedVext(signed), ); - apply_eth_tx(tx.clone(), &mut wl_storage)?; - apply_eth_tx(tx, &mut wl_storage)?; + apply_eth_tx(tx.clone(), &mut state)?; + apply_eth_tx(tx, &mut state)?; let eth_msg_keys = vote_tallies::Keys::from(&event); - let seen_by_bytes = wl_storage.read_bytes(ð_msg_keys.seen_by())?; + let seen_by_bytes = state.read_bytes(ð_msg_keys.seen_by())?; let seen_by_bytes = seen_by_bytes.unwrap(); assert_eq!( Votes::try_from_slice(&seen_by_bytes)?, @@ -1234,7 +1122,7 @@ mod tests { // the vote should have only be applied once let voting_power: EpochedVotingPower = - wl_storage.read(ð_msg_keys.voting_power())?.unwrap(); + state.read(ð_msg_keys.voting_power())?.unwrap(); let expected = EpochedVotingPower::from([( 0.into(), FractionalVotingPower::HALF * total_stake, @@ -1254,18 +1142,18 @@ mod tests { let validator_a_stake = Amount::native_whole(100); let validator_b_stake = Amount::native_whole(100); let total_stake = validator_a_stake + validator_b_stake; - let (mut wl_storage, keys) = test_utils::setup_storage_with_validators( + let (mut state, keys) = test_utils::setup_storage_with_validators( HashMap::from_iter(vec![ (validator_a.clone(), validator_a_stake), (validator_b, validator_b_stake), ]), ); - vp::bridge_pool::init_storage(&mut wl_storage); + vp::bridge_pool::init_storage(&mut state); - let root = 
wl_storage.ethbridge_queries().get_bridge_pool_root(); - let nonce = wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + let root = state.ethbridge_queries().get_bridge_pool_root(); + let nonce = state.ethbridge_queries().get_bridge_pool_nonce(); test_utils::commit_bridge_pool_root_at_height( - &mut wl_storage, + &mut state, &root, 100.into(), ); @@ -1281,22 +1169,21 @@ mod tests { } .sign(&signing_key); let tx = EthereumTxData::BridgePoolVext(vext); - apply_eth_tx(tx.clone(), &mut wl_storage)?; - apply_eth_tx(tx, &mut wl_storage)?; + apply_eth_tx(tx.clone(), &mut state)?; + apply_eth_tx(tx, &mut state)?; let bp_root_keys = vote_tallies::Keys::from(( &vote_tallies::BridgePoolRoot(EthereumProof::new((root, nonce))), 100.into(), )); - let root_seen_by_bytes = - wl_storage.read_bytes(&bp_root_keys.seen_by())?; + let root_seen_by_bytes = state.read_bytes(&bp_root_keys.seen_by())?; assert_eq!( Votes::try_from_slice(root_seen_by_bytes.as_ref().unwrap())?, Votes::from([(validator_a, BlockHeight(100))]) ); // the vote should have only be applied once let voting_power: EpochedVotingPower = - wl_storage.read(&bp_root_keys.voting_power())?.unwrap(); + state.read(&bp_root_keys.voting_power())?.unwrap(); let expected = EpochedVotingPower::from([( 0.into(), FractionalVotingPower::HALF * total_stake, @@ -1308,7 +1195,7 @@ mod tests { #[test] fn test_apply_wasm_tx_allowlist() { - let (mut wl_storage, _validators) = test_utils::setup_default_storage(); + let (mut state, _validators) = test_utils::setup_default_storage(); let mut tx = Tx::new(ChainId::default(), None); tx.update_header(TxType::Decrypted(DecryptedTx::Decrypted)); @@ -1321,13 +1208,12 @@ mod tests { { let allowlist = vec![format!("{}-bad", tx_hash)]; crate::parameters::update_tx_allowlist_parameter( - &mut wl_storage, - allowlist, + &mut state, allowlist, ) .unwrap(); - wl_storage.commit_tx(); + state.commit_tx(); - let result = check_tx_allowed(&tx, &wl_storage); + let result = check_tx_allowed(&tx, &state); assert_matches!(result.unwrap_err(), Error::DisallowedTx); } @@ -1335,13 +1221,12 @@ mod tests { { let allowlist = vec![tx_hash.to_string()]; crate::parameters::update_tx_allowlist_parameter( - &mut wl_storage, - allowlist, + &mut state, allowlist, ) .unwrap(); - wl_storage.commit_tx(); + state.commit_tx(); - let result = check_tx_allowed(&tx, &wl_storage); + let result = check_tx_allowed(&tx, &state); if let Err(result) = result { assert!(!matches!(result, Error::DisallowedTx)); } diff --git a/crates/namada/src/ledger/storage/mod.rs b/crates/namada/src/ledger/storage/mod.rs index d78fd79ec8..dcae9a776d 100644 --- a/crates/namada/src/ledger/storage/mod.rs +++ b/crates/namada/src/ledger/storage/mod.rs @@ -1,3 +1,3 @@ //! Ledger's state storage with key-value backed store and a merkle tree -pub use namada_state::{write_log, PrefixIter, WlStorage, *}; +pub use namada_state::{write_log, PrefixIter, *}; diff --git a/crates/namada/src/ledger/vp_host_fns.rs b/crates/namada/src/ledger/vp_host_fns.rs index 716a49a66a..36421497ec 100644 --- a/crates/namada/src/ledger/vp_host_fns.rs +++ b/crates/namada/src/ledger/vp_host_fns.rs @@ -1,6 +1,8 @@ //! Host functions for VPs used for both native and WASM VPs. 
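The `execute_vps` changes above replace the old pattern of moving the gas meter into each native VP's `Ctx` and pulling it back out with `ctx.gas_meter.into_inner()` by a single `RefCell`-wrapped meter that every verifier borrows mutably while it runs, and that is unwrapped once at the end for `result.gas_used.set(gas_meter.into_inner())`. A minimal, self-contained sketch of that borrow-then-unwrap pattern, using a simplified stand-in meter rather than the real `VpGasMeter`:

    use std::cell::RefCell;

    /// Simplified stand-in for a gas meter (hypothetical, not the real type).
    struct Meter {
        used: u64,
        limit: u64,
    }

    impl Meter {
        fn consume(&mut self, gas: u64) -> Result<(), String> {
            self.used += gas;
            if self.used > self.limit {
                Err("out of gas".into())
            } else {
                Ok(())
            }
        }
    }

    fn main() {
        // One meter, shared by reference with every check that charges gas.
        let meter = RefCell::new(Meter { used: 0, limit: 100 });

        // Each "VP" only needs `&RefCell<Meter>` and borrows mutably for the
        // duration of a single charge, so the caller keeps ownership throughout.
        let charge = |gas: u64| meter.borrow_mut().consume(gas);
        charge(10).unwrap();
        charge(25).unwrap();

        // After all checks ran, unwrap the meter to record total gas used,
        // mirroring `result.gas_used.set(gas_meter.into_inner())` in the diff.
        let final_meter = meter.into_inner();
        assert_eq!(final_meter.used, 35);
    }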
+use std::fmt::Debug; use std::num::TryFromIntError; +use std::ops::DerefMut; use namada_core::address::{Address, ESTABLISHED_ADDRESS_BYTES_LEN}; use namada_core::hash::{Hash, HASH_LENGTH}; @@ -11,7 +13,7 @@ use namada_core::storage::{ use namada_core::validity_predicate::VpSentinel; use namada_gas::MEMORY_ACCESS_GAS_PER_BYTE; use namada_state::write_log::WriteLog; -use namada_state::{write_log, State, StorageHasher}; +use namada_state::{write_log, DBIter, StateRead, DB}; use namada_tx::{Section, Tx}; use thiserror::Error; @@ -50,9 +52,9 @@ pub type EnvResult = std::result::Result; /// Add a gas cost incured in a validity predicate pub fn add_gas( - gas_meter: &mut VpGasMeter, + gas_meter: &mut impl DerefMut, used_gas: u64, - sentinel: &mut VpSentinel, + sentinel: &mut impl DerefMut, ) -> EnvResult<()> { gas_meter.consume(used_gas).map_err(|err| { sentinel.set_out_of_gas(); @@ -63,18 +65,16 @@ pub fn add_gas( /// Storage read prior state (before tx execution). It will try to read from the /// storage. -pub fn read_pre( - gas_meter: &mut VpGasMeter, - storage: &State, - write_log: &WriteLog, +pub fn read_pre( + gas_meter: &mut impl DerefMut, + state: &S, key: &Key, - sentinel: &mut VpSentinel, + sentinel: &mut impl DerefMut, ) -> EnvResult>> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead + Debug, { - let (log_val, gas) = write_log.read_pre(key); + let (log_val, gas) = state.write_log().read_pre(key); add_gas(gas_meter, gas, sentinel)?; match log_val { Some(write_log::StorageModification::Write { ref value }) => { @@ -96,7 +96,7 @@ where None => { // When not found in write log, try to read from the storage let (value, gas) = - storage.read(key).map_err(RuntimeError::StorageError)?; + state.db_read(key).map_err(RuntimeError::StorageError)?; add_gas(gas_meter, gas, sentinel)?; Ok(value) } @@ -105,19 +105,17 @@ where /// Storage read posterior state (after tx execution). It will try to read from /// the write log first and if no entry found then from the storage. -pub fn read_post( - gas_meter: &mut VpGasMeter, - storage: &State, - write_log: &WriteLog, +pub fn read_post( + gas_meter: &mut impl DerefMut, + state: &S, key: &Key, - sentinel: &mut VpSentinel, + sentinel: &mut impl DerefMut, ) -> EnvResult>> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead + Debug, { // Try to read from the write log first - let (log_val, gas) = write_log.read(key); + let (log_val, gas) = state.write_log().read(key); add_gas(gas_meter, gas, sentinel)?; match log_val { Some(write_log::StorageModification::Write { ref value }) => { @@ -139,7 +137,7 @@ where None => { // When not found in write log, try to read from the storage let (value, gas) = - storage.read(key).map_err(RuntimeError::StorageError)?; + state.db_read(key).map_err(RuntimeError::StorageError)?; add_gas(gas_meter, gas, sentinel)?; Ok(value) } @@ -148,14 +146,17 @@ where /// Storage read temporary state (after tx execution). It will try to read from /// only the write log. 
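The `read_pre`/`read_post` host functions above resolve a key against the write log first (`write_log().read_pre` for prior state, `write_log().read` for posterior state) and only fall back to `state.db_read` when the transaction did not touch the key. A self-contained sketch of that resolution order, with plain `HashMap`s standing in for the write log and the DB:

    use std::collections::HashMap;

    /// Simplified model of a write-log entry (the real type is
    /// `write_log::StorageModification`).
    enum Modification {
        Write(Vec<u8>),
        Delete,
    }

    /// Posterior read: consult the tx-local write log first and only fall
    /// back to committed storage when the key was not touched by the tx.
    fn read_post(
        write_log: &HashMap<String, Modification>,
        storage: &HashMap<String, Vec<u8>>,
        key: &str,
    ) -> Option<Vec<u8>> {
        match write_log.get(key) {
            Some(Modification::Write(value)) => Some(value.clone()),
            // A delete in the write log shadows any committed value.
            Some(Modification::Delete) => None,
            None => storage.get(key).cloned(),
        }
    }

    fn main() {
        let mut storage = HashMap::new();
        storage.insert("a".to_string(), b"old".to_vec());
        let mut write_log = HashMap::new();
        write_log.insert("a".to_string(), Modification::Write(b"new".to_vec()));

        assert_eq!(read_post(&write_log, &storage, "a"), Some(b"new".to_vec()));
    }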
-pub fn read_temp( - gas_meter: &mut VpGasMeter, - write_log: &WriteLog, +pub fn read_temp( + gas_meter: &mut impl DerefMut, + state: &S, key: &Key, - sentinel: &mut VpSentinel, -) -> EnvResult>> { + sentinel: &mut impl DerefMut, +) -> EnvResult>> +where + S: StateRead + Debug, +{ // Try to read from the write log first - let (log_val, gas) = write_log.read(key); + let (log_val, gas) = state.write_log().read(key); add_gas(gas_meter, gas, sentinel)?; match log_val { Some(write_log::StorageModification::Temp { ref value }) => { @@ -168,19 +169,17 @@ pub fn read_temp( /// Storage `has_key` in prior state (before tx execution). It will try to read /// from the storage. -pub fn has_key_pre( - gas_meter: &mut VpGasMeter, - storage: &State, - write_log: &WriteLog, +pub fn has_key_pre( + gas_meter: &mut impl DerefMut, + state: &S, key: &Key, - sentinel: &mut VpSentinel, + sentinel: &mut impl DerefMut, ) -> EnvResult where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead + Debug, { // Try to read from the write log first - let (log_val, gas) = write_log.read_pre(key); + let (log_val, gas) = state.write_log().read_pre(key); add_gas(gas_meter, gas, sentinel)?; match log_val { Some(&write_log::StorageModification::Write { .. }) => Ok(true), @@ -193,7 +192,7 @@ where None => { // When not found in write log, try to check the storage let (present, gas) = - storage.has_key(key).map_err(RuntimeError::StorageError)?; + state.db_has_key(key).map_err(RuntimeError::StorageError)?; add_gas(gas_meter, gas, sentinel)?; Ok(present) } @@ -202,19 +201,17 @@ where /// Storage `has_key` in posterior state (after tx execution). It will try to /// check the write log first and if no entry found then the storage. -pub fn has_key_post( - gas_meter: &mut VpGasMeter, - storage: &State, - write_log: &WriteLog, +pub fn has_key_post( + gas_meter: &mut impl DerefMut, + state: &S, key: &Key, - sentinel: &mut VpSentinel, + sentinel: &mut impl DerefMut, ) -> EnvResult where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead + Debug, { // Try to read from the write log first - let (log_val, gas) = write_log.read(key); + let (log_val, gas) = state.write_log().read(key); add_gas(gas_meter, gas, sentinel)?; match log_val { Some(&write_log::StorageModification::Write { .. }) => Ok(true), @@ -227,7 +224,7 @@ where None => { // When not found in write log, try to check the storage let (present, gas) = - storage.has_key(key).map_err(RuntimeError::StorageError)?; + state.db_has_key(key).map_err(RuntimeError::StorageError)?; add_gas(gas_meter, gas, sentinel)?; Ok(present) } @@ -235,49 +232,45 @@ where } /// Getting the chain ID. -pub fn get_chain_id( - gas_meter: &mut VpGasMeter, - storage: &State, - sentinel: &mut VpSentinel, +pub fn get_chain_id( + gas_meter: &mut impl DerefMut, + state: &S, + sentinel: &mut impl DerefMut, ) -> EnvResult where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead + Debug, { - let (chain_id, gas) = storage.get_chain_id(); + let (chain_id, gas) = state.in_mem().get_chain_id(); add_gas(gas_meter, gas, sentinel)?; Ok(chain_id) } /// Getting the block height. The height is that of the block to which the /// current transaction is being applied. 
-pub fn get_block_height( - gas_meter: &mut VpGasMeter, - storage: &State, - sentinel: &mut VpSentinel, +pub fn get_block_height( + gas_meter: &mut impl DerefMut, + state: &S, + sentinel: &mut impl DerefMut, ) -> EnvResult where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead + Debug, { - let (height, gas) = storage.get_block_height(); + let (height, gas) = state.in_mem().get_block_height(); add_gas(gas_meter, gas, sentinel)?; Ok(height) } /// Getting the block header. -pub fn get_block_header( - gas_meter: &mut VpGasMeter, - storage: &State, +pub fn get_block_header( + gas_meter: &mut impl DerefMut, + state: &S, height: BlockHeight, - sentinel: &mut VpSentinel, + sentinel: &mut impl DerefMut, ) -> EnvResult> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead + Debug, { - let (header, gas) = storage - .get_block_header(Some(height)) + let (header, gas) = StateRead::get_block_header(state, Some(height)) .map_err(RuntimeError::StorageError)?; add_gas(gas_meter, gas, sentinel)?; Ok(header) @@ -285,16 +278,15 @@ where /// Getting the block hash. The height is that of the block to which the /// current transaction is being applied. -pub fn get_block_hash( - gas_meter: &mut VpGasMeter, - storage: &State, - sentinel: &mut VpSentinel, +pub fn get_block_hash( + gas_meter: &mut impl DerefMut, + state: &S, + sentinel: &mut impl DerefMut, ) -> EnvResult where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead + Debug, { - let (hash, gas) = storage.get_block_hash(); + let (hash, gas) = state.in_mem().get_block_hash(); add_gas(gas_meter, gas, sentinel)?; Ok(hash) } @@ -302,9 +294,9 @@ where /// Getting the block hash. The height is that of the block to which the /// current transaction is being applied. pub fn get_tx_code_hash( - gas_meter: &mut VpGasMeter, + gas_meter: &mut impl DerefMut, tx: &Tx, - sentinel: &mut VpSentinel, + sentinel: &mut impl DerefMut, ) -> EnvResult> { add_gas( gas_meter, @@ -320,16 +312,15 @@ pub fn get_tx_code_hash( /// Getting the block epoch. The epoch is that of the block to which the /// current transaction is being applied. -pub fn get_block_epoch( - gas_meter: &mut VpGasMeter, - storage: &State, - sentinel: &mut VpSentinel, +pub fn get_block_epoch( + gas_meter: &mut impl DerefMut, + state: &S, + sentinel: &mut impl DerefMut, ) -> EnvResult where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead + Debug, { - let (epoch, gas) = storage.get_current_epoch(); + let (epoch, gas) = state.in_mem().get_current_epoch(); add_gas(gas_meter, gas, sentinel)?; Ok(epoch) } @@ -337,9 +328,9 @@ where /// Getting the block epoch. The epoch is that of the block to which the /// current transaction is being applied. pub fn get_tx_index( - gas_meter: &mut VpGasMeter, + gas_meter: &mut impl DerefMut, tx_index: &TxIndex, - sentinel: &mut VpSentinel, + sentinel: &mut impl DerefMut, ) -> EnvResult { add_gas( gas_meter, @@ -350,50 +341,52 @@ pub fn get_tx_index( } /// Getting the native token's address. -pub fn get_native_token( - gas_meter: &mut VpGasMeter, - storage: &State, - sentinel: &mut VpSentinel, +pub fn get_native_token( + gas_meter: &mut impl DerefMut, + state: &S, + sentinel: &mut impl DerefMut, ) -> EnvResult
where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead + Debug, { add_gas( gas_meter, ESTABLISHED_ADDRESS_BYTES_LEN as u64 * MEMORY_ACCESS_GAS_PER_BYTE, sentinel, )?; - Ok(storage.native_token.clone()) + Ok(state.in_mem().native_token.clone()) } /// Given the information about predecessor block epochs -pub fn get_pred_epochs( - gas_meter: &mut VpGasMeter, - storage: &State, - sentinel: &mut VpSentinel, +pub fn get_pred_epochs( + gas_meter: &mut impl DerefMut, + state: &S, + sentinel: &mut impl DerefMut, ) -> EnvResult where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead + Debug, { add_gas( gas_meter, - storage.block.pred_epochs.first_block_heights.len() as u64 + state.in_mem().block.pred_epochs.first_block_heights.len() as u64 * 8 * MEMORY_ACCESS_GAS_PER_BYTE, sentinel, )?; - Ok(storage.block.pred_epochs.clone()) + Ok(state.in_mem().block.pred_epochs.clone()) } /// Getting the IBC event. -pub fn get_ibc_events( - _gas_meter: &mut VpGasMeter, - write_log: &WriteLog, +pub fn get_ibc_events( + _gas_meter: &mut impl DerefMut, + state: &S, event_type: String, -) -> EnvResult> { - Ok(write_log +) -> EnvResult> +where + S: StateRead + Debug, +{ + Ok(state + .write_log() .get_ibc_events() .iter() .filter(|event| event.event_type == event_type) @@ -403,46 +396,49 @@ pub fn get_ibc_events( /// Storage prefix iterator for prior state (before tx execution), ordered by /// storage keys. It will try to get an iterator from the storage. -pub fn iter_prefix_pre<'a, DB, H>( - gas_meter: &mut VpGasMeter, +pub fn iter_prefix_pre<'a, D>( + gas_meter: &mut impl DerefMut, + // We cannot use e.g. `&'a State`, because it doesn't live long + // enough - the lifetime of the `PrefixIter` must depend on the lifetime of + // references to the `WriteLog` and `DB`. write_log: &'a WriteLog, - storage: &'a State, + db: &'a D, prefix: &Key, - sentinel: &mut VpSentinel, -) -> EnvResult> + sentinel: &mut impl DerefMut, +) -> EnvResult> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: DB + for<'iter> DBIter<'iter>, { - let (iter, gas) = namada_state::iter_prefix_pre(write_log, storage, prefix); + let (iter, gas) = namada_state::iter_prefix_pre(write_log, db, prefix); add_gas(gas_meter, gas, sentinel)?; Ok(iter) } /// Storage prefix iterator for posterior state (after tx execution), ordered by /// storage keys. It will try to get an iterator from the storage. -pub fn iter_prefix_post<'a, DB, H>( - gas_meter: &mut VpGasMeter, +pub fn iter_prefix_post<'a, D>( + gas_meter: &mut impl DerefMut, + // We cannot use e.g. `&'a State`, because it doesn't live long + // enough - the lifetime of the `PrefixIter` must depend on the lifetime of + // references to the `WriteLog` and `DB`. write_log: &'a WriteLog, - storage: &'a State, + db: &'a D, prefix: &Key, - sentinel: &mut VpSentinel, -) -> EnvResult> + sentinel: &mut impl DerefMut, +) -> EnvResult> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: DB + for<'iter> DBIter<'iter>, { - let (iter, gas) = - namada_state::iter_prefix_post(write_log, storage, prefix); + let (iter, gas) = namada_state::iter_prefix_post(write_log, db, prefix); add_gas(gas_meter, gas, sentinel)?; Ok(iter) } /// Get the next item in a storage prefix iterator (pre or post). 
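The gas meter and sentinel parameters in the host functions above are now written as `&mut impl DerefMut` (their `Target` types, presumably `VpGasMeter` and `VpSentinel`, are elided in this rendering). The point of the generic is that the same function accepts either a plain `&mut` reference or the `RefMut` handed out by the `RefCell` stored in the VP context. A small sketch of why that works, with a stand-in `Meter` type:

    use std::cell::RefCell;
    use std::ops::DerefMut;

    struct Meter(u64);

    impl Meter {
        fn consume(&mut self, gas: u64) {
            self.0 += gas;
        }
    }

    // Generic over "anything that derefs mutably to a Meter": works both with a
    // plain `&mut Meter` and with the `RefMut<Meter>` handed out by a RefCell.
    fn add_gas(meter: &mut impl DerefMut<Target = Meter>, gas: u64) {
        meter.consume(gas);
    }

    fn main() {
        // Call with a plain mutable reference...
        let mut direct = Meter(0);
        let mut as_ref = &mut direct;
        add_gas(&mut as_ref, 1);

        // ...and with a RefMut borrowed out of a RefCell, as the VP ctx does.
        let cell = RefCell::new(Meter(0));
        let mut borrowed = cell.borrow_mut();
        add_gas(&mut borrowed, 2);
        drop(borrowed);

        assert_eq!(direct.0, 1);
        assert_eq!(cell.into_inner().0, 2);
    }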
pub fn iter_next( - gas_meter: &mut VpGasMeter, + gas_meter: &mut impl DerefMut, iter: &mut namada_state::PrefixIter, - sentinel: &mut VpSentinel, + sentinel: &mut impl DerefMut, ) -> EnvResult)>> where DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, diff --git a/crates/namada/src/vm/host_env.rs b/crates/namada/src/vm/host_env.rs index 11acd72987..a0652a9f97 100644 --- a/crates/namada/src/vm/host_env.rs +++ b/crates/namada/src/vm/host_env.rs @@ -1,7 +1,9 @@ //! Virtual machine's host environment exposes functions that may be called from //! within a virtual machine. +use std::cell::{RefCell, RefMut}; use std::collections::BTreeSet; use std::convert::TryInto; +use std::fmt::Debug; use std::num::TryFromIntError; use borsh::BorshDeserialize; @@ -9,14 +11,17 @@ use borsh_ext::BorshSerializeExt; use masp_primitives::transaction::Transaction; use namada_core::address::ESTABLISHED_ADDRESS_BYTES_LEN; use namada_core::internal::KeyVal; -use namada_core::storage::{Epochs, TX_INDEX_LENGTH}; +use namada_core::storage::TX_INDEX_LENGTH; use namada_core::validity_predicate::VpSentinel; use namada_gas::{ self as gas, GasMetering, TxGasMeter, VpGasMeter, MEMORY_ACCESS_GAS_PER_BYTE, }; use namada_state::write_log::{self, WriteLog}; -use namada_state::{self, ResultExt, State, StorageError, StorageHasher}; +use namada_state::{ + DBIter, InMemory, State, StateRead, StorageError, StorageHasher, + StorageRead, StorageWrite, TxHostEnvState, VpHostEnvState, DB, +}; use namada_token::storage_key::is_any_token_parameter_key; use namada_tx::data::TxSentinel; use namada_tx::Tx; @@ -32,10 +37,9 @@ use crate::hash::Hash; use crate::ibc::IbcEvent; use crate::internal::HostEnvResult; use crate::ledger::vp_host_fns; -use crate::storage::{BlockHeight, Epoch, Key, TxIndex}; +use crate::storage::{BlockHeight, Key, TxIndex}; use crate::token::storage_key::{ - balance_key, is_any_minted_balance_key, is_any_minter_key, - is_any_token_balance_key, minted_balance_key, minter_key, + is_any_minted_balance_key, is_any_minter_key, is_any_token_balance_key, }; use crate::vm::memory::VmMemory; use crate::vm::prefix_iter::{PrefixIteratorId, PrefixIterators}; @@ -86,37 +90,39 @@ pub enum TxRuntimeError { pub type TxResult = std::result::Result; /// A transaction's host environment -pub struct TxVmEnv<'a, MEM, DB, H, CA> +pub struct TxVmEnv<'a, MEM, D, H, CA> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, CA: WasmCacheAccess, { /// The VM memory for bi-directional data passing pub memory: MEM, /// The tx context contains references to host structures. - pub ctx: TxCtx<'a, DB, H, CA>, + pub ctx: TxCtx<'a, D, H, CA>, } /// A transaction's host context #[derive(Debug)] -pub struct TxCtx<'a, DB, H, CA> +pub struct TxCtx<'a, D, H, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, CA: WasmCacheAccess, { - /// Read-only access to the storage. - pub storage: HostRef<'a, &'a State>, - /// Read/write access to the write log. + /// Mutable access to write log. pub write_log: MutHostRef<'a, &'a WriteLog>, + /// Read-only access to in-memory state. + pub in_mem: HostRef<'a, &'a InMemory>, + /// Read-only access to DB. + pub db: HostRef<'a, &'a D>, /// Storage prefix iterators. - pub iterators: MutHostRef<'a, &'a PrefixIterators<'a, DB>>, - /// Transaction gas meter. 
- pub gas_meter: MutHostRef<'a, &'a TxGasMeter>, - /// Transaction sentinel - pub sentinel: MutHostRef<'a, &'a TxSentinel>, + pub iterators: MutHostRef<'a, &'a PrefixIterators<'a, D>>, + /// Transaction gas meter. In `RefCell` to charge gas in read-only fns. + pub gas_meter: HostRef<'a, &'a RefCell>, + /// Transaction sentinel. In `RefCell` to charge gas in read-only fns. + pub sentinel: HostRef<'a, &'a RefCell>, /// The transaction code is used for signature verification pub tx: HostRef<'a, &'a Tx>, /// The transaction index is used to identify a shielded transaction's @@ -138,10 +144,10 @@ where pub cache_access: std::marker::PhantomData, } -impl<'a, MEM, DB, H, CA> TxVmEnv<'a, MEM, DB, H, CA> +impl<'a, MEM, D, H, CA> TxVmEnv<'a, MEM, D, H, CA> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, CA: WasmCacheAccess, { @@ -155,11 +161,12 @@ where #[allow(clippy::too_many_arguments)] pub fn new( memory: MEM, - storage: &State, write_log: &mut WriteLog, - iterators: &mut PrefixIterators<'a, DB>, - gas_meter: &mut TxGasMeter, - sentinel: &mut TxSentinel, + in_mem: &InMemory, + db: &D, + iterators: &mut PrefixIterators<'a, D>, + gas_meter: &RefCell, + sentinel: &RefCell, tx: &Tx, tx_index: &TxIndex, verifiers: &mut BTreeSet
, @@ -167,11 +174,12 @@ where #[cfg(feature = "wasm-runtime")] vp_wasm_cache: &mut VpCache, #[cfg(feature = "wasm-runtime")] tx_wasm_cache: &mut TxCache, ) -> Self { - let storage = unsafe { HostRef::new(storage) }; let write_log = unsafe { MutHostRef::new(write_log) }; + let in_mem = unsafe { HostRef::new(in_mem) }; + let db = unsafe { HostRef::new(db) }; let iterators = unsafe { MutHostRef::new(iterators) }; - let gas_meter = unsafe { MutHostRef::new(gas_meter) }; - let sentinel = unsafe { MutHostRef::new(sentinel) }; + let gas_meter = unsafe { HostRef::new(gas_meter) }; + let sentinel = unsafe { HostRef::new(sentinel) }; let tx = unsafe { HostRef::new(tx) }; let tx_index = unsafe { HostRef::new(tx_index) }; let verifiers = unsafe { MutHostRef::new(verifiers) }; @@ -181,8 +189,9 @@ where #[cfg(feature = "wasm-runtime")] let tx_wasm_cache = unsafe { MutHostRef::new(tx_wasm_cache) }; let ctx = TxCtx { - storage, write_log, + db, + in_mem, iterators, gas_meter, sentinel, @@ -200,12 +209,17 @@ where Self { memory, ctx } } + + /// Access state from within a tx + pub fn state(&self) -> TxHostEnvState { + self.ctx.state() + } } -impl Clone for TxVmEnv<'_, MEM, DB, H, CA> +impl Clone for TxVmEnv<'_, MEM, D, H, CA> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, CA: WasmCacheAccess, { @@ -217,16 +231,49 @@ where } } -impl<'a, DB, H, CA> Clone for TxCtx<'a, DB, H, CA> +impl<'a, D, H, CA> TxCtx<'a, D, H, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, + CA: WasmCacheAccess, +{ + /// Access state from within a tx + pub fn state(&self) -> TxHostEnvState { + let write_log = unsafe { self.write_log.get() }; + let db = unsafe { self.db.get() }; + let in_mem = unsafe { self.in_mem.get() }; + let gas_meter = unsafe { self.gas_meter.get() }; + let sentinel = unsafe { self.sentinel.get() }; + TxHostEnvState { + write_log, + db, + in_mem, + gas_meter, + sentinel, + } + } + + /// Mutably borrow gas meter and sentinel + pub fn borrow_mut_gas_meter( + &self, + ) -> (RefMut, RefMut) { + let gas_meter = unsafe { self.gas_meter.get() }; + let sentinel = unsafe { self.sentinel.get() }; + (gas_meter.borrow_mut(), sentinel.borrow_mut()) + } +} + +impl<'a, D, H, CA> Clone for TxCtx<'a, D, H, CA> +where + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, CA: WasmCacheAccess, { fn clone(&self) -> Self { Self { - storage: self.storage.clone(), write_log: self.write_log.clone(), + db: self.db.clone(), + in_mem: self.in_mem.clone(), iterators: self.iterators.clone(), gas_meter: self.gas_meter.clone(), sentinel: self.sentinel.clone(), @@ -245,10 +292,10 @@ where } /// A validity predicate's host environment -pub struct VpVmEnv<'a, MEM, DB, H, EVAL, CA> +pub struct VpVmEnv<'a, MEM, D, H, EVAL, CA> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, @@ -256,29 +303,31 @@ where /// The VM memory for bi-directional data passing pub memory: MEM, /// The VP context contains references to host structures. 
- pub ctx: VpCtx<'a, DB, H, EVAL, CA>, + pub ctx: VpCtx<'a, D, H, EVAL, CA>, } /// A validity predicate's host context -pub struct VpCtx<'a, DB, H, EVAL, CA> +pub struct VpCtx<'a, D, H, EVAL, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { /// The address of the account that owns the VP pub address: HostRef<'a, &'a Address>, - /// Read-only access to the storage. - pub storage: HostRef<'a, &'a State>, - /// Read-only access to the write log. + /// Read-only access to write log. pub write_log: HostRef<'a, &'a WriteLog>, + /// Read-only access to in-memory state. + pub in_mem: HostRef<'a, &'a InMemory>, + /// Read-only access to DB. + pub db: HostRef<'a, &'a D>, /// Storage prefix iterators. - pub iterators: MutHostRef<'a, &'a PrefixIterators<'a, DB>>, - /// VP gas meter. - pub gas_meter: MutHostRef<'a, &'a VpGasMeter>, - /// Errors sentinel - pub sentinel: MutHostRef<'a, &'a VpSentinel>, + pub iterators: MutHostRef<'a, &'a PrefixIterators<'a, D>>, + /// VP gas meter. In `RefCell` to charge gas in read-only fns. + pub gas_meter: HostRef<'a, &'a RefCell>, + /// Errors sentinel. In `RefCell` to charge gas in read-only fns. + pub sentinel: HostRef<'a, &'a RefCell>, /// The transaction code is used for signature verification pub tx: HostRef<'a, &'a Tx>, /// The transaction index is used to identify a shielded transaction's @@ -303,8 +352,8 @@ where /// A Validity predicate runner for calls from the [`vp_eval`] function. pub trait VpEvaluator { - /// Storage DB type - type Db: namada_state::DB + for<'iter> namada_state::DBIter<'iter>; + /// DB type + type Db: DB + for<'iter> DBIter<'iter>; /// Storage hasher type type H: StorageHasher; /// Recursive VP evaluator type @@ -325,11 +374,11 @@ pub trait VpEvaluator { ) -> HostEnvResult; } -impl<'a, MEM, DB, H, EVAL, CA> VpVmEnv<'a, MEM, DB, H, EVAL, CA> +impl<'a, MEM, D, H, EVAL, CA> VpVmEnv<'a, MEM, D, H, EVAL, CA> where - MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, + MEM: VmMemory, EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -344,13 +393,14 @@ where pub fn new( memory: MEM, address: &Address, - storage: &State, write_log: &WriteLog, - gas_meter: &mut VpGasMeter, - sentinel: &mut VpSentinel, + in_mem: &InMemory, + db: &D, + gas_meter: &RefCell, + sentinel: &RefCell, tx: &Tx, tx_index: &TxIndex, - iterators: &mut PrefixIterators<'a, DB>, + iterators: &mut PrefixIterators<'a, D>, verifiers: &BTreeSet
, result_buffer: &mut Option>, keys_changed: &BTreeSet, @@ -359,8 +409,9 @@ where ) -> Self { let ctx = VpCtx::new( address, - storage, write_log, + in_mem, + db, gas_meter, sentinel, tx, @@ -376,12 +427,17 @@ where Self { memory, ctx } } + + /// Access state from within a VP + pub fn state(&self) -> VpHostEnvState { + self.ctx.state() + } } -impl Clone for VpVmEnv<'_, MEM, DB, H, EVAL, CA> +impl<'a, MEM, D, H, EVAL, CA> Clone for VpVmEnv<'a, MEM, D, H, EVAL, CA> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, @@ -394,9 +450,9 @@ where } } -impl<'a, DB, H, EVAL, CA> VpCtx<'a, DB, H, EVAL, CA> +impl<'a, D, H, EVAL, CA> VpCtx<'a, D, H, EVAL, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, @@ -411,13 +467,14 @@ where #[allow(clippy::too_many_arguments)] pub fn new( address: &Address, - storage: &State, write_log: &WriteLog, - gas_meter: &mut VpGasMeter, - sentinel: &mut VpSentinel, + in_mem: &InMemory, + db: &D, + gas_meter: &RefCell, + sentinel: &RefCell, tx: &Tx, tx_index: &TxIndex, - iterators: &mut PrefixIterators<'a, DB>, + iterators: &mut PrefixIterators<'a, D>, verifiers: &BTreeSet
, result_buffer: &mut Option>, keys_changed: &BTreeSet, @@ -425,13 +482,14 @@ where #[cfg(feature = "wasm-runtime")] vp_wasm_cache: &mut VpCache, ) -> Self { let address = unsafe { HostRef::new(address) }; - let storage = unsafe { HostRef::new(storage) }; let write_log = unsafe { HostRef::new(write_log) }; + let db = unsafe { HostRef::new(db) }; + let in_mem = unsafe { HostRef::new(in_mem) }; let tx = unsafe { HostRef::new(tx) }; let tx_index = unsafe { HostRef::new(tx_index) }; let iterators = unsafe { MutHostRef::new(iterators) }; - let gas_meter = unsafe { MutHostRef::new(gas_meter) }; - let sentinel = unsafe { MutHostRef::new(sentinel) }; + let gas_meter = unsafe { HostRef::new(gas_meter) }; + let sentinel = unsafe { HostRef::new(sentinel) }; let verifiers = unsafe { HostRef::new(verifiers) }; let result_buffer = unsafe { MutHostRef::new(result_buffer) }; let keys_changed = unsafe { HostRef::new(keys_changed) }; @@ -440,8 +498,9 @@ where let vp_wasm_cache = unsafe { MutHostRef::new(vp_wasm_cache) }; Self { address, - storage, write_log, + db, + in_mem, iterators, gas_meter, sentinel, @@ -457,11 +516,36 @@ where cache_access: std::marker::PhantomData, } } + + /// Access state from within a VP + pub fn state(&self) -> VpHostEnvState { + let write_log = unsafe { self.write_log.get() }; + let db = unsafe { self.db.get() }; + let in_mem = unsafe { self.in_mem.get() }; + let gas_meter = unsafe { self.gas_meter.get() }; + let sentinel = unsafe { self.sentinel.get() }; + VpHostEnvState { + write_log, + db, + in_mem, + gas_meter, + sentinel, + } + } + + /// Mutably borrow gas meter and sentinel + pub fn borrow_mut_gas_meter( + &self, + ) -> (RefMut, RefMut) { + let gas_meter = unsafe { self.gas_meter.get() }; + let sentinel = unsafe { self.sentinel.get() }; + (gas_meter.borrow_mut(), sentinel.borrow_mut()) + } } -impl<'a, DB, H, EVAL, CA> Clone for VpCtx<'a, DB, H, EVAL, CA> +impl<'a, D, H, EVAL, CA> Clone for VpCtx<'a, D, H, EVAL, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, @@ -469,8 +553,9 @@ where fn clone(&self) -> Self { Self { address: self.address.clone(), - storage: self.storage.clone(), write_log: self.write_log.clone(), + db: self.db.clone(), + in_mem: self.in_mem.clone(), iterators: self.iterators.clone(), gas_meter: self.gas_meter.clone(), sentinel: self.sentinel.clone(), @@ -489,20 +574,20 @@ where } /// Add a gas cost incured in a transaction -pub fn tx_charge_gas( - env: &TxVmEnv, +pub fn tx_charge_gas( + env: &TxVmEnv, used_gas: u64, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { - let gas_meter = unsafe { env.ctx.gas_meter.get() }; + let (mut gas_meter, mut sentinel) = env.ctx.borrow_mut_gas_meter(); // if we run out of gas, we need to stop the execution gas_meter.consume(used_gas).map_err(|err| { - let sentinel = unsafe { env.ctx.sentinel.get() }; sentinel.set_out_of_gas(); tracing::info!( "Stopping transaction execution because of gas error: {}", @@ -514,72 +599,49 @@ where } /// Called from VP wasm to request to use the given gas amount -pub fn vp_charge_gas( - env: &VpVmEnv, +pub fn vp_charge_gas( + env: &VpVmEnv, used_gas: u64, ) -> vp_host_fns::EnvResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static 
+ DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - vp_host_fns::add_gas(gas_meter, used_gas, sentinel) + let (mut gas_meter, mut sentinel) = env.ctx.borrow_mut_gas_meter(); + vp_host_fns::add_gas(&mut gas_meter, used_gas, &mut sentinel) } /// Storage `has_key` function exposed to the wasm VM Tx environment. It will /// try to check the write log first and if no entry found then the storage. -pub fn tx_has_key( - env: &TxVmEnv, +pub fn tx_has_key( + env: &TxVmEnv, key_ptr: u64, key_len: u64, ) -> TxResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { let (key, gas) = env .memory .read_string(key_ptr, key_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; tracing::debug!("tx_has_key {}, key {}", key, key_ptr,); let key = Key::parse(key).map_err(TxRuntimeError::StorageDataError)?; // try to read from the write log first - let write_log = unsafe { env.ctx.write_log.get() }; - let (log_val, gas) = write_log.read(&key); - tx_charge_gas(env, gas)?; - Ok(match log_val { - Some(&write_log::StorageModification::Write { .. }) => { - HostEnvResult::Success.to_i64() - } - Some(&write_log::StorageModification::Delete) => { - // the given key has been deleted - HostEnvResult::Fail.to_i64() - } - Some(&write_log::StorageModification::InitAccount { .. }) => { - HostEnvResult::Success.to_i64() - } - Some(&write_log::StorageModification::Temp { .. }) => { - HostEnvResult::Success.to_i64() - } - None => { - // when not found in write log, try to check the storage - let storage = unsafe { env.ctx.storage.get() }; - let (present, gas) = - storage.has_key(&key).map_err(TxRuntimeError::StateError)?; - tx_charge_gas(env, gas)?; - HostEnvResult::from(present).to_i64() - } - }) + let state = env.state(); + let present = state.has_key(&key)?; + Ok(HostEnvResult::from(present).to_i64()) } /// Storage read function exposed to the wasm VM Tx environment. It will try to @@ -587,86 +649,42 @@ where /// /// Returns `-1` when the key is not present, or the length of the data when /// the key is present (the length may be `0`). 
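`tx_has_key` and `tx_read` above no longer walk the write log and storage by hand; they call `env.state()`, which bundles the separately held write log, DB, in-memory state, gas meter and sentinel into a `TxHostEnvState`, so a single `state.read_bytes`/`state.has_key` call is both write-log aware and gas metered. A hypothetical, much-simplified stand-in for that facade (the in-memory block state is omitted here for brevity):

    use std::cell::RefCell;
    use std::collections::HashMap;

    /// Simplified stand-in for `TxHostEnvState`: bundle the pieces the host
    /// context keeps as separate references behind one value.
    struct EnvState<'a> {
        write_log: &'a HashMap<String, Vec<u8>>,
        db: &'a HashMap<String, Vec<u8>>,
        gas_used: &'a RefCell<u64>,
    }

    impl EnvState<'_> {
        fn read_bytes(&self, key: &str) -> Option<Vec<u8>> {
            // Charge gas through the shared cell even though `self` is `&self`.
            *self.gas_used.borrow_mut() += 1;
            self.write_log
                .get(key)
                .or_else(|| self.db.get(key))
                .cloned()
        }
    }

    fn main() {
        let db = HashMap::from([("k".to_string(), b"committed".to_vec())]);
        let write_log = HashMap::new();
        let gas_used = RefCell::new(0u64);
        let state = EnvState { write_log: &write_log, db: &db, gas_used: &gas_used };

        assert_eq!(state.read_bytes("k"), Some(b"committed".to_vec()));
        assert_eq!(*gas_used.borrow(), 1);
    }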
-pub fn tx_read( - env: &TxVmEnv, +pub fn tx_read( + env: &TxVmEnv, key_ptr: u64, key_len: u64, ) -> TxResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { let (key, gas) = env .memory .read_string(key_ptr, key_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; tracing::debug!("tx_read {}, key {}", key, key_ptr,); let key = Key::parse(key).map_err(TxRuntimeError::StorageDataError)?; - // try to read from the write log first - let write_log = unsafe { env.ctx.write_log.get() }; - let (log_val, gas) = write_log.read(&key); - tx_charge_gas(env, gas)?; - Ok(match log_val { - Some(write_log::StorageModification::Write { ref value }) => { - let len: i64 = value - .len() - .try_into() - .map_err(TxRuntimeError::NumConversionError)?; - let result_buffer = unsafe { env.ctx.result_buffer.get() }; - result_buffer.replace(value.clone()); - len - } - Some(&write_log::StorageModification::Delete) => { - // fail, given key has been deleted - HostEnvResult::Fail.to_i64() - } - Some(write_log::StorageModification::InitAccount { - ref vp_code_hash, - }) => { - // read the VP of a new account - let len: i64 = vp_code_hash - .len() - .try_into() - .map_err(TxRuntimeError::NumConversionError)?; - let result_buffer = unsafe { env.ctx.result_buffer.get() }; - result_buffer.replace(vp_code_hash.to_vec()); - len - } - Some(write_log::StorageModification::Temp { ref value }) => { + let state = env.state(); + let value = state.read_bytes(&key)?; + match value { + Some(value) => { let len: i64 = value .len() .try_into() .map_err(TxRuntimeError::NumConversionError)?; let result_buffer = unsafe { env.ctx.result_buffer.get() }; - result_buffer.replace(value.clone()); - len - } - None => { - // when not found in write log, try to read from the storage - let storage = unsafe { env.ctx.storage.get() }; - let (value, gas) = - storage.read(&key).map_err(TxRuntimeError::StateError)?; - tx_charge_gas(env, gas)?; - match value { - Some(value) => { - let len: i64 = value - .len() - .try_into() - .map_err(TxRuntimeError::NumConversionError)?; - let result_buffer = unsafe { env.ctx.result_buffer.get() }; - result_buffer.replace(value); - len - } - None => HostEnvResult::Fail.to_i64(), - } + result_buffer.replace(value); + Ok(len) } - }) + None => Ok(HostEnvResult::Fail.to_i64()), + } } /// This function is a helper to handle the first step of reading var-len @@ -677,14 +695,15 @@ where /// first step reads the value into a result buffer and returns the size (if /// any) back to the guest, the second step reads the value from cache into a /// pre-allocated buffer with the obtained size. -pub fn tx_result_buffer( - env: &TxVmEnv, +pub fn tx_result_buffer( + env: &TxVmEnv, result_ptr: u64, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { let result_buffer = unsafe { env.ctx.result_buffer.get() }; @@ -695,28 +714,29 @@ where .memory .write_bytes(result_ptr, value) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas) + tx_charge_gas::(env, gas) } /// Storage prefix iterator function exposed to the wasm VM Tx environment. 
/// It will try to get an iterator from the storage and return the corresponding /// ID of the iterator, ordered by storage keys. -pub fn tx_iter_prefix( - env: &TxVmEnv, +pub fn tx_iter_prefix( + env: &TxVmEnv, prefix_ptr: u64, prefix_len: u64, ) -> TxResult where MEM: VmMemory, - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { let (prefix, gas) = env .memory .read_string(prefix_ptr, prefix_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; tracing::debug!("tx_iter_prefix {}", prefix); @@ -724,10 +744,9 @@ where Key::parse(prefix).map_err(TxRuntimeError::StorageDataError)?; let write_log = unsafe { env.ctx.write_log.get() }; - let storage = unsafe { env.ctx.storage.get() }; - let (iter, gas) = - namada_state::iter_prefix_post(write_log, storage, &prefix); - tx_charge_gas(env, gas)?; + let db = unsafe { env.ctx.db.get() }; + let (iter, gas) = namada_state::iter_prefix_post(write_log, db, &prefix); + tx_charge_gas::(env, gas)?; let iterators = unsafe { env.ctx.iterators.get() }; Ok(iterators.insert(iter).id()) @@ -739,27 +758,28 @@ where /// /// Returns `-1` when the key is not present, or the length of the data when /// the key is present (the length may be `0`). -pub fn tx_iter_next( - env: &TxVmEnv, +pub fn tx_iter_next( + env: &TxVmEnv, iter_id: u64, ) -> TxResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { tracing::debug!("tx_iter_next iter_id {}", iter_id,); - let write_log = unsafe { env.ctx.write_log.get() }; + let state = env.state(); let iterators = unsafe { env.ctx.iterators.get() }; let iter_id = PrefixIteratorId::new(iter_id); while let Some((key, val, iter_gas)) = iterators.next(iter_id) { - let (log_val, log_gas) = write_log.read( + let (log_val, log_gas) = state.write_log().read( &Key::parse(key.clone()) .map_err(TxRuntimeError::StorageDataError)?, ); - tx_charge_gas(env, iter_gas + log_gas)?; + tx_charge_gas::(env, iter_gas + log_gas)?; match log_val { Some(write_log::StorageModification::Write { ref value }) => { let key_val = borsh::to_vec(&KeyVal { @@ -775,11 +795,11 @@ where result_buffer.replace(key_val); return Ok(len); } - Some(&write_log::StorageModification::Delete) => { + Some(write_log::StorageModification::Delete) => { // check the next because the key has already deleted continue; } - Some(&write_log::StorageModification::InitAccount { .. }) => { + Some(write_log::StorageModification::InitAccount { .. }) => { // a VP of a new account doesn't need to be iterated continue; } @@ -815,8 +835,8 @@ where /// Storage write function exposed to the wasm VM Tx environment. The given /// key/value will be written to the write log. 
-pub fn tx_write( - env: &TxVmEnv, +pub fn tx_write( + env: &TxVmEnv, key_ptr: u64, key_len: u64, val_ptr: u64, @@ -824,42 +844,42 @@ pub fn tx_write( ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { let (key, gas) = env .memory .read_string(key_ptr, key_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; let (value, gas) = env .memory .read_bytes(val_ptr, val_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; tracing::debug!("tx_update {}, {:?}", key, value); let key = Key::parse(key).map_err(TxRuntimeError::StorageDataError)?; if key.is_validity_predicate().is_some() { - tx_validate_vp_code_hash(env, &value, &None)?; + tx_validate_vp_code_hash::(env, &value, &None)?; } - check_address_existence(env, &key)?; + check_address_existence::(env, &key)?; - let write_log = unsafe { env.ctx.write_log.get() }; - let (gas, _size_diff) = write_log - .write(&key, value) - .map_err(TxRuntimeError::StorageModificationError)?; - tx_charge_gas(env, gas) + let mut state = env.state(); + state + .write_bytes(&key, value) + .map_err(TxRuntimeError::StorageError) } /// Temporary storage write function exposed to the wasm VM Tx environment. The /// given key/value will be written only to the write log. It will be never /// written to the storage. -pub fn tx_write_temp( - env: &TxVmEnv, +pub fn tx_write_temp( + env: &TxVmEnv, key_ptr: u64, key_len: u64, val_ptr: u64, @@ -867,42 +887,45 @@ pub fn tx_write_temp( ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { let (key, gas) = env .memory .read_string(key_ptr, key_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; let (value, gas) = env .memory .read_bytes(val_ptr, val_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; tracing::debug!("tx_write_temp {}, {:?}", key, value); let key = Key::parse(key).map_err(TxRuntimeError::StorageDataError)?; - check_address_existence(env, &key)?; + check_address_existence::(env, &key)?; - let write_log = unsafe { env.ctx.write_log.get() }; - let (gas, _size_diff) = write_log + let mut state = env.state(); + let (gas, _size_diff) = state + .write_log_mut() .write_temp(&key, value) .map_err(TxRuntimeError::StorageModificationError)?; - tx_charge_gas(env, gas) + tx_charge_gas::(env, gas) } -fn check_address_existence( - env: &TxVmEnv, +fn check_address_existence( + env: &TxVmEnv, key: &Key, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { // Get the token if the key is a balance or minter key @@ -914,8 +937,7 @@ where is_any_minted_balance_key(key).or_else(|| is_any_minter_key(key)) }; - let write_log = unsafe { env.ctx.write_log.get() }; - let storage = unsafe { env.ctx.storage.get() }; + let state = env.state(); for addr in key.find_addresses() { // skip if the address is a token address if Some(&addr) == token { @@ 
-926,15 +948,15 @@ where continue; } let vp_key = Key::validity_predicate(&addr); - let (vp, gas) = write_log.read(&vp_key); - tx_charge_gas(env, gas)?; + let (vp, gas) = state.write_log().read(&vp_key); + tx_charge_gas::(env, gas)?; // just check the existence because the write log should not have the // delete log of the VP if vp.is_none() { - let (is_present, gas) = storage - .has_key(&vp_key) + let (is_present, gas) = state + .db_has_key(&vp_key) .map_err(TxRuntimeError::StateError)?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; if !is_present { tracing::info!( "Trying to write into storage with a key containing an \ @@ -952,22 +974,23 @@ where /// Storage delete function exposed to the wasm VM Tx environment. The given /// key/value will be written as deleted to the write log. -pub fn tx_delete( - env: &TxVmEnv, +pub fn tx_delete( + env: &TxVmEnv, key_ptr: u64, key_len: u64, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { let (key, gas) = env .memory .read_string(key_ptr, key_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; tracing::debug!("tx_delete {}", key); @@ -976,57 +999,57 @@ where return Err(TxRuntimeError::CannotDeleteVp); } - let write_log = unsafe { env.ctx.write_log.get() }; - let (gas, _size_diff) = write_log - .delete(&key) - .map_err(TxRuntimeError::StorageModificationError)?; - tx_charge_gas(env, gas) + let mut state = env.state(); + state.delete(&key).map_err(TxRuntimeError::StorageError) } /// Emitting an IBC event function exposed to the wasm VM Tx environment. /// The given IBC event will be set to the write log. -pub fn tx_emit_ibc_event( - env: &TxVmEnv, +pub fn tx_emit_ibc_event( + env: &TxVmEnv, event_ptr: u64, event_len: u64, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { let (event, gas) = env .memory .read_bytes(event_ptr, event_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; let event: IbcEvent = BorshDeserialize::try_from_slice(&event) .map_err(TxRuntimeError::EncodingError)?; - let write_log = unsafe { env.ctx.write_log.get() }; - let gas = write_log.emit_ibc_event(event); - tx_charge_gas(env, gas) + let mut state = env.state(); + let gas = state.write_log_mut().emit_ibc_event(event); + tx_charge_gas::(env, gas) } /// Getting an IBC event function exposed to the wasm VM Tx environment. 
-pub fn tx_get_ibc_events( - env: &TxVmEnv, +pub fn tx_get_ibc_events( + env: &TxVmEnv, event_type_ptr: u64, event_type_len: u64, ) -> TxResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { let (event_type, gas) = env .memory .read_string(event_type_ptr, event_type_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; - let write_log = unsafe { env.ctx.write_log.get() }; - let events: Vec = write_log + tx_charge_gas::(env, gas)?; + let state = env.state(); + let events: Vec = state + .write_log() .get_ibc_events() .iter() .filter(|event| event.event_type == event_type) @@ -1047,15 +1070,15 @@ where /// /// Returns `-1` when the key is not present, or the length of the data when /// the key is present (the length may be `0`). -pub fn vp_read_pre( - env: &VpVmEnv, +pub fn vp_read_pre( + env: &VpVmEnv, key_ptr: u64, key_len: u64, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -1063,17 +1086,15 @@ where .memory .read_string(key_ptr, key_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + let (mut gas_meter, mut sentinel) = env.ctx.borrow_mut_gas_meter(); + vp_host_fns::add_gas(&mut gas_meter, gas, &mut sentinel)?; // try to read from the storage let key = Key::parse(key).map_err(vp_host_fns::RuntimeError::StorageDataError)?; - let storage = unsafe { env.ctx.storage.get() }; - let write_log = unsafe { env.ctx.write_log.get() }; + let state = env.state(); let value = - vp_host_fns::read_pre(gas_meter, storage, write_log, &key, sentinel)?; + vp_host_fns::read_pre(&mut gas_meter, &state, &key, &mut sentinel)?; tracing::debug!( "vp_read_pre addr {}, key {}, value {:?}", unsafe { env.ctx.address.get() }, @@ -1100,15 +1121,15 @@ where /// /// Returns `-1` when the key is not present, or the length of the data when /// the key is present (the length may be `0`). 
-pub fn vp_read_post( - env: &VpVmEnv, +pub fn vp_read_post( + env: &VpVmEnv, key_ptr: u64, key_len: u64, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -1116,19 +1137,17 @@ where .memory .read_string(key_ptr, key_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + let (mut gas_meter, mut sentinel) = env.ctx.borrow_mut_gas_meter(); + vp_host_fns::add_gas(&mut gas_meter, gas, &mut sentinel)?; tracing::debug!("vp_read_post {}, key {}", key, key_ptr,); // try to read from the write log first let key = Key::parse(key).map_err(vp_host_fns::RuntimeError::StorageDataError)?; - let storage = unsafe { env.ctx.storage.get() }; - let write_log = unsafe { env.ctx.write_log.get() }; + let state = env.state(); let value = - vp_host_fns::read_post(gas_meter, storage, write_log, &key, sentinel)?; + vp_host_fns::read_post(&mut gas_meter, &state, &key, &mut sentinel)?; Ok(match value { Some(value) => { let len: i64 = value @@ -1148,15 +1167,15 @@ where /// /// Returns `-1` when the key is not present, or the length of the data when /// the key is present (the length may be `0`). -pub fn vp_read_temp( - env: &VpVmEnv, +pub fn vp_read_temp( + env: &VpVmEnv, key_ptr: u64, key_len: u64, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -1164,17 +1183,17 @@ where .memory .read_string(key_ptr, key_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + let (mut gas_meter, mut sentinel) = env.ctx.borrow_mut_gas_meter(); + vp_host_fns::add_gas(&mut gas_meter, gas, &mut sentinel)?; tracing::debug!("vp_read_temp {}, key {}", key, key_ptr); // try to read from the write log let key = Key::parse(key).map_err(vp_host_fns::RuntimeError::StorageDataError)?; - let write_log = unsafe { env.ctx.write_log.get() }; - let value = vp_host_fns::read_temp(gas_meter, write_log, &key, sentinel)?; + let state = env.state(); + let value = + vp_host_fns::read_temp(&mut gas_meter, &state, &key, &mut sentinel)?; Ok(match value { Some(value) => { let len: i64 = value @@ -1197,14 +1216,14 @@ where /// first step reads the value into a result buffer and returns the size (if /// any) back to the guest, the second step reads the value from cache into a /// pre-allocated buffer with the obtained size. 
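The doc comment above describes the two-step protocol for returning variable-length values to WASM: the first host call stashes the value and returns its length (or `-1` for a missing key), and the guest then allocates a buffer of that size and asks the host to copy the stashed value into it. A self-contained sketch of the host side of that handshake (the real code keeps the cache behind a `MutHostRef` result buffer):

    use std::collections::HashMap;

    /// Simplified model of the host environment's result-buffer cache.
    struct HostEnv {
        storage: HashMap<String, Vec<u8>>,
        result_buffer: Option<Vec<u8>>,
    }

    impl HostEnv {
        /// Step 1: stash the value and tell the guest how many bytes to
        /// allocate; `-1` signals a missing key.
        fn read_value(&mut self, key: &str) -> i64 {
            match self.storage.get(key) {
                Some(value) => {
                    self.result_buffer = Some(value.clone());
                    value.len() as i64
                }
                None => -1,
            }
        }

        /// Step 2: copy the stashed value into the guest-allocated buffer.
        fn take_result_buffer(&mut self, dest: &mut [u8]) {
            let value = self.result_buffer.take().expect("step 1 must run first");
            dest.copy_from_slice(&value);
        }
    }

    fn main() {
        let mut env = HostEnv {
            storage: [("k".to_string(), b"value".to_vec())].into(),
            result_buffer: None,
        };
        let len = env.read_value("k");
        assert_eq!(len, 5);
        let mut guest_buf = vec![0u8; len as usize];
        env.take_result_buffer(&mut guest_buf);
        assert_eq!(guest_buf, b"value".to_vec());
    }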
-pub fn vp_result_buffer( - env: &VpVmEnv, +pub fn vp_result_buffer( + env: &VpVmEnv, result_ptr: u64, ) -> vp_host_fns::EnvResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -1216,22 +1235,21 @@ where .memory .write_bytes(result_ptr, value) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - vp_host_fns::add_gas(gas_meter, gas, sentinel) + let (mut gas_meter, mut sentinel) = env.ctx.borrow_mut_gas_meter(); + vp_host_fns::add_gas(&mut gas_meter, gas, &mut sentinel) } /// Storage `has_key` in prior state (before tx execution) function exposed to /// the wasm VM VP environment. It will try to read from the storage. -pub fn vp_has_key_pre( - env: &VpVmEnv, +pub fn vp_has_key_pre( + env: &VpVmEnv, key_ptr: u64, key_len: u64, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -1239,34 +1257,31 @@ where .memory .read_string(key_ptr, key_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + let (mut gas_meter, mut sentinel) = env.ctx.borrow_mut_gas_meter(); + vp_host_fns::add_gas(&mut gas_meter, gas, &mut sentinel)?; tracing::debug!("vp_has_key_pre {}, key {}", key, key_ptr,); let key = Key::parse(key).map_err(vp_host_fns::RuntimeError::StorageDataError)?; - let storage = unsafe { env.ctx.storage.get() }; - let write_log = unsafe { env.ctx.write_log.get() }; - let present = vp_host_fns::has_key_pre( - gas_meter, storage, write_log, &key, sentinel, - )?; + let state = env.state(); + let present = + vp_host_fns::has_key_pre(&mut gas_meter, &state, &key, &mut sentinel)?; Ok(HostEnvResult::from(present).to_i64()) } /// Storage `has_key` in posterior state (after tx execution) function exposed /// to the wasm VM VP environment. It will try to check the write log first and /// if no entry found then the storage. 
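A std-only model of the "write log first, then storage" lookup that the posterior-state functions below implement; plain HashMaps stand in for the real write log and committed storage, and `Delete` models a pending deletion.

use std::collections::HashMap;

enum LogEntry { Write(Vec<u8>), Delete }

fn read_post(
    write_log: &HashMap<String, LogEntry>,
    storage: &HashMap<String, Vec<u8>>,
    key: &str,
) -> Option<Vec<u8>> {
    match write_log.get(key) {
        // a pending write shadows whatever is in storage
        Some(LogEntry::Write(v)) => Some(v.clone()),
        // a pending delete hides the stored value
        Some(LogEntry::Delete) => None,
        // no tx-level change: fall back to committed storage
        None => storage.get(key).cloned(),
    }
}

fn main() {
    let mut wl = HashMap::new();
    wl.insert("a".to_string(), LogEntry::Write(vec![1]));
    wl.insert("b".to_string(), LogEntry::Delete);
    let mut st = HashMap::new();
    st.insert("b".to_string(), vec![2]);
    st.insert("c".to_string(), vec![3]);
    assert_eq!(read_post(&wl, &st, "a"), Some(vec![1])); // pending write wins
    assert_eq!(read_post(&wl, &st, "b"), None);          // pending delete hides storage
    assert_eq!(read_post(&wl, &st, "c"), Some(vec![3])); // falls back to storage
}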
-pub fn vp_has_key_post( - env: &VpVmEnv, +pub fn vp_has_key_post( + env: &VpVmEnv, key_ptr: u64, key_len: u64, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -1274,19 +1289,16 @@ where .memory .read_string(key_ptr, key_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + let (mut gas_meter, mut sentinel) = env.ctx.borrow_mut_gas_meter(); + vp_host_fns::add_gas(&mut gas_meter, gas, &mut sentinel)?; tracing::debug!("vp_has_key_post {}, key {}", key, key_ptr,); let key = Key::parse(key).map_err(vp_host_fns::RuntimeError::StorageDataError)?; - let storage = unsafe { env.ctx.storage.get() }; - let write_log = unsafe { env.ctx.write_log.get() }; - let present = vp_host_fns::has_key_post( - gas_meter, storage, write_log, &key, sentinel, - )?; + let state = env.state(); + let present = + vp_host_fns::has_key_post(&mut gas_meter, &state, &key, &mut sentinel)?; Ok(HostEnvResult::from(present).to_i64()) } @@ -1294,15 +1306,16 @@ where /// exposed to the wasm VM VP environment. It will try to get an iterator from /// the storage and return the corresponding ID of the iterator, ordered by /// storage keys. -pub fn vp_iter_prefix_pre( - env: &VpVmEnv, +pub fn vp_iter_prefix_pre( + env: &VpVmEnv, prefix_ptr: u64, prefix_len: u64, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -1310,9 +1323,8 @@ where .memory .read_string(prefix_ptr, prefix_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + let (mut gas_meter, mut sentinel) = env.ctx.borrow_mut_gas_meter(); + vp_host_fns::add_gas(&mut gas_meter, gas, &mut sentinel)?; tracing::debug!("vp_iter_prefix_pre {}", prefix); @@ -1320,9 +1332,13 @@ where .map_err(vp_host_fns::RuntimeError::StorageDataError)?; let write_log = unsafe { env.ctx.write_log.get() }; - let storage = unsafe { env.ctx.storage.get() }; + let db = unsafe { env.ctx.db.get() }; let iter = vp_host_fns::iter_prefix_pre( - gas_meter, write_log, storage, &prefix, sentinel, + &mut gas_meter, + write_log, + db, + &prefix, + &mut sentinel, )?; let iterators = unsafe { env.ctx.iterators.get() }; @@ -1333,15 +1349,16 @@ where /// exposed to the wasm VM VP environment. It will try to get an iterator from /// the storage and return the corresponding ID of the iterator, ordered by /// storage keys. 
-pub fn vp_iter_prefix_post( - env: &VpVmEnv, +pub fn vp_iter_prefix_post( + env: &VpVmEnv, prefix_ptr: u64, prefix_len: u64, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -1349,9 +1366,8 @@ where .memory .read_string(prefix_ptr, prefix_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + let (mut gas_meter, mut sentinel) = env.ctx.borrow_mut_gas_meter(); + vp_host_fns::add_gas(&mut gas_meter, gas, &mut sentinel)?; tracing::debug!("vp_iter_prefix_post {}", prefix); @@ -1359,9 +1375,13 @@ where .map_err(vp_host_fns::RuntimeError::StorageDataError)?; let write_log = unsafe { env.ctx.write_log.get() }; - let storage = unsafe { env.ctx.storage.get() }; + let db = unsafe { env.ctx.db.get() }; let iter = vp_host_fns::iter_prefix_post( - gas_meter, write_log, storage, &prefix, sentinel, + &mut gas_meter, + write_log, + db, + &prefix, + &mut sentinel, )?; let iterators = unsafe { env.ctx.iterators.get() }; @@ -1373,14 +1393,15 @@ where /// /// Returns `-1` when the key is not present, or the length of the data when /// the key is present (the length may be `0`). -pub fn vp_iter_next( - env: &VpVmEnv, +pub fn vp_iter_next( + env: &VpVmEnv, iter_id: u64, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -1389,10 +1410,9 @@ where let iterators = unsafe { env.ctx.iterators.get() }; let iter_id = PrefixIteratorId::new(iter_id); if let Some(iter) = iterators.get_mut(iter_id) { - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; + let (mut gas_meter, mut sentinel) = env.ctx.borrow_mut_gas_meter(); if let Some((key, val)) = - vp_host_fns::iter_next(gas_meter, iter, sentinel)? + vp_host_fns::iter_next(&mut gas_meter, iter, &mut sentinel)? { let key_val = borsh::to_vec(&KeyVal { key, val }) .map_err(vp_host_fns::RuntimeError::EncodingError)?; @@ -1409,22 +1429,23 @@ where } /// Verifier insertion function exposed to the wasm VM Tx environment. 
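A sketch of how a VP guest might drive the prefix-iterator host calls above: obtain an iterator id for a prefix, then call the next-item function until it reports -1, draining the result buffer each round. The import names and the Borsh-encoded key/value layout are assumptions based on the host-side code in this diff, not a definitive ABI.

extern "C" {
    fn namada_vp_iter_prefix_post(prefix_ptr: u64, prefix_len: u64) -> u64;
    // returns -1 when the iterator is exhausted, otherwise the byte length of
    // the encoded (key, value) pair left in the host result buffer
    fn namada_vp_iter_next(iter_id: u64) -> i64;
    fn namada_vp_result_buffer(result_ptr: u64);
}

unsafe fn count_posterior_entries(prefix: &str) -> usize {
    let id =
        namada_vp_iter_prefix_post(prefix.as_ptr() as u64, prefix.len() as u64);
    let mut n = 0;
    loop {
        let len = namada_vp_iter_next(id);
        if len < 0 {
            break;
        }
        // Drain the result buffer; a real guest would Borsh-decode a KeyVal here.
        let mut buf = vec![0u8; len as usize];
        namada_vp_result_buffer(buf.as_mut_ptr() as u64);
        n += 1;
    }
    n
}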
-pub fn tx_insert_verifier( - env: &TxVmEnv, +pub fn tx_insert_verifier( + env: &TxVmEnv, addr_ptr: u64, addr_len: u64, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { let (addr, gas) = env .memory .read_string(addr_ptr, addr_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; tracing::debug!("tx_insert_verifier {}, addr_ptr {}", addr, addr_ptr,); @@ -1433,15 +1454,15 @@ where let verifiers = unsafe { env.ctx.verifiers.get() }; // This is not a storage write, use the same multiplier used for a storage // read - tx_charge_gas(env, addr_len * MEMORY_ACCESS_GAS_PER_BYTE)?; + tx_charge_gas::(env, addr_len * MEMORY_ACCESS_GAS_PER_BYTE)?; verifiers.insert(addr); Ok(()) } /// Update a validity predicate function exposed to the wasm VM Tx environment -pub fn tx_update_validity_predicate( - env: &TxVmEnv, +pub fn tx_update_validity_predicate( + env: &TxVmEnv, addr_ptr: u64, addr_len: u64, code_hash_ptr: u64, @@ -1451,15 +1472,16 @@ pub fn tx_update_validity_predicate( ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { let (addr, gas) = env .memory .read_string(addr_ptr, addr_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; let addr = Address::decode(addr).map_err(TxRuntimeError::AddressError)?; tracing::debug!("tx_update_validity_predicate for addr {}", addr); @@ -1468,7 +1490,7 @@ where .memory .read_bytes(code_tag_ptr, code_tag_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; let code_tag = Option::::try_from_slice(&code_tag) .map_err(TxRuntimeError::EncodingError)?; @@ -1477,20 +1499,21 @@ where .memory .read_bytes(code_hash_ptr, code_hash_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; - tx_validate_vp_code_hash(env, &code_hash, &code_tag)?; + tx_validate_vp_code_hash::(env, &code_hash, &code_tag)?; - let write_log = unsafe { env.ctx.write_log.get() }; - let (gas, _size_diff) = write_log + let mut state = env.state(); + let (gas, _size_diff) = state + .write_log_mut() .write(&key, code_hash) .map_err(TxRuntimeError::StorageModificationError)?; - tx_charge_gas(env, gas) + tx_charge_gas::(env, gas) } /// Initialize a new account established address. 
-pub fn tx_init_account( - env: &TxVmEnv, +pub fn tx_init_account( + env: &TxVmEnv, code_hash_ptr: u64, code_hash_len: u64, code_tag_ptr: u64, @@ -1499,94 +1522,102 @@ pub fn tx_init_account( ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { let (code_hash, gas) = env .memory .read_bytes(code_hash_ptr, code_hash_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; let (code_tag, gas) = env .memory .read_bytes(code_tag_ptr, code_tag_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; let code_tag = Option::::try_from_slice(&code_tag) .map_err(TxRuntimeError::EncodingError)?; - tx_validate_vp_code_hash(env, &code_hash, &code_tag)?; + tx_validate_vp_code_hash::(env, &code_hash, &code_tag)?; tracing::debug!("tx_init_account"); - let storage = unsafe { env.ctx.storage.get() }; - let write_log = unsafe { env.ctx.write_log.get() }; let code_hash = Hash::try_from(&code_hash[..]) .map_err(|e| TxRuntimeError::InvalidVpCodeHash(e.to_string()))?; - let (addr, gas) = write_log.init_account(&storage.address_gen, code_hash); + let mut state = env.state(); + let (write_log, in_mem, _db) = state.split_borrow(); + let gen = &in_mem.address_gen; + let (addr, gas) = write_log.init_account(gen, code_hash); let addr_bytes = addr.serialize_to_vec(); - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; let gas = env .memory .write_bytes(result_ptr, addr_bytes) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas) + tx_charge_gas::(env, gas) } /// Getting the chain ID function exposed to the wasm VM Tx environment. -pub fn tx_get_chain_id( - env: &TxVmEnv, +pub fn tx_get_chain_id( + env: &TxVmEnv, result_ptr: u64, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { - let storage = unsafe { env.ctx.storage.get() }; - let (chain_id, gas) = storage.get_chain_id(); - tx_charge_gas(env, gas)?; + let state = env.state(); + let (chain_id, gas) = state.in_mem().get_chain_id(); + tx_charge_gas::(env, gas)?; let gas = env .memory .write_string(result_ptr, chain_id) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas) + tx_charge_gas::(env, gas) } /// Getting the block height function exposed to the wasm VM Tx /// environment. The height is that of the block to which the current /// transaction is being applied. -pub fn tx_get_block_height( - env: &TxVmEnv, +pub fn tx_get_block_height( + env: &TxVmEnv, ) -> TxResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { - let storage = unsafe { env.ctx.storage.get() }; - let (height, gas) = storage.get_block_height(); - tx_charge_gas(env, gas)?; + let state = env.state(); + let (height, gas) = state.in_mem().get_block_height(); + tx_charge_gas::(env, gas)?; Ok(height.0) } /// Getting the transaction index function exposed to the wasm VM Tx /// environment. The index is that of the transaction being applied /// in the current block. 
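The `split_borrow` call in `tx_init_account` above exists so the write log can be borrowed mutably while the in-memory address generator is read at the same time. A std-only model of that borrow-splitting pattern, with placeholder `WriteLog`/`InMem` types standing in for the real state components:

struct WriteLog { entries: Vec<String> }
struct InMem { address_gen: u64 }
struct State { write_log: WriteLog, in_mem: InMem }

impl State {
    // Splitting into disjoint field borrows lets the caller hold `&mut` to one
    // part and `&` to another, which a single `&mut self` accessor cannot offer.
    fn split_borrow(&mut self) -> (&mut WriteLog, &InMem) {
        (&mut self.write_log, &self.in_mem)
    }
}

fn init_account_like(state: &mut State) {
    let (write_log, in_mem) = state.split_borrow();
    // read from one component while mutating the other
    write_log
        .entries
        .push(format!("init account #{}", in_mem.address_gen));
}

fn main() {
    let mut state = State {
        write_log: WriteLog { entries: vec![] },
        in_mem: InMem { address_gen: 7 },
    };
    init_account_like(&mut state);
    assert_eq!(state.write_log.entries.len(), 1);
}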
-pub fn tx_get_tx_index( - env: &TxVmEnv, +pub fn tx_get_tx_index( + env: &TxVmEnv, ) -> TxResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { - tx_charge_gas(env, TX_INDEX_LENGTH as u64 * MEMORY_ACCESS_GAS_PER_BYTE)?; + tx_charge_gas::( + env, + TX_INDEX_LENGTH as u64 * MEMORY_ACCESS_GAS_PER_BYTE, + )?; let tx_index = unsafe { env.ctx.tx_index.get() }; Ok(tx_index.0) } @@ -1594,128 +1625,138 @@ where /// Getting the block height function exposed to the wasm VM VP /// environment. The height is that of the block to which the current /// transaction is being applied. -pub fn vp_get_tx_index( - env: &VpVmEnv, +pub fn vp_get_tx_index( + env: &VpVmEnv, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + EVAL: VpEvaluator, CA: WasmCacheAccess, { - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; + let (mut gas_meter, mut sentinel) = env.ctx.borrow_mut_gas_meter(); let tx_index = unsafe { env.ctx.tx_index.get() }; - let tx_idx = vp_host_fns::get_tx_index(gas_meter, tx_index, sentinel)?; + let tx_idx = + vp_host_fns::get_tx_index(&mut gas_meter, tx_index, &mut sentinel)?; Ok(tx_idx.0) } /// Getting the block hash function exposed to the wasm VM Tx environment. The /// hash is that of the block to which the current transaction is being applied. -pub fn tx_get_block_hash( - env: &TxVmEnv, +pub fn tx_get_block_hash( + env: &TxVmEnv, result_ptr: u64, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { - let storage = unsafe { env.ctx.storage.get() }; - let (hash, gas) = storage.get_block_hash(); - tx_charge_gas(env, gas)?; + let state = env.state(); + let (hash, gas) = state.in_mem().get_block_hash(); + tx_charge_gas::(env, gas)?; let gas = env .memory .write_bytes(result_ptr, hash.0) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas) + tx_charge_gas::(env, gas) } /// Getting the block epoch function exposed to the wasm VM Tx /// environment. The epoch is that of the block to which the current /// transaction is being applied. -pub fn tx_get_block_epoch( - env: &TxVmEnv, +pub fn tx_get_block_epoch( + env: &TxVmEnv, ) -> TxResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { - let storage = unsafe { env.ctx.storage.get() }; - let (epoch, gas) = storage.get_current_epoch(); - tx_charge_gas(env, gas)?; + let state = env.state(); + let (epoch, gas) = state.in_mem().get_current_epoch(); + tx_charge_gas::(env, gas)?; Ok(epoch.0) } /// Get predecessor epochs function exposed to the wasm VM Tx environment. 
-pub fn tx_get_pred_epochs( - env: &TxVmEnv, +pub fn tx_get_pred_epochs( + env: &TxVmEnv, ) -> TxResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { - let storage = unsafe { env.ctx.storage.get() }; - let pred_epochs = storage.block.pred_epochs.clone(); + let state = env.state(); + let pred_epochs = state.in_mem().block.pred_epochs.clone(); let bytes = pred_epochs.serialize_to_vec(); let len: i64 = bytes .len() .try_into() .map_err(TxRuntimeError::NumConversionError)?; - tx_charge_gas(env, MEMORY_ACCESS_GAS_PER_BYTE * len as u64)?; + tx_charge_gas::( + env, + MEMORY_ACCESS_GAS_PER_BYTE * len as u64, + )?; let result_buffer = unsafe { env.ctx.result_buffer.get() }; result_buffer.replace(bytes); Ok(len) } /// Get the native token's address -pub fn tx_get_native_token( - env: &TxVmEnv, +pub fn tx_get_native_token( + env: &TxVmEnv, result_ptr: u64, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { // Gas for getting the native token address from storage - tx_charge_gas( + tx_charge_gas::( env, ESTABLISHED_ADDRESS_BYTES_LEN as u64 * MEMORY_ACCESS_GAS_PER_BYTE, )?; - let storage = unsafe { env.ctx.storage.get() }; - let native_token = storage.native_token.clone(); + let state = env.state(); + let native_token = state.in_mem().native_token.clone(); let native_token_string = native_token.encode(); let gas = env .memory .write_string(result_ptr, native_token_string) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas) + tx_charge_gas::(env, gas) } /// Getting the block header function exposed to the wasm VM Tx environment. -pub fn tx_get_block_header( - env: &TxVmEnv, +pub fn tx_get_block_header( + env: &TxVmEnv, height: u64, ) -> TxResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { - let storage = unsafe { env.ctx.storage.get() }; - let (header, gas) = storage - .get_block_header(Some(BlockHeight(height))) - .map_err(TxRuntimeError::StateError)?; - tx_charge_gas(env, gas)?; + let state = env.state(); + let (header, gas) = + StateRead::get_block_header(&state, Some(BlockHeight(height))) + .map_err(TxRuntimeError::StateError)?; + + tx_charge_gas::(env, gas)?; Ok(match header { Some(h) => { let value = h.serialize_to_vec(); @@ -1732,67 +1773,69 @@ where } /// Getting the chain ID function exposed to the wasm VM VP environment. 
-pub fn vp_get_chain_id( - env: &VpVmEnv, +pub fn vp_get_chain_id( + env: &VpVmEnv, result_ptr: u64, ) -> vp_host_fns::EnvResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + EVAL: VpEvaluator, CA: WasmCacheAccess, { - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - let storage = unsafe { env.ctx.storage.get() }; - let chain_id = vp_host_fns::get_chain_id(gas_meter, storage, sentinel)?; + let (mut gas_meter, mut sentinel) = env.ctx.borrow_mut_gas_meter(); + let state = env.state(); + let chain_id = + vp_host_fns::get_chain_id(&mut gas_meter, &state, &mut sentinel)?; let gas = env .memory .write_string(result_ptr, chain_id) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - vp_host_fns::add_gas(gas_meter, gas, sentinel) + vp_host_fns::add_gas(&mut gas_meter, gas, &mut sentinel) } /// Getting the block height function exposed to the wasm VM VP /// environment. The height is that of the block to which the current /// transaction is being applied. -pub fn vp_get_block_height( - env: &VpVmEnv, +pub fn vp_get_block_height( + env: &VpVmEnv, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + EVAL: VpEvaluator, CA: WasmCacheAccess, { - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - let storage = unsafe { env.ctx.storage.get() }; - let height = vp_host_fns::get_block_height(gas_meter, storage, sentinel)?; + let (mut gas_meter, mut sentinel) = env.ctx.borrow_mut_gas_meter(); + let state = env.state(); + let height = + vp_host_fns::get_block_height(&mut gas_meter, &state, &mut sentinel)?; Ok(height.0) } /// Getting the block header function exposed to the wasm VM VP environment. -pub fn vp_get_block_header( - env: &VpVmEnv, +pub fn vp_get_block_header( + env: &VpVmEnv, height: u64, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + EVAL: VpEvaluator, CA: WasmCacheAccess, { - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - let storage = unsafe { env.ctx.storage.get() }; - let (header, gas) = storage - .get_block_header(Some(BlockHeight(height))) - .map_err(vp_host_fns::RuntimeError::StorageError)?; - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + let (mut gas_meter, mut sentinel) = env.ctx.borrow_mut_gas_meter(); + let state = env.state(); + let (header, gas) = + StateRead::get_block_header(&state, Some(BlockHeight(height))) + .map_err(vp_host_fns::RuntimeError::StorageError)?; + vp_host_fns::add_gas(&mut gas_meter, gas, &mut sentinel)?; Ok(match header { Some(h) => { let value = h.serialize_to_vec(); @@ -1810,44 +1853,46 @@ where /// Getting the block hash function exposed to the wasm VM VP environment. The /// hash is that of the block to which the current transaction is being applied. 
-pub fn vp_get_block_hash( - env: &VpVmEnv, +pub fn vp_get_block_hash( + env: &VpVmEnv, result_ptr: u64, ) -> vp_host_fns::EnvResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + EVAL: VpEvaluator, CA: WasmCacheAccess, { - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - let storage = unsafe { env.ctx.storage.get() }; - let hash = vp_host_fns::get_block_hash(gas_meter, storage, sentinel)?; + let (mut gas_meter, mut sentinel) = env.ctx.borrow_mut_gas_meter(); + let state = env.state(); + let hash = + vp_host_fns::get_block_hash(&mut gas_meter, &state, &mut sentinel)?; let gas = env .memory .write_bytes(result_ptr, hash.0) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - vp_host_fns::add_gas(gas_meter, gas, sentinel) + vp_host_fns::add_gas(&mut gas_meter, gas, &mut sentinel) } /// Getting the transaction hash function exposed to the wasm VM VP environment. -pub fn vp_get_tx_code_hash( - env: &VpVmEnv, +pub fn vp_get_tx_code_hash( + env: &VpVmEnv, result_ptr: u64, ) -> vp_host_fns::EnvResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + EVAL: VpEvaluator, CA: WasmCacheAccess, { - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; + let (mut gas_meter, mut sentinel) = env.ctx.borrow_mut_gas_meter(); let tx = unsafe { env.ctx.tx.get() }; - let hash = vp_host_fns::get_tx_code_hash(gas_meter, tx, sentinel)?; + let hash = + vp_host_fns::get_tx_code_hash(&mut gas_meter, tx, &mut sentinel)?; let mut result_bytes = vec![]; if let Some(hash) = hash { result_bytes.push(1); @@ -1859,45 +1904,46 @@ where .memory .write_bytes(result_ptr, result_bytes) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - vp_host_fns::add_gas(gas_meter, gas, sentinel) + vp_host_fns::add_gas(&mut gas_meter, gas, &mut sentinel) } /// Getting the block epoch function exposed to the wasm VM VP /// environment. The epoch is that of the block to which the current /// transaction is being applied. -pub fn vp_get_block_epoch( - env: &VpVmEnv, +pub fn vp_get_block_epoch( + env: &VpVmEnv, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + EVAL: VpEvaluator, CA: WasmCacheAccess, { - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - let storage = unsafe { env.ctx.storage.get() }; - let epoch = vp_host_fns::get_block_epoch(gas_meter, storage, sentinel)?; + let (mut gas_meter, mut sentinel) = env.ctx.borrow_mut_gas_meter(); + let state = env.state(); + let epoch = + vp_host_fns::get_block_epoch(&mut gas_meter, &state, &mut sentinel)?; Ok(epoch.0) } /// Get predecessor epochs function exposed to the wasm VM VP environment. 
-pub fn vp_get_pred_epochs( - env: &VpVmEnv, +pub fn vp_get_pred_epochs( + env: &VpVmEnv, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + EVAL: VpEvaluator, CA: WasmCacheAccess, { - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - let storage = unsafe { env.ctx.storage.get() }; + let (mut gas_meter, mut sentinel) = env.ctx.borrow_mut_gas_meter(); + let state = env.state(); let pred_epochs = - vp_host_fns::get_pred_epochs(gas_meter, storage, sentinel)?; + vp_host_fns::get_pred_epochs(&mut gas_meter, &state, &mut sentinel)?; let bytes = pred_epochs.serialize_to_vec(); let len: i64 = bytes .len() @@ -1909,15 +1955,16 @@ where } /// Getting the IBC event function exposed to the wasm VM VP environment. -pub fn vp_get_ibc_events( - env: &VpVmEnv, +pub fn vp_get_ibc_events( + env: &VpVmEnv, event_type_ptr: u64, event_type_len: u64, ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -1925,12 +1972,12 @@ where .memory .read_string(event_type_ptr, event_type_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + let (mut gas_meter, mut sentinel) = env.ctx.borrow_mut_gas_meter(); + vp_host_fns::add_gas(&mut gas_meter, gas, &mut sentinel)?; - let write_log = unsafe { env.ctx.write_log.get() }; - let events = vp_host_fns::get_ibc_events(gas_meter, write_log, event_type)?; + let state = env.state(); + let events = + vp_host_fns::get_ibc_events(&mut gas_meter, &state, event_type)?; let value = events.serialize_to_vec(); let len: i64 = value .len() @@ -1946,8 +1993,8 @@ where /// verifications. 
When the runtime gas meter is implemented, this function can /// be removed #[allow(clippy::too_many_arguments)] -pub fn vp_verify_tx_section_signature( - env: &VpVmEnv, +pub fn vp_verify_tx_section_signature( + env: &VpVmEnv, hash_list_ptr: u64, hash_list_len: u64, public_keys_map_ptr: u64, @@ -1960,8 +2007,9 @@ pub fn vp_verify_tx_section_signature( ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -1970,9 +2018,8 @@ where .read_bytes(hash_list_ptr, hash_list_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + let (mut gas_meter, mut sentinel) = env.ctx.borrow_mut_gas_meter(); + vp_host_fns::add_gas(&mut gas_meter, gas, &mut sentinel)?; let hashes = <[Hash; 1]>::try_from_slice(&hash_list) .map_err(vp_host_fns::RuntimeError::EncodingError)?; @@ -1980,7 +2027,7 @@ where .memory .read_bytes(public_keys_map_ptr, public_keys_map_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + vp_host_fns::add_gas(&mut gas_meter, gas, &mut sentinel)?; let public_keys_map = namada_core::account::AccountPublicKeysMap::try_from_slice( &public_keys_map, @@ -1991,7 +2038,7 @@ where .memory .read_bytes(signer_ptr, signer_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + vp_host_fns::add_gas(&mut gas_meter, gas, &mut sentinel)?; let signer = Address::try_from_slice(&signer) .map_err(vp_host_fns::RuntimeError::EncodingError)?; @@ -1999,7 +2046,7 @@ where .memory .read_bytes(max_signatures_ptr, max_signatures_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; + vp_host_fns::add_gas(&mut gas_meter, gas, &mut sentinel)?; let max_signatures = Option::::try_from_slice(&max_signatures) .map_err(vp_host_fns::RuntimeError::EncodingError)?; @@ -2031,15 +2078,16 @@ where /// Log a string from exposed to the wasm VM Tx environment. The message will be /// printed at the [`tracing::Level::INFO`]. This function is for development /// only. -pub fn tx_log_string( - env: &TxVmEnv, +pub fn tx_log_string( + env: &TxVmEnv, str_ptr: u64, str_len: u64, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { let (str, _gas) = env @@ -2053,28 +2101,28 @@ where /// Execute IBC tx. // Temporarily the IBC tx execution is implemented via a host function to // workaround wasm issue. 
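The IBC execution path just below shares one mutable state handle between the action dispatcher and the transfer module via Rc<RefCell<_>>. A std-only model of that sharing, with placeholder types standing in for the dispatcher, the module, and the tx state:

use std::cell::RefCell;
use std::rc::Rc;

struct Ctx { writes: u32 }
struct Actions { ctx: Rc<RefCell<Ctx>> }
struct Module { ctx: Rc<RefCell<Ctx>> }

impl Actions {
    fn execute(&mut self, module: &Module) {
        // both handles point at the same context; each borrow ends at the
        // end of its statement, so the module can borrow again afterwards
        self.ctx.borrow_mut().writes += 1;
        module.record();
    }
}

impl Module {
    fn record(&self) {
        self.ctx.borrow_mut().writes += 1;
    }
}

fn main() {
    let ctx = Rc::new(RefCell::new(Ctx { writes: 0 }));
    let module = Module { ctx: ctx.clone() };
    let mut actions = Actions { ctx: ctx.clone() };
    actions.execute(&module);
    assert_eq!(ctx.borrow().writes, 2);
}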
-pub fn tx_ibc_execute( - env: &TxVmEnv, +pub fn tx_ibc_execute( + env: &TxVmEnv, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { - use std::cell::RefCell; use std::rc::Rc; use namada_ibc::{IbcActions, TransferModule}; let tx_data = unsafe { env.ctx.tx.get().data() }.ok_or_else(|| { let sentinel = unsafe { env.ctx.sentinel.get() }; - sentinel.set_invalid_commitment(); + sentinel.borrow_mut().set_invalid_commitment(); TxRuntimeError::MissingTxData })?; - let ctx = Rc::new(RefCell::new(env.ctx.clone())); - let mut actions = IbcActions::new(ctx.clone()); - let module = TransferModule::new(ctx); + let state = Rc::new(RefCell::new(env.state())); + let mut actions = IbcActions::new(state.clone()); + let module = TransferModule::new(state); actions.add_transfer_module(module.module_id(), module); actions.execute(&tx_data)?; @@ -2082,28 +2130,29 @@ where } /// Validate a VP WASM code hash in a tx environment. -fn tx_validate_vp_code_hash( - env: &TxVmEnv, +fn tx_validate_vp_code_hash( + env: &TxVmEnv, code_hash: &[u8], code_tag: &Option, ) -> TxResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { let code_hash = Hash::try_from(code_hash) .map_err(|e| TxRuntimeError::InvalidVpCodeHash(e.to_string()))?; + let state = env.state(); // First check that code hash corresponds to the code tag if it is present if let Some(tag) = code_tag { - let storage = unsafe { env.ctx.storage.get() }; let hash_key = Key::wasm_hash(tag); - let (result, gas) = storage - .read(&hash_key) + let (result, gas) = state + .db_read(&hash_key) .map_err(TxRuntimeError::StateError)?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; if let Some(tag_hash) = result { let tag_hash = Hash::try_from(&tag_hash[..]).map_err(|e| { TxRuntimeError::InvalidVpCodeHash(e.to_string()) @@ -2123,7 +2172,7 @@ where } // Then check that VP code hash is in the allowlist. - if !crate::parameters::is_vp_allowed(&env.ctx, &code_hash) + if !crate::parameters::is_vp_allowed(&env.ctx.state(), &code_hash) .map_err(TxRuntimeError::StorageError)? 
{ return Err(TxRuntimeError::DisallowedVp); @@ -2131,15 +2180,13 @@ where // Then check that the corresponding VP code does indeed exist let code_key = Key::wasm_code(&code_hash); - let write_log = unsafe { env.ctx.write_log.get() }; - let (result, gas) = write_log.read(&code_key); - tx_charge_gas(env, gas)?; + let (result, gas) = state.write_log().read(&code_key); + tx_charge_gas::(env, gas)?; if result.is_none() { - let storage = unsafe { env.ctx.storage.get() }; - let (is_present, gas) = storage - .has_key(&code_key) + let (is_present, gas) = state + .db_has_key(&code_key) .map_err(TxRuntimeError::StateError)?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; if !is_present { return Err(TxRuntimeError::InvalidVpCodeHash( "The corresponding VP code doesn't exist".to_string(), @@ -2150,21 +2197,21 @@ where } /// Set the sentinel for an invalid tx section commitment -pub fn tx_set_commitment_sentinel(env: &TxVmEnv) +pub fn tx_set_commitment_sentinel(env: &TxVmEnv) where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, CA: WasmCacheAccess, { let sentinel = unsafe { env.ctx.sentinel.get() }; - sentinel.set_invalid_commitment(); + sentinel.borrow_mut().set_invalid_commitment(); } /// Verify a transaction signature #[allow(clippy::too_many_arguments)] -pub fn tx_verify_tx_section_signature( - env: &TxVmEnv, +pub fn tx_verify_tx_section_signature( + env: &TxVmEnv, hash_list_ptr: u64, hash_list_len: u64, public_keys_map_ptr: u64, @@ -2175,8 +2222,9 @@ pub fn tx_verify_tx_section_signature( ) -> TxResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: WasmCacheAccess, { let (hash_list, gas) = env @@ -2184,9 +2232,8 @@ where .read_bytes(hash_list_ptr, hash_list_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - let sentinel = unsafe { env.ctx.sentinel.get() }; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - tx_charge_gas(env, gas)?; + let (mut gas_meter, mut sentinel) = env.ctx.borrow_mut_gas_meter(); + tx_charge_gas::(env, gas)?; let hashes = <[Hash; 1]>::try_from_slice(&hash_list) .map_err(TxRuntimeError::EncodingError)?; @@ -2194,20 +2241,20 @@ where .memory .read_bytes(public_keys_map_ptr, public_keys_map_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; let public_keys_map = namada_core::account::AccountPublicKeysMap::try_from_slice( &public_keys_map, ) .map_err(TxRuntimeError::EncodingError)?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; let (max_signatures, gas) = env .memory .read_bytes(max_signatures_ptr, max_signatures_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; let max_signatures = Option::::try_from_slice(&max_signatures) .map_err(TxRuntimeError::EncodingError)?; @@ -2236,15 +2283,16 @@ where } /// Appends the new note commitments to the tree in storage -pub fn tx_update_masp_note_commitment_tree( - env: &TxVmEnv, +pub fn tx_update_masp_note_commitment_tree( + env: &TxVmEnv, transaction_ptr: u64, transaction_len: u64, ) -> TxResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + CA: 
WasmCacheAccess, { let _sentinel = unsafe { env.ctx.sentinel.get() }; @@ -2254,13 +2302,12 @@ where .read_bytes(transaction_ptr, transaction_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; - tx_charge_gas(env, gas)?; + tx_charge_gas::(env, gas)?; let transaction = Transaction::try_from_slice(&serialized_transaction) .map_err(TxRuntimeError::EncodingError)?; - let mut ctx = env.ctx.clone(); match crate::token::utils::update_note_commitment_tree( - &mut ctx, + &mut env.state(), &transaction, ) { Ok(()) => Ok(HostEnvResult::Success.to_i64()), @@ -2274,8 +2321,8 @@ where } /// Evaluate a validity predicate with the given input data. -pub fn vp_eval( - env: &VpVmEnv<'static, MEM, DB, H, EVAL, CA>, +pub fn vp_eval( + env: &VpVmEnv<'static, MEM, D, H, EVAL, CA>, vp_code_hash_ptr: u64, vp_code_hash_len: u64, input_data_ptr: u64, @@ -2283,76 +2330,80 @@ pub fn vp_eval( ) -> vp_host_fns::EnvResult where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, - EVAL: VpEvaluator, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + EVAL: VpEvaluator, CA: WasmCacheAccess, { let (vp_code_hash, gas) = env .memory .read_bytes(vp_code_hash_ptr, vp_code_hash_len as _) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; - let (input_data, gas) = env - .memory - .read_bytes(input_data_ptr, input_data_len as _) - .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - vp_host_fns::add_gas(gas_meter, gas, sentinel)?; - let input_data: Tx = BorshDeserialize::try_from_slice(&input_data) - .map_err(vp_host_fns::RuntimeError::EncodingError)?; + // The borrowed `gas_meter` and `sentinel` must be dropped before eval, + // which has to borrow these too. 
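A std-only illustration of why the scope introduced just below matters: a RefCell permits only one mutable borrow at a time, checked at runtime, so the gas-meter borrow taken while reading the inputs has to be dropped before `eval` re-borrows the same cell. The `u64` meter here is a placeholder, not the real `VpGasMeter`.

use std::cell::RefCell;

fn eval(meter: &RefCell<u64>) {
    *meter.borrow_mut() += 1;
}

fn main() {
    let meter = RefCell::new(0u64);
    {
        // borrow for the "read inputs and charge gas" phase...
        let mut m = meter.borrow_mut();
        *m += 10;
    } // ...and drop it here, before eval borrows the meter again
    eval(&meter);
    // Holding the first borrow across the `eval` call would panic at runtime
    // with an "already mutably borrowed" error.
    assert_eq!(*meter.borrow(), 11);
}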
+ let tx = { + let (mut gas_meter, mut sentinel) = env.ctx.borrow_mut_gas_meter(); + vp_host_fns::add_gas(&mut gas_meter, gas, &mut sentinel)?; + + let (input_data, gas) = env + .memory + .read_bytes(input_data_ptr, input_data_len as _) + .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + vp_host_fns::add_gas(&mut gas_meter, gas, &mut sentinel)?; + let tx: Tx = BorshDeserialize::try_from_slice(&input_data) + .map_err(vp_host_fns::RuntimeError::EncodingError)?; + tx + }; + + let eval_runner = unsafe { env.ctx.eval_runner.get() }; let vp_code_hash = Hash(vp_code_hash.try_into().map_err(|e| { vp_host_fns::RuntimeError::EncodingError(std::io::Error::new( std::io::ErrorKind::InvalidData, format!("Not a valid hash: {:?}", e), )) })?); - - let eval_runner = unsafe { env.ctx.eval_runner.get() }; - Ok(eval_runner - .eval(env.ctx.clone(), vp_code_hash, input_data) - .to_i64()) + Ok(eval_runner.eval(env.ctx.clone(), vp_code_hash, tx).to_i64()) } /// Get the native token's address -pub fn vp_get_native_token( - env: &VpVmEnv, +pub fn vp_get_native_token( + env: &VpVmEnv, result_ptr: u64, ) -> vp_host_fns::EnvResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + EVAL: VpEvaluator, CA: WasmCacheAccess, { - let gas_meter = unsafe { env.ctx.gas_meter.get() }; - let sentinel = unsafe { env.ctx.sentinel.get() }; - let storage = unsafe { env.ctx.storage.get() }; + let (mut gas_meter, mut sentinel) = env.ctx.borrow_mut_gas_meter(); + let state = env.state(); let native_token = - vp_host_fns::get_native_token(gas_meter, storage, sentinel)?; + vp_host_fns::get_native_token(&mut gas_meter, &state, &mut sentinel)?; let native_token_string = native_token.encode(); let gas = env .memory .write_string(result_ptr, native_token_string) .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; - vp_host_fns::add_gas(gas_meter, gas, sentinel) + vp_host_fns::add_gas(&mut gas_meter, gas, &mut sentinel) } /// Log a string from exposed to the wasm VM VP environment. The message will be /// printed at the [`tracing::Level::INFO`]. This function is for development /// only. -pub fn vp_log_string( - env: &VpVmEnv, +pub fn vp_log_string( + env: &VpVmEnv, str_ptr: u64, str_len: u64, ) -> vp_host_fns::EnvResult<()> where MEM: VmMemory, - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + EVAL: VpEvaluator, CA: WasmCacheAccess, { @@ -2364,407 +2415,45 @@ where Ok(()) } -// Temp. 
workaround for -use namada_state::StorageRead; - -use crate::storage::BlockHash; -impl<'a, DB, H, CA> StorageRead for TxCtx<'a, DB, H, CA> -where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, - CA: WasmCacheAccess, -{ - // type PrefixIter<'iter> = KeyValIterator<(String, Vec)>; - type PrefixIter<'iter> = u64 where Self: 'iter; - - fn read_bytes( - &self, - key: &Key, - ) -> std::result::Result>, StorageError> { - let write_log = unsafe { self.write_log.get() }; - let (log_val, gas) = write_log.read(key); - ibc_tx_charge_gas(self, gas)?; - Ok(match log_val { - Some(write_log::StorageModification::Write { ref value }) => { - Some(value.clone()) - } - Some(&write_log::StorageModification::Delete) => None, - Some(write_log::StorageModification::InitAccount { - ref vp_code_hash, - }) => Some(vp_code_hash.to_vec()), - Some(write_log::StorageModification::Temp { ref value }) => { - Some(value.clone()) - } - None => { - // when not found in write log, try to read from the storage - let storage = unsafe { self.storage.get() }; - let (value, gas) = storage.read(key).into_storage_result()?; - ibc_tx_charge_gas(self, gas)?; - value - } - }) - } - - fn has_key(&self, key: &Key) -> Result { - // try to read from the write log first - let write_log = unsafe { self.write_log.get() }; - let (log_val, gas) = write_log.read(key); - ibc_tx_charge_gas(self, gas)?; - Ok(match log_val { - Some(&write_log::StorageModification::Write { .. }) => true, - Some(&write_log::StorageModification::Delete) => false, - Some(&write_log::StorageModification::InitAccount { .. }) => true, - Some(&write_log::StorageModification::Temp { .. }) => true, - None => { - // when not found in write log, try to check the storage - let storage = unsafe { self.storage.get() }; - let (present, gas) = - storage.has_key(key).into_storage_result()?; - ibc_tx_charge_gas(self, gas)?; - present - } - }) - } - - fn iter_prefix<'iter>( - &'iter self, - prefix: &Key, - ) -> Result, StorageError> { - let write_log = unsafe { self.write_log.get() }; - let storage = unsafe { self.storage.get() }; - let (iter, gas) = - namada_state::iter_prefix_post(write_log, storage, prefix); - ibc_tx_charge_gas(self, gas)?; - - let iterators = unsafe { self.iterators.get() }; - Ok(iterators.insert(iter).id()) - } - - fn iter_next<'iter>( - &'iter self, - iter_id: &mut Self::PrefixIter<'iter>, - ) -> Result)>, StorageError> { - let write_log = unsafe { self.write_log.get() }; - let iterators = unsafe { self.iterators.get() }; - let iter_id = PrefixIteratorId::new(*iter_id); - while let Some((key, val, iter_gas)) = iterators.next(iter_id) { - let (log_val, log_gas) = - write_log.read(&Key::parse(key.clone()).into_storage_result()?); - ibc_tx_charge_gas(self, iter_gas + log_gas)?; - match log_val { - Some(write_log::StorageModification::Write { ref value }) => { - return Ok(Some((key, value.clone()))); - } - Some(&write_log::StorageModification::Delete) => { - // check the next because the key has already deleted - continue; - } - Some(&write_log::StorageModification::InitAccount { - .. 
- }) => { - // a VP of a new account doesn't need to be iterated - continue; - } - Some(write_log::StorageModification::Temp { ref value }) => { - return Ok(Some((key, value.clone()))); - } - None => { - return Ok(Some((key, val))); - } - } - } - Ok(None) - } - - fn get_chain_id(&self) -> Result { - let storage = unsafe { self.storage.get() }; - let (chain_id, gas) = storage.get_chain_id(); - ibc_tx_charge_gas(self, gas)?; - Ok(chain_id) - } - - fn get_block_height(&self) -> Result { - let storage = unsafe { self.storage.get() }; - let (height, gas) = storage.get_block_height(); - ibc_tx_charge_gas(self, gas)?; - Ok(height) - } - - fn get_block_header( - &self, - height: BlockHeight, - ) -> Result, StorageError> { - let storage = unsafe { self.storage.get() }; - let (header, gas) = storage - .get_block_header(Some(height)) - .into_storage_result()?; - ibc_tx_charge_gas(self, gas)?; - Ok(header) - } - - fn get_block_hash(&self) -> Result { - let storage = unsafe { self.storage.get() }; - let (hash, gas) = storage.get_block_hash(); - ibc_tx_charge_gas(self, gas)?; - Ok(hash) - } - - fn get_block_epoch(&self) -> Result { - let storage = unsafe { self.storage.get() }; - let (epoch, gas) = storage.get_current_epoch(); - ibc_tx_charge_gas(self, gas)?; - Ok(epoch) - } - - fn get_tx_index(&self) -> Result { - let tx_index = unsafe { self.tx_index.get() }; - ibc_tx_charge_gas( - self, - crate::vm::host_env::gas::STORAGE_ACCESS_GAS_PER_BYTE, - )?; - Ok(TxIndex(tx_index.0)) - } - - fn get_native_token(&self) -> Result { - let storage = unsafe { self.storage.get() }; - let native_token = storage.native_token.clone(); - ibc_tx_charge_gas( - self, - crate::vm::host_env::gas::STORAGE_ACCESS_GAS_PER_BYTE, - )?; - Ok(native_token) - } - - fn get_pred_epochs(&self) -> namada_state::StorageResult { - let storage = unsafe { self.storage.get() }; - ibc_tx_charge_gas( - self, - crate::vm::host_env::gas::STORAGE_ACCESS_GAS_PER_BYTE, - )?; - Ok(storage.block.pred_epochs.clone()) - } -} - -// Temp. workaround for -use namada_state::StorageWrite; -impl<'a, DB, H, CA> StorageWrite for TxCtx<'a, DB, H, CA> -where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, - CA: WasmCacheAccess, -{ - fn write_bytes( - &mut self, - key: &Key, - data: impl AsRef<[u8]>, - ) -> Result<(), StorageError> { - let write_log = unsafe { self.write_log.get() }; - let (gas, _size_diff) = write_log - .write(key, data.as_ref().to_vec()) - .into_storage_result()?; - ibc_tx_charge_gas(self, gas) - } - - fn delete(&mut self, key: &Key) -> Result<(), StorageError> { - if key.is_validity_predicate().is_some() { - return Err(TxRuntimeError::CannotDeleteVp).into_storage_result(); - } - - let write_log = unsafe { self.write_log.get() }; - let (gas, _size_diff) = write_log.delete(key).into_storage_result()?; - ibc_tx_charge_gas(self, gas) - } -} - -// Temp. 
workaround for -impl<'a, DB, H, CA> namada_ibc::IbcStorageContext for TxCtx<'a, DB, H, CA> -where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, - CA: WasmCacheAccess, -{ - fn emit_ibc_event(&mut self, event: IbcEvent) -> Result<(), StorageError> { - let write_log = unsafe { self.write_log.get() }; - let gas = write_log.emit_ibc_event(event); - ibc_tx_charge_gas(self, gas) - } - - fn get_ibc_events( - &self, - event_type: impl AsRef, - ) -> Result, StorageError> { - let write_log = unsafe { self.write_log.get() }; - Ok(write_log - .get_ibc_events() - .iter() - .filter(|event| event.event_type == event_type.as_ref()) - .cloned() - .collect()) - } - - fn transfer_token( - &mut self, - src: &Address, - dest: &Address, - token: &Address, - amount: crate::token::DenominatedAmount, - ) -> Result<(), StorageError> { - use crate::token; - - let amount = token::denom_to_amount(amount, token, self)?; - if amount != token::Amount::default() && src != dest { - let src_key = balance_key(token, src); - let dest_key = balance_key(token, dest); - let src_bal = self.read::(&src_key)?; - let mut src_bal = src_bal.ok_or_else(|| { - StorageError::new_const("the source has no balance") - })?; - src_bal.spend(&amount).into_storage_result()?; - let mut dest_bal = - self.read::(&dest_key)?.unwrap_or_default(); - dest_bal.receive(&amount).into_storage_result()?; - self.write(&src_key, src_bal)?; - self.write(&dest_key, dest_bal)?; - } - Ok(()) - } - - fn handle_masp_tx( - &mut self, - shielded: &masp_primitives::transaction::Transaction, - pin_key: Option<&str>, - ) -> Result<(), StorageError> { - crate::token::utils::handle_masp_tx(self, shielded, pin_key)?; - crate::token::utils::update_note_commitment_tree(self, shielded) - } - - fn mint_token( - &mut self, - target: &Address, - token: &Address, - amount: crate::token::DenominatedAmount, - ) -> Result<(), StorageError> { - use crate::token; - - let amount = token::denom_to_amount(amount, token, self)?; - let target_key = balance_key(token, target); - let mut target_bal = - self.read::(&target_key)?.unwrap_or_default(); - target_bal.receive(&amount).into_storage_result()?; - - let minted_key = minted_balance_key(token); - let mut minted_bal = - self.read::(&minted_key)?.unwrap_or_default(); - minted_bal.receive(&amount).into_storage_result()?; - - self.write(&target_key, target_bal)?; - self.write(&minted_key, minted_bal)?; - - let minter_key = minter_key(token); - self.write( - &minter_key, - Address::Internal(address::InternalAddress::Ibc), - ) - } - - fn burn_token( - &mut self, - target: &Address, - token: &Address, - amount: crate::token::DenominatedAmount, - ) -> Result<(), StorageError> { - use crate::token; - - let amount = token::denom_to_amount(amount, token, self)?; - let target_key = balance_key(token, target); - let mut target_bal = - self.read::(&target_key)?.unwrap_or_default(); - target_bal.spend(&amount).into_storage_result()?; - - // burn the minted amount - let minted_key = minted_balance_key(token); - let mut minted_bal = - self.read::(&minted_key)?.unwrap_or_default(); - minted_bal.spend(&amount).into_storage_result()?; - - self.write(&target_key, target_bal)?; - self.write(&minted_key, minted_bal) - } - - fn log_string(&self, message: String) { - tracing::info!("IBC host env log: {}", message); - } -} - -/// Add a gas cost incured in a transaction -// Temp helper. 
-fn ibc_tx_charge_gas<'a, DB, H, CA>( - ctx: &TxCtx<'a, DB, H, CA>, - used_gas: u64, -) -> Result<(), StorageError> -where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, - CA: WasmCacheAccess, -{ - let gas_meter = unsafe { ctx.gas_meter.get() }; - // if we run out of gas, we need to stop the execution - let result = gas_meter.consume(used_gas).into_storage_result(); - if let Err(err) = &result { - let sentinel = unsafe { ctx.sentinel.get() }; - sentinel.set_out_of_gas(); - tracing::info!( - "Stopping transaction execution because of gas error: {}", - err - ); - } - result -} - -// Temp. workaround for -impl<'a, DB, H, CA> namada_ibc::IbcCommonContext for TxCtx<'a, DB, H, CA> -where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, - CA: WasmCacheAccess, -{ -} - /// A helper module for testing #[cfg(feature = "testing")] pub mod testing { use std::collections::BTreeSet; - use namada_state::StorageHasher; - use super::*; use crate::vm::memory::testing::NativeMemory; use crate::vm::wasm::memory::WasmMemory; /// Setup a transaction environment #[allow(clippy::too_many_arguments)] - pub fn tx_env( - storage: &State, - write_log: &mut WriteLog, - iterators: &mut PrefixIterators<'static, DB>, + pub fn tx_env( + state: &mut S, + iterators: &mut PrefixIterators<'static, ::D>, verifiers: &mut BTreeSet
, - gas_meter: &mut TxGasMeter, - sentinel: &mut TxSentinel, + gas_meter: &RefCell, + sentinel: &RefCell, tx: &Tx, tx_index: &TxIndex, result_buffer: &mut Option>, #[cfg(feature = "wasm-runtime")] vp_wasm_cache: &mut VpCache, #[cfg(feature = "wasm-runtime")] tx_wasm_cache: &mut TxCache, - ) -> TxVmEnv<'static, NativeMemory, DB, H, CA> + ) -> TxVmEnv< + 'static, + NativeMemory, + ::D, + ::H, + CA, + > where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: State, CA: WasmCacheAccess, { + let (write_log, in_mem, db) = state.split_borrow(); TxVmEnv::new( NativeMemory, - storage, write_log, + in_mem, + db, iterators, gas_meter, sentinel, @@ -2781,22 +2470,26 @@ pub mod testing { /// Setup a transaction environment #[allow(clippy::too_many_arguments)] - pub fn tx_env_with_wasm_memory( - storage: &State, - write_log: &mut WriteLog, - iterators: &mut PrefixIterators<'static, DB>, + pub fn tx_env_with_wasm_memory( + state: &mut S, + iterators: &mut PrefixIterators<'static, ::D>, verifiers: &mut BTreeSet
, - gas_meter: &mut TxGasMeter, - sentinel: &mut TxSentinel, + gas_meter: &RefCell, + sentinel: &RefCell, tx: &Tx, tx_index: &TxIndex, result_buffer: &mut Option>, #[cfg(feature = "wasm-runtime")] vp_wasm_cache: &mut VpCache, #[cfg(feature = "wasm-runtime")] tx_wasm_cache: &mut TxCache, - ) -> TxVmEnv<'static, WasmMemory, DB, H, CA> + ) -> TxVmEnv< + 'static, + WasmMemory, + ::D, + ::H, + CA, + > where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: State, CA: WasmCacheAccess, { let store = crate::vm::wasm::compilation_cache::common::store(); @@ -2805,10 +2498,12 @@ pub mod testing { let mut wasm_memory = WasmMemory::default(); wasm_memory.inner.initialize(initial_memory); + let (write_log, in_mem, db) = state.split_borrow(); TxVmEnv::new( wasm_memory, - storage, write_log, + in_mem, + db, iterators, gas_meter, sentinel, @@ -2825,13 +2520,12 @@ pub mod testing { /// Setup a validity predicate environment #[allow(clippy::too_many_arguments)] - pub fn vp_env( + pub fn vp_env( address: &Address, - storage: &State, - write_log: &WriteLog, - iterators: &mut PrefixIterators<'static, DB>, - gas_meter: &mut VpGasMeter, - sentinel: &mut VpSentinel, + state: &S, + iterators: &mut PrefixIterators<'static, ::D>, + gas_meter: &RefCell, + sentinel: &RefCell, tx: &Tx, tx_index: &TxIndex, verifiers: &BTreeSet
, @@ -2839,18 +2533,25 @@ pub mod testing { keys_changed: &BTreeSet, eval_runner: &EVAL, #[cfg(feature = "wasm-runtime")] vp_wasm_cache: &mut VpCache, - ) -> VpVmEnv<'static, NativeMemory, DB, H, EVAL, CA> + ) -> VpVmEnv< + 'static, + NativeMemory, + ::D, + ::H, + EVAL, + CA, + > where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: StorageHasher, + S: StateRead, EVAL: VpEvaluator, CA: WasmCacheAccess, { VpVmEnv::new( NativeMemory, address, - storage, - write_log, + state.write_log(), + state.in_mem(), + state.db(), gas_meter, sentinel, tx, diff --git a/crates/namada/src/vm/wasm/host_env.rs b/crates/namada/src/vm/wasm/host_env.rs index f6d45e23cb..58655037f6 100644 --- a/crates/namada/src/vm/wasm/host_env.rs +++ b/crates/namada/src/vm/wasm/host_env.rs @@ -3,7 +3,7 @@ //! Here, we expose the host functions into wasm's //! imports, so they can be called from inside the wasm. -use namada_core::hash::StorageHasher; +use namada_state::{DBIter, StorageHasher, DB}; use wasmer::{ Function, HostEnvInitError, ImportObject, Instance, Memory, Store, WasmerEnv, @@ -13,9 +13,9 @@ use crate::vm::host_env::{TxVmEnv, VpEvaluator, VpVmEnv}; use crate::vm::wasm::memory::WasmMemory; use crate::vm::{host_env, WasmCacheAccess}; -impl WasmerEnv for TxVmEnv<'_, WasmMemory, DB, H, CA> +impl WasmerEnv for TxVmEnv<'_, WasmMemory, D, H, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, CA: WasmCacheAccess, { @@ -27,9 +27,9 @@ where } } -impl WasmerEnv for VpVmEnv<'_, WasmMemory, DB, H, EVAL, CA> +impl WasmerEnv for VpVmEnv<'_, WasmMemory, D, H, EVAL, CA> where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, @@ -45,13 +45,13 @@ where /// Prepare imports (memory and host functions) exposed to the vm guest running /// transaction code #[allow(clippy::too_many_arguments)] -pub fn tx_imports( +pub fn tx_imports( wasm_store: &Store, initial_memory: Memory, - env: TxVmEnv<'static, WasmMemory, DB, H, CA>, + env: TxVmEnv<'static, WasmMemory, D, H, CA>, ) -> ImportObject where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, CA: WasmCacheAccess, { @@ -88,22 +88,22 @@ where "namada_tx_ibc_execute" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_ibc_execute), "namada_tx_set_commitment_sentinel" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_set_commitment_sentinel), "namada_tx_verify_tx_section_signature" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_verify_tx_section_signature), - "namada_tx_update_masp_note_commitment_tree" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_update_masp_note_commitment_tree) + "namada_tx_update_masp_note_commitment_tree" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_update_masp_note_commitment_tree), }, } } /// Prepare imports (memory and host functions) exposed to the vm guest running /// validity predicate code -pub fn vp_imports( +pub fn vp_imports( wasm_store: &Store, initial_memory: Memory, - env: VpVmEnv<'static, WasmMemory, DB, H, EVAL, CA>, + env: VpVmEnv<'static, WasmMemory, D, H, EVAL, CA>, ) -> ImportObject where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, - EVAL: VpEvaluator, + EVAL: VpEvaluator, CA: 
WasmCacheAccess, { wasmer::imports! { diff --git a/crates/namada/src/vm/wasm/run.rs b/crates/namada/src/vm/wasm/run.rs index 5c3a3c392d..756e39a036 100644 --- a/crates/namada/src/vm/wasm/run.rs +++ b/crates/namada/src/vm/wasm/run.rs @@ -1,13 +1,15 @@ //! Wasm runners +use std::cell::RefCell; use std::collections::BTreeSet; +use std::fmt::Debug; use std::marker::PhantomData; use borsh::BorshDeserialize; use namada_core::validity_predicate::VpSentinel; use namada_gas::{GasMetering, TxGasMeter, WASM_MEMORY_PAGE_GAS}; use namada_state::write_log::StorageModification; -use namada_state::{State, StorageHasher}; +use namada_state::{DBIter, State, StateRead, StorageHasher, DB}; use namada_tx::data::TxSentinel; use namada_tx::{Commitment, Section, Tx}; use parity_wasm::elements; @@ -20,7 +22,6 @@ use crate::address::Address; use crate::hash::{Error as TxHashError, Hash}; use crate::internal::HostEnvResult; use crate::ledger::gas::VpGasMeter; -use crate::state::write_log::WriteLog; use crate::storage::{Key, TxIndex}; use crate::vm::host_env::{TxVmEnv, VpCtx, VpEvaluator, VpVmEnv}; use crate::vm::prefix_iter::PrefixIterators; @@ -91,18 +92,16 @@ pub type Result = std::result::Result; /// Execute a transaction code. Returns the set verifiers addresses requested by /// the transaction. #[allow(clippy::too_many_arguments)] -pub fn tx( - storage: &State, - write_log: &mut WriteLog, - gas_meter: &mut TxGasMeter, +pub fn tx( + state: &mut S, + gas_meter: &RefCell, tx_index: &TxIndex, tx: &Tx, vp_wasm_cache: &mut VpCache, tx_wasm_cache: &mut TxCache, ) -> Result> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead + State, CA: 'static + WasmCacheAccess, { let tx_code = tx @@ -115,16 +114,12 @@ where if let Some(tag) = &tx_code.tag { // Get the WASM code hash corresponding to the tag from storage let hash_key = Key::wasm_hash(tag); - let hash_value = match storage - .read(&hash_key) - .map_err(|e| { - Error::LoadWasmCode(format!( - "Read wasm code hash failed from storage: key {}, error {}", - hash_key, e - )) - })? - .0 - { + let hash_value = match state.read_bytes(&hash_key).map_err(|e| { + Error::LoadWasmCode(format!( + "Read wasm code hash failed from storage: key {}, error {}", + hash_key, e + )) + })? 
{ Some(v) => Hash::try_from_slice(&v) .map_err(|e| Error::ConversionError(e.to_string()))?, None => { @@ -145,26 +140,24 @@ where } } - let (module, store) = fetch_or_compile( tx_wasm_cache, &tx_code.code, write_log, storage, gas_meter, )?; + let (module, store) = + fetch_or_compile(tx_wasm_cache, &tx_code.code, state, gas_meter)?; - let mut iterators: PrefixIterators<'_, DB> = PrefixIterators::default(); + let mut iterators: PrefixIterators<'_, <S as StateRead>::D> = + PrefixIterators::default(); let mut verifiers = BTreeSet::new(); let mut result_buffer: Option<Vec<u8>> = None; - let mut sentinel = TxSentinel::default(); + let sentinel = RefCell::new(TxSentinel::default()); + let (write_log, in_mem, db) = state.split_borrow(); let env = TxVmEnv::new( WasmMemory::default(), - storage, write_log, + in_mem, + db, &mut iterators, gas_meter, - &mut sentinel, + &sentinel, tx, tx_index, &mut verifiers, @@ -203,7 +196,7 @@ where })?; apply_tx.call(tx_data_ptr, tx_data_len).map_err(|err| { tracing::debug!("Tx WASM failed with {}", err); - match sentinel { + match *sentinel.borrow() { TxSentinel::None => Error::RuntimeError(err), TxSentinel::OutOfGas => Error::GasError(err.to_string()), TxSentinel::InvalidCommitment => { @@ -219,48 +212,47 @@ where /// predicate accepted storage modifications performed by the transaction /// that triggered the execution. #[allow(clippy::too_many_arguments)] -pub fn vp<DB, H, CA>( +pub fn vp<S, CA>( vp_code_hash: Hash, tx: &Tx, tx_index: &TxIndex, address: &Address, - storage: &State<DB, H>, - write_log: &WriteLog, - gas_meter: &mut VpGasMeter, + state: &S, + gas_meter: &RefCell<VpGasMeter>, keys_changed: &BTreeSet<Key>, verifiers: &BTreeSet<Address>
, mut vp_wasm_cache: VpCache, ) -> Result where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CA: 'static + WasmCacheAccess, { // Compile the wasm module let (module, store) = fetch_or_compile( &mut vp_wasm_cache, &Commitment::Hash(vp_code_hash), - write_log, - storage, + state, gas_meter, )?; - let mut iterators: PrefixIterators<'_, DB> = PrefixIterators::default(); + let mut iterators: PrefixIterators<'_, ::D> = + PrefixIterators::default(); let mut result_buffer: Option> = None; - let eval_runner = VpEvalWasm { - db: PhantomData, - hasher: PhantomData, - cache_access: PhantomData, - }; - - let mut sentinel = VpSentinel::default(); + let eval_runner = + VpEvalWasm::<::D, ::H, CA> { + db: PhantomData, + hasher: PhantomData, + cache_access: PhantomData, + }; + let sentinel = RefCell::new(VpSentinel::default()); let env = VpVmEnv::new( WasmMemory::default(), address, - storage, - write_log, + state.write_log(), + state.in_mem(), + state.db(), gas_meter, - &mut sentinel, + &sentinel, tx, tx_index, &mut iterators, @@ -283,10 +275,9 @@ where address, keys_changed, verifiers, - gas_meter, ) { Ok(accept) => { - if sentinel.is_invalid_signature() { + if sentinel.borrow().is_invalid_signature() { if accept { // This is unexpected, if the signature is invalid the vp // should have rejected the tx. Something must be wrong with @@ -305,7 +296,7 @@ where } } Err(err) => { - if sentinel.is_out_of_gas() { + if sentinel.borrow().is_out_of_gas() { Err(Error::GasError(err.to_string())) } else { Err(err) @@ -323,7 +314,6 @@ fn run_vp( address: &Address, keys_changed: &BTreeSet, verifiers: &BTreeSet
, - _gas_meter: &mut VpGasMeter, ) -> Result { let input: VpInput = VpInput { addr: address, @@ -381,34 +371,34 @@ fn run_vp( /// Validity predicate wasm evaluator for `eval` host function calls. #[derive(Default, Debug)] -pub struct VpEvalWasm +pub struct VpEvalWasm where - DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, + D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, CA: WasmCacheAccess, { /// Phantom type for DB - pub db: PhantomData<*const DB>, - /// Phantom type for DB Hasher + pub db: PhantomData<*const D>, + /// Phantom type for hasher pub hasher: PhantomData<*const H>, /// Phantom type for WASM compilation cache access pub cache_access: PhantomData<*const CA>, } -impl VpEvaluator for VpEvalWasm +impl VpEvaluator for VpEvalWasm where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, CA: WasmCacheAccess, { type CA = CA; - type Db = DB; + type Db = D; type Eval = Self; type H = H; fn eval( &self, - ctx: VpCtx<'static, DB, H, Self, CA>, + ctx: VpCtx<'static, D, H, Self, CA>, vp_code_hash: Hash, input_data: Tx, ) -> HostEnvResult { @@ -422,16 +412,16 @@ where } } -impl VpEvalWasm +impl VpEvalWasm where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, CA: WasmCacheAccess, { /// Evaluate the given VP. pub fn eval_native_result( &self, - ctx: VpCtx<'static, DB, H, Self, CA>, + ctx: VpCtx<'static, D, H, Self, CA>, vp_code_hash: Hash, input_data: Tx, ) -> Result { @@ -439,26 +429,23 @@ where let keys_changed = unsafe { ctx.keys_changed.get() }; let verifiers = unsafe { ctx.verifiers.get() }; let vp_wasm_cache = unsafe { ctx.vp_wasm_cache.get() }; - let write_log = unsafe { ctx.write_log.get() }; - let storage = unsafe { ctx.storage.get() }; let gas_meter = unsafe { ctx.gas_meter.get() }; - let env = VpVmEnv { - memory: WasmMemory::default(), - ctx, - }; // Compile the wasm module let (module, store) = fetch_or_compile( vp_wasm_cache, &Commitment::Hash(vp_code_hash), - write_log, - storage, + &ctx.state(), gas_meter, )?; let initial_memory = memory::prepare_vp_memory(&store).map_err(Error::MemoryError)?; + let env = VpVmEnv { + memory: WasmMemory::default(), + ctx, + }; let imports = vp_imports(&store, initial_memory, env); run_vp( @@ -469,7 +456,6 @@ where address, keys_changed, verifiers, - gas_meter, ) } } @@ -504,16 +490,14 @@ pub fn prepare_wasm_code>(code: T) -> Result> { // Fetch or compile a WASM code from the cache or storage. Account for the // loading and code compilation gas costs. 
-fn fetch_or_compile( +fn fetch_or_compile( wasm_cache: &mut Cache, code_or_hash: &Commitment, - write_log: &WriteLog, - storage: &State, - gas_meter: &mut dyn GasMetering, + state: &S, + gas_meter: &RefCell, ) -> Result<(Module, Store)> where - DB: 'static + namada_state::DB + for<'iter> namada_state::DBIter<'iter>, - H: 'static + StorageHasher, + S: StateRead, CN: 'static + CacheName, CA: 'static + WasmCacheAccess, { @@ -523,14 +507,14 @@ where Some((module, store)) => { // Gas accounting even if the compiled module is in cache let key = Key::wasm_code_len(code_hash); - let tx_len = match write_log.read(&key).0 { + let tx_len = match state.write_log().read(&key).0 { Some(StorageModification::Write { value }) => { u64::try_from_slice(value).map_err(|e| { Error::ConversionError(e.to_string()) }) } - _ => match storage - .read(&key) + _ => match state + .db_read(&key) .map_err(|e| { Error::LoadWasmCode(format!( "Read wasm code length failed from \ @@ -554,12 +538,12 @@ where } None => { let key = Key::wasm_code(code_hash); - let code = match write_log.read(&key).0 { + let code = match state.write_log().read(&key).0 { Some(StorageModification::Write { value }) => { value.clone() } - _ => match storage - .read(&key) + _ => match state + .db_read(&key) .map_err(|e| { Error::LoadWasmCode(format!( "Read wasm code failed from storage: key \ @@ -589,9 +573,11 @@ where }; gas_meter + .borrow_mut() .add_wasm_load_from_storage_gas(tx_len) .map_err(|e| Error::GasError(e.to_string()))?; gas_meter + .borrow_mut() .add_compiling_gas(tx_len) .map_err(|e| Error::GasError(e.to_string()))?; Ok((module, store)) @@ -599,11 +585,13 @@ where Commitment::Id(code) => { let tx_len = code.len() as u64; gas_meter + .borrow_mut() .add_wasm_validation_gas(tx_len) .map_err(|e| Error::GasError(e.to_string()))?; validate_untrusted_wasm(code).map_err(Error::ValidationError)?; gas_meter + .borrow_mut() .add_compiling_gas(tx_len) .map_err(|e| Error::GasError(e.to_string()))?; match wasm_cache.compile_or_fetch(code)? { @@ -634,6 +622,7 @@ mod tests { use borsh_ext::BorshSerializeExt; use itertools::Either; + use namada_state::StorageWrite; use namada_test_utils::TestWasms; use namada_tx::data::TxType; use namada_tx::{Code, Data}; @@ -642,7 +631,7 @@ mod tests { use super::*; use crate::hash::Hash; - use crate::state::testing::TestStorage; + use crate::state::testing::TestState; use crate::tx::data::eval_vp::EvalVp; use crate::vm::host_env::TxRuntimeError; use crate::vm::wasm; @@ -758,9 +747,9 @@ mod tests { /// wasm execution, the execution is aborted. 
#[test] fn test_tx_memory_limiter_in_guest() { - let storage = TestStorage::default(); - let mut write_log = WriteLog::default(); - let mut gas_meter = TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()); + let mut state = TestState::default(); + let gas_meter = + RefCell::new(TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into())); let tx_index = TxIndex::default(); // This code will allocate memory of the given size @@ -770,8 +759,8 @@ mod tests { let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); let code_len = (tx_code.len() as u64).serialize_to_vec(); - write_log.write(&key, tx_code.clone()).unwrap(); - write_log.write(&len_key, code_len).unwrap(); + state.write_log_mut().write(&key, tx_code.clone()).unwrap(); + state.write_log_mut().write(&len_key, code_len).unwrap(); // Assuming 200 pages, 12.8 MiB limit assert_eq!(memory::TX_MEMORY_MAX_PAGES, 200); @@ -787,9 +776,8 @@ mod tests { outer_tx.set_code(Code::new(tx_code.clone(), None)); outer_tx.set_data(Data::new(tx_data)); let result = tx( - &storage, - &mut write_log, - &mut gas_meter, + &mut state, + &gas_meter, &tx_index, &outer_tx, &mut vp_cache, @@ -804,9 +792,8 @@ mod tests { outer_tx.set_code(Code::new(tx_code, None)); outer_tx.set_data(Data::new(tx_data)); let error = tx( - &storage, - &mut write_log, - &mut gas_meter, + &mut state, + &gas_meter, &tx_index, &outer_tx, &mut vp_cache, @@ -822,12 +809,11 @@ mod tests { /// fails and hence returns `false`. #[test] fn test_vp_memory_limiter_in_guest_calling_eval() { - let mut storage = TestStorage::default(); - let addr = storage.address_gen.generate_address("rng seed"); - let write_log = WriteLog::default(); - let mut gas_meter = VpGasMeter::new_from_tx_meter( + let mut state = TestState::default(); + let addr = state.in_mem_mut().address_gen.generate_address("rng seed"); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let keys_changed = BTreeSet::new(); let verifiers = BTreeSet::new(); let tx_index = TxIndex::default(); @@ -839,8 +825,8 @@ mod tests { let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); let code_len = (vp_eval.len() as u64).serialize_to_vec(); - storage.write(&key, vp_eval).unwrap(); - storage.write(&len_key, code_len).unwrap(); + state.write_bytes(&key, vp_eval).unwrap(); + state.write_bytes(&len_key, code_len).unwrap(); // This code will allocate memory of the given size let vp_memory_limit = TestWasms::VpMemoryLimit.read_bytes(); // store the wasm code @@ -848,8 +834,8 @@ mod tests { let key = Key::wasm_code(&limit_code_hash); let len_key = Key::wasm_code_len(&limit_code_hash); let code_len = (vp_memory_limit.len() as u64).serialize_to_vec(); - storage.write(&key, vp_memory_limit).unwrap(); - storage.write(&len_key, code_len).unwrap(); + state.write_bytes(&key, vp_memory_limit).unwrap(); + state.write_bytes(&len_key, code_len).unwrap(); // Assuming 200 pages, 12.8 MiB limit assert_eq!(memory::VP_MEMORY_MAX_PAGES, 200); @@ -858,7 +844,7 @@ mod tests { // shouldn't fail let input = 2_usize.pow(23).serialize_to_vec(); - let mut tx = Tx::new(storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(vec![], None).add_serialized_data(input); let eval_vp = EvalVp { @@ -866,7 +852,7 @@ mod tests { input: tx, }; - let mut outer_tx = Tx::new(storage.chain_id.clone(), None); + let mut outer_tx = Tx::new(state.in_mem().chain_id.clone(), None); outer_tx.add_code(vec![], 
None).add_data(eval_vp); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); @@ -877,9 +863,8 @@ mod tests { &outer_tx, &tx_index, &addr, - &storage, - &write_log, - &mut gas_meter, + &state, + &gas_meter, &keys_changed, &verifiers, vp_cache.clone(), @@ -890,7 +875,7 @@ mod tests { // Allocating `2^24` (16 MiB) should be above the memory limit and // should fail let input = 2_usize.pow(24).serialize_to_vec(); - let mut tx = Tx::new(storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(vec![], None).add_data(input); let eval_vp = EvalVp { @@ -898,7 +883,7 @@ mod tests { input: tx, }; - let mut outer_tx = Tx::new(storage.chain_id.clone(), None); + let mut outer_tx = Tx::new(state.in_mem().chain_id.clone(), None); outer_tx.add_code(vec![], None).add_data(eval_vp); // When the `eval`ed VP runs out of memory, its result should be @@ -909,9 +894,8 @@ mod tests { &outer_tx, &tx_index, &addr, - &storage, - &write_log, - &mut gas_meter, + &state, + &gas_meter, &keys_changed, &verifiers, vp_cache, @@ -925,12 +909,11 @@ mod tests { /// inside the wasm execution, the execution is aborted. #[test] fn test_vp_memory_limiter_in_guest() { - let mut storage = TestStorage::default(); - let addr = storage.address_gen.generate_address("rng seed"); - let write_log = WriteLog::default(); - let mut gas_meter = VpGasMeter::new_from_tx_meter( + let mut state = TestState::default(); + let addr = state.in_mem_mut().address_gen.generate_address("rng seed"); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let keys_changed = BTreeSet::new(); let verifiers = BTreeSet::new(); let tx_index = TxIndex::default(); @@ -942,8 +925,8 @@ mod tests { let code_len = (vp_code.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); - storage.write(&key, vp_code).unwrap(); - storage.write(&len_key, code_len).unwrap(); + state.write_bytes(&key, vp_code).unwrap(); + state.write_bytes(&len_key, code_len).unwrap(); // Assuming 200 pages, 12.8 MiB limit assert_eq!(memory::VP_MEMORY_MAX_PAGES, 200); @@ -952,7 +935,7 @@ mod tests { // shouldn't fail let tx_data = 2_usize.pow(23).serialize_to_vec(); let mut outer_tx = Tx::from_type(TxType::Raw); - outer_tx.header.chain_id = storage.chain_id.clone(); + outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_data(Data::new(tx_data)); outer_tx.set_code(Code::new(vec![], None)); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); @@ -961,9 +944,8 @@ mod tests { &outer_tx, &tx_index, &addr, - &storage, - &write_log, - &mut gas_meter, + &state, + &gas_meter, &keys_changed, &verifiers, vp_cache.clone(), @@ -974,16 +956,15 @@ mod tests { // should fail let tx_data = 2_usize.pow(24).serialize_to_vec(); let mut outer_tx = Tx::from_type(TxType::Raw); - outer_tx.header.chain_id = storage.chain_id.clone(); + outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_data(Data::new(tx_data)); let error = vp( code_hash, &outer_tx, &tx_index, &addr, - &storage, - &write_log, - &mut gas_meter, + &state, + &gas_meter, &keys_changed, &verifiers, vp_cache, @@ -997,9 +978,9 @@ mod tests { /// host input, the execution fails. 
#[test] fn test_tx_memory_limiter_in_host_input() { - let storage = TestStorage::default(); - let mut write_log = WriteLog::default(); - let mut gas_meter = TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()); + let mut state = TestState::default(); + let gas_meter = + RefCell::new(TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into())); let tx_index = TxIndex::default(); let tx_no_op = TestWasms::TxNoOp.read_bytes(); @@ -1008,8 +989,8 @@ mod tests { let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); let code_len = (tx_no_op.len() as u64).serialize_to_vec(); - write_log.write(&key, tx_no_op.clone()).unwrap(); - write_log.write(&len_key, code_len).unwrap(); + state.write_log_mut().write(&key, tx_no_op.clone()).unwrap(); + state.write_log_mut().write(&len_key, code_len).unwrap(); // Assuming 200 pages, 12.8 MiB limit assert_eq!(memory::TX_MEMORY_MAX_PAGES, 200); @@ -1026,9 +1007,8 @@ mod tests { outer_tx.set_code(Code::new(tx_no_op, None)); outer_tx.set_data(Data::new(tx_data)); let result = tx( - &storage, - &mut write_log, - &mut gas_meter, + &mut state, + &gas_meter, &tx_index, &outer_tx, &mut vp_cache, @@ -1057,12 +1037,11 @@ mod tests { /// in the host input, the execution fails. #[test] fn test_vp_memory_limiter_in_host_input() { - let mut storage = TestStorage::default(); - let addr = storage.address_gen.generate_address("rng seed"); - let write_log = WriteLog::default(); - let mut gas_meter = VpGasMeter::new_from_tx_meter( + let mut state = TestState::default(); + let addr = state.in_mem_mut().address_gen.generate_address("rng seed"); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let keys_changed = BTreeSet::new(); let verifiers = BTreeSet::new(); let tx_index = TxIndex::default(); @@ -1073,8 +1052,8 @@ mod tests { let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); let code_len = (vp_code.len() as u64).serialize_to_vec(); - storage.write(&key, vp_code).unwrap(); - storage.write(&len_key, code_len).unwrap(); + state.write_bytes(&key, vp_code).unwrap(); + state.write_bytes(&len_key, code_len).unwrap(); // Assuming 200 pages, 12.8 MiB limit assert_eq!(memory::VP_MEMORY_MAX_PAGES, 200); @@ -1084,7 +1063,7 @@ mod tests { let len = 2_usize.pow(24); let tx_data: Vec = vec![6_u8; len]; let mut outer_tx = Tx::from_type(TxType::Raw); - outer_tx.header.chain_id = storage.chain_id.clone(); + outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_data(Data::new(tx_data)); outer_tx.set_code(Code::new(vec![], None)); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); @@ -1093,9 +1072,8 @@ mod tests { &outer_tx, &tx_index, &addr, - &storage, - &write_log, - &mut gas_meter, + &state, + &gas_meter, &keys_changed, &verifiers, vp_cache, @@ -1126,9 +1104,9 @@ mod tests { /// execution is aborted. 
#[test] fn test_tx_memory_limiter_in_host_env() { - let mut storage = TestStorage::default(); - let mut write_log = WriteLog::default(); - let mut gas_meter = TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()); + let mut state = TestState::default(); + let gas_meter = + RefCell::new(TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into())); let tx_index = TxIndex::default(); let tx_read_key = TestWasms::TxReadStorageKey.read_bytes(); @@ -1137,8 +1115,11 @@ mod tests { let code_len = (tx_read_key.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); - write_log.write(&key, tx_read_key.clone()).unwrap(); - write_log.write(&len_key, code_len).unwrap(); + state + .write_log_mut() + .write(&key, tx_read_key.clone()) + .unwrap(); + state.write_log_mut().write(&len_key, code_len).unwrap(); // Allocating `2^24` (16 MiB) for a value in storage that the tx // attempts to read should be above the memory limit and should @@ -1150,7 +1131,7 @@ mod tests { // Write the value that should be read by the tx into the storage. When // writing directly to storage, the value has to be encoded with // Borsh. - storage.write(&key, value.serialize_to_vec()).unwrap(); + state.write_bytes(&key, value.serialize_to_vec()).unwrap(); let tx_data = key.serialize_to_vec(); let (mut vp_cache, _) = wasm::compilation_cache::common::testing::cache(); @@ -1160,9 +1141,8 @@ mod tests { outer_tx.set_code(Code::new(tx_read_key, None)); outer_tx.set_data(Data::new(tx_data)); let error = tx( - &storage, - &mut write_log, - &mut gas_meter, + &mut state, + &gas_meter, &tx_index, &outer_tx, &mut vp_cache, @@ -1178,12 +1158,11 @@ mod tests { /// execution, the execution is aborted. #[test] fn test_vp_memory_limiter_in_host_env() { - let mut storage = TestStorage::default(); - let addr = storage.address_gen.generate_address("rng seed"); - let write_log = WriteLog::default(); - let mut gas_meter = VpGasMeter::new_from_tx_meter( + let mut state = TestState::default(); + let addr = state.in_mem_mut().address_gen.generate_address("rng seed"); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let keys_changed = BTreeSet::new(); let verifiers = BTreeSet::new(); let tx_index = TxIndex::default(); @@ -1194,8 +1173,8 @@ mod tests { let code_len = (vp_read_key.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); - storage.write(&key, vp_read_key).unwrap(); - storage.write(&len_key, code_len).unwrap(); + state.write_bytes(&key, vp_read_key).unwrap(); + state.write_bytes(&len_key, code_len).unwrap(); // Allocating `2^24` (16 MiB) for a value in storage that the tx // attempts to read should be above the memory limit and should @@ -1207,10 +1186,10 @@ mod tests { // Write the value that should be read by the tx into the storage. When // writing directly to storage, the value has to be encoded with // Borsh. 
- storage.write(&key, value.serialize_to_vec()).unwrap(); + state.write_bytes(&key, value.serialize_to_vec()).unwrap(); let tx_data = key.serialize_to_vec(); let mut outer_tx = Tx::from_type(TxType::Raw); - outer_tx.header.chain_id = storage.chain_id.clone(); + outer_tx.header.chain_id = state.in_mem().chain_id.clone(); outer_tx.set_data(Data::new(tx_data)); outer_tx.set_code(Code::new(vec![], None)); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); @@ -1219,9 +1198,8 @@ mod tests { &outer_tx, &tx_index, &addr, - &storage, - &write_log, - &mut gas_meter, + &state, + &gas_meter, &keys_changed, &verifiers, vp_cache, @@ -1237,12 +1215,11 @@ mod tests { /// and hence returns `false`. #[test] fn test_vp_memory_limiter_in_host_env_inside_guest_calling_eval() { - let mut storage = TestStorage::default(); - let addr = storage.address_gen.generate_address("rng seed"); - let write_log = WriteLog::default(); - let mut gas_meter = VpGasMeter::new_from_tx_meter( + let mut state = TestState::default(); + let addr = state.in_mem_mut().address_gen.generate_address("rng seed"); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let keys_changed = BTreeSet::new(); let verifiers = BTreeSet::new(); let tx_index = TxIndex::default(); @@ -1254,8 +1231,8 @@ mod tests { let code_len = (vp_eval.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); - storage.write(&key, vp_eval).unwrap(); - storage.write(&len_key, code_len).unwrap(); + state.write_bytes(&key, vp_eval).unwrap(); + state.write_bytes(&len_key, code_len).unwrap(); // This code will read value from the storage let vp_read_key = TestWasms::VpReadStorageKey.read_bytes(); // store the wasm code @@ -1263,8 +1240,8 @@ mod tests { let code_len = (vp_read_key.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&read_code_hash); let len_key = Key::wasm_code_len(&read_code_hash); - storage.write(&key, vp_read_key).unwrap(); - storage.write(&len_key, code_len).unwrap(); + state.write_bytes(&key, vp_read_key).unwrap(); + state.write_bytes(&len_key, code_len).unwrap(); // Allocating `2^24` (16 MiB) for a value in storage that the tx // attempts to read should be above the memory limit and should @@ -1276,10 +1253,10 @@ mod tests { // Write the value that should be read by the tx into the storage. When // writing directly to storage, the value has to be encoded with // Borsh. 
- storage.write(&key, value.serialize_to_vec()).unwrap(); + state.write_bytes(&key, value.serialize_to_vec()).unwrap(); let input = 2_usize.pow(23).serialize_to_vec(); - let mut tx = Tx::new(storage.chain_id.clone(), None); + let mut tx = Tx::new(state.in_mem().chain_id.clone(), None); tx.add_code(vec![], None).add_serialized_data(input); let eval_vp = EvalVp { @@ -1287,7 +1264,7 @@ mod tests { input: tx, }; - let mut outer_tx = Tx::new(storage.chain_id.clone(), None); + let mut outer_tx = Tx::new(state.in_mem().chain_id.clone(), None); outer_tx.add_code(vec![], None).add_data(eval_vp); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); @@ -1296,9 +1273,8 @@ mod tests { &outer_tx, &tx_index, &addr, - &storage, - &write_log, - &mut gas_meter, + &state, + &gas_meter, &keys_changed, &verifiers, vp_cache, @@ -1310,9 +1286,9 @@ mod tests { fn execute_tx_with_code(tx_code: Vec) -> Result> { let tx_data = vec![]; let tx_index = TxIndex::default(); - let storage = TestStorage::default(); - let mut write_log = WriteLog::default(); - let mut gas_meter = TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()); + let mut state = TestState::default(); + let gas_meter = + RefCell::new(TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into())); let (mut vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let (mut tx_cache, _) = @@ -1323,17 +1299,16 @@ mod tests { let code_len = (tx_code.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); - write_log.write(&key, tx_code).unwrap(); - write_log.write(&len_key, code_len).unwrap(); + state.write_log_mut().write(&key, tx_code).unwrap(); + state.write_log_mut().write(&len_key, code_len).unwrap(); let mut outer_tx = Tx::from_type(TxType::Raw); outer_tx.set_code(Code::from_hash(code_hash, None)); outer_tx.set_data(Data::new(tx_data)); tx( - &storage, - &mut write_log, - &mut gas_meter, + &mut state, + &gas_meter, &tx_index, &outer_tx, &mut vp_cache, @@ -1410,12 +1385,11 @@ mod tests { let outer_tx = Tx::from_type(TxType::Raw); let tx_index = TxIndex::default(); - let mut storage = TestStorage::default(); - let addr = storage.address_gen.generate_address("rng seed"); - let write_log = WriteLog::default(); - let mut gas_meter = VpGasMeter::new_from_tx_meter( + let mut state = TestState::default(); + let addr = state.in_mem_mut().address_gen.generate_address("rng seed"); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(TX_GAS_LIMIT.into()), - ); + )); let keys_changed = BTreeSet::new(); let verifiers = BTreeSet::new(); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); @@ -1424,17 +1398,16 @@ mod tests { let code_len = (vp_code.len() as u64).serialize_to_vec(); let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); - storage.write(&key, vp_code).unwrap(); - storage.write(&len_key, code_len).unwrap(); + state.write_bytes(&key, vp_code).unwrap(); + state.write_bytes(&len_key, code_len).unwrap(); vp( code_hash, &outer_tx, &tx_index, &addr, - &storage, - &write_log, - &mut gas_meter, + &state, + &gas_meter, &keys_changed, &verifiers, vp_cache, diff --git a/crates/proof_of_stake/src/epoched.rs b/crates/proof_of_stake/src/epoched.rs index a1767f0318..6b3395a3a0 100644 --- a/crates/proof_of_stake/src/epoched.rs +++ b/crates/proof_of_stake/src/epoched.rs @@ -1088,7 +1088,7 @@ mod test { use namada_core::address::testing::established_address_1; use namada_core::dec::Dec; use namada_core::{key, 
token}; - use namada_state::testing::TestWlStorage; + use namada_state::testing::TestState; use test_log::test; use super::*; @@ -1392,8 +1392,8 @@ mod test { Ok(()) } - fn init_storage() -> namada_storage::Result { - let mut s = TestWlStorage::default(); + fn init_storage() -> namada_storage::Result { + let mut s = TestState::default(); let gov_params = namada_governance::parameters::GovernanceParameters::default(); gov_params.init_storage(&mut s)?; diff --git a/crates/proof_of_stake/src/pos_queries.rs b/crates/proof_of_stake/src/pos_queries.rs index 8b07205f9e..677737f3db 100644 --- a/crates/proof_of_stake/src/pos_queries.rs +++ b/crates/proof_of_stake/src/pos_queries.rs @@ -116,7 +116,7 @@ where let epoch = epoch.unwrap_or_else(|| self.storage.get_block_epoch().unwrap()); ConsensusValidators { - wl_storage: self.storage, + state: self.storage, validator_set: consensus_validator_set_handle().at(&epoch), } } @@ -259,7 +259,7 @@ pub struct ConsensusValidators<'db, S> where S: StorageRead, { - wl_storage: &'db S, + state: &'db S, validator_set: ConsensusValidatorSet, } @@ -273,7 +273,7 @@ where &'this self, ) -> impl Iterator + 'db { self.validator_set - .iter(self.wl_storage) + .iter(self.state) .expect("Must be able to iterate over consensus validators") .map(|res| { let ( diff --git a/crates/proof_of_stake/src/tests/helpers.rs b/crates/proof_of_stake/src/tests/helpers.rs index 26e9e53d73..f199c13e85 100644 --- a/crates/proof_of_stake/src/tests/helpers.rs +++ b/crates/proof_of_stake/src/tests/helpers.rs @@ -8,7 +8,7 @@ use namada_core::key::{self, RefTo}; use namada_core::storage::Epoch; use namada_core::token; use namada_core::token::testing::arb_amount_non_zero_ceiled; -use namada_state::testing::TestWlStorage; +use namada_state::testing::TestState; use proptest::prop_oneof; use proptest::strategy::{Just, Strategy}; @@ -48,7 +48,7 @@ pub fn test_slashes_with_unbonding_params() } pub fn get_tendermint_set_updates( - s: &TestWlStorage, + s: &TestState, params: &PosParams, Epoch(epoch): Epoch, ) -> Vec { @@ -61,9 +61,9 @@ pub fn get_tendermint_set_updates( } /// Advance to the next epoch. Returns the new epoch. 
-pub fn advance_epoch(s: &mut TestWlStorage, params: &PosParams) -> Epoch { - s.storage.block.epoch = s.storage.block.epoch.next(); - let current_epoch = s.storage.block.epoch; +pub fn advance_epoch(s: &mut TestState, params: &PosParams) -> Epoch { + s.in_mem_mut().block.epoch = s.in_mem().block.epoch.next(); + let current_epoch = s.in_mem().block.epoch; compute_and_store_total_consensus_stake(s, current_epoch).unwrap(); copy_validator_sets_and_positions( s, diff --git a/crates/proof_of_stake/src/tests/state_machine.rs b/crates/proof_of_stake/src/tests/state_machine.rs index e2d1ad0cfe..7bf14ce6d5 100644 --- a/crates/proof_of_stake/src/tests/state_machine.rs +++ b/crates/proof_of_stake/src/tests/state_machine.rs @@ -13,7 +13,7 @@ use namada_core::key::common::PublicKey; use namada_core::storage::Epoch; use namada_core::token::Change; use namada_governance::parameters::GovernanceParameters; -use namada_state::testing::TestWlStorage; +use namada_state::testing::TestState; use namada_storage::collections::lazy_map::{ Collectable, NestedSubKey, SubKey, }; @@ -163,7 +163,7 @@ struct AbstractPosState { #[derive(Debug)] struct ConcretePosState { /// Storage - contains all the PoS state - s: TestWlStorage, + s: TestState, } /// State machine transitions @@ -225,7 +225,7 @@ impl StateMachineTest for ConcretePosState { .map(|val| &val.address) .collect::>() ); - let mut s = TestWlStorage::default(); + let mut s = TestState::default(); initial_state.gov_params.init_storage(&mut s).unwrap(); crate::test_utils::test_init_genesis( &mut s, @@ -245,7 +245,7 @@ impl StateMachineTest for ConcretePosState { let params = crate::read_pos_params(&state.s).unwrap(); let pos_balance = read_balance( &state.s, - &state.s.storage.native_token, + &state.s.in_mem().native_token, &crate::ADDRESS, ) .unwrap(); @@ -256,7 +256,7 @@ impl StateMachineTest for ConcretePosState { advance_epoch(&mut state.s, ¶ms); // Need to apply some slashing - let current_epoch = state.s.storage.block.epoch; + let current_epoch = state.s.in_mem().block.epoch; crate::slashing::process_slashes(&mut state.s, current_epoch) .unwrap(); @@ -771,7 +771,7 @@ impl StateMachineTest for ConcretePosState { impl ConcretePosState { fn current_epoch(&self) -> Epoch { - self.s.storage.block.epoch + self.s.in_mem().block.epoch } fn check_next_epoch_post_conditions(&self, params: &PosParams) { @@ -1443,7 +1443,7 @@ impl ConcretePosState { params: &PosParams, validator: &Address, ) { - let current_epoch = self.s.storage.block.epoch; + let current_epoch = self.s.in_mem().block.epoch; // Make sure the validator is not in either set until the pipeline epoch for epoch in current_epoch.iter_range(params.pipeline_len) { diff --git a/crates/proof_of_stake/src/tests/state_machine_v2.rs b/crates/proof_of_stake/src/tests/state_machine_v2.rs index ee83c08f0d..c5f6a778f8 100644 --- a/crates/proof_of_stake/src/tests/state_machine_v2.rs +++ b/crates/proof_of_stake/src/tests/state_machine_v2.rs @@ -14,7 +14,7 @@ use namada_core::key::common::PublicKey; use namada_core::storage::Epoch; use namada_core::token::Change; use namada_governance::parameters::GovernanceParameters; -use namada_state::testing::TestWlStorage; +use namada_state::testing::TestState; use namada_storage::collections::lazy_map::{NestedSubKey, SubKey}; use namada_storage::StorageRead; use proptest::prelude::*; @@ -1867,7 +1867,7 @@ impl Unbond { #[derivative(Debug)] struct ConcretePosState { /// Storage - contains all the PoS state - s: TestWlStorage, + s: TestState, /// Last reference state in debug format 
to print changes after transitions #[derivative(Debug = "ignore")] last_state_diff: DbgPrintDiff, @@ -1937,7 +1937,7 @@ impl StateMachineTest for ConcretePosState { .map(|val| &val.address) .collect::>() ); - let mut s = TestWlStorage::default(); + let mut s = TestState::default(); initial_state.gov_params.init_storage(&mut s).unwrap(); crate::test_utils::init_genesis_helper( &mut s, @@ -1973,7 +1973,7 @@ impl StateMachineTest for ConcretePosState { let params = crate::read_pos_params(&state.s).unwrap(); let pos_balance = read_balance( &state.s, - &state.s.storage.native_token, + &state.s.in_mem().native_token, &crate::ADDRESS, ) .unwrap(); @@ -1984,7 +1984,7 @@ impl StateMachineTest for ConcretePosState { advance_epoch(&mut state.s, ¶ms); // Need to apply some slashing - let current_epoch = state.s.storage.block.epoch; + let current_epoch = state.s.in_mem().block.epoch; crate::slashing::process_slashes(&mut state.s, current_epoch) .unwrap(); @@ -2724,7 +2724,7 @@ impl StateMachineTest for ConcretePosState { impl ConcretePosState { fn current_epoch(&self) -> Epoch { - self.s.storage.block.epoch + self.s.in_mem().block.epoch } fn check_next_epoch_post_conditions(&self, params: &PosParams) { @@ -3119,7 +3119,7 @@ impl ConcretePosState { params: &PosParams, validator: &Address, ) { - let current_epoch = self.s.storage.block.epoch; + let current_epoch = self.s.in_mem().block.epoch; // Make sure the validator is not in either set until the pipeline epoch for epoch in current_epoch.iter_range(params.pipeline_len) { diff --git a/crates/proof_of_stake/src/tests/test_helper_fns.rs b/crates/proof_of_stake/src/tests/test_helper_fns.rs index b4e08395fd..98c2ac37b0 100644 --- a/crates/proof_of_stake/src/tests/test_helper_fns.rs +++ b/crates/proof_of_stake/src/tests/test_helper_fns.rs @@ -6,7 +6,7 @@ use namada_core::address::testing::{ use namada_core::dec::Dec; use namada_core::storage::{Epoch, Key}; use namada_core::token; -use namada_state::testing::TestWlStorage; +use namada_state::testing::TestState; use namada_storage::collections::lazy_map::NestedMap; use namada_storage::collections::LazyCollection; @@ -35,7 +35,7 @@ use crate::{ /// `iterateBondsUpToAmountTest` #[test] fn test_find_bonds_to_remove() { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let gov_params = namada_governance::parameters::GovernanceParameters::default(); gov_params.init_storage(&mut storage).unwrap(); @@ -119,7 +119,7 @@ fn test_find_bonds_to_remove() { /// `computeModifiedRedelegationTest` #[test] fn test_compute_modified_redelegation() { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let validator1 = established_address_1(); let validator2 = established_address_2(); let owner = established_address_3(); @@ -293,7 +293,7 @@ fn test_compute_modified_redelegation() { /// `computeBondAtEpochTest` #[test] fn test_compute_bond_at_epoch() { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { pipeline_len: 2, unbonding_len: 4, @@ -443,7 +443,7 @@ fn test_compute_bond_at_epoch() { /// `computeSlashBondAtEpochTest` #[test] fn test_compute_slash_bond_at_epoch() { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { pipeline_len: 2, unbonding_len: 4, @@ -541,7 +541,7 @@ fn test_compute_slash_bond_at_epoch() { /// `computeNewRedelegatedUnbondsTest` #[test] fn test_compute_new_redelegated_unbonds() { - let mut storage = 
TestWlStorage::default(); + let mut storage = TestState::default(); let alice = established_address_1(); let bob = established_address_2(); @@ -811,7 +811,7 @@ fn test_compute_slashable_amount() { /// `foldAndSlashRedelegatedBondsMapTest` #[test] fn test_fold_and_slash_redelegated_bonds() { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { unbonding_len: 4, ..Default::default() @@ -910,7 +910,7 @@ fn test_fold_and_slash_redelegated_bonds() { /// `slashRedelegationTest` #[test] fn test_slash_redelegation() { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { unbonding_len: 4, ..Default::default() @@ -1098,7 +1098,7 @@ fn test_slash_redelegation() { /// `slashValidatorRedelegationTest` #[test] fn test_slash_validator_redelegation() { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { unbonding_len: 4, ..Default::default() @@ -1280,7 +1280,7 @@ fn test_slash_validator_redelegation() { /// `slashValidatorTest` #[test] fn test_slash_validator() { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { unbonding_len: 4, ..Default::default() @@ -1680,7 +1680,7 @@ fn test_slash_validator() { /// `computeAmountAfterSlashingUnbondTest` #[test] fn test_compute_amount_after_slashing_unbond() { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { unbonding_len: 4, ..Default::default() @@ -1798,7 +1798,7 @@ fn test_compute_amount_after_slashing_unbond() { /// `computeAmountAfterSlashingWithdrawTest` #[test] fn test_compute_amount_after_slashing_withdraw() { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { unbonding_len: 4, ..Default::default() @@ -1929,7 +1929,7 @@ fn test_compute_amount_after_slashing_withdraw() { fn test_from_sm_case_1() { use namada_core::address::testing::established_address_4; - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let gov_params = namada_governance::parameters::GovernanceParameters::default(); gov_params.init_storage(&mut storage).unwrap(); diff --git a/crates/proof_of_stake/src/tests/test_pos.rs b/crates/proof_of_stake/src/tests/test_pos.rs index fe9477b602..1071c5dc00 100644 --- a/crates/proof_of_stake/src/tests/test_pos.rs +++ b/crates/proof_of_stake/src/tests/test_pos.rs @@ -9,7 +9,7 @@ use namada_core::key::testing::{common_sk_from_simple_seed, gen_keypair}; use namada_core::key::RefTo; use namada_core::storage::{BlockHeight, Epoch}; use namada_core::{address, key}; -use namada_state::testing::TestWlStorage; +use namada_state::testing::TestState; use namada_storage::collections::lazy_map::Collectable; use namada_storage::StorageRead; use proptest::prelude::*; @@ -203,8 +203,8 @@ fn test_test_init_genesis_aux( // "Test inputs: {params:?}, {start_epoch}, genesis validators: \ // {validators:#?}" // ); - let mut s = TestWlStorage::default(); - s.storage.block.epoch = start_epoch; + let mut s = TestState::default(); + s.in_mem_mut().block.epoch = start_epoch; validators.sort_by(|a, b| b.tokens.cmp(&a.tokens)); let params = test_init_genesis( @@ -287,11 +287,11 @@ fn test_bonds_aux(params: OwnedPosParams, validators: Vec) { // params.unbonding_len = 4; // println!("\nTest inputs: {params:?}, genesis validators: // {validators:#?}"); - let mut 
s = TestWlStorage::default(); + let mut s = TestState::default(); // Genesis - let start_epoch = s.storage.block.epoch; - let mut current_epoch = s.storage.block.epoch; + let start_epoch = s.in_mem().block.epoch; + let mut current_epoch = s.in_mem().block.epoch; let params = test_init_genesis( &mut s, params, @@ -561,7 +561,7 @@ fn test_bonds_aux(params: OwnedPosParams, validators: Vec) { let unbonded_genesis_self_bond = amount_self_unbond - amount_self_bond != token::Amount::zero(); - let self_unbond_epoch = s.storage.block.epoch; + let self_unbond_epoch = s.in_mem().block.epoch; unbond_tokens( &mut s, @@ -823,7 +823,7 @@ fn test_unjail_validator_aux( ) { // println!("\nTest inputs: {params:?}, genesis validators: // {validators:#?}"); - let mut s = TestWlStorage::default(); + let mut s = TestState::default(); // Find the validator with the most stake and 100x his stake to keep the // cubic slash rate small @@ -836,7 +836,7 @@ fn test_unjail_validator_aux( // let val_tokens = validators[num_vals - 2].tokens; // Genesis - let mut current_epoch = s.storage.block.epoch; + let mut current_epoch = s.in_mem().block.epoch; let params = test_init_genesis( &mut s, params, @@ -960,14 +960,14 @@ fn test_unjail_validator_aux( } fn test_unslashed_bond_amount_aux(validators: Vec) { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { unbonding_len: 4, ..Default::default() }; // Genesis - let mut current_epoch = storage.storage.block.epoch; + let mut current_epoch = storage.in_mem().block.epoch; let params = test_init_genesis( &mut storage, params, @@ -1153,9 +1153,9 @@ fn test_log_block_rewards_aux( .map(|v| (&v.address, v.tokens.to_string_native())) .collect::>() ); - let mut s = TestWlStorage::default(); + let mut s = TestState::default(); // Init genesis - let current_epoch = s.storage.block.epoch; + let current_epoch = s.in_mem().block.epoch; let params = test_init_genesis( &mut s, params, @@ -1372,9 +1372,9 @@ fn test_update_rewards_products_aux(validators: Vec) { .map(|v| (&v.address, v.tokens.to_string_native())) .collect::>() ); - let mut s = TestWlStorage::default(); + let mut s = TestState::default(); // Init genesis - let current_epoch = s.storage.block.epoch; + let current_epoch = s.in_mem().block.epoch; let params = OwnedPosParams::default(); let params = test_init_genesis( &mut s, @@ -1464,10 +1464,10 @@ fn test_consensus_key_change_aux(validators: Vec) { // println!("\nTest inputs: {params:?}, genesis validators: // {validators:#?}"); - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); // Genesis - let mut current_epoch = storage.storage.block.epoch; + let mut current_epoch = storage.in_mem().block.epoch; let params = test_init_genesis( &mut storage, params, @@ -1542,7 +1542,7 @@ fn test_consensus_key_change_aux(validators: Vec) { change_consensus_key(&mut storage, &validator, &ck_3, current_epoch) .unwrap(); - let staking_token = storage.storage.native_token.clone(); + let staking_token = storage.in_mem().native_token.clone(); let amount_del = token::Amount::native_whole(5); credit_tokens(&mut storage, &staking_token, &validator, amount_del) .unwrap(); @@ -1591,14 +1591,14 @@ fn test_is_delegator_aux(mut validators: Vec) { let validator1 = validators[0].address.clone(); let validator2 = validators[1].address.clone(); - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { unbonding_len: 4, ..Default::default() }; // Genesis - 
let mut current_epoch = storage.storage.block.epoch; + let mut current_epoch = storage.in_mem().block.epoch; let params = test_init_genesis( &mut storage, params, @@ -1699,13 +1699,13 @@ fn test_jail_for_liveness_aux(validators: Vec) { let missed_votes = 1_u64; // Open 2 storages - let mut storage = TestWlStorage::default(); - let mut storage_clone = TestWlStorage::default(); + let mut storage = TestState::default(); + let mut storage_clone = TestState::default(); // Apply the same changes to each storage for s in [&mut storage, &mut storage_clone] { // Genesis - let current_epoch = s.storage.block.epoch; + let current_epoch = s.in_mem().block.epoch; let jail_epoch = current_epoch.next(); let params = test_init_genesis( s, @@ -1739,5 +1739,8 @@ fn test_jail_for_liveness_aux(validators: Vec) { } // Assert that the changes from `jail_for_liveness` are the same - pretty_assertions::assert_eq!(&storage.write_log, &storage_clone.write_log); + pretty_assertions::assert_eq!( + &storage.write_log(), + &storage_clone.write_log() + ); } diff --git a/crates/proof_of_stake/src/tests/test_slash_and_redel.rs b/crates/proof_of_stake/src/tests/test_slash_and_redel.rs index b7a04b3c86..34a9c574d8 100644 --- a/crates/proof_of_stake/src/tests/test_slash_and_redel.rs +++ b/crates/proof_of_stake/src/tests/test_slash_and_redel.rs @@ -6,7 +6,7 @@ use namada_core::address; use namada_core::dec::Dec; use namada_core::storage::{BlockHeight, Epoch}; use namada_core::token::NATIVE_MAX_DECIMAL_PLACES; -use namada_state::testing::TestWlStorage; +use namada_state::testing::TestState; use namada_storage::collections::lazy_map::Collectable; use namada_storage::StorageRead; use proptest::prelude::*; @@ -66,14 +66,14 @@ fn test_simple_redelegation_aux( let src_validator = validators[0].address.clone(); let dest_validator = validators[1].address.clone(); - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { unbonding_len: 4, ..Default::default() }; // Genesis - let mut current_epoch = storage.storage.block.epoch; + let mut current_epoch = storage.in_mem().block.epoch; let params = test_init_genesis( &mut storage, params, @@ -293,7 +293,7 @@ fn test_slashes_with_unbonding_aux( params.unbonding_len = 4; // println!("\nTest inputs: {params:?}, genesis validators: // {validators:#?}"); - let mut s = TestWlStorage::default(); + let mut s = TestState::default(); // Find the validator with the least stake to avoid the cubic slash rate // going to 100% @@ -305,8 +305,8 @@ fn test_slashes_with_unbonding_aux( let val_tokens = validator.tokens; // Genesis - // let start_epoch = s.storage.block.epoch; - let mut current_epoch = s.storage.block.epoch; + // let start_epoch = s.in_mem().block.epoch; + let mut current_epoch = s.in_mem().block.epoch; let params = test_init_genesis( &mut s, params, @@ -461,7 +461,7 @@ fn test_redelegation_with_slashing_aux( let src_validator = validators[0].address.clone(); let dest_validator = validators[1].address.clone(); - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { unbonding_len: 4, // Avoid empty consensus set by removing the threshold @@ -470,7 +470,7 @@ fn test_redelegation_with_slashing_aux( }; // Genesis - let mut current_epoch = storage.storage.block.epoch; + let mut current_epoch = storage.in_mem().block.epoch; let params = test_init_genesis( &mut storage, params, @@ -682,14 +682,14 @@ fn test_chain_redelegations_aux(mut validators: Vec) { let dest_validator_2 = 
validators[2].address.clone(); let _init_stake_dest_2 = validators[2].tokens; - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { unbonding_len: 4, ..Default::default() }; // Genesis - let mut current_epoch = storage.storage.block.epoch; + let mut current_epoch = storage.in_mem().block.epoch; let params = test_init_genesis( &mut storage, params, @@ -1073,10 +1073,10 @@ fn test_overslashing_aux(mut validators: Vec) { // println!("\nTest inputs: {params:?}, genesis validators: // {validators:#?}"); - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); // Genesis - let mut current_epoch = storage.storage.block.epoch; + let mut current_epoch = storage.in_mem().block.epoch; let params = test_init_genesis( &mut storage, params, @@ -1087,7 +1087,7 @@ fn test_overslashing_aux(mut validators: Vec) { storage.commit_block().unwrap(); // Get a delegator with some tokens - let staking_token = storage.storage.native_token.clone(); + let staking_token = storage.in_mem().native_token.clone(); let delegator = address::testing::gen_implicit_address(); let amount_del = token::Amount::native_whole(5); credit_tokens(&mut storage, &staking_token, &delegator, amount_del) @@ -1243,7 +1243,7 @@ proptest! { } fn test_slashed_bond_amount_aux(validators: Vec) { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let params = OwnedPosParams { unbonding_len: 4, validator_stake_threshold: token::Amount::zero(), @@ -1260,7 +1260,7 @@ fn test_slashed_bond_amount_aux(validators: Vec) { validators[0].tokens = (init_tot_stake - val1_init_stake) / 30; // Genesis - let mut current_epoch = storage.storage.block.epoch; + let mut current_epoch = storage.in_mem().block.epoch; let params = test_init_genesis( &mut storage, params, diff --git a/crates/proof_of_stake/src/tests/test_validator.rs b/crates/proof_of_stake/src/tests/test_validator.rs index 2d06174cfb..da7b44a86d 100644 --- a/crates/proof_of_stake/src/tests/test_validator.rs +++ b/crates/proof_of_stake/src/tests/test_validator.rs @@ -9,7 +9,7 @@ use namada_core::key::testing::{ use namada_core::key::{self, common, RefTo}; use namada_core::storage::Epoch; use namada_core::token; -use namada_state::testing::TestWlStorage; +use namada_state::testing::TestState; use namada_storage::collections::lazy_map; use proptest::prelude::*; use proptest::test_runner::Config; @@ -78,10 +78,10 @@ fn test_become_validator_aux( // validators: {validators:#?}" // ); - let mut s = TestWlStorage::default(); + let mut s = TestState::default(); // Genesis - let mut current_epoch = s.storage.block.epoch; + let mut current_epoch = s.in_mem().block.epoch; let params = test_init_genesis( &mut s, params, @@ -273,7 +273,7 @@ fn test_become_validator_aux( #[test] fn test_validator_raw_hash() { - let mut storage = TestWlStorage::default(); + let mut storage = TestState::default(); let address = address::testing::established_address_1(); let consensus_sk = key::testing::keypair_1(); let consensus_pk = consensus_sk.to_public(); @@ -293,7 +293,7 @@ fn test_validator_raw_hash() { #[test] fn test_validator_sets() { - let mut s = TestWlStorage::default(); + let mut s = TestState::default(); // Only 3 consensus validator slots let params = OwnedPosParams { max_validator_slots: 3, @@ -388,7 +388,7 @@ fn test_validator_sets() { .unwrap(); // A helper to insert a non-genesis validator - let insert_validator = |s: &mut TestWlStorage, + let insert_validator = |s: &mut TestState, 
addr, pk: &common::PublicKey, stake: token::Amount, @@ -976,7 +976,7 @@ fn test_validator_sets() { /// with 0 voting power, because it wasn't it its set before #[test] fn test_validator_sets_swap() { - let mut s = TestWlStorage::default(); + let mut s = TestState::default(); // Only 2 consensus validator slots let params = OwnedPosParams { max_validator_slots: 2, @@ -1066,7 +1066,7 @@ fn test_validator_sets_swap() { .unwrap(); // A helper to insert a non-genesis validator - let insert_validator = |s: &mut TestWlStorage, + let insert_validator = |s: &mut TestState, addr, pk: &common::PublicKey, stake: token::Amount, @@ -1281,8 +1281,8 @@ fn test_purge_validator_information_aux(validators: Vec) { ..Default::default() }; - let mut s = TestWlStorage::default(); - let mut current_epoch = s.storage.block.epoch; + let mut s = TestState::default(); + let mut current_epoch = s.in_mem().block.epoch; // Genesis let gov_params = namada_governance::parameters::GovernanceParameters { @@ -1306,7 +1306,7 @@ fn test_purge_validator_information_aux(validators: Vec) { let validator_positions = validator_set_positions_handle(); let all_validator_addresses = validator_addresses_handle(); - let check_is_data = |storage: &TestWlStorage, start: Epoch, end: Epoch| { + let check_is_data = |storage: &TestState, start: Epoch, end: Epoch| { for ep in Epoch::iter_bounds_inclusive(start, end) { assert!(!consensus_val_set.at(&ep).is_empty(storage).unwrap()); // assert!(!below_cap_val_set.at(&ep).is_empty(storage). @@ -1325,7 +1325,7 @@ fn test_purge_validator_information_aux(validators: Vec) { for _ in 0..default_past_epochs { current_epoch = advance_epoch(&mut s, ¶ms); } - assert_eq!(s.storage.block.epoch.0, default_past_epochs); + assert_eq!(s.in_mem().block.epoch.0, default_past_epochs); assert_eq!(current_epoch.0, default_past_epochs); check_is_data( diff --git a/crates/sdk/src/queries/mod.rs b/crates/sdk/src/queries/mod.rs index 5753b989c5..d9e2c79392 100644 --- a/crates/sdk/src/queries/mod.rs +++ b/crates/sdk/src/queries/mod.rs @@ -60,7 +60,7 @@ where { if request.height.value() != 0 && request.height.value() - != ctx.wl_storage.storage.get_last_block_height().0 + != ctx.state.in_mem().get_last_block_height().0 { return Err(namada_storage::Error::new_const( "This query doesn't support arbitrary block heights, only the \ @@ -98,7 +98,8 @@ pub fn require_no_data(request: &RequestQuery) -> namada_storage::Result<()> { mod testing { use namada_core::storage::BlockHeight; - use namada_state::testing::TestWlStorage; + use namada_state::testing::TestState; + use namada_storage::StorageWrite; use tendermint_rpc::Response; use super::*; @@ -112,8 +113,8 @@ mod testing { { /// RPC router pub rpc: RPC, - /// storage - pub wl_storage: TestWlStorage, + /// state + pub state: TestState, /// event log pub event_log: EventLog, } @@ -126,21 +127,18 @@ mod testing { /// Initialize a test client for the given root RPC router pub fn new(rpc: RPC) -> Self { // Initialize the `TestClient` - let mut wl_storage = TestWlStorage::default(); + let mut state = TestState::default(); // Initialize mock gas limit let max_block_gas_key = namada_parameters::storage::get_max_block_gas_key(); - wl_storage - .storage - .write(&max_block_gas_key, namada_core::encode(&20_000_000_u64)) - .expect( - "Max block gas parameter must be initialized in storage", - ); + state.write(&max_block_gas_key, 20_000_000_u64).expect( + "Max block gas parameter must be initialized in storage", + ); let event_log = EventLog::default(); Self { rpc, - wl_storage, + state, 
event_log, } } @@ -176,7 +174,7 @@ mod testing { prove, }; let ctx = RequestCtx { - wl_storage: &self.wl_storage, + state: self.state.read_only(), event_log: &self.event_log, vp_wasm_cache: (), tx_wasm_cache: (), diff --git a/crates/sdk/src/queries/router.rs b/crates/sdk/src/queries/router.rs index a401094f6c..1b1ef422a4 100644 --- a/crates/sdk/src/queries/router.rs +++ b/crates/sdk/src/queries/router.rs @@ -1032,7 +1032,7 @@ mod test { }; let ctx = RequestCtx { event_log: &client.event_log, - wl_storage: &client.wl_storage, + state: &client.state, vp_wasm_cache: (), tx_wasm_cache: (), storage_read_past_height_limit: None, @@ -1049,7 +1049,7 @@ mod test { }; let ctx = RequestCtx { event_log: &client.event_log, - wl_storage: &client.wl_storage, + state: &client.state, vp_wasm_cache: (), tx_wasm_cache: (), storage_read_past_height_limit: None, diff --git a/crates/sdk/src/queries/shell.rs b/crates/sdk/src/queries/shell.rs index 4c37ade7ce..611bba59bb 100644 --- a/crates/sdk/src/queries/shell.rs +++ b/crates/sdk/src/queries/shell.rs @@ -17,7 +17,7 @@ use namada_core::storage::{ }; use namada_core::token::{Denomination, MaspDigitPos}; use namada_core::uint::Uint; -use namada_state::{DBIter, LastBlock, StorageHasher, DB}; +use namada_state::{DBIter, LastBlock, StateRead, StorageHasher, DB}; use namada_storage::{self, ResultExt, StorageRead}; #[cfg(any(test, feature = "async-client"))] use namada_tx::data::TxResult; @@ -135,10 +135,10 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let (iter, _gas) = ctx.wl_storage.storage.iter_results(); + let (iter, _gas) = ctx.state.db_iter_results(); let mut results = vec![ BlockResults::default(); - ctx.wl_storage.storage.block.height.0 as usize + 1 + ctx.state.in_mem().block.height.0 as usize + 1 ]; for (key, value, _gas) in iter { let key = u64::parse(key.clone()).map_err(|_| { @@ -173,8 +173,8 @@ where H: 'static + StorageHasher + Sync, { Ok(ctx - .wl_storage - .storage + .state + .in_mem() .conversion_state .assets .iter() @@ -199,12 +199,8 @@ where H: 'static + StorageHasher + Sync, { // Conversion values are constructed on request - if let Some(((addr, denom, digit), epoch, conv, pos)) = ctx - .wl_storage - .storage - .conversion_state - .assets - .get(&asset_type) + if let Some(((addr, denom, digit), epoch, conv, pos)) = + ctx.state.in_mem().conversion_state.assets.get(&asset_type) { Ok(Some(( addr.clone(), @@ -214,7 +210,7 @@ where Into::::into( conv.clone(), ), - ctx.wl_storage.storage.conversion_state.tree.path(*pos), + ctx.state.in_mem().conversion_state.tree.path(*pos), ))) } else { Ok(None) @@ -229,11 +225,11 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let tokens = ctx.wl_storage.storage.conversion_state.tokens.clone(); + let tokens = ctx.state.in_mem().conversion_state.tokens.clone(); let mut data = Vec::::new(); for (name, token) in tokens { let max_reward_rate = ctx - .wl_storage + .state .read::(&namada_token::storage_key::masp_max_reward_rate_key( &token, ))? @@ -247,7 +243,7 @@ where )) })?; let kd_gain = ctx - .wl_storage + .state .read::(&namada_token::storage_key::masp_kd_gain_key(&token))? .ok_or_else(|| { namada_storage::Error::new(std::io::Error::new( @@ -259,7 +255,7 @@ where )) })?; let kp_gain = ctx - .wl_storage + .state .read::(&namada_token::storage_key::masp_kp_gain_key(&token))? 
.ok_or_else(|| { namada_storage::Error::new(std::io::Error::new( @@ -271,7 +267,7 @@ where )) })?; let locked_amount_target = ctx - .wl_storage + .state .read::( &namada_token::storage_key::masp_locked_amount_target_key( &token, @@ -307,7 +303,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let data = ctx.wl_storage.storage.last_epoch; + let data = ctx.state.in_mem().last_epoch; Ok(data) } @@ -318,7 +314,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let data = ctx.wl_storage.storage.native_token.clone(); + let data = ctx.state.in_mem().native_token.clone(); Ok(data) } @@ -330,7 +326,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - Ok(ctx.wl_storage.storage.block.pred_epochs.get_epoch(height)) + Ok(ctx.state.in_mem().block.pred_epochs.get_epoch(height)) } fn last_block( @@ -340,7 +336,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - Ok(ctx.wl_storage.storage.last_block.clone()) + Ok(ctx.state.in_mem().last_block.clone()) } /// Returns data with `vec![]` when the storage key is not found. For all @@ -356,7 +352,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let last_committed_height = ctx.wl_storage.storage.get_last_block_height(); + let last_committed_height = ctx.state.in_mem().get_last_block_height(); let queried_height = { let height: BlockHeight = request.height.into(); let is_last_height_query = height.0 == 0; @@ -382,16 +378,14 @@ where } match ctx - .wl_storage - .storage - .read_with_height(&storage_key, queried_height) + .state + .db_read_with_height(&storage_key, queried_height) .into_storage_result()? 
{ (Some(value), _gas) => { let proof = if request.prove { let proof = ctx - .wl_storage - .storage + .state .get_existence_proof(&storage_key, &value, queried_height) .into_storage_result()?; Some(proof) @@ -407,8 +401,7 @@ where (None, _gas) => { let proof = if request.prove { let proof = ctx - .wl_storage - .storage + .state .get_non_existence_proof(&storage_key, queried_height) .into_storage_result()?; Some(proof) @@ -435,7 +428,7 @@ where { require_latest_height(&ctx, request)?; - let iter = namada_storage::iter_prefix_bytes(ctx.wl_storage, &storage_key)?; + let iter = namada_storage::iter_prefix_bytes(ctx.state, &storage_key)?; let data: namada_storage::Result> = iter .map(|iter_result| { let (key, value) = iter_result?; @@ -446,7 +439,7 @@ where let proof = if request.prove { let queried_height = { let last_committed_height = - ctx.wl_storage.storage.get_last_block_height(); + ctx.state.in_mem().get_last_block_height(); let height: BlockHeight = request.height.into(); let is_last_height_query = height.0 == 0; @@ -460,8 +453,7 @@ where let mut ops = vec![]; for PrefixValue { key, value } in &data { let mut proof = ctx - .wl_storage - .storage + .state .get_existence_proof(key, value, queried_height) .into_storage_result()?; ops.append(&mut proof.ops); @@ -488,7 +480,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let data = StorageRead::has_key(ctx.wl_storage, &storage_key)?; + let data = StorageRead::has_key(ctx.state, &storage_key)?; Ok(data) } @@ -584,11 +576,11 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let account_exists = namada_account::exists(ctx.wl_storage, &owner)?; + let account_exists = namada_account::exists(ctx.state, &owner)?; if account_exists { - let public_keys = namada_account::public_keys(ctx.wl_storage, &owner)?; - let threshold = namada_account::threshold(ctx.wl_storage, &owner)?; + let public_keys = namada_account::public_keys(ctx.state, &owner)?; + let threshold = namada_account::threshold(ctx.state, &owner)?; Ok(Some(Account { public_keys_map: AccountPublicKeysMap::from_iter(public_keys), @@ -608,7 +600,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let public_keys = namada_account::public_keys(ctx.wl_storage, &owner)?; + let public_keys = namada_account::public_keys(ctx.state, &owner)?; Ok(!public_keys.is_empty()) } diff --git a/crates/sdk/src/queries/shell/eth_bridge.rs b/crates/sdk/src/queries/shell/eth_bridge.rs index 0962f08d3b..c6d49f99fc 100644 --- a/crates/sdk/src/queries/shell/eth_bridge.rs +++ b/crates/sdk/src/queries/shell/eth_bridge.rs @@ -230,16 +230,15 @@ where } let mut status = TransferToEthereumStatus { - queried_height: ctx.wl_storage.storage.get_last_block_height(), + queried_height: ctx.state.in_mem().get_last_block_height(), ..Default::default() }; // check which transfers in the Bridge pool match the requested hashes let merkle_tree = ctx - .wl_storage - .storage + .state .get_merkle_tree( - ctx.wl_storage.storage.get_last_block_height(), + ctx.state.in_mem().get_last_block_height(), Some(StoreType::BridgePool), ) .expect("We should always be able to read the database"); @@ -338,7 +337,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let ethbridge_queries = ctx.wl_storage.ethbridge_queries(); + let ethbridge_queries = ctx.state.ethbridge_queries(); let whitelisted = ethbridge_queries.is_token_whitelisted(&asset); let supply = 
ethbridge_queries @@ -363,7 +362,7 @@ where H: 'static + StorageHasher + Sync, T: BorshDeserialize, { - let Some(contract) = StorageRead::read(ctx.wl_storage, key)? else { + let Some(contract) = StorageRead::read(ctx.state, key)? else { return Err(namada_storage::Error::SimpleMessage( "Failed to read contract: The Ethereum bridge \ storage is not initialized", @@ -408,7 +407,7 @@ where H: 'static + StorageHasher + Sync, { Ok(read_ethereum_bridge_pool_at_height( - ctx.wl_storage.storage.get_last_block_height(), + ctx.state.in_mem().get_last_block_height(), ctx, )) } @@ -424,7 +423,7 @@ where { // get the latest signed merkle root of the Ethereum bridge pool let (_, height) = ctx - .wl_storage + .state .ethbridge_queries() .get_signed_bridge_pool_root() .ok_or(namada_storage::Error::SimpleMessage( @@ -446,8 +445,7 @@ where // get the backing store of the merkle tree corresponding // at the specified height. let merkle_tree = ctx - .wl_storage - .storage + .state .get_merkle_tree(height, Some(StoreType::BridgePool)) .expect("We should always be able to read the database"); let stores = merkle_tree.stores(); @@ -460,9 +458,8 @@ where .keys() .map(|hash| { let value = ctx - .wl_storage - .storage - .read_with_height(&get_key_from_hash(hash), height) + .state + .db_read_with_height(&get_key_from_hash(hash), height) .unwrap() .0 .unwrap(); @@ -490,7 +487,7 @@ where { // get the latest signed merkle root of the Ethereum bridge pool let (signed_root, height) = ctx - .wl_storage + .state .ethbridge_queries() .get_signed_bridge_pool_root() .ok_or(namada_storage::Error::SimpleMessage( @@ -502,7 +499,7 @@ where // make sure a relay attempt won't happen before the new signed // root has had time to be generated let latest_bp_nonce = - ctx.wl_storage.ethbridge_queries().get_bridge_pool_nonce(); + ctx.state.ethbridge_queries().get_bridge_pool_nonce(); if latest_bp_nonce != signed_root.data.1 { return Err(namada_storage::Error::Custom(CustomError( format!( @@ -516,8 +513,7 @@ where // get the merkle tree corresponding to the above root. let tree = ctx - .wl_storage - .storage + .state .get_merkle_tree(height, Some(StoreType::BridgePool)) .into_storage_result()?; // from the hashes of the transfers, get the actual values. @@ -526,7 +522,7 @@ where .iter() .filter_map(|hash| { let key = get_key_from_hash(hash); - match ctx.wl_storage.read_bytes(&key) { + match ctx.state.read_bytes(&key) { Ok(Some(bytes)) => Some((key, bytes)), _ => { missing_hashes.push(hash); @@ -565,7 +561,7 @@ where ) { Ok(BridgePool(proof)) => { let (validator_args, voting_powers) = ctx - .wl_storage + .state .ethbridge_queries() .get_bridge_validator_set(None); let relay_proof = ethereum_structs::RelayProof { @@ -616,7 +612,7 @@ where { let mut pending_events = HashMap::new(); for (mut key, value) in ctx - .wl_storage + .state .iter_prefix(ð_msgs_prefix())? .filter_map(|(k, v, _)| { let key = Key::from_str(&k).expect( @@ -637,11 +633,8 @@ where *key.segments.last_mut().unwrap() = DbKeySeg::StringSeg(Keys::segments().seen.into()); // check if the event has been seen - let is_seen = ctx - .wl_storage - .read::(&key) - .into_storage_result()? - .expect( + let is_seen = + ctx.state.read::(&key).into_storage_result()?.expect( "Iterating over storage should not yield keys without values.", ); if is_seen { @@ -655,18 +648,18 @@ where *key.segments.last_mut().unwrap() = DbKeySeg::StringSeg(Keys::segments().voting_power.into()); let voting_power = ctx - .wl_storage + .state .read::(&key) .into_storage_result()? 
.expect( "Iterating over storage should not yield keys without \ values.", ) - .fractional_stake(ctx.wl_storage); + .fractional_stake(ctx.state); for transfer in transfers { let key = get_key_from_hash(&transfer.keccak256()); let transfer = ctx - .wl_storage + .state .read::(&key) .into_storage_result()? .expect("The transfer must be present in storage"); @@ -696,7 +689,7 @@ where .into(), ))); } - let current_epoch = ctx.wl_storage.storage.last_epoch; + let current_epoch = ctx.state.in_mem().last_epoch; if epoch > current_epoch.next() { return Err(namada_storage::Error::Custom(CustomError( format!( @@ -707,7 +700,7 @@ where ))); } - if !ctx.wl_storage.ethbridge_queries().valset_upd_seen(epoch) { + if !ctx.state.ethbridge_queries().valset_upd_seen(epoch) { return Err(namada_storage::Error::Custom(CustomError( format!( "Validator set update proof is not yet available for the \ @@ -719,7 +712,7 @@ where let valset_upd_keys = vote_tallies::Keys::from(&epoch); let proof: EthereumProof = - StorageRead::read(ctx.wl_storage, &valset_upd_keys.body())?.expect( + StorageRead::read(ctx.state, &valset_upd_keys.body())?.expect( "EthereumProof is seen in storage, therefore it must exist", ); @@ -739,7 +732,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let current_epoch = ctx.wl_storage.storage.last_epoch; + let current_epoch = ctx.state.in_mem().last_epoch; if epoch > current_epoch.next() { Err(namada_storage::Error::Custom(CustomError( format!( @@ -750,7 +743,7 @@ where ))) } else { Ok(ctx - .wl_storage + .state .ethbridge_queries() .get_bridge_validator_set(Some(epoch)) .0) @@ -769,7 +762,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let current_epoch = ctx.wl_storage.storage.last_epoch; + let current_epoch = ctx.state.in_mem().last_epoch; if epoch > current_epoch.next() { Err(namada_storage::Error::Custom(CustomError( format!( @@ -780,7 +773,7 @@ where ))) } else { Ok(ctx - .wl_storage + .state .ethbridge_queries() .get_governance_validator_set(Some(epoch)) .0) @@ -797,7 +790,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let maybe_epoch = ctx.wl_storage.pos_queries().get_epoch(height); + let maybe_epoch = ctx.state.pos_queries().get_epoch(height); let Some(epoch) = maybe_epoch else { return Err(namada_storage::Error::SimpleMessage( "The epoch of the requested height does not exist", @@ -816,14 +809,14 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let current_epoch = ctx.wl_storage.storage.get_current_epoch().0; + let current_epoch = ctx.state.in_mem().get_current_epoch().0; if epoch > current_epoch + 1u64 { return Err(namada_storage::Error::SimpleMessage( "The requested epoch cannot be queried", )); } let (_, voting_powers) = ctx - .wl_storage + .state .ethbridge_queries() .get_bridge_validator_set(Some(epoch)); Ok(voting_powers) @@ -851,7 +844,7 @@ mod test_ethbridge_router { use namada_ethereum_bridge::storage::proof::BridgePoolRootProof; use namada_ethereum_bridge::storage::whitelist; use namada_proof_of_stake::pos_queries::PosQueries; - use namada_state::mockdb::MockDBWriteBatch; + use namada_storage::mockdb::MockDBWriteBatch; use namada_storage::StorageWrite; use namada_vote_ext::validator_set_update; use namada_vote_ext::validator_set_update::{ @@ -868,16 +861,15 @@ mod test_ethbridge_router { async fn test_read_consensus_valset() { let mut client = TestClient::new(RPC); let epoch 
= Epoch(0); - assert_eq!(client.wl_storage.storage.last_epoch, epoch); + assert_eq!(client.state.in_mem().last_epoch, epoch); // write validator to storage - test_utils::init_default_storage(&mut client.wl_storage); + test_utils::init_default_storage(&mut client.state); // commit the changes client - .wl_storage - .storage - .commit_block(MockDBWriteBatch) + .state + .commit_block_from_batch(MockDBWriteBatch) .expect("Test failed"); // check the response @@ -889,13 +881,13 @@ mod test_ethbridge_router { .unwrap(); let expected = { let total_power = client - .wl_storage + .state .pos_queries() .get_total_voting_power(Some(epoch)) .into(); let voting_powers_map: VotingPowersMap = client - .wl_storage + .state .ethbridge_queries() .get_consensus_eth_addresses(Some(epoch)) .iter() @@ -928,16 +920,15 @@ mod test_ethbridge_router { #[tokio::test] async fn test_read_consensus_valset_too_far_ahead() { let mut client = TestClient::new(RPC); - assert_eq!(client.wl_storage.storage.last_epoch.0, 0); + assert_eq!(client.state.in_mem().last_epoch.0, 0); // write validator to storage - test_utils::init_default_storage(&mut client.wl_storage); + test_utils::init_default_storage(&mut client.state); // commit the changes client - .wl_storage - .storage - .commit_block(MockDBWriteBatch) + .state + .commit_block_from_batch(MockDBWriteBatch) .expect("Test failed"); // check the response @@ -961,10 +952,10 @@ mod test_ethbridge_router { #[tokio::test] async fn test_read_valset_upd_proof() { let mut client = TestClient::new(RPC); - assert_eq!(client.wl_storage.storage.last_epoch.0, 0); + assert_eq!(client.state.in_mem().last_epoch.0, 0); // write validator to storage - let keys = test_utils::init_default_storage(&mut client.wl_storage); + let keys = test_utils::init_default_storage(&mut client.state); // write proof to storage let vext = validator_set_update::Vext { @@ -979,7 +970,7 @@ mod test_ethbridge_router { .eth_bridge, ); let tx_result = aggregate_votes( - &mut client.wl_storage, + &mut client.state, validator_set_update::VextDigest::singleton(vext.clone()), 0.into(), ) @@ -988,9 +979,8 @@ mod test_ethbridge_router { // commit the changes client - .wl_storage - .storage - .commit_block(MockDBWriteBatch) + .state + .commit_block_from_batch(MockDBWriteBatch) .expect("Test failed"); // check the response @@ -1005,7 +995,7 @@ mod test_ethbridge_router { EthereumProof::new((1.into(), vext.0.data.voting_powers)); proof.attach_signature( client - .wl_storage + .state .ethbridge_queries() .get_eth_addr_book(&established_address_1(), Some(0.into())) .expect("Test failed"), @@ -1022,16 +1012,15 @@ mod test_ethbridge_router { #[tokio::test] async fn test_read_valset_upd_proof_too_far_ahead() { let mut client = TestClient::new(RPC); - assert_eq!(client.wl_storage.storage.last_epoch.0, 0); + assert_eq!(client.state.in_mem().last_epoch.0, 0); // write validator to storage - test_utils::init_default_storage(&mut client.wl_storage); + test_utils::init_default_storage(&mut client.state); // commit the changes client - .wl_storage - .storage - .commit_block(MockDBWriteBatch) + .state + .commit_block_from_batch(MockDBWriteBatch) .expect("Test failed"); // check the response @@ -1072,15 +1061,15 @@ mod test_ethbridge_router { }; // write a transfer into the bridge pool - client.wl_storage.storage.block.height = 1.into(); + client.state.in_mem_mut().block.height = 1.into(); client - .wl_storage + .state .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); // commit the changes and increase block height - 
client.wl_storage.commit_block().expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.commit_block().expect("Test failed"); + client.state.in_mem_mut().block.height += 1; // check the response let pool = RPC @@ -1114,29 +1103,29 @@ mod test_ethbridge_router { // write a transfer into the bridge pool client - .wl_storage + .state .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); // commit the changes and increase block height - client.wl_storage.commit_block().expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.commit_block().expect("Test failed"); + client.state.in_mem_mut().block.height += 1; // update the pool client - .wl_storage + .state .delete(&get_pending_key(&transfer)) .expect("Test failed"); let mut transfer2 = transfer; transfer2.transfer.amount = 1.into(); client - .wl_storage + .state .write(&get_pending_key(&transfer2), &transfer2) .expect("Test failed"); // commit the changes and increase block height - client.wl_storage.commit_block().expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.commit_block().expect("Test failed"); + client.state.in_mem_mut().block.height += 1; // check the response let pool = RPC @@ -1169,11 +1158,11 @@ mod test_ethbridge_router { }; // write validator to storage - test_utils::init_default_storage(&mut client.wl_storage); + test_utils::init_default_storage(&mut client.state); // write a transfer into the bridge pool client - .wl_storage + .state .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); @@ -1182,23 +1171,23 @@ mod test_ethbridge_router { signatures: Default::default(), data: (transfer.keccak256(), 0.into()), }; - let written_height = client.wl_storage.storage.block.height; + let written_height = client.state.in_mem().block.height; // commit the changes and increase block height - client.wl_storage.commit_block().expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.commit_block().expect("Test failed"); + client.state.in_mem_mut().block.height += 1; // update the pool let mut transfer2 = transfer.clone(); transfer2.transfer.amount = 1.into(); client - .wl_storage + .state .write(&get_pending_key(&transfer2), transfer2) .expect("Test failed"); // add the signature for the pool at the previous block height client - .wl_storage + .state .write( &get_signed_root_key(), (signed_root.clone(), written_height), @@ -1206,8 +1195,8 @@ mod test_ethbridge_router { .expect("Test failed"); // commit the changes and increase block height - client.wl_storage.commit_block().expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.commit_block().expect("Test failed"); + client.state.in_mem_mut().block.height += 1; let resp = RPC .shell() @@ -1237,7 +1226,7 @@ mod test_ethbridge_router { .expect("Test failed"); let (validator_args, voting_powers) = client - .wl_storage + .state .ethbridge_queries() .get_bridge_validator_set(None); let relay_proof = ethereum_structs::RelayProof { @@ -1279,11 +1268,11 @@ mod test_ethbridge_router { }, }; // write validator to storage - test_utils::init_default_storage(&mut client.wl_storage); + test_utils::init_default_storage(&mut client.state); // write a transfer into the bridge pool client - .wl_storage + .state .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); @@ -1295,33 +1284,31 @@ mod test_ethbridge_router { // commit the changes and increase block height client - .wl_storage - .storage - 
.commit_block(MockDBWriteBatch) + .state + .commit_block_from_batch(MockDBWriteBatch) .expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.in_mem_mut().block.height += 1; // update the pool let mut transfer2 = transfer; transfer2.transfer.amount = 1.into(); client - .wl_storage + .state .write(&get_pending_key(&transfer2), &transfer2) .expect("Test failed"); // add the signature for the pool at the previous block height client - .wl_storage + .state .write(&get_signed_root_key(), (signed_root, BlockHeight::from(0))) .expect("Test failed"); // commit the changes and increase block height client - .wl_storage - .storage - .commit_block(MockDBWriteBatch) + .state + .commit_block_from_batch(MockDBWriteBatch) .expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.in_mem_mut().block.height += 1; // this is in the pool, but its merkle root has not been signed yet let resp = RPC @@ -1365,11 +1352,11 @@ mod test_ethbridge_router { }, }; // write validator to storage - test_utils::init_default_storage(&mut client.wl_storage); + test_utils::init_default_storage(&mut client.state); // write a transfer into the bridge pool client - .wl_storage + .state .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); @@ -1378,29 +1365,29 @@ mod test_ethbridge_router { signatures: Default::default(), data: (transfer.keccak256(), 0.into()), }; - let written_height = client.wl_storage.storage.block.height; + let written_height = client.state.in_mem().block.height; // commit the changes and increase block height - client.wl_storage.commit_block().expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.commit_block().expect("Test failed"); + client.state.in_mem_mut().block.height += 1; // update the pool let mut transfer2 = transfer.clone(); transfer2.transfer.amount = 1.into(); client - .wl_storage + .state .write(&get_pending_key(&transfer2), transfer2) .expect("Test failed"); // add the signature for the pool at the previous block height client - .wl_storage + .state .write(&get_signed_root_key(), (signed_root, written_height)) .expect("Test failed"); // commit the changes and increase block height - client.wl_storage.commit_block().expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.commit_block().expect("Test failed"); + client.state.in_mem_mut().block.height += 1; let resp = RPC .shell() .eth_bridge() @@ -1431,11 +1418,11 @@ mod test_ethbridge_router { }; // write validator to storage let (_, dummy_validator_stake) = test_utils::default_validator(); - test_utils::init_default_storage(&mut client.wl_storage); + test_utils::init_default_storage(&mut client.state); // write a transfer into the bridge pool client - .wl_storage + .state .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); @@ -1449,11 +1436,11 @@ mod test_ethbridge_router { let eth_msg_key = vote_tallies::Keys::from(ð_event); let voting_power = FractionalVotingPower::HALF; client - .wl_storage + .state .write(ð_msg_key.body(), eth_event) .expect("Test failed"); client - .wl_storage + .state .write( ð_msg_key.voting_power(), EpochedVotingPower::from([( @@ -1463,32 +1450,30 @@ mod test_ethbridge_router { ) .expect("Test failed"); client - .wl_storage + .state .write(ð_msg_key.seen(), false) .expect("Test failed"); // commit the changes and increase block height client - .wl_storage - .storage - .commit_block(MockDBWriteBatch) + .state + .commit_block_from_batch(MockDBWriteBatch) .expect("Test 
failed"); - client.wl_storage.storage.block.height += 1; + client.state.in_mem_mut().block.height += 1; // update the pool let mut transfer2 = transfer.clone(); transfer2.transfer.amount = 1.into(); client - .wl_storage + .state .write(&get_pending_key(&transfer2), transfer2) .expect("Test failed"); // commit the changes and increase block height client - .wl_storage - .storage - .commit_block(MockDBWriteBatch) + .state + .commit_block_from_batch(MockDBWriteBatch) .expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.in_mem_mut().block.height += 1; let resp = RPC .shell() .eth_bridge() @@ -1509,7 +1494,7 @@ mod test_ethbridge_router { async fn test_cannot_get_proof_for_removed_transfer() { let mut client = TestClient::new(RPC); // write validator to storage - test_utils::init_default_storage(&mut client.wl_storage); + test_utils::init_default_storage(&mut client.state); let transfer = PendingTransfer { transfer: TransferToEthereum { kind: TransferToEthereumKind::Erc20, @@ -1527,7 +1512,7 @@ mod test_ethbridge_router { // write a transfer into the bridge pool client - .wl_storage + .state .write(&get_pending_key(&transfer), &transfer) .expect("Test failed"); @@ -1536,29 +1521,29 @@ mod test_ethbridge_router { signatures: Default::default(), data: (transfer.keccak256(), 0.into()), }; - let written_height = client.wl_storage.storage.block.height; + let written_height = client.state.in_mem().block.height; // commit the changes and increase block height - client.wl_storage.commit_block().expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.commit_block().expect("Test failed"); + client.state.in_mem_mut().block.height += 1; // update the pool let mut transfer2 = transfer.clone(); transfer2.transfer.amount = 1.into(); client - .wl_storage + .state .write(&get_pending_key(&transfer2), transfer2) .expect("Test failed"); // add the signature for the pool at the previous block height client - .wl_storage + .state .write(&get_signed_root_key(), (signed_root, written_height)) .expect("Test failed"); // commit the changes and increase block height - client.wl_storage.commit_block().expect("Test failed"); - client.wl_storage.storage.block.height += 1; + client.state.commit_block().expect("Test failed"); + client.state.in_mem_mut().block.height += 1; // this was in the pool, covered by an old signed Merkle root. let resp = RPC .shell() @@ -1581,7 +1566,7 @@ mod test_ethbridge_router { // remove a transfer from the pool. 
client - .wl_storage + .state .delete(&get_pending_key(&transfer)) .expect("Test failed"); @@ -1613,10 +1598,10 @@ mod test_ethbridge_router { const ERC20_TOKEN: EthAddress = EthAddress([0; 20]); let mut client = TestClient::new(RPC); - assert_eq!(client.wl_storage.storage.last_epoch.0, 0); + assert_eq!(client.state.in_mem().last_epoch.0, 0); // initialize storage - test_utils::init_default_storage(&mut client.wl_storage); + test_utils::init_default_storage(&mut client.state); // check supply - should be 0 let result = RPC @@ -1638,7 +1623,7 @@ mod test_ethbridge_router { } .into(); client - .wl_storage + .state .write(&key, supply_amount) .expect("Test failed"); let key = whitelist::Key { @@ -1646,10 +1631,7 @@ mod test_ethbridge_router { suffix: whitelist::KeyType::Cap, } .into(); - client - .wl_storage - .write(&key, cap_amount) - .expect("Test failed"); + client.state.write(&key, cap_amount).expect("Test failed"); // check that the supply was updated let result = RPC @@ -1685,7 +1667,7 @@ mod test_ethbridge_router { }, }; client - .wl_storage + .state .write(&get_pending_key(&transfer), transfer.clone()) .expect("Test failed"); @@ -1714,7 +1696,7 @@ mod test_ethbridge_router { transfer4.transfer.amount = 3.into(); // change block height - client.wl_storage.storage.block.height = 1.into(); + client.state.in_mem_mut().block.height = 1.into(); // write bridge pool signed root { @@ -1722,20 +1704,19 @@ mod test_ethbridge_router { signatures: Default::default(), data: (KeccakHash([0; 32]), 0.into()), }; - let written_height = client.wl_storage.storage.block.height; + let written_height = client.state.in_mem().block.height; client - .wl_storage + .state .write(&get_signed_root_key(), (signed_root, written_height)) .expect("Test failed"); client - .wl_storage - .storage - .commit_block(MockDBWriteBatch) + .state + .commit_block_from_batch(MockDBWriteBatch) .expect("Test failed"); } // commit storage changes - client.wl_storage.commit_block().expect("Test failed"); + client.state.commit_block().expect("Test failed"); // check transfer statuses let status = RPC diff --git a/crates/sdk/src/queries/types.rs b/crates/sdk/src/queries/types.rs index 00b57368cc..b191cb59b5 100644 --- a/crates/sdk/src/queries/types.rs +++ b/crates/sdk/src/queries/types.rs @@ -1,7 +1,7 @@ use std::fmt::Debug; use namada_core::storage::BlockHeight; -use namada_state::{DBIter, StorageHasher, WlStorage, DB}; +use namada_state::{DBIter, StorageHasher, WlState, DB}; use thiserror::Error; use crate::events::log::EventLog; @@ -15,8 +15,8 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - /// Reference to the ledger's [`WlStorage`]. - pub wl_storage: &'shell WlStorage, + /// Reference to the ledger's [`WlState`]. + pub state: &'shell WlState, /// Log of events emitted by `FinalizeBlock` ABCI calls. pub event_log: &'shell EventLog, /// Cache of VP wasm compiled artifacts. 
diff --git a/crates/sdk/src/queries/vp/governance.rs b/crates/sdk/src/queries/vp/governance.rs index 60de9a3835..2e41282e43 100644 --- a/crates/sdk/src/queries/vp/governance.rs +++ b/crates/sdk/src/queries/vp/governance.rs @@ -24,7 +24,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_governance::storage::get_proposal_by_id(ctx.wl_storage, id) + namada_governance::storage::get_proposal_by_id(ctx.state, id) } /// Query all the votes for the given proposal id @@ -36,7 +36,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_governance::storage::get_proposal_votes(ctx.wl_storage, id) + namada_governance::storage::get_proposal_votes(ctx.state, id) } /// Get the governance parameters @@ -47,7 +47,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_governance::storage::get_parameters(ctx.wl_storage) + namada_governance::storage::get_parameters(ctx.state) } /// Get the governance proposal result stored in storage @@ -59,5 +59,5 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_governance::storage::get_proposal_result(ctx.wl_storage, id) + namada_governance::storage::get_proposal_result(ctx.state, id) } diff --git a/crates/sdk/src/queries/vp/pgf.rs b/crates/sdk/src/queries/vp/pgf.rs index 7b767911bb..7a4dbf0673 100644 --- a/crates/sdk/src/queries/vp/pgf.rs +++ b/crates/sdk/src/queries/vp/pgf.rs @@ -22,7 +22,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_governance::pgf::storage::get_stewards(ctx.wl_storage) + namada_governance::pgf::storage::get_stewards(ctx.state) } /// Check if an address is a pgf steward @@ -34,7 +34,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_governance::pgf::storage::is_steward(ctx.wl_storage, &address) + namada_governance::pgf::storage::is_steward(ctx.state, &address) } /// Query the continuous pgf fundings @@ -45,7 +45,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_governance::pgf::storage::get_payments(ctx.wl_storage) + namada_governance::pgf::storage::get_payments(ctx.state) } /// Query the PGF parameters @@ -56,5 +56,5 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_governance::pgf::storage::get_parameters(ctx.wl_storage) + namada_governance::pgf::storage::get_parameters(ctx.state) } diff --git a/crates/sdk/src/queries/vp/pos.rs b/crates/sdk/src/queries/vp/pos.rs index 2182ebbc03..4b81e7b9bb 100644 --- a/crates/sdk/src/queries/vp/pos.rs +++ b/crates/sdk/src/queries/vp/pos.rs @@ -184,7 +184,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - read_pos_params(ctx.wl_storage) + read_pos_params(ctx.state) } /// Find if the given address belongs to a validator account. @@ -196,7 +196,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_proof_of_stake::is_validator(ctx.wl_storage, &addr) + namada_proof_of_stake::is_validator(ctx.state, &addr) } /// Find a consensus key of a validator account. 
@@ -208,9 +208,9 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let current_epoch = ctx.wl_storage.storage.last_epoch; + let current_epoch = ctx.state.in_mem().last_epoch; namada_proof_of_stake::storage::get_consensus_key( - ctx.wl_storage, + ctx.state, &addr, current_epoch, ) @@ -226,7 +226,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_proof_of_stake::is_delegator(ctx.wl_storage, &addr, epoch) + namada_proof_of_stake::is_delegator(ctx.state, &addr, epoch) } /// Get all the validator known addresses. These validators may be in any state, @@ -239,8 +239,8 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let epoch = epoch.unwrap_or(ctx.wl_storage.storage.last_epoch); - read_all_validator_addresses(ctx.wl_storage, epoch) + let epoch = epoch.unwrap_or(ctx.state.in_mem().last_epoch); + read_all_validator_addresses(ctx.state, epoch) } /// Get the validator commission rate and max commission rate change per epoch @@ -253,15 +253,12 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let epoch = epoch.unwrap_or(ctx.wl_storage.storage.last_epoch); - let params = read_pos_params(ctx.wl_storage)?; - let commission_rate = validator_commission_rate_handle(&validator).get( - ctx.wl_storage, - epoch, - ¶ms, - )?; + let epoch = epoch.unwrap_or(ctx.state.in_mem().last_epoch); + let params = read_pos_params(ctx.state)?; + let commission_rate = validator_commission_rate_handle(&validator) + .get(ctx.state, epoch, ¶ms)?; let max_commission_change_per_epoch = - read_validator_max_commission_rate_change(ctx.wl_storage, &validator)?; + read_validator_max_commission_rate_change(ctx.state, &validator)?; match (commission_rate, max_commission_change_per_epoch) { (Some(commission_rate), Some(max_commission_change_per_epoch)) => { @@ -283,12 +280,11 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let email = read_validator_email(ctx.wl_storage, &validator)?; - let description = read_validator_description(ctx.wl_storage, &validator)?; - let website = read_validator_website(ctx.wl_storage, &validator)?; - let discord_handle = - read_validator_discord_handle(ctx.wl_storage, &validator)?; - let avatar = read_validator_avatar(ctx.wl_storage, &validator)?; + let email = read_validator_email(ctx.state, &validator)?; + let description = read_validator_description(ctx.state, &validator)?; + let website = read_validator_website(ctx.state, &validator)?; + let discord_handle = read_validator_discord_handle(ctx.state, &validator)?; + let avatar = read_validator_avatar(ctx.state, &validator)?; // Email is the only required field for a validator in storage match email { @@ -313,13 +309,10 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let epoch = epoch.unwrap_or(ctx.wl_storage.storage.last_epoch); - let params = read_pos_params(ctx.wl_storage)?; - let state = validator_state_handle(&validator).get( - ctx.wl_storage, - epoch, - ¶ms, - )?; + let epoch = epoch.unwrap_or(ctx.state.in_mem().last_epoch); + let params = read_pos_params(ctx.state)?; + let state = + validator_state_handle(&validator).get(ctx.state, epoch, ¶ms)?; Ok(state) } @@ -332,7 +325,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - read_validator_last_slash_epoch(ctx.wl_storage, &validator) + 
read_validator_last_slash_epoch(ctx.state, &validator) } /// Get the total stake of a validator at the given epoch or current when @@ -349,11 +342,11 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let epoch = epoch.unwrap_or(ctx.wl_storage.storage.last_epoch); - let params = read_pos_params(ctx.wl_storage)?; - if namada_proof_of_stake::is_validator(ctx.wl_storage, &validator)? { + let epoch = epoch.unwrap_or(ctx.state.in_mem().last_epoch); + let params = read_pos_params(ctx.state)?; + if namada_proof_of_stake::is_validator(ctx.state, &validator)? { let stake = - read_validator_stake(ctx.wl_storage, ¶ms, &validator, epoch)?; + read_validator_stake(ctx.state, ¶ms, &validator, epoch)?; Ok(Some(stake)) } else { Ok(None) @@ -372,7 +365,7 @@ where H: 'static + StorageHasher + Sync, { let handle = validator_incoming_redelegations_handle(&src_validator); - handle.get(ctx.wl_storage, &delegator) + handle.get(ctx.state, &delegator) } /// Get all the validator in the consensus set with their bonded stake. @@ -384,8 +377,8 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let epoch = epoch.unwrap_or(ctx.wl_storage.storage.last_epoch); - read_consensus_validator_set_addresses_with_stake(ctx.wl_storage, epoch) + let epoch = epoch.unwrap_or(ctx.state.in_mem().last_epoch); + read_consensus_validator_set_addresses_with_stake(ctx.state, epoch) } /// Get all the validator in the below-capacity set with their bonded stake. @@ -397,11 +390,8 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let epoch = epoch.unwrap_or(ctx.wl_storage.storage.last_epoch); - read_below_capacity_validator_set_addresses_with_stake( - ctx.wl_storage, - epoch, - ) + let epoch = epoch.unwrap_or(ctx.state.in_mem().last_epoch); + read_below_capacity_validator_set_addresses_with_stake(ctx.state, epoch) } /// Get the total stake in PoS system at the given epoch or current when `None`. @@ -413,9 +403,9 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let epoch = epoch.unwrap_or(ctx.wl_storage.storage.last_epoch); - let params = read_pos_params(ctx.wl_storage)?; - read_total_stake(ctx.wl_storage, ¶ms, epoch) + let epoch = epoch.unwrap_or(ctx.state.in_mem().last_epoch); + let params = read_pos_params(ctx.state)?; + read_total_stake(ctx.state, ¶ms, epoch) } fn bond_deltas( @@ -427,7 +417,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - bond_handle(&source, &validator).to_hashmap(ctx.wl_storage) + bond_handle(&source, &validator).to_hashmap(ctx.state) } /// Find the sum of bond amount up the given epoch when `Some`, or up to the @@ -442,13 +432,13 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let params = read_pos_params(ctx.wl_storage)?; - let epoch = epoch - .unwrap_or(ctx.wl_storage.storage.last_epoch + params.pipeline_len); + let params = read_pos_params(ctx.state)?; + let epoch = + epoch.unwrap_or(ctx.state.in_mem().last_epoch + params.pipeline_len); let handle = bond_handle(&source, &validator); handle - .get_sum(ctx.wl_storage, epoch, ¶ms)? + .get_sum(ctx.state, epoch, ¶ms)? 
.ok_or_err_msg("Cannot find bond") } @@ -462,10 +452,10 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let epoch = epoch.unwrap_or(ctx.wl_storage.storage.last_epoch); + let epoch = epoch.unwrap_or(ctx.state.in_mem().last_epoch); let bond_id = BondId { source, validator }; - bond_amount(ctx.wl_storage, &bond_id, epoch) + bond_amount(ctx.state, &bond_id, epoch) } fn unbond( @@ -478,7 +468,7 @@ where H: 'static + StorageHasher + Sync, { let handle = unbond_handle(&source, &validator); - let iter = handle.iter(ctx.wl_storage)?; + let iter = handle.iter(ctx.state)?; iter.map(|next_result| { next_result.map( |( @@ -504,7 +494,7 @@ where { // TODO slashes let handle = unbond_handle(&source, &validator); - let iter = handle.iter(ctx.wl_storage)?; + let iter = handle.iter(ctx.state)?; iter.map(|next_result| { next_result.map( |( @@ -529,11 +519,11 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let epoch = epoch.unwrap_or(ctx.wl_storage.storage.last_epoch); + let epoch = epoch.unwrap_or(ctx.state.in_mem().last_epoch); let handle = unbond_handle(&source, &validator); let mut total = token::Amount::zero(); - for result in handle.iter(ctx.wl_storage)? { + for result in handle.iter(ctx.state)? { let ( lazy_map::NestedSubKey::Data { key: end, @@ -557,13 +547,8 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let current_epoch = ctx.wl_storage.storage.last_epoch; - query_reward_tokens( - ctx.wl_storage, - source.as_ref(), - &validator, - current_epoch, - ) + let current_epoch = ctx.state.in_mem().last_epoch; + query_reward_tokens(ctx.state, source.as_ref(), &validator, current_epoch) } fn bonds_and_unbonds( @@ -576,9 +561,7 @@ where H: 'static + StorageHasher + Sync, { namada_proof_of_stake::queries::bonds_and_unbonds( - ctx.wl_storage, - source, - validator, + ctx.state, source, validator, ) } @@ -592,7 +575,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - find_delegation_validators(ctx.wl_storage, &owner) + find_delegation_validators(ctx.state, &owner) } /// Find all the validator addresses to whom the given `owner` address has @@ -606,8 +589,8 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let epoch = epoch.unwrap_or(ctx.wl_storage.storage.last_epoch); - find_delegations(ctx.wl_storage, &owner, &epoch) + let epoch = epoch.unwrap_or(ctx.state.in_mem().last_epoch); + find_delegations(ctx.state, &owner, &epoch) } /// Validator slashes @@ -620,7 +603,7 @@ where H: 'static + StorageHasher + Sync, { let slash_handle = validator_slashes_handle(&validator); - slash_handle.iter(ctx.wl_storage)?.collect() + slash_handle.iter(ctx.state)?.collect() } /// All slashes @@ -631,7 +614,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - find_all_slashes(ctx.wl_storage) + find_all_slashes(ctx.state) } /// Enqueued slashes @@ -642,8 +625,8 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - let current_epoch = ctx.wl_storage.storage.last_epoch; - find_all_enqueued_slashes(ctx.wl_storage, current_epoch) + let current_epoch = ctx.state.in_mem().last_epoch; + find_all_enqueued_slashes(ctx.state, current_epoch) } /// Native validator address by looking up the Tendermint address @@ -656,8 +639,7 @@ where H: 'static + StorageHasher + Sync, { 
namada_proof_of_stake::storage::find_validator_by_raw_hash( - ctx.wl_storage, - tm_addr, + ctx.state, tm_addr, ) } @@ -669,7 +651,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_proof_of_stake::storage::get_consensus_key_set(ctx.wl_storage) + namada_proof_of_stake::storage::get_consensus_key_set(ctx.state) } /// Find if the given source address has any bonds. @@ -681,7 +663,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - namada_proof_of_stake::queries::has_bonds(ctx.wl_storage, &source) + namada_proof_of_stake::queries::has_bonds(ctx.state, &source) } /// Client-only methods for the router type are composed from router functions. diff --git a/crates/sdk/src/queries/vp/token.rs b/crates/sdk/src/queries/vp/token.rs index e9696e796d..cf6e1b9c42 100644 --- a/crates/sdk/src/queries/vp/token.rs +++ b/crates/sdk/src/queries/vp/token.rs @@ -22,7 +22,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - read_denom(ctx.wl_storage, &addr) + read_denom(ctx.state, &addr) } /// Get the total supply for a token address @@ -34,7 +34,7 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { - read_total_supply(ctx.wl_storage, &addr) + read_total_supply(ctx.state, &addr) } #[cfg(any(test, feature = "async-client"))] diff --git a/crates/state/src/lib.rs b/crates/state/src/lib.rs index f27d8fbeb0..f4515fc3ab 100644 --- a/crates/state/src/lib.rs +++ b/crates/state/src/lib.rs @@ -1,11 +1,13 @@ //! Ledger's state storage with key-value backed store and a merkle tree -pub mod wl_storage; pub mod write_log; -use core::fmt::Debug; +use std::cell::RefCell; use std::cmp::Ordering; +use std::fmt::Debug; use std::format; +use std::iter::Peekable; +use std::ops::{Deref, DerefMut}; use namada_core::address::{Address, EstablishedAddressGen, InternalAddress}; use namada_core::borsh::{BorshDeserialize, BorshSerialize, BorshSerializeExt}; @@ -20,10 +22,11 @@ pub use namada_core::storage::{ }; use namada_core::tendermint::merkle::proof::ProofOps; use namada_core::time::DateTimeUtc; +use namada_core::validity_predicate::VpSentinel; use namada_core::{encode, ethereum_structs, storage}; use namada_gas::{ - MEMORY_ACCESS_GAS_PER_BYTE, STORAGE_ACCESS_GAS_PER_BYTE, - STORAGE_WRITE_GAS_PER_BYTE, + GasMetering, TxGasMeter, VpGasMeter, MEMORY_ACCESS_GAS_PER_BYTE, + STORAGE_ACCESS_GAS_PER_BYTE, STORAGE_WRITE_GAS_PER_BYTE, }; pub use namada_merkle_tree::{ self as merkle_tree, ics23_specs, MembershipProof, MerkleTree, @@ -31,13 +34,15 @@ pub use namada_merkle_tree::{ }; use namada_merkle_tree::{Error as MerkleTreeError, MerkleRoot}; use namada_parameters::{self, EpochDuration, Parameters}; -pub use namada_storage::conversion_state::ConversionState; +use namada_replay_protection as replay_protection; +pub use namada_storage::conversion_state::{ + ConversionState, WithConversionState, +}; pub use namada_storage::{Error as StorageError, Result as StorageResult, *}; +use namada_tx::data::TxSentinel; use thiserror::Error; use tx_queue::{ExpiredTxsQueue, TxQueue}; -pub use wl_storage::{ - iter_prefix_post, iter_prefix_pre, PrefixIter, TempWlStorage, WlStorage, -}; +use write_log::{ReProtStorageModification, StorageModification, WriteLog}; /// A result of a function that may fail pub type Result = std::result::Result; @@ -46,314 +51,47 @@ pub type Result = std::result::Result; /// it has 2 blocks delay on validator set update. 
pub const EPOCH_SWITCH_BLOCKS_DELAY: u32 = 2; -/// The ledger's state -#[derive(Debug)] -pub struct State -where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, -{ - /// The database for the storage - pub db: D, - /// The ID of the chain - pub chain_id: ChainId, - /// The address of the native token - this is not stored in DB, but read - /// from genesis - pub native_token: Address, - /// Block storage data - pub block: BlockStorage, - /// During `FinalizeBlock`, this is the header of the block that is - /// going to be committed. After a block is committed, this is reset to - /// `None` until the next `FinalizeBlock` phase is reached. - pub header: Option
, - /// The most recently committed block, if any. - pub last_block: Option, - /// The epoch of the most recently committed block. If it is `Epoch(0)`, - /// then no block may have been committed for this chain yet. - pub last_epoch: Epoch, - /// Minimum block height at which the next epoch may start - pub next_epoch_min_start_height: BlockHeight, - /// Minimum block time at which the next epoch may start - pub next_epoch_min_start_time: DateTimeUtc, - /// The current established address generator - pub address_gen: EstablishedAddressGen, - /// We delay the switch to a new epoch by the number of blocks set in here. - /// This is `Some` when minimum number of blocks has been created and - /// minimum time has passed since the beginning of the last epoch. - /// Once the value is `Some(0)`, we're ready to switch to a new epoch and - /// this is reset back to `None`. - pub update_epoch_blocks_delay: Option, - /// The shielded transaction index - pub tx_index: TxIndex, - /// The currently saved conversion state - pub conversion_state: ConversionState, - /// Wrapper txs to be decrypted in the next block proposal - pub tx_queue: TxQueue, - /// Queue of expired transactions that need to be retransmitted. - /// - /// These transactions do not need to be persisted, as they are - /// retransmitted at the **COMMIT** phase immediately following - /// the block when they were queued. - pub expired_txs_queue: ExpiredTxsQueue, - /// The latest block height on Ethereum processed, if - /// the bridge is enabled. - pub ethereum_height: Option, - /// The queue of Ethereum events to be processed in order. - pub eth_events_queue: EthEventsQueue, - /// How many block heights in the past can the storage be queried - pub storage_read_past_height_limit: Option, - /// Static merkle tree storage key filter - pub merkle_tree_key_filter: fn(&storage::Key) -> bool, -} - -/// Last committed block -#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] -pub struct LastBlock { - /// Block height - pub height: BlockHeight, - /// Block hash - pub hash: BlockHash, - /// Block time - pub time: DateTimeUtc, -} - -/// The block storage data +/// Owned state with full R/W access. #[derive(Debug)] -pub struct BlockStorage { - /// Merkle tree of all the other data in block storage - pub tree: MerkleTree, - /// During `FinalizeBlock`, this is updated to be the hash of the block - /// that is going to be committed. If it is `BlockHash::default()`, - /// then no `FinalizeBlock` stage has been reached yet. - pub hash: BlockHash, - /// From the start of `FinalizeBlock` until the end of `Commit`, this is - /// height of the block that is going to be committed. Otherwise, it is the - /// height of the most recently committed block, or `BlockHeight::sentinel` - /// (0) if no block has been committed yet. - pub height: BlockHeight, - /// From the start of `FinalizeBlock` until the end of `Commit`, this is - /// height of the block that is going to be committed. Otherwise it is the - /// epoch of the most recently committed block, or `Epoch(0)` if no block - /// has been committed yet. 
- pub epoch: Epoch, - /// Results of applying transactions - pub results: BlockResults, - /// Predecessor block epochs - pub pred_epochs: Epochs, -} - -pub fn merklize_all_keys(_key: &storage::Key) -> bool { - true -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum Error { - #[error("TEMPORARY error: {error}")] - Temporary { error: String }, - #[error("Found an unknown key: {key}")] - UnknownKey { key: String }, - #[error("Storage key error {0}")] - KeyError(namada_core::storage::Error), - #[error("Coding error: {0}")] - CodingError(#[from] namada_core::DecodeError), - #[error("Merkle tree error: {0}")] - MerkleTreeError(MerkleTreeError), - #[error("DB error: {0}")] - DBError(String), - #[error("Borsh (de)-serialization error: {0}")] - BorshCodingError(std::io::Error), - #[error("Merkle tree at the height {height} is not stored")] - NoMerkleTree { height: BlockHeight }, - #[error("Code hash error: {0}")] - InvalidCodeHash(HashError), - #[error("DB error: {0}")] - DbError(#[from] namada_storage::DbError), -} - -impl State +pub struct FullAccessState(WlState) where D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, -{ - /// open up a new instance of the storage given path to db and chain id - pub fn open( - db_path: impl AsRef, - chain_id: ChainId, - native_token: Address, - cache: Option<&D::Cache>, - storage_read_past_height_limit: Option, - merkle_tree_key_filter: fn(&storage::Key) -> bool, - ) -> Self { - let block = BlockStorage { - tree: MerkleTree::default(), - hash: BlockHash::default(), - height: BlockHeight::default(), - epoch: Epoch::default(), - pred_epochs: Epochs::default(), - results: BlockResults::default(), - }; - State:: { - db: D::open(db_path, cache), - chain_id, - block, - header: None, - last_block: None, - last_epoch: Epoch::default(), - next_epoch_min_start_height: BlockHeight::default(), - next_epoch_min_start_time: DateTimeUtc::now(), - address_gen: EstablishedAddressGen::new( - "Privacy is a function of liberty.", - ), - update_epoch_blocks_delay: None, - tx_index: TxIndex::default(), - conversion_state: ConversionState::default(), - tx_queue: TxQueue::default(), - expired_txs_queue: ExpiredTxsQueue::default(), - native_token, - ethereum_height: None, - eth_events_queue: EthEventsQueue::default(), - storage_read_past_height_limit, - merkle_tree_key_filter, - } - } - - /// Load the full state at the last committed height, if any. Returns the - /// Merkle root hash and the height of the committed block. - pub fn load_last_state(&mut self) -> Result<()> { - if let Some(BlockStateRead { - merkle_tree_stores, - hash, - height, - time, - epoch, - pred_epochs, - next_epoch_min_start_height, - next_epoch_min_start_time, - update_epoch_blocks_delay, - results, - address_gen, - conversion_state, - tx_queue, - ethereum_height, - eth_events_queue, - }) = self.db.read_last_block()? 
- { - self.block.hash = hash.clone(); - self.block.height = height; - self.block.epoch = epoch; - self.block.results = results; - self.block.pred_epochs = pred_epochs; - self.last_block = Some(LastBlock { height, hash, time }); - self.last_epoch = epoch; - self.next_epoch_min_start_height = next_epoch_min_start_height; - self.next_epoch_min_start_time = next_epoch_min_start_time; - self.update_epoch_blocks_delay = update_epoch_blocks_delay; - self.address_gen = address_gen; - // Rebuild Merkle tree - self.block.tree = MerkleTree::new(merkle_tree_stores) - .or_else(|_| self.rebuild_full_merkle_tree(height))?; - self.conversion_state = conversion_state; - self.tx_queue = tx_queue; - self.ethereum_height = ethereum_height; - self.eth_events_queue = eth_events_queue; - tracing::debug!("Loaded storage from DB"); - } else { - tracing::info!("No state could be found"); - } - Ok(()) - } + H: StorageHasher; - /// Returns the Merkle root hash and the height of the committed block. If - /// no block exists, returns None. - pub fn get_state(&self) -> Option<(MerkleRoot, u64)> { - if self.block.height.0 != 0 { - Some((self.block.tree.root(), self.block.height.0)) - } else { - None - } - } +/// Common trait for read-only access to write log, DB and in-memory state. +pub trait StateRead: StorageRead + Debug { + /// DB type + type D: 'static + DB + for<'iter> DBIter<'iter>; + /// DB hasher type + type H: 'static + StorageHasher; - /// Persist the current block's state to the database - pub fn commit_block(&mut self, mut batch: D::WriteBatch) -> Result<()> { - // All states are written only when the first height or a new epoch - let is_full_commit = - self.block.height.0 == 1 || self.last_epoch != self.block.epoch; + /// Borrow `WriteLog` + fn write_log(&self) -> &WriteLog; - // For convenience in tests, fill-in a header if it's missing. - // Normally, the header is added in `FinalizeBlock`. 
- #[cfg(any(test, feature = "testing"))] - { - if self.header.is_none() { - self.header = Some(Header { - hash: Hash::default(), - time: DateTimeUtc::now(), - next_validators_hash: Hash::default(), - }); - } - } + /// Borrow `DB` + fn db(&self) -> &Self::D; - let state = BlockStateWrite { - merkle_tree_stores: self.block.tree.stores(), - header: self.header.as_ref(), - hash: &self.block.hash, - height: self.block.height, - time: self - .header - .as_ref() - .expect("Must have a block header on commit") - .time, - epoch: self.block.epoch, - results: &self.block.results, - pred_epochs: &self.block.pred_epochs, - next_epoch_min_start_height: self.next_epoch_min_start_height, - next_epoch_min_start_time: self.next_epoch_min_start_time, - update_epoch_blocks_delay: self.update_epoch_blocks_delay, - address_gen: &self.address_gen, - conversion_state: &self.conversion_state, - tx_queue: &self.tx_queue, - ethereum_height: self.ethereum_height.as_ref(), - eth_events_queue: &self.eth_events_queue, - }; - self.db - .add_block_to_batch(state, &mut batch, is_full_commit)?; - let header = self - .header - .take() - .expect("Must have a block header on commit"); - self.last_block = Some(LastBlock { - height: self.block.height, - hash: header.hash.into(), - time: header.time, - }); - self.last_epoch = self.block.epoch; - if is_full_commit { - // prune old merkle tree stores - self.prune_merkle_tree_stores(&mut batch)?; - } - self.db.exec_batch(batch)?; - Ok(()) - } + /// Borrow `InMemory` state + fn in_mem(&self) -> &InMemory; - /// Find the root hash of the merkle tree - pub fn merkle_root(&self) -> MerkleRoot { - self.block.tree.root() - } + fn charge_gas(&self, gas: u64) -> Result<()>; + // TODO: the storage methods are taken from `State`, but they are not in the + // right place - they ignore write log and only access the DB. /// Check if the given key is present in storage. Returns the result and the /// gas cost. - pub fn has_key(&self, key: &Key) -> Result<(bool, u64)> { + fn db_has_key(&self, key: &storage::Key) -> Result<(bool, u64)> { Ok(( - self.db.read_subspace_val(key)?.is_some(), + self.db().read_subspace_val(key)?.is_some(), key.len() as u64 * STORAGE_ACCESS_GAS_PER_BYTE, )) } /// Returns a value from the specified subspace and the gas cost - pub fn read(&self, key: &Key) -> Result<(Option>, u64)> { + fn db_read(&self, key: &storage::Key) -> Result<(Option>, u64)> { tracing::debug!("storage read key {}", key); - match self.db.read_subspace_val(key)? { + match self.db().read_subspace_val(key)? { Some(v) => { let gas = (key.len() + v.len()) as u64 * STORAGE_ACCESS_GAS_PER_BYTE; @@ -363,139 +101,29 @@ where } } - /// Returns a value from the specified subspace at the given height (or the - /// last committed height when 0) and the gas cost. - pub fn read_with_height( - &self, - key: &Key, - height: BlockHeight, - ) -> Result<(Option>, u64)> { - // `0` means last committed height - if height == BlockHeight(0) || height >= self.get_last_block_height() { - self.read(key) - } else { - if !(self.merkle_tree_key_filter)(key) { - return Ok((None, 0)); - } - - match self.db.read_subspace_val_with_height( - key, - height, - self.get_last_block_height(), - )? { - Some(v) => { - let gas = (key.len() + v.len()) as u64 - * STORAGE_ACCESS_GAS_PER_BYTE; - Ok((Some(v), gas)) - } - None => { - Ok((None, key.len() as u64 * STORAGE_ACCESS_GAS_PER_BYTE)) - } - } - } - } - - /// WARNING: This only works for values that have been committed to DB. 
- /// To be able to see values written or deleted, but not yet committed, - /// use the `StorageWithWriteLog`. - /// - /// Returns a prefix iterator, ordered by storage keys, and the gas cost. - pub fn iter_prefix( + /// WARNING: This only works for values that have been committed to DB. + /// To be able to see values written or deleted, but not yet committed, + /// use the `StorageWithWriteLog`. + /// + /// Returns a prefix iterator, ordered by storage keys, and the gas cost. + fn db_iter_prefix( &self, prefix: &Key, - ) -> (>::PrefixIter, u64) { + ) -> (>::PrefixIter, u64) { ( - self.db.iter_prefix(Some(prefix)), + self.db().iter_prefix(Some(prefix)), prefix.len() as u64 * STORAGE_ACCESS_GAS_PER_BYTE, ) } /// Returns an iterator over the block results - pub fn iter_results(&self) -> (>::PrefixIter, u64) { - (self.db.iter_results(), 0) - } - - /// Write a value to the specified subspace and returns the gas cost and the - /// size difference - pub fn write( - &mut self, - key: &Key, - value: impl AsRef<[u8]>, - ) -> Result<(u64, i64)> { - // Note that this method is the same as `StorageWrite::write_bytes`, - // but with gas and storage bytes len diff accounting - tracing::debug!("storage write key {}", key,); - let value = value.as_ref(); - let is_key_merklized = (self.merkle_tree_key_filter)(key); - - if is_pending_transfer_key(key) { - // The tree of the bright pool stores the current height for the - // pending transfer - let height = self.block.height.serialize_to_vec(); - self.block.tree.update(key, height)?; - } else { - // Update the merkle tree - if is_key_merklized { - self.block.tree.update(key, value)?; - } - } - - let len = value.len(); - let gas = (key.len() + len) as u64 * STORAGE_WRITE_GAS_PER_BYTE; - let size_diff = self.db.write_subspace_val( - self.block.height, - key, - value, - is_key_merklized, - )?; - Ok((gas, size_diff)) - } - - /// Delete the specified subspace and returns the gas cost and the size - /// difference - pub fn delete(&mut self, key: &Key) -> Result<(u64, i64)> { - // Note that this method is the same as `StorageWrite::delete`, - // but with gas and storage bytes len diff accounting - let mut deleted_bytes_len = 0; - if self.has_key(key)?.0 { - let is_key_merklized = (self.merkle_tree_key_filter)(key); - if is_key_merklized { - self.block.tree.delete(key)?; - } - deleted_bytes_len = self.db.delete_subspace_val( - self.block.height, - key, - is_key_merklized, - )?; - } - let gas = (key.len() + deleted_bytes_len as usize) as u64 - * STORAGE_WRITE_GAS_PER_BYTE; - Ok((gas, deleted_bytes_len)) - } - - /// Set the block header. - /// The header is not in the Merkle tree as it's tracked by Tendermint. - /// Hence, we don't update the tree when this is set. - pub fn set_header(&mut self, header: Header) -> Result<()> { - self.header = Some(header); - Ok(()) - } - - /// Block data is in the Merkle tree as it's tracked by Tendermint in the - /// block header. Hence, we don't update the tree when this is set. - pub fn begin_block( - &mut self, - hash: BlockHash, - height: BlockHeight, - ) -> Result<()> { - self.block.hash = hash; - self.block.height = height; - Ok(()) + fn db_iter_results(&self) -> (>::PrefixIter, u64) { + (self.db().iter_results(), 0) } /// Get the hash of a validity predicate for the given account address and /// the gas cost for reading it. - pub fn validity_predicate( + fn validity_predicate( &self, addr: &Address, ) -> Result<(Option, u64)> { @@ -504,7 +132,7 @@ where } else { Key::validity_predicate(addr) }; - match self.read(&key)? 
{ + match self.db_read(&key)? { (Some(value), gas) => { let vp_code_hash = Hash::try_from(&value[..]) .map_err(Error::InvalidCodeHash)?; @@ -514,154 +142,1240 @@ where } } - #[allow(dead_code)] - /// Check if the given address exists on chain and return the gas cost. - pub fn exists(&self, addr: &Address) -> Result<(bool, u64)> { - let key = Key::validity_predicate(addr); - self.has_key(&key) + /// Get the block header + fn get_block_header( + &self, + height: Option, + ) -> Result<(Option
, u64)> { + match height { + Some(h) if h == self.in_mem().get_block_height().0 => { + let header = self.in_mem().header.clone(); + let gas = match header { + Some(ref header) => { + header.encoded_len() as u64 * MEMORY_ACCESS_GAS_PER_BYTE + } + None => MEMORY_ACCESS_GAS_PER_BYTE, + }; + Ok((header, gas)) + } + Some(h) => match self.db().read_block_header(h)? { + Some(header) => { + let gas = header.encoded_len() as u64 + * STORAGE_ACCESS_GAS_PER_BYTE; + Ok((Some(header), gas)) + } + None => Ok((None, STORAGE_ACCESS_GAS_PER_BYTE)), + }, + None => { + Ok((self.in_mem().header.clone(), STORAGE_ACCESS_GAS_PER_BYTE)) + } + } } +} - /// Get the chain ID as a raw string - pub fn get_chain_id(&self) -> (String, u64) { - ( - self.chain_id.to_string(), - CHAIN_ID_LENGTH as u64 * MEMORY_ACCESS_GAS_PER_BYTE, - ) +/// Common trait for write log, DB and in-memory state. +pub trait State: StateRead + StorageWrite { + /// Borrow mutable `WriteLog` + fn write_log_mut(&mut self) -> &mut WriteLog; + + /// Splitting borrow to get mutable reference to `WriteLog`, immutable + /// reference to the `InMemory` state and DB when in need of both (avoids + /// complain from the borrow checker) + fn split_borrow(&mut self) + -> (&mut WriteLog, &InMemory, &Self::D); + + /// Write the provided tx hash to write log. + fn write_tx_hash(&mut self, hash: Hash) -> write_log::Result<()> { + self.write_log_mut().write_tx_hash(hash) } +} - /// Get the block height - pub fn get_block_height(&self) -> (BlockHeight, u64) { - ( - self.block.height, - BLOCK_HEIGHT_LENGTH as u64 * MEMORY_ACCESS_GAS_PER_BYTE, - ) +impl StateRead for FullAccessState +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + type D = D; + type H = H; + + fn db(&self) -> &D { + &self.0.db } - /// Get the block hash - pub fn get_block_hash(&self) -> (BlockHash, u64) { - ( - self.block.hash.clone(), - BLOCK_HASH_LENGTH as u64 * MEMORY_ACCESS_GAS_PER_BYTE, - ) + fn in_mem(&self) -> &InMemory { + &self.0.in_mem } - /// Rebuild full Merkle tree after [`read_last_block()`] - fn rebuild_full_merkle_tree( - &self, - height: BlockHeight, - ) -> Result> { - self.get_merkle_tree(height, None) + fn write_log(&self) -> &WriteLog { + &self.0.write_log } - /// Rebuild Merkle tree with diffs in the DB. - /// Base tree and the specified `store_type` subtree is rebuilt. - /// If `store_type` isn't given, full Merkle tree is restored. - pub fn get_merkle_tree( - &self, - height: BlockHeight, - store_type: Option, - ) -> Result> { - // `0` means last committed height - let height = if height == BlockHeight(0) { - self.get_last_block_height() - } else { - height - }; + fn charge_gas(&self, _gas: u64) -> Result<()> { + Ok(()) + } +} - let epoch = self - .block - .pred_epochs - .get_epoch(height) - .unwrap_or(Epoch::default()); - let epoch_start_height = - match self.block.pred_epochs.get_start_height_of_epoch(epoch) { - Some(height) if height == BlockHeight(0) => BlockHeight(1), - Some(height) => height, - None => BlockHeight(1), - }; - let stores = self - .db - .read_merkle_tree_stores(epoch, epoch_start_height, store_type)? 
- .ok_or(Error::NoMerkleTree { height })?; - let prefix = store_type.and_then(|st| st.provable_prefix()); - let mut tree = match store_type { - Some(_) => MerkleTree::::new_partial(stores), - None => MerkleTree::::new(stores).expect("invalid stores"), - }; - // Restore the tree state with diffs - let mut target_height = epoch_start_height; - while target_height < height { - target_height = target_height.next_height(); - let mut old_diff_iter = - self.db.iter_old_diffs(target_height, prefix.as_ref()); - let mut new_diff_iter = - self.db.iter_new_diffs(target_height, prefix.as_ref()); +impl State for FullAccessState +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + fn write_log_mut(&mut self) -> &mut WriteLog { + &mut self.0.write_log + } - let mut old_diff = old_diff_iter.next(); - let mut new_diff = new_diff_iter.next(); - loop { - match (&old_diff, &new_diff) { - (Some(old), Some(new)) => { - let old_key = Key::parse(old.0.clone()) - .expect("the key should be parsable"); - let new_key = Key::parse(new.0.clone()) - .expect("the key should be parsable"); + fn split_borrow( + &mut self, + ) -> (&mut WriteLog, &InMemory, &Self::D) { + (&mut self.0.write_log, &self.0.in_mem, &self.0.db) + } +} - // compare keys as String - match old.0.cmp(&new.0) { - Ordering::Equal => { - // the value was updated - if (self.merkle_tree_key_filter)(&new_key) { - tree.update( - &new_key, - if is_pending_transfer_key(&new_key) { - target_height.serialize_to_vec() - } else { - new.1.clone() - }, - )?; - } - old_diff = old_diff_iter.next(); - new_diff = new_diff_iter.next(); - } - Ordering::Less => { - // the value was deleted - if (self.merkle_tree_key_filter)(&old_key) { - tree.delete(&old_key)?; - } - old_diff = old_diff_iter.next(); - } - Ordering::Greater => { - // the value was inserted - if (self.merkle_tree_key_filter)(&new_key) { - tree.update( - &new_key, - if is_pending_transfer_key(&new_key) { - target_height.serialize_to_vec() - } else { - new.1.clone() - }, - )?; - } - new_diff = new_diff_iter.next(); - } - } - } - (Some(old), None) => { - // the value was deleted - let key = Key::parse(old.0.clone()) - .expect("the key should be parsable"); +impl WithConversionState for FullAccessState +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + fn conversion_state(&self) -> &ConversionState { + &self.in_mem().conversion_state + } - if (self.merkle_tree_key_filter)(&key) { - tree.delete(&key)?; - } + fn conversion_state_mut(&mut self) -> &mut ConversionState { + &mut self.in_mem_mut().conversion_state + } +} - old_diff = old_diff_iter.next(); - } - (None, Some(new)) => { - // the value was inserted - let key = Key::parse(new.0.clone()) - .expect("the key should be parsable"); +impl StateRead for WlState +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + type D = D; + type H = H; + + fn write_log(&self) -> &WriteLog { + &self.write_log + } + + fn db(&self) -> &D { + &self.db + } + + fn in_mem(&self) -> &InMemory { + &self.in_mem + } + + fn charge_gas(&self, _gas: u64) -> Result<()> { + Ok(()) + } +} + +impl State for WlState +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + fn write_log_mut(&mut self) -> &mut WriteLog { + &mut self.write_log + } + + fn split_borrow( + &mut self, + ) -> (&mut WriteLog, &InMemory, &Self::D) { + (&mut self.write_log, &self.in_mem, &self.db) + } +} + +impl StateRead for TempWlState<'_, D, H> +where + D: 'static + DB + for<'iter> 
DBIter<'iter>, + H: 'static + StorageHasher, +{ + type D = D; + type H = H; + + fn write_log(&self) -> &WriteLog { + &self.write_log + } + + fn db(&self) -> &D { + self.db + } + + fn in_mem(&self) -> &InMemory { + self.in_mem + } + + fn charge_gas(&self, _gas: u64) -> Result<()> { + Ok(()) + } +} + +impl State for TempWlState<'_, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + fn write_log_mut(&mut self) -> &mut WriteLog { + &mut self.write_log + } + + fn split_borrow( + &mut self, + ) -> (&mut WriteLog, &InMemory, &Self::D) { + (&mut self.write_log, (self.in_mem), (self.db)) + } +} + +impl StateRead for TxHostEnvState<'_, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + type D = D; + type H = H; + + fn write_log(&self) -> &WriteLog { + self.write_log + } + + fn db(&self) -> &D { + self.db + } + + fn in_mem(&self) -> &InMemory { + self.in_mem + } + + fn charge_gas(&self, gas: u64) -> Result<()> { + self.gas_meter.borrow_mut().consume(gas).map_err(|err| { + self.sentinel.borrow_mut().set_out_of_gas(); + tracing::info!( + "Stopping transaction execution because of gas error: {}", + err + ); + Error::Gas(err) + }) + } +} + +impl State for TxHostEnvState<'_, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + fn write_log_mut(&mut self) -> &mut WriteLog { + self.write_log + } + + fn split_borrow( + &mut self, + ) -> (&mut WriteLog, &InMemory, &Self::D) { + (self.write_log, (self.in_mem), (self.db)) + } +} + +impl StateRead for VpHostEnvState<'_, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + type D = D; + type H = H; + + fn write_log(&self) -> &WriteLog { + self.write_log + } + + fn db(&self) -> &D { + self.db + } + + fn in_mem(&self) -> &InMemory { + self.in_mem + } + + fn charge_gas(&self, gas: u64) -> Result<()> { + self.gas_meter.borrow_mut().consume(gas).map_err(|err| { + self.sentinel.borrow_mut().set_out_of_gas(); + tracing::info!( + "Stopping VP execution because of gas error: {}", + err + ); + Error::Gas(err) + }) + } +} + +/// State with a write-logged storage. +#[derive(Debug)] +pub struct WlState +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + /// Write log + write_log: WriteLog, + // DB (usually a MockDB or PersistentDB) + // This should be immutable in WlState, but mutable in `FullAccess`. + // TODO: maybe now can use &D and shit can be public? Since host_env is + // using `trait State`. + db: D, + /// State in memory + in_mem: InMemory, + /// Static merkle tree storage key filter + pub merkle_tree_key_filter: fn(&storage::Key) -> bool, +} + +/// State with a temporary write log. This is used for dry-running txs and ABCI +/// prepare and processs proposal, which must not modify the actual state. +#[derive(Debug)] +pub struct TempWlState<'a, D, H> +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + /// Write log + write_log: WriteLog, + // DB + db: &'a D, + /// State + in_mem: &'a InMemory, +} + +// State with mutable write log and gas metering for tx host env. +#[derive(Debug)] +pub struct TxHostEnvState<'a, D, H> +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + /// Write log + pub write_log: &'a mut WriteLog, + // DB + pub db: &'a D, + /// State + pub in_mem: &'a InMemory, + /// Tx gas meter + pub gas_meter: &'a RefCell, + /// Errors sentinel + pub sentinel: &'a RefCell, +} + +// Read-only state with gas metering for VP host env. 
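All of these wrappers funnel reads through the same `StorageRead` surface, so the only behavioral difference between them is whether `charge_gas` actually meters the access. As a minimal sketch (the helper name is illustrative and assumes this module's existing imports), a caller can stay generic over `StateRead` and work unchanged against protocol-side state and the gas-metered host-env states:

// Sketch only: generic over any `StateRead` impl from this module.
// `StorageRead::read_bytes` consults the write log first and falls back to
// the DB, charging gas through `StateRead::charge_gas` on the metered
// host-env states and charging nothing on protocol-side states.
fn read_or_default<S: StateRead>(
    state: &S,
    key: &storage::Key,
) -> namada_storage::Result<Vec<u8>> {
    Ok(state.read_bytes(key)?.unwrap_or_default())
}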
+#[derive(Debug)] +pub struct VpHostEnvState<'a, D, H> +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + /// Write log + pub write_log: &'a WriteLog, + // DB + pub db: &'a D, + /// State + pub in_mem: &'a InMemory, + /// VP gas meter + pub gas_meter: &'a RefCell, + /// Errors sentinel + pub sentinel: &'a RefCell, +} + +impl FullAccessState +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + pub fn write_log_mut(&mut self) -> &mut WriteLog { + &mut self.0.write_log + } + + pub fn in_mem_mut(&mut self) -> &mut InMemory { + &mut self.0.in_mem + } + + pub fn db_mut(&mut self) -> &mut D { + &mut self.0.db + } + + pub fn restrict_writes_to_write_log(&mut self) -> &mut WlState { + &mut self.0 + } + + pub fn read_only(&self) -> &WlState { + &self.0 + } + + pub fn open( + db_path: impl AsRef, + cache: Option<&D::Cache>, + chain_id: ChainId, + native_token: Address, + storage_read_past_height_limit: Option, + merkle_tree_key_filter: fn(&storage::Key) -> bool, + ) -> Self { + let write_log = WriteLog::default(); + let db = D::open(db_path, cache); + let in_mem = InMemory::new( + chain_id, + native_token, + storage_read_past_height_limit, + ); + let mut state = Self(WlState { + write_log, + db, + in_mem, + merkle_tree_key_filter, + }); + state.load_last_state(); + state + } + + #[allow(dead_code)] + /// Check if the given address exists on chain and return the gas cost. + pub fn db_exists(&self, addr: &Address) -> Result<(bool, u64)> { + let key = Key::validity_predicate(addr); + self.db_has_key(&key) + } + + /// Initialize a new epoch when the current epoch is finished. Returns + /// `true` on a new epoch. + pub fn update_epoch( + &mut self, + height: BlockHeight, + time: DateTimeUtc, + ) -> StorageResult { + let parameters = namada_parameters::read(self) + .expect("Couldn't read protocol parameters"); + + match self.in_mem.update_epoch_blocks_delay.as_mut() { + None => { + // Check if the new epoch minimum start height and start time + // have been fulfilled. If so, queue the next + // epoch to start two blocks into the future so + // as to align validator set updates + etc with + // tendermint. This is because tendermint has a two block delay + // to validator changes. + let current_epoch_duration_satisfied = height + >= self.in_mem.next_epoch_min_start_height + && time >= self.in_mem.next_epoch_min_start_time; + if current_epoch_duration_satisfied { + self.in_mem.update_epoch_blocks_delay = + Some(EPOCH_SWITCH_BLOCKS_DELAY); + } + } + Some(blocks_until_switch) => { + *blocks_until_switch -= 1; + } + }; + let new_epoch = + matches!(self.in_mem.update_epoch_blocks_delay, Some(0)); + + if new_epoch { + // Reset the delay tracker + self.in_mem.update_epoch_blocks_delay = None; + + // Begin a new epoch + self.in_mem.block.epoch = self.in_mem.block.epoch.next(); + let EpochDuration { + min_num_of_blocks, + min_duration, + } = parameters.epoch_duration; + self.in_mem.next_epoch_min_start_height = + height + min_num_of_blocks; + self.in_mem.next_epoch_min_start_time = time + min_duration; + + self.in_mem.block.pred_epochs.new_epoch(height); + tracing::info!("Began a new epoch {}", self.in_mem.block.epoch); + } + Ok(new_epoch) + } + + /// Commit the current block's write log to the storage and commit the block + /// to DB. Starts a new block write log. 
+ pub fn commit_block(&mut self) -> StorageResult<()> { + if self.in_mem.last_epoch != self.in_mem.block.epoch { + self.in_mem_mut() + .update_epoch_in_merkle_tree() + .into_storage_result()?; + } + + let mut batch = D::batch(); + self.commit_write_log_block(&mut batch) + .into_storage_result()?; + self.commit_block_from_batch(batch).into_storage_result() + } + + /// Commit the current block's write log to the storage. Starts a new block + /// write log. + pub fn commit_write_log_block( + &mut self, + batch: &mut D::WriteBatch, + ) -> Result<()> { + for (key, entry) in + std::mem::take(&mut self.0.write_log.block_write_log).into_iter() + { + match entry { + StorageModification::Write { value } => { + self.batch_write_subspace_val(batch, &key, value)?; + } + StorageModification::Delete => { + self.batch_delete_subspace_val(batch, &key)?; + } + StorageModification::InitAccount { vp_code_hash } => { + self.batch_write_subspace_val(batch, &key, vp_code_hash)?; + } + // temporary value isn't persisted + StorageModification::Temp { .. } => {} + } + } + debug_assert!(self.0.write_log.block_write_log.is_empty()); + + // Replay protections specifically + for (hash, entry) in + std::mem::take(&mut self.0.write_log.replay_protection).into_iter() + { + match entry { + ReProtStorageModification::Write => self + .write_replay_protection_entry( + batch, + // Can only write tx hashes to the previous block, no + // further + &replay_protection::last_key(&hash), + )?, + ReProtStorageModification::Delete => self + .delete_replay_protection_entry( + batch, + // Can only delete tx hashes from the previous block, + // no further + &replay_protection::last_key(&hash), + )?, + ReProtStorageModification::Finalize => { + self.write_replay_protection_entry( + batch, + &replay_protection::all_key(&hash), + )?; + self.delete_replay_protection_entry( + batch, + &replay_protection::last_key(&hash), + )?; + } + } + } + debug_assert!(self.0.write_log.replay_protection.is_empty()); + + if let Some(address_gen) = self.0.write_log.address_gen.take() { + self.0.in_mem.address_gen = address_gen + } + Ok(()) + } + + /// Start write batch. + pub fn batch() -> D::WriteBatch { + D::batch() + } + + /// Execute write batch. + pub fn exec_batch(&mut self, batch: D::WriteBatch) -> Result<()> { + Ok(self.db.exec_batch(batch)?) + } + + /// Batch write the value with the given height and account subspace key to + /// the DB. Returns the size difference from previous value, if any, or + /// the size of the value otherwise. + pub fn batch_write_subspace_val( + &mut self, + batch: &mut D::WriteBatch, + key: &Key, + value: impl AsRef<[u8]>, + ) -> Result { + let value = value.as_ref(); + let is_key_merklized = (self.merkle_tree_key_filter)(key); + + if is_pending_transfer_key(key) { + // The tree of the bridge pool stores the current height for the + // pending transfer + let height = self.in_mem.block.height.serialize_to_vec(); + self.in_mem.block.tree.update(key, height)?; + } else { + // Update the merkle tree + if is_key_merklized { + self.in_mem.block.tree.update(key, value)?; + } + } + Ok(self.db.batch_write_subspace_val( + batch, + self.in_mem.block.height, + key, + value, + is_key_merklized, + )?) + } + + /// Batch delete the value with the given height and account subspace key + /// from the DB. Returns the size of the removed value, if any, 0 if no + /// previous value was found. 
+ pub fn batch_delete_subspace_val( + &mut self, + batch: &mut D::WriteBatch, + key: &Key, + ) -> Result { + let is_key_merklized = (self.merkle_tree_key_filter)(key); + // Update the merkle tree + if is_key_merklized { + self.in_mem.block.tree.delete(key)?; + } + Ok(self.db.batch_delete_subspace_val( + batch, + self.in_mem.block.height, + key, + is_key_merklized, + )?) + } + + // Prune merkle tree stores. Use after updating self.block.height in the + // commit. + fn prune_merkle_tree_stores( + &mut self, + batch: &mut D::WriteBatch, + ) -> Result<()> { + if self.in_mem.block.epoch.0 == 0 { + return Ok(()); + } + // Prune non-provable stores at the previous epoch + for st in StoreType::iter_non_provable() { + self.0.db.prune_merkle_tree_store( + batch, + st, + self.in_mem.block.epoch.prev(), + )?; + } + // Prune provable stores + let oldest_epoch = self.in_mem.get_oldest_epoch(); + if oldest_epoch.0 > 0 { + // Remove stores at the previous epoch because the Merkle tree + // stores at the starting height of the epoch would be used to + // restore stores at a height (> oldest_height) in the epoch + for st in StoreType::iter_provable() { + self.db.prune_merkle_tree_store( + batch, + st, + oldest_epoch.prev(), + )?; + } + + // Prune the BridgePool subtree stores with invalid nonce + let mut epoch = match self.get_oldest_epoch_with_valid_nonce()? { + Some(epoch) => epoch, + None => return Ok(()), + }; + while oldest_epoch < epoch { + epoch = epoch.prev(); + self.db.prune_merkle_tree_store( + batch, + &StoreType::BridgePool, + epoch, + )?; + } + } + + Ok(()) + } + + /// Check it the given transaction's hash is already present in storage + pub fn has_replay_protection_entry(&self, hash: &Hash) -> Result { + Ok(self.db.has_replay_protection_entry(hash)?) + } + + /// Write the provided tx hash to storage + pub fn write_replay_protection_entry( + &mut self, + batch: &mut D::WriteBatch, + key: &Key, + ) -> Result<()> { + self.db.write_replay_protection_entry(batch, key)?; + Ok(()) + } + + /// Delete the provided tx hash from storage + pub fn delete_replay_protection_entry( + &mut self, + batch: &mut D::WriteBatch, + key: &Key, + ) -> Result<()> { + self.db.delete_replay_protection_entry(batch, key)?; + Ok(()) + } + + /// Iterate the replay protection storage from the last block + pub fn iter_replay_protection( + &self, + ) -> Box + '_> { + Box::new(self.db.iter_replay_protection().map(|(raw_key, _, _)| { + raw_key.parse().expect("Failed hash conversion") + })) + } + + /// Get oldest epoch which has the valid signed nonce of the bridge pool + fn get_oldest_epoch_with_valid_nonce(&self) -> Result> { + let last_height = self.in_mem.get_last_block_height(); + let current_nonce = match self + .db + .read_bridge_pool_signed_nonce(last_height, last_height)? + { + Some(nonce) => nonce, + None => return Ok(None), + }; + let (mut epoch, _) = self.in_mem.get_last_epoch(); + // We don't need to check the older epochs because their Merkle tree + // snapshots have been already removed + let oldest_epoch = self.in_mem.get_oldest_epoch(); + // Look up the last valid epoch which has the previous nonce of the + // current one. It has the previous nonce, but it was + // incremented during the epoch. + while 0 < epoch.0 && oldest_epoch <= epoch { + epoch = epoch.prev(); + let height = match self + .in_mem + .block + .pred_epochs + .get_start_height_of_epoch(epoch) + { + Some(h) => h, + None => continue, + }; + let nonce = match self + .db + .read_bridge_pool_signed_nonce(height, last_height)? 
+ { + Some(nonce) => nonce, + // skip pruning when the old epoch doesn't have the signed nonce + None => break, + }; + if nonce < current_nonce { + break; + } + } + Ok(Some(epoch)) + } + + /// Rebuild full Merkle tree after [`read_last_block()`] + fn rebuild_full_merkle_tree( + &self, + height: BlockHeight, + ) -> Result> { + self.get_merkle_tree(height, None) + } + + /// Load the full state at the last committed height, if any. Returns the + /// Merkle root hash and the height of the committed block. + fn load_last_state(&mut self) { + if let Some(BlockStateRead { + merkle_tree_stores, + hash, + height, + time, + epoch, + pred_epochs, + next_epoch_min_start_height, + next_epoch_min_start_time, + update_epoch_blocks_delay, + results, + address_gen, + conversion_state, + tx_queue, + ethereum_height, + eth_events_queue, + }) = self + .0 + .db + .read_last_block() + .expect("Read block call must not fail") + { + // Rebuild Merkle tree + let tree = MerkleTree::new(merkle_tree_stores) + .or_else(|_| self.rebuild_full_merkle_tree(height)) + .unwrap(); + + let in_mem = &mut self.0.in_mem; + in_mem.block.hash = hash.clone(); + in_mem.block.height = height; + in_mem.block.epoch = epoch; + in_mem.block.results = results; + in_mem.block.pred_epochs = pred_epochs; + in_mem.last_block = Some(LastBlock { height, hash, time }); + in_mem.last_epoch = epoch; + in_mem.next_epoch_min_start_height = next_epoch_min_start_height; + in_mem.next_epoch_min_start_time = next_epoch_min_start_time; + in_mem.update_epoch_blocks_delay = update_epoch_blocks_delay; + in_mem.address_gen = address_gen; + in_mem.block.tree = tree; + in_mem.conversion_state = conversion_state; + in_mem.tx_queue = tx_queue; + in_mem.ethereum_height = ethereum_height; + in_mem.eth_events_queue = eth_events_queue; + tracing::debug!("Loaded storage from DB"); + } else { + tracing::info!("No state could be found"); + } + } + + /// Persist the block's state from batch writes to the database. + /// Note that unlike `commit_block` this method doesn't commit the write + /// log. + pub fn commit_block_from_batch( + &mut self, + mut batch: D::WriteBatch, + ) -> Result<()> { + // All states are written only when the first height or a new epoch + let is_full_commit = self.in_mem.block.height.0 == 1 + || self.in_mem.last_epoch != self.in_mem.block.epoch; + + // For convenience in tests, fill-in a header if it's missing. + // Normally, the header is added in `FinalizeBlock`. 
+ #[cfg(any(test, feature = "testing"))] + { + if self.in_mem.header.is_none() { + self.in_mem.header = Some(Header { + hash: Hash::default(), + time: DateTimeUtc::now(), + next_validators_hash: Hash::default(), + }); + } + } + + let state = BlockStateWrite { + merkle_tree_stores: self.in_mem.block.tree.stores(), + header: self.in_mem.header.as_ref(), + hash: &self.in_mem.block.hash, + height: self.in_mem.block.height, + time: self + .in_mem + .header + .as_ref() + .expect("Must have a block header on commit") + .time, + epoch: self.in_mem.block.epoch, + results: &self.in_mem.block.results, + pred_epochs: &self.in_mem.block.pred_epochs, + next_epoch_min_start_height: self + .in_mem + .next_epoch_min_start_height, + next_epoch_min_start_time: self.in_mem.next_epoch_min_start_time, + update_epoch_blocks_delay: self.in_mem.update_epoch_blocks_delay, + address_gen: &self.in_mem.address_gen, + conversion_state: &self.in_mem.conversion_state, + tx_queue: &self.in_mem.tx_queue, + ethereum_height: self.in_mem.ethereum_height.as_ref(), + eth_events_queue: &self.in_mem.eth_events_queue, + }; + self.db + .add_block_to_batch(state, &mut batch, is_full_commit)?; + let header = self + .in_mem + .header + .take() + .expect("Must have a block header on commit"); + self.in_mem.last_block = Some(LastBlock { + height: self.in_mem.block.height, + hash: header.hash.into(), + time: header.time, + }); + self.in_mem.last_epoch = self.in_mem.block.epoch; + if is_full_commit { + // prune old merkle tree stores + self.prune_merkle_tree_stores(&mut batch)?; + } + self.db.exec_batch(batch)?; + Ok(()) + } +} + +impl Deref for FullAccessState +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + type Target = WlState; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for FullAccessState +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl WlState +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + pub fn write_log(&self) -> &WriteLog { + &self.write_log + } + + pub fn in_mem(&self) -> &InMemory { + &self.in_mem + } + + pub fn in_mem_mut(&mut self) -> &mut InMemory { + &mut self.in_mem + } + + pub fn db(&self) -> &D { + // NOTE: `WlState` must not be allowed mutable access to DB + &self.db + } + + pub fn write_log_mut(&mut self) -> &mut WriteLog { + &mut self.write_log + } + + pub fn with_temp_write_log(&self) -> TempWlState<'_, D, H> { + TempWlState { + write_log: WriteLog::default(), + db: &self.db, + in_mem: &self.in_mem, + } + } + + /// Commit the current transaction's write log to the block when it's + /// accepted by all the triggered validity predicates. Starts a new + /// transaction write log. + pub fn commit_tx(&mut self) { + self.write_log.commit_tx() + } + + /// Drop the current transaction's write log when it's declined by any of + /// the triggered validity predicates. Starts a new transaction write log. + pub fn drop_tx(&mut self) { + self.write_log.drop_tx() + } + + /// Delete the provided transaction's hash from storage. + pub fn delete_tx_hash(&mut self, hash: Hash) -> write_log::Result<()> { + self.write_log.delete_tx_hash(hash) + } + + #[inline] + pub fn get_current_decision_height(&self) -> BlockHeight { + self.in_mem.get_last_block_height() + 1 + } + + /// Check if we are at a given [`BlockHeight`] offset, `height_offset`, + /// within the current epoch. 
+ pub fn is_deciding_offset_within_epoch(&self, height_offset: u64) -> bool { + let current_decision_height = self.get_current_decision_height(); + + let pred_epochs = &self.in_mem.block.pred_epochs; + let fst_heights_of_each_epoch = pred_epochs.first_block_heights(); + + fst_heights_of_each_epoch + .last() + .map(|&h| { + let height_offset_within_epoch = h + height_offset; + current_decision_height == height_offset_within_epoch + }) + .unwrap_or(false) + } + + /// Returns a value from the specified subspace at the given height (or the + /// last committed height when 0) and the gas cost. + pub fn db_read_with_height( + &self, + key: &storage::Key, + height: BlockHeight, + ) -> Result<(Option>, u64)> { + // `0` means last committed height + if height == BlockHeight(0) + || height >= self.in_mem().get_last_block_height() + { + self.db_read(key) + } else { + if !(self.merkle_tree_key_filter)(key) { + return Ok((None, 0)); + } + + match self.db().read_subspace_val_with_height( + key, + height, + self.in_mem().get_last_block_height(), + )? { + Some(v) => { + let gas = (key.len() + v.len()) as u64 + * STORAGE_ACCESS_GAS_PER_BYTE; + Ok((Some(v), gas)) + } + None => { + Ok((None, key.len() as u64 * STORAGE_ACCESS_GAS_PER_BYTE)) + } + } + } + } + + /// Write a value to the specified subspace and returns the gas cost and the + /// size difference + pub fn db_write( + &mut self, + key: &Key, + value: impl AsRef<[u8]>, + ) -> Result<(u64, i64)> { + // Note that this method is the same as `StorageWrite::write_bytes`, + // but with gas and storage bytes len diff accounting + tracing::debug!("storage write key {}", key,); + let value = value.as_ref(); + let is_key_merklized = (self.merkle_tree_key_filter)(key); + + if is_pending_transfer_key(key) { + // The tree of the bright pool stores the current height for the + // pending transfer + let height = self.in_mem.block.height.serialize_to_vec(); + self.in_mem.block.tree.update(key, height)?; + } else { + // Update the merkle tree + if is_key_merklized { + self.in_mem.block.tree.update(key, value)?; + } + } + + let len = value.len(); + let gas = (key.len() + len) as u64 * STORAGE_WRITE_GAS_PER_BYTE; + let size_diff = self.db.write_subspace_val( + self.in_mem.block.height, + key, + value, + is_key_merklized, + )?; + Ok((gas, size_diff)) + } + + /// Delete the specified subspace and returns the gas cost and the size + /// difference + pub fn db_delete(&mut self, key: &Key) -> Result<(u64, i64)> { + // Note that this method is the same as `StorageWrite::delete`, + // but with gas and storage bytes len diff accounting + let mut deleted_bytes_len = 0; + if self.db_has_key(key)?.0 { + let is_key_merklized = (self.merkle_tree_key_filter)(key); + if is_key_merklized { + self.in_mem.block.tree.delete(key)?; + } + deleted_bytes_len = self.db.delete_subspace_val( + self.in_mem.block.height, + key, + is_key_merklized, + )?; + } + let gas = (key.len() + deleted_bytes_len as usize) as u64 + * STORAGE_WRITE_GAS_PER_BYTE; + Ok((gas, deleted_bytes_len)) + } + + /// Get a Tendermint-compatible existence proof. + /// + /// Proofs from the Ethereum bridge pool are not + /// Tendermint-compatible. Requesting for a key + /// belonging to the bridge pool will cause this + /// method to error. 
+ pub fn get_existence_proof( + &self, + key: &Key, + value: namada_merkle_tree::StorageBytes, + height: BlockHeight, + ) -> Result { + use std::array; + + // `0` means last committed height + let height = if height == BlockHeight(0) { + self.in_mem.get_last_block_height() + } else { + height + }; + + if height > self.in_mem.get_last_block_height() { + if let MembershipProof::ICS23(proof) = self + .in_mem + .block + .tree + .get_sub_tree_existence_proof(array::from_ref(key), vec![value]) + .map_err(Error::MerkleTreeError)? + { + self.in_mem + .block + .tree + .get_sub_tree_proof(key, proof) + .map(Into::into) + .map_err(Error::MerkleTreeError) + } else { + Err(Error::MerkleTreeError(MerkleTreeError::TendermintProof)) + } + } else { + let (store_type, _) = StoreType::sub_key(key)?; + let tree = self.get_merkle_tree(height, Some(store_type))?; + if let MembershipProof::ICS23(proof) = tree + .get_sub_tree_existence_proof(array::from_ref(key), vec![value]) + .map_err(Error::MerkleTreeError)? + { + tree.get_sub_tree_proof(key, proof) + .map(Into::into) + .map_err(Error::MerkleTreeError) + } else { + Err(Error::MerkleTreeError(MerkleTreeError::TendermintProof)) + } + } + } + + /// Get the non-existence proof + pub fn get_non_existence_proof( + &self, + key: &Key, + height: BlockHeight, + ) -> Result { + // `0` means last committed height + let height = if height == BlockHeight(0) { + self.in_mem.get_last_block_height() + } else { + height + }; + + if height > self.in_mem.get_last_block_height() { + Err(Error::Temporary { + error: format!( + "The block at the height {} hasn't committed yet", + height, + ), + }) + } else { + let (store_type, _) = StoreType::sub_key(key)?; + self.get_merkle_tree(height, Some(store_type))? + .get_non_existence_proof(key) + .map(Into::into) + .map_err(Error::MerkleTreeError) + } + } + + /// Rebuild Merkle tree with diffs in the DB. + /// Base tree and the specified `store_type` subtree is rebuilt. + /// If `store_type` isn't given, full Merkle tree is restored. + pub fn get_merkle_tree( + &self, + height: BlockHeight, + store_type: Option, + ) -> Result> { + // `0` means last committed height + let height = if height == BlockHeight(0) { + self.in_mem.get_last_block_height() + } else { + height + }; + + let epoch = self + .in_mem + .block + .pred_epochs + .get_epoch(height) + .unwrap_or(Epoch::default()); + let epoch_start_height = match self + .in_mem + .block + .pred_epochs + .get_start_height_of_epoch(epoch) + { + Some(height) if height == BlockHeight(0) => BlockHeight(1), + Some(height) => height, + None => BlockHeight(1), + }; + let stores = self + .db + .read_merkle_tree_stores(epoch, epoch_start_height, store_type)? 
+ .ok_or(Error::NoMerkleTree { height })?; + let prefix = store_type.and_then(|st| st.provable_prefix()); + let mut tree = match store_type { + Some(_) => MerkleTree::::new_partial(stores), + None => MerkleTree::::new(stores).expect("invalid stores"), + }; + // Restore the tree state with diffs + let mut target_height = epoch_start_height; + while target_height < height { + target_height = target_height.next_height(); + let mut old_diff_iter = + self.db.iter_old_diffs(target_height, prefix.as_ref()); + let mut new_diff_iter = + self.db.iter_new_diffs(target_height, prefix.as_ref()); + + let mut old_diff = old_diff_iter.next(); + let mut new_diff = new_diff_iter.next(); + loop { + match (&old_diff, &new_diff) { + (Some(old), Some(new)) => { + let old_key = Key::parse(old.0.clone()) + .expect("the key should be parsable"); + let new_key = Key::parse(new.0.clone()) + .expect("the key should be parsable"); + + // compare keys as String + match old.0.cmp(&new.0) { + Ordering::Equal => { + // the value was updated + if (self.merkle_tree_key_filter)(&new_key) { + tree.update( + &new_key, + if is_pending_transfer_key(&new_key) { + target_height.serialize_to_vec() + } else { + new.1.clone() + }, + )?; + } + old_diff = old_diff_iter.next(); + new_diff = new_diff_iter.next(); + } + Ordering::Less => { + // the value was deleted + if (self.merkle_tree_key_filter)(&old_key) { + tree.delete(&old_key)?; + } + old_diff = old_diff_iter.next(); + } + Ordering::Greater => { + // the value was inserted + if (self.merkle_tree_key_filter)(&new_key) { + tree.update( + &new_key, + if is_pending_transfer_key(&new_key) { + target_height.serialize_to_vec() + } else { + new.1.clone() + }, + )?; + } + new_diff = new_diff_iter.next(); + } + } + } + (Some(old), None) => { + // the value was deleted + let key = Key::parse(old.0.clone()) + .expect("the key should be parsable"); + + if (self.merkle_tree_key_filter)(&key) { + tree.delete(&key)?; + } + + old_diff = old_diff_iter.next(); + } + (None, Some(new)) => { + // the value was inserted + let key = Key::parse(new.0.clone()) + .expect("the key should be parsable"); if (self.merkle_tree_key_filter)(&key) { tree.update( @@ -674,106 +1388,529 @@ where )?; } - new_diff = new_diff_iter.next(); - } - (None, None) => break, - } - } - } - if let Some(st) = store_type { - // Add the base tree with the given height - let mut stores = self - .db - .read_merkle_tree_stores(epoch, height, Some(StoreType::Base))? - .ok_or(Error::NoMerkleTree { height })?; - let restored_stores = tree.stores(); - // Set the root and store of the rebuilt subtree - stores.set_root(&st, *restored_stores.root(&st)); - stores.set_store(restored_stores.store(&st).to_owned()); - tree = MerkleTree::::new_partial(stores); - } - Ok(tree) - } + new_diff = new_diff_iter.next(); + } + (None, None) => break, + } + } + } + if let Some(st) = store_type { + // Add the base tree with the given height + let mut stores = self + .db + .read_merkle_tree_stores(epoch, height, Some(StoreType::Base))? 
+ .ok_or(Error::NoMerkleTree { height })?; + let restored_stores = tree.stores(); + // Set the root and store of the rebuilt subtree + stores.set_root(&st, *restored_stores.root(&st)); + stores.set_store(restored_stores.store(&st).to_owned()); + tree = MerkleTree::::new_partial(stores); + } + Ok(tree) + } + + /// Get the timestamp of the last committed block, or the current timestamp + /// if no blocks have been produced yet + pub fn get_last_block_timestamp(&self) -> Result { + let last_block_height = self.in_mem.get_block_height().0; + + Ok(self + .db + .read_block_header(last_block_height)? + .map_or_else(DateTimeUtc::now, |header| header.time)) + } +} + +impl TempWlState<'_, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + pub fn write_log(&self) -> &WriteLog { + &self.write_log + } + + pub fn in_mem(&self) -> &InMemory { + self.in_mem + } + + pub fn db(&self) -> &D { + self.db + } + + pub fn write_log_mut(&mut self) -> &mut WriteLog { + &mut self.write_log + } + + /// Check if the given tx hash has already been processed + pub fn has_replay_protection_entry(&self, hash: &Hash) -> Result { + if let Some(present) = self.write_log.has_replay_protection_entry(hash) + { + return Ok(present); + } + + self.db() + .has_replay_protection_entry(hash) + .map_err(Error::DbError) + } + + /// Check if the given tx hash has already been committed to storage + pub fn has_committed_replay_protection_entry( + &self, + hash: &Hash, + ) -> Result { + self.db() + .has_replay_protection_entry(hash) + .map_err(Error::DbError) + } +} + +#[macro_export] +macro_rules! impl_storage_read { + ($($type:ty)*) => { + impl StorageRead for $($type)* + where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + { + type PrefixIter<'iter> = PrefixIter<'iter, D> where Self: 'iter; + + fn read_bytes( + &self, + key: &storage::Key, + ) -> namada_storage::Result>> { + // try to read from the write log first + let (log_val, gas) = self.write_log().read(key); + self.charge_gas(gas).into_storage_result()?; + match log_val { + Some(write_log::StorageModification::Write { ref value }) => { + Ok(Some(value.clone())) + } + Some(write_log::StorageModification::Delete) => Ok(None), + Some(write_log::StorageModification::InitAccount { + ref vp_code_hash, + }) => Ok(Some(vp_code_hash.to_vec())), + Some(write_log::StorageModification::Temp { ref value }) => { + Ok(Some(value.clone())) + } + None => { + // when not found in write log, try to read from the storage + let (value, gas) = self.db_read(key).into_storage_result()?; + dbg!(key, value.is_some()); + self.charge_gas(gas).into_storage_result()?; + Ok(value) + } + } + } + + fn has_key(&self, key: &storage::Key) -> namada_storage::Result { + // try to read from the write log first + let (log_val, gas) = self.write_log().read(key); + self.charge_gas(gas).into_storage_result()?; + match log_val { + Some(&write_log::StorageModification::Write { .. }) + | Some(&write_log::StorageModification::InitAccount { .. }) + | Some(&write_log::StorageModification::Temp { .. 
}) => Ok(true), + Some(&write_log::StorageModification::Delete) => { + // the given key has been deleted + Ok(false) + } + None => { + // when not found in write log, try to check the storage + let (present, gas) = self.db_has_key(key).into_storage_result()?; + self.charge_gas(gas).into_storage_result()?; + Ok(present) + } + } + } + + fn iter_prefix<'iter>( + &'iter self, + prefix: &storage::Key, + ) -> namada_storage::Result> { + let (iter, gas) = + iter_prefix_post(self.write_log(), self.db(), prefix); + self.charge_gas(gas).into_storage_result()?; + Ok(iter) + } + + fn iter_next<'iter>( + &'iter self, + iter: &mut Self::PrefixIter<'iter>, + ) -> namada_storage::Result)>> { + iter.next().map(|(key, val, gas)| { + self.charge_gas(gas).into_storage_result()?; + Ok((key, val)) + }).transpose() + } + + fn get_chain_id( + &self, + ) -> std::result::Result { + let (chain_id, gas) = self.in_mem().get_chain_id(); + self.charge_gas(gas).into_storage_result()?; + Ok(chain_id) + } + + fn get_block_height( + &self, + ) -> std::result::Result { + let (height, gas) = self.in_mem().get_block_height(); + self.charge_gas(gas).into_storage_result()?; + Ok(height) + } + + fn get_block_header( + &self, + height: storage::BlockHeight, + ) -> std::result::Result, namada_storage::Error> + { + let (header, gas) = + StateRead::get_block_header(self, Some(height)).into_storage_result()?; + self.charge_gas(gas).into_storage_result()?; + Ok(header) + } + + fn get_block_hash( + &self, + ) -> std::result::Result { + let (hash, gas) = self.in_mem().get_block_hash(); + self.charge_gas(gas).into_storage_result()?; + Ok(hash) + } + + fn get_block_epoch( + &self, + ) -> std::result::Result { + let (epoch, gas) = self.in_mem().get_current_epoch(); + self.charge_gas(gas).into_storage_result()?; + Ok(epoch) + } + + fn get_pred_epochs(&self) -> namada_storage::Result { + self.charge_gas( + namada_gas::STORAGE_ACCESS_GAS_PER_BYTE, + ).into_storage_result()?; + Ok(self.in_mem().block.pred_epochs.clone()) + } + + fn get_tx_index( + &self, + ) -> std::result::Result { + self.charge_gas( + namada_gas::STORAGE_ACCESS_GAS_PER_BYTE, + ).into_storage_result()?; + Ok(self.in_mem().tx_index) + } + + fn get_native_token(&self) -> namada_storage::Result
{ + self.charge_gas( + namada_gas::STORAGE_ACCESS_GAS_PER_BYTE, + ).into_storage_result()?; + Ok(self.in_mem().native_token.clone()) + } + } + } +} + +#[macro_export] +macro_rules! impl_storage_write { + ($($type:ty)*) => { + impl StorageWrite for $($type)* + where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + { + fn write_bytes( + &mut self, + key: &storage::Key, + val: impl AsRef<[u8]>, + ) -> namada_storage::Result<()> { + let (gas, _size_diff) = self + .write_log_mut() + .write(key, val.as_ref().to_vec()) + .into_storage_result()?; + self.charge_gas(gas).into_storage_result()?; + Ok(()) + } + + fn delete(&mut self, key: &storage::Key) -> namada_storage::Result<()> { + let (gas, _size_diff) = self + .write_log_mut() + .delete(key) + .into_storage_result()?; + self.charge_gas(gas).into_storage_result()?; + Ok(()) + } + } + }; +} + +// Note: `FullAccessState` writes to a write-log at block-level, while all the +// other `StorageWrite` impls write at tx-level. +macro_rules! impl_storage_write_by_protocol { + ($($type:ty)*) => { + impl StorageWrite for $($type)* + where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, + { + fn write_bytes( + &mut self, + key: &storage::Key, + val: impl AsRef<[u8]>, + ) -> namada_storage::Result<()> { + self + .write_log_mut() + .protocol_write(key, val.as_ref().to_vec()) + .into_storage_result()?; + Ok(()) + } + + fn delete(&mut self, key: &storage::Key) -> namada_storage::Result<()> { + self + .write_log_mut() + .protocol_delete(key) + .into_storage_result()?; + Ok(()) + } + } + }; +} + +impl_storage_read!(FullAccessState); +impl_storage_read!(WlState); +impl_storage_read!(TempWlState<'_, D, H>); +impl_storage_write_by_protocol!(FullAccessState); +impl_storage_write_by_protocol!(WlState); +impl_storage_write_by_protocol!(TempWlState<'_, D, H>); + +impl_storage_read!(TxHostEnvState<'_, D, H>); +impl_storage_read!(VpHostEnvState<'_, D, H>); +impl_storage_write!(TxHostEnvState<'_, D, H>); + +/// The ledger's state +#[derive(Debug)] +pub struct InMemory +where + H: StorageHasher, +{ + /// The ID of the chain + pub chain_id: ChainId, + /// The address of the native token - this is not stored in DB, but read + /// from genesis + pub native_token: Address, + /// Block storage data + pub block: BlockStorage, + /// During `FinalizeBlock`, this is the header of the block that is + /// going to be committed. After a block is committed, this is reset to + /// `None` until the next `FinalizeBlock` phase is reached. + pub header: Option
, + /// The most recently committed block, if any. + pub last_block: Option, + /// The epoch of the most recently committed block. If it is `Epoch(0)`, + /// then no block may have been committed for this chain yet. + pub last_epoch: Epoch, + /// Minimum block height at which the next epoch may start + pub next_epoch_min_start_height: BlockHeight, + /// Minimum block time at which the next epoch may start + pub next_epoch_min_start_time: DateTimeUtc, + /// The current established address generator + pub address_gen: EstablishedAddressGen, + /// We delay the switch to a new epoch by the number of blocks set in here. + /// This is `Some` when minimum number of blocks has been created and + /// minimum time has passed since the beginning of the last epoch. + /// Once the value is `Some(0)`, we're ready to switch to a new epoch and + /// this is reset back to `None`. + pub update_epoch_blocks_delay: Option, + /// The shielded transaction index + pub tx_index: TxIndex, + /// The currently saved conversion state + pub conversion_state: ConversionState, + /// Wrapper txs to be decrypted in the next block proposal + pub tx_queue: TxQueue, + /// Queue of expired transactions that need to be retransmitted. + /// + /// These transactions do not need to be persisted, as they are + /// retransmitted at the **COMMIT** phase immediately following + /// the block when they were queued. + pub expired_txs_queue: ExpiredTxsQueue, + /// The latest block height on Ethereum processed, if + /// the bridge is enabled. + pub ethereum_height: Option, + /// The queue of Ethereum events to be processed in order. + pub eth_events_queue: EthEventsQueue, + /// How many block heights in the past can the storage be queried + pub storage_read_past_height_limit: Option, +} + +/// Last committed block +#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] +pub struct LastBlock { + /// Block height + pub height: BlockHeight, + /// Block hash + pub hash: BlockHash, + /// Block time + pub time: DateTimeUtc, +} + +/// The block storage data +#[derive(Debug)] +pub struct BlockStorage { + /// Merkle tree of all the other data in block storage + pub tree: MerkleTree, + /// During `FinalizeBlock`, this is updated to be the hash of the block + /// that is going to be committed. If it is `BlockHash::default()`, + /// then no `FinalizeBlock` stage has been reached yet. + pub hash: BlockHash, + /// From the start of `FinalizeBlock` until the end of `Commit`, this is + /// height of the block that is going to be committed. Otherwise, it is the + /// height of the most recently committed block, or `BlockHeight::sentinel` + /// (0) if no block has been committed yet. + pub height: BlockHeight, + /// From the start of `FinalizeBlock` until the end of `Commit`, this is + /// height of the block that is going to be committed. Otherwise it is the + /// epoch of the most recently committed block, or `Epoch(0)` if no block + /// has been committed yet. + pub epoch: Epoch, + /// Results of applying transactions + pub results: BlockResults, + /// Predecessor block epochs + pub pred_epochs: Epochs, +} + +pub fn merklize_all_keys(_key: &storage::Key) -> bool { + true +} - /// Get a Tendermint-compatible existence proof. - /// - /// Proofs from the Ethereum bridge pool are not - /// Tendermint-compatible. Requesting for a key - /// belonging to the bridge pool will cause this - /// method to error. 
- pub fn get_existence_proof( - &self, - key: &Key, - value: namada_merkle_tree::StorageBytes, - height: BlockHeight, - ) -> Result { - use std::array; +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum Error { + #[error("TEMPORARY error: {error}")] + Temporary { error: String }, + #[error("Found an unknown key: {key}")] + UnknownKey { key: String }, + #[error("Storage key error {0}")] + KeyError(namada_core::storage::Error), + #[error("Coding error: {0}")] + CodingError(#[from] namada_core::DecodeError), + #[error("Merkle tree error: {0}")] + MerkleTreeError(MerkleTreeError), + #[error("DB error: {0}")] + DBError(String), + #[error("Borsh (de)-serialization error: {0}")] + BorshCodingError(std::io::Error), + #[error("Merkle tree at the height {height} is not stored")] + NoMerkleTree { height: BlockHeight }, + #[error("Code hash error: {0}")] + InvalidCodeHash(HashError), + #[error("DB error: {0}")] + DbError(#[from] namada_storage::DbError), + #[error("{0}")] + Gas(namada_gas::Error), + #[error("{0}")] + StorageError(#[from] namada_storage::Error), +} - // `0` means last committed height - let height = if height == BlockHeight(0) { - self.get_last_block_height() - } else { - height +impl InMemory +where + H: StorageHasher, +{ + /// Create a new instance of the state + pub fn new( + chain_id: ChainId, + native_token: Address, + storage_read_past_height_limit: Option, + ) -> Self { + let block = BlockStorage { + tree: MerkleTree::default(), + hash: BlockHash::default(), + height: BlockHeight::default(), + epoch: Epoch::default(), + pred_epochs: Epochs::default(), + results: BlockResults::default(), }; + InMemory:: { + chain_id, + block, + header: None, + last_block: None, + last_epoch: Epoch::default(), + next_epoch_min_start_height: BlockHeight::default(), + next_epoch_min_start_time: DateTimeUtc::now(), + address_gen: EstablishedAddressGen::new( + "Privacy is a function of liberty.", + ), + update_epoch_blocks_delay: None, + tx_index: TxIndex::default(), + conversion_state: ConversionState::default(), + tx_queue: TxQueue::default(), + expired_txs_queue: ExpiredTxsQueue::default(), + native_token, + ethereum_height: None, + eth_events_queue: EthEventsQueue::default(), + storage_read_past_height_limit, + } + } - if height > self.get_last_block_height() { - if let MembershipProof::ICS23(proof) = self - .block - .tree - .get_sub_tree_existence_proof(array::from_ref(key), vec![value]) - .map_err(Error::MerkleTreeError)? - { - self.block - .tree - .get_sub_tree_proof(key, proof) - .map(Into::into) - .map_err(Error::MerkleTreeError) - } else { - Err(Error::MerkleTreeError(MerkleTreeError::TendermintProof)) - } + /// Returns the Merkle root hash and the height of the committed block. If + /// no block exists, returns None. + pub fn get_state(&self) -> Option<(MerkleRoot, u64)> { + if self.block.height.0 != 0 { + Some((self.block.tree.root(), self.block.height.0)) } else { - let (store_type, _) = StoreType::sub_key(key)?; - let tree = self.get_merkle_tree(height, Some(store_type))?; - if let MembershipProof::ICS23(proof) = tree - .get_sub_tree_existence_proof(array::from_ref(key), vec![value]) - .map_err(Error::MerkleTreeError)? 
- { - tree.get_sub_tree_proof(key, proof) - .map(Into::into) - .map_err(Error::MerkleTreeError) - } else { - Err(Error::MerkleTreeError(MerkleTreeError::TendermintProof)) - } + None } } - /// Get the non-existence proof - pub fn get_non_existence_proof( - &self, - key: &Key, + /// Find the root hash of the merkle tree + pub fn merkle_root(&self) -> MerkleRoot { + self.block.tree.root() + } + + /// Set the block header. + /// The header is not in the Merkle tree as it's tracked by Tendermint. + /// Hence, we don't update the tree when this is set. + pub fn set_header(&mut self, header: Header) -> Result<()> { + self.header = Some(header); + Ok(()) + } + + /// Block data is in the Merkle tree as it's tracked by Tendermint in the + /// block header. Hence, we don't update the tree when this is set. + pub fn begin_block( + &mut self, + hash: BlockHash, height: BlockHeight, - ) -> Result { - // `0` means last committed height - let height = if height == BlockHeight(0) { - self.get_last_block_height() - } else { - height - }; + ) -> Result<()> { + self.block.hash = hash; + self.block.height = height; + Ok(()) + } - if height > self.get_last_block_height() { - Err(Error::Temporary { - error: format!( - "The block at the height {} hasn't committed yet", - height, - ), - }) - } else { - let (store_type, _) = StoreType::sub_key(key)?; - self.get_merkle_tree(height, Some(store_type))? - .get_non_existence_proof(key) - .map(Into::into) - .map_err(Error::MerkleTreeError) - } + /// Get the chain ID as a raw string + pub fn get_chain_id(&self) -> (String, u64) { + ( + self.chain_id.to_string(), + CHAIN_ID_LENGTH as u64 * MEMORY_ACCESS_GAS_PER_BYTE, + ) + } + + /// Get the block height + pub fn get_block_height(&self) -> (BlockHeight, u64) { + ( + self.block.height, + BLOCK_HEIGHT_LENGTH as u64 * MEMORY_ACCESS_GAS_PER_BYTE, + ) + } + + /// Get the block hash + pub fn get_block_hash(&self) -> (BlockHash, u64) { + ( + self.block.hash.clone(), + BLOCK_HASH_LENGTH as u64 * MEMORY_ACCESS_GAS_PER_BYTE, + ) } /// Get the current (yet to be committed) block epoch @@ -791,63 +1928,24 @@ where EPOCH_TYPE_LENGTH as u64 * MEMORY_ACCESS_GAS_PER_BYTE, ) } - - /// Initialize the first epoch. The first epoch begins at genesis time. - pub fn init_genesis_epoch( - &mut self, - initial_height: BlockHeight, - genesis_time: DateTimeUtc, - parameters: &Parameters, - ) -> Result<()> { - let EpochDuration { - min_num_of_blocks, - min_duration, - } = parameters.epoch_duration; - self.next_epoch_min_start_height = initial_height + min_num_of_blocks; - self.next_epoch_min_start_time = genesis_time + min_duration; - self.block.pred_epochs = Epochs { - first_block_heights: vec![initial_height], - }; - self.update_epoch_in_merkle_tree() - } - - /// Get the block header - pub fn get_block_header( - &self, - height: Option, - ) -> Result<(Option
, u64)> { - match height { - Some(h) if h == self.get_block_height().0 => { - let header = self.header.clone(); - let gas = match header { - Some(ref header) => { - header.encoded_len() as u64 * MEMORY_ACCESS_GAS_PER_BYTE - } - None => MEMORY_ACCESS_GAS_PER_BYTE, - }; - Ok((header, gas)) - } - Some(h) => match self.db.read_block_header(h)? { - Some(header) => { - let gas = header.encoded_len() as u64 - * STORAGE_ACCESS_GAS_PER_BYTE; - Ok((Some(header), gas)) - } - None => Ok((None, STORAGE_ACCESS_GAS_PER_BYTE)), - }, - None => Ok((self.header.clone(), STORAGE_ACCESS_GAS_PER_BYTE)), - } - } - - /// Get the timestamp of the last committed block, or the current timestamp - /// if no blocks have been produced yet - pub fn get_last_block_timestamp(&self) -> Result { - let last_block_height = self.get_block_height().0; - - Ok(self - .db - .read_block_header(last_block_height)? - .map_or_else(DateTimeUtc::now, |header| header.time)) + + /// Initialize the first epoch. The first epoch begins at genesis time. + pub fn init_genesis_epoch( + &mut self, + initial_height: BlockHeight, + genesis_time: DateTimeUtc, + parameters: &Parameters, + ) -> Result<()> { + let EpochDuration { + min_num_of_blocks, + min_duration, + } = parameters.epoch_duration; + self.next_epoch_min_start_height = initial_height + min_num_of_blocks; + self.next_epoch_min_start_time = genesis_time + min_duration; + self.block.pred_epochs = Epochs { + first_block_heights: vec![initial_height], + }; + self.update_epoch_in_merkle_tree() } /// Get the current conversions @@ -882,118 +1980,6 @@ where Ok(()) } - /// Start write batch. - pub fn batch() -> D::WriteBatch { - D::batch() - } - - /// Execute write batch. - pub fn exec_batch(&mut self, batch: D::WriteBatch) -> Result<()> { - Ok(self.db.exec_batch(batch)?) - } - - /// Batch write the value with the given height and account subspace key to - /// the DB. Returns the size difference from previous value, if any, or - /// the size of the value otherwise. - pub fn batch_write_subspace_val( - &mut self, - batch: &mut D::WriteBatch, - key: &Key, - value: impl AsRef<[u8]>, - ) -> Result { - let value = value.as_ref(); - let is_key_merklized = (self.merkle_tree_key_filter)(key); - - if is_pending_transfer_key(key) { - // The tree of the bridge pool stores the current height for the - // pending transfer - let height = self.block.height.serialize_to_vec(); - self.block.tree.update(key, height)?; - } else { - // Update the merkle tree - if is_key_merklized { - self.block.tree.update(key, value)?; - } - } - Ok(self.db.batch_write_subspace_val( - batch, - self.block.height, - key, - value, - is_key_merklized, - )?) - } - - /// Batch delete the value with the given height and account subspace key - /// from the DB. Returns the size of the removed value, if any, 0 if no - /// previous value was found. - pub fn batch_delete_subspace_val( - &mut self, - batch: &mut D::WriteBatch, - key: &Key, - ) -> Result { - let is_key_merklized = (self.merkle_tree_key_filter)(key); - // Update the merkle tree - if is_key_merklized { - self.block.tree.delete(key)?; - } - Ok(self.db.batch_delete_subspace_val( - batch, - self.block.height, - key, - is_key_merklized, - )?) - } - - // Prune merkle tree stores. Use after updating self.block.height in the - // commit. 
- fn prune_merkle_tree_stores( - &mut self, - batch: &mut D::WriteBatch, - ) -> Result<()> { - if self.block.epoch.0 == 0 { - return Ok(()); - } - // Prune non-provable stores at the previous epoch - for st in StoreType::iter_non_provable() { - self.db.prune_merkle_tree_store( - batch, - st, - self.block.epoch.prev(), - )?; - } - // Prune provable stores - let oldest_epoch = self.get_oldest_epoch(); - if oldest_epoch.0 > 0 { - // Remove stores at the previous epoch because the Merkle tree - // stores at the starting height of the epoch would be used to - // restore stores at a height (> oldest_height) in the epoch - for st in StoreType::iter_provable() { - self.db.prune_merkle_tree_store( - batch, - st, - oldest_epoch.prev(), - )?; - } - - // Prune the BridgePool subtree stores with invalid nonce - let mut epoch = match self.get_oldest_epoch_with_valid_nonce()? { - Some(epoch) => epoch, - None => return Ok(()), - }; - while oldest_epoch < epoch { - epoch = epoch.prev(); - self.db.prune_merkle_tree_store( - batch, - &StoreType::BridgePool, - epoch, - )?; - } - } - - Ok(()) - } - /// Get the height of the last committed block or 0 if no block has been /// committed yet. The first block is at height 1. pub fn get_last_block_height(&self) -> BlockHeight { @@ -1016,84 +2002,145 @@ where .get_epoch(oldest_height) .unwrap_or_default() } +} - /// Get oldest epoch which has the valid signed nonce of the bridge pool - fn get_oldest_epoch_with_valid_nonce(&self) -> Result> { - let last_height = self.get_last_block_height(); - let current_nonce = match self - .db - .read_bridge_pool_signed_nonce(last_height, last_height)? - { - Some(nonce) => nonce, - None => return Ok(None), - }; - let (mut epoch, _) = self.get_last_epoch(); - // We don't need to check the older epochs because their Merkle tree - // snapshots have been already removed - let oldest_epoch = self.get_oldest_epoch(); - // Look up the last valid epoch which has the previous nonce of the - // current one. It has the previous nonce, but it was - // incremented during the epoch. - while 0 < epoch.0 && oldest_epoch <= epoch { - epoch = epoch.prev(); - let height = - match self.block.pred_epochs.get_start_height_of_epoch(epoch) { - Some(h) => h, - None => continue, - }; - let nonce = match self - .db - .read_bridge_pool_signed_nonce(height, last_height)? - { - Some(nonce) => nonce, - // skip pruning when the old epoch doesn't have the signed nonce - None => break, - }; - if nonce < current_nonce { - break; - } - } - Ok(Some(epoch)) - } - - /// Check it the given transaction's hash is already present in storage - pub fn has_replay_protection_entry(&self, hash: &Hash) -> Result { - Ok(self.db.has_replay_protection_entry(hash)?) +impl From for Error { + fn from(error: MerkleTreeError) -> Self { + Self::MerkleTreeError(error) } +} - /// Write the provided tx hash to storage - pub fn write_replay_protection_entry( - &mut self, - batch: &mut D::WriteBatch, - key: &Key, - ) -> Result<()> { - self.db.write_replay_protection_entry(batch, key)?; - Ok(()) - } +/// Prefix iterator for [`StorageRead`] implementations. 
+#[derive(Debug)]
+pub struct PrefixIter<'iter, D>
+where
+    D: DB + DBIter<'iter>,
+{
+    /// Peekable storage iterator
+    pub storage_iter: Peekable<<D as DBIter<'iter>>::PrefixIter>,
+    /// Peekable write log iterator
+    pub write_log_iter: Peekable<write_log::PrefixIter>,
+}
-    /// Delete the provided tx hash from storage
-    pub fn delete_replay_protection_entry(
-        &mut self,
-        batch: &mut D::WriteBatch,
-        key: &Key,
-    ) -> Result<()> {
-        self.db.delete_replay_protection_entry(batch, key)?;
-        Ok(())
-    }
+/// Iterate write-log storage items prior to a tx execution, matching the
+/// given prefix. Returns the iterator and gas cost.
+pub fn iter_prefix_pre<'a, D>(
+    // We cannot use e.g. `&'a State`, because it doesn't live long
+    // enough - the lifetime of the `PrefixIter` must depend on the lifetime of
+    // references to the `WriteLog` and `DB`.
+    write_log: &'a WriteLog,
+    db: &'a D,
+    prefix: &storage::Key,
+) -> (PrefixIter<'a, D>, u64)
+where
+    D: DB + for<'iter> DBIter<'iter>,
+{
+    let storage_iter = db.iter_prefix(Some(prefix)).peekable();
+    let write_log_iter = write_log.iter_prefix_pre(prefix).peekable();
+    (
+        PrefixIter::<D> {
+            storage_iter,
+            write_log_iter,
+        },
+        prefix.len() as u64 * namada_gas::STORAGE_ACCESS_GAS_PER_BYTE,
+    )
+}
-    /// Iterate the replay protection storage from the last block
-    pub fn iter_replay_protection(
-        &self,
-    ) -> Box<dyn Iterator<Item = Hash> + '_> {
-        Box::new(self.db.iter_replay_protection().map(|(raw_key, _, _)| {
-            raw_key.parse().expect("Failed hash conversion")
-        }))
-    }
+/// Iterate write-log storage items posterior to a tx execution, matching the
+/// given prefix. Returns the iterator and gas cost.
+pub fn iter_prefix_post<'a, D>(
+    // We cannot use e.g. `&'a State`, because it doesn't live long
+    // enough - the lifetime of the `PrefixIter` must depend on the lifetime of
+    // references to the `WriteLog` and `DB`.
+ write_log: &'a WriteLog, + db: &'a D, + prefix: &storage::Key, +) -> (PrefixIter<'a, D>, u64) +where + D: DB + for<'iter> DBIter<'iter>, +{ + let storage_iter = db.iter_prefix(Some(prefix)).peekable(); + let write_log_iter = write_log.iter_prefix_post(prefix).peekable(); + ( + PrefixIter:: { + storage_iter, + write_log_iter, + }, + prefix.len() as u64 * namada_gas::STORAGE_ACCESS_GAS_PER_BYTE, + ) } -impl From for Error { - fn from(error: MerkleTreeError) -> Self { - Self::MerkleTreeError(error) +impl<'iter, D> Iterator for PrefixIter<'iter, D> +where + D: DB + DBIter<'iter>, +{ + type Item = (String, Vec, u64); + + fn next(&mut self) -> Option { + enum Next { + ReturnWl { advance_storage: bool }, + ReturnStorage, + } + loop { + let what: Next; + { + let storage_peeked = self.storage_iter.peek(); + let wl_peeked = self.write_log_iter.peek(); + match (storage_peeked, wl_peeked) { + (None, None) => return None, + (None, Some(_)) => { + what = Next::ReturnWl { + advance_storage: false, + }; + } + (Some(_), None) => { + what = Next::ReturnStorage; + } + (Some((storage_key, _, _)), Some((wl_key, _))) => { + if wl_key <= storage_key { + what = Next::ReturnWl { + advance_storage: wl_key == storage_key, + }; + } else { + what = Next::ReturnStorage; + } + } + } + } + match what { + Next::ReturnWl { advance_storage } => { + if advance_storage { + let _ = self.storage_iter.next(); + } + + if let Some((key, modification)) = + self.write_log_iter.next() + { + match modification { + write_log::StorageModification::Write { value } + | write_log::StorageModification::Temp { value } => { + let gas = value.len() as u64; + return Some((key, value, gas)); + } + write_log::StorageModification::InitAccount { + vp_code_hash, + } => { + let gas = vp_code_hash.len() as u64; + return Some((key, vp_code_hash.to_vec(), gas)); + } + write_log::StorageModification::Delete => { + continue; + } + } + } + } + Next::ReturnStorage => { + if let Some(next) = self.storage_iter.next() { + return Some(next); + } + } + } + } } } @@ -1106,17 +2153,23 @@ pub mod testing { use super::mockdb::MockDB; use super::*; - /// `WlStorage` with a mock DB for testing - pub type TestWlStorage = WlStorage; + pub type TestState = FullAccessState; - /// Storage with a mock DB for testing. - /// - /// Prefer to use [`TestWlStorage`], which implements - /// `namada_storageStorageRead + StorageWrite` with properly working - /// `prefix_iter`. - pub type TestStorage = State; + impl Default for TestState { + fn default() -> Self { + Self(WlState { + write_log: Default::default(), + db: MockDB::default(), + in_mem: Default::default(), + merkle_tree_key_filter: merklize_all_keys, + }) + } + } + + /// In memory State for testing. 
+ pub type InMemoryState = InMemory; - impl Default for TestStorage { + impl Default for InMemoryState { fn default() -> Self { let chain_id = ChainId::default(); let tree = MerkleTree::default(); @@ -1129,7 +2182,6 @@ pub mod testing { results: BlockResults::default(), }; Self { - db: MockDB::default(), chain_id, block, header: None, @@ -1149,17 +2201,6 @@ pub mod testing { ethereum_height: None, eth_events_queue: EthEventsQueue::default(), storage_read_past_height_limit: Some(1000), - merkle_tree_key_filter: merklize_all_keys, - } - } - } - - #[allow(clippy::derivable_impls)] - impl Default for TestWlStorage { - fn default() -> Self { - Self { - write_log: Default::default(), - storage: Default::default(), } } } @@ -1237,17 +2278,11 @@ mod tests { min_blocks_delta, min_duration_delta, max_time_per_block_delta) in arb_and_epoch_duration_start_and_block()) { - let mut wl_storage = - TestWlStorage { - storage: TestStorage { - next_epoch_min_start_height: - start_height + epoch_duration.min_num_of_blocks, - next_epoch_min_start_time: - start_time + epoch_duration.min_duration, - ..Default::default() - }, - ..Default::default() - }; + let mut state =TestState::default(); + state.in_mem_mut().next_epoch_min_start_height= + start_height + epoch_duration.min_num_of_blocks; + state.in_mem_mut().next_epoch_min_start_time= + start_time + epoch_duration.min_duration; let mut parameters = Parameters { max_tx_bytes: 1024 * 1024, max_proposal_bytes: Default::default(), @@ -1265,19 +2300,20 @@ mod tests { fee_unshielding_descriptions_limit: 15, minimum_gas_price: BTreeMap::default(), }; - namada_parameters::init_storage(¶meters, &mut wl_storage).unwrap(); + namada_parameters::init_storage(¶meters, &mut state).unwrap(); // Initialize pred_epochs to the current height - wl_storage - .storage + let height = state.in_mem().block.height; + state + .in_mem_mut() .block .pred_epochs - .new_epoch(wl_storage.storage.block.height); + .new_epoch(height); - let epoch_before = wl_storage.storage.last_epoch; - assert_eq!(epoch_before, wl_storage.storage.block.epoch); + let epoch_before = state.in_mem().last_epoch; + assert_eq!(epoch_before, state.in_mem().block.epoch); // Try to apply the epoch update - wl_storage.update_epoch(block_height, block_time).unwrap(); + state.update_epoch(block_height, block_time).unwrap(); // Test for 1. 
if block_height.0 - start_height.0 @@ -1289,43 +2325,43 @@ mod tests { ) { // Update will now be enqueued for 2 blocks in the future - assert_eq!(wl_storage.storage.block.epoch, epoch_before); - assert_eq!(wl_storage.storage.update_epoch_blocks_delay, Some(2)); + assert_eq!(state.in_mem().block.epoch, epoch_before); + assert_eq!(state.in_mem().update_epoch_blocks_delay, Some(2)); let block_height = block_height + 1; let block_time = block_time + Duration::seconds(1); - wl_storage.update_epoch(block_height, block_time).unwrap(); - assert_eq!(wl_storage.storage.block.epoch, epoch_before); - assert_eq!(wl_storage.storage.update_epoch_blocks_delay, Some(1)); + state.update_epoch(block_height, block_time).unwrap(); + assert_eq!(state.in_mem().block.epoch, epoch_before); + assert_eq!(state.in_mem().update_epoch_blocks_delay, Some(1)); let block_height = block_height + 1; let block_time = block_time + Duration::seconds(1); - wl_storage.update_epoch(block_height, block_time).unwrap(); - assert_eq!(wl_storage.storage.block.epoch, epoch_before.next()); - assert!(wl_storage.storage.update_epoch_blocks_delay.is_none()); + state.update_epoch(block_height, block_time).unwrap(); + assert_eq!(state.in_mem().block.epoch, epoch_before.next()); + assert!(state.in_mem().update_epoch_blocks_delay.is_none()); - assert_eq!(wl_storage.storage.next_epoch_min_start_height, + assert_eq!(state.in_mem().next_epoch_min_start_height, block_height + epoch_duration.min_num_of_blocks); - assert_eq!(wl_storage.storage.next_epoch_min_start_time, + assert_eq!(state.in_mem().next_epoch_min_start_time, block_time + epoch_duration.min_duration); assert_eq!( - wl_storage.storage.block.pred_epochs.get_epoch(BlockHeight(block_height.0 - 1)), + state.in_mem().block.pred_epochs.get_epoch(BlockHeight(block_height.0 - 1)), Some(epoch_before)); assert_eq!( - wl_storage.storage.block.pred_epochs.get_epoch(block_height), + state.in_mem().block.pred_epochs.get_epoch(block_height), Some(epoch_before.next())); } else { - assert!(wl_storage.storage.update_epoch_blocks_delay.is_none()); - assert_eq!(wl_storage.storage.block.epoch, epoch_before); + assert!(state.in_mem().update_epoch_blocks_delay.is_none()); + assert_eq!(state.in_mem().block.epoch, epoch_before); assert_eq!( - wl_storage.storage.block.pred_epochs.get_epoch(BlockHeight(block_height.0 - 1)), + state.in_mem().block.pred_epochs.get_epoch(BlockHeight(block_height.0 - 1)), Some(epoch_before)); assert_eq!( - wl_storage.storage.block.pred_epochs.get_epoch(block_height), + state.in_mem().block.pred_epochs.get_epoch(block_height), Some(epoch_before)); } // Last epoch should only change when the block is committed - assert_eq!(wl_storage.storage.last_epoch, epoch_before); + assert_eq!(state.in_mem().last_epoch, epoch_before); // Update the epoch duration parameters parameters.epoch_duration.min_num_of_blocks = @@ -1335,57 +2371,57 @@ mod tests { Duration::seconds(min_duration + min_duration_delta).into(); parameters.max_expected_time_per_block = Duration::seconds(max_expected_time_per_block + max_time_per_block_delta).into(); - namada_parameters::update_max_expected_time_per_block_parameter(&mut wl_storage, ¶meters.max_expected_time_per_block).unwrap(); - namada_parameters::update_epoch_parameter(&mut wl_storage, ¶meters.epoch_duration).unwrap(); + namada_parameters::update_max_expected_time_per_block_parameter(&mut state, ¶meters.max_expected_time_per_block).unwrap(); + namada_parameters::update_epoch_parameter(&mut state, ¶meters.epoch_duration).unwrap(); // Test for 2. 
- let epoch_before = wl_storage.storage.block.epoch; - let height_of_update = wl_storage.storage.next_epoch_min_start_height.0 ; - let time_of_update = wl_storage.storage.next_epoch_min_start_time; + let epoch_before = state.in_mem().block.epoch; + let height_of_update = state.in_mem().next_epoch_min_start_height.0 ; + let time_of_update = state.in_mem().next_epoch_min_start_time; let height_before_update = BlockHeight(height_of_update - 1); let height_of_update = BlockHeight(height_of_update); let time_before_update = time_of_update - Duration::seconds(1); // No update should happen before both epoch duration conditions are // satisfied - wl_storage.update_epoch(height_before_update, time_before_update).unwrap(); - assert_eq!(wl_storage.storage.block.epoch, epoch_before); - assert!(wl_storage.storage.update_epoch_blocks_delay.is_none()); - wl_storage.update_epoch(height_of_update, time_before_update).unwrap(); - assert_eq!(wl_storage.storage.block.epoch, epoch_before); - assert!(wl_storage.storage.update_epoch_blocks_delay.is_none()); - wl_storage.update_epoch(height_before_update, time_of_update).unwrap(); - assert_eq!(wl_storage.storage.block.epoch, epoch_before); - assert!(wl_storage.storage.update_epoch_blocks_delay.is_none()); + state.update_epoch(height_before_update, time_before_update).unwrap(); + assert_eq!(state.in_mem().block.epoch, epoch_before); + assert!(state.in_mem().update_epoch_blocks_delay.is_none()); + state.update_epoch(height_of_update, time_before_update).unwrap(); + assert_eq!(state.in_mem().block.epoch, epoch_before); + assert!(state.in_mem().update_epoch_blocks_delay.is_none()); + state.update_epoch(height_before_update, time_of_update).unwrap(); + assert_eq!(state.in_mem().block.epoch, epoch_before); + assert!(state.in_mem().update_epoch_blocks_delay.is_none()); // Update should be enqueued for 2 blocks in the future starting at or after this height and time - wl_storage.update_epoch(height_of_update, time_of_update).unwrap(); - assert_eq!(wl_storage.storage.block.epoch, epoch_before); - assert_eq!(wl_storage.storage.update_epoch_blocks_delay, Some(2)); + state.update_epoch(height_of_update, time_of_update).unwrap(); + assert_eq!(state.in_mem().block.epoch, epoch_before); + assert_eq!(state.in_mem().update_epoch_blocks_delay, Some(2)); // Increment the block height and time to simulate new blocks now let height_of_update = height_of_update + 1; let time_of_update = time_of_update + Duration::seconds(1); - wl_storage.update_epoch(height_of_update, time_of_update).unwrap(); - assert_eq!(wl_storage.storage.block.epoch, epoch_before); - assert_eq!(wl_storage.storage.update_epoch_blocks_delay, Some(1)); + state.update_epoch(height_of_update, time_of_update).unwrap(); + assert_eq!(state.in_mem().block.epoch, epoch_before); + assert_eq!(state.in_mem().update_epoch_blocks_delay, Some(1)); let height_of_update = height_of_update + 1; let time_of_update = time_of_update + Duration::seconds(1); - wl_storage.update_epoch(height_of_update, time_of_update).unwrap(); - assert_eq!(wl_storage.storage.block.epoch, epoch_before.next()); - assert!(wl_storage.storage.update_epoch_blocks_delay.is_none()); + state.update_epoch(height_of_update, time_of_update).unwrap(); + assert_eq!(state.in_mem().block.epoch, epoch_before.next()); + assert!(state.in_mem().update_epoch_blocks_delay.is_none()); // The next epoch's minimum duration should change - assert_eq!(wl_storage.storage.next_epoch_min_start_height, + assert_eq!(state.in_mem().next_epoch_min_start_height, height_of_update + 
parameters.epoch_duration.min_num_of_blocks); - assert_eq!(wl_storage.storage.next_epoch_min_start_time, + assert_eq!(state.in_mem().next_epoch_min_start_time, time_of_update + parameters.epoch_duration.min_duration); // Increment the block height and time once more to make sure things reset let height_of_update = height_of_update + 1; let time_of_update = time_of_update + Duration::seconds(1); - wl_storage.update_epoch(height_of_update, time_of_update).unwrap(); - assert_eq!(wl_storage.storage.block.epoch, epoch_before.next()); + state.update_epoch(height_of_update, time_of_update).unwrap(); + assert_eq!(state.in_mem().block.epoch, epoch_before.next()); } } @@ -1403,10 +2439,10 @@ mod tests { #[test] fn test_writing_without_merklizing_or_diffs() { - let mut wls = TestWlStorage::default(); - assert_eq!(wls.storage.block.height.0, 0); + let mut state = TestState::default(); + assert_eq!(state.in_mem().block.height.0, 0); - (wls.storage.merkle_tree_key_filter) = merkle_tree_key_filter; + (state.0.merkle_tree_key_filter) = merkle_tree_key_filter; let key1 = test_key_1(); let val1 = 1u64; @@ -1414,60 +2450,59 @@ mod tests { let val2 = 2u64; // Standard write of key-val-1 - wls.write(&key1, val1).unwrap(); + state.write(&key1, val1).unwrap(); - // Read from WlStorage should return val1 - let res = wls.read::(&key1).unwrap().unwrap(); + // Read from State should return val1 + let res = state.read::(&key1).unwrap().unwrap(); assert_eq!(res, val1); - // Read from Storage shouldn't return val1 bc the block hasn't been + // Read from DB shouldn't return val1 bc the block hasn't been // committed - let (res, _) = wls.storage.read(&key1).unwrap(); + let (res, _) = state.db_read(&key1).unwrap(); assert!(res.is_none()); // Write key-val-2 without merklizing or diffs - wls.write(&key2, val2).unwrap(); + state.write(&key2, val2).unwrap(); - // Read from WlStorage should return val2 - let res = wls.read::(&key2).unwrap().unwrap(); + // Read from state should return val2 + let res = state.read::(&key2).unwrap().unwrap(); assert_eq!(res, val2); // Commit block and storage changes - wls.commit_block().unwrap(); - wls.storage.block.height = wls.storage.block.height.next_height(); + state.commit_block().unwrap(); + state.in_mem_mut().block.height = + state.in_mem().block.height.next_height(); - // Read key1 from Storage should return val1 - let (res1, _) = wls.storage.read(&key1).unwrap(); + // Read key1 from DB should return val1 + let (res1, _) = state.db_read(&key1).unwrap(); let res1 = u64::try_from_slice(&res1.unwrap()).unwrap(); assert_eq!(res1, val1); // Check merkle tree inclusion of key-val-1 explicitly - let is_merklized1 = wls.storage.block.tree.has_key(&key1).unwrap(); + let is_merklized1 = state.in_mem().block.tree.has_key(&key1).unwrap(); assert!(is_merklized1); // Key2 should be in storage. 
Confirm by reading from - // WlStorage and also by reading Storage subspace directly - let res2 = wls.read::(&key2).unwrap().unwrap(); + // state and also by reading DB subspace directly + let res2 = state.read::(&key2).unwrap().unwrap(); assert_eq!(res2, val2); - let res2 = wls.storage.db.read_subspace_val(&key2).unwrap().unwrap(); + let res2 = state.db().read_subspace_val(&key2).unwrap().unwrap(); let res2 = u64::try_from_slice(&res2).unwrap(); assert_eq!(res2, val2); // Check explicitly that key-val-2 is not in merkle tree - let is_merklized2 = wls.storage.block.tree.has_key(&key2).unwrap(); + let is_merklized2 = state.in_mem().block.tree.has_key(&key2).unwrap(); assert!(!is_merklized2); // Check that the proper diffs exist for key-val-1 - let res1 = wls - .storage - .db + let res1 = state + .db() .read_diffs_val(&key1, Default::default(), true) .unwrap(); assert!(res1.is_none()); - let res1 = wls - .storage - .db + let res1 = state + .db() .read_diffs_val(&key1, Default::default(), false) .unwrap() .unwrap(); @@ -1476,15 +2511,13 @@ mod tests { // Check that there are diffs for key-val-2 in block 0, since all keys // need to have diffs for at least 1 block for rollback purposes - let res2 = wls - .storage - .db + let res2 = state + .db() .read_diffs_val(&key2, BlockHeight(0), true) .unwrap(); assert!(res2.is_none()); - let res2 = wls - .storage - .db + let res2 = state + .db() .read_diffs_val(&key2, BlockHeight(0), false) .unwrap() .unwrap(); @@ -1492,86 +2525,79 @@ mod tests { assert_eq!(res2, val2); // Now delete the keys properly - wls.delete(&key1).unwrap(); - wls.delete(&key2).unwrap(); + state.delete(&key1).unwrap(); + state.delete(&key2).unwrap(); // Commit the block again - wls.commit_block().unwrap(); - wls.storage.block.height = wls.storage.block.height.next_height(); + state.commit_block().unwrap(); + state.in_mem_mut().block.height = + state.in_mem().block.height.next_height(); // Check the key-vals are removed from the storage subspace - let res1 = wls.read::(&key1).unwrap(); - let res2 = wls.read::(&key2).unwrap(); + let res1 = state.read::(&key1).unwrap(); + let res2 = state.read::(&key2).unwrap(); assert!(res1.is_none() && res2.is_none()); - let res1 = wls.storage.db.read_subspace_val(&key1).unwrap(); - let res2 = wls.storage.db.read_subspace_val(&key2).unwrap(); + let res1 = state.db().read_subspace_val(&key1).unwrap(); + let res2 = state.db().read_subspace_val(&key2).unwrap(); assert!(res1.is_none() && res2.is_none()); // Check that the key-vals don't exist in the merkle tree anymore - let is_merklized1 = wls.storage.block.tree.has_key(&key1).unwrap(); - let is_merklized2 = wls.storage.block.tree.has_key(&key2).unwrap(); + let is_merklized1 = state.in_mem().block.tree.has_key(&key1).unwrap(); + let is_merklized2 = state.in_mem().block.tree.has_key(&key2).unwrap(); assert!(!is_merklized1 && !is_merklized2); // Check that key-val-1 diffs are properly updated for blocks 0 and 1 - let res1 = wls - .storage - .db + let res1 = state + .db() .read_diffs_val(&key1, BlockHeight(0), true) .unwrap(); assert!(res1.is_none()); - let res1 = wls - .storage - .db + let res1 = state + .db() .read_diffs_val(&key1, BlockHeight(0), false) .unwrap() .unwrap(); let res1 = u64::try_from_slice(&res1).unwrap(); assert_eq!(res1, val1); - let res1 = wls - .storage - .db + let res1 = state + .db() .read_diffs_val(&key1, BlockHeight(1), true) .unwrap() .unwrap(); let res1 = u64::try_from_slice(&res1).unwrap(); assert_eq!(res1, val1); - let res1 = wls - .storage - .db + let res1 = state + .db() 
.read_diffs_val(&key1, BlockHeight(1), false) .unwrap(); assert!(res1.is_none()); // Check that key-val-2 diffs don't exist for block 0 anymore - let res2 = wls - .storage - .db + let res2 = state + .db() .read_diffs_val(&key2, BlockHeight(0), true) .unwrap(); assert!(res2.is_none()); - let res2 = wls - .storage - .db + let res2 = state + .db() .read_diffs_val(&key2, BlockHeight(0), false) .unwrap(); assert!(res2.is_none()); // Check that the block 1 diffs for key-val-2 include an "old" value of // val2 and no "new" value - let res2 = wls - .storage - .db + let res2 = state + .db() .read_diffs_val(&key2, BlockHeight(1), true) .unwrap() .unwrap(); let res2 = u64::try_from_slice(&res2).unwrap(); assert_eq!(res2, val2); - let res2 = wls - .storage - .db + let res2 = state + .db() .read_diffs_val(&key2, BlockHeight(1), false) .unwrap(); assert!(res2.is_none()); diff --git a/crates/state/src/wl_storage.rs b/crates/state/src/wl_storage.rs deleted file mode 100644 index 4dc2f36fae..0000000000 --- a/crates/state/src/wl_storage.rs +++ /dev/null @@ -1,898 +0,0 @@ -//! Storage with write log. - -use std::iter::Peekable; - -use namada_core::address::Address; -use namada_core::hash::{Hash, StorageHasher}; -use namada_core::storage::{self, BlockHeight, Epochs}; -use namada_core::time::DateTimeUtc; -use namada_parameters::EpochDuration; -use namada_storage::conversion_state::{ConversionState, WithConversionState}; -use namada_storage::{ResultExt, StorageRead, StorageWrite}; - -use super::EPOCH_SWITCH_BLOCKS_DELAY; -use crate::write_log::{self, WriteLog}; -use crate::{DBIter, State, DB}; - -/// Storage with write log that allows to implement prefix iterator that works -/// with changes not yet committed to the DB. -#[derive(Debug)] -pub struct WlStorage -where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, -{ - /// Write log - pub write_log: WriteLog, - /// Storage provides access to DB - pub storage: State, -} - -/// Temporary storage that can be used for changes that will never be committed -/// to the DB. This is useful for the shell `PrepareProposal` and -/// `ProcessProposal` handlers that should not change state, but need to apply -/// storage changes for replay protection to validate the proposal. -#[derive(Debug)] -pub struct TempWlStorage<'a, D, H> -where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, -{ - /// Write log - pub write_log: WriteLog, - /// Storage provides access to DB - pub storage: &'a State, -} - -impl<'a, D, H> TempWlStorage<'a, D, H> -where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, -{ - /// Create a temp storage that can mutated in memory, but never committed to - /// DB. - pub fn new(storage: &'a State) -> Self { - Self { - write_log: WriteLog::default(), - storage, - } - } - - /// Check if the given tx hash has already been processed - pub fn has_replay_protection_entry( - &self, - hash: &Hash, - ) -> Result { - if let Some(present) = self.write_log.has_replay_protection_entry(hash) - { - return Ok(present); - } - - self.storage.has_replay_protection_entry(hash) - } - - /// Check if the given tx hash has already been committed to storage - pub fn has_committed_replay_protection_entry( - &self, - hash: &Hash, - ) -> Result { - self.storage.has_replay_protection_entry(hash) - } -} - -/// Common trait for [`WlStorage`] and [`TempWlStorage`], used to implement -/// namada_storage traits. 
-pub trait WriteLogAndStorage { - /// DB type - type D: DB + for<'iter> DBIter<'iter>; - /// DB hasher type - type H: StorageHasher; - - /// Borrow `WriteLog` - fn write_log(&self) -> &WriteLog; - - /// Borrow mutable `WriteLog` - fn write_log_mut(&mut self) -> &mut WriteLog; - - /// Borrow `Storage` - fn storage(&self) -> &State; - - /// Splitting borrow to get immutable reference to the `Storage` and mutable - /// reference to `WriteLog` when in need of both (avoids complain from the - /// borrow checker) - fn split_borrow(&mut self) -> (&mut WriteLog, &State); - - /// Write the provided tx hash to storage. - fn write_tx_hash(&mut self, hash: Hash) -> write_log::Result<()>; -} - -impl WriteLogAndStorage for WlStorage -where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, -{ - type D = D; - type H = H; - - fn write_log(&self) -> &WriteLog { - &self.write_log - } - - fn write_log_mut(&mut self) -> &mut WriteLog { - &mut self.write_log - } - - fn storage(&self) -> &State { - &self.storage - } - - fn split_borrow(&mut self) -> (&mut WriteLog, &State) { - (&mut self.write_log, &self.storage) - } - - fn write_tx_hash(&mut self, hash: Hash) -> write_log::Result<()> { - self.write_log.write_tx_hash(hash) - } -} - -impl WriteLogAndStorage for TempWlStorage<'_, D, H> -where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, -{ - type D = D; - type H = H; - - fn write_log(&self) -> &WriteLog { - &self.write_log - } - - fn write_log_mut(&mut self) -> &mut WriteLog { - &mut self.write_log - } - - fn storage(&self) -> &State { - self.storage - } - - fn split_borrow(&mut self) -> (&mut WriteLog, &State) { - (&mut self.write_log, (self.storage)) - } - - fn write_tx_hash(&mut self, hash: Hash) -> write_log::Result<()> { - self.write_log.write_tx_hash(hash) - } -} - -impl WlStorage -where - D: 'static + DB + for<'iter> DBIter<'iter>, - H: 'static + StorageHasher, -{ - /// Combine storage with write-log - pub fn new(write_log: WriteLog, storage: State) -> Self { - Self { write_log, storage } - } - - /// Commit the current transaction's write log to the block when it's - /// accepted by all the triggered validity predicates. Starts a new - /// transaction write log. - pub fn commit_tx(&mut self) { - self.write_log.commit_tx() - } - - /// Drop the current transaction's write log when it's declined by any of - /// the triggered validity predicates. Starts a new transaction write log. - pub fn drop_tx(&mut self) { - self.write_log.drop_tx() - } - - /// Commit the current block's write log to the storage and commit the block - /// to DB. Starts a new block write log. - pub fn commit_block(&mut self) -> namada_storage::Result<()> { - if self.storage.last_epoch != self.storage.block.epoch { - self.storage - .update_epoch_in_merkle_tree() - .into_storage_result()?; - } - - let mut batch = D::batch(); - self.write_log - .commit_block(&mut self.storage, &mut batch) - .into_storage_result()?; - self.storage.commit_block(batch).into_storage_result() - } - - /// Initialize a new epoch when the current epoch is finished. Returns - /// `true` on a new epoch. - pub fn update_epoch( - &mut self, - height: BlockHeight, - time: DateTimeUtc, - ) -> storage::Result { - let parameters = namada_parameters::read(self) - .expect("Couldn't read protocol parameters"); - - match self.storage.update_epoch_blocks_delay.as_mut() { - None => { - // Check if the new epoch minimum start height and start time - // have been fulfilled. 
If so, queue the next - // epoch to start two blocks into the future so - // as to align validator set updates + etc with - // tendermint. This is because tendermint has a two block delay - // to validator changes. - let current_epoch_duration_satisfied = height - >= self.storage.next_epoch_min_start_height - && time >= self.storage.next_epoch_min_start_time; - if current_epoch_duration_satisfied { - self.storage.update_epoch_blocks_delay = - Some(EPOCH_SWITCH_BLOCKS_DELAY); - } - } - Some(blocks_until_switch) => { - *blocks_until_switch -= 1; - } - }; - let new_epoch = - matches!(self.storage.update_epoch_blocks_delay, Some(0)); - - if new_epoch { - // Reset the delay tracker - self.storage.update_epoch_blocks_delay = None; - - // Begin a new epoch - self.storage.block.epoch = self.storage.block.epoch.next(); - let EpochDuration { - min_num_of_blocks, - min_duration, - } = parameters.epoch_duration; - self.storage.next_epoch_min_start_height = - height + min_num_of_blocks; - self.storage.next_epoch_min_start_time = time + min_duration; - - self.storage.block.pred_epochs.new_epoch(height); - tracing::info!("Began a new epoch {}", self.storage.block.epoch); - } - Ok(new_epoch) - } - - /// Delete the provided transaction's hash from storage. - pub fn delete_tx_hash(&mut self, hash: Hash) -> write_log::Result<()> { - self.write_log.delete_tx_hash(hash) - } - - #[inline] - pub fn get_current_decision_height(&self) -> BlockHeight { - self.storage.get_last_block_height() + 1 - } - - /// Check if we are at a given [`BlockHeight`] offset, `height_offset`, - /// within the current epoch. - pub fn is_deciding_offset_within_epoch(&self, height_offset: u64) -> bool { - let current_decision_height = self.get_current_decision_height(); - - let pred_epochs = &self.storage.block.pred_epochs; - let fst_heights_of_each_epoch = pred_epochs.first_block_heights(); - - fst_heights_of_each_epoch - .last() - .map(|&h| { - let height_offset_within_epoch = h + height_offset; - current_decision_height == height_offset_within_epoch - }) - .unwrap_or(false) - } -} - -/// Prefix iterator for [`WlStorage`]. -#[derive(Debug)] -pub struct PrefixIter<'iter, D> -where - D: DB + DBIter<'iter>, -{ - /// Peekable storage iterator - pub storage_iter: Peekable<>::PrefixIter>, - /// Peekable write log iterator - pub write_log_iter: Peekable, -} - -/// Iterate write-log storage items prior to a tx execution, matching the -/// given prefix. Returns the iterator and gas cost. -pub fn iter_prefix_pre<'iter, D, H>( - // We cannot use e.g. `&'iter WlStorage`, because it doesn't live long - // enough - the lifetime of the `PrefixIter` must depend on the lifetime of - // references to the `WriteLog` and `Storage`. - write_log: &'iter WriteLog, - storage: &'iter State, - prefix: &storage::Key, -) -> (PrefixIter<'iter, D>, u64) -where - D: DB + for<'iter_> DBIter<'iter_>, - H: StorageHasher, -{ - let storage_iter = storage.db.iter_prefix(Some(prefix)).peekable(); - let write_log_iter = write_log.iter_prefix_pre(prefix).peekable(); - ( - PrefixIter { - storage_iter, - write_log_iter, - }, - prefix.len() as u64 * namada_gas::STORAGE_ACCESS_GAS_PER_BYTE, - ) -} - -/// Iterate write-log storage items posterior to a tx execution, matching the -/// given prefix. Returns the iterator and gas cost. -pub fn iter_prefix_post<'iter, D, H>( - // We cannot use e.g. `&'iter WlStorage`, because it doesn't live long - // enough - the lifetime of the `PrefixIter` must depend on the lifetime of - // references to the `WriteLog` and `Storage`. 
- write_log: &'iter WriteLog, - storage: &'iter State, - prefix: &storage::Key, -) -> (PrefixIter<'iter, D>, u64) -where - D: DB + for<'iter_> DBIter<'iter_>, - H: StorageHasher, -{ - let storage_iter = storage.db.iter_prefix(Some(prefix)).peekable(); - let write_log_iter = write_log.iter_prefix_post(prefix).peekable(); - ( - PrefixIter { - storage_iter, - write_log_iter, - }, - prefix.len() as u64 * namada_gas::STORAGE_ACCESS_GAS_PER_BYTE, - ) -} - -impl<'iter, D> Iterator for PrefixIter<'iter, D> -where - D: DB + DBIter<'iter>, -{ - type Item = (String, Vec, u64); - - fn next(&mut self) -> Option { - enum Next { - ReturnWl { advance_storage: bool }, - ReturnStorage, - } - loop { - let what: Next; - { - let storage_peeked = self.storage_iter.peek(); - let wl_peeked = self.write_log_iter.peek(); - match (storage_peeked, wl_peeked) { - (None, None) => return None, - (None, Some(_)) => { - what = Next::ReturnWl { - advance_storage: false, - }; - } - (Some(_), None) => { - what = Next::ReturnStorage; - } - (Some((storage_key, _, _)), Some((wl_key, _))) => { - if wl_key <= storage_key { - what = Next::ReturnWl { - advance_storage: wl_key == storage_key, - }; - } else { - what = Next::ReturnStorage; - } - } - } - } - match what { - Next::ReturnWl { advance_storage } => { - if advance_storage { - let _ = self.storage_iter.next(); - } - - if let Some((key, modification)) = - self.write_log_iter.next() - { - match modification { - write_log::StorageModification::Write { value } - | write_log::StorageModification::Temp { value } => { - let gas = value.len() as u64; - return Some((key, value, gas)); - } - write_log::StorageModification::InitAccount { - vp_code_hash, - } => { - let gas = vp_code_hash.len() as u64; - return Some((key, vp_code_hash.to_vec(), gas)); - } - write_log::StorageModification::Delete => { - continue; - } - } - } - } - Next::ReturnStorage => { - if let Some(next) = self.storage_iter.next() { - return Some(next); - } - } - } - } - } -} - -#[macro_export] -macro_rules! impl_storage_traits { - ($($type:ty)*) => { - impl StorageRead for $($type)* - where - D: 'static + DB + for<'iter> DBIter<'iter>, - H: 'static + StorageHasher, - { - type PrefixIter<'iter> = PrefixIter<'iter, D> where Self: 'iter; - - fn read_bytes( - &self, - key: &storage::Key, - ) -> namada_storage::Result>> { - // try to read from the write log first - let (log_val, _gas) = self.write_log().read(key); - match log_val { - Some(write_log::StorageModification::Write { ref value }) => { - Ok(Some(value.clone())) - } - Some(write_log::StorageModification::Delete) => Ok(None), - Some(write_log::StorageModification::InitAccount { - ref vp_code_hash, - }) => Ok(Some(vp_code_hash.to_vec())), - Some(write_log::StorageModification::Temp { ref value }) => { - Ok(Some(value.clone())) - } - None => { - // when not found in write log, try to read from the storage - self.storage() - .db - .read_subspace_val(key) - .into_storage_result() - } - } - } - - fn has_key(&self, key: &storage::Key) -> namada_storage::Result { - // try to read from the write log first - let (log_val, _gas) = self.write_log().read(key); - match log_val { - Some(&write_log::StorageModification::Write { .. }) - | Some(&write_log::StorageModification::InitAccount { .. }) - | Some(&write_log::StorageModification::Temp { .. 
}) => Ok(true), - Some(&write_log::StorageModification::Delete) => { - // the given key has been deleted - Ok(false) - } - None => { - // when not found in write log, try to check the storage - Ok(self.storage().has_key(key).into_storage_result()?.0) - } - } - } - - fn iter_prefix<'iter>( - &'iter self, - prefix: &storage::Key, - ) -> namada_storage::Result> { - let (iter, _gas) = - iter_prefix_post(self.write_log(), self.storage(), prefix); - Ok(iter) - } - - fn iter_next<'iter>( - &'iter self, - iter: &mut Self::PrefixIter<'iter>, - ) -> namada_storage::Result)>> { - Ok(iter.next().map(|(key, val, _gas)| (key, val))) - } - - fn get_chain_id( - &self, - ) -> std::result::Result { - Ok(self.storage().chain_id.to_string()) - } - - fn get_block_height( - &self, - ) -> std::result::Result { - Ok(self.storage().block.height) - } - - fn get_block_header( - &self, - height: storage::BlockHeight, - ) -> std::result::Result, namada_storage::Error> - { - self.storage() - .db - .read_block_header(height) - .into_storage_result() - } - - fn get_block_hash( - &self, - ) -> std::result::Result { - Ok(self.storage().block.hash.clone()) - } - - fn get_block_epoch( - &self, - ) -> std::result::Result { - Ok(self.storage().block.epoch) - } - - fn get_pred_epochs(&self) -> namada_storage::Result { - Ok(self.storage().block.pred_epochs.clone()) - } - - fn get_tx_index( - &self, - ) -> std::result::Result { - Ok(self.storage().tx_index) - } - - fn get_native_token(&self) -> namada_storage::Result
{ - Ok(self.storage().native_token.clone()) - } - } - - impl StorageWrite for $($type)* - where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, - { - // N.B. Calling this when testing pre- and post- reads in - // regards to testing native vps is incorrect. - fn write_bytes( - &mut self, - key: &storage::Key, - val: impl AsRef<[u8]>, - ) -> namada_storage::Result<()> { - let _ = self - .write_log_mut() - .protocol_write(key, val.as_ref().to_vec()) - .into_storage_result(); - Ok(()) - } - - fn delete(&mut self, key: &storage::Key) -> namada_storage::Result<()> { - let _ = self - .write_log_mut() - .protocol_delete(key) - .into_storage_result(); - Ok(()) - } - } - }; -} -impl_storage_traits!(WlStorage); -impl_storage_traits!(TempWlStorage<'_, D, H>); - -impl WithConversionState for WlStorage -where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, -{ - fn conversion_state(&self) -> &ConversionState { - &self.storage.conversion_state - } - - fn conversion_state_mut(&mut self) -> &mut ConversionState { - &mut self.storage.conversion_state - } -} - -#[cfg(test)] -mod tests { - use std::collections::BTreeMap; - - use namada_core::address::InternalAddress; - use namada_core::borsh::{BorshDeserialize, BorshSerializeExt}; - use namada_core::storage::DbKeySeg; - use proptest::prelude::*; - use proptest::test_runner::Config; - // Use `RUST_LOG=info` (or another tracing level) and `--nocapture` to - // see `tracing` logs from tests - use test_log::test; - - use super::*; - use crate::testing::TestWlStorage; - - proptest! { - // Generate arb valid input for `test_prefix_iters_aux` - #![proptest_config(Config { - cases: 10, - .. Config::default() - })] - #[test] - fn test_prefix_iters( - key_vals in arb_key_vals(30), - ) { - test_prefix_iters_aux(key_vals) - } - } - - /// Check the `prefix_iter_pre` and `prefix_iter_post` return expected - /// values, generated in the input to this function - fn test_prefix_iters_aux(kvs: Vec>) { - let mut s = TestWlStorage::default(); - - // Partition the tx and storage kvs - let (tx_kvs, rest): (Vec<_>, Vec<_>) = kvs - .into_iter() - .partition(|(_key, val)| matches!(val, Level::TxWriteLog(_))); - // Partition the kvs to only apply block level first - let (block_kvs, storage_kvs): (Vec<_>, Vec<_>) = rest - .into_iter() - .partition(|(_key, val)| matches!(val, Level::BlockWriteLog(_))); - - // Apply the kvs in order of the levels - apply_to_wl_storage(&mut s, &storage_kvs); - apply_to_wl_storage(&mut s, &block_kvs); - apply_to_wl_storage(&mut s, &tx_kvs); - - // Collect the expected values in prior state - storage level then block - let mut expected_pre = BTreeMap::new(); - for (key, val) in storage_kvs { - if let Level::Storage(val) = val { - expected_pre.insert(key, val); - } - } - for (key, val) in &block_kvs { - if let Level::BlockWriteLog(WlMod::Write(val)) = val { - expected_pre.insert(key.clone(), *val); - } - } - for (key, val) in &block_kvs { - // Deletes have to be applied last - if let Level::BlockWriteLog(WlMod::Delete) = val { - expected_pre.remove(key); - } else if let Level::BlockWriteLog(WlMod::DeletePrefix) = val { - expected_pre.retain(|expected_key, _val| { - // Remove matching prefixes except for VPs - expected_key.is_validity_predicate().is_some() - || expected_key.split_prefix(key).is_none() - }) - } - } - - // Collect the values from prior state prefix iterator - let (iter_pre, _gas) = - iter_prefix_pre(&s.write_log, &s.storage, &storage::Key::default()); - let mut read_pre = BTreeMap::new(); - for (key, val, _gas) in iter_pre { - 
let key = storage::Key::parse(key).unwrap(); - let val: i8 = BorshDeserialize::try_from_slice(&val).unwrap(); - read_pre.insert(key, val); - } - - // A helper for dbg - let keys_to_string = |kvs: &BTreeMap| { - kvs.iter() - .map(|(key, val)| (key.to_string(), *val)) - .collect::>() - }; - dbg!(keys_to_string(&expected_pre), keys_to_string(&read_pre)); - // Clone the prior expected kvs for posterior state check - let mut expected_post = expected_pre.clone(); - itertools::assert_equal(expected_pre, read_pre); - - // Collect the expected values in posterior state - all the levels - for (key, val) in &tx_kvs { - if let Level::TxWriteLog(WlMod::Write(val)) = val { - expected_post.insert(key.clone(), *val); - } - } - for (key, val) in &tx_kvs { - // Deletes have to be applied last - if let Level::TxWriteLog(WlMod::Delete) = val { - expected_post.remove(key); - } else if let Level::TxWriteLog(WlMod::DeletePrefix) = val { - expected_post.retain(|expected_key, _val| { - // Remove matching prefixes except for VPs - expected_key.is_validity_predicate().is_some() - || expected_key.split_prefix(key).is_none() - }) - } - } - - // Collect the values from posterior state prefix iterator - let (iter_post, _gas) = iter_prefix_post( - &s.write_log, - &s.storage, - &storage::Key::default(), - ); - let mut read_post = BTreeMap::new(); - for (key, val, _gas) in iter_post { - let key = storage::Key::parse(key).unwrap(); - let val: i8 = BorshDeserialize::try_from_slice(&val).unwrap(); - read_post.insert(key, val); - } - dbg!(keys_to_string(&expected_post), keys_to_string(&read_post)); - itertools::assert_equal(expected_post, read_post); - } - - fn apply_to_wl_storage(s: &mut TestWlStorage, kvs: &[KeyVal]) { - // Apply writes first - for (key, val) in kvs { - match val { - Level::TxWriteLog(WlMod::Delete | WlMod::DeletePrefix) - | Level::BlockWriteLog(WlMod::Delete | WlMod::DeletePrefix) => { - } - Level::TxWriteLog(WlMod::Write(val)) => { - s.write_log.write(key, val.serialize_to_vec()).unwrap(); - } - Level::BlockWriteLog(WlMod::Write(val)) => { - s.write_log - // protocol only writes at block level - .protocol_write(key, val.serialize_to_vec()) - .unwrap(); - } - Level::Storage(val) => { - s.storage.write(key, val.serialize_to_vec()).unwrap(); - } - } - } - // Then apply deletions - for (key, val) in kvs { - match val { - Level::TxWriteLog(WlMod::Delete) => { - s.write_log.delete(key).unwrap(); - } - Level::BlockWriteLog(WlMod::Delete) => { - s.delete(key).unwrap(); - } - Level::TxWriteLog(WlMod::DeletePrefix) => { - // Find keys matching the prefix - let keys = namada_storage::iter_prefix_bytes(s, key) - .unwrap() - .map(|res| { - let (key, _val) = res.unwrap(); - key - }) - .collect::>(); - // Delete the matching keys - for key in keys { - // Skip validity predicates which cannot be deleted - if key.is_validity_predicate().is_none() { - s.write_log.delete(&key).unwrap(); - } - } - } - Level::BlockWriteLog(WlMod::DeletePrefix) => { - s.delete_prefix(key).unwrap(); - } - _ => {} - } - } - } - - /// WlStorage key written in the write log or storage - type KeyVal = (storage::Key, Level); - - /// WlStorage write level - #[derive(Clone, Copy, Debug)] - enum Level { - TxWriteLog(WlMod), - BlockWriteLog(WlMod), - Storage(VAL), - } - - /// Write log modification - #[derive(Clone, Copy, Debug)] - enum WlMod { - Write(VAL), - Delete, - DeletePrefix, - } - - fn arb_key_vals(len: usize) -> impl Strategy>> { - // Start with some arb. 
storage key-vals - let storage_kvs = prop::collection::vec( - (storage::testing::arb_key(), any::()), - 1..len, - ) - .prop_map(|kvs| { - kvs.into_iter() - .filter_map(|(key, val)| { - if let DbKeySeg::AddressSeg(Address::Internal( - InternalAddress::EthBridgePool, - )) = key.segments[0] - { - None - } else { - Some((key, Level::Storage(val))) - } - }) - .collect::>() - }); - - // Select some indices to override in write log - let overrides = prop::collection::vec( - (any::(), any::(), any::()), - 1..len / 2, - ); - - // Select some indices to delete - let deletes = prop::collection::vec( - (any::(), any::()), - 1..len / 3, - ); - - // Select some indices to delete prefix - let delete_prefix = prop::collection::vec( - ( - any::(), - any::(), - // An arbitrary number of key segments to drop from a selected - // key to obtain the prefix. Because `arb_key` generates `2..5` - // segments, we can drop one less of its upper bound. - (2_usize..4), - ), - 1..len / 4, - ); - - // Combine them all together - (storage_kvs, overrides, deletes, delete_prefix).prop_map( - |(mut kvs, overrides, deletes, delete_prefix)| { - for (ix, val, is_tx) in overrides { - let (key, _) = ix.get(&kvs); - let wl_mod = WlMod::Write(val); - let lvl = if is_tx { - Level::TxWriteLog(wl_mod) - } else { - Level::BlockWriteLog(wl_mod) - }; - kvs.push((key.clone(), lvl)); - } - for (ix, is_tx) in deletes { - let (key, _) = ix.get(&kvs); - // We have to skip validity predicate keys as they cannot be - // deleted - if key.is_validity_predicate().is_some() { - continue; - } - let wl_mod = WlMod::Delete; - let lvl = if is_tx { - Level::TxWriteLog(wl_mod) - } else { - Level::BlockWriteLog(wl_mod) - }; - kvs.push((key.clone(), lvl)); - } - for (ix, is_tx, num_of_seg_to_drop) in delete_prefix { - let (key, _) = ix.get(&kvs); - let wl_mod = WlMod::DeletePrefix; - let lvl = if is_tx { - Level::TxWriteLog(wl_mod) - } else { - Level::BlockWriteLog(wl_mod) - }; - // Keep at least one segment - let num_of_seg_to_keep = std::cmp::max( - 1, - key.segments - .len() - .checked_sub(num_of_seg_to_drop) - .unwrap_or_default(), - ); - let prefix = storage::Key { - segments: key - .segments - .iter() - .take(num_of_seg_to_keep) - .cloned() - .collect(), - }; - kvs.push((prefix, lvl)); - } - kvs - }, - ) - } -} diff --git a/crates/state/src/write_log.rs b/crates/state/src/write_log.rs index 0261d3e288..be4d2a33a4 100644 --- a/crates/state/src/write_log.rs +++ b/crates/state/src/write_log.rs @@ -5,19 +5,16 @@ use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use itertools::Itertools; use namada_core::address::{Address, EstablishedAddressGen, InternalAddress}; -use namada_core::hash::{Hash, StorageHasher}; +use namada_core::hash::Hash; use namada_core::ibc::IbcEvent; use namada_core::storage; use namada_gas::{MEMORY_ACCESS_GAS_PER_BYTE, STORAGE_WRITE_GAS_PER_BYTE}; -use namada_replay_protection as replay_protection; use namada_trans_token::storage_key::{ is_any_minted_balance_key, is_any_minter_key, is_any_token_balance_key, is_any_token_parameter_key, }; use thiserror::Error; -use crate::{DBIter, State, DB}; - #[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { @@ -68,7 +65,7 @@ pub enum StorageModification { #[derive(Debug, Clone, PartialEq, Eq)] /// A replay protection storage modification -enum ReProtStorageModification { +pub(crate) enum ReProtStorageModification { /// Write an entry Write, /// Delete an entry @@ -81,12 +78,12 @@ enum ReProtStorageModification { #[derive(Debug, Clone, PartialEq, Eq)] pub struct WriteLog { 
/// The generator of established addresses - address_gen: Option, + pub(crate) address_gen: Option, /// All the storage modification accepted by validity predicates are stored /// in block write-log, before being committed to the storage - block_write_log: HashMap, + pub(crate) block_write_log: HashMap, /// The storage modifications for the current transaction - tx_write_log: HashMap, + pub(crate) tx_write_log: HashMap, /// A precommit bucket for the `tx_write_log`. This is useful for /// validation when a clean `tx_write_log` is needed without committing any /// modification already in there. These modifications can be temporarily @@ -95,12 +92,13 @@ pub struct WriteLog { /// write/update/delete should ever happen on this field, this log should /// only be populated through a dump of the `tx_write_log` and should be /// cleaned either when committing or dumping the `tx_write_log` - tx_precommit_write_log: HashMap, + pub(crate) tx_precommit_write_log: + HashMap, /// The IBC events for the current transaction - ibc_events: BTreeSet, + pub(crate) ibc_events: BTreeSet, /// Storage modifications for the replay protection storage, always /// committed regardless of the result of the transaction - replay_protection: HashMap, + pub(crate) replay_protection: HashMap, } /// Write log prefix iterator @@ -491,83 +489,6 @@ impl WriteLog { self.tx_write_log.clear(); } - /// Commit the current block's write log to the storage. Starts a new block - /// write log. - pub fn commit_block( - &mut self, - storage: &mut State, - batch: &mut D::WriteBatch, - ) -> Result<()> - where - D: 'static + DB + for<'iter> DBIter<'iter>, - H: StorageHasher, - { - for (key, entry) in self.block_write_log.iter() { - match entry { - StorageModification::Write { value } => { - storage - .batch_write_subspace_val(batch, key, value.clone()) - .map_err(Error::StorageError)?; - } - StorageModification::Delete => { - storage - .batch_delete_subspace_val(batch, key) - .map_err(Error::StorageError)?; - } - StorageModification::InitAccount { vp_code_hash } => { - storage - .batch_write_subspace_val(batch, key, *vp_code_hash) - .map_err(Error::StorageError)?; - } - // temporary value isn't persisted - StorageModification::Temp { .. } => {} - } - } - - // Replay protections specifically - for (hash, entry) in self.replay_protection.iter() { - match entry { - ReProtStorageModification::Write => storage - .write_replay_protection_entry( - batch, - // Can only write tx hashes to the previous block, no - // further - &replay_protection::last_key(hash), - ) - .map_err(Error::StorageError)?, - ReProtStorageModification::Delete => storage - .delete_replay_protection_entry( - batch, - // Can only delete tx hashes from the previous block, - // no further - &replay_protection::last_key(hash), - ) - .map_err(Error::StorageError)?, - ReProtStorageModification::Finalize => { - storage - .write_replay_protection_entry( - batch, - &replay_protection::all_key(hash), - ) - .map_err(Error::StorageError)?; - storage - .delete_replay_protection_entry( - batch, - &replay_protection::last_key(hash), - ) - .map_err(Error::StorageError)? - } - } - } - - if let Some(address_gen) = self.address_gen.take() { - storage.address_gen = address_gen - } - self.block_write_log.clear(); - self.replay_protection.clear(); - Ok(()) - } - /// Get the verifiers set whose validity predicates should validate the /// current transaction changes and the storage keys that have been /// modified created, updated and deleted via the write log. 
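A minimal usage sketch (not part of the diff itself) of how the pieces above fit together after the refactor, mirroring the updated tests in the hunks below: writes made through `StorageWrite` are staged in the state's write log and only reach the DB on `commit_block`, which now lives on the state rather than on `WriteLog`. The function name and the key string are illustrative only; `TestState`, `db_read`, `commit_block` and the `StorageRead`/`StorageWrite`/`StateRead` imports are the ones used elsewhere in this PR and are assumed to be in scope as in the `crates/state` tests.

use namada_core::borsh::BorshDeserialize;
use namada_core::storage::Key;
use namada_storage::{StorageRead, StorageWrite};

use crate::testing::TestState;
use crate::StateRead;

fn state_commit_sketch() {
    // Formerly `TestWlStorage::default()`
    let mut state = TestState::default();
    // Illustrative key; any valid storage key works
    let key = Key::parse("demo/key").unwrap();

    // `StorageWrite` stages the value in the write log...
    state.write(&key, 7u64).unwrap();
    // ...so `StorageRead` already sees it...
    assert_eq!(state.read::<u64>(&key).unwrap(), Some(7u64));
    // ...but a direct DB read does not until the block is committed
    let (raw, _gas) = state.db_read(&key).unwrap();
    assert!(raw.is_none());

    // `commit_block` replaces the removed `WriteLog::commit_block`: it flushes
    // the write log (including replay-protection entries) to the DB and
    // commits the block
    state.commit_block().unwrap();
    let (raw, _gas) = state.db_read(&key).unwrap();
    assert_eq!(u64::try_from_slice(&raw.unwrap()).unwrap(), 7u64);
}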
@@ -669,7 +590,7 @@ impl WriteLog { } /// Write the transaction hash - pub(crate) fn write_tx_hash(&mut self, hash: Hash) -> Result<()> { + pub fn write_tx_hash(&mut self, hash: Hash) -> Result<()> { if self .replay_protection .insert(hash, ReProtStorageModification::Write) @@ -686,7 +607,7 @@ impl WriteLog { } /// Remove the transaction hash - pub(crate) fn delete_tx_hash(&mut self, hash: Hash) -> Result<()> { + pub fn delete_tx_hash(&mut self, hash: Hash) -> Result<()> { match self .replay_protection .insert(hash, ReProtStorageModification::Delete) @@ -735,6 +656,7 @@ mod tests { use proptest::prelude::*; use super::*; + use crate::StateRead; #[test] fn test_crud_value() { @@ -895,9 +817,7 @@ mod tests { #[test] fn test_commit() { - let mut storage = crate::testing::TestStorage::default(); - let mut write_log = WriteLog::default(); - let mut batch = crate::testing::TestStorage::batch(); + let mut state = crate::testing::TestState::default(); let address_gen = EstablishedAddressGen::new("test"); let key1 = @@ -911,134 +831,132 @@ mod tests { // initialize an account let vp1 = Hash::sha256("vp1".as_bytes()); - let (addr1, _) = write_log.init_account(&address_gen, vp1); - write_log.commit_tx(); + let (addr1, _) = state.write_log.init_account(&address_gen, vp1); + state.write_log.commit_tx(); // write values let val1 = "val1".as_bytes().to_vec(); - write_log.write(&key1, val1.clone()).unwrap(); - write_log.write(&key2, val1.clone()).unwrap(); - write_log.write(&key3, val1.clone()).unwrap(); - write_log.write_temp(&key4, val1.clone()).unwrap(); - write_log.commit_tx(); + state.write_log.write(&key1, val1.clone()).unwrap(); + state.write_log.write(&key2, val1.clone()).unwrap(); + state.write_log.write(&key3, val1.clone()).unwrap(); + state.write_log.write_temp(&key4, val1.clone()).unwrap(); + state.write_log.commit_tx(); // these values are not written due to drop_tx let val2 = "val2".as_bytes().to_vec(); - write_log.write(&key1, val2.clone()).unwrap(); - write_log.write(&key2, val2.clone()).unwrap(); - write_log.write(&key3, val2).unwrap(); - write_log.drop_tx(); + state.write_log.write(&key1, val2.clone()).unwrap(); + state.write_log.write(&key2, val2.clone()).unwrap(); + state.write_log.write(&key3, val2).unwrap(); + state.write_log.drop_tx(); // deletes and updates values let val3 = "val3".as_bytes().to_vec(); - write_log.delete(&key2).unwrap(); - write_log.write(&key3, val3.clone()).unwrap(); - write_log.commit_tx(); + state.write_log.delete(&key2).unwrap(); + state.write_log.write(&key3, val3.clone()).unwrap(); + state.write_log.commit_tx(); // commit a block - write_log - .commit_block(&mut storage, &mut batch) - .expect("commit failed"); + state.commit_block().expect("commit failed"); let (vp_code_hash, _gas) = - storage.validity_predicate(&addr1).expect("vp read failed"); + state.validity_predicate(&addr1).expect("vp read failed"); assert_eq!(vp_code_hash, Some(vp1)); - let (value, _) = storage.read(&key1).expect("read failed"); + let (value, _) = state.db_read(&key1).expect("read failed"); assert_eq!(value.expect("no read value"), val1); - let (value, _) = storage.read(&key2).expect("read failed"); + let (value, _) = state.db_read(&key2).expect("read failed"); assert!(value.is_none()); - let (value, _) = storage.read(&key3).expect("read failed"); + let (value, _) = state.db_read(&key3).expect("read failed"); assert_eq!(value.expect("no read value"), val3); - let (value, _) = storage.read(&key4).expect("read failed"); + let (value, _) = state.db_read(&key4).expect("read failed"); 
assert_eq!(value, None); } #[test] fn test_replay_protection_commit() { - let mut storage = crate::testing::TestStorage::default(); - let mut write_log = WriteLog::default(); - let mut batch = crate::testing::TestStorage::batch(); + let mut state = crate::testing::TestState::default(); - // write some replay protection keys - write_log - .write_tx_hash(Hash::sha256("tx1".as_bytes())) - .unwrap(); - write_log - .write_tx_hash(Hash::sha256("tx2".as_bytes())) - .unwrap(); - write_log - .write_tx_hash(Hash::sha256("tx3".as_bytes())) - .unwrap(); + { + let write_log = state.write_log_mut(); + // write some replay protection keys + write_log + .write_tx_hash(Hash::sha256("tx1".as_bytes())) + .unwrap(); + write_log + .write_tx_hash(Hash::sha256("tx2".as_bytes())) + .unwrap(); + write_log + .write_tx_hash(Hash::sha256("tx3".as_bytes())) + .unwrap(); + } // commit a block - write_log - .commit_block(&mut storage, &mut batch) - .expect("commit failed"); + state.commit_block().expect("commit failed"); - assert!(write_log.replay_protection.is_empty()); + assert!(state.write_log.replay_protection.is_empty()); for tx in ["tx1", "tx2", "tx3"] { + let hash = Hash::sha256(tx.as_bytes()); assert!( - storage - .has_replay_protection_entry(&Hash::sha256(tx.as_bytes())) + state + .has_replay_protection_entry(&hash) .expect("read failed") ); } - // write some replay protection keys - write_log - .write_tx_hash(Hash::sha256("tx4".as_bytes())) - .unwrap(); - write_log - .write_tx_hash(Hash::sha256("tx5".as_bytes())) - .unwrap(); - write_log - .write_tx_hash(Hash::sha256("tx6".as_bytes())) - .unwrap(); - - // delete previous hash - write_log - .delete_tx_hash(Hash::sha256("tx1".as_bytes())) - .unwrap(); + { + let write_log = state.write_log_mut(); + // write some replay protection keys + write_log + .write_tx_hash(Hash::sha256("tx4".as_bytes())) + .unwrap(); + write_log + .write_tx_hash(Hash::sha256("tx5".as_bytes())) + .unwrap(); + write_log + .write_tx_hash(Hash::sha256("tx6".as_bytes())) + .unwrap(); - // finalize previous hashes - for tx in ["tx2", "tx3"] { + // delete previous hash write_log - .finalize_tx_hash(Hash::sha256(tx.as_bytes())) + .delete_tx_hash(Hash::sha256("tx1".as_bytes())) .unwrap(); + + // finalize previous hashes + for tx in ["tx2", "tx3"] { + write_log + .finalize_tx_hash(Hash::sha256(tx.as_bytes())) + .unwrap(); + } } // commit a block - write_log - .commit_block(&mut storage, &mut batch) - .expect("commit failed"); + state.commit_block().expect("commit failed"); - assert!(write_log.replay_protection.is_empty()); + assert!(state.write_log.replay_protection.is_empty()); for tx in ["tx2", "tx3", "tx4", "tx5", "tx6"] { assert!( - storage + state .has_replay_protection_entry(&Hash::sha256(tx.as_bytes())) .expect("read failed") ); } assert!( - !storage + !state .has_replay_protection_entry(&Hash::sha256("tx1".as_bytes())) .expect("read failed") ); // try to delete finalized hash which shouldn't work - write_log + state + .write_log .delete_tx_hash(Hash::sha256("tx2".as_bytes())) .unwrap(); // commit a block - write_log - .commit_block(&mut storage, &mut batch) - .expect("commit failed"); + state.commit_block().expect("commit failed"); - assert!(write_log.replay_protection.is_empty()); + assert!(state.write_log.replay_protection.is_empty()); assert!( - storage + state .has_replay_protection_entry(&Hash::sha256("tx2".as_bytes())) .expect("read failed") ); diff --git a/crates/tests/src/integration/masp.rs b/crates/tests/src/integration/masp.rs index 6846895030..e6304ad1b5 100644 --- 
a/crates/tests/src/integration/masp.rs +++ b/crates/tests/src/integration/masp.rs @@ -1466,9 +1466,18 @@ fn dynamic_assets() -> Result<()> { let tokens = { // Only distribute rewards for NAM tokens - let storage = &mut node.shell.lock().unwrap().wl_storage.storage; - let tokens = storage.conversion_state.tokens.clone(); - storage.conversion_state.tokens.retain(|k, _v| *k == nam); + let state = &mut node.shell.lock().unwrap().state; + let tokens = state.in_mem().conversion_state.tokens.clone(); + state + .in_mem_mut() + .conversion_state + .tokens + .insert(btc.clone(), tokens[&btc].clone()); + state + .in_mem_mut() + .conversion_state + .tokens + .retain(|k, _v| *k == nam); tokens }; // Wait till epoch boundary @@ -1534,8 +1543,9 @@ fn dynamic_assets() -> Result<()> { { // Start decoding and distributing shielded rewards for BTC in next // epoch - let storage = &mut node.shell.lock().unwrap().wl_storage.storage; - storage + let state = &mut node.shell.lock().unwrap().state; + state + .in_mem_mut() .conversion_state .tokens .insert(btc.clone(), tokens[&btc].clone()); @@ -1684,7 +1694,7 @@ fn dynamic_assets() -> Result<()> { { // Stop distributing shielded rewards for NAM in next epoch - let storage = &mut node.shell.lock().unwrap().wl_storage; + let storage = &mut node.shell.lock().unwrap().state; storage .write( &token::storage_key::masp_max_reward_rate_key(&tokens[&nam]), @@ -1736,8 +1746,8 @@ fn dynamic_assets() -> Result<()> { { // Stop decoding and distributing shielded rewards for BTC in next epoch - let storage = &mut node.shell.lock().unwrap().wl_storage.storage; - storage.conversion_state.tokens.remove(&btc); + let state = &mut node.shell.lock().unwrap().state; + state.in_mem_mut().conversion_state.tokens.remove(&btc); } // Wait till epoch boundary @@ -1824,7 +1834,7 @@ fn dynamic_assets() -> Result<()> { { // Start distributing shielded rewards for NAM in next epoch - let storage = &mut node.shell.lock().unwrap().wl_storage; + let storage = &mut node.shell.lock().unwrap().state; storage .write( &token::storage_key::masp_max_reward_rate_key(&tokens[&nam]), diff --git a/crates/tests/src/integration/setup.rs b/crates/tests/src/integration/setup.rs index da21de8dda..e8aa6b4ee7 100644 --- a/crates/tests/src/integration/setup.rs +++ b/crates/tests/src/integration/setup.rs @@ -243,7 +243,7 @@ fn create_node( .init_chain(init_req, 1) .map_err(|e| eyre!("Failed to initialize ledger: {:?}", e))?; // set the height of the first block (should be 1) - locked.wl_storage.storage.block.height = 1.into(); + locked.state.in_mem_mut().block.height = 1.into(); locked.commit(); } diff --git a/crates/tests/src/native_vp/eth_bridge_pool.rs b/crates/tests/src/native_vp/eth_bridge_pool.rs index 93b5ea35ed..6a2505b171 100644 --- a/crates/tests/src/native_vp/eth_bridge_pool.rs +++ b/crates/tests/src/native_vp/eth_bridge_pool.rs @@ -1,5 +1,6 @@ #[cfg(test)] mod test_bridge_pool_vp { + use std::cell::RefCell; use std::path::PathBuf; use borsh::BorshDeserialize; @@ -13,10 +14,12 @@ mod test_bridge_pool_vp { use namada::core::key::{common, ed25519, SecretKey}; use namada::core::token::Amount; use namada::eth_bridge::storage::bridge_pool::BRIDGE_POOL_ADDRESS; + use namada::gas::VpGasMeter; use namada::ledger::native_vp::ethereum_bridge::bridge_pool_vp::BridgePoolVp; use namada::tx::Tx; use namada_apps::wallet::defaults::{albert_address, bertha_address}; use namada_apps::wasm_loader; + use namada_core::validity_predicate::VpSentinel; use namada_sdk::eth_bridge::{ wrapped_erc20s, Contracts, Erc20WhitelistEntry, 
EthereumBridgeParams, UpgradeableContract, @@ -82,7 +85,7 @@ mod test_bridge_pool_vp { }, }; // initialize Ethereum bridge storage - config.init_storage(&mut env.wl_storage); + config.init_storage(&mut env.state); // initialize Bertha's account env.spawn_accounts([&albert_address(), &bertha_address(), &nam()]); // enrich Albert @@ -111,9 +114,13 @@ mod test_bridge_pool_vp { tx_host_env::set(env); let mut tx_env = tx_host_env::take(); tx_env.execute_tx().expect("Test failed."); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &tx_env.gas_meter.borrow(), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp_env = TestNativeVpEnv::from_tx_env(tx_env, BRIDGE_POOL_ADDRESS); vp_env - .validate_tx(|ctx| BridgePoolVp { ctx }) + .validate_tx(&gas_meter, &sentinel, |ctx| BridgePoolVp { ctx }) .expect("Test failed") } diff --git a/crates/tests/src/native_vp/mod.rs b/crates/tests/src/native_vp/mod.rs index 75dd2344df..375e7af7fa 100644 --- a/crates/tests/src/native_vp/mod.rs +++ b/crates/tests/src/native_vp/mod.rs @@ -8,13 +8,13 @@ use namada::core::address::Address; use namada::core::storage; use namada::ledger::gas::VpGasMeter; use namada::ledger::native_vp::{Ctx, NativeVp}; -use namada::state::mockdb::MockDB; -use namada::state::Sha256Hasher; +use namada::state::testing::TestState; use namada::vm::WasmCacheRwAccess; +use namada_core::validity_predicate::VpSentinel; use crate::tx::TestTxEnv; -type NativeVpCtx<'a> = Ctx<'a, MockDB, Sha256Hasher, WasmCacheRwAccess>; +type NativeVpCtx<'a> = Ctx<'a, TestState, WasmCacheRwAccess>; #[derive(Debug)] pub struct TestNativeVpEnv { @@ -44,26 +44,24 @@ impl TestNativeVpEnv { /// Run some transaction code `apply_tx` and validate it with a native VP pub fn validate_tx<'a, T>( &'a self, + gas_meter: &'a RefCell, + sentinel: &'a RefCell, init_native_vp: impl Fn(NativeVpCtx<'a>) -> T, ) -> Result::Error> where T: NativeVp, { - let ctx = Ctx { - iterators: Default::default(), - gas_meter: RefCell::new(VpGasMeter::new_from_tx_meter( - &self.tx_env.gas_meter, - )), - sentinel: Default::default(), - storage: &self.tx_env.wl_storage.storage, - write_log: &self.tx_env.wl_storage.write_log, - tx: &self.tx_env.tx, - tx_index: &self.tx_env.tx_index, - vp_wasm_cache: self.tx_env.vp_wasm_cache.clone(), - address: &self.address, - keys_changed: &self.keys_changed, - verifiers: &self.verifiers, - }; + let ctx = Ctx::new( + &self.address, + &self.tx_env.state, + &self.tx_env.tx, + &self.tx_env.tx_index, + gas_meter, + sentinel, + &self.keys_changed, + &self.verifiers, + self.tx_env.vp_wasm_cache.clone(), + ); let native_vp = init_native_vp(ctx); native_vp.validate_tx( diff --git a/crates/tests/src/native_vp/pos.rs b/crates/tests/src/native_vp/pos.rs index 73e2eb7315..c9f04020f2 100644 --- a/crates/tests/src/native_vp/pos.rs +++ b/crates/tests/src/native_vp/pos.rs @@ -114,7 +114,7 @@ pub fn init_pos( tx_host_env::with(|tx_env| { // Ensure that all the used // addresses exist - let native_token = tx_env.wl_storage.storage.native_token.clone(); + let native_token = tx_env.state.in_mem().native_token.clone(); tx_env.spawn_accounts([&native_token]); for validator in genesis_validators { tx_env.spawn_accounts([&validator.address]); @@ -124,14 +124,14 @@ pub fn init_pos( 1, ) } - tx_env.wl_storage.storage.block.epoch = start_epoch; + tx_env.state.in_mem_mut().block.epoch = start_epoch; // Initialize PoS storage // tx_env - // .storage + // .state // .init_genesis(params, genesis_validators.iter(), start_epoch) // .unwrap(); let params = init_genesis( - &mut 
tx_env.wl_storage, + &mut tx_env.state, params.clone(), genesis_validators.iter().cloned(), start_epoch, @@ -147,11 +147,15 @@ pub fn init_pos( #[cfg(test)] mod tests { + use std::cell::RefCell; + use namada::core::address; use namada::core::key::common::PublicKey; use namada::core::storage::Epoch; + use namada::gas::VpGasMeter; use namada::ledger::pos::{PosParams, PosVP}; use namada::token; + use namada_core::validity_predicate::VpSentinel; use namada_tx_prelude::proof_of_stake::parameters::testing::arb_pos_params; use namada_tx_prelude::Address; use proptest::prelude::*; @@ -267,7 +271,7 @@ mod tests { if !test_state.is_current_tx_valid { // Clear out the changes tx_host_env::with(|env| { - env.wl_storage.drop_tx(); + env.state.drop_tx(); }); } @@ -281,13 +285,13 @@ mod tests { tx_host_env::with(|env| { // Clear out the changes if !test_state.is_current_tx_valid { - env.wl_storage.drop_tx(); + env.state.drop_tx(); } // Also commit the last transaction(s) changes, if any env.commit_tx_and_block(); - env.wl_storage.storage.block.epoch = - env.wl_storage.storage.block.epoch.next(); + env.state.in_mem_mut().block.epoch = + env.state.in_mem().block.epoch.next(); }); // Starting a new tx @@ -317,7 +321,7 @@ mod tests { // Clear out the invalid changes tx_host_env::with(|env| { - env.wl_storage.drop_tx(); + env.state.drop_tx(); }) } } @@ -435,8 +439,12 @@ mod tests { // Use the tx_env to run PoS VP let tx_env = tx_host_env::take(); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &tx_env.gas_meter.borrow(), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp_env = TestNativeVpEnv::from_tx_env(tx_env, address::POS); - let result = vp_env.validate_tx(PosVP::new); + let result = vp_env.validate_tx(&gas_meter, &sentinel, PosVP::new); // Put the tx_env back before checking the result tx_host_env::set(vp_env.tx_env); @@ -569,6 +577,8 @@ mod tests { #[cfg(any(test, feature = "testing"))] pub mod testing { + use std::cell::RefCell; + use derivative::Derivative; use itertools::Either; use namada::core::dec::Dec; @@ -858,9 +868,10 @@ pub mod testing { let current_epoch = tx_host_env::with(|env| { // Reset the gas meter on each change, so that we never run // out in this test + let gas_limit = env.gas_meter.borrow().tx_gas_limit; env.gas_meter = - TxGasMeter::new_from_sub_limit(env.gas_meter.tx_gas_limit); - env.wl_storage.storage.block.epoch + RefCell::new(TxGasMeter::new_from_sub_limit(gas_limit)); + env.state.in_mem().block.epoch }); println!("Current epoch {}", current_epoch); diff --git a/crates/tests/src/storage_api/collections/lazy_map.rs b/crates/tests/src/storage_api/collections/lazy_map.rs index 91ef7a1fb8..708138ff6f 100644 --- a/crates/tests/src/storage_api/collections/lazy_map.rs +++ b/crates/tests/src/storage_api/collections/lazy_map.rs @@ -243,7 +243,7 @@ mod tests { match &transition { Transition::CommitTx => { // commit the tx without committing the block - tx_host_env::with(|env| env.wl_storage.commit_tx()); + tx_host_env::with(|env| env.state.commit_tx()); } Transition::CommitTxAndBlock => { // commit the tx and the block diff --git a/crates/tests/src/storage_api/collections/lazy_set.rs b/crates/tests/src/storage_api/collections/lazy_set.rs index 3817ac90a5..5a943401b0 100644 --- a/crates/tests/src/storage_api/collections/lazy_set.rs +++ b/crates/tests/src/storage_api/collections/lazy_set.rs @@ -232,7 +232,7 @@ mod tests { match &transition { Transition::CommitTx => { // commit the tx without committing the block - tx_host_env::with(|env| 
env.wl_storage.commit_tx()); + tx_host_env::with(|env| env.state.commit_tx()); } Transition::CommitTxAndBlock => { // commit the tx and the block diff --git a/crates/tests/src/storage_api/collections/lazy_vec.rs b/crates/tests/src/storage_api/collections/lazy_vec.rs index 523204627d..d11c5c0543 100644 --- a/crates/tests/src/storage_api/collections/lazy_vec.rs +++ b/crates/tests/src/storage_api/collections/lazy_vec.rs @@ -237,7 +237,7 @@ mod tests { match &transition { Transition::CommitTx => { // commit the tx without committing the block - tx_host_env::with(|env| env.wl_storage.commit_tx()); + tx_host_env::with(|env| env.state.commit_tx()); } Transition::CommitTxAndBlock => { // commit the tx and the block diff --git a/crates/tests/src/storage_api/collections/nested_lazy_map.rs b/crates/tests/src/storage_api/collections/nested_lazy_map.rs index b247cbed58..393d922726 100644 --- a/crates/tests/src/storage_api/collections/nested_lazy_map.rs +++ b/crates/tests/src/storage_api/collections/nested_lazy_map.rs @@ -256,7 +256,7 @@ mod tests { match &transition { Transition::CommitTx => { // commit the tx without committing the block - tx_host_env::with(|env| env.wl_storage.commit_tx()); + tx_host_env::with(|env| env.state.commit_tx()); } Transition::CommitTxAndBlock => { // commit the tx and the block diff --git a/crates/tests/src/vm_host_env/ibc.rs b/crates/tests/src/vm_host_env/ibc.rs index 0cf6910ba1..bf5ed4dabe 100644 --- a/crates/tests/src/vm_host_env/ibc.rs +++ b/crates/tests/src/vm_host_env/ibc.rs @@ -1,4 +1,5 @@ use core::time::Duration; +use std::cell::RefCell; use std::collections::HashMap; use ibc_testkit::testapp::ibc::clients::mock::client_state::{ @@ -73,15 +74,16 @@ use namada::ledger::parameters::storage::{ get_epoch_duration_storage_key, get_max_expected_time_per_block_key, }; use namada::ledger::parameters::EpochDuration; -use namada::ledger::storage::mockdb::MockDB; use namada::ledger::tx_env::TxEnv; use namada::ledger::{ibc, pos}; use namada::proof_of_stake::OwnedPosParams; -use namada::state::Sha256Hasher; +use namada::state::testing::TestState; use namada::tendermint::time::Time as TmTime; use namada::token::{self, Amount, DenominatedAmount}; use namada::tx::Tx; use namada::vm::{wasm, WasmCacheRwAccess}; +use namada_core::validity_predicate::VpSentinel; +use namada_sdk::state::StateRead; use namada_test_utils::TestWasms; use namada_tx_prelude::BorshSerializeExt; @@ -92,7 +94,7 @@ pub const ANY_DENOMINATION: u8 = 4; const COMMITMENT_PREFIX: &[u8] = b"ibc"; pub struct TestIbcVp<'a> { - pub ibc: Ibc<'a, MockDB, Sha256Hasher, WasmCacheRwAccess>, + pub ibc: Ibc<'a, TestState, WasmCacheRwAccess>, } impl<'a> TestIbcVp<'a> { @@ -109,8 +111,7 @@ impl<'a> TestIbcVp<'a> { } pub struct TestMultitokenVp<'a> { - pub multitoken_vp: - MultitokenVp<'a, MockDB, Sha256Hasher, WasmCacheRwAccess>, + pub multitoken_vp: MultitokenVp<'a, TestState, WasmCacheRwAccess>, } impl<'a> TestMultitokenVp<'a> { @@ -132,8 +133,8 @@ pub fn validate_ibc_vp_from_tx<'a>( tx: &'a Tx, ) -> std::result::Result { let (verifiers, keys_changed) = tx_env - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&tx_env.verifiers); let addr = Address::Internal(InternalAddress::Ibc); if !verifiers.contains(&addr) { @@ -145,15 +146,17 @@ pub fn validate_ibc_vp_from_tx<'a>( let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(1_000_000.into()), + )); + let sentinel = 
RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &tx_env.wl_storage.storage, - &tx_env.wl_storage.write_log, + &tx_env.state, tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - 1_000_000.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -170,8 +173,8 @@ pub fn validate_multitoken_vp_from_tx<'a>( target: &Key, ) -> std::result::Result { let (verifiers, keys_changed) = tx_env - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(&tx_env.verifiers); if !keys_changed.contains(target) { panic!( @@ -183,15 +186,17 @@ pub fn validate_multitoken_vp_from_tx<'a>( let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &TxGasMeter::new_from_sub_limit(1_000_000.into()), + )); + let sentinel = RefCell::new(VpSentinel::default()); let ctx = Ctx::new( &ADDRESS, - &tx_env.wl_storage.storage, - &tx_env.wl_storage.write_log, + &tx_env.state, tx, &TxIndex(0), - VpGasMeter::new_from_tx_meter(&TxGasMeter::new_from_sub_limit( - 1_000_000.into(), - )), + &gas_meter, + &sentinel, &keys_changed, &verifiers, vp_wasm_cache, @@ -208,11 +213,11 @@ pub fn init_storage() -> (Address, Address) { let code_hash = Hash::sha256(&code); tx_host_env::with(|env| { - ibc::init_genesis_storage(&mut env.wl_storage); + ibc::init_genesis_storage(&mut env.state); let gov_params = GovernanceParameters::default(); - gov_params.init_storage(&mut env.wl_storage).unwrap(); + gov_params.init_storage(&mut env.state).unwrap(); pos::test_utils::test_init_genesis( - &mut env.wl_storage, + &mut env.state, OwnedPosParams::default(), vec![get_dummy_genesis_validator()].into_iter(), Epoch(1), @@ -220,15 +225,15 @@ pub fn init_storage() -> (Address, Address) { .unwrap(); // store wasm code let key = Key::wasm_code(&code_hash); - env.wl_storage.storage.write(&key, code.clone()).unwrap(); + env.state.db_write(&key, code.clone()).unwrap(); // block header to check timeout timestamp - env.wl_storage - .storage + env.state + .in_mem_mut() .set_header(tm_dummy_header()) .unwrap(); - env.wl_storage - .storage + env.state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(1)) .unwrap(); }); @@ -242,13 +247,11 @@ pub fn init_storage() -> (Address, Address) { let key = token::storage_key::balance_key(&token, &account); let init_bal = Amount::from_uint(100, token_denom).unwrap(); tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&denom_key, &token_denom.serialize_to_vec()) + env.state + .db_write(&denom_key, &token_denom.serialize_to_vec()) .unwrap(); - env.wl_storage - .storage - .write(&key, &init_bal.serialize_to_vec()) + env.state + .db_write(&key, &init_bal.serialize_to_vec()) .unwrap(); }); @@ -260,7 +263,7 @@ pub fn init_storage() -> (Address, Address) { }; let bytes = epoch_duration.serialize_to_vec(); tx_host_env::with(|env| { - env.wl_storage.storage.write(&key, &bytes).unwrap(); + env.state.db_write(&key, &bytes).unwrap(); }); // max_expected_time_per_block @@ -268,21 +271,21 @@ pub fn init_storage() -> (Address, Address) { let key = get_max_expected_time_per_block_key(); let bytes = namada::core::encode(&time); tx_host_env::with(|env| { - env.wl_storage.storage.write(&key, &bytes).unwrap(); + env.state.db_write(&key, &bytes).unwrap(); }); // commit the initialized token and account tx_host_env::with(|env| { - env.wl_storage.commit_tx(); - env.wl_storage.commit_block().unwrap(); + env.state.commit_tx(); + 
env.state.commit_block().unwrap(); // block header to check timeout timestamp - env.wl_storage - .storage + env.state + .in_mem_mut() .set_header(tm_dummy_header()) .unwrap(); - env.wl_storage - .storage + env.state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); }); @@ -311,10 +314,7 @@ pub fn prepare_client() -> (ClientId, Any, HashMap>) { // client update time let key = client_update_timestamp_key(&client_id); let time = tx_host_env::with(|env| { - let header = env - .wl_storage - .storage - .get_block_header(None) + let header = StateRead::get_block_header(&env.state, None) .unwrap() .0 .unwrap(); @@ -325,7 +325,7 @@ pub fn prepare_client() -> (ClientId, Any, HashMap>) { // client update height let key = client_update_height_key(&client_id); let height = tx_host_env::with(|env| { - let height = env.wl_storage.storage.get_block_height().0; + let height = env.state.in_mem().get_block_height().0; Height::new(0, height.0).expect("invalid height") }); let bytes = height.encode_vec(); diff --git a/crates/tests/src/vm_host_env/mod.rs b/crates/tests/src/vm_host_env/mod.rs index 40981f3e5e..330de62167 100644 --- a/crates/tests/src/vm_host_env/mod.rs +++ b/crates/tests/src/vm_host_env/mod.rs @@ -168,7 +168,7 @@ mod tests { tx_host_env::with(|env| { for i in sub_keys.iter() { let key = prefix.push(i).unwrap(); - env.wl_storage.write(&key, i).unwrap(); + env.state.write(&key, i).unwrap(); } }); @@ -227,7 +227,7 @@ mod tests { tx_host_env::with(|env| { // store wasm code let key = Key::wasm_code(&code_hash); - env.wl_storage.storage.write(&key, code.clone()).unwrap(); + env.state.write_bytes(&key, &code).unwrap(); }); tx::ctx().init_account(code_hash, &None).unwrap(); } @@ -339,35 +339,23 @@ mod tests { assert_eq!( tx::ctx().get_chain_id().unwrap(), - tx_host_env::with(|env| env.wl_storage.storage.get_chain_id().0) + tx_host_env::with(|env| env.state.in_mem().get_chain_id().0) ); assert_eq!( tx::ctx().get_block_height().unwrap(), - tx_host_env::with(|env| env - .wl_storage - .storage - .get_block_height() - .0) + tx_host_env::with(|env| env.state.in_mem().get_block_height().0) ); assert_eq!( tx::ctx().get_block_hash().unwrap(), - tx_host_env::with(|env| env.wl_storage.storage.get_block_hash().0) + tx_host_env::with(|env| env.state.in_mem().get_block_hash().0) ); assert_eq!( tx::ctx().get_block_epoch().unwrap(), - tx_host_env::with(|env| env - .wl_storage - .storage - .get_current_epoch() - .0) + tx_host_env::with(|env| env.state.in_mem().get_current_epoch().0) ); assert_eq!( tx::ctx().get_native_token().unwrap(), - tx_host_env::with(|env| env - .wl_storage - .storage - .native_token - .clone()) + tx_host_env::with(|env| env.state.in_mem().native_token.clone()) ); } @@ -377,7 +365,8 @@ mod tests { tx_host_env::init(); let pred_epochs = tx::ctx().get_pred_epochs().unwrap(); - let expected = tx_host_env::take().wl_storage.storage.block.pred_epochs; + let expected = + tx_host_env::take().state.in_mem().block.pred_epochs.clone(); assert_eq!(expected, pred_epochs); } @@ -409,12 +398,9 @@ mod tests { let existing_key = addr_key.join(&Key::parse("existing_key_raw").unwrap()); let existing_value = vec![2_u8; 1000]; - tx_env - .wl_storage - .write(&existing_key, &existing_value) - .unwrap(); + tx_env.state.write(&existing_key, &existing_value).unwrap(); // ... 
and commit it - tx_env.wl_storage.commit_tx(); + tx_env.state.commit_tx(); // In a transaction, write override the existing key's value and add // another key-value @@ -497,10 +483,10 @@ mod tests { // Write some values to storage ... for i in sub_keys.iter() { let key = prefix.push(i).unwrap(); - tx_env.wl_storage.write(&key, i).unwrap(); + tx_env.state.write(&key, i).unwrap(); } // ... and commit them - tx_env.wl_storage.commit_tx(); + tx_env.state.commit_tx(); // In a transaction, write override the existing key's value and add // another key-value @@ -554,7 +540,7 @@ mod tests { let keypair = key::testing::keypair_1(); let pk = keypair.ref_to(); - let _ = pks_handle(&addr).insert(&mut env.wl_storage, 0_u8, pk.clone()); + let _ = pks_handle(&addr).insert(&mut env.state, 0_u8, pk.clone()); // Initialize the environment vp_host_env::set(env); @@ -571,7 +557,7 @@ mod tests { let keypairs = vec![keypair.clone()]; let pks_map = AccountPublicKeysMap::from_iter(vec![pk.clone()]); let signed_tx_data = vp_host_env::with(|env| { - let chain_id = env.wl_storage.storage.chain_id.clone(); + let chain_id = env.state.in_mem().chain_id.clone(); let mut tx = Tx::new(chain_id, expiration); tx.add_code(code.clone(), None) .add_serialized_data(data.to_vec()) @@ -619,35 +605,23 @@ mod tests { assert_eq!( vp::CTX.get_chain_id().unwrap(), - vp_host_env::with(|env| env.wl_storage.storage.get_chain_id().0) + vp_host_env::with(|env| env.state.in_mem().get_chain_id().0) ); assert_eq!( vp::CTX.get_block_height().unwrap(), - vp_host_env::with(|env| env - .wl_storage - .storage - .get_block_height() - .0) + vp_host_env::with(|env| env.state.in_mem().get_block_height().0) ); assert_eq!( vp::CTX.get_block_hash().unwrap(), - vp_host_env::with(|env| env.wl_storage.storage.get_block_hash().0) + vp_host_env::with(|env| env.state.in_mem().get_block_hash().0) ); assert_eq!( vp::CTX.get_block_epoch().unwrap(), - vp_host_env::with(|env| env - .wl_storage - .storage - .get_current_epoch() - .0) + vp_host_env::with(|env| env.state.in_mem().get_current_epoch().0) ); assert_eq!( vp::CTX.get_native_token().unwrap(), - vp_host_env::with(|env| env - .wl_storage - .storage - .native_token - .clone()) + vp_host_env::with(|env| env.state.in_mem().native_token.clone()) ); } @@ -681,11 +655,8 @@ mod tests { // store wasm codes let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); - env.wl_storage.storage.write(&key, code.clone()).unwrap(); - env.wl_storage - .storage - .write(&len_key, code_len.clone()) - .unwrap(); + env.state.write_bytes(&key, &code).unwrap(); + env.state.write_bytes(&len_key, &code_len).unwrap(); }); let mut tx = Tx::new(ChainId::default(), None); tx.add_code_from_hash(code_hash, None) @@ -704,11 +675,8 @@ mod tests { // store wasm codes let key = Key::wasm_code(&code_hash); let len_key = Key::wasm_code_len(&code_hash); - env.wl_storage.storage.write(&key, code.clone()).unwrap(); - env.wl_storage - .storage - .write(&len_key, code_len.clone()) - .unwrap(); + env.state.write(&key, &code).unwrap(); + env.state.write(&len_key, &code_len).unwrap(); }); let mut tx = Tx::new(ChainId::default(), None); tx.add_code_from_hash(code_hash, None) @@ -754,12 +722,12 @@ mod tests { // Commit env.commit_tx_and_block(); // update the block height for the following client update - env.wl_storage - .storage + env.state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); - env.wl_storage - .storage + env.state + .in_mem_mut() .set_header(tm_dummy_header()) .unwrap(); @@ -801,10 
+769,7 @@ mod tests { let (client_id, client_state, writes) = ibc::prepare_client(); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }); }); @@ -830,12 +795,12 @@ mod tests { // Commit env.commit_tx_and_block(); // for the next block - env.wl_storage - .storage + env.state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); - env.wl_storage - .storage + env.state + .in_mem_mut() .set_header(tm_dummy_header()) .unwrap(); tx_host_env::set(env); @@ -878,10 +843,7 @@ mod tests { let (client_id, client_state, writes) = ibc::prepare_client(); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }) }); @@ -907,12 +869,12 @@ mod tests { // Commit env.commit_tx_and_block(); // for the next block - env.wl_storage - .storage + env.state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); - env.wl_storage - .storage + env.state + .in_mem_mut() .set_header(tm_dummy_header()) .unwrap(); tx_host_env::set(env); @@ -956,10 +918,7 @@ mod tests { writes.extend(conn_writes); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }); }); @@ -986,12 +945,12 @@ mod tests { // Commit env.commit_tx_and_block(); // for the next block - env.wl_storage - .storage + env.state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); - env.wl_storage - .storage + env.state + .in_mem_mut() .set_header(tm_dummy_header()) .unwrap(); tx_host_env::set(env); @@ -1029,10 +988,7 @@ mod tests { writes.extend(conn_writes); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }); }); @@ -1065,12 +1021,12 @@ mod tests { // Commit env.commit_tx_and_block(); // for the next block - env.wl_storage - .storage + env.state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); - env.wl_storage - .storage + env.state + .in_mem_mut() .set_header(tm_dummy_header()) .unwrap(); tx_host_env::set(env); @@ -1112,10 +1068,7 @@ mod tests { writes.extend(channel_writes); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }); }); @@ -1169,10 +1122,7 @@ mod tests { writes.extend(channel_writes); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }); }); @@ -1219,10 +1169,7 @@ mod tests { writes.extend(channel_writes); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }); }); @@ -1267,12 +1214,12 @@ mod tests { // Commit env.commit_tx_and_block(); // for the next block - env.wl_storage - .storage + env.state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); - env.wl_storage - .storage + 
env.state + .in_mem_mut() .set_header(tm_dummy_header()) .unwrap(); tx_host_env::set(env); @@ -1306,7 +1253,7 @@ mod tests { tx_host_env::set(env); let balance_key = token::storage_key::balance_key(&token, &sender); let balance: Option = tx_host_env::with(|env| { - env.wl_storage.read(&balance_key).expect("read error") + env.state.read(&balance_key).expect("read error") }); assert_eq!( balance, @@ -1317,7 +1264,7 @@ mod tests { &address::Address::Internal(address::InternalAddress::Ibc), ); let escrow: Option = tx_host_env::with(|env| { - env.wl_storage.read(&escrow_key).expect("read error") + env.state.read(&escrow_key).expect("read error") }); assert_eq!( escrow, @@ -1359,10 +1306,7 @@ mod tests { ); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }); }); @@ -1393,11 +1337,11 @@ mod tests { // Check the balance tx_host_env::set(env); let balance: Option = tx_host_env::with(|env| { - env.wl_storage.read(&balance_key).expect("read error") + env.state.read(&balance_key).expect("read error") }); assert_eq!(balance, Some(Amount::from_u64(0))); let minted: Option = tx_host_env::with(|env| { - env.wl_storage.read(&minted_key).expect("read error") + env.state.read(&minted_key).expect("read error") }); assert_eq!(minted, Some(Amount::from_u64(0))); } @@ -1424,10 +1368,7 @@ mod tests { writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }); }); @@ -1469,12 +1410,11 @@ mod tests { // Check the balance tx_host_env::set(env); let key = ibc::balance_key_with_ibc_prefix(denom, &receiver); - let balance: Option = tx_host_env::with(|env| { - env.wl_storage.read(&key).expect("read error") - }); + let balance: Option = + tx_host_env::with(|env| env.state.read(&key).expect("read error")); assert_eq!(balance, Some(Amount::from_u64(100))); let minted: Option = tx_host_env::with(|env| { - env.wl_storage.read(&minted_key).expect("read error") + env.state.read(&minted_key).expect("read error") }); assert_eq!(minted, Some(Amount::from_u64(100))); } @@ -1501,10 +1441,7 @@ mod tests { writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }); }); @@ -1542,10 +1479,7 @@ mod tests { tx_host_env::set(env); let ack_key = ibc_storage::ack_key(&port_id, &channel_id, sequence); let ack = tx_host_env::with(|env| { - env.wl_storage - .read_bytes(&ack_key) - .expect("read error") - .unwrap() + env.state.read_bytes(&ack_key).expect("read error").unwrap() }); let expected_ack = Hash::sha256(Vec::::from(ibc::transfer_ack_with_error())) @@ -1555,8 +1489,8 @@ mod tests { let receipt_key = ibc_storage::receipt_key(&port_id, &channel_id, sequence); let changed_keys = tx_host_env::with(|env| { - env.wl_storage - .write_log + env.state + .write_log() .verifiers_and_changed_keys(&BTreeSet::new()) .1 }); @@ -1585,10 +1519,7 @@ mod tests { writes.extend(channel_writes); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }); }); // escrow in advance @@ -1596,14 +1527,9 @@ mod tests { &token, 
&address::Address::Internal(address::InternalAddress::Ibc), ); - let val = Amount::from_uint(100, ibc::ANY_DENOMINATION) - .unwrap() - .serialize_to_vec(); + let val = Amount::from_uint(100, ibc::ANY_DENOMINATION).unwrap(); tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&escrow_key, &val) - .expect("write error"); + env.state.write(&escrow_key, val).expect("write error"); }); // Set this chain as the source zone @@ -1648,15 +1574,14 @@ mod tests { // Check the balance tx_host_env::set(env); let key = token::storage_key::balance_key(&token, &receiver); - let balance: Option = tx_host_env::with(|env| { - env.wl_storage.read(&key).expect("read error") - }); + let balance: Option = + tx_host_env::with(|env| env.state.read(&key).expect("read error")); assert_eq!( balance, Some(Amount::from_uint(200, ibc::ANY_DENOMINATION).unwrap()) ); let escrow: Option = tx_host_env::with(|env| { - env.wl_storage.read(&escrow_key).expect("read error") + env.state.read(&escrow_key).expect("read error") }); assert_eq!( escrow, @@ -1685,10 +1610,7 @@ mod tests { writes.extend(channel_writes); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }); }); // escrow in advance @@ -1700,12 +1622,9 @@ mod tests { denom, &address::Address::Internal(address::InternalAddress::Ibc), ); - let val = Amount::from_u64(100).serialize_to_vec(); + let val = Amount::from_u64(100); tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&escrow_key, &val) - .expect("write error"); + env.state.write(&escrow_key, val).expect("write error"); }); // Set this chain as the source zone @@ -1756,12 +1675,11 @@ mod tests { let denom = format!("{}/{}/{}", dummy_src_port, dummy_src_channel, token); let key = ibc::balance_key_with_ibc_prefix(denom, &receiver); - let balance: Option = tx_host_env::with(|env| { - env.wl_storage.read(&key).expect("read error") - }); + let balance: Option = + tx_host_env::with(|env| env.state.read(&key).expect("read error")); assert_eq!(balance, Some(Amount::from_u64(100))); let escrow: Option = tx_host_env::with(|env| { - env.wl_storage.read(&escrow_key).expect("read error") + env.state.read(&escrow_key).expect("read error") }); assert_eq!(escrow, Some(Amount::from_u64(0))); } @@ -1787,10 +1705,7 @@ mod tests { writes.extend(channel_writes); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }) }); @@ -1812,12 +1727,12 @@ mod tests { let mut env = tx_host_env::take(); env.commit_tx_and_block(); // for the next block - env.wl_storage - .storage + env.state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); - env.wl_storage - .storage + env.state + .in_mem_mut() .set_header(tm_dummy_header()) .unwrap(); tx_host_env::set(env); @@ -1877,10 +1792,7 @@ mod tests { writes.extend(channel_writes); writes.into_iter().for_each(|(key, val)| { tx_host_env::with(|env| { - env.wl_storage - .storage - .write(&key, &val) - .expect("write error"); + env.state.write_bytes(&key, &val).expect("write error"); }) }); @@ -1901,12 +1813,12 @@ mod tests { let mut env = tx_host_env::take(); env.commit_tx_and_block(); // for the next block - env.wl_storage - .storage + env.state + .in_mem_mut() .begin_block(BlockHash::default(), BlockHeight(2)) .unwrap(); - env.wl_storage - .storage + env.state 
+ .in_mem_mut() .set_header(tm_dummy_header()) .unwrap(); tx_host_env::set(env); diff --git a/crates/tests/src/vm_host_env/tx.rs b/crates/tests/src/vm_host_env/tx.rs index 25fc05fbb1..4488ebdbd7 100644 --- a/crates/tests/src/vm_host_env/tx.rs +++ b/crates/tests/src/vm_host_env/tx.rs @@ -1,4 +1,5 @@ use std::borrow::Borrow; +use std::cell::RefCell; use std::collections::BTreeSet; use namada::core::address::Address; @@ -8,9 +9,7 @@ use namada::core::time::DurationSecs; use namada::ledger::gas::TxGasMeter; use namada::ledger::parameters::{self, EpochDuration}; use namada::ledger::storage::mockdb::MockDB; -use namada::ledger::storage::testing::TestStorage; -use namada::ledger::storage::write_log::WriteLog; -use namada::ledger::storage::{Sha256Hasher, WlStorage}; +use namada::ledger::storage::testing::TestState; pub use namada::tx::data::TxType; use namada::tx::Tx; use namada::vm::prefix_iter::PrefixIterators; @@ -47,11 +46,11 @@ pub mod tx_host_env { /// Host environment structures required for transactions. #[derive(Debug)] pub struct TestTxEnv { - pub wl_storage: WlStorage, + pub state: TestState, pub iterators: PrefixIterators<'static, MockDB>, pub verifiers: BTreeSet
, - pub gas_meter: TxGasMeter, - pub sentinel: TxSentinel, + pub gas_meter: RefCell, + pub sentinel: RefCell, pub tx_index: TxIndex, pub result_buffer: Option>, pub vp_wasm_cache: VpCache, @@ -66,17 +65,16 @@ impl Default for TestTxEnv { wasm::compilation_cache::common::testing::cache(); let (tx_wasm_cache, tx_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let wl_storage = WlStorage { - storage: TestStorage::default(), - write_log: WriteLog::default(), - }; + let state = TestState::default(); let mut tx = Tx::from_type(TxType::Raw); - tx.header.chain_id = wl_storage.storage.chain_id.clone(); + tx.header.chain_id = state.in_mem().chain_id.clone(); Self { - wl_storage, + state, iterators: PrefixIterators::default(), - gas_meter: TxGasMeter::new_from_sub_limit(100_000_000.into()), - sentinel: TxSentinel::default(), + gas_meter: RefCell::new(TxGasMeter::new_from_sub_limit( + 100_000_000.into(), + )), + sentinel: RefCell::new(TxSentinel::default()), tx_index: TxIndex::default(), verifiers: BTreeSet::default(), result_buffer: None, @@ -91,12 +89,12 @@ impl Default for TestTxEnv { impl TestTxEnv { pub fn all_touched_storage_keys(&self) -> BTreeSet { - self.wl_storage.write_log.get_keys() + self.state.write_log().get_keys() } pub fn get_verifiers(&self) -> BTreeSet
{ - self.wl_storage - .write_log + self.state + .write_log() .verifiers_and_changed_keys(&self.verifiers) .0 } @@ -109,7 +107,7 @@ impl TestTxEnv { max_signatures_per_transaction: Option, ) { parameters::update_epoch_parameter( - &mut self.wl_storage, + &mut self.state, &epoch_duration.unwrap_or(EpochDuration { min_num_of_blocks: 1, min_duration: DurationSecs(5), @@ -117,17 +115,17 @@ impl TestTxEnv { ) .unwrap(); parameters::update_tx_allowlist_parameter( - &mut self.wl_storage, + &mut self.state, tx_allowlist.unwrap_or_default(), ) .unwrap(); parameters::update_vp_allowlist_parameter( - &mut self.wl_storage, + &mut self.state, vp_allowlist.unwrap_or_default(), ) .unwrap(); parameters::update_max_signature_per_tx( - &mut self.wl_storage, + &mut self.state, max_signatures_per_transaction.unwrap_or(15), ) .unwrap(); @@ -136,7 +134,7 @@ impl TestTxEnv { pub fn store_wasm_code(&mut self, code: Vec) { let hash = Hash::sha256(&code); let key = Key::wasm_code(&hash); - self.wl_storage.storage.write(&key, code).unwrap(); + self.state.db_write(&key, code).unwrap(); } /// Fake accounts' existence by initializing their VP storage. @@ -158,9 +156,8 @@ impl TestTxEnv { } let key = Key::validity_predicate(address.borrow()); let vp_code = vec![]; - self.wl_storage - .storage - .write(&key, vp_code) + self.state + .db_write(&key, vp_code) .expect("Unable to write VP"); } } @@ -172,7 +169,7 @@ impl TestTxEnv { threshold: u8, ) { account::init_account_storage( - &mut self.wl_storage, + &mut self.state, owner, &public_keys, threshold, @@ -187,21 +184,20 @@ impl TestTxEnv { threshold: u8, ) { let storage_key = account::threshold_key(address); - self.wl_storage - .storage - .write(&storage_key, threshold.serialize_to_vec()) + self.state + .db_write(&storage_key, threshold.serialize_to_vec()) .unwrap(); } /// Commit the genesis state. Typically, you'll want to call this after /// setting up the initial state, before running a transaction. pub fn commit_genesis(&mut self) { - self.wl_storage.commit_block().unwrap(); + self.state.commit_block().unwrap(); } pub fn commit_tx_and_block(&mut self) { - self.wl_storage.commit_tx(); - self.wl_storage + self.state.commit_tx(); + self.state .commit_block() .map_err(|err| println!("{:?}", err)) .ok(); @@ -217,18 +213,16 @@ impl TestTxEnv { amount: token::Amount, ) { let storage_key = token::storage_key::balance_key(token, target); - self.wl_storage - .storage - .write(&storage_key, amount.serialize_to_vec()) + self.state + .db_write(&storage_key, amount.serialize_to_vec()) .unwrap(); } /// Apply the tx changes to the write log. pub fn execute_tx(&mut self) -> Result<(), Error> { wasm::run::tx( - &self.wl_storage.storage, - &mut self.wl_storage.write_log, - &mut self.gas_meter, + &mut self.state, + &self.gas_meter, &self.tx_index, &self.tx, &mut self.vp_wasm_cache, @@ -316,14 +310,14 @@ mod native_tx_host_env { /// changes. pub fn set_from_vp_env(vp_env: TestVpEnv) { let TestVpEnv { - wl_storage, + state, tx, vp_wasm_cache, vp_cache_dir, .. 
} = vp_env; let tx_env = TestTxEnv { - wl_storage, + state, vp_wasm_cache, vp_cache_dir, tx, @@ -342,7 +336,7 @@ mod native_tx_host_env { #[no_mangle] extern "C" fn extern_fn_name( $($arg: $type),* ) { with(|TestTxEnv { - wl_storage, + state, iterators, verifiers, gas_meter, @@ -357,8 +351,7 @@ mod native_tx_host_env { }: &mut TestTxEnv| { let tx_env = vm::host_env::testing::tx_env( - &wl_storage.storage, - &mut wl_storage.write_log, + state, iterators, verifiers, gas_meter, @@ -385,7 +378,7 @@ mod native_tx_host_env { extern "C" fn extern_fn_name( $($arg: $type),* ) -> $ret { with(|TestTxEnv { tx_index, - wl_storage, + state, iterators, verifiers, gas_meter, @@ -399,8 +392,7 @@ mod native_tx_host_env { }: &mut TestTxEnv| { let tx_env = vm::host_env::testing::tx_env( - &wl_storage.storage, - &mut wl_storage.write_log, + state, iterators, verifiers, gas_meter, @@ -426,7 +418,7 @@ mod native_tx_host_env { #[no_mangle] extern "C" fn extern_fn_name( $($arg: $type),* ) { with(|TestTxEnv { - wl_storage, + state, iterators, verifiers, gas_meter, @@ -441,8 +433,7 @@ mod native_tx_host_env { }: &mut TestTxEnv| { let tx_env = vm::host_env::testing::tx_env( - &wl_storage.storage, - &mut wl_storage.write_log, + state, iterators, verifiers, gas_meter, @@ -528,6 +519,8 @@ mod tests { use namada::ledger::storage::mockdb::MockDB; use namada::vm::host_env::{self, TxVmEnv}; use namada::vm::memory::VmMemory; + use namada_core::hash::Sha256Hasher; + use namada_tx_prelude::StorageWrite; use proptest::prelude::*; use test_log::test; @@ -715,20 +708,19 @@ mod tests { if setup.write_to_storage { // Write the key-val to storage which may affect `tx_read` execution // path - let _res = - test_env.wl_storage.storage.write(&setup.key, &setup.val); + let _res = test_env.state.write_bytes(&setup.key, &setup.val); } if setup.write_to_wl { // Write the key-val to write log which may affect `tx_read` // execution path let _res = test_env - .wl_storage - .write_log + .state + .write_log_mut() .write(&setup.key, setup.val.clone()); } let TestTxEnv { - wl_storage, + state, iterators, verifiers, gas_meter, @@ -743,8 +735,7 @@ mod tests { } = test_env; let tx_env = vm::host_env::testing::tx_env_with_wasm_memory( - &wl_storage.storage, - &mut wl_storage.write_log, + state, iterators, verifiers, gas_meter, diff --git a/crates/tests/src/vm_host_env/vp.rs b/crates/tests/src/vm_host_env/vp.rs index c99e3dea9d..44c1377e4d 100644 --- a/crates/tests/src/vm_host_env/vp.rs +++ b/crates/tests/src/vm_host_env/vp.rs @@ -1,3 +1,4 @@ +use std::cell::RefCell; use std::collections::BTreeSet; use namada::core::address::{self, Address}; @@ -5,9 +6,7 @@ use namada::core::storage::{self, Key, TxIndex}; use namada::gas::TxGasMeter; use namada::ledger::gas::VpGasMeter; use namada::ledger::storage::mockdb::MockDB; -use namada::ledger::storage::testing::TestStorage; -use namada::ledger::storage::write_log::WriteLog; -use namada::ledger::storage::{Sha256Hasher, WlStorage}; +use namada::ledger::storage::testing::TestState; use namada::tx::data::TxType; use namada::tx::Tx; use namada::vm::prefix_iter::PrefixIterators; @@ -42,10 +41,10 @@ pub mod vp_host_env { #[derive(Debug)] pub struct TestVpEnv { pub addr: Address, - pub wl_storage: WlStorage, + pub state: TestState, pub iterators: PrefixIterators<'static, MockDB>, - pub gas_meter: VpGasMeter, - pub sentinel: VpSentinel, + pub gas_meter: RefCell, + pub sentinel: RefCell, pub tx: Tx, pub tx_index: TxIndex, pub keys_changed: BTreeSet, @@ -66,20 +65,17 @@ impl Default for TestVpEnv { let (vp_wasm_cache, 
vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let wl_storage = WlStorage { - storage: TestStorage::default(), - write_log: WriteLog::default(), - }; + let state = TestState::default(); let mut tx = Tx::from_type(TxType::Raw); - tx.header.chain_id = wl_storage.storage.chain_id.clone(); + tx.header.chain_id = state.in_mem().chain_id.clone(); Self { addr: address::testing::established_address_1(), - wl_storage, + state, iterators: PrefixIterators::default(), - gas_meter: VpGasMeter::new_from_tx_meter( + gas_meter: RefCell::new(VpGasMeter::new_from_tx_meter( &TxGasMeter::new_from_sub_limit(10_000_000_000.into()), - ), - sentinel: VpSentinel::default(), + )), + sentinel: RefCell::new(VpSentinel::default()), tx, tx_index: TxIndex::default(), keys_changed: BTreeSet::default(), @@ -94,12 +90,12 @@ impl Default for TestVpEnv { impl TestVpEnv { pub fn all_touched_storage_keys(&self) -> BTreeSet { - self.wl_storage.write_log.get_keys() + self.state.write_log().get_keys() } pub fn get_verifiers(&self) -> BTreeSet
{ - self.wl_storage - .write_log + self.state + .write_log() .verifiers_and_changed_keys(&self.verifiers) .0 } @@ -116,7 +112,7 @@ mod native_vp_host_env { // TODO replace with `std::concat_idents` once stabilized (https://github.com/rust-lang/rust/issues/29599) use concat_idents::concat_idents; - use namada::state::Sha256Hasher; + use namada::state::StateRead; use namada::vm::host_env::*; use namada::vm::WasmCacheRwAccess; @@ -124,8 +120,8 @@ mod native_vp_host_env { #[cfg(feature = "wasm-runtime")] pub type VpEval = namada::vm::wasm::run::VpEvalWasm< - MockDB, - Sha256Hasher, + ::D, + ::H, WasmCacheRwAccess, >; #[cfg(not(feature = "wasm-runtime"))] @@ -197,7 +193,7 @@ mod native_vp_host_env { // Write an empty validity predicate for the address, because it's used // to check if the address exists when we write into its storage let vp_key = Key::validity_predicate(&addr); - tx_env.wl_storage.storage.write(&vp_key, vec![]).unwrap(); + tx_env.state.db_write(&vp_key, vec![]).unwrap(); tx_host_env::set(tx_env); apply_tx(&addr); @@ -205,8 +201,8 @@ mod native_vp_host_env { let tx_env = tx_host_env::take(); let verifiers_from_tx = &tx_env.verifiers; let (verifiers, keys_changed) = tx_env - .wl_storage - .write_log + .state + .write_log() .verifiers_and_changed_keys(verifiers_from_tx); if !verifiers.contains(&addr) { panic!( @@ -218,7 +214,7 @@ mod native_vp_host_env { let vp_env = TestVpEnv { addr, - wl_storage: tx_env.wl_storage, + state: tx_env.state, keys_changed, verifiers, ..Default::default() @@ -258,7 +254,7 @@ mod native_vp_host_env { extern "C" fn extern_fn_name( $($arg: $type),* ) { with(|TestVpEnv { addr, - wl_storage, + state, iterators, gas_meter, sentinel, @@ -274,8 +270,7 @@ mod native_vp_host_env { let env = vm::host_env::testing::vp_env( addr, - &wl_storage.storage, - &wl_storage.write_log, + state, iterators, gas_meter, sentinel, @@ -303,7 +298,7 @@ mod native_vp_host_env { extern "C" fn extern_fn_name( $($arg: $type),* ) -> $ret { with(|TestVpEnv { addr, - wl_storage, + state, iterators, gas_meter, sentinel, @@ -319,8 +314,7 @@ mod native_vp_host_env { let env = vm::host_env::testing::vp_env( addr, - &wl_storage.storage, - &wl_storage.write_log, + state, iterators, gas_meter, sentinel, diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index feeef48f40..29a7e0fbf7 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -3511,11 +3511,12 @@ dependencies = [ "ics23", "masp_primitives", "namada_core", + "namada_gas", "namada_governance", "namada_parameters", "namada_state", "namada_storage", - "namada_trans_token", + "namada_token", "primitive-types", "proptest", "prost 0.12.3", diff --git a/wasm/wasm_source/src/tx_bond.rs b/wasm/wasm_source/src/tx_bond.rs index 68283ee00a..535dd37515 100644 --- a/wasm/wasm_source/src/tx_bond.rs +++ b/wasm/wasm_source/src/tx_bond.rs @@ -18,16 +18,19 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { #[cfg(test)] mod tests { + use std::cell::RefCell; use std::collections::BTreeSet; use namada::core::dec::Dec; use namada::core::storage::Epoch; + use namada::ledger::gas::VpGasMeter; use namada::ledger::pos::{OwnedPosParams, PosVP}; use namada::proof_of_stake::storage::{ bond_handle, read_consensus_validator_set_addresses_with_stake, read_total_stake, read_validator_stake, }; use namada::proof_of_stake::types::{GenesisValidator, WeightedValidator}; + use namada::validity_predicate::VpSentinel; use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; use namada_tests::native_vp::TestNativeVpEnv; @@ -107,7 +110,7 @@ mod tests { 
// Ensure that the bond's source has enough tokens for the bond let target = bond.source.as_ref().unwrap_or(&bond.validator); - let native_token = tx_env.wl_storage.storage.native_token.clone(); + let native_token = tx_env.state.in_mem().native_token.clone(); tx_env.credit_tokens(target, &native_token, bond.amount); native_token }); @@ -327,8 +330,12 @@ mod tests { // Use the tx_env to run PoS VP let tx_env = tx_host_env::take(); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &tx_env.gas_meter.borrow(), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp_env = TestNativeVpEnv::from_tx_env(tx_env, address::POS); - let result = vp_env.validate_tx(PosVP::new); + let result = vp_env.validate_tx(&gas_meter, &sentinel, PosVP::new); let result = result.expect("Validation of valid changes must not fail!"); assert!( diff --git a/wasm/wasm_source/src/tx_change_validator_commission.rs b/wasm/wasm_source/src/tx_change_validator_commission.rs index c081651b90..b31c981d8d 100644 --- a/wasm/wasm_source/src/tx_change_validator_commission.rs +++ b/wasm/wasm_source/src/tx_change_validator_commission.rs @@ -20,13 +20,16 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { #[cfg(test)] mod tests { + use std::cell::RefCell; use std::cmp; use namada::core::dec::{Dec, POS_DECIMAL_PRECISION}; use namada::core::storage::Epoch; + use namada::ledger::gas::VpGasMeter; use namada::ledger::pos::{OwnedPosParams, PosVP}; use namada::proof_of_stake::storage::validator_commission_rate_handle; use namada::proof_of_stake::types::GenesisValidator; + use namada::validity_predicate::VpSentinel; use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; use namada_tests::native_vp::TestNativeVpEnv; @@ -152,8 +155,12 @@ mod tests { // Use the tx_env to run PoS VP let tx_env = tx_host_env::take(); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &tx_env.gas_meter.borrow(), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp_env = TestNativeVpEnv::from_tx_env(tx_env, address::POS); - let result = vp_env.validate_tx(PosVP::new); + let result = vp_env.validate_tx(&gas_meter, &sentinel, PosVP::new); let result = result.expect("Validation of valid changes must not fail!"); assert!( diff --git a/wasm/wasm_source/src/tx_redelegate.rs b/wasm/wasm_source/src/tx_redelegate.rs index a00f533ab0..b07032f574 100644 --- a/wasm/wasm_source/src/tx_redelegate.rs +++ b/wasm/wasm_source/src/tx_redelegate.rs @@ -22,16 +22,19 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { #[cfg(test)] mod tests { + use std::cell::RefCell; use std::collections::BTreeSet; use namada::core::dec::Dec; use namada::core::storage::Epoch; + use namada::ledger::gas::VpGasMeter; use namada::ledger::pos::{OwnedPosParams, PosVP}; use namada::proof_of_stake::storage::{ bond_handle, read_consensus_validator_set_addresses_with_stake, read_total_stake, read_validator_stake, unbond_handle, }; use namada::proof_of_stake::types::{GenesisValidator, WeightedValidator}; + use namada::validity_predicate::VpSentinel; use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; use namada_tests::native_vp::TestNativeVpEnv; @@ -116,7 +119,7 @@ mod tests { init_pos(&genesis_validators[..], &pos_params, Epoch(0)); let native_token = tx_host_env::with(|tx_env| { - let native_token = tx_env.wl_storage.storage.native_token.clone(); + let native_token = tx_env.state.in_mem().native_token.clone(); let owner = &redelegation.owner; tx_env.spawn_accounts([owner]); @@ -362,8 +365,12 @@ mod tests { // Use 
the tx_env to run PoS VP let tx_env = tx_host_env::take(); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &tx_env.gas_meter.borrow(), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp_env = TestNativeVpEnv::from_tx_env(tx_env, address::POS); - let result = vp_env.validate_tx(PosVP::new); + let result = vp_env.validate_tx(&gas_meter, &sentinel, PosVP::new); let result = result.expect("Validation of valid changes must not fail!"); assert!( diff --git a/wasm/wasm_source/src/tx_unbond.rs b/wasm/wasm_source/src/tx_unbond.rs index 135b59be69..2ba7bda8c5 100644 --- a/wasm/wasm_source/src/tx_unbond.rs +++ b/wasm/wasm_source/src/tx_unbond.rs @@ -25,16 +25,19 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { #[cfg(test)] mod tests { + use std::cell::RefCell; use std::collections::BTreeSet; use namada::core::dec::Dec; use namada::core::storage::Epoch; + use namada::ledger::gas::VpGasMeter; use namada::ledger::pos::{OwnedPosParams, PosVP}; use namada::proof_of_stake::storage::{ bond_handle, read_consensus_validator_set_addresses_with_stake, read_total_stake, read_validator_stake, unbond_handle, }; use namada::proof_of_stake::types::{GenesisValidator, WeightedValidator}; + use namada::validity_predicate::VpSentinel; use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; use namada_tests::native_vp::TestNativeVpEnv; @@ -114,7 +117,7 @@ mod tests { init_pos(&genesis_validators[..], &pos_params, Epoch(0)); let native_token = tx_host_env::with(|tx_env| { - let native_token = tx_env.wl_storage.storage.native_token.clone(); + let native_token = tx_env.state.in_mem().native_token.clone(); if is_delegation { let source = unbond.source.as_ref().unwrap(); tx_env.spawn_accounts([source]); @@ -340,8 +343,12 @@ mod tests { // Use the tx_env to run PoS VP let tx_env = tx_host_env::take(); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &tx_env.gas_meter.borrow(), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp_env = TestNativeVpEnv::from_tx_env(tx_env, address::POS); - let result = vp_env.validate_tx(PosVP::new); + let result = vp_env.validate_tx(&gas_meter, &sentinel, PosVP::new); let result = result.expect("Validation of valid changes must not fail!"); assert!( diff --git a/wasm/wasm_source/src/tx_withdraw.rs b/wasm/wasm_source/src/tx_withdraw.rs index c6578ae3a3..4c29e4afc7 100644 --- a/wasm/wasm_source/src/tx_withdraw.rs +++ b/wasm/wasm_source/src/tx_withdraw.rs @@ -23,11 +23,15 @@ fn apply_tx(ctx: &mut Ctx, tx_data: Tx) -> TxResult { #[cfg(test)] mod tests { + use std::cell::RefCell; + use namada::core::dec::Dec; use namada::core::storage::Epoch; + use namada::ledger::gas::VpGasMeter; use namada::ledger::pos::{OwnedPosParams, PosVP}; use namada::proof_of_stake::storage::unbond_handle; use namada::proof_of_stake::types::GenesisValidator; + use namada::validity_predicate::VpSentinel; use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; use namada_tests::native_vp::TestNativeVpEnv; @@ -113,7 +117,7 @@ mod tests { init_pos(&genesis_validators[..], &pos_params, Epoch(0)); let native_token = tx_host_env::with(|tx_env| { - let native_token = tx_env.wl_storage.storage.native_token.clone(); + let native_token = tx_env.state.in_mem().native_token.clone(); if is_delegation { let source = withdraw.source.as_ref().unwrap(); tx_env.spawn_accounts([source]); @@ -154,8 +158,8 @@ mod tests { + pos_params.unbonding_len + pos_params.cubic_slashing_window_length) { - env.wl_storage.storage.block.epoch = - 
env.wl_storage.storage.block.epoch.next(); + env.state.in_mem_mut().block.epoch = + env.state.in_mem().block.epoch.next(); } }); let bond_epoch = if is_delegation { @@ -170,7 +174,7 @@ mod tests { ); assert_eq!( - tx_host_env::with(|env| env.wl_storage.storage.block.epoch), + tx_host_env::with(|env| env.state.in_mem().block.epoch), Epoch( pos_params.pipeline_len + pos_params.unbonding_len @@ -224,8 +228,12 @@ mod tests { // Use the tx_env to run PoS VP let tx_env = tx_host_env::take(); + let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter( + &tx_env.gas_meter.borrow(), + )); + let sentinel = RefCell::new(VpSentinel::default()); let vp_env = TestNativeVpEnv::from_tx_env(tx_env, address::POS); - let result = vp_env.validate_tx(PosVP::new); + let result = vp_env.validate_tx(&gas_meter, &sentinel, PosVP::new); let result = result.expect("Validation of valid changes must not fail!"); assert!( diff --git a/wasm/wasm_source/src/vp_implicit.rs b/wasm/wasm_source/src/vp_implicit.rs index 0f47ddbb19..6ac6a9d735 100644 --- a/wasm/wasm_source/src/vp_implicit.rs +++ b/wasm/wasm_source/src/vp_implicit.rs @@ -439,7 +439,7 @@ mod tests { tx_env.spawn_accounts([&vp_owner, &source, &token]); // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) @@ -531,7 +531,7 @@ mod tests { tx_env.credit_tokens(&vp_owner, &token, amount); // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) @@ -615,7 +615,7 @@ mod tests { tx_env.credit_tokens(&vp_owner, &token, amount); // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) @@ -678,7 +678,7 @@ mod tests { tx_env.credit_tokens(&vp_owner, &token, amount); // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) @@ -739,7 +739,7 @@ mod tests { tx_env.credit_tokens(&vp_owner, &token, amount); // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) @@ -810,7 +810,7 @@ mod tests { tx_env.credit_tokens(&source, &token, amount); // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) diff --git a/wasm/wasm_source/src/vp_user.rs b/wasm/wasm_source/src/vp_user.rs index ddaf25caee..655d73ccd6 100644 --- a/wasm/wasm_source/src/vp_user.rs +++ b/wasm/wasm_source/src/vp_user.rs @@ -388,7 +388,7 @@ mod tests { tx_env.credit_tokens(&source, &token, amount); // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) @@ -439,7 +439,7 @@ mod tests { tx_env.spawn_accounts([&vp_owner, &target, &token]); // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) @@ -501,7 +501,7 @@ mod tests { tx_env.credit_tokens(&vp_owner, &token, amount); // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) @@ -594,7 +594,7 @@ mod tests { 
tx_env.init_account_storage(&vp_owner, vec![public_key], 1); // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) @@ -758,7 +758,7 @@ mod tests { tx_env.init_account_storage(&validator, vec![public_key], 1); // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) @@ -855,7 +855,7 @@ mod tests { // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) @@ -1044,7 +1044,7 @@ mod tests { // write the denomination of NAM into storage token::write_denom( - &mut tx_env.wl_storage, + &mut tx_env.state, &token, token::NATIVE_MAX_DECIMAL_PLACES.into(), ) diff --git a/wasm_for_tests/wasm_source/Cargo.lock b/wasm_for_tests/wasm_source/Cargo.lock index 762a63baeb..1b7d06ad1d 100644 --- a/wasm_for_tests/wasm_source/Cargo.lock +++ b/wasm_for_tests/wasm_source/Cargo.lock @@ -3511,11 +3511,12 @@ dependencies = [ "ics23", "masp_primitives", "namada_core", + "namada_gas", "namada_governance", "namada_parameters", "namada_state", "namada_storage", - "namada_trans_token", + "namada_token", "primitive-types", "proptest", "prost 0.12.3",
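
For reference, the calling convention that the VP test hunks above repeat (gas meter and sentinel owned by the caller, wrapped in RefCell, and handed to validate_tx) can be summarized in one place. The following is a minimal sketch distilled from those hunks and is not part of the patch; import paths (namada_tests::native_vp, namada::validity_predicate, etc.) and the TestTxEnv/TestNativeVpEnv helpers are assumed to be exactly as they appear in the changes above and may need adjusting in a real test module.

    use std::cell::RefCell;

    use namada::core::address;
    use namada::ledger::gas::VpGasMeter;
    use namada::ledger::pos::PosVP;
    use namada::validity_predicate::VpSentinel;
    use namada_tests::native_vp::TestNativeVpEnv;
    use namada_tests::tx::TestTxEnv;

    // Sketch: run the PoS native VP against a tx environment that a
    // transaction under test has already populated.
    fn run_pos_vp_sketch(tx_env: TestTxEnv) {
        // The VP gas meter is derived from the tx gas meter, which now
        // lives behind a RefCell on the tx environment.
        let gas_meter = RefCell::new(VpGasMeter::new_from_tx_meter(
            &tx_env.gas_meter.borrow(),
        ));
        // The sentinel records error signals raised during VP execution.
        let sentinel = RefCell::new(VpSentinel::default());
        // Build the native VP environment for the PoS address and validate;
        // validate_tx now takes the gas meter and sentinel by reference.
        let vp_env = TestNativeVpEnv::from_tx_env(tx_env, address::POS);
        let result = vp_env.validate_tx(&gas_meter, &sentinel, PosVP::new);
        assert!(result.expect("Validation of valid changes must not fail!"));
    }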