diff --git a/.changelog/unreleased/features/503-lazy-vec-and-map.md b/.changelog/unreleased/features/503-lazy-vec-and-map.md new file mode 100644 index 0000000000..d29ee5fd9c --- /dev/null +++ b/.changelog/unreleased/features/503-lazy-vec-and-map.md @@ -0,0 +1,2 @@ +- Added lazy vector and map data structures for ledger storage + ([#503](https://github.com/anoma/namada/pull/503)) \ No newline at end of file diff --git a/.changelog/v0.7.1/improvements/1093-unify-native-and-wasm-vp.md b/.changelog/v0.7.1/improvements/1093-unify-native-and-wasm-vp.md new file mode 100644 index 0000000000..e39308413f --- /dev/null +++ b/.changelog/v0.7.1/improvements/1093-unify-native-and-wasm-vp.md @@ -0,0 +1,3 @@ +- Added WASM transaction and validity predicate `Ctx` with methods for host + environment functions to unify the interface of native VPs and WASM VPs under + `trait VpEnv` ([#1093](https://github.com/anoma/anoma/pull/1093)) \ No newline at end of file diff --git a/.changelog/v0.7.1/improvements/331-common-write-storage-trait.md b/.changelog/v0.7.1/improvements/331-common-write-storage-trait.md new file mode 100644 index 0000000000..2e3e605197 --- /dev/null +++ b/.changelog/v0.7.1/improvements/331-common-write-storage-trait.md @@ -0,0 +1,2 @@ +- Added a StorageWrite trait for a common interface for transactions and direct + storage access for protocol ([#331](https://github.com/anoma/namada/pull/331)) \ No newline at end of file diff --git a/.changelog/v0.7.1/improvements/334-refactor-storage-read-write.md b/.changelog/v0.7.1/improvements/334-refactor-storage-read-write.md new file mode 100644 index 0000000000..0642596268 --- /dev/null +++ b/.changelog/v0.7.1/improvements/334-refactor-storage-read-write.md @@ -0,0 +1,2 @@ +- Re-use encoding/decoding storage write/read and handle any errors + ([#334](https://github.com/anoma/namada/pull/334)) \ No newline at end of file diff --git a/.changelog/v0.7.1/improvements/335-refactor-storage-prefix-iter.md b/.changelog/v0.7.1/improvements/335-refactor-storage-prefix-iter.md new file mode 100644 index 0000000000..d51f6c72f0 --- /dev/null +++ b/.changelog/v0.7.1/improvements/335-refactor-storage-prefix-iter.md @@ -0,0 +1,3 @@ +- Added a simpler prefix iterator API that returns `std::iter::Iterator` with + the storage keys parsed and a variant that also decodes stored values with + Borsh ([#335](https://github.com/anoma/namada/pull/335)) \ No newline at end of file diff --git a/.changelog/v0.7.1/improvements/380-vp-env-pre-post-via-storage-api.md b/.changelog/v0.7.1/improvements/380-vp-env-pre-post-via-storage-api.md new file mode 100644 index 0000000000..655cdf256a --- /dev/null +++ b/.changelog/v0.7.1/improvements/380-vp-env-pre-post-via-storage-api.md @@ -0,0 +1,3 @@ +- Added `pre/post` methods into `trait VpEnv` that return objects implementing + `trait StorageRead` for re-use of library code written on top of `StorageRead` + inside validity predicates. ([#380](https://github.com/anoma/namada/pull/380)) \ No newline at end of file diff --git a/.changelog/v0.7.1/improvements/409-sorted-prefix-iter.md b/.changelog/v0.7.1/improvements/409-sorted-prefix-iter.md new file mode 100644 index 0000000000..2f95505960 --- /dev/null +++ b/.changelog/v0.7.1/improvements/409-sorted-prefix-iter.md @@ -0,0 +1,3 @@ +- Fix order of prefix iterator to be sorted by storage + keys and add support for a reverse order prefix iterator. 
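The two iterator entries above (#335, #409) describe the reworked prefix iteration: storage keys come back parsed and sorted, with an optional reverse-order variant. A minimal sketch of that ordering contract, using a `BTreeMap` as a stand-in for the key-sorted storage backend; `iter_prefix` here is hypothetical, not the actual namada function:

```rust
// A stand-in model of the sorted prefix iterator described in the
// changelog: forward iteration yields entries in ascending key order,
// and the reverse variant yields the same set in descending order.
use std::collections::BTreeMap;

fn iter_prefix<'a>(
    store: &'a BTreeMap<String, u64>,
    prefix: &'a str,
) -> impl DoubleEndedIterator<Item = (&'a String, &'a u64)> {
    store.iter().filter(move |(key, _)| key.starts_with(prefix))
}

fn main() {
    let store: BTreeMap<String, u64> = [
        ("balance/alice".to_string(), 10),
        ("balance/bob".to_string(), 20),
        ("nonce/alice".to_string(), 1),
    ]
    .into_iter()
    .collect();

    // Forward: "balance/alice" then "balance/bob", sorted by storage key.
    for (key, value) in iter_prefix(&store, "balance/") {
        println!("{key} = {value}");
    }

    // Reverse variant: the same entries in descending key order.
    for (key, value) in iter_prefix(&store, "balance/").rev() {
        println!("{key} = {value}");
    }
}
```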
+ ([#409](https://github.com/anoma/namada/issues/409)) \ No newline at end of file diff --git a/.changelog/v0.7.1/improvements/465-vp-tx-env-conrete-error.md b/.changelog/v0.7.1/improvements/465-vp-tx-env-conrete-error.md new file mode 100644 index 0000000000..e40ff76a17 --- /dev/null +++ b/.changelog/v0.7.1/improvements/465-vp-tx-env-conrete-error.md @@ -0,0 +1,2 @@ +- Re-use `storage_api::Error` type that supports wrapping custom error in `VpEnv` and `TxEnv` traits. + ([#465](https://github.com/anoma/namada/pull/465)) diff --git a/Cargo.lock b/Cargo.lock index eafd05b2ff..d325bb309c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4034,12 +4034,12 @@ dependencies = [ "byte-unit", "chrono", "clru", + "data-encoding", "derivative", "ed25519-consensus", "ferveo", "ferveo-common", "group-threshold-cryptography", - "hex", "ibc", "ibc-proto", "ics23", @@ -4098,6 +4098,7 @@ dependencies = [ "color-eyre", "config", "curl", + "data-encoding", "derivative", "directories", "ed25519-consensus", @@ -4108,7 +4109,6 @@ dependencies = [ "flate2", "futures 0.3.21", "git2", - "hex", "itertools 0.10.3", "jsonpath_lib", "libc", @@ -4200,18 +4200,19 @@ dependencies = [ "chrono", "color-eyre", "concat-idents", + "data-encoding", "derivative", "escargot", "expectrl", "eyre", "file-serve", "fs_extra", - "hex", "itertools 0.10.3", "libp2p", "namada", "namada_apps", - "namada_vm_env", + "namada_tx_prelude", + "namada_vp_prelude", "pretty_assertions", "proptest", "prost 0.9.0", @@ -4229,8 +4230,12 @@ dependencies = [ name = "namada_tx_prelude" version = "0.7.1" dependencies = [ + "borsh", + "namada", + "namada_macros", "namada_vm_env", "sha2 0.10.2", + "thiserror", ] [[package]] @@ -4238,17 +4243,19 @@ name = "namada_vm_env" version = "0.7.1" dependencies = [ "borsh", - "hex", "namada", - "namada_macros", ] [[package]] name = "namada_vp_prelude" version = "0.7.1" dependencies = [ + "borsh", + "namada", + "namada_macros", "namada_vm_env", "sha2 0.10.2", + "thiserror", ] [[package]] diff --git a/apps/Cargo.toml b/apps/Cargo.toml index 65d7d84eed..6b30c2ef4c 100644 --- a/apps/Cargo.toml +++ b/apps/Cargo.toml @@ -62,6 +62,7 @@ clap = {git = "https://github.com/clap-rs/clap/", tag = "v3.0.0-beta.2", default color-eyre = "0.5.10" config = "0.11.0" curl = "0.4.43" +data-encoding = "2.3.2" derivative = "2.2.0" directories = "4.0.1" ed25519-consensus = "1.2.0" @@ -71,7 +72,6 @@ eyre = "0.6.5" flate2 = "1.0.22" file-lock = "2.0.2" futures = "0.3" -hex = "0.4.3" itertools = "0.10.1" jsonpath_lib = "0.3.0" libc = "0.2.97" diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index 34652ac825..67e81c1588 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -11,6 +11,7 @@ use async_std::fs::{self}; use async_std::path::PathBuf; use async_std::prelude::*; use borsh::BorshDeserialize; +use data_encoding::HEXLOWER; use itertools::Itertools; use namada::ledger::governance::storage as gov_storage; use namada::ledger::governance::utils::Votes; @@ -81,7 +82,7 @@ pub async fn query_raw_bytes(_ctx: Context, args: args::QueryRawBytes) { .unwrap(); match response.code { Code::Ok => { - println!("{}", hex::encode(&response.value)); + println!("{}", HEXLOWER.encode(&response.value)); } Code::Err(err) => { eprintln!( diff --git a/apps/src/lib/config/genesis.rs b/apps/src/lib/config/genesis.rs index 5bd3dc803f..9c1fff3dcc 100644 --- a/apps/src/lib/config/genesis.rs +++ b/apps/src/lib/config/genesis.rs @@ -26,7 +26,7 @@ pub mod genesis_config { use std::path::Path; use 
std::str::FromStr; - use hex; + use data_encoding::HEXLOWER; use namada::ledger::governance::parameters::GovParams; use namada::ledger::parameters::{EpochDuration, Parameters}; use namada::ledger::pos::types::BasisPoints; @@ -50,12 +50,12 @@ pub mod genesis_config { impl HexString { pub fn to_bytes(&self) -> Result, HexKeyError> { - let bytes = hex::decode(&self.0)?; + let bytes = HEXLOWER.decode(self.0.as_ref())?; Ok(bytes) } pub fn to_sha256_bytes(&self) -> Result<[u8; 32], HexKeyError> { - let bytes = hex::decode(&self.0)?; + let bytes = HEXLOWER.decode(self.0.as_ref())?; let slice = bytes.as_slice(); let array: [u8; 32] = slice.try_into()?; Ok(array) @@ -76,15 +76,15 @@ pub mod genesis_config { #[derive(Error, Debug)] pub enum HexKeyError { #[error("Invalid hex string: {0:?}")] - InvalidHexString(hex::FromHexError), + InvalidHexString(data_encoding::DecodeError), #[error("Invalid sha256 checksum: {0}")] InvalidSha256(TryFromSliceError), #[error("Invalid public key: {0}")] InvalidPublicKey(ParsePublicKeyError), } - impl From for HexKeyError { - fn from(err: hex::FromHexError) -> Self { + impl From for HexKeyError { + fn from(err: data_encoding::DecodeError) -> Self { Self::InvalidHexString(err) } } diff --git a/apps/src/lib/node/gossip/p2p/identity.rs b/apps/src/lib/node/gossip/p2p/identity.rs index 42442054c8..3227060014 100644 --- a/apps/src/lib/node/gossip/p2p/identity.rs +++ b/apps/src/lib/node/gossip/p2p/identity.rs @@ -21,6 +21,7 @@ pub struct Identity { // TODO this is needed because libp2p does not export ed255519 serde // feature maybe a MR for libp2p to export theses functions ? mod keypair_serde { + use data_encoding::HEXLOWER; use libp2p::identity::ed25519::Keypair; use serde::de::Error; use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -33,7 +34,7 @@ mod keypair_serde { S: Serializer, { let bytes = value.encode(); - let string = hex::encode(&bytes[..]); + let string = HEXLOWER.encode(&bytes[..]); string.serialize(serializer) } pub fn deserialize<'d, D>(deserializer: D) -> Result @@ -41,7 +42,8 @@ mod keypair_serde { D: Deserializer<'d>, { let string = String::deserialize(deserializer)?; - let mut bytes = hex::decode(&string).map_err(Error::custom)?; + let mut bytes = + HEXLOWER.decode(string.as_ref()).map_err(Error::custom)?; Keypair::decode(bytes.as_mut()).map_err(Error::custom) } } diff --git a/apps/src/lib/node/ledger/protocol/mod.rs b/apps/src/lib/node/ledger/protocol/mod.rs index e5f191d6e7..cac7cacb42 100644 --- a/apps/src/lib/node/ledger/protocol/mod.rs +++ b/apps/src/lib/node/ledger/protocol/mod.rs @@ -249,10 +249,13 @@ where } Address::Internal(internal_addr) => { let ctx = native_vp::Ctx::new( + addr, storage, write_log, tx, gas_meter, + &keys_changed, + &verifiers, vp_wasm_cache.clone(), ); let tx_data = match tx.data.as_ref() { diff --git a/apps/src/lib/node/ledger/storage/rocksdb.rs b/apps/src/lib/node/ledger/storage/rocksdb.rs index 71ed305ea5..44f55fa8e3 100644 --- a/apps/src/lib/node/ledger/storage/rocksdb.rs +++ b/apps/src/lib/node/ledger/storage/rocksdb.rs @@ -806,24 +806,36 @@ impl<'iter> DBIter<'iter> for RocksDB { &'iter self, prefix: &Key, ) -> PersistentPrefixIterator<'iter> { - let db_prefix = "subspace/".to_owned(); - let prefix = format!("{}{}", db_prefix, prefix); + iter_prefix(self, prefix, Direction::Forward) + } - let mut read_opts = ReadOptions::default(); - // don't use the prefix bloom filter - read_opts.set_total_order_seek(true); - let mut upper_prefix = prefix.clone().into_bytes(); - if let Some(last) = upper_prefix.pop() 
{ - upper_prefix.push(last + 1); - } - read_opts.set_iterate_upper_bound(upper_prefix); + fn rev_iter_prefix(&'iter self, prefix: &Key) -> Self::PrefixIter { + iter_prefix(self, prefix, Direction::Reverse) + } +} - let iter = self.0.iterator_opt( - IteratorMode::From(prefix.as_bytes(), Direction::Forward), - read_opts, - ); - PersistentPrefixIterator(PrefixIterator::new(iter, db_prefix)) +fn iter_prefix<'iter>( + db: &'iter RocksDB, + prefix: &Key, + direction: Direction, +) -> PersistentPrefixIterator<'iter> { + let db_prefix = "subspace/".to_owned(); + let prefix = format!("{}{}", db_prefix, prefix); + + let mut read_opts = ReadOptions::default(); + // don't use the prefix bloom filter + read_opts.set_total_order_seek(true); + let mut upper_prefix = prefix.clone().into_bytes(); + if let Some(last) = upper_prefix.pop() { + upper_prefix.push(last + 1); } + read_opts.set_iterate_upper_bound(upper_prefix); + + let iter = db.0.iterator_opt( + IteratorMode::From(prefix.as_bytes(), direction), + read_opts, + ); + PersistentPrefixIterator(PrefixIterator::new(iter, db_prefix)) } #[derive(Debug)] diff --git a/apps/src/lib/wallet/keys.rs b/apps/src/lib/wallet/keys.rs index 1c521e7515..e922c7df5a 100644 --- a/apps/src/lib/wallet/keys.rs +++ b/apps/src/lib/wallet/keys.rs @@ -5,6 +5,7 @@ use std::rc::Rc; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; +use data_encoding::HEXLOWER; use namada::types::key::*; use orion::{aead, kdf}; use serde::{Deserialize, Serialize}; @@ -108,15 +109,15 @@ pub struct EncryptedKeypair(Vec); impl Display for EncryptedKeypair { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", hex::encode(&self.0)) + write!(f, "{}", HEXLOWER.encode(self.0.as_ref())) } } impl FromStr for EncryptedKeypair { - type Err = hex::FromHexError; + type Err = data_encoding::DecodeError; fn from_str(s: &str) -> Result { - hex::decode(s).map(Self) + HEXLOWER.decode(s.as_ref()).map(Self) } } diff --git a/apps/src/lib/wasm_loader/mod.rs b/apps/src/lib/wasm_loader/mod.rs index b6cb424457..e41efdbf77 100644 --- a/apps/src/lib/wasm_loader/mod.rs +++ b/apps/src/lib/wasm_loader/mod.rs @@ -4,8 +4,8 @@ use std::collections::HashMap; use std::fs; use std::path::Path; +use data_encoding::HEXLOWER; use futures::future::join_all; -use hex; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use thiserror::Error; @@ -144,7 +144,7 @@ pub async fn pre_fetch_wasm(wasm_directory: impl AsRef) { Ok(bytes) => { let mut hasher = Sha256::new(); hasher.update(bytes); - let result = hex::encode(hasher.finalize()); + let result = HEXLOWER.encode(&hasher.finalize()); let derived_name = format!( "{}.{}.wasm", &name.split('.').collect::>()[0], diff --git a/macros/src/lib.rs b/macros/src/lib.rs index afa49c66ab..33e729dca4 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -15,7 +15,10 @@ use syn::{parse_macro_input, DeriveInput, ItemFn}; /// This macro expects a function with signature: /// /// ```compiler_fail -/// fn apply_tx(tx_data: Vec) +/// fn apply_tx( +/// ctx: &mut Ctx, +/// tx_data: Vec +/// ) -> TxResult /// ``` #[proc_macro_attribute] pub fn transaction(_attr: TokenStream, input: TokenStream) -> TokenStream { @@ -38,7 +41,19 @@ pub fn transaction(_attr: TokenStream, input: TokenStream) -> TokenStream { ) }; let tx_data = slice.to_vec(); - #ident(tx_data); + + // The context on WASM side is only provided by the VM once its + // being executed (in here it's implicit). 
But because we want to + // have an interface consistent with the VP interface, in which the + // context is explicit, in here we're just using an empty `Ctx` + // to "fake" it. + let mut ctx = unsafe { namada_tx_prelude::Ctx::new() }; + + if let Err(err) = #ident(&mut ctx, tx_data) { + namada_tx_prelude::debug_log!("Transaction error: {}", err); + // crash the transaction to abort + panic!(); + } } }; TokenStream::from(gen) @@ -50,11 +65,12 @@ pub fn transaction(_attr: TokenStream, input: TokenStream) -> TokenStream { /// /// ```compiler_fail /// fn validate_tx( +/// ctx: &Ctx, /// tx_data: Vec<u8>, /// addr: Address, /// keys_changed: BTreeSet<storage::Key>, /// verifiers: BTreeSet<Address>
-/// ) -> bool +/// ) -> VpResult /// ``` #[proc_macro_attribute] pub fn validity_predicate( @@ -74,7 +90,6 @@ pub fn validity_predicate( #[no_mangle] extern "C" fn _validate_tx( // VP's account's address - // TODO Should the address be on demand (a call to host function?) addr_ptr: u64, addr_len: u64, tx_data_ptr: u64, @@ -113,11 +128,22 @@ pub fn validity_predicate( }; let verifiers: BTreeSet
= BTreeSet::try_from_slice(slice).unwrap(); + // The context on WASM side is only provided by the VM once its + // being executed (in here it's implicit). But because we want to + // have interface identical with the native VPs, in which the + // context is explicit, in here we're just using an empty `Ctx` + // to "fake" it. + let ctx = unsafe { namada_vp_prelude::Ctx::new() }; + // run validation with the concrete type(s) - if #ident(tx_data, addr, keys_changed, verifiers) { - 1 - } else { - 0 + match #ident(&ctx, tx_data, addr, keys_changed, verifiers) + { + Ok(true) => 1, + Ok(false) => 0, + Err(err) => { + namada_vp_prelude::debug_log!("Validity predicate error: {}", err); + 0 + }, } } }; diff --git a/proof_of_stake/src/lib.rs b/proof_of_stake/src/lib.rs index a137eb8a91..144c88c596 100644 --- a/proof_of_stake/src/lib.rs +++ b/proof_of_stake/src/lib.rs @@ -94,122 +94,164 @@ pub trait PosReadOnly { + BorshSerialize + BorshSchema; + /// Underlying read (and write in [`PosActions`]) interface errors + type Error; + /// Address of the PoS account const POS_ADDRESS: Self::Address; + /// Address of the staking token /// TODO: this should be `const`, but in the ledger `address::xan` is not a /// `const fn` fn staking_token_address() -> Self::Address; /// Read PoS parameters. - fn read_pos_params(&self) -> PosParams; + fn read_pos_params(&self) -> Result; /// Read PoS validator's staking reward address. fn read_validator_staking_reward_address( &self, key: &Self::Address, - ) -> Option; + ) -> Result, Self::Error>; /// Read PoS validator's consensus key (used for signing block votes). fn read_validator_consensus_key( &self, key: &Self::Address, - ) -> Option>; + ) -> Result>, Self::Error>; /// Read PoS validator's state. fn read_validator_state( &self, key: &Self::Address, - ) -> Option; + ) -> Result, Self::Error>; /// Read PoS validator's total deltas of their bonds (validator self-bonds /// and delegations). fn read_validator_total_deltas( &self, key: &Self::Address, - ) -> Option>; + ) -> Result>, Self::Error>; /// Read PoS validator's voting power. fn read_validator_voting_power( &self, key: &Self::Address, - ) -> Option; + ) -> Result, Self::Error>; /// Read PoS slashes applied to a validator. - fn read_validator_slashes(&self, key: &Self::Address) -> Vec; + fn read_validator_slashes( + &self, + key: &Self::Address, + ) -> Result, Self::Error>; /// Read PoS bond (validator self-bond or a delegation). fn read_bond( &self, key: &BondId, - ) -> Option>; + ) -> Result>, Self::Error>; /// Read PoS unbond (unbonded tokens from validator self-bond or a /// delegation). fn read_unbond( &self, key: &BondId, - ) -> Option>; + ) -> Result>, Self::Error>; /// Read PoS validator set (active and inactive). - fn read_validator_set(&self) -> ValidatorSets; + fn read_validator_set( + &self, + ) -> Result, Self::Error>; /// Read PoS total voting power of all validators (active and inactive). - fn read_total_voting_power(&self) -> TotalVotingPowers; + fn read_total_voting_power(&self) + -> Result; } /// PoS system trait to be implemented in integration that can read and write /// PoS data. pub trait PosActions: PosReadOnly { + /// Error in `PosActions::become_validator` + type BecomeValidatorError: From + + From>; + + /// Error in `PosActions::bond_tokens` + type BondError: From + From>; + + /// Error in `PosActions::unbond_tokens` + type UnbondError: From + + From>; + + /// Error in `PosActions::withdraw_tokens` + type WithdrawError: From + From>; + /// Write PoS parameters. 
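For orientation, the macro changes above mean guest code now receives an explicit context and returns a result instead of a bare value. A sketch of the new entrypoint shapes, with `Ctx`, `TxResult` and `VpResult` as local stand-ins so it compiles on its own; real guests get these from `namada_tx_prelude`/`namada_vp_prelude`:

```rust
// Sketch of the guest-side entrypoint shapes generated by the macros.
use std::collections::BTreeSet;

type Address = String; // stand-in for namada's Address
type Key = String; // stand-in for storage::Key
type TxResult = Result<(), String>; // stand-in result aliases
type VpResult = Result<bool, String>;

struct Ctx; // stand-in for the host-environment handle

fn apply_tx(_ctx: &mut Ctx, tx_data: Vec<u8>) -> TxResult {
    // Errors now bubble up as `Err`; the generated shim logs them and
    // panics to abort the transaction.
    if tx_data.is_empty() {
        return Err("empty tx_data".to_string());
    }
    Ok(())
}

fn validate_tx(
    _ctx: &Ctx,
    _tx_data: Vec<u8>,
    _addr: Address,
    keys_changed: BTreeSet<Key>,
    _verifiers: BTreeSet<Address>,
) -> VpResult {
    // `Ok(true)` accepts, `Ok(false)` rejects, and `Err` is logged by
    // the shim and treated as a rejection.
    Ok(!keys_changed.is_empty())
}

fn main() {
    let mut ctx = Ctx;
    assert!(apply_tx(&mut ctx, vec![1, 2, 3]).is_ok());
    let mut keys = BTreeSet::new();
    keys.insert("counter".to_string());
    assert_eq!(
        validate_tx(&ctx, vec![], "addr".to_string(), keys, BTreeSet::new()),
        Ok(true)
    );
}
```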
- fn write_pos_params(&mut self, params: &PosParams); + fn write_pos_params( + &mut self, + params: &PosParams, + ) -> Result<(), Self::Error>; /// Write PoS validator's raw hash its address. - fn write_validator_address_raw_hash(&mut self, address: &Self::Address); + fn write_validator_address_raw_hash( + &mut self, + address: &Self::Address, + ) -> Result<(), Self::Error>; /// Write PoS validator's staking reward address, into which staking rewards /// will be credited. fn write_validator_staking_reward_address( &mut self, key: &Self::Address, value: Self::Address, - ); + ) -> Result<(), Self::Error>; /// Write PoS validator's consensus key (used for signing block votes). fn write_validator_consensus_key( &mut self, key: &Self::Address, value: ValidatorConsensusKeys, - ); + ) -> Result<(), Self::Error>; /// Write PoS validator's state. fn write_validator_state( &mut self, key: &Self::Address, value: ValidatorStates, - ); + ) -> Result<(), Self::Error>; /// Write PoS validator's total deltas of their bonds (validator self-bonds /// and delegations). fn write_validator_total_deltas( &mut self, key: &Self::Address, value: ValidatorTotalDeltas, - ); + ) -> Result<(), Self::Error>; /// Write PoS validator's voting power. fn write_validator_voting_power( &mut self, key: &Self::Address, value: ValidatorVotingPowers, - ); + ) -> Result<(), Self::Error>; /// Write PoS bond (validator self-bond or a delegation). fn write_bond( &mut self, key: &BondId, value: Bonds, - ); + ) -> Result<(), Self::Error>; /// Write PoS unbond (unbonded tokens from validator self-bond or a /// delegation). fn write_unbond( &mut self, key: &BondId, value: Unbonds, - ); + ) -> Result<(), Self::Error>; /// Write PoS validator set (active and inactive). - fn write_validator_set(&mut self, value: ValidatorSets); + fn write_validator_set( + &mut self, + value: ValidatorSets, + ) -> Result<(), Self::Error>; /// Write PoS total voting power of all validators (active and inactive). - fn write_total_voting_power(&mut self, value: TotalVotingPowers); + fn write_total_voting_power( + &mut self, + value: TotalVotingPowers, + ) -> Result<(), Self::Error>; /// Delete an emptied PoS bond (validator self-bond or a delegation). - fn delete_bond(&mut self, key: &BondId); + fn delete_bond( + &mut self, + key: &BondId, + ) -> Result<(), Self::Error>; /// Delete an emptied PoS unbond (unbonded tokens from validator self-bond /// or a delegation). - fn delete_unbond(&mut self, key: &BondId); + fn delete_unbond( + &mut self, + key: &BondId, + ) -> Result<(), Self::Error>; /// Transfer tokens from the `src` to the `dest`. fn transfer( @@ -218,7 +260,7 @@ pub trait PosActions: PosReadOnly { amount: Self::TokenAmount, src: &Self::Address, dest: &Self::Address, - ); + ) -> Result<(), Self::Error>; /// Attempt to update the given account to become a validator. fn become_validator( @@ -227,21 +269,19 @@ pub trait PosActions: PosReadOnly { staking_reward_address: &Self::Address, consensus_key: &Self::PublicKey, current_epoch: impl Into, - ) -> Result<(), BecomeValidatorError> { + ) -> Result<(), Self::BecomeValidatorError> { let current_epoch = current_epoch.into(); - let params = self.read_pos_params(); - let mut validator_set = self.read_validator_set(); - if self.is_validator(address) { - return Err(BecomeValidatorError::AlreadyValidator( - address.clone(), - )); + let params = self.read_pos_params()?; + let mut validator_set = self.read_validator_set()?; + if self.is_validator(address)? 
{ + Err(BecomeValidatorError::AlreadyValidator(address.clone()))?; } if address == staking_reward_address { - return Err( + Err( BecomeValidatorError::StakingRewardAddressEqValidatorAddress( address.clone(), ), - ); + )?; } let BecomeValidatorData { consensus_key, @@ -258,20 +298,24 @@ pub trait PosActions: PosReadOnly { self.write_validator_staking_reward_address( address, staking_reward_address.clone(), - ); - self.write_validator_consensus_key(address, consensus_key); - self.write_validator_state(address, state); - self.write_validator_set(validator_set); - self.write_validator_address_raw_hash(address); - self.write_validator_total_deltas(address, total_deltas); - self.write_validator_voting_power(address, voting_power); + )?; + self.write_validator_consensus_key(address, consensus_key)?; + self.write_validator_state(address, state)?; + self.write_validator_set(validator_set)?; + self.write_validator_address_raw_hash(address)?; + self.write_validator_total_deltas(address, total_deltas)?; + self.write_validator_voting_power(address, voting_power)?; Ok(()) } /// Check if the given address is a validator by checking that it has some /// state. - fn is_validator(&self, address: &Self::Address) -> bool { - self.read_validator_state(address).is_some() + fn is_validator( + &self, + address: &Self::Address, + ) -> Result { + let state = self.read_validator_state(address)?; + Ok(state.is_some()) } /// Self-bond tokens to a validator when `source` is `None` or equal to @@ -283,29 +327,27 @@ pub trait PosActions: PosReadOnly { validator: &Self::Address, amount: Self::TokenAmount, current_epoch: impl Into, - ) -> Result<(), BondError> { + ) -> Result<(), Self::BondError> { let current_epoch = current_epoch.into(); if let Some(source) = source { - if source != validator && self.is_validator(source) { - return Err(BondError::SourceMustNotBeAValidator( - source.clone(), - )); + if source != validator && self.is_validator(source)? 
{ + Err(BondError::SourceMustNotBeAValidator(source.clone()))?; } } - let params = self.read_pos_params(); - let validator_state = self.read_validator_state(validator); + let params = self.read_pos_params()?; + let validator_state = self.read_validator_state(validator)?; let source = source.unwrap_or(validator); let bond_id = BondId { source: source.clone(), validator: validator.clone(), }; - let bond = self.read_bond(&bond_id); + let bond = self.read_bond(&bond_id)?; let validator_total_deltas = - self.read_validator_total_deltas(validator); + self.read_validator_total_deltas(validator)?; let validator_voting_power = - self.read_validator_voting_power(validator); - let mut total_voting_power = self.read_total_voting_power(); - let mut validator_set = self.read_validator_set(); + self.read_validator_voting_power(validator)?; + let mut total_voting_power = self.read_total_voting_power()?; + let mut validator_set = self.read_validator_set()?; let BondData { bond, @@ -323,12 +365,11 @@ pub trait PosActions: PosReadOnly { &mut validator_set, current_epoch, )?; - - self.write_bond(&bond_id, bond); - self.write_validator_total_deltas(validator, validator_total_deltas); - self.write_validator_voting_power(validator, validator_voting_power); - self.write_total_voting_power(total_voting_power); - self.write_validator_set(validator_set); + self.write_bond(&bond_id, bond)?; + self.write_validator_total_deltas(validator, validator_total_deltas)?; + self.write_validator_voting_power(validator, validator_voting_power)?; + self.write_total_voting_power(total_voting_power)?; + self.write_validator_set(validator_set)?; // Transfer the bonded tokens from the source to PoS self.transfer( @@ -336,8 +377,7 @@ pub trait PosActions: PosReadOnly { amount, source, &Self::POS_ADDRESS, - ); - + )?; Ok(()) } @@ -350,28 +390,32 @@ pub trait PosActions: PosReadOnly { validator: &Self::Address, amount: Self::TokenAmount, current_epoch: impl Into, - ) -> Result<(), UnbondError> { + ) -> Result<(), Self::UnbondError> { let current_epoch = current_epoch.into(); - let params = self.read_pos_params(); + let params = self.read_pos_params()?; let source = source.unwrap_or(validator); let bond_id = BondId { source: source.clone(), validator: validator.clone(), }; - let mut bond = - self.read_bond(&bond_id).ok_or(UnbondError::NoBondFound)?; - let unbond = self.read_unbond(&bond_id); - let mut validator_total_deltas = - self.read_validator_total_deltas(validator).ok_or_else(|| { + let mut bond = match self.read_bond(&bond_id)? { + Some(val) => val, + None => Err(UnbondError::NoBondFound)?, + }; + let unbond = self.read_unbond(&bond_id)?; + let mut validator_total_deltas = self + .read_validator_total_deltas(validator)? + .ok_or_else(|| { UnbondError::ValidatorHasNoBonds(validator.clone()) })?; - let mut validator_voting_power = - self.read_validator_voting_power(validator).ok_or_else(|| { + let mut validator_voting_power = self + .read_validator_voting_power(validator)? 
+ .ok_or_else(|| { UnbondError::ValidatorHasNoVotingPower(validator.clone()) })?; - let slashes = self.read_validator_slashes(validator); - let mut total_voting_power = self.read_total_voting_power(); - let mut validator_set = self.read_validator_set(); + let slashes = self.read_validator_slashes(validator)?; + let mut total_voting_power = self.read_total_voting_power()?; + let mut validator_set = self.read_validator_set()?; let UnbondData { unbond } = unbond_tokens( ¶ms, @@ -394,18 +438,18 @@ pub trait PosActions: PosReadOnly { ); match total_bonds { Some(total_bonds) if total_bonds.sum() != 0.into() => { - self.write_bond(&bond_id, bond); + self.write_bond(&bond_id, bond)?; } _ => { // If the bond is left empty, delete it - self.delete_bond(&bond_id) + self.delete_bond(&bond_id)? } } - self.write_unbond(&bond_id, unbond); - self.write_validator_total_deltas(validator, validator_total_deltas); - self.write_validator_voting_power(validator, validator_voting_power); - self.write_total_voting_power(total_voting_power); - self.write_validator_set(validator_set); + self.write_unbond(&bond_id, unbond)?; + self.write_validator_total_deltas(validator, validator_total_deltas)?; + self.write_validator_voting_power(validator, validator_voting_power)?; + self.write_total_voting_power(total_voting_power)?; + self.write_validator_set(validator_set)?; Ok(()) } @@ -418,17 +462,17 @@ pub trait PosActions: PosReadOnly { source: Option<&Self::Address>, validator: &Self::Address, current_epoch: impl Into, - ) -> Result> { + ) -> Result { let current_epoch = current_epoch.into(); - let params = self.read_pos_params(); + let params = self.read_pos_params()?; let source = source.unwrap_or(validator); let bond_id = BondId { source: source.clone(), validator: validator.clone(), }; - let unbond = self.read_unbond(&bond_id); - let slashes = self.read_validator_slashes(&bond_id.validator); + let unbond = self.read_unbond(&bond_id)?; + let slashes = self.read_validator_slashes(&bond_id.validator)?; let WithdrawData { unbond, @@ -449,11 +493,11 @@ pub trait PosActions: PosReadOnly { ); match total_unbonds { Some(total_unbonds) if total_unbonds.sum() != 0.into() => { - self.write_unbond(&bond_id, unbond); + self.write_unbond(&bond_id, unbond)?; } _ => { // If the unbond is left empty, delete it - self.delete_unbond(&bond_id) + self.delete_unbond(&bond_id)? } } @@ -463,7 +507,7 @@ pub trait PosActions: PosReadOnly { withdrawn, &Self::POS_ADDRESS, source, - ); + )?; Ok(slashed) } diff --git a/shared/Cargo.toml b/shared/Cargo.toml index 53ab95f15c..89b20e59bc 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -57,11 +57,11 @@ borsh = "0.9.0" chrono = "0.4.19" # Using unreleased commit on top of version 0.5.0 that adds Sync to the CLruCache clru = {git = "https://github.com/marmeladema/clru-rs.git", rev = "71ca566"} +data-encoding = "2.3.2" derivative = "2.2.0" ed25519-consensus = "1.2.0" ferveo = {optional = true, git = "https://github.com/anoma/ferveo"} ferveo-common = {git = "https://github.com/anoma/ferveo"} -hex = "0.4.3" tpke = {package = "group-threshold-cryptography", optional = true, git = "https://github.com/anoma/ferveo"} # TODO using the same version of tendermint-rs as we do here. 
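The `PosReadOnly`/`PosActions` rework above threads a `Result` through every accessor, so storage failures propagate with `?` instead of being unwrapped. A minimal sketch of the pattern with simplified stand-in types, not the real namada trait:

```rust
// Minimal model of the fallible accessor pattern: every read returns
// `Result`, and derived queries compose with `?`.
use std::collections::HashMap;
use std::convert::Infallible;

type Address = String;

trait PosReadOnly {
    type Error;

    fn read_validator_state(
        &self,
        key: &Address,
    ) -> Result<Option<u64>, Self::Error>;

    // Mirrors the diff's `is_validator`: read the state, propagate any
    // storage error with `?`, and report whether a state exists.
    fn is_validator(&self, address: &Address) -> Result<bool, Self::Error> {
        let state = self.read_validator_state(address)?;
        Ok(state.is_some())
    }
}

struct InMemoryPos(HashMap<Address, u64>);

impl PosReadOnly for InMemoryPos {
    type Error = Infallible;

    fn read_validator_state(
        &self,
        key: &Address,
    ) -> Result<Option<u64>, Self::Error> {
        Ok(self.0.get(key).copied())
    }
}

fn main() -> Result<(), Infallible> {
    let mut states = HashMap::new();
    states.insert("validator-1".to_string(), 1);
    let pos = InMemoryPos(states);
    assert!(pos.is_validator(&"validator-1".to_string())?);
    assert!(!pos.is_validator(&"nobody".to_string())?);
    Ok(())
}
```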
ibc = {git = "https://github.com/heliaxdev/ibc-rs", rev = "30b3495ac56c6c37c99bc69ef9f2e84c3309c6cc", default-features = false} diff --git a/shared/src/ledger/governance/vp.rs b/shared/src/ledger/governance/vp.rs index 6ccfc7a33e..ad941806c4 100644 --- a/shared/src/ledger/governance/vp.rs +++ b/shared/src/ledger/governance/vp.rs @@ -7,6 +7,7 @@ use super::storage as gov_storage; use crate::ledger::native_vp::{self, Ctx}; use crate::ledger::pos::{self as pos_storage, BondId, Bonds}; use crate::ledger::storage::{self as ledger_storage, StorageHasher}; +use crate::ledger::vp_env::VpEnv; use crate::types::address::{xan as m1t, Address, InternalAddress}; use crate::types::storage::{Epoch, Key}; use crate::types::token; @@ -350,7 +351,7 @@ where let max_content_length = read(ctx, &max_content_length_parameter_key, ReadType::PRE).ok(); let has_pre_content = ctx.has_key_pre(&content_key).ok(); - let post_content = ctx.read_post(&content_key).unwrap(); + let post_content = ctx.read_bytes_post(&content_key).unwrap(); match (has_pre_content, post_content, max_content_length) { ( Some(has_pre_content), @@ -377,7 +378,7 @@ where let max_content_length = read(ctx, &max_content_length_parameter_key, ReadType::PRE).ok(); let has_pre_content = ctx.has_key_pre(&content_key).ok(); - let post_content = ctx.read_post(&content_key).unwrap(); + let post_content = ctx.read_bytes_post(&content_key).unwrap(); match (has_pre_content, post_content, max_content_length) { ( Some(has_pre_content), @@ -504,8 +505,8 @@ where T: Clone + BorshDeserialize, { let storage_result = match read_type { - ReadType::PRE => context.read_pre(key), - ReadType::POST => context.read_post(key), + ReadType::PRE => context.read_bytes_pre(key), + ReadType::POST => context.read_bytes_post(key), }; match storage_result { diff --git a/shared/src/ledger/ibc/handler.rs b/shared/src/ledger/ibc/handler.rs index bf45759535..4a3fe528a9 100644 --- a/shared/src/ledger/ibc/handler.rs +++ b/shared/src/ledger/ibc/handler.rs @@ -70,6 +70,7 @@ use crate::ibc::events::IbcEvent; use crate::ibc::mock::client_state::{MockClientState, MockConsensusState}; use crate::ibc::timestamp::Timestamp; use crate::ledger::ibc::storage; +use crate::ledger::storage_api; use crate::tendermint::Time; use crate::tendermint_proto::{Error as ProtoError, Protobuf}; use crate::types::address::{Address, InternalAddress}; @@ -117,40 +118,69 @@ pub enum Error { ReceivingToken(String), } +// This is needed to use `ibc::Handler::Error` with `IbcActions` in +// `tx_prelude/src/ibc.rs` +impl From for storage_api::Error { + fn from(err: Error) -> Self { + storage_api::Error::new(err) + } +} + /// for handling IBC modules pub type Result = std::result::Result; /// IBC trait to be implemented in integration that can read and write pub trait IbcActions { + /// IBC action error + type Error: From; + /// Read IBC-related data - fn read_ibc_data(&self, key: &Key) -> Option>; + fn read_ibc_data( + &self, + key: &Key, + ) -> std::result::Result>, Self::Error>; /// Write IBC-related data - fn write_ibc_data(&self, key: &Key, data: impl AsRef<[u8]>); + fn write_ibc_data( + &mut self, + key: &Key, + data: impl AsRef<[u8]>, + ) -> std::result::Result<(), Self::Error>; /// Delete IBC-related data - fn delete_ibc_data(&self, key: &Key); + fn delete_ibc_data( + &mut self, + key: &Key, + ) -> std::result::Result<(), Self::Error>; /// Emit an IBC event - fn emit_ibc_event(&self, event: AnomaIbcEvent); + fn emit_ibc_event( + &mut self, + event: AnomaIbcEvent, + ) -> std::result::Result<(), 
Self::Error>; /// Transfer token fn transfer_token( - &self, + &mut self, src: &Address, dest: &Address, token: &Address, amount: Amount, - ); + ) -> std::result::Result<(), Self::Error>; /// Get the current height of this chain - fn get_height(&self) -> BlockHeight; + fn get_height(&self) -> std::result::Result; /// Get the current time of the tendermint header of this chain - fn get_header_time(&self) -> Rfc3339String; + fn get_header_time( + &self, + ) -> std::result::Result; /// dispatch according to ICS26 routing - fn dispatch(&self, tx_data: &[u8]) -> Result<()> { + fn dispatch_ibc_action( + &mut self, + tx_data: &[u8], + ) -> std::result::Result<(), Self::Error> { let ibc_msg = IbcMessage::decode(tx_data).map_err(Error::IbcData)?; match &ibc_msg.0 { Ics26Envelope::Ics2Msg(ics02_msg) => match ics02_msg { @@ -200,14 +230,17 @@ pub trait IbcActions { } /// Create a new client - fn create_client(&self, msg: &MsgCreateAnyClient) -> Result<()> { + fn create_client( + &mut self, + msg: &MsgCreateAnyClient, + ) -> std::result::Result<(), Self::Error> { let counter_key = storage::client_counter_key(); let counter = self.get_and_inc_counter(&counter_key)?; let client_type = msg.client_state.client_type(); let client_id = client_id(client_type, counter)?; // client type let client_type_key = storage::client_type_key(&client_id); - self.write_ibc_data(&client_type_key, client_type.as_str().as_bytes()); + self.write_ibc_data(&client_type_key, client_type.as_str().as_bytes())?; // client state let client_state_key = storage::client_state_key(&client_id); self.write_ibc_data( @@ -215,7 +248,7 @@ pub trait IbcActions { msg.client_state .encode_vec() .expect("encoding shouldn't fail"), - ); + )?; // consensus state let height = msg.client_state.latest_height(); let consensus_state_key = @@ -225,29 +258,33 @@ pub trait IbcActions { msg.consensus_state .encode_vec() .expect("encoding shouldn't fail"), - ); + )?; self.set_client_update_time(&client_id)?; let event = make_create_client_event(&client_id, msg) .try_into() .unwrap(); - self.emit_ibc_event(event); + self.emit_ibc_event(event)?; Ok(()) } /// Update a client - fn update_client(&self, msg: &MsgUpdateAnyClient) -> Result<()> { + fn update_client( + &mut self, + msg: &MsgUpdateAnyClient, + ) -> std::result::Result<(), Self::Error> { // get and update the client let client_id = msg.client_id.clone(); let client_state_key = storage::client_state_key(&client_id); - let value = self.read_ibc_data(&client_state_key).ok_or_else(|| { - Error::Client(format!( - "The client to be updated doesn't exist: ID {}", - client_id - )) - })?; + let value = + self.read_ibc_data(&client_state_key)?.ok_or_else(|| { + Error::Client(format!( + "The client to be updated doesn't exist: ID {}", + client_id + )) + })?; let client_state = AnyClientState::decode_vec(&value).map_err(Error::Decoding)?; let (new_client_state, new_consensus_state) = @@ -259,7 +296,7 @@ pub trait IbcActions { new_client_state .encode_vec() .expect("encoding shouldn't fail"), - ); + )?; let consensus_state_key = storage::consensus_state_key(&client_id, height); self.write_ibc_data( @@ -267,20 +304,23 @@ pub trait IbcActions { new_consensus_state .encode_vec() .expect("encoding shouldn't fail"), - ); + )?; self.set_client_update_time(&client_id)?; let event = make_update_client_event(&client_id, msg) .try_into() .unwrap(); - self.emit_ibc_event(event); + self.emit_ibc_event(event)?; Ok(()) } /// Upgrade a client - fn upgrade_client(&self, msg: &MsgUpgradeAnyClient) -> Result<()> { + fn 
upgrade_client( + &mut self, + msg: &MsgUpgradeAnyClient, + ) -> std::result::Result<(), Self::Error> { let client_state_key = storage::client_state_key(&msg.client_id); let height = msg.client_state.latest_height(); let consensus_state_key = @@ -290,26 +330,29 @@ pub trait IbcActions { msg.client_state .encode_vec() .expect("encoding shouldn't fail"), - ); + )?; self.write_ibc_data( &consensus_state_key, msg.consensus_state .encode_vec() .expect("encoding shouldn't fail"), - ); + )?; self.set_client_update_time(&msg.client_id)?; let event = make_upgrade_client_event(&msg.client_id, msg) .try_into() .unwrap(); - self.emit_ibc_event(event); + self.emit_ibc_event(event)?; Ok(()) } /// Initialize a connection for ConnectionOpenInit - fn init_connection(&self, msg: &MsgConnectionOpenInit) -> Result<()> { + fn init_connection( + &mut self, + msg: &MsgConnectionOpenInit, + ) -> std::result::Result<(), Self::Error> { let counter_key = storage::connection_counter_key(); let counter = self.get_and_inc_counter(&counter_key)?; // new connection @@ -319,18 +362,21 @@ pub trait IbcActions { self.write_ibc_data( &conn_key, connection.encode_vec().expect("encoding shouldn't fail"), - ); + )?; let event = make_open_init_connection_event(&conn_id, msg) .try_into() .unwrap(); - self.emit_ibc_event(event); + self.emit_ibc_event(event)?; Ok(()) } /// Initialize a connection for ConnectionOpenTry - fn try_connection(&self, msg: &MsgConnectionOpenTry) -> Result<()> { + fn try_connection( + &mut self, + msg: &MsgConnectionOpenTry, + ) -> std::result::Result<(), Self::Error> { let counter_key = storage::connection_counter_key(); let counter = self.get_and_inc_counter(&counter_key)?; // new connection @@ -340,20 +386,23 @@ pub trait IbcActions { self.write_ibc_data( &conn_key, connection.encode_vec().expect("encoding shouldn't fail"), - ); + )?; let event = make_open_try_connection_event(&conn_id, msg) .try_into() .unwrap(); - self.emit_ibc_event(event); + self.emit_ibc_event(event)?; Ok(()) } /// Open the connection for ConnectionOpenAck - fn ack_connection(&self, msg: &MsgConnectionOpenAck) -> Result<()> { + fn ack_connection( + &mut self, + msg: &MsgConnectionOpenAck, + ) -> std::result::Result<(), Self::Error> { let conn_key = storage::connection_key(&msg.connection_id); - let value = self.read_ibc_data(&conn_key).ok_or_else(|| { + let value = self.read_ibc_data(&conn_key)?.ok_or_else(|| { Error::Connection(format!( "The connection to be opened doesn't exist: ID {}", msg.connection_id @@ -369,18 +418,21 @@ pub trait IbcActions { self.write_ibc_data( &conn_key, connection.encode_vec().expect("encoding shouldn't fail"), - ); + )?; let event = make_open_ack_connection_event(msg).try_into().unwrap(); - self.emit_ibc_event(event); + self.emit_ibc_event(event)?; Ok(()) } /// Open the connection for ConnectionOpenConfirm - fn confirm_connection(&self, msg: &MsgConnectionOpenConfirm) -> Result<()> { + fn confirm_connection( + &mut self, + msg: &MsgConnectionOpenConfirm, + ) -> std::result::Result<(), Self::Error> { let conn_key = storage::connection_key(&msg.connection_id); - let value = self.read_ibc_data(&conn_key).ok_or_else(|| { + let value = self.read_ibc_data(&conn_key)?.ok_or_else(|| { Error::Connection(format!( "The connection to be opend doesn't exist: ID {}", msg.connection_id @@ -392,16 +444,19 @@ pub trait IbcActions { self.write_ibc_data( &conn_key, connection.encode_vec().expect("encoding shouldn't fail"), - ); + )?; let event = make_open_confirm_connection_event(msg).try_into().unwrap(); - 
self.emit_ibc_event(event); + self.emit_ibc_event(event)?; Ok(()) } /// Initialize a channel for ChannelOpenInit - fn init_channel(&self, msg: &MsgChannelOpenInit) -> Result<()> { + fn init_channel( + &mut self, + msg: &MsgChannelOpenInit, + ) -> std::result::Result<(), Self::Error> { self.bind_port(&msg.port_id)?; let counter_key = storage::channel_counter_key(); let counter = self.get_and_inc_counter(&counter_key)?; @@ -412,18 +467,21 @@ pub trait IbcActions { self.write_ibc_data( &channel_key, msg.channel.encode_vec().expect("encoding shouldn't fail"), - ); + )?; let event = make_open_init_channel_event(&channel_id, msg) .try_into() .unwrap(); - self.emit_ibc_event(event); + self.emit_ibc_event(event)?; Ok(()) } /// Initialize a channel for ChannelOpenTry - fn try_channel(&self, msg: &MsgChannelOpenTry) -> Result<()> { + fn try_channel( + &mut self, + msg: &MsgChannelOpenTry, + ) -> std::result::Result<(), Self::Error> { self.bind_port(&msg.port_id)?; let counter_key = storage::channel_counter_key(); let counter = self.get_and_inc_counter(&counter_key)?; @@ -434,22 +492,25 @@ pub trait IbcActions { self.write_ibc_data( &channel_key, msg.channel.encode_vec().expect("encoding shouldn't fail"), - ); + )?; let event = make_open_try_channel_event(&channel_id, msg) .try_into() .unwrap(); - self.emit_ibc_event(event); + self.emit_ibc_event(event)?; Ok(()) } /// Open the channel for ChannelOpenAck - fn ack_channel(&self, msg: &MsgChannelOpenAck) -> Result<()> { + fn ack_channel( + &mut self, + msg: &MsgChannelOpenAck, + ) -> std::result::Result<(), Self::Error> { let port_channel_id = port_channel_id(msg.port_id.clone(), msg.channel_id.clone()); let channel_key = storage::channel_key(&port_channel_id); - let value = self.read_ibc_data(&channel_key).ok_or_else(|| { + let value = self.read_ibc_data(&channel_key)?.ok_or_else(|| { Error::Channel(format!( "The channel to be opened doesn't exist: Port/Channel {}", port_channel_id @@ -463,20 +524,23 @@ pub trait IbcActions { self.write_ibc_data( &channel_key, channel.encode_vec().expect("encoding shouldn't fail"), - ); + )?; let event = make_open_ack_channel_event(msg).try_into().unwrap(); - self.emit_ibc_event(event); + self.emit_ibc_event(event)?; Ok(()) } /// Open the channel for ChannelOpenConfirm - fn confirm_channel(&self, msg: &MsgChannelOpenConfirm) -> Result<()> { + fn confirm_channel( + &mut self, + msg: &MsgChannelOpenConfirm, + ) -> std::result::Result<(), Self::Error> { let port_channel_id = port_channel_id(msg.port_id.clone(), msg.channel_id.clone()); let channel_key = storage::channel_key(&port_channel_id); - let value = self.read_ibc_data(&channel_key).ok_or_else(|| { + let value = self.read_ibc_data(&channel_key)?.ok_or_else(|| { Error::Channel(format!( "The channel to be opened doesn't exist: Port/Channel {}", port_channel_id @@ -488,20 +552,23 @@ pub trait IbcActions { self.write_ibc_data( &channel_key, channel.encode_vec().expect("encoding shouldn't fail"), - ); + )?; let event = make_open_confirm_channel_event(msg).try_into().unwrap(); - self.emit_ibc_event(event); + self.emit_ibc_event(event)?; Ok(()) } /// Close the channel for ChannelCloseInit - fn close_init_channel(&self, msg: &MsgChannelCloseInit) -> Result<()> { + fn close_init_channel( + &mut self, + msg: &MsgChannelCloseInit, + ) -> std::result::Result<(), Self::Error> { let port_channel_id = port_channel_id(msg.port_id.clone(), msg.channel_id.clone()); let channel_key = storage::channel_key(&port_channel_id); - let value = self.read_ibc_data(&channel_key).ok_or_else(|| { 
+ let value = self.read_ibc_data(&channel_key)?.ok_or_else(|| { Error::Channel(format!( "The channel to be closed doesn't exist: Port/Channel {}", port_channel_id @@ -513,23 +580,23 @@ pub trait IbcActions { self.write_ibc_data( &channel_key, channel.encode_vec().expect("encoding shouldn't fail"), - ); + )?; let event = make_close_init_channel_event(msg).try_into().unwrap(); - self.emit_ibc_event(event); + self.emit_ibc_event(event)?; Ok(()) } /// Close the channel for ChannelCloseConfirm fn close_confirm_channel( - &self, + &mut self, msg: &MsgChannelCloseConfirm, - ) -> Result<()> { + ) -> std::result::Result<(), Self::Error> { let port_channel_id = port_channel_id(msg.port_id.clone(), msg.channel_id.clone()); let channel_key = storage::channel_key(&port_channel_id); - let value = self.read_ibc_data(&channel_key).ok_or_else(|| { + let value = self.read_ibc_data(&channel_key)?.ok_or_else(|| { Error::Channel(format!( "The channel to be closed doesn't exist: Port/Channel {}", port_channel_id @@ -541,22 +608,22 @@ pub trait IbcActions { self.write_ibc_data( &channel_key, channel.encode_vec().expect("encoding shouldn't fail"), - ); + )?; let event = make_close_confirm_channel_event(msg).try_into().unwrap(); - self.emit_ibc_event(event); + self.emit_ibc_event(event)?; Ok(()) } /// Send a packet fn send_packet( - &self, + &mut self, port_channel_id: PortChannelId, data: Vec, timeout_height: Height, timeout_timestamp: Timestamp, - ) -> Result<()> { + ) -> std::result::Result<(), Self::Error> { // get and increment the next sequence send let seq_key = storage::next_sequence_send_key(&port_channel_id); let sequence = self.get_and_inc_sequence(&seq_key)?; @@ -564,7 +631,7 @@ pub trait IbcActions { // get the channel for the destination info. let channel_key = storage::channel_key(&port_channel_id); let channel = self - .read_ibc_data(&channel_key) + .read_ibc_data(&channel_key)? 
.expect("cannot get the channel to be closed"); let channel = ChannelEnd::decode_vec(&channel).expect("cannot get the channel"); @@ -595,16 +662,19 @@ pub trait IbcActions { commitment .encode(&mut commitment_bytes) .expect("encoding shouldn't fail"); - self.write_ibc_data(&commitment_key, commitment_bytes); + self.write_ibc_data(&commitment_key, commitment_bytes)?; let event = make_send_packet_event(packet).try_into().unwrap(); - self.emit_ibc_event(event); + self.emit_ibc_event(event)?; Ok(()) } /// Receive a packet - fn receive_packet(&self, msg: &MsgRecvPacket) -> Result<()> { + fn receive_packet( + &mut self, + msg: &MsgRecvPacket, + ) -> std::result::Result<(), Self::Error> { // check the packet data if let Ok(data) = serde_json::from_slice(&msg.packet.data) { self.receive_token(&msg.packet, &data)?; @@ -616,7 +686,7 @@ pub trait IbcActions { &msg.packet.destination_channel, msg.packet.sequence, ); - self.write_ibc_data(&receipt_key, PacketReceipt::default().as_bytes()); + self.write_ibc_data(&receipt_key, PacketReceipt::default().as_bytes())?; // store the ack let ack_key = storage::ack_key( @@ -625,7 +695,7 @@ pub trait IbcActions { msg.packet.sequence, ); let ack = PacketAck::default().encode_to_vec(); - self.write_ibc_data(&ack_key, ack.clone()); + self.write_ibc_data(&ack_key, ack.clone())?; // increment the next sequence receive let port_channel_id = port_channel_id( @@ -638,28 +708,34 @@ pub trait IbcActions { let event = make_write_ack_event(msg.packet.clone(), ack) .try_into() .unwrap(); - self.emit_ibc_event(event); + self.emit_ibc_event(event)?; Ok(()) } /// Receive a acknowledgement - fn acknowledge_packet(&self, msg: &MsgAcknowledgement) -> Result<()> { + fn acknowledge_packet( + &mut self, + msg: &MsgAcknowledgement, + ) -> std::result::Result<(), Self::Error> { let commitment_key = storage::commitment_key( &msg.packet.source_port, &msg.packet.source_channel, msg.packet.sequence, ); - self.delete_ibc_data(&commitment_key); + self.delete_ibc_data(&commitment_key)?; let event = make_ack_event(msg.packet.clone()).try_into().unwrap(); - self.emit_ibc_event(event); + self.emit_ibc_event(event)?; Ok(()) } /// Receive a timeout - fn timeout_packet(&self, msg: &MsgTimeout) -> Result<()> { + fn timeout_packet( + &mut self, + msg: &MsgTimeout, + ) -> std::result::Result<(), Self::Error> { // check the packet data if let Ok(data) = serde_json::from_slice(&msg.packet.data) { self.refund_token(&msg.packet, &data)?; @@ -671,7 +747,7 @@ pub trait IbcActions { &msg.packet.source_channel, msg.packet.sequence, ); - self.delete_ibc_data(&commitment_key); + self.delete_ibc_data(&commitment_key)?; // close the channel let port_channel_id = port_channel_id( @@ -679,7 +755,7 @@ pub trait IbcActions { msg.packet.source_channel.clone(), ); let channel_key = storage::channel_key(&port_channel_id); - let value = self.read_ibc_data(&channel_key).ok_or_else(|| { + let value = self.read_ibc_data(&channel_key)?.ok_or_else(|| { Error::Channel(format!( "The channel to be closed doesn't exist: Port/Channel {}", port_channel_id @@ -692,17 +768,20 @@ pub trait IbcActions { self.write_ibc_data( &channel_key, channel.encode_vec().expect("encoding shouldn't fail"), - ); + )?; } let event = make_timeout_event(msg.packet.clone()).try_into().unwrap(); - self.emit_ibc_event(event); + self.emit_ibc_event(event)?; Ok(()) } /// Receive a timeout for TimeoutOnClose - fn timeout_on_close_packet(&self, msg: &MsgTimeoutOnClose) -> Result<()> { + fn timeout_on_close_packet( + &mut self, + msg: &MsgTimeoutOnClose, + ) -> 
std::result::Result<(), Self::Error> { // check the packet data if let Ok(data) = serde_json::from_slice(&msg.packet.data) { self.refund_token(&msg.packet, &data)?; @@ -714,7 +793,7 @@ pub trait IbcActions { &msg.packet.source_channel, msg.packet.sequence, ); - self.delete_ibc_data(&commitment_key); + self.delete_ibc_data(&commitment_key)?; // close the channel let port_channel_id = port_channel_id( @@ -722,7 +801,7 @@ pub trait IbcActions { msg.packet.source_channel.clone(), ); let channel_key = storage::channel_key(&port_channel_id); - let value = self.read_ibc_data(&channel_key).ok_or_else(|| { + let value = self.read_ibc_data(&channel_key)?.ok_or_else(|| { Error::Channel(format!( "The channel to be closed doesn't exist: Port/Channel {}", port_channel_id @@ -735,15 +814,18 @@ pub trait IbcActions { self.write_ibc_data( &channel_key, channel.encode_vec().expect("encoding shouldn't fail"), - ); + )?; } Ok(()) } /// Set the timestamp and the height for the client update - fn set_client_update_time(&self, client_id: &ClientId) -> Result<()> { - let time = Time::parse_from_rfc3339(&self.get_header_time().0) + fn set_client_update_time( + &mut self, + client_id: &ClientId, + ) -> std::result::Result<(), Self::Error> { + let time = Time::parse_from_rfc3339(&self.get_header_time()?.0) .map_err(|e| { Error::Time(format!("The time of the header is invalid: {}", e)) })?; @@ -751,36 +833,42 @@ pub trait IbcActions { self.write_ibc_data( &key, time.encode_vec().expect("encoding shouldn't fail"), - ); + )?; // the revision number is always 0 - let height = Height::new(0, self.get_height().0); + let height = Height::new(0, self.get_height()?.0); let height_key = storage::client_update_height_key(client_id); // write the current height as u64 self.write_ibc_data( &height_key, height.encode_vec().expect("Encoding shouldn't fail"), - ); + )?; Ok(()) } /// Get and increment the counter - fn get_and_inc_counter(&self, key: &Key) -> Result { - let value = self.read_ibc_data(key).ok_or_else(|| { + fn get_and_inc_counter( + &mut self, + key: &Key, + ) -> std::result::Result { + let value = self.read_ibc_data(key)?.ok_or_else(|| { Error::Counter(format!("The counter doesn't exist: {}", key)) })?; let value: [u8; 8] = value.try_into().map_err(|_| { Error::Counter(format!("The counter value wasn't u64: Key {}", key)) })?; let counter = u64::from_be_bytes(value); - self.write_ibc_data(key, (counter + 1).to_be_bytes()); + self.write_ibc_data(key, (counter + 1).to_be_bytes())?; Ok(counter) } /// Get and increment the sequence - fn get_and_inc_sequence(&self, key: &Key) -> Result { - let index = match self.read_ibc_data(key) { + fn get_and_inc_sequence( + &mut self, + key: &Key, + ) -> std::result::Result { + let index = match self.read_ibc_data(key)? { Some(v) => { let index: [u8; 8] = v.try_into().map_err(|_| { Error::Sequence(format!( @@ -793,29 +881,35 @@ pub trait IbcActions { // when the sequence has never been used, returns the initial value None => 1, }; - self.write_ibc_data(key, (index + 1).to_be_bytes()); + self.write_ibc_data(key, (index + 1).to_be_bytes())?; Ok(index.into()) } /// Bind a new port - fn bind_port(&self, port_id: &PortId) -> Result<()> { + fn bind_port( + &mut self, + port_id: &PortId, + ) -> std::result::Result<(), Self::Error> { let port_key = storage::port_key(port_id); - match self.read_ibc_data(&port_key) { + match self.read_ibc_data(&port_key)? 
{ Some(_) => {} None => { // create a new capability and claim it let index_key = storage::capability_index_key(); let cap_index = self.get_and_inc_counter(&index_key)?; - self.write_ibc_data(&port_key, cap_index.to_be_bytes()); + self.write_ibc_data(&port_key, cap_index.to_be_bytes())?; let cap_key = storage::capability_key(cap_index); - self.write_ibc_data(&cap_key, port_id.as_bytes()); + self.write_ibc_data(&cap_key, port_id.as_bytes())?; } } Ok(()) } /// Send the specified token by escrowing or burning - fn send_token(&self, msg: &MsgTransfer) -> Result<()> { + fn send_token( + &mut self, + msg: &MsgTransfer, + ) -> std::result::Result<(), Self::Error> { let data = FungibleTokenPacketData::from(msg.clone()); let source = Address::decode(data.sender.clone()).map_err(|e| { Error::SendingToken(format!( @@ -852,7 +946,7 @@ pub trait IbcActions { if data.denomination.starts_with(&prefix) { // sink zone let burn = Address::Internal(InternalAddress::IbcBurn); - self.transfer_token(&source, &burn, &token, amount); + self.transfer_token(&source, &burn, &token, amount)?; } else { // source zone let escrow = @@ -860,7 +954,7 @@ pub trait IbcActions { msg.source_port.to_string(), msg.source_channel.to_string(), )); - self.transfer_token(&source, &escrow, &token, amount); + self.transfer_token(&source, &escrow, &token, amount)?; } // send a packet @@ -880,10 +974,10 @@ pub trait IbcActions { /// Receive the specified token by unescrowing or minting fn receive_token( - &self, + &mut self, packet: &Packet, data: &FungibleTokenPacketData, - ) -> Result<()> { + ) -> std::result::Result<(), Self::Error> { let dest = Address::decode(data.receiver.clone()).map_err(|e| { Error::ReceivingToken(format!( "Invalid receiver address: receiver {}, error {}", @@ -922,21 +1016,21 @@ pub trait IbcActions { packet.destination_port.to_string(), packet.destination_channel.to_string(), )); - self.transfer_token(&escrow, &dest, &token, amount); + self.transfer_token(&escrow, &dest, &token, amount)?; } else { // mint the token because the sender chain is the source let mint = Address::Internal(InternalAddress::IbcMint); - self.transfer_token(&mint, &dest, &token, amount); + self.transfer_token(&mint, &dest, &token, amount)?; } Ok(()) } /// Refund the specified token by unescrowing or minting fn refund_token( - &self, + &mut self, packet: &Packet, data: &FungibleTokenPacketData, - ) -> Result<()> { + ) -> std::result::Result<(), Self::Error> { let dest = Address::decode(data.sender.clone()).map_err(|e| { Error::ReceivingToken(format!( "Invalid sender address: sender {}, error {}", @@ -971,7 +1065,7 @@ pub trait IbcActions { if data.denomination.starts_with(&prefix) { // mint the token because the sender chain is the sink zone let mint = Address::Internal(InternalAddress::IbcMint); - self.transfer_token(&mint, &dest, &token, amount); + self.transfer_token(&mint, &dest, &token, amount)?; } else { // unescrow the token because the sender chain is the source zone let escrow = @@ -979,7 +1073,7 @@ pub trait IbcActions { packet.source_port.to_string(), packet.source_channel.to_string(), )); - self.transfer_token(&escrow, &dest, &token, amount); + self.transfer_token(&escrow, &dest, &token, amount)?; } Ok(()) } diff --git a/shared/src/ledger/ibc/vp/channel.rs b/shared/src/ledger/ibc/vp/channel.rs index 354899f31d..08ed322452 100644 --- a/shared/src/ledger/ibc/vp/channel.rs +++ b/shared/src/ledger/ibc/vp/channel.rs @@ -45,7 +45,7 @@ use crate::ibc::core::ics24_host::identifier::{ use 
crate::ibc::core::ics26_routing::msgs::Ics26Envelope; use crate::ibc::proofs::Proofs; use crate::ibc::timestamp::Timestamp; -use crate::ledger::native_vp::Error as NativeVpError; +use crate::ledger::native_vp::{Error as NativeVpError, VpEnv}; use crate::ledger::parameters; use crate::ledger::storage::{self as ledger_storage, StorageHasher}; use crate::tendermint::Time; @@ -490,7 +490,7 @@ where } fn get_sequence_pre(&self, key: &Key) -> Result { - match self.ctx.read_pre(key)? { + match self.ctx.read_bytes_pre(key)? { Some(value) => { // As ibc-go, u64 like a counter is encoded with big-endian let index: [u8; 8] = value.try_into().map_err(|_| { @@ -508,7 +508,7 @@ where } fn get_sequence(&self, key: &Key) -> Result { - match self.ctx.read_post(key)? { + match self.ctx.read_bytes_post(key)? { Some(value) => { // As ibc-go, u64 like a counter is encoded with big-endian let index: [u8; 8] = value.try_into().map_err(|_| { @@ -547,7 +547,7 @@ where port_channel_id: &PortChannelId, ) -> Result { let key = channel_key(port_channel_id); - match self.ctx.read_pre(&key) { + match self.ctx.read_bytes_pre(&key) { Ok(Some(value)) => ChannelEnd::decode_vec(&value).map_err(|e| { Error::InvalidChannel(format!( "Decoding the channel failed: Port/Channel {}, {}", @@ -594,7 +594,7 @@ where key: &(PortId, ChannelId, Sequence), ) -> Result { let key = commitment_key(&key.0, &key.1, key.2); - match self.ctx.read_pre(&key)? { + match self.ctx.read_bytes_pre(&key)? { Some(value) => String::decode(&value[..]).map_err(|e| { Error::InvalidPacketInfo(format!( "Decoding the prior commitment failed: {}", @@ -613,7 +613,7 @@ where client_id: &ClientId, ) -> Result { let key = client_update_timestamp_key(client_id); - match self.ctx.read_pre(&key)? { + match self.ctx.read_bytes_pre(&key)? { Some(value) => { let time = Time::decode_vec(&value).map_err(|_| { Error::InvalidTimestamp(format!( @@ -635,7 +635,7 @@ where client_id: &ClientId, ) -> Result { let key = client_update_height_key(client_id); - match self.ctx.read_pre(&key)? { + match self.ctx.read_bytes_pre(&key)? 
{ Some(value) => Height::decode_vec(&value).map_err(|_| { Error::InvalidHeight(format!( "Height conversion failed: ID {}", @@ -671,7 +671,7 @@ where channel_id: port_channel_id.1.clone(), }; let key = channel_key(&port_channel_id); - match self.ctx.read_post(&key) { + match self.ctx.read_bytes_post(&key) { Ok(Some(value)) => ChannelEnd::decode_vec(&value) .map_err(|_| Ics04Error::implementation_specific()), Ok(None) => Err(Ics04Error::channel_not_found( @@ -818,7 +818,7 @@ where key: &(PortId, ChannelId, Sequence), ) -> Ics04Result { let commitment_key = commitment_key(&key.0, &key.1, key.2); - match self.ctx.read_post(&commitment_key) { + match self.ctx.read_bytes_post(&commitment_key) { Ok(Some(value)) => String::decode(&value[..]) .map_err(|_| Ics04Error::implementation_specific()), Ok(None) => Err(Ics04Error::packet_commitment_not_found(key.2)), @@ -832,7 +832,7 @@ where ) -> Ics04Result { let receipt_key = receipt_key(&key.0, &key.1, key.2); let expect = PacketReceipt::default().as_bytes().to_vec(); - match self.ctx.read_post(&receipt_key) { + match self.ctx.read_bytes_post(&receipt_key) { Ok(Some(v)) if v == expect => Ok(Receipt::Ok), _ => Err(Ics04Error::packet_receipt_not_found(key.2)), } @@ -844,7 +844,7 @@ where key: &(PortId, ChannelId, Sequence), ) -> Ics04Result { let ack_key = ack_key(&key.0, &key.1, key.2); - match self.ctx.read_post(&ack_key) { + match self.ctx.read_bytes_post(&ack_key) { Ok(Some(_)) => Ok(PacketAck::default().to_string()), Ok(None) => Err(Ics04Error::packet_commitment_not_found(key.2)), Err(_) => Err(Ics04Error::implementation_specific()), @@ -881,7 +881,7 @@ where height: Height, ) -> Ics04Result { let key = client_update_timestamp_key(client_id); - match self.ctx.read_post(&key) { + match self.ctx.read_bytes_post(&key) { Ok(Some(value)) => { let time = Time::decode_vec(&value) .map_err(|_| Ics04Error::implementation_specific())?; @@ -901,7 +901,7 @@ where height: Height, ) -> Ics04Result { let key = client_update_height_key(client_id); - match self.ctx.read_post(&key) { + match self.ctx.read_bytes_post(&key) { Ok(Some(value)) => Height::decode_vec(&value) .map_err(|_| Ics04Error::implementation_specific()), Ok(None) => Err(Ics04Error::processed_height_not_found( diff --git a/shared/src/ledger/ibc/vp/client.rs b/shared/src/ledger/ibc/vp/client.rs index 4b89e1ce30..453006f356 100644 --- a/shared/src/ledger/ibc/vp/client.rs +++ b/shared/src/ledger/ibc/vp/client.rs @@ -31,6 +31,7 @@ use crate::ibc::core::ics04_channel::context::ChannelReader; use crate::ibc::core::ics23_commitment::commitment::CommitmentRoot; use crate::ibc::core::ics24_host::identifier::ClientId; use crate::ibc::core::ics26_routing::msgs::Ics26Envelope; +use crate::ledger::native_vp::VpEnv; use crate::ledger::storage::{self, StorageHasher}; use crate::tendermint_proto::Protobuf; use crate::types::ibc::data::{Error as IbcDataError, IbcMessage}; @@ -378,7 +379,7 @@ where fn client_state_pre(&self, client_id: &ClientId) -> Result { let key = client_state_key(client_id); - match self.ctx.read_pre(&key) { + match self.ctx.read_bytes_pre(&key) { Ok(Some(value)) => { AnyClientState::decode_vec(&value).map_err(|e| { Error::InvalidClient(format!( @@ -410,7 +411,7 @@ where { fn client_type(&self, client_id: &ClientId) -> Ics02Result { let key = client_type_key(client_id); - match self.ctx.read_post(&key) { + match self.ctx.read_bytes_post(&key) { Ok(Some(value)) => { let type_str = std::str::from_utf8(&value) .map_err(|_| Ics02Error::implementation_specific())?; @@ -427,7 +428,7 @@ where client_id: 
&ClientId, ) -> Ics02Result { let key = client_state_key(client_id); - match self.ctx.read_post(&key) { + match self.ctx.read_bytes_post(&key) { Ok(Some(value)) => AnyClientState::decode_vec(&value) .map_err(|_| Ics02Error::implementation_specific()), Ok(None) => Err(Ics02Error::client_not_found(client_id.clone())), @@ -441,7 +442,7 @@ where height: Height, ) -> Ics02Result { let key = consensus_state_key(client_id, height); - match self.ctx.read_post(&key) { + match self.ctx.read_bytes_post(&key) { Ok(Some(value)) => AnyConsensusState::decode_vec(&value) .map_err(|_| Ics02Error::implementation_specific()), Ok(None) => Err(Ics02Error::consensus_state_not_found( @@ -459,7 +460,7 @@ where height: Height, ) -> Ics02Result> { let key = consensus_state_key(client_id, height); - match self.ctx.read_pre(&key) { + match self.ctx.read_bytes_pre(&key) { Ok(Some(value)) => { let cs = AnyConsensusState::decode_vec(&value) .map_err(|_| Ics02Error::implementation_specific())?; diff --git a/shared/src/ledger/ibc/vp/connection.rs b/shared/src/ledger/ibc/vp/connection.rs index 2e721bed08..0130dd3b84 100644 --- a/shared/src/ledger/ibc/vp/connection.rs +++ b/shared/src/ledger/ibc/vp/connection.rs @@ -27,6 +27,7 @@ use crate::ibc::core::ics03_connection::msgs::conn_open_confirm::MsgConnectionOp use crate::ibc::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; use crate::ibc::core::ics23_commitment::commitment::CommitmentPrefix; use crate::ibc::core::ics24_host::identifier::{ClientId, ConnectionId}; +use crate::ledger::native_vp::VpEnv; use crate::ledger::storage::{self, StorageHasher}; use crate::tendermint_proto::Protobuf; use crate::types::ibc::data::{Error as IbcDataError, IbcMessage}; @@ -324,7 +325,7 @@ where conn_id: &ConnectionId, ) -> Result { let key = connection_key(conn_id); - match self.ctx.read_pre(&key) { + match self.ctx.read_bytes_pre(&key) { Ok(Some(value)) => ConnectionEnd::decode_vec(&value).map_err(|e| { Error::InvalidConnection(format!( "Decoding the connection failed: {}", @@ -356,7 +357,7 @@ where conn_id: &ConnectionId, ) -> Ics03Result { let key = connection_key(conn_id); - match self.ctx.read_post(&key) { + match self.ctx.read_bytes_post(&key) { Ok(Some(value)) => ConnectionEnd::decode_vec(&value) .map_err(|_| Ics03Error::implementation_specific()), Ok(None) => Err(Ics03Error::connection_not_found(conn_id.clone())), diff --git a/shared/src/ledger/ibc/vp/mod.rs b/shared/src/ledger/ibc/vp/mod.rs index b6bd15e43b..059862144d 100644 --- a/shared/src/ledger/ibc/vp/mod.rs +++ b/shared/src/ledger/ibc/vp/mod.rs @@ -17,7 +17,7 @@ pub use token::{Error as IbcTokenError, IbcToken}; use super::storage::{client_id, ibc_prefix, is_client_counter_key, IbcPrefix}; use crate::ibc::core::ics02_client::context::ClientReader; use crate::ibc::events::IbcEvent; -use crate::ledger::native_vp::{self, Ctx, NativeVp}; +use crate::ledger::native_vp::{self, Ctx, NativeVp, VpEnv}; use crate::ledger::storage::{self as ledger_storage, StorageHasher}; use crate::proto::SignedTxData; use crate::types::address::{Address, InternalAddress}; @@ -184,7 +184,7 @@ where } fn read_counter_pre(&self, key: &Key) -> Result { - match self.ctx.read_pre(key) { + match self.ctx.read_bytes_pre(key) { Ok(Some(value)) => { // As ibc-go, u64 like a counter is encoded with big-endian let counter: [u8; 8] = value.try_into().map_err(|_| { @@ -205,7 +205,7 @@ where } fn read_counter(&self, key: &Key) -> Result { - match self.ctx.read_post(key) { + match self.ctx.read_bytes_post(key) { Ok(Some(value)) => { // As ibc-go, 
u64 like a counter is encoded with big-endian let counter: [u8; 8] = value.try_into().map_err(|_| { @@ -375,6 +375,8 @@ mod tests { use crate::vm::wasm; use crate::types::storage::{BlockHash, BlockHeight}; + const ADDRESS: Address = Address::Internal(InternalAddress::Ibc); + fn get_client_id() -> ClientId { ClientId::from_str("test_client").expect("Creating a client ID failed") } @@ -568,13 +570,21 @@ mod tests { let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let ctx = Ctx::new(&storage, &write_log, &tx, gas_meter, vp_wasm_cache); - let mut keys_changed = BTreeSet::new(); let client_state_key = client_state_key(&get_client_id()); keys_changed.insert(client_state_key); let verifiers = BTreeSet::new(); + let ctx = Ctx::new( + &ADDRESS, + &storage, + &write_log, + &tx, + gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); let ibc = Ibc { ctx }; // this should return true because state has been stored @@ -598,13 +608,22 @@ mod tests { let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let ctx = Ctx::new(&storage, &write_log, &tx, gas_meter, vp_wasm_cache); let mut keys_changed = BTreeSet::new(); let client_state_key = client_state_key(&get_client_id()); keys_changed.insert(client_state_key); let verifiers = BTreeSet::new(); + let ctx = Ctx::new( + &ADDRESS, + &storage, + &write_log, + &tx, + gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); let ibc = Ibc { ctx }; // this should fail because no state is stored @@ -668,13 +687,21 @@ mod tests { let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let ctx = Ctx::new(&storage, &write_log, &tx, gas_meter, vp_wasm_cache); let mut keys_changed = BTreeSet::new(); keys_changed.insert(client_state_key); let verifiers = BTreeSet::new(); - + let ctx = Ctx::new( + &ADDRESS, + &storage, + &write_log, + &tx, + gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); let ibc = Ibc { ctx }; // this should return true because state has been stored assert!( @@ -717,13 +744,21 @@ mod tests { let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let ctx = Ctx::new(&storage, &write_log, &tx, gas_meter, vp_wasm_cache); let mut keys_changed = BTreeSet::new(); keys_changed.insert(conn_key); let verifiers = BTreeSet::new(); - + let ctx = Ctx::new( + &ADDRESS, + &storage, + &write_log, + &tx, + gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); let ibc = Ibc { ctx }; // this should return true because state has been stored assert!( @@ -763,13 +798,21 @@ mod tests { let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let ctx = Ctx::new(&storage, &write_log, &tx, gas_meter, vp_wasm_cache); let mut keys_changed = BTreeSet::new(); keys_changed.insert(conn_key); let verifiers = BTreeSet::new(); - + let ctx = Ctx::new( + &ADDRESS, + &storage, + &write_log, + &tx, + gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); let ibc = Ibc { ctx }; // this should fail because no client exists let result = ibc @@ -835,13 +878,21 @@ mod tests { let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let ctx = Ctx::new(&storage, &write_log, &tx, gas_meter, vp_wasm_cache); let mut keys_changed = 
BTreeSet::new(); keys_changed.insert(conn_key); let verifiers = BTreeSet::new(); - + let ctx = Ctx::new( + &ADDRESS, + &storage, + &write_log, + &tx, + gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); let ibc = Ibc { ctx }; // this should return true because state has been stored assert!( @@ -913,13 +964,21 @@ mod tests { let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let ctx = Ctx::new(&storage, &write_log, &tx, gas_meter, vp_wasm_cache); let mut keys_changed = BTreeSet::new(); keys_changed.insert(conn_key); let verifiers = BTreeSet::new(); - + let ctx = Ctx::new( + &ADDRESS, + &storage, + &write_log, + &tx, + gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); let ibc = Ibc { ctx }; assert!( ibc.validate_tx( @@ -978,13 +1037,21 @@ mod tests { let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let ctx = Ctx::new(&storage, &write_log, &tx, gas_meter, vp_wasm_cache); let mut keys_changed = BTreeSet::new(); keys_changed.insert(conn_key); let verifiers = BTreeSet::new(); - + let ctx = Ctx::new( + &ADDRESS, + &storage, + &write_log, + &tx, + gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); let ibc = Ibc { ctx }; assert!( ibc.validate_tx( @@ -1029,13 +1096,21 @@ mod tests { let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let ctx = Ctx::new(&storage, &write_log, &tx, gas_meter, vp_wasm_cache); let mut keys_changed = BTreeSet::new(); keys_changed.insert(channel_key); let verifiers = BTreeSet::new(); - + let ctx = Ctx::new( + &ADDRESS, + &storage, + &write_log, + &tx, + gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); let ibc = Ibc { ctx }; assert!( ibc.validate_tx( @@ -1099,13 +1174,21 @@ mod tests { let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let ctx = Ctx::new(&storage, &write_log, &tx, gas_meter, vp_wasm_cache); let mut keys_changed = BTreeSet::new(); keys_changed.insert(channel_key); let verifiers = BTreeSet::new(); - + let ctx = Ctx::new( + &ADDRESS, + &storage, + &write_log, + &tx, + gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); let ibc = Ibc { ctx }; assert!( ibc.validate_tx( @@ -1177,13 +1260,21 @@ mod tests { let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let ctx = Ctx::new(&storage, &write_log, &tx, gas_meter, vp_wasm_cache); let mut keys_changed = BTreeSet::new(); keys_changed.insert(channel_key); let verifiers = BTreeSet::new(); - + let ctx = Ctx::new( + &ADDRESS, + &storage, + &write_log, + &tx, + gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); let ibc = Ibc { ctx }; assert!( ibc.validate_tx( @@ -1250,13 +1341,21 @@ mod tests { let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let ctx = Ctx::new(&storage, &write_log, &tx, gas_meter, vp_wasm_cache); let mut keys_changed = BTreeSet::new(); keys_changed.insert(channel_key); let verifiers = BTreeSet::new(); - + let ctx = Ctx::new( + &ADDRESS, + &storage, + &write_log, + &tx, + gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); let ibc = Ibc { ctx }; assert!( ibc.validate_tx( @@ -1281,13 +1380,21 @@ mod tests { let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, 
_vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let ctx = Ctx::new(&storage, &write_log, &tx, gas_meter, vp_wasm_cache); let mut keys_changed = BTreeSet::new(); keys_changed.insert(port_key(&get_port_id())); let verifiers = BTreeSet::new(); - + let ctx = Ctx::new( + &ADDRESS, + &storage, + &write_log, + &tx, + gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); let ibc = Ibc { ctx }; assert!( ibc.validate_tx( @@ -1313,13 +1420,22 @@ mod tests { let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let ctx = Ctx::new(&storage, &write_log, &tx, gas_meter, vp_wasm_cache); let mut keys_changed = BTreeSet::new(); let cap_key = capability_key(index); keys_changed.insert(cap_key); let verifiers = BTreeSet::new(); + let ctx = Ctx::new( + &ADDRESS, + &storage, + &write_log, + &tx, + gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); let ibc = Ibc { ctx }; assert!( @@ -1390,13 +1506,21 @@ mod tests { let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let ctx = Ctx::new(&storage, &write_log, &tx, gas_meter, vp_wasm_cache); let mut keys_changed = BTreeSet::new(); keys_changed.insert(seq_key); let verifiers = BTreeSet::new(); - + let ctx = Ctx::new( + &ADDRESS, + &storage, + &write_log, + &tx, + gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); let ibc = Ibc { ctx }; assert!( ibc.validate_tx( @@ -1469,13 +1593,21 @@ mod tests { let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let ctx = Ctx::new(&storage, &write_log, &tx, gas_meter, vp_wasm_cache); let mut keys_changed = BTreeSet::new(); keys_changed.insert(seq_key); let verifiers = BTreeSet::new(); - + let ctx = Ctx::new( + &ADDRESS, + &storage, + &write_log, + &tx, + gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); let ibc = Ibc { ctx }; assert!( ibc.validate_tx( @@ -1553,13 +1685,21 @@ mod tests { let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let ctx = Ctx::new(&storage, &write_log, &tx, gas_meter, vp_wasm_cache); let mut keys_changed = BTreeSet::new(); keys_changed.insert(seq_key); let verifiers = BTreeSet::new(); - + let ctx = Ctx::new( + &ADDRESS, + &storage, + &write_log, + &tx, + gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); let ibc = Ibc { ctx }; assert!( ibc.validate_tx( @@ -1633,13 +1773,21 @@ mod tests { let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let ctx = Ctx::new(&storage, &write_log, &tx, gas_meter, vp_wasm_cache); let mut keys_changed = BTreeSet::new(); keys_changed.insert(commitment_key); let verifiers = BTreeSet::new(); - + let ctx = Ctx::new( + &ADDRESS, + &storage, + &write_log, + &tx, + gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); let ibc = Ibc { ctx }; assert!( ibc.validate_tx( @@ -1717,12 +1865,20 @@ mod tests { let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let ctx = Ctx::new(&storage, &write_log, &tx, gas_meter, vp_wasm_cache); - let mut keys_changed = BTreeSet::new(); keys_changed.insert(receipt_key); let verifiers = BTreeSet::new(); + let ctx = Ctx::new( + &ADDRESS, + &storage, + &write_log, + &tx, + gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, 
+ ); let ibc = Ibc { ctx }; assert!( @@ -1757,12 +1913,20 @@ mod tests { let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); - let ctx = Ctx::new(&storage, &write_log, &tx, gas_meter, vp_wasm_cache); - let mut keys_changed = BTreeSet::new(); keys_changed.insert(ack_key); let verifiers = BTreeSet::new(); + let ctx = Ctx::new( + &ADDRESS, + &storage, + &write_log, + &tx, + gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache, + ); let ibc = Ibc { ctx }; assert!( diff --git a/shared/src/ledger/ibc/vp/port.rs b/shared/src/ledger/ibc/vp/port.rs index 2819cdeab5..073f89147b 100644 --- a/shared/src/ledger/ibc/vp/port.rs +++ b/shared/src/ledger/ibc/vp/port.rs @@ -13,6 +13,7 @@ use crate::ibc::core::ics05_port::capabilities::{Capability, CapabilityName}; use crate::ibc::core::ics05_port::context::{CapabilityReader, PortReader}; use crate::ibc::core::ics05_port::error::Error as Ics05Error; use crate::ibc::core::ics24_host::identifier::PortId; +use crate::ledger::native_vp::VpEnv; use crate::ledger::storage::{self as ledger_storage, StorageHasher}; use crate::types::storage::Key; use crate::vm::WasmCacheAccess; @@ -122,7 +123,7 @@ where fn get_port_by_capability(&self, cap: &Capability) -> Result { let key = capability_key(cap.index()); - match self.ctx.read_post(&key) { + match self.ctx.read_bytes_post(&key) { Ok(Some(value)) => { let id = std::str::from_utf8(&value).map_err(|e| { Error::InvalidPort(format!( @@ -161,7 +162,7 @@ where port_id: &PortId, ) -> Ics05Result<(Self::ModuleId, Capability)> { let key = port_key(port_id); - match self.ctx.read_post(&key) { + match self.ctx.read_bytes_post(&key) { Ok(Some(value)) => { let index: [u8; 8] = value .try_into() diff --git a/shared/src/ledger/ibc/vp/token.rs b/shared/src/ledger/ibc/vp/token.rs index f56382b360..06181bdd45 100644 --- a/shared/src/ledger/ibc/vp/token.rs +++ b/shared/src/ledger/ibc/vp/token.rs @@ -10,7 +10,7 @@ use crate::ibc::applications::ics20_fungible_token_transfer::msgs::transfer::Msg use crate::ibc::core::ics04_channel::msgs::PacketMsg; use crate::ibc::core::ics04_channel::packet::Packet; use crate::ibc::core::ics26_routing::msgs::Ics26Envelope; -use crate::ledger::native_vp::{self, Ctx, NativeVp}; +use crate::ledger::native_vp::{self, Ctx, NativeVp, VpEnv}; use crate::ledger::storage::{self as ledger_storage, StorageHasher}; use crate::proto::SignedTxData; use crate::types::address::{Address, Error as AddressError, InternalAddress}; @@ -136,9 +136,10 @@ where // sink zone let target = Address::Internal(InternalAddress::IbcBurn); let target_key = token::balance_key(&token, &target); - let post = - try_decode_token_amount(self.ctx.read_temp(&target_key)?)? - .unwrap_or_default(); + let post = try_decode_token_amount( + self.ctx.read_bytes_temp(&target_key)?, + )? + .unwrap_or_default(); // the previous balance of the burn address should be zero post.change() } else { @@ -149,11 +150,13 @@ where msg.source_channel.to_string(), )); let target_key = token::balance_key(&token, &target); - let pre = try_decode_token_amount(self.ctx.read_pre(&target_key)?)? - .unwrap_or_default(); - let post = - try_decode_token_amount(self.ctx.read_post(&target_key)?)? + let pre = + try_decode_token_amount(self.ctx.read_bytes_pre(&target_key)?)? .unwrap_or_default(); + let post = try_decode_token_amount( + self.ctx.read_bytes_post(&target_key)?, + )? 
+ .unwrap_or_default(); post.change() - pre.change() }; @@ -189,19 +192,22 @@ where packet.destination_channel.to_string(), )); let source_key = token::balance_key(&token, &source); - let pre = try_decode_token_amount(self.ctx.read_pre(&source_key)?)? - .unwrap_or_default(); - let post = - try_decode_token_amount(self.ctx.read_post(&source_key)?)? + let pre = + try_decode_token_amount(self.ctx.read_bytes_pre(&source_key)?)? .unwrap_or_default(); + let post = try_decode_token_amount( + self.ctx.read_bytes_post(&source_key)?, + )? + .unwrap_or_default(); pre.change() - post.change() } else { // the sender is the source let source = Address::Internal(InternalAddress::IbcMint); let source_key = token::balance_key(&token, &source); - let post = - try_decode_token_amount(self.ctx.read_temp(&source_key)?)? - .unwrap_or_default(); + let post = try_decode_token_amount( + self.ctx.read_bytes_temp(&source_key)?, + )? + .unwrap_or_default(); // the previous balance of the mint address should be the maximum Amount::max().change() - post.change() }; @@ -235,9 +241,10 @@ where // sink zone: mint the token for the refund let source = Address::Internal(InternalAddress::IbcMint); let source_key = token::balance_key(&token, &source); - let post = - try_decode_token_amount(self.ctx.read_temp(&source_key)?)? - .unwrap_or_default(); + let post = try_decode_token_amount( + self.ctx.read_bytes_temp(&source_key)?, + )? + .unwrap_or_default(); // the previous balance of the mint address should be the maximum Amount::max().change() - post.change() } else { @@ -248,11 +255,13 @@ where packet.source_channel.to_string(), )); let source_key = token::balance_key(&token, &source); - let pre = try_decode_token_amount(self.ctx.read_pre(&source_key)?)? - .unwrap_or_default(); - let post = - try_decode_token_amount(self.ctx.read_post(&source_key)?)? + let pre = + try_decode_token_amount(self.ctx.read_bytes_pre(&source_key)?)? .unwrap_or_default(); + let post = try_decode_token_amount( + self.ctx.read_bytes_post(&source_key)?, + )? 
+ .unwrap_or_default(); pre.change() - post.change() }; diff --git a/shared/src/ledger/mod.rs b/shared/src/ledger/mod.rs index 2d545f96f2..fefb32ac64 100644 --- a/shared/src/ledger/mod.rs +++ b/shared/src/ledger/mod.rs @@ -8,5 +8,7 @@ pub mod native_vp; pub mod parameters; pub mod pos; pub mod storage; +pub mod storage_api; pub mod treasury; +pub mod tx_env; pub mod vp_env; diff --git a/shared/src/ledger/native_vp.rs b/shared/src/ledger/native_vp.rs index faad84a0f4..2b7d41e795 100644 --- a/shared/src/ledger/native_vp.rs +++ b/shared/src/ledger/native_vp.rs @@ -3,27 +3,23 @@ use std::cell::RefCell; use std::collections::BTreeSet; -use thiserror::Error; - +use super::storage_api::{self, ResultExt, StorageRead}; +pub use super::vp_env::VpEnv; use crate::ledger::gas::VpGasMeter; use crate::ledger::storage::write_log::WriteLog; use crate::ledger::storage::{Storage, StorageHasher}; use crate::ledger::{storage, vp_env}; use crate::proto::Tx; use crate::types::address::{Address, InternalAddress}; +use crate::types::hash::Hash; use crate::types::storage::{BlockHash, BlockHeight, Epoch, Key}; use crate::vm::prefix_iter::PrefixIterators; use crate::vm::WasmCacheAccess; -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum Error { - #[error("Host context error: {0}")] - ContextError(vp_env::RuntimeError), -} - -/// Native VP function result -pub type Result = std::result::Result; +/// Possible error in a native VP host function call +/// The `storage_api::Error` may wrap the `vp_env::RuntimeError` and can +/// be extended with other custom errors when using `trait VpEnv`. +pub type Error = storage_api::Error; /// A native VP module should implement its validation logic using this trait. pub trait NativeVp { @@ -54,6 +50,8 @@ where H: StorageHasher, CA: WasmCacheAccess, { + /// The address of the account that owns the VP + pub address: &'a Address, /// Storage prefix iterators. pub iterators: RefCell>, /// VP gas meter. @@ -64,6 +62,11 @@ where pub write_log: &'a WriteLog, /// The transaction code is used for signature verification pub tx: &'a Tx, + /// The storage keys that have been changed. Used for calls to `eval`. + pub keys_changed: &'a BTreeSet, + /// The verifiers whose validity predicates should be triggered. Used for + /// calls to `eval`. + pub verifiers: &'a BTreeSet
, /// VP WASM compilation cache #[cfg(feature = "wasm-runtime")] pub vp_wasm_cache: crate::vm::wasm::VpCache, @@ -72,6 +75,30 @@ where pub cache_access: std::marker::PhantomData, } +/// Read access to the prior storage (state before tx execution) via +/// [`trait@StorageRead`]. +#[derive(Debug)] +pub struct CtxPreStorageRead<'view, 'a: 'view, DB, H, CA> +where + DB: storage::DB + for<'iter> storage::DBIter<'iter>, + H: StorageHasher, + CA: WasmCacheAccess, +{ + ctx: &'view Ctx<'a, DB, H, CA>, +} + +/// Read access to the posterior storage (state after tx execution) via +/// [`trait@StorageRead`]. +#[derive(Debug)] +pub struct CtxPostStorageRead<'view, 'a: 'view, DB, H, CA> +where + DB: storage::DB + for<'iter> storage::DBIter<'iter>, + H: StorageHasher, + CA: WasmCacheAccess, +{ + ctx: &'view Ctx<'a, DB, H, CA>, +} + impl<'a, DB, H, CA> Ctx<'a, DB, H, CA> where DB: 'static + storage::DB + for<'iter> storage::DBIter<'iter>, @@ -79,20 +106,27 @@ where CA: 'static + WasmCacheAccess, { /// Initialize a new context for native VP call + #[allow(clippy::too_many_arguments)] pub fn new( + address: &'a Address, storage: &'a Storage, write_log: &'a WriteLog, tx: &'a Tx, gas_meter: VpGasMeter, + keys_changed: &'a BTreeSet, + verifiers: &'a BTreeSet
, #[cfg(feature = "wasm-runtime")] vp_wasm_cache: crate::vm::wasm::VpCache, ) -> Self { Self { + address, iterators: RefCell::new(PrefixIterators::default()), gas_meter: RefCell::new(gas_meter), storage, write_log, tx, + keys_changed, + verifiers, #[cfg(feature = "wasm-runtime")] vp_wasm_cache, #[cfg(not(feature = "wasm-runtime"))] @@ -101,153 +135,279 @@ where } /// Add a gas cost incured in a validity predicate - pub fn add_gas(&self, used_gas: u64) -> Result<()> { + pub fn add_gas(&self, used_gas: u64) -> Result<(), vp_env::RuntimeError> { vp_env::add_gas(&mut *self.gas_meter.borrow_mut(), used_gas) - .map_err(Error::ContextError) } - /// Storage read prior state (before tx execution). It will try to read from - /// the storage. - pub fn read_pre(&self, key: &Key) -> Result>> { + /// Read access to the prior storage (state before tx execution) + /// via [`trait@StorageRead`]. + pub fn pre<'view>(&'view self) -> CtxPreStorageRead<'view, 'a, DB, H, CA> { + CtxPreStorageRead { ctx: self } + } + + /// Read access to the posterior storage (state after tx execution) + /// via [`trait@StorageRead`]. + pub fn post<'view>( + &'view self, + ) -> CtxPostStorageRead<'view, 'a, DB, H, CA> { + CtxPostStorageRead { ctx: self } + } +} + +impl<'view, 'a, DB, H, CA> StorageRead<'view> + for CtxPreStorageRead<'view, 'a, DB, H, CA> +where + DB: 'static + storage::DB + for<'iter> storage::DBIter<'iter>, + H: 'static + StorageHasher, + CA: 'static + WasmCacheAccess, +{ + type PrefixIter = >::PrefixIter; + + fn read_bytes( + &self, + key: &crate::types::storage::Key, + ) -> Result>, storage_api::Error> { vp_env::read_pre( - &mut *self.gas_meter.borrow_mut(), - self.storage, - self.write_log, + &mut *self.ctx.gas_meter.borrow_mut(), + self.ctx.storage, + self.ctx.write_log, key, ) - .map_err(Error::ContextError) + .into_storage_result() } - /// Storage read posterior state (after tx execution). It will try to read - /// from the write log first and if no entry found then from the - /// storage. 
- pub fn read_post(&self, key: &Key) -> Result>> { + fn has_key( + &self, + key: &crate::types::storage::Key, + ) -> Result { + vp_env::has_key_pre( + &mut *self.ctx.gas_meter.borrow_mut(), + self.ctx.storage, + key, + ) + .into_storage_result() + } + + fn rev_iter_prefix( + &self, + prefix: &crate::types::storage::Key, + ) -> storage_api::Result { + self.ctx.rev_iter_prefix(prefix).into_storage_result() + } + + fn iter_next( + &self, + iter: &mut Self::PrefixIter, + ) -> Result)>, storage_api::Error> { + vp_env::iter_pre_next::(&mut *self.ctx.gas_meter.borrow_mut(), iter) + .into_storage_result() + } + + // ---- Methods below are implemented in `self.ctx`, because they are + // the same in `pre/post` ---- + + fn iter_prefix( + &self, + prefix: &crate::types::storage::Key, + ) -> Result { + self.ctx.iter_prefix(prefix) + } + + fn get_chain_id(&self) -> Result { + self.ctx.get_chain_id() + } + + fn get_block_height(&self) -> Result { + self.ctx.get_block_height() + } + + fn get_block_hash(&self) -> Result { + self.ctx.get_block_hash() + } + + fn get_block_epoch(&self) -> Result { + self.ctx.get_block_epoch() + } +} + +impl<'view, 'a, DB, H, CA> StorageRead<'view> + for CtxPostStorageRead<'view, 'a, DB, H, CA> +where + DB: 'static + storage::DB + for<'iter> storage::DBIter<'iter>, + H: 'static + StorageHasher, + CA: 'static + WasmCacheAccess, +{ + type PrefixIter = >::PrefixIter; + + fn read_bytes( + &self, + key: &crate::types::storage::Key, + ) -> Result>, storage_api::Error> { vp_env::read_post( - &mut *self.gas_meter.borrow_mut(), - self.storage, - self.write_log, + &mut *self.ctx.gas_meter.borrow_mut(), + self.ctx.storage, + self.ctx.write_log, key, ) - .map_err(Error::ContextError) + .into_storage_result() } - /// Storage read temporary state (after tx execution). It will try to read - /// from only the write log. - pub fn read_temp(&self, key: &Key) -> Result>> { - vp_env::read_temp( - &mut *self.gas_meter.borrow_mut(), - self.write_log, + fn has_key( + &self, + key: &crate::types::storage::Key, + ) -> Result { + vp_env::has_key_post( + &mut *self.ctx.gas_meter.borrow_mut(), + self.ctx.storage, + self.ctx.write_log, key, ) - .map_err(Error::ContextError) + .into_storage_result() } - /// Storage `has_key` in prior state (before tx execution). It will try to - /// read from the storage. 
- pub fn has_key_pre(&self, key: &Key) -> Result { - vp_env::has_key_pre( + fn rev_iter_prefix( + &self, + prefix: &crate::types::storage::Key, + ) -> storage_api::Result { + self.ctx.rev_iter_prefix(prefix).into_storage_result() + } + + fn iter_next( + &self, + iter: &mut Self::PrefixIter, + ) -> Result)>, storage_api::Error> { + vp_env::iter_post_next::( + &mut *self.ctx.gas_meter.borrow_mut(), + self.ctx.write_log, + iter, + ) + .into_storage_result() + } + + // ---- Methods below are implemented in `self.ctx`, because they are + // the same in `pre/post` ---- + + fn iter_prefix( + &self, + prefix: &crate::types::storage::Key, + ) -> Result { + self.ctx.iter_prefix(prefix) + } + + fn get_chain_id(&self) -> Result { + self.ctx.get_chain_id() + } + + fn get_block_height(&self) -> Result { + self.ctx.get_block_height() + } + + fn get_block_hash(&self) -> Result { + self.ctx.get_block_hash() + } + + fn get_block_epoch(&self) -> Result { + self.ctx.get_block_epoch() + } +} + +impl<'view, 'a: 'view, DB, H, CA> VpEnv<'view> for Ctx<'a, DB, H, CA> +where + DB: 'static + storage::DB + for<'iter> storage::DBIter<'iter>, + H: 'static + StorageHasher, + CA: 'static + WasmCacheAccess, +{ + type Post = CtxPostStorageRead<'view, 'a, DB, H, CA>; + type Pre = CtxPreStorageRead<'view, 'a, DB, H, CA>; + type PrefixIter = >::PrefixIter; + + fn pre(&'view self) -> Self::Pre { + CtxPreStorageRead { ctx: self } + } + + fn post(&'view self) -> Self::Post { + CtxPostStorageRead { ctx: self } + } + + fn read_temp( + &self, + key: &Key, + ) -> Result, storage_api::Error> { + vp_env::read_temp( &mut *self.gas_meter.borrow_mut(), - self.storage, + self.write_log, key, ) - .map_err(Error::ContextError) + .map(|data| data.and_then(|t| T::try_from_slice(&t[..]).ok())) + .into_storage_result() } - /// Storage `has_key` in posterior state (after tx execution). It will try - /// to check the write log first and if no entry found then the storage. - pub fn has_key_post(&self, key: &Key) -> Result { - vp_env::has_key_post( + fn read_bytes_temp( + &self, + key: &Key, + ) -> Result>, storage_api::Error> { + vp_env::read_temp( &mut *self.gas_meter.borrow_mut(), - self.storage, self.write_log, key, ) - .map_err(Error::ContextError) + .into_storage_result() } - /// Getting the chain ID. - pub fn get_chain_id(&self) -> Result { + fn get_chain_id(&'view self) -> Result { vp_env::get_chain_id(&mut *self.gas_meter.borrow_mut(), self.storage) - .map_err(Error::ContextError) + .into_storage_result() } - /// Getting the block height. The height is that of the block to which the - /// current transaction is being applied. - pub fn get_block_height(&self) -> Result { + fn get_block_height( + &'view self, + ) -> Result { vp_env::get_block_height( &mut *self.gas_meter.borrow_mut(), self.storage, ) - .map_err(Error::ContextError) + .into_storage_result() } - /// Getting the block hash. The height is that of the block to which the - /// current transaction is being applied. - pub fn get_block_hash(&self) -> Result { + fn get_block_hash(&'view self) -> Result { vp_env::get_block_hash(&mut *self.gas_meter.borrow_mut(), self.storage) - .map_err(Error::ContextError) + .into_storage_result() } - /// Getting the block epoch. The epoch is that of the block to which the - /// current transaction is being applied. 
- pub fn get_block_epoch(&self) -> Result { + fn get_block_epoch(&'view self) -> Result { vp_env::get_block_epoch(&mut *self.gas_meter.borrow_mut(), self.storage) - .map_err(Error::ContextError) + .into_storage_result() } - /// Storage prefix iterator. It will try to get an iterator from the - /// storage. - pub fn iter_prefix( - &self, + fn iter_prefix( + &'view self, prefix: &Key, - ) -> Result<>::PrefixIter> { + ) -> Result { vp_env::iter_prefix( &mut *self.gas_meter.borrow_mut(), self.storage, prefix, ) - .map_err(Error::ContextError) + .into_storage_result() } - /// Storage prefix iterator for prior state (before tx execution). It will - /// try to read from the storage. - pub fn iter_pre_next( + fn rev_iter_prefix( &self, - iter: &mut >::PrefixIter, - ) -> Result)>> { - vp_env::iter_pre_next::(&mut *self.gas_meter.borrow_mut(), iter) - .map_err(Error::ContextError) + prefix: &Key, + ) -> Result { + vp_env::rev_iter_prefix( + &mut *self.gas_meter.borrow_mut(), + self.storage, + prefix, + ) + .into_storage_result() } - /// Storage prefix iterator next for posterior state (after tx execution). - /// It will try to read from the write log first and if no entry found - /// then from the storage. - pub fn iter_post_next( + fn eval( &self, - iter: &mut >::PrefixIter, - ) -> Result)>> { - vp_env::iter_post_next::( - &mut *self.gas_meter.borrow_mut(), - self.write_log, - iter, - ) - .map_err(Error::ContextError) - } - - /// Evaluate a validity predicate with given data. The address, changed - /// storage keys and verifiers will have the same values as the input to - /// caller's validity predicate. - /// - /// If the execution fails for whatever reason, this will return `false`. - /// Otherwise returns the result of evaluation. - pub fn eval( - &mut self, - address: &Address, - keys_changed: &BTreeSet, - verifiers: &BTreeSet
, vp_code: Vec, input_data: Vec, - ) -> bool { + ) -> Result { #[cfg(feature = "wasm-runtime")] { use std::marker::PhantomData; @@ -263,39 +423,54 @@ where let mut iterators: PrefixIterators<'_, DB> = PrefixIterators::default(); let mut result_buffer: Option> = None; + let mut vp_wasm_cache = self.vp_wasm_cache.clone(); let ctx = VpCtx::new( - address, + self.address, self.storage, self.write_log, &mut *self.gas_meter.borrow_mut(), self.tx, &mut iterators, - verifiers, + self.verifiers, &mut result_buffer, - keys_changed, + self.keys_changed, &eval_runner, - &mut self.vp_wasm_cache, + &mut vp_wasm_cache, ); match eval_runner.eval_native_result(ctx, vp_code, input_data) { - Ok(result) => result, + Ok(result) => Ok(result), Err(err) => { tracing::warn!( "VP eval from a native VP failed with: {}", err ); - false + Ok(false) } } } #[cfg(not(feature = "wasm-runtime"))] { - let _ = (address, keys_changed, verifiers, vp_code, input_data); + // This line is here to prevent unused var clippy warning + let _ = (vp_code, input_data); unimplemented!( "The \"wasm-runtime\" feature must be enabled to use the \ `eval` function." ) } } + + fn verify_tx_signature( + &self, + pk: &crate::types::key::common::PublicKey, + sig: &crate::types::key::common::Signature, + ) -> Result { + Ok(self.tx.verify_sig(pk, sig).is_ok()) + } + + fn get_tx_code_hash(&self) -> Result { + vp_env::get_tx_code_hash(&mut *self.gas_meter.borrow_mut(), self.tx) + .into_storage_result() + } } diff --git a/shared/src/ledger/pos/mod.rs b/shared/src/ledger/pos/mod.rs index c980da81da..3b498727df 100644 --- a/shared/src/ledger/pos/mod.rs +++ b/shared/src/ledger/pos/mod.rs @@ -13,6 +13,7 @@ use namada_proof_of_stake::PosBase; pub use storage::*; pub use vp::PosVP; +use super::storage_api; use crate::ledger::storage::{self as ledger_storage, Storage, StorageHasher}; use crate::types::address::{self, Address, InternalAddress}; use crate::types::storage::Epoch; @@ -87,3 +88,172 @@ impl From for Epoch { Epoch(epoch) } } + +// The error conversions are needed to implement `PosActions` in +// `tx_prelude/src/proof_of_stake.rs` +impl From> + for storage_api::Error +{ + fn from(err: namada_proof_of_stake::BecomeValidatorError
<Address>) -> Self { + Self::new(err) + } +} + +impl From<namada_proof_of_stake::BondError<Address>> for storage_api::Error { + fn from(err: namada_proof_of_stake::BondError<Address>) -> Self { + Self::new(err) + } +} + +impl From<namada_proof_of_stake::UnbondError<Address, token::Amount>> + for storage_api::Error +{ + fn from( + err: namada_proof_of_stake::UnbondError<Address, token::Amount>, + ) -> Self { + Self::new(err) + } +} + +impl From<namada_proof_of_stake::WithdrawError<Address>> + for storage_api::Error +{ + fn from(err: namada_proof_of_stake::WithdrawError<Address>
) -> Self { + Self::new(err) + } +} + +#[macro_use] +mod macros { + /// Implement `PosReadOnly` for a type that implements + /// [`trait@crate::ledger::storage_api::StorageRead`]. + /// + /// Excuse the horrible syntax - we haven't found a better way to use this + /// for native_vp `CtxPreStorageRead`/`CtxPostStorageRead`, which have + /// generics and explicit lifetimes. + /// + /// # Examples + /// + /// ```ignore + /// impl_pos_read_only! { impl PosReadOnly for X } + /// ``` + #[macro_export] + macro_rules! impl_pos_read_only { + ( + // Type error type has to be declared before the impl. + // This error type must `impl From for $error`. + type $error:tt = $err_ty:ty ; + // Matches anything, so that we can use lifetimes and generic types. + // This expects `impl(<.*>)? PoSReadOnly for $ty(<.*>)?`. + $( $any:tt )* ) + => { + $( $any )* + { + type Address = $crate::types::address::Address; + type $error = $err_ty; + type PublicKey = $crate::types::key::common::PublicKey; + type TokenAmount = $crate::types::token::Amount; + type TokenChange = $crate::types::token::Change; + + const POS_ADDRESS: Self::Address = $crate::ledger::pos::ADDRESS; + + fn staking_token_address() -> Self::Address { + $crate::ledger::pos::staking_token_address() + } + + fn read_pos_params(&self) -> std::result::Result { + let value = $crate::ledger::storage_api::StorageRead::read_bytes(self, ¶ms_key())?.unwrap(); + Ok($crate::ledger::storage::types::decode(value).unwrap()) + } + + fn read_validator_staking_reward_address( + &self, + key: &Self::Address, + ) -> std::result::Result, Self::Error> { + let value = $crate::ledger::storage_api::StorageRead::read_bytes( + self, + &validator_staking_reward_address_key(key), + )?; + Ok(value.map(|value| $crate::ledger::storage::types::decode(value).unwrap())) + } + + fn read_validator_consensus_key( + &self, + key: &Self::Address, + ) -> std::result::Result, Self::Error> { + let value = + $crate::ledger::storage_api::StorageRead::read_bytes(self, &validator_consensus_key_key(key))?; + Ok(value.map(|value| $crate::ledger::storage::types::decode(value).unwrap())) + } + + fn read_validator_state( + &self, + key: &Self::Address, + ) -> std::result::Result, Self::Error> { + let value = $crate::ledger::storage_api::StorageRead::read_bytes(self, &validator_state_key(key))?; + Ok(value.map(|value| $crate::ledger::storage::types::decode(value).unwrap())) + } + + fn read_validator_total_deltas( + &self, + key: &Self::Address, + ) -> std::result::Result, Self::Error> { + let value = + $crate::ledger::storage_api::StorageRead::read_bytes(self, &validator_total_deltas_key(key))?; + Ok(value.map(|value| $crate::ledger::storage::types::decode(value).unwrap())) + } + + fn read_validator_voting_power( + &self, + key: &Self::Address, + ) -> std::result::Result, Self::Error> { + let value = + $crate::ledger::storage_api::StorageRead::read_bytes(self, &validator_voting_power_key(key))?; + Ok(value.map(|value| $crate::ledger::storage::types::decode(value).unwrap())) + } + + fn read_validator_slashes( + &self, + key: &Self::Address, + ) -> std::result::Result, Self::Error> { + let value = $crate::ledger::storage_api::StorageRead::read_bytes(self, &validator_slashes_key(key))?; + Ok(value + .map(|value| $crate::ledger::storage::types::decode(value).unwrap()) + .unwrap_or_default()) + } + + fn read_bond( + &self, + key: &BondId, + ) -> std::result::Result, Self::Error> { + let value = $crate::ledger::storage_api::StorageRead::read_bytes(self, &bond_key(key))?; + Ok(value.map(|value| 
$crate::ledger::storage::types::decode(value).unwrap())) + } + + fn read_unbond( + &self, + key: &BondId, + ) -> std::result::Result, Self::Error> { + let value = $crate::ledger::storage_api::StorageRead::read_bytes(self, &unbond_key(key))?; + Ok(value.map(|value| $crate::ledger::storage::types::decode(value).unwrap())) + } + + fn read_validator_set( + &self, + ) -> std::result::Result { + let value = + $crate::ledger::storage_api::StorageRead::read_bytes(self, &validator_set_key())?.unwrap(); + Ok($crate::ledger::storage::types::decode(value).unwrap()) + } + + fn read_total_voting_power( + &self, + ) -> std::result::Result { + let value = + $crate::ledger::storage_api::StorageRead::read_bytes(self, &total_voting_power_key())?.unwrap(); + Ok($crate::ledger::storage::types::decode(value).unwrap()) + } + } + } +} +} diff --git a/shared/src/ledger/pos/vp.rs b/shared/src/ledger/pos/vp.rs index 26be440536..0551c5de7f 100644 --- a/shared/src/ledger/pos/vp.rs +++ b/shared/src/ledger/pos/vp.rs @@ -26,17 +26,20 @@ use super::{ validator_total_deltas_key, validator_voting_power_key, BondId, Bonds, Unbonds, ValidatorConsensusKeys, ValidatorSets, ValidatorTotalDeltas, }; +use crate::impl_pos_read_only; use crate::ledger::governance::vp::is_proposal_accepted; -use crate::ledger::native_vp::{self, Ctx, NativeVp}; +use crate::ledger::native_vp::{ + self, Ctx, CtxPostStorageRead, CtxPreStorageRead, NativeVp, +}; use crate::ledger::pos::{ is_validator_address_raw_hash_key, is_validator_consensus_key_key, is_validator_state_key, }; -use crate::ledger::storage::types::decode; use crate::ledger::storage::{self as ledger_storage, StorageHasher}; +use crate::ledger::storage_api::{self, StorageRead}; use crate::types::address::{Address, InternalAddress}; use crate::types::storage::{Key, KeySeg}; -use crate::types::{key, token}; +use crate::types::token; use crate::vm::WasmCacheAccess; #[allow(missing_docs)] @@ -118,7 +121,8 @@ where let addr = Address::Internal(Self::ADDR); let mut changes: Vec> = vec![]; - let current_epoch = self.ctx.get_block_epoch()?; + let current_epoch = self.ctx.pre().get_block_epoch()?; + for key in keys_changed { if is_params_key(key) { let proposal_id = u64::try_from_slice(tx_data).ok(); @@ -127,8 +131,8 @@ where _ => return Ok(false), } } else if let Some(owner) = key.is_validity_predicate() { - let has_pre = self.ctx.has_key_pre(key)?; - let has_post = self.ctx.has_key_post(key)?; + let has_pre = self.ctx.pre().has_key(key)?; + let has_post = self.ctx.post().has_key(key)?; if has_pre && has_post { // VP updates must be verified by the owner return Ok(!verifiers.contains(owner)); @@ -137,18 +141,18 @@ where return Ok(false); } } else if is_validator_set_key(key) { - let pre = self.ctx.read_pre(key)?.and_then(|bytes| { + let pre = self.ctx.pre().read_bytes(key)?.and_then(|bytes| { ValidatorSets::try_from_slice(&bytes[..]).ok() }); - let post = self.ctx.read_post(key)?.and_then(|bytes| { + let post = self.ctx.post().read_bytes(key)?.and_then(|bytes| { ValidatorSets::try_from_slice(&bytes[..]).ok() }); changes.push(ValidatorSet(Data { pre, post })); } else if let Some(validator) = is_validator_state_key(key) { - let pre = self.ctx.read_pre(key)?.and_then(|bytes| { + let pre = self.ctx.pre().read_bytes(key)?.and_then(|bytes| { ValidatorStates::try_from_slice(&bytes[..]).ok() }); - let post = self.ctx.read_post(key)?.and_then(|bytes| { + let post = self.ctx.post().read_bytes(key)?.and_then(|bytes| { ValidatorStates::try_from_slice(&bytes[..]).ok() }); changes.push(Validator { @@ -158,24 
+162,24 @@ where } else if let Some(validator) = is_validator_staking_reward_address_key(key) { - let pre = self - .ctx - .read_pre(key)? - .and_then(|bytes| Address::try_from_slice(&bytes[..]).ok()); - let post = self - .ctx - .read_post(key)? - .and_then(|bytes| Address::try_from_slice(&bytes[..]).ok()); + let pre = + self.ctx.pre().read_bytes(key)?.and_then(|bytes| { + Address::try_from_slice(&bytes[..]).ok() + }); + let post = + self.ctx.post().read_bytes(key)?.and_then(|bytes| { + Address::try_from_slice(&bytes[..]).ok() + }); changes.push(Validator { address: validator.clone(), update: StakingRewardAddress(Data { pre, post }), }); } else if let Some(validator) = is_validator_consensus_key_key(key) { - let pre = self.ctx.read_pre(key)?.and_then(|bytes| { + let pre = self.ctx.pre().read_bytes(key)?.and_then(|bytes| { ValidatorConsensusKeys::try_from_slice(&bytes[..]).ok() }); - let post = self.ctx.read_post(key)?.and_then(|bytes| { + let post = self.ctx.post().read_bytes(key)?.and_then(|bytes| { ValidatorConsensusKeys::try_from_slice(&bytes[..]).ok() }); changes.push(Validator { @@ -183,10 +187,10 @@ where update: ConsensusKey(Data { pre, post }), }); } else if let Some(validator) = is_validator_total_deltas_key(key) { - let pre = self.ctx.read_pre(key)?.and_then(|bytes| { + let pre = self.ctx.pre().read_bytes(key)?.and_then(|bytes| { ValidatorTotalDeltas::try_from_slice(&bytes[..]).ok() }); - let post = self.ctx.read_post(key)?.and_then(|bytes| { + let post = self.ctx.post().read_bytes(key)?.and_then(|bytes| { ValidatorTotalDeltas::try_from_slice(&bytes[..]).ok() }); changes.push(Validator { @@ -194,10 +198,10 @@ where update: TotalDeltas(Data { pre, post }), }); } else if let Some(validator) = is_validator_voting_power_key(key) { - let pre = self.ctx.read_pre(key)?.and_then(|bytes| { + let pre = self.ctx.pre().read_bytes(key)?.and_then(|bytes| { ValidatorVotingPowers::try_from_slice(&bytes[..]).ok() }); - let post = self.ctx.read_post(key)?.and_then(|bytes| { + let post = self.ctx.post().read_bytes(key)?.and_then(|bytes| { ValidatorVotingPowers::try_from_slice(&bytes[..]).ok() }); changes.push(Validator { @@ -207,14 +211,14 @@ where } else if let Some(raw_hash) = is_validator_address_raw_hash_key(key) { - let pre = self - .ctx - .read_pre(key)? - .and_then(|bytes| Address::try_from_slice(&bytes[..]).ok()); - let post = self - .ctx - .read_post(key)? - .and_then(|bytes| Address::try_from_slice(&bytes[..]).ok()); + let pre = + self.ctx.pre().read_bytes(key)?.and_then(|bytes| { + Address::try_from_slice(&bytes[..]).ok() + }); + let post = + self.ctx.post().read_bytes(key)?.and_then(|bytes| { + Address::try_from_slice(&bytes[..]).ok() + }); // Find the raw hashes of the addresses let pre = pre.map(|pre| { let raw_hash = @@ -236,26 +240,27 @@ where if owner != &addr { continue; } - let pre = self.ctx.read_pre(key)?.and_then(|bytes| { + let pre = self.ctx.pre().read_bytes(key)?.and_then(|bytes| { token::Amount::try_from_slice(&bytes[..]).ok() }); - let post = self.ctx.read_post(key)?.and_then(|bytes| { + let post = self.ctx.post().read_bytes(key)?.and_then(|bytes| { token::Amount::try_from_slice(&bytes[..]).ok() }); changes.push(Balance(Data { pre, post })); } else if let Some(bond_id) = is_bond_key(key) { - let pre = self - .ctx - .read_pre(key)? - .and_then(|bytes| Bonds::try_from_slice(&bytes[..]).ok()); - let post = self - .ctx - .read_post(key)? 
- .and_then(|bytes| Bonds::try_from_slice(&bytes[..]).ok()); + let pre = + self.ctx.pre().read_bytes(key)?.and_then(|bytes| { + Bonds::try_from_slice(&bytes[..]).ok() + }); + let post = + self.ctx.post().read_bytes(key)?.and_then(|bytes| { + Bonds::try_from_slice(&bytes[..]).ok() + }); // For bonds, we need to look-up slashes let slashes = self .ctx - .read_pre(&validator_slashes_key(&bond_id.validator))? + .pre() + .read_bytes(&validator_slashes_key(&bond_id.validator))? .and_then(|bytes| Slashes::try_from_slice(&bytes[..]).ok()) .unwrap_or_default(); changes.push(Bond { @@ -264,18 +269,19 @@ where slashes, }); } else if let Some(unbond_id) = is_unbond_key(key) { - let pre = self - .ctx - .read_pre(key)? - .and_then(|bytes| Unbonds::try_from_slice(&bytes[..]).ok()); - let post = self - .ctx - .read_post(key)? - .and_then(|bytes| Unbonds::try_from_slice(&bytes[..]).ok()); + let pre = + self.ctx.pre().read_bytes(key)?.and_then(|bytes| { + Unbonds::try_from_slice(&bytes[..]).ok() + }); + let post = + self.ctx.post().read_bytes(key)?.and_then(|bytes| { + Unbonds::try_from_slice(&bytes[..]).ok() + }); // For unbonds, we need to look-up slashes let slashes = self .ctx - .read_pre(&validator_slashes_key(&unbond_id.validator))? + .pre() + .read_bytes(&validator_slashes_key(&unbond_id.validator))? .and_then(|bytes| Slashes::try_from_slice(&bytes[..]).ok()) .unwrap_or_default(); changes.push(Unbond { @@ -284,10 +290,10 @@ where slashes, }); } else if is_total_voting_power_key(key) { - let pre = self.ctx.read_pre(key)?.and_then(|bytes| { + let pre = self.ctx.pre().read_bytes(key)?.and_then(|bytes| { TotalVotingPowers::try_from_slice(&bytes[..]).ok() }); - let post = self.ctx.read_post(key)?.and_then(|bytes| { + let post = self.ctx.post().read_bytes(key)?.and_then(|bytes| { TotalVotingPowers::try_from_slice(&bytes[..]).ok() }); changes.push(TotalVotingPower(Data { pre, post })); @@ -301,7 +307,7 @@ where } } - let params = self.read_pos_params(); + let params = self.ctx.pre().read_pos_params()?; let errors = validate(¶ms, changes, current_epoch); Ok(if errors.is_empty() { true @@ -315,106 +321,22 @@ where } } -impl PosReadOnly for PosVP<'_, D, H, CA> -where - D: 'static + ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter>, - H: 'static + StorageHasher, - CA: 'static + WasmCacheAccess, -{ - type Address = Address; - type PublicKey = key::common::PublicKey; - type TokenAmount = token::Amount; - type TokenChange = token::Change; - - const POS_ADDRESS: Self::Address = super::ADDRESS; - - fn staking_token_address() -> Self::Address { - super::staking_token_address() - } - - fn read_pos_params(&self) -> PosParams { - let value = self.ctx.read_pre(¶ms_key()).unwrap().unwrap(); - decode(value).unwrap() - } - - fn read_validator_staking_reward_address( - &self, - key: &Self::Address, - ) -> Option { - let value = self - .ctx - .read_pre(&validator_staking_reward_address_key(key)) - .unwrap(); - value.map(|value| decode(value).unwrap()) - } - - fn read_validator_consensus_key( - &self, - key: &Self::Address, - ) -> Option { - let value = self - .ctx - .read_pre(&validator_consensus_key_key(key)) - .unwrap(); - value.map(|value| decode(value).unwrap()) - } - - fn read_validator_state( - &self, - key: &Self::Address, - ) -> Option { - let value = self.ctx.read_pre(&validator_state_key(key)).unwrap(); - value.map(|value| decode(value).unwrap()) - } - - fn read_validator_total_deltas( - &self, - key: &Self::Address, - ) -> Option { - let value = - 
self.ctx.read_pre(&validator_total_deltas_key(key)).unwrap(); - value.map(|value| decode(value).unwrap()) - } - - fn read_validator_voting_power( - &self, - key: &Self::Address, - ) -> Option { - let value = - self.ctx.read_pre(&validator_voting_power_key(key)).unwrap(); - value.map(|value| decode(value).unwrap()) - } - - fn read_validator_slashes(&self, key: &Self::Address) -> Vec { - let value = self.ctx.read_pre(&validator_slashes_key(key)).unwrap(); - value - .map(|value| decode(value).unwrap()) - .unwrap_or_default() - } - - fn read_bond(&self, key: &BondId) -> Option { - let value = self.ctx.read_pre(&bond_key(key)).unwrap(); - value.map(|value| decode(value).unwrap()) - } - - fn read_unbond(&self, key: &BondId) -> Option { - let value = self.ctx.read_pre(&unbond_key(key)).unwrap(); - value.map(|value| decode(value).unwrap()) - } - - fn read_validator_set(&self) -> ValidatorSets { - let value = self.ctx.read_pre(&validator_set_key()).unwrap().unwrap(); - decode(value).unwrap() - } +impl_pos_read_only! { + type Error = storage_api::Error; + impl<'f, 'a, DB, H, CA> PosReadOnly for CtxPreStorageRead<'f, 'a, DB, H, CA> + where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter> +'static, + H: StorageHasher +'static, + CA: WasmCacheAccess +'static +} - fn read_total_voting_power(&self) -> TotalVotingPowers { - let value = self - .ctx - .read_pre(&total_voting_power_key()) - .unwrap() - .unwrap(); - decode(value).unwrap() - } +impl_pos_read_only! { + type Error = storage_api::Error; + impl<'f, 'a, DB, H, CA> PosReadOnly for CtxPostStorageRead<'f, 'a, DB, H, CA> + where + DB: ledger_storage::DB + for<'iter> ledger_storage::DBIter<'iter> +'static, + H: StorageHasher +'static, + CA: WasmCacheAccess +'static } impl From for Error { diff --git a/shared/src/ledger/storage/mockdb.rs b/shared/src/ledger/storage/mockdb.rs index 3cc7e8863c..a9e3e8057b 100644 --- a/shared/src/ledger/storage/mockdb.rs +++ b/shared/src/ledger/storage/mockdb.rs @@ -431,7 +431,28 @@ impl<'iter> DBIter<'iter> for MockDB { let db_prefix = "subspace/".to_owned(); let prefix = format!("{}{}", db_prefix, prefix); let iter = self.0.borrow().clone().into_iter(); - MockPrefixIterator::new(MockIterator { prefix, iter }, db_prefix) + MockPrefixIterator::new( + MockIterator { + prefix, + iter, + reverse_order: false, + }, + db_prefix, + ) + } + + fn rev_iter_prefix(&'iter self, prefix: &Key) -> Self::PrefixIter { + let db_prefix = "subspace/".to_owned(); + let prefix = format!("{}{}", db_prefix, prefix); + let iter = self.0.borrow().clone().into_iter(); + MockPrefixIterator::new( + MockIterator { + prefix, + iter, + reverse_order: true, + }, + db_prefix, + ) } } @@ -441,6 +462,8 @@ pub struct MockIterator { prefix: String, /// The concrete iterator pub iter: btree_map::IntoIter>, + /// Is the iterator in reverse order? + reverse_order: bool, } /// A prefix iterator for the [`MockDB`]. 
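// [Editor's sketch, not part of the patch] The `reverse_order` flag added to
// `MockIterator` above flips the scan implemented in `next()` in the next
// hunk: the same prefix filter walks the mock DB's `BTreeMap` back to front.
// A standalone illustration of the resulting ordering contract, with a plain
// `BTreeMap` standing in for the mock DB:
use std::collections::BTreeMap;

fn main() {
    let mut db: BTreeMap<String, Vec<u8>> = BTreeMap::new();
    db.insert("subspace/a/1".into(), vec![1]);
    db.insert("subspace/a/2".into(), vec![2]);
    db.insert("subspace/b/1".into(), vec![3]);

    let prefix = "subspace/a";
    // Forward scan, sorted by storage key: "subspace/a/1", then "subspace/a/2"
    let forward: Vec<_> = db
        .iter()
        .filter(|(key, _val)| key.starts_with(prefix))
        .collect();
    // Reverse scan, as `(&mut self.iter).rev()` does in `MockIterator`:
    // "subspace/a/2", then "subspace/a/1"
    let reverse: Vec<_> = db
        .iter()
        .rev()
        .filter(|(key, _val)| key.starts_with(prefix))
        .collect();
    assert_eq!(forward.len(), 2);
    assert_eq!(reverse[0], forward[1]);
    assert_eq!(reverse[1], forward[0]);
}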
@@ -450,12 +473,23 @@ impl Iterator for MockIterator {
     type Item = KVBytes;

     fn next(&mut self) -> Option<Self::Item> {
-        for (key, val) in &mut self.iter {
-            if key.starts_with(&self.prefix) {
-                return Some((
-                    Box::from(key.as_bytes()),
-                    Box::from(val.as_slice()),
-                ));
+        if self.reverse_order {
+            for (key, val) in (&mut self.iter).rev() {
+                if key.starts_with(&self.prefix) {
+                    return Some((
+                        Box::from(key.as_bytes()),
+                        Box::from(val.as_slice()),
+                    ));
+                }
+            }
+        } else {
+            for (key, val) in &mut self.iter {
+                if key.starts_with(&self.prefix) {
+                    return Some((
+                        Box::from(key.as_bytes()),
+                        Box::from(val.as_slice()),
+                    ));
+                }
             }
         }
         None
diff --git a/shared/src/ledger/storage/mod.rs b/shared/src/ledger/storage/mod.rs
index 0b3d19a742..bd969fa464 100644
--- a/shared/src/ledger/storage/mod.rs
+++ b/shared/src/ledger/storage/mod.rs
@@ -11,8 +11,9 @@ use core::fmt::Debug;
 use tendermint::merkle::proof::Proof;
 use thiserror::Error;

-use super::parameters;
 use super::parameters::Parameters;
+use super::storage_api::{ResultExt, StorageRead, StorageWrite};
+use super::{parameters, storage_api};
 use crate::ledger::gas::MIN_STORAGE_GAS;
 use crate::ledger::parameters::EpochDuration;
 use crate::ledger::storage::merkle_tree::{
@@ -242,8 +243,13 @@ pub trait DBIter<'iter> {
     /// The concrete type of the iterator
     type PrefixIter: Debug + Iterator<Item = (String, Vec<u8>, u64)>;

-    /// Read account subspace key value pairs with the given prefix from the DB
+    /// Read account subspace key value pairs with the given prefix from the DB,
+    /// ordered by the storage keys.
     fn iter_prefix(&'iter self, prefix: &Key) -> Self::PrefixIter;
+
+    /// Read account subspace key value pairs with the given prefix from the DB,
+    /// reverse ordered by the storage keys.
+    fn rev_iter_prefix(&'iter self, prefix: &Key) -> Self::PrefixIter;
 }

 /// Atomic batch write.
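A minimal sketch (not part of the patch) of the ordering contract these two iterators are meant to satisfy, using a plain `BTreeMap` as a stand-in for the DB:

```rust,ignore
use std::collections::BTreeMap;

// Stand-in for `iter_prefix`/`rev_iter_prefix`: the same prefix-filtered
// keys, ascending for the forward iterator, descending for the reverse one.
fn prefix_scan(
    db: &BTreeMap<String, Vec<u8>>,
    prefix: &str,
    reverse: bool,
) -> Vec<String> {
    let matching = db.keys().filter(|k| k.starts_with(prefix)).cloned();
    if reverse {
        matching.rev().collect()
    } else {
        matching.collect()
    }
}
```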
@@ -410,7 +416,7 @@ where } } - /// Returns a prefix iterator and the gas cost + /// Returns a prefix iterator, ordered by storage keys, and the gas cost pub fn iter_prefix( &self, prefix: &Key, @@ -418,17 +424,29 @@ where (self.db.iter_prefix(prefix), prefix.len() as _) } + /// Returns a prefix iterator, reverse ordered by storage keys, and the gas + /// cost + pub fn rev_iter_prefix( + &self, + prefix: &Key, + ) -> (>::PrefixIter, u64) { + (self.db.rev_iter_prefix(prefix), prefix.len() as _) + } + /// Write a value to the specified subspace and returns the gas cost and the /// size difference pub fn write( &mut self, key: &Key, - value: impl AsRef<[u8]> + Clone, + value: impl AsRef<[u8]>, ) -> Result<(u64, i64)> { + // Note that this method is the same as `StorageWrite::write_bytes`, + // but with gas and storage bytes len diff accounting tracing::debug!("storage write key {}", key,); - self.block.tree.update(key, value.clone())?; + let value = value.as_ref(); + self.block.tree.update(key, &value)?; - let len = value.as_ref().len(); + let len = value.len(); let gas = key.len() + len; let size_diff = self.db.write_subspace_val(self.last_height, key, value)?; @@ -438,6 +456,8 @@ where /// Delete the specified subspace and returns the gas cost and the size /// difference pub fn delete(&mut self, key: &Key) -> Result<(u64, i64)> { + // Note that this method is the same as `StorageWrite::delete`, + // but with gas and storage bytes len diff accounting let mut deleted_bytes_len = 0; if self.has_key(key)?.0 { self.block.tree.delete(key)?; @@ -684,6 +704,148 @@ where } } +impl<'iter, D, H> StorageRead<'iter> for Storage +where + D: DB + for<'iter_> DBIter<'iter_>, + H: StorageHasher, +{ + type PrefixIter = >::PrefixIter; + + fn read_bytes( + &self, + key: &crate::types::storage::Key, + ) -> std::result::Result>, storage_api::Error> { + self.db.read_subspace_val(key).into_storage_result() + } + + fn has_key( + &self, + key: &crate::types::storage::Key, + ) -> std::result::Result { + self.block.tree.has_key(key).into_storage_result() + } + + fn iter_prefix( + &'iter self, + prefix: &crate::types::storage::Key, + ) -> std::result::Result { + Ok(self.db.iter_prefix(prefix)) + } + + fn rev_iter_prefix( + &'iter self, + prefix: &crate::types::storage::Key, + ) -> std::result::Result { + Ok(self.db.rev_iter_prefix(prefix)) + } + + fn iter_next( + &self, + iter: &mut Self::PrefixIter, + ) -> std::result::Result)>, storage_api::Error> + { + Ok(iter.next().map(|(key, val, _gas)| (key, val))) + } + + fn get_chain_id(&self) -> std::result::Result { + Ok(self.chain_id.to_string()) + } + + fn get_block_height( + &self, + ) -> std::result::Result { + Ok(self.block.height) + } + + fn get_block_hash( + &self, + ) -> std::result::Result { + Ok(self.block.hash.clone()) + } + + fn get_block_epoch( + &self, + ) -> std::result::Result { + Ok(self.block.epoch) + } +} + +impl StorageWrite for Storage +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + fn write_bytes( + &mut self, + key: &crate::types::storage::Key, + val: impl AsRef<[u8]>, + ) -> storage_api::Result<()> { + // Note that this method is the same as `Storage::write`, but without + // gas and storage bytes len diff accounting, because it can only be + // used by the protocol that has a direct mutable access to storage + let val = val.as_ref(); + self.block.tree.update(key, &val).into_storage_result()?; + let _ = self + .db + .write_subspace_val(self.block.height, key, val) + .into_storage_result()?; + Ok(()) + } + + fn delete( + &mut self, 
+        key: &crate::types::storage::Key,
+    ) -> storage_api::Result<()> {
+        // Note that this method is the same as `Storage::delete`, but without
+        // gas and storage bytes len diff accounting, because it can only be
+        // used by the protocol that has a direct mutable access to storage
+        self.block.tree.delete(key).into_storage_result()?;
+        let _ = self
+            .db
+            .delete_subspace_val(self.block.height, key)
+            .into_storage_result()?;
+        Ok(())
+    }
+}
+
+impl<D, H> StorageWrite for &mut Storage<D, H>
+where
+    D: DB + for<'iter> DBIter<'iter>,
+    H: StorageHasher,
+{
+    fn write<T: BorshSerialize>(
+        &mut self,
+        key: &crate::types::storage::Key,
+        val: T,
+    ) -> storage_api::Result<()> {
+        let val = val.try_to_vec().unwrap();
+        self.write_bytes(key, val)
+    }
+
+    fn write_bytes(
+        &mut self,
+        key: &crate::types::storage::Key,
+        val: impl AsRef<[u8]>,
+    ) -> storage_api::Result<()> {
+        let _ = self
+            .db
+            .write_subspace_val(self.block.height, key, val)
+            .into_storage_result()?;
+        Ok(())
+    }
+
+    fn delete(
+        &mut self,
+        key: &crate::types::storage::Key,
+    ) -> storage_api::Result<()> {
+        let _ = self
+            .db
+            .delete_subspace_val(self.block.height, key)
+            .into_storage_result()?;
+        Ok(())
+    }
+}
+
 impl From<MerkleTreeError> for Error {
     fn from(error: MerkleTreeError) -> Self {
         Self::MerkleTreeError(error)
diff --git a/shared/src/ledger/storage_api/collections/lazy_map.rs b/shared/src/ledger/storage_api/collections/lazy_map.rs
new file mode 100644
index 0000000000..34a0f7d891
--- /dev/null
+++ b/shared/src/ledger/storage_api/collections/lazy_map.rs
@@ -0,0 +1,563 @@
+//! Lazy map.
+
+use std::collections::HashMap;
+use std::fmt::Debug;
+use std::hash::Hash;
+use std::marker::PhantomData;
+
+use borsh::{BorshDeserialize, BorshSerialize};
+use thiserror::Error;
+
+use super::super::Result;
+use super::{LazyCollection, ReadError};
+use crate::ledger::storage_api::validation::{self, Data};
+use crate::ledger::storage_api::{self, ResultExt, StorageRead, StorageWrite};
+use crate::ledger::vp_env::VpEnv;
+use crate::types::storage::{self, DbKeySeg, KeySeg};
+
+/// Subkey corresponding to the data elements of the LazyMap
+pub const DATA_SUBKEY: &str = "data";
+
+/// Lazy map.
+///
+/// This can be used as an alternative to `std::collections::HashMap` and
+/// `BTreeMap`. In the lazy map, the elements do not reside in memory but are
+/// instead read and written to storage sub-keys of the storage `key` used to
+/// construct the map.
+///
+/// In the [`LazyMap`], the type of key `K` can be anything that implements
+/// [`storage::KeySeg`] and this trait is used to turn the keys into key
+/// segments.
+#[derive(Debug)]
+pub struct LazyMap<K, V, SON = super::Simple> {
+    key: storage::Key,
+    phantom_k: PhantomData<K>,
+    phantom_v: PhantomData<V>,
+    phantom_son: PhantomData<SON>,
+}
+
+/// A `LazyMap` with another `LazyCollection` inside its value `V`
+pub type NestedMap<K, V> = LazyMap<K, V, super::Nested>;
+
+/// Possible sub-keys of a [`LazyMap`]
+#[derive(Clone, Debug)]
+pub enum SubKey<K> {
+    /// Data sub-key, further sub-keyed by its literal map key
+    Data(K),
+}
+
+/// Possible sub-keys of a [`LazyMap`], together with their [`validation::Data`]
+/// that contains prior and posterior state.
+#[derive(Clone, Debug)]
+pub enum SubKeyWithData<K, V> {
+    /// Data sub-key, further sub-keyed by its literal map key
+    Data(K, Data<V>),
+}
+
+/// Possible actions that can modify a simple (not nested) [`LazyMap`]. This
+/// roughly corresponds to the methods that have `StorageWrite` access.
+#[derive(Clone, Debug)]
+pub enum Action<K, V> {
+    /// Insert or update a value `V` at key `K` in a [`LazyMap`].
+ Insert(K, V), + /// Remove a value `V` at key `K` from a [`LazyMap`]. + Remove(K, V), + /// Update a value `V` at key `K` in a [`LazyMap`]. + Update { + /// key at which the value is updated + key: K, + /// value before the update + pre: V, + /// value after the update + post: V, + }, +} + +/// Possible actions that can modify a nested [`LazyMap`]. +#[derive(Clone, Debug)] +pub enum NestedAction { + /// Nested collection action `A` at key `K` + At(K, A), +} + +/// Possible sub-keys of a nested [`LazyMap`] +#[derive(Clone, Debug)] +pub enum NestedSubKey { + /// Data sub-key + Data { + /// Literal map key + key: K, + /// Sub-key in the nested collection + nested_sub_key: S, + }, +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum ValidationError { + #[error("Invalid storage key {0}")] + InvalidSubKey(storage::Key), + #[error("Invalid nested storage key {0}")] + InvalidNestedSubKey(storage::Key), +} + +/// [`LazyMap`] validation result +pub type ValidationResult = std::result::Result; + +impl LazyCollection for LazyMap +where + K: storage::KeySeg + Clone + Hash + Eq + Debug, + V: LazyCollection + Debug, +{ + type Action = NestedAction::Action>; + type SubKey = NestedSubKey::SubKey>; + type SubKeyWithData = + NestedSubKey::SubKeyWithData>; + type Value = ::Value; + + fn open(key: storage::Key) -> Self { + Self { + key, + phantom_k: PhantomData, + phantom_v: PhantomData, + phantom_son: PhantomData, + } + } + + fn is_valid_sub_key( + &self, + key: &storage::Key, + ) -> storage_api::Result> { + let suffix = match key.split_prefix(&self.key) { + None => { + // not matching prefix, irrelevant + return Ok(None); + } + Some(None) => { + // no suffix, invalid + return Err(ValidationError::InvalidSubKey(key.clone())) + .into_storage_result(); + } + Some(Some(suffix)) => suffix, + }; + + // Match the suffix against expected sub-keys + match &suffix.segments[..2] { + [DbKeySeg::StringSeg(sub_a), DbKeySeg::StringSeg(sub_b)] + if sub_a == DATA_SUBKEY => + { + if let Ok(key_in_kv) = storage::KeySeg::parse(sub_b.clone()) { + let nested = self.at(&key_in_kv).is_valid_sub_key(key)?; + match nested { + Some(nested_sub_key) => Ok(Some(NestedSubKey::Data { + key: key_in_kv, + nested_sub_key, + })), + None => Err(ValidationError::InvalidNestedSubKey( + key.clone(), + )) + .into_storage_result(), + } + } else { + Err(ValidationError::InvalidSubKey(key.clone())) + .into_storage_result() + } + } + _ => Err(ValidationError::InvalidSubKey(key.clone())) + .into_storage_result(), + } + } + + fn read_sub_key_data( + env: &ENV, + storage_key: &storage::Key, + sub_key: Self::SubKey, + ) -> storage_api::Result> + where + ENV: for<'a> VpEnv<'a>, + { + let NestedSubKey::Data { + key, + // In here, we just have a nested sub-key without data + nested_sub_key, + } = sub_key; + // Try to read data from the nested collection + let nested_data = ::read_sub_key_data( + env, + storage_key, + nested_sub_key, + )?; + // If found, transform it back into a `NestedSubKey`, but with + // `nested_sub_key` replaced with the one we read + Ok(nested_data.map(|nested_sub_key| NestedSubKey::Data { + key, + nested_sub_key, + })) + } + + fn validate_changed_sub_keys( + keys: Vec, + ) -> storage_api::Result> { + // We have to group the nested sub-keys by the key from this map + let mut grouped_by_key: HashMap< + K, + Vec<::SubKeyWithData>, + > = HashMap::new(); + for NestedSubKey::Data { + key, + nested_sub_key, + } in keys + { + grouped_by_key + .entry(key) + .or_insert_with(Vec::new) + .push(nested_sub_key); + } + + // Recurse for 
each sub-keys group + let mut actions = vec![]; + for (key, sub_keys) in grouped_by_key { + let nested_actions = + ::validate_changed_sub_keys(sub_keys)?; + actions.extend( + nested_actions + .into_iter() + .map(|action| NestedAction::At(key.clone(), action)), + ); + } + Ok(actions) + } +} + +impl LazyCollection for LazyMap +where + K: storage::KeySeg + Debug, + V: BorshDeserialize + BorshSerialize + 'static + Debug, +{ + type Action = Action; + type SubKey = SubKey; + type SubKeyWithData = SubKeyWithData; + type Value = V; + + /// Create or use an existing map with the given storage `key`. + fn open(key: storage::Key) -> Self { + Self { + key, + phantom_k: PhantomData, + phantom_v: PhantomData, + phantom_son: PhantomData, + } + } + + fn is_valid_sub_key( + &self, + key: &storage::Key, + ) -> storage_api::Result> { + let suffix = match key.split_prefix(&self.key) { + None => { + // not matching prefix, irrelevant + return Ok(None); + } + Some(None) => { + // no suffix, invalid + return Err(ValidationError::InvalidSubKey(key.clone())) + .into_storage_result(); + } + Some(Some(suffix)) => suffix, + }; + + // Match the suffix against expected sub-keys + match &suffix.segments[..] { + [DbKeySeg::StringSeg(sub_a), DbKeySeg::StringSeg(sub_b)] + if sub_a == DATA_SUBKEY => + { + if let Ok(key_in_kv) = storage::KeySeg::parse(sub_b.clone()) { + Ok(Some(SubKey::Data(key_in_kv))) + } else { + Err(ValidationError::InvalidSubKey(key.clone())) + .into_storage_result() + } + } + _ => Err(ValidationError::InvalidSubKey(key.clone())) + .into_storage_result(), + } + } + + fn read_sub_key_data( + env: &ENV, + storage_key: &storage::Key, + sub_key: Self::SubKey, + ) -> storage_api::Result> + where + ENV: for<'a> VpEnv<'a>, + { + let SubKey::Data(key) = sub_key; + let data = validation::read_data(env, storage_key)?; + Ok(data.map(|data| SubKeyWithData::Data(key, data))) + } + + fn validate_changed_sub_keys( + keys: Vec, + ) -> storage_api::Result> { + Ok(keys + .into_iter() + .map(|change| { + let SubKeyWithData::Data(key, data) = change; + match data { + Data::Add { post } => Action::Insert(key, post), + Data::Update { pre, post } => { + Action::Update { key, pre, post } + } + Data::Delete { pre } => Action::Remove(key, pre), + } + }) + .collect()) + } +} + +// Generic `LazyMap` methods that require no bounds on values `V` +impl LazyMap +where + K: storage::KeySeg, +{ + /// Returns whether the set contains a value. + pub fn contains(&self, storage: &S, key: &K) -> Result + where + S: for<'iter> StorageRead<'iter>, + { + storage.has_key(&self.get_data_key(key)) + } + + /// Get the prefix of set's elements storage + fn get_data_prefix(&self) -> storage::Key { + self.key.push(&DATA_SUBKEY.to_owned()).unwrap() + } + + /// Get the sub-key of a given element + fn get_data_key(&self, key: &K) -> storage::Key { + let key_str = key.to_db_key(); + self.get_data_prefix().push(&key_str).unwrap() + } +} + +// `LazyMap` methods with nested `LazyCollection`s `V` +impl LazyMap +where + K: storage::KeySeg + Clone + Hash + Eq + Debug, + V: LazyCollection + Debug, +{ + /// Get a nested collection at given key `key`. If there is no nested + /// collection at the given key, a new empty one will be provided. The + /// nested collection may be manipulated through its methods. + pub fn at(&self, key: &K) -> V { + V::open(self.get_data_key(key)) + } + + /// An iterator visiting all key-value elements, where the values are from + /// the inner-most collection. 
The iterator element type is `Result<_>`, + /// because iterator's call to `next` may fail with e.g. out of gas or + /// data decoding error. + /// + /// Note that this function shouldn't be used in transactions and VPs code + /// on unbounded maps to avoid gas usage increasing with the length of the + /// map. + pub fn iter<'iter>( + &'iter self, + storage: &'iter impl StorageRead<'iter>, + ) -> Result< + impl Iterator< + Item = Result<( + ::SubKey, + ::Value, + )>, + > + 'iter, + > { + let iter = storage_api::iter_prefix(storage, &self.get_data_prefix())?; + Ok(iter.map(|key_val_res| { + let (key, val) = key_val_res?; + let sub_key = LazyCollection::is_valid_sub_key(self, &key)? + .ok_or(ReadError::UnexpectedlyEmptyStorageKey) + .into_storage_result()?; + Ok((sub_key, val)) + })) + } +} + +// `LazyMap` methods with borsh encoded values `V` +impl LazyMap +where + K: storage::KeySeg, + V: BorshDeserialize + BorshSerialize + 'static, +{ + /// Inserts a key-value pair into the map. + /// + /// The full storage key identifies the key in the pair, while the value is + /// held within the storage key. + /// + /// If the map did not have this key present, `None` is returned. + /// If the map did have this key present, the value is updated, and the old + /// value is returned. Unlike in `std::collection::HashMap`, the key is also + /// updated; this matters for types that can be `==` without being + /// identical. + pub fn insert( + &self, + storage: &mut S, + key: K, + val: V, + ) -> Result> + where + S: StorageWrite + for<'iter> StorageRead<'iter>, + { + let previous = self.get(storage, &key)?; + + let data_key = self.get_data_key(&key); + Self::write_key_val(storage, &data_key, val)?; + + Ok(previous) + } + + /// Removes a key from the map, returning the value at the key if the key + /// was previously in the map. + pub fn remove(&self, storage: &mut S, key: &K) -> Result> + where + S: StorageWrite + for<'iter> StorageRead<'iter>, + { + let value = self.get(storage, key)?; + + let data_key = self.get_data_key(key); + storage.delete(&data_key)?; + + Ok(value) + } + + /// Returns the value corresponding to the key, if any. + pub fn get(&self, storage: &S, key: &K) -> Result> + where + S: for<'iter> StorageRead<'iter>, + { + let data_key = self.get_data_key(key); + Self::read_key_val(storage, &data_key) + } + + /// Returns whether the map contains no elements. + pub fn is_empty(&self, storage: &S) -> Result + where + S: for<'iter> StorageRead<'iter>, + { + let mut iter = + storage_api::iter_prefix_bytes(storage, &self.get_data_prefix())?; + Ok(iter.next().is_none()) + } + + /// Reads the number of elements in the map. + /// + /// Note that this function shouldn't be used in transactions and VPs code + /// on unbounded maps to avoid gas usage increasing with the length of the + /// set. + #[allow(clippy::len_without_is_empty)] + pub fn len(&self, storage: &S) -> Result + where + S: for<'iter> StorageRead<'iter>, + { + let iter = + storage_api::iter_prefix_bytes(storage, &self.get_data_prefix())?; + iter.count().try_into().into_storage_result() + } + + /// An iterator visiting all key-value elements. The iterator element type + /// is `Result<(K, V)>`, because iterator's call to `next` may fail with + /// e.g. out of gas or data decoding error. + /// + /// Note that this function shouldn't be used in transactions and VPs code + /// on unbounded maps to avoid gas usage increasing with the length of the + /// map. 
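A hedged usage sketch of the nested variant (assuming `TestStorage` from the testing helpers and an enclosing function returning `storage_api::Result<()>`; the key layout is illustrative):

```rust,ignore
let key = storage::Key::parse("nested").unwrap();
let map_of_maps: NestedMap<u64, LazyMap<u64, String>> = NestedMap::open(key);
// `at` only builds a handle to the inner map under key 0; nothing is read yet
map_of_maps
    .at(&0)
    .insert(&mut storage, 25, "two five".to_string())?;
// The nested iterator yields `NestedSubKey::Data { key, nested_sub_key }`
// paired with the inner-most value
for res in map_of_maps.iter(&storage)? {
    let (sub_key, value) = res?;
    println!("{:?} => {}", sub_key, value);
}
```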
+ pub fn iter<'iter>( + &self, + storage: &'iter impl StorageRead<'iter>, + ) -> Result> + 'iter> { + let iter = storage_api::iter_prefix(storage, &self.get_data_prefix())?; + Ok(iter.map(|key_val_res| { + let (key, val) = key_val_res?; + let last_key_seg = key + .last() + .ok_or(ReadError::UnexpectedlyEmptyStorageKey) + .into_storage_result()?; + let key = K::parse(last_key_seg.raw()).into_storage_result()?; + Ok((key, val)) + })) + } + + /// Reads a value from storage + fn read_key_val( + storage: &S, + storage_key: &storage::Key, + ) -> Result> + where + S: for<'iter> StorageRead<'iter>, + { + let res = storage.read(storage_key)?; + Ok(res) + } + + /// Write a value into storage + fn write_key_val( + storage: &mut impl StorageWrite, + storage_key: &storage::Key, + val: V, + ) -> Result<()> { + storage.write(storage_key, val) + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::ledger::storage::testing::TestStorage; + + #[test] + fn test_lazy_map_basics() -> storage_api::Result<()> { + let mut storage = TestStorage::default(); + + let key = storage::Key::parse("test").unwrap(); + let lazy_map = LazyMap::::open(key); + + // The map should be empty at first + assert!(lazy_map.is_empty(&storage)?); + assert!(lazy_map.len(&storage)? == 0); + assert!(!lazy_map.contains(&storage, &0)?); + assert!(!lazy_map.contains(&storage, &1)?); + assert!(lazy_map.iter(&storage)?.next().is_none()); + assert!(lazy_map.get(&storage, &0)?.is_none()); + assert!(lazy_map.get(&storage, &1)?.is_none()); + assert!(lazy_map.remove(&mut storage, &0)?.is_none()); + assert!(lazy_map.remove(&mut storage, &1)?.is_none()); + + // Insert a new value and check that it's added + let (key, val) = (123, "Test".to_string()); + lazy_map.insert(&mut storage, key, val.clone())?; + assert!(!lazy_map.contains(&storage, &0)?); + assert!(lazy_map.contains(&storage, &key)?); + assert!(!lazy_map.is_empty(&storage)?); + assert!(lazy_map.len(&storage)? == 1); + assert_eq!( + lazy_map.iter(&storage)?.next().unwrap()?, + (key, val.clone()) + ); + assert!(lazy_map.get(&storage, &0)?.is_none()); + assert_eq!(lazy_map.get(&storage, &key)?.unwrap(), val); + + // Remove the last value and check that the map is empty again + let removed = lazy_map.remove(&mut storage, &key)?.unwrap(); + assert_eq!(removed, val); + assert!(lazy_map.is_empty(&storage)?); + assert!(lazy_map.len(&storage)? == 0); + assert!(!lazy_map.contains(&storage, &0)?); + assert!(!lazy_map.contains(&storage, &1)?); + assert!(lazy_map.get(&storage, &0)?.is_none()); + assert!(lazy_map.get(&storage, &key)?.is_none()); + assert!(lazy_map.iter(&storage)?.next().is_none()); + assert!(lazy_map.remove(&mut storage, &key)?.is_none()); + + Ok(()) + } +} diff --git a/shared/src/ledger/storage_api/collections/lazy_vec.rs b/shared/src/ledger/storage_api/collections/lazy_vec.rs new file mode 100644 index 0000000000..59eaa225e5 --- /dev/null +++ b/shared/src/ledger/storage_api/collections/lazy_vec.rs @@ -0,0 +1,516 @@ +//! Lazy dynamically-sized vector. 
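Before the new file continues, a sketch of the key layout it produces (assuming `TestStorage`; the rendered key strings are illustrative):

```rust,ignore
// A `LazyVec` opened at key "test" and pushed three times ends up with:
//   test/len    -> 3u64 (Borsh-encoded)
//   test/data/0 -> 10u32
//   test/data/1 -> 11u32
//   test/data/2 -> 12u32
let lazy_vec = LazyVec::<u32>::open(storage::Key::parse("test").unwrap());
for v in [10_u32, 11, 12] {
    lazy_vec.push(&mut storage, v)?;
}
assert_eq!(lazy_vec.len(&storage)?, 3);
```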
+
+use std::collections::BTreeSet;
+use std::fmt::Debug;
+use std::marker::PhantomData;
+
+use borsh::{BorshDeserialize, BorshSerialize};
+use thiserror::Error;
+
+use super::super::Result;
+use super::LazyCollection;
+use crate::ledger::storage_api::validation::{self, Data};
+use crate::ledger::storage_api::{self, ResultExt, StorageRead, StorageWrite};
+use crate::ledger::vp_env::VpEnv;
+use crate::types::storage::{self, DbKeySeg};
+
+/// Subkey pointing to the length of the LazyVec
+pub const LEN_SUBKEY: &str = "len";
+/// Subkey corresponding to the data elements of the LazyVec
+pub const DATA_SUBKEY: &str = "data";
+
+/// Using `u64` for vector's indices
+pub type Index = u64;
+
+/// Lazy dynamically-sized vector.
+///
+/// This can be used as an alternative to `std::collections::Vec`. In the lazy
+/// vector, the elements do not reside in memory but are instead read and
+/// written to storage sub-keys of the storage `key` used to construct the
+/// vector.
+#[derive(Clone, Debug)]
+pub struct LazyVec<T> {
+    key: storage::Key,
+    phantom: PhantomData<T>,
+}
+
+/// Possible sub-keys of a [`LazyVec`]
+#[derive(Debug)]
+pub enum SubKey {
+    /// Length sub-key
+    Len,
+    /// Data sub-key, further sub-keyed by its index
+    Data(Index),
+}
+
+/// Possible sub-keys of a [`LazyVec`], together with their [`validation::Data`]
+/// that contains prior and posterior state.
+#[derive(Debug)]
+pub enum SubKeyWithData<T> {
+    /// Length sub-key
+    Len(Data<Index>),
+    /// Data sub-key, further sub-keyed by its index
+    Data(Index, Data<T>),
+}
+
+/// Possible actions that can modify a [`LazyVec`]. This roughly corresponds to
+/// the methods that have `StorageWrite` access.
+#[derive(Clone, Debug)]
+pub enum Action<T> {
+    /// Push a value `T` into a [`LazyVec`]
+    Push(T),
+    /// Pop a value `T` from a [`LazyVec`]
+    Pop(T),
+    /// Update a value `T` at index from pre to post state in a [`LazyVec`]
+    Update {
+        /// index at which the value is updated
+        index: Index,
+        /// value before the update
+        pre: T,
+        /// value after the update
+        post: T,
+    },
+}
+
+#[allow(missing_docs)]
+#[derive(Error, Debug)]
+pub enum ValidationError {
+    #[error("Incorrect difference in LazyVec's length")]
+    InvalidLenDiff,
+    #[error("An empty LazyVec must be deleted from storage")]
+    EmptyVecShouldBeDeleted,
+    #[error("Push at a wrong index. Got {got}, expected {expected}.")]
+    UnexpectedPushIndex { got: Index, expected: Index },
+    #[error("Pop at a wrong index. Got {got}, expected {expected}.")]
+    UnexpectedPopIndex { got: Index, expected: Index },
+    #[error(
+        "Update (or a combination of pop and push) at a wrong index. Got \
+         {got}, expected maximum {max}."
+    )]
+    UnexpectedUpdateIndex { got: Index, max: Index },
+    #[error("An index has overflown its representation: {0}")]
+    IndexOverflow(<u64 as TryFrom<usize>>::Error),
+    #[error("Unexpected underflow in `{0} - {1}`")]
+    UnexpectedUnderflow(Index, Index),
+    #[error("Invalid storage key {0}")]
+    InvalidSubKey(storage::Key),
+}
+
+#[allow(missing_docs)]
+#[derive(Error, Debug)]
+pub enum UpdateError {
+    #[error(
+        "Invalid index into a LazyVec. Got {index}, but the length is {len}"
+    )]
+    InvalidIndex { index: Index, len: u64 },
+}
+
+/// [`LazyVec`] validation result
+pub type ValidationResult<T> = std::result::Result<T, ValidationError>;
+
+impl<T> LazyCollection for LazyVec<T>
+where
+    T: BorshSerialize + BorshDeserialize + 'static + Debug,
+{
+    type Action = Action<T>;
+    type SubKey = SubKey;
+    type SubKeyWithData = SubKeyWithData<T>;
+    type Value = T;
+
+    /// Create or use an existing vector with the given storage `key`.
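As a sketch of how a VP might consume these actions once validation succeeds (the policy below is made up for illustration):

```rust,ignore
// Accept only pushes and updates that keep values under some cap
fn check_actions(actions: Vec<Action<u64>>) -> bool {
    actions.iter().all(|action| match action {
        Action::Push(val) => *val <= 100,
        Action::Pop(_) => true,
        Action::Update { pre: _, post, .. } => *post <= 100,
    })
}
```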
+ fn open(key: storage::Key) -> Self { + Self { + key, + phantom: PhantomData, + } + } + + /// Check if the given storage key is a valid LazyVec sub-key and if so + /// return which one + fn is_valid_sub_key( + &self, + key: &storage::Key, + ) -> storage_api::Result> { + let suffix = match key.split_prefix(&self.key) { + None => { + // not matching prefix, irrelevant + return Ok(None); + } + Some(None) => { + // no suffix, invalid + return Err(ValidationError::InvalidSubKey(key.clone())) + .into_storage_result(); + } + Some(Some(suffix)) => suffix, + }; + + // Match the suffix against expected sub-keys + match &suffix.segments[..] { + [DbKeySeg::StringSeg(sub)] if sub == LEN_SUBKEY => { + Ok(Some(SubKey::Len)) + } + [DbKeySeg::StringSeg(sub_a), DbKeySeg::StringSeg(sub_b)] + if sub_a == DATA_SUBKEY => + { + if let Ok(index) = storage::KeySeg::parse(sub_b.clone()) { + Ok(Some(SubKey::Data(index))) + } else { + Err(ValidationError::InvalidSubKey(key.clone())) + .into_storage_result() + } + } + _ => Err(ValidationError::InvalidSubKey(key.clone())) + .into_storage_result(), + } + } + + fn read_sub_key_data( + env: &ENV, + storage_key: &storage::Key, + sub_key: Self::SubKey, + ) -> storage_api::Result> + where + ENV: for<'a> VpEnv<'a>, + { + let change = match sub_key { + SubKey::Len => { + let data = validation::read_data(env, storage_key)?; + data.map(SubKeyWithData::Len) + } + SubKey::Data(index) => { + let data = validation::read_data(env, storage_key)?; + data.map(|data| SubKeyWithData::Data(index, data)) + } + }; + Ok(change) + } + + /// The validation rules for a [`LazyVec`] are: + /// - A difference in the vector's length must correspond to the + /// difference in how many elements were pushed versus how many elements + /// were popped. + /// - An empty vector must be deleted from storage + /// - In addition, we check that indices of any changes are within an + /// expected range (i.e. 
the vectors indices should always be + /// monotonically increasing from zero) + fn validate_changed_sub_keys( + keys: Vec, + ) -> storage_api::Result> { + let mut actions = vec![]; + + // We need to accumulate some values for what's changed + let mut post_gt_pre = false; + let mut len_diff: u64 = 0; + let mut len_pre: u64 = 0; + let mut added = BTreeSet::::default(); + let mut updated = BTreeSet::::default(); + let mut deleted = BTreeSet::::default(); + + for key in keys { + match key { + SubKeyWithData::Len(data) => match data { + Data::Add { post } => { + if post == 0 { + return Err( + ValidationError::EmptyVecShouldBeDeleted, + ) + .into_storage_result(); + } + post_gt_pre = true; + len_diff = post; + } + Data::Update { pre, post } => { + if post == 0 { + return Err( + ValidationError::EmptyVecShouldBeDeleted, + ) + .into_storage_result(); + } + if post > pre { + post_gt_pre = true; + len_diff = post - pre; + } else { + len_diff = pre - post; + } + len_pre = pre; + } + Data::Delete { pre } => { + len_diff = pre; + len_pre = pre; + } + }, + SubKeyWithData::Data(index, data) => match data { + Data::Add { post } => { + actions.push(Action::Push(post)); + added.insert(index); + } + Data::Update { pre, post } => { + actions.push(Action::Update { index, pre, post }); + updated.insert(index); + } + Data::Delete { pre } => { + actions.push(Action::Pop(pre)); + deleted.insert(index); + } + }, + } + } + let added_len: u64 = added + .len() + .try_into() + .map_err(ValidationError::IndexOverflow) + .into_storage_result()?; + let deleted_len: u64 = deleted + .len() + .try_into() + .map_err(ValidationError::IndexOverflow) + .into_storage_result()?; + + if len_diff != 0 + && !(if post_gt_pre { + deleted_len + len_diff == added_len + } else { + added_len + len_diff == deleted_len + }) + { + return Err(ValidationError::InvalidLenDiff).into_storage_result(); + } + + let mut last_added = Option::None; + // Iterate additions in increasing order of indices + for index in added { + if let Some(last_added) = last_added { + // Following additions should be at monotonically increasing + // indices + let expected = last_added + 1; + if expected != index { + return Err(ValidationError::UnexpectedPushIndex { + got: index, + expected, + }) + .into_storage_result(); + } + } else if index != len_pre { + // The first addition must be at the pre length value. + // If something is deleted and a new value is added + // in its place, it will go through `Data::Update` + // instead. 
+ return Err(ValidationError::UnexpectedPushIndex { + got: index, + expected: len_pre, + }) + .into_storage_result(); + } + last_added = Some(index); + } + + let mut last_deleted = Option::None; + // Also iterate deletions in increasing order of indices + for index in deleted { + if let Some(last_added) = last_deleted { + // Following deletions should be at monotonically increasing + // indices + let expected = last_added + 1; + if expected != index { + return Err(ValidationError::UnexpectedPopIndex { + got: index, + expected, + }) + .into_storage_result(); + } + } + last_deleted = Some(index); + } + if let Some(index) = last_deleted { + if len_pre > 0 { + let expected = len_pre - 1; + if index != expected { + // The last deletion must be at the pre length value minus 1 + return Err(ValidationError::UnexpectedPopIndex { + got: index, + expected: len_pre, + }) + .into_storage_result(); + } + } + } + + // And finally iterate updates + for index in updated { + // Update index has to be within the length bounds + let max = len_pre + len_diff; + if index >= max { + return Err(ValidationError::UnexpectedUpdateIndex { + got: index, + max, + }) + .into_storage_result(); + } + } + + Ok(actions) + } +} + +// Generic `LazyVec` methods that require no bounds on values `T` +impl LazyVec { + /// Reads the number of elements in the vector. + #[allow(clippy::len_without_is_empty)] + pub fn len(&self, storage: &S) -> Result + where + S: for<'iter> StorageRead<'iter>, + { + let len = storage.read(&self.get_len_key())?; + Ok(len.unwrap_or_default()) + } + + /// Returns `true` if the vector contains no elements. + pub fn is_empty(&self, storage: &S) -> Result + where + S: for<'iter> StorageRead<'iter>, + { + Ok(self.len(storage)? == 0) + } + + /// Get the prefix of set's elements storage + fn get_data_prefix(&self) -> storage::Key { + self.key.push(&DATA_SUBKEY.to_owned()).unwrap() + } + + /// Get the sub-key of vector's elements storage + fn get_data_key(&self, index: Index) -> storage::Key { + self.get_data_prefix().push(&index).unwrap() + } + + /// Get the sub-key of vector's length storage + fn get_len_key(&self) -> storage::Key { + self.key.push(&LEN_SUBKEY.to_owned()).unwrap() + } +} + +// `LazyVec` methods with borsh encoded values `T` +impl LazyVec +where + T: BorshSerialize + BorshDeserialize + 'static, +{ + /// Appends an element to the back of a collection. + pub fn push(&self, storage: &mut S, val: T) -> Result<()> + where + S: StorageWrite + for<'iter> StorageRead<'iter>, + { + let len = self.len(storage)?; + let data_key = self.get_data_key(len); + storage.write(&data_key, val)?; + storage.write(&self.get_len_key(), len + 1) + } + + /// Removes the last element from a vector and returns it, or `Ok(None)` if + /// it is empty. + /// + /// Note that an empty vector is completely removed from storage. + pub fn pop(&self, storage: &mut S) -> Result> + where + S: StorageWrite + for<'iter> StorageRead<'iter>, + { + let len = self.len(storage)?; + if len == 0 { + Ok(None) + } else { + let index = len - 1; + let data_key = self.get_data_key(index); + if len == 1 { + storage.delete(&self.get_len_key())?; + } else { + storage.write(&self.get_len_key(), index)?; + } + let popped_val = storage.read(&data_key)?; + storage.delete(&data_key)?; + Ok(popped_val) + } + } + + /// Update an element at the given index. + /// + /// The index must be smaller than the length of the vector, otherwise this + /// will fail with `UpdateError::InvalidIndex`. 
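A short sketch of the push/pop round-trip this implements (assuming `TestStorage` and an enclosing function returning `storage_api::Result<()>`):

```rust,ignore
let lazy_vec = LazyVec::<u32>::open(storage::Key::parse("test").unwrap());
lazy_vec.push(&mut storage, 7)?;
assert_eq!(lazy_vec.pop(&mut storage)?, Some(7));
// Once emptied, the "len" sub-key is deleted too, per the rule that an
// empty LazyVec must be removed from storage
assert!(lazy_vec.is_empty(&storage)?);
assert_eq!(lazy_vec.pop(&mut storage)?, None);
```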
+ pub fn update(&self, storage: &mut S, index: Index, val: T) -> Result<()> + where + S: StorageWrite + for<'iter> StorageRead<'iter>, + { + let len = self.len(storage)?; + if index >= len { + return Err(UpdateError::InvalidIndex { index, len }) + .into_storage_result(); + } + let data_key = self.get_data_key(index); + storage.write(&data_key, val) + } + + /// Read an element at the index or `Ok(None)` if out of bounds. + pub fn get(&self, storage: &S, index: Index) -> Result> + where + S: for<'iter> StorageRead<'iter>, + { + storage.read(&self.get_data_key(index)) + } + + /// An iterator visiting all elements. The iterator element type is + /// `Result`, because iterator's call to `next` may fail with e.g. out of + /// gas or data decoding error. + /// + /// Note that this function shouldn't be used in transactions and VPs code + /// on unbounded sets to avoid gas usage increasing with the length of the + /// set. + pub fn iter<'iter>( + &self, + storage: &'iter impl StorageRead<'iter>, + ) -> Result> + 'iter> { + let iter = storage_api::iter_prefix(storage, &self.get_data_prefix())?; + Ok(iter.map(|key_val_res| { + let (_key, val) = key_val_res?; + Ok(val) + })) + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::ledger::storage::testing::TestStorage; + + #[test] + fn test_lazy_vec_basics() -> storage_api::Result<()> { + let mut storage = TestStorage::default(); + + let key = storage::Key::parse("test").unwrap(); + let lazy_vec = LazyVec::::open(key); + + // The vec should be empty at first + assert!(lazy_vec.is_empty(&storage)?); + assert!(lazy_vec.len(&storage)? == 0); + assert!(lazy_vec.iter(&storage)?.next().is_none()); + assert!(lazy_vec.pop(&mut storage)?.is_none()); + assert!(lazy_vec.get(&storage, 0)?.is_none()); + assert!(lazy_vec.get(&storage, 1)?.is_none()); + + // Push a new value and check that it's added + lazy_vec.push(&mut storage, 15_u32)?; + assert!(!lazy_vec.is_empty(&storage)?); + assert!(lazy_vec.len(&storage)? == 1); + assert_eq!(lazy_vec.iter(&storage)?.next().unwrap()?, 15_u32); + assert_eq!(lazy_vec.get(&storage, 0)?.unwrap(), 15_u32); + assert!(lazy_vec.get(&storage, 1)?.is_none()); + + // Pop the last value and check that the vec is empty again + let popped = lazy_vec.pop(&mut storage)?.unwrap(); + assert_eq!(popped, 15_u32); + assert!(lazy_vec.is_empty(&storage)?); + assert!(lazy_vec.len(&storage)? == 0); + assert!(lazy_vec.iter(&storage)?.next().is_none()); + assert!(lazy_vec.pop(&mut storage)?.is_none()); + assert!(lazy_vec.get(&storage, 0)?.is_none()); + assert!(lazy_vec.get(&storage, 1)?.is_none()); + + Ok(()) + } +} diff --git a/shared/src/ledger/storage_api/collections/mod.rs b/shared/src/ledger/storage_api/collections/mod.rs new file mode 100644 index 0000000000..688b76bd49 --- /dev/null +++ b/shared/src/ledger/storage_api/collections/mod.rs @@ -0,0 +1,143 @@ +//! Lazy data structures for storage access where elements are not all loaded +//! into memory. This serves to minimize gas costs, avoid unbounded iteration +//! in some cases, and ease the validation of storage changes in VPs. +//! +//! Rather than finding the diff of the state before and after (which requires +//! iteration over both of the states that also have to be decoded), VPs will +//! just receive the storage sub-keys that have experienced changes without +//! having to check any of the unchanged elements. 
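A sketch of the flow a VP is expected to follow with these collections (`env`, `keys_changed` and `lazy_vec` are stand-ins for the VP's inputs and the collection handle):

```rust,ignore
let mut builder = None;
for key in keys_changed {
    // Records pre/post data for keys that belong to this collection;
    // returns whether the key matched its prefix
    let _is_ours = lazy_vec.accumulate(env, &mut builder, key)?;
}
if let Some(builder) = builder {
    // Turns the accumulated raw changes into high-level actions, or errors
    // out if the changes are not a valid manipulation of the collection
    let _actions = LazyVec::<u32>::validate(builder)?;
}
```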
+
+use std::fmt::Debug;
+
+use borsh::BorshDeserialize;
+use derivative::Derivative;
+use thiserror::Error;
+
+pub mod lazy_map;
+pub mod lazy_vec;
+
+pub use lazy_map::LazyMap;
+pub use lazy_vec::LazyVec;
+
+use crate::ledger::storage_api;
+use crate::ledger::vp_env::VpEnv;
+use crate::types::storage;
+
+#[allow(missing_docs)]
+#[derive(Error, Debug)]
+pub enum ReadError {
+    #[error("A storage key was unexpectedly empty")]
+    UnexpectedlyEmptyStorageKey,
+}
+
+/// Simple lazy collection with borsh deserializable elements
+#[derive(Debug)]
+pub struct Simple;
+
+/// Lazy collection with a nested lazy collection
+#[derive(Debug)]
+pub struct Nested;
+
+/// A lazy collection of storage values is a handler with some storage prefix
+/// that is given to its `fn open()`. The values are typically nested under
+/// this prefix and they can be changed individually (e.g. without reading in
+/// the whole collection) and their changes directly indicated to the validity
+/// predicates, which do not need to iterate the whole collection pre/post to
+/// find diffs.
+///
+/// An empty collection must be deleted from storage.
+pub trait LazyCollection {
+    /// Actions on the collection determined from changed storage keys by
+    /// `Self::validate`
+    type Action;
+
+    /// Possible sub-keys in the collection
+    type SubKey: Debug;
+
+    /// Possible sub-keys together with the data read from storage
+    type SubKeyWithData: Debug;
+
+    /// A type of a value in the inner-most collection
+    type Value: BorshDeserialize;
+
+    /// Create or use an existing collection with the given storage `key`.
+    fn open(key: storage::Key) -> Self;
+
+    /// Check if the given storage key is a valid sub-key of this collection
+    /// and if so return which one. Returns:
+    /// - `Ok(Some(_))` if it's a valid sub-key
+    /// - `Ok(None)` if it's not a sub-key
+    /// - `Err(_)` if it's an invalid sub-key
+    fn is_valid_sub_key(
+        &self,
+        key: &storage::Key,
+    ) -> storage_api::Result<Option<Self::SubKey>>;
+
+    /// Try to read and decode the data for each changed storage key in the
+    /// prior and posterior state. If there is no value in either the prior or
+    /// the posterior state (which can happen when a transaction e.g. writes
+    /// and then deletes one storage key; this is treated as a no-op, as it
+    /// doesn't affect the result of validation), returns `Ok(None)`.
+    fn read_sub_key_data<ENV>(
+        env: &ENV,
+        storage_key: &storage::Key,
+        sub_key: Self::SubKey,
+    ) -> storage_api::Result<Option<Self::SubKeyWithData>>
+    where
+        ENV: for<'a> VpEnv<'a>;
+
+    /// Validate changed sub-keys associated with their data and return
+    /// a vector of `Self::Action`s, if the changes are valid
+    fn validate_changed_sub_keys(
+        keys: Vec<Self::SubKeyWithData>,
+    ) -> storage_api::Result<Vec<Self::Action>>;
+
+    /// Accumulate storage changes inside a `ValidationBuilder`. This is
+    /// typically done by the validity predicate while looping through the
+    /// changed keys. If the resulting `builder` is not `None`, one must
+    /// call `fn validate()` on it to get the validation result.
+    /// This function will return `Ok(true)` if the storage key is a valid
+    /// sub-key of this collection, `Ok(false)` if the storage key doesn't
+    /// match the prefix of this collection, or error if the prefix matches
+    /// this collection, but the key itself is not recognized.
+    fn accumulate<ENV>(
+        &self,
+        env: &ENV,
+        builder: &mut Option<ValidationBuilder<Self::SubKeyWithData>>,
+        key_changed: &storage::Key,
+    ) -> storage_api::Result<bool>
+    where
+        ENV: for<'a> VpEnv<'a>,
+    {
+        if let Some(sub) = self.is_valid_sub_key(key_changed)?
{ + let change = Self::read_sub_key_data(env, key_changed, sub)?; + if let Some(change) = change { + let builder = + builder.get_or_insert(ValidationBuilder::default()); + builder.changes.push(change); + } + return Ok(true); + } + Ok(false) + } + + /// Execute validation on the validation builder, to be called when + /// `accumulate` instantiates the builder to `Some(_)`, after all the + /// changes storage keys have been processed. + fn validate( + builder: ValidationBuilder, + ) -> storage_api::Result> { + Self::validate_changed_sub_keys(builder.changes) + } +} + +/// Validation builder from storage changes. The changes can +/// be accumulated with `LazyCollection::accumulate()` and then turned into a +/// list of valid actions on the collection with `LazyCollection::validate()`. +#[derive(Debug, Derivative)] +// https://mcarton.github.io/rust-derivative/latest/Default.html#custom-bound +#[derivative(Default(bound = ""))] +pub struct ValidationBuilder { + /// The accumulator of found changes under the vector + pub changes: Vec, +} diff --git a/shared/src/ledger/storage_api/error.rs b/shared/src/ledger/storage_api/error.rs new file mode 100644 index 0000000000..f99539bc87 --- /dev/null +++ b/shared/src/ledger/storage_api/error.rs @@ -0,0 +1,91 @@ +//! Storage API error type, extensible with custom user errors and static string +//! messages. + +use thiserror::Error; + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum Error { + #[error("{0}")] + SimpleMessage(&'static str), + #[error("{0}")] + Custom(CustomError), + #[error("{0}: {1}")] + CustomWithMessage(&'static str, CustomError), +} + +/// Result of a storage API call. +pub type Result = std::result::Result; + +/// Result extension to easily wrap custom errors into [`enum@Error`]. +// This is separate from `ResultExt`, because the implementation requires +// different bounds for `T`. +pub trait ResultExt { + /// Convert a [`std::result::Result`] into storage_api [`Result`]. + fn into_storage_result(self) -> Result; + + /// Add a static message to a possible error in [`Result`]. + fn wrap_err(self, msg: &'static str) -> Result; +} + +impl ResultExt for std::result::Result +where + E: std::error::Error + Send + Sync + 'static, +{ + fn into_storage_result(self) -> Result { + self.map_err(Error::new) + } + + fn wrap_err(self, msg: &'static str) -> Result { + self.map_err(|err| Error::wrap(msg, err)) + } +} + +impl Error { + /// Create an [`enum@Error`] from another [`std::error::Error`]. + pub fn new(error: E) -> Self + where + E: Into>, + { + Self::Custom(CustomError(error.into())) + } + + /// Create an [`enum@Error`] from a static message. + #[inline] + pub const fn new_const(msg: &'static str) -> Self { + Self::SimpleMessage(msg) + } + + /// Wrap another [`std::error::Error`] with a static message. + pub fn wrap(msg: &'static str, error: E) -> Self + where + E: Into>, + { + Self::CustomWithMessage(msg, CustomError(error.into())) + } +} + +/// A custom error +#[derive(Debug)] +pub struct CustomError(pub Box); + +impl std::fmt::Display for CustomError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.0.fmt(f) + } +} + +/// An extension to [`Option`] to allow turning `None` case to an Error from a +/// static string (handy for WASM). +pub trait OptionExt { + /// Transforms the [`Option`] into a [`Result`], mapping + /// [`Some(v)`] to [`Ok(v)`] and [`None`] to the given static error + /// message. 
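A sketch of how these extension traits compose at a call site (`parse_thing`, `read_raw` and `maybe_key` are hypothetical):

```rust,ignore
// Wrap a library error into `storage_api::Error`
let amount = parse_thing(&bytes).into_storage_result()?;
// Wrap it and attach a static message
let value = read_raw(&key).wrap_err("failed to read the raw value")?;
// Turn an `Option` into a `Result` with a static message
let key = maybe_key.ok_or_err_msg("a storage key is required")?;
```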
+    fn ok_or_err_msg(self, msg: &'static str) -> Result<T>;
+}
+
+impl<T> OptionExt<T> for Option<T> {
+    fn ok_or_err_msg(self, msg: &'static str) -> Result<T> {
+        self.ok_or_else(|| Error::new_const(msg))
+    }
+}
diff --git a/shared/src/ledger/storage_api/mod.rs b/shared/src/ledger/storage_api/mod.rs
new file mode 100644
index 0000000000..b806f35801
--- /dev/null
+++ b/shared/src/ledger/storage_api/mod.rs
@@ -0,0 +1,257 @@
+//! The common storage read trait is implemented in the storage, client RPC, tx
+//! and VPs (both native and WASM).
+
+pub mod collections;
+mod error;
+pub mod validation;
+
+use borsh::{BorshDeserialize, BorshSerialize};
+pub use error::{CustomError, Error, OptionExt, Result, ResultExt};
+
+use crate::types::storage::{self, BlockHash, BlockHeight, Epoch};
+
+/// Common storage read interface
+///
+/// If you're using this trait and the compiler complains about needing an
+/// explicit lifetime parameter, simply use trait bounds with the following
+/// syntax:
+///
+/// ```rust,ignore
+/// where
+///     S: for<'iter> StorageRead<'iter>
+/// ```
+///
+/// If you want to know why this is needed, see the to-do task below. The
+/// syntax for this relies on higher-rank lifetimes, see e.g.
+/// <https://doc.rust-lang.org/nomicon/hrtb.html>.
+///
+/// TODO: once GATs are stabilized, we should be able to remove the `'iter`
+/// lifetime param that is currently the only way to make the prefix iterator
+/// typecheck in the `<D as DBIter<'iter>>::PrefixIter` associated type used in
+/// `impl StorageRead for Storage` (shared/src/ledger/storage/mod.rs).
+/// See <https://github.com/rust-lang/rust/issues/44265>.
+pub trait StorageRead<'iter> {
+    /// Storage read prefix iterator
+    type PrefixIter;
+
+    /// Storage read Borsh encoded value. It will try to read from the storage
+    /// and decode it if found.
+    fn read<T: BorshDeserialize>(
+        &self,
+        key: &storage::Key,
+    ) -> Result<Option<T>> {
+        let bytes = self.read_bytes(key)?;
+        match bytes {
+            Some(bytes) => {
+                let val = T::try_from_slice(&bytes).into_storage_result()?;
+                Ok(Some(val))
+            }
+            None => Ok(None),
+        }
+    }
+
+    /// Storage read raw bytes. It will try to read from the storage.
+    fn read_bytes(&self, key: &storage::Key) -> Result<Option<Vec<u8>>>;
+
+    /// Storage `has_key` check. It will try to read from the storage.
+    fn has_key(&self, key: &storage::Key) -> Result<bool>;
+
+    /// Storage prefix iterator ordered by the storage keys. It will try to get
+    /// an iterator from the storage.
+    ///
+    /// For a more user-friendly iterator API, use [`fn@iter_prefix`] or
+    /// [`fn@iter_prefix_bytes`] instead.
+    fn iter_prefix(
+        &'iter self,
+        prefix: &storage::Key,
+    ) -> Result<Self::PrefixIter>;
+
+    /// Storage prefix iterator in reverse order of the storage keys. It will
+    /// try to get an iterator from the storage.
+    ///
+    /// For a more user-friendly iterator API, use [`fn@rev_iter_prefix`] or
+    /// [`fn@rev_iter_prefix_bytes`] instead.
+    fn rev_iter_prefix(
+        &'iter self,
+        prefix: &storage::Key,
+    ) -> Result<Self::PrefixIter>;
+
+    /// Storage prefix iterator. It will try to read from the storage.
+    fn iter_next(
+        &self,
+        iter: &mut Self::PrefixIter,
+    ) -> Result<Option<(String, Vec<u8>)>>;
+
+    /// Getting the chain ID.
+    fn get_chain_id(&self) -> Result<String>;
+
+    /// Getting the block height. The height is that of the block to which the
+    /// current transaction is being applied.
+    fn get_block_height(&self) -> Result<BlockHeight>;
+
+    /// Getting the block hash. The hash is that of the block to which the
+    /// current transaction is being applied.
+    fn get_block_hash(&self) -> Result<BlockHash>;
+
+    /// Getting the block epoch. The epoch is that of the block to which the
+    /// current transaction is being applied.
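For example, a library helper written once against this bound can be reused by the protocol, transactions and VPs alike (a sketch; `token::balance_key` is the existing helper, the rest is illustrative):

```rust,ignore
fn read_balance<S>(
    storage: &S,
    token_addr: &Address,
    owner: &Address,
) -> Result<token::Amount>
where
    S: for<'iter> StorageRead<'iter>,
{
    let key = token::balance_key(token_addr, owner);
    // A missing balance key is read as the default (zero) amount
    Ok(storage.read(&key)?.unwrap_or_default())
}
```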
+ fn get_block_epoch(&self) -> Result; +} + +/// Common storage write interface +pub trait StorageWrite { + /// Write a value to be encoded with Borsh at the given key to storage. + fn write( + &mut self, + key: &storage::Key, + val: T, + ) -> Result<()> { + let bytes = val.try_to_vec().into_storage_result()?; + self.write_bytes(key, bytes) + } + + /// Write a value as bytes at the given key to storage. + fn write_bytes( + &mut self, + key: &storage::Key, + val: impl AsRef<[u8]>, + ) -> Result<()>; + + /// Delete a value at the given key from storage. + fn delete(&mut self, key: &storage::Key) -> Result<()>; +} + +/// Iterate items matching the given prefix, ordered by the storage keys. +pub fn iter_prefix_bytes<'a>( + storage: &'a impl StorageRead<'a>, + prefix: &crate::types::storage::Key, +) -> Result)>> + 'a> { + let iter = storage.iter_prefix(prefix)?; + let iter = itertools::unfold(iter, |iter| { + match storage.iter_next(iter) { + Ok(Some((key, val))) => { + let key = match storage::Key::parse(key).into_storage_result() { + Ok(key) => key, + Err(err) => { + // Propagate key encoding errors into Iterator's Item + return Some(Err(err)); + } + }; + Some(Ok((key, val))) + } + Ok(None) => None, + Err(err) => { + // Propagate `iter_next` errors into Iterator's Item + Some(Err(err)) + } + } + }); + Ok(iter) +} + +/// Iterate Borsh encoded items matching the given prefix, ordered by the +/// storage keys. +pub fn iter_prefix<'a, T>( + storage: &'a impl StorageRead<'a>, + prefix: &crate::types::storage::Key, +) -> Result> + 'a> +where + T: BorshDeserialize, +{ + let iter = storage.iter_prefix(prefix)?; + let iter = itertools::unfold(iter, |iter| { + match storage.iter_next(iter) { + Ok(Some((key, val))) => { + let key = match storage::Key::parse(key).into_storage_result() { + Ok(key) => key, + Err(err) => { + // Propagate key encoding errors into Iterator's Item + return Some(Err(err)); + } + }; + let val = match T::try_from_slice(&val).into_storage_result() { + Ok(val) => val, + Err(err) => { + // Propagate val encoding errors into Iterator's Item + return Some(Err(err)); + } + }; + Some(Ok((key, val))) + } + Ok(None) => None, + Err(err) => { + // Propagate `iter_next` errors into Iterator's Item + Some(Err(err)) + } + } + }); + Ok(iter) +} + +/// Iterate items matching the given prefix, reverse ordered by the storage +/// keys. +pub fn rev_iter_prefix_bytes<'a>( + storage: &'a impl StorageRead<'a>, + prefix: &crate::types::storage::Key, +) -> Result)>> + 'a> { + let iter = storage.rev_iter_prefix(prefix)?; + let iter = itertools::unfold(iter, |iter| { + match storage.iter_next(iter) { + Ok(Some((key, val))) => { + let key = match storage::Key::parse(key).into_storage_result() { + Ok(key) => key, + Err(err) => { + // Propagate key encoding errors into Iterator's Item + return Some(Err(err)); + } + }; + Some(Ok((key, val))) + } + Ok(None) => None, + Err(err) => { + // Propagate `iter_next` errors into Iterator's Item + Some(Err(err)) + } + } + }); + Ok(iter) +} + +/// Iterate Borsh encoded items matching the given prefix, reverse ordered by +/// the storage keys. 
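A usage sketch for the friendly wrappers (the prefix and the element type are illustrative):

```rust,ignore
let prefix = storage::Key::parse("balance").unwrap();
for res in iter_prefix(&storage, &prefix)? {
    // Each item arrives with the storage key parsed and the value
    // Borsh-decoded; errors surface per item
    let (key, amount): (storage::Key, token::Amount) = res?;
    println!("{} => {:?}", key, amount);
}
```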
+pub fn rev_iter_prefix<'a, T>( + storage: &'a impl StorageRead<'a>, + prefix: &crate::types::storage::Key, +) -> Result> + 'a> +where + T: BorshDeserialize, +{ + let iter = storage.rev_iter_prefix(prefix)?; + let iter = itertools::unfold(iter, |iter| { + match storage.iter_next(iter) { + Ok(Some((key, val))) => { + let key = match storage::Key::parse(key).into_storage_result() { + Ok(key) => key, + Err(err) => { + // Propagate key encoding errors into Iterator's Item + return Some(Err(err)); + } + }; + let val = match T::try_from_slice(&val).into_storage_result() { + Ok(val) => val, + Err(err) => { + // Propagate val encoding errors into Iterator's Item + return Some(Err(err)); + } + }; + Some(Ok((key, val))) + } + Ok(None) => None, + Err(err) => { + // Propagate `iter_next` errors into Iterator's Item + Some(Err(err)) + } + } + }); + Ok(iter) +} diff --git a/shared/src/ledger/storage_api/validation/mod.rs b/shared/src/ledger/storage_api/validation/mod.rs new file mode 100644 index 0000000000..ca0e779a75 --- /dev/null +++ b/shared/src/ledger/storage_api/validation/mod.rs @@ -0,0 +1,54 @@ +//! Storage change validation helpers + +use std::fmt::Debug; + +use borsh::BorshDeserialize; + +use crate::ledger::storage_api; +use crate::ledger::vp_env::VpEnv; +use crate::types::storage; + +/// Data update with prior and posterior state. +#[derive(Clone, Debug)] +pub enum Data { + /// Newly added value + Add { + /// Posterior state + post: T, + }, + /// Updated value prior and posterior state + Update { + /// Prior state + pre: T, + /// Posterior state + post: T, + }, + /// Deleted value + Delete { + /// Prior state + pre: T, + }, +} + +/// Read the prior and posterior state for the given key. +pub fn read_data( + env: &ENV, + key: &storage::Key, +) -> Result>, storage_api::Error> +where + T: BorshDeserialize, + ENV: for<'a> VpEnv<'a>, +{ + let pre = env.read_pre(key)?; + let post = env.read_post(key)?; + Ok(match (pre, post) { + (None, None) => { + // If the key was inserted and then deleted in the same tx, we don't + // need to validate it as it's not visible to any VPs + None + } + (None, Some(post)) => Some(Data::Add { post }), + (Some(pre), None) => Some(Data::Delete { pre }), + (Some(pre), Some(post)) => Some(Data::Update { pre, post }), + }) +} diff --git a/shared/src/ledger/treasury/mod.rs b/shared/src/ledger/treasury/mod.rs index 071019059b..97965282da 100644 --- a/shared/src/ledger/treasury/mod.rs +++ b/shared/src/ledger/treasury/mod.rs @@ -11,7 +11,7 @@ use thiserror::Error; use self::storage as treasury_storage; use super::governance::vp::is_proposal_accepted; -use crate::ledger::native_vp::{self, Ctx, NativeVp}; +use crate::ledger::native_vp::{self, Ctx, NativeVp, VpEnv}; use crate::ledger::storage::{self as ledger_storage, StorageHasher}; use crate::types::address::{xan as nam, Address, InternalAddress}; use crate::types::storage::Key; @@ -80,63 +80,30 @@ where let is_max_funds_transfer_key = treasury_storage::get_max_transferable_fund_key(); let balance_key = token::balance_key(&nam(), &ADDRESS); - let max_transfer_amount = - self.ctx.read_pre(&is_max_funds_transfer_key); - let pre_balance = self.ctx.read_pre(&balance_key); - let post_balance = self.ctx.read_post(&balance_key); + let max_transfer_amount: std::result::Result< + Option, + _, + > = self.ctx.read_pre(&is_max_funds_transfer_key); + let pre_balance: std::result::Result< + Option, + _, + > = self.ctx.read_pre(&balance_key); + let post_balance: std::result::Result< + Option, + _, + > = self.ctx.read_post(&balance_key); if 
addr.ne(&ADDRESS) { return true; } match (max_transfer_amount, pre_balance, post_balance) { ( - Ok(max_transfer_amount), - Ok(pre_balance), - Ok(post_balance), + Ok(Some(max_transfer_amount)), + Ok(Some(pre_balance)), + Ok(Some(post_balance)), ) => { - match ( - max_transfer_amount, - pre_balance, - post_balance, - ) { - ( - Some(max_transfer_amount), - Some(pre_balance), - Some(post_balance), - ) => { - let max_transfer_amount = - token::Amount::try_from_slice( - &max_transfer_amount[..], - ) - .ok(); - let pre_balance = - token::Amount::try_from_slice( - &pre_balance[..], - ) - .ok(); - let post_balance = - token::Amount::try_from_slice( - &post_balance[..], - ) - .ok(); - match ( - max_transfer_amount, - pre_balance, - post_balance, - ) { - ( - Some(max_transfer_amount), - Some(pre_balance), - Some(post_balance), - ) => { - post_balance > pre_balance - || (pre_balance - post_balance - <= max_transfer_amount) - } - _ => false, - } - } - _ => false, - } + post_balance > pre_balance + || (pre_balance - post_balance + <= max_transfer_amount) } _ => false, } diff --git a/shared/src/ledger/tx_env.rs b/shared/src/ledger/tx_env.rs new file mode 100644 index 0000000000..7672ac6505 --- /dev/null +++ b/shared/src/ledger/tx_env.rs @@ -0,0 +1,63 @@ +//! Transaction environment contains functions that can be called from +//! inside a tx. + +use borsh::BorshSerialize; + +use crate::ledger::storage_api::{self, StorageRead, StorageWrite}; +use crate::types::address::Address; +use crate::types::ibc::IbcEvent; +use crate::types::storage; +use crate::types::time::Rfc3339String; + +/// Transaction host functions +pub trait TxEnv<'iter>: StorageRead<'iter> + StorageWrite { + /// Write a temporary value to be encoded with Borsh at the given key to + /// storage. + fn write_temp( + &mut self, + key: &storage::Key, + val: T, + ) -> Result<(), storage_api::Error>; + + /// Write a temporary value as bytes at the given key to storage. + fn write_bytes_temp( + &mut self, + key: &storage::Key, + val: impl AsRef<[u8]>, + ) -> Result<(), storage_api::Error>; + + /// Insert a verifier address. This address must exist on chain, otherwise + /// the transaction will be rejected. + /// + /// Validity predicates of each verifier addresses inserted in the + /// transaction will validate the transaction and will receive all the + /// changed storage keys and initialized accounts in their inputs. + fn insert_verifier( + &mut self, + addr: &Address, + ) -> Result<(), storage_api::Error>; + + /// Initialize a new account generates a new established address and + /// writes the given code as its validity predicate into the storage. + fn init_account( + &mut self, + code: impl AsRef<[u8]>, + ) -> Result; + + /// Update a validity predicate + fn update_validity_predicate( + &mut self, + addr: &Address, + code: impl AsRef<[u8]>, + ) -> Result<(), storage_api::Error>; + + /// Emit an IBC event. There can be only one event per transaction. On + /// multiple calls, only the last emitted event will be used. 
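A sketch of a transaction written against this trait (`ctx`, `vp_code`, `some_key`, `some_value` and `other_addr` are stand-ins):

```rust,ignore
// Create an account whose validity predicate is `vp_code`
let new_addr = ctx.init_account(&vp_code)?;
// Plain writes come from the `StorageWrite` super-trait
ctx.write(&some_key, some_value)?;
// Ask for `other_addr`'s VP to validate this transaction as well
ctx.insert_verifier(&other_addr)?;
```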
+ /// Emit an IBC event. There can be only one event per transaction. On + /// multiple calls, only the last emitted event will be used. + fn emit_ibc_event( + &mut self, + event: &IbcEvent, + ) -> Result<(), storage_api::Error>; + + /// Get the time of the current block header as an RFC 3339 string + fn get_block_time(&self) -> Result<Rfc3339String, storage_api::Error>; +}
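For orientation, a minimal sketch of a transaction written against this trait, combining the `StorageRead`/`StorageWrite` super-traits; the counter key and value type are assumptions for illustration:

// Sketch only: assumes a Borsh-encoded `u64` counter stored under `key`.
fn increment_counter<ENV>(
    env: &mut ENV,
    key: &storage::Key,
) -> Result<(), storage_api::Error>
where
    ENV: for<'iter> TxEnv<'iter>,
{
    // `read` comes from the `StorageRead` super-trait
    let current: u64 = env.read(key)?.unwrap_or_default();
    // `write` comes from the `StorageWrite` super-trait
    env.write(key, current + 1)
}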
diff --git a/shared/src/ledger/vp_env.rs b/shared/src/ledger/vp_env.rs index 1f59613e54..3f1b22ddc6 100644 --- a/shared/src/ledger/vp_env.rs +++ b/shared/src/ledger/vp_env.rs @@ -3,17 +3,181 @@ use std::num::TryFromIntError; +use borsh::BorshDeserialize; use thiserror::Error; use super::gas::MIN_STORAGE_GAS; +use super::storage_api::{self, StorageRead}; use crate::ledger::gas; use crate::ledger::gas::VpGasMeter; use crate::ledger::storage::write_log::WriteLog; use crate::ledger::storage::{self, write_log, Storage, StorageHasher}; use crate::proto::Tx; use crate::types::hash::Hash; +use crate::types::key::common; use crate::types::storage::{BlockHash, BlockHeight, Epoch, Key}; +/// Validity predicate's environment is available for native VPs and WASM VPs +pub trait VpEnv<'view> { + /// Storage read prefix iterator + type PrefixIter; + + /// Type to read storage state before the transaction execution + type Pre: StorageRead<'view, PrefixIter = Self::PrefixIter>; + + /// Type to read storage state after the transaction execution + type Post: StorageRead<'view, PrefixIter = Self::PrefixIter>; + + /// Read storage state before the transaction execution + fn pre(&'view self) -> Self::Pre; + + /// Read storage state after the transaction execution + fn post(&'view self) -> Self::Post; + + /// Storage read temporary state Borsh encoded value (after tx execution). + /// It will try to read from only the write log and then decode it if + /// found. + fn read_temp<T: BorshDeserialize>( + &self, + key: &Key, + ) -> Result<Option<T>, storage_api::Error>; + + /// Storage read temporary state raw bytes (after tx execution). It will try + /// to read from only the write log. + fn read_bytes_temp( + &self, + key: &Key, + ) -> Result<Option<Vec<u8>>, storage_api::Error>; + + /// Getting the chain ID. + fn get_chain_id(&'view self) -> Result<String, storage_api::Error>; + + /// Getting the block height. The height is that of the block to which the + /// current transaction is being applied. + fn get_block_height(&'view self) + -> Result<BlockHeight, storage_api::Error>; + + /// Getting the block hash. The hash is that of the block to which the + /// current transaction is being applied. + fn get_block_hash(&'view self) -> Result<BlockHash, storage_api::Error>; + + /// Getting the block epoch. The epoch is that of the block to which the + /// current transaction is being applied. + fn get_block_epoch(&'view self) -> Result<Epoch, storage_api::Error>; + + /// Storage prefix iterator, ordered by storage keys. It will try to get an + /// iterator from the storage. + fn iter_prefix( + &'view self, + prefix: &Key, + ) -> Result<Self::PrefixIter, storage_api::Error>; + + /// Storage prefix iterator, reverse ordered by storage keys. It will try to + /// get an iterator from the storage. + fn rev_iter_prefix( + &self, + prefix: &Key, + ) -> Result<Self::PrefixIter, storage_api::Error>; + + /// Evaluate a validity predicate with given data. The address, changed + /// storage keys and verifiers will have the same values as the input to + /// caller's validity predicate. + /// + /// If the execution fails for whatever reason, this will return `false`. + /// Otherwise returns the result of evaluation. + fn eval( + &self, + vp_code: Vec<u8>, + input_data: Vec<u8>, + ) -> Result<bool, storage_api::Error>; + + /// Verify a transaction signature. The signature is expected to have been + /// produced on the encoded transaction [`crate::proto::Tx`] + /// using [`crate::proto::Tx::sign`]. + fn verify_tx_signature( + &self, + pk: &common::PublicKey, + sig: &common::Signature, + ) -> Result<bool, storage_api::Error>; + + /// Get the transaction code hash + fn get_tx_code_hash(&self) -> Result<Hash, storage_api::Error>; + + // ---- Methods below have default implementation via `pre/post` ---- + + /// Storage read prior state Borsh encoded value (before tx execution). It + /// will try to read from the storage and decode it if found. + fn read_pre<T: BorshDeserialize>( + &'view self, + key: &Key, + ) -> Result<Option<T>, storage_api::Error> { + self.pre().read(key) + } + + /// Storage read prior state raw bytes (before tx execution). It + /// will try to read from the storage. + fn read_bytes_pre( + &'view self, + key: &Key, + ) -> Result<Option<Vec<u8>>, storage_api::Error> { + self.pre().read_bytes(key) + } + + /// Storage read posterior state Borsh encoded value (after tx execution). + /// It will try to read from the write log first and if no entry found + /// then from the storage and then decode it if found. + fn read_post<T: BorshDeserialize>( + &'view self, + key: &Key, + ) -> Result<Option<T>, storage_api::Error> { + self.post().read(key) + } + + /// Storage read posterior state raw bytes (after tx execution). It will try + /// to read from the write log first and if no entry found then from the + /// storage. + fn read_bytes_post( + &'view self, + key: &Key, + ) -> Result<Option<Vec<u8>>, storage_api::Error> { + self.post().read_bytes(key) + } + + /// Storage `has_key` in prior state (before tx execution). It will try to + /// read from the storage. + fn has_key_pre( + &'view self, + key: &Key, + ) -> Result<bool, storage_api::Error> { + self.pre().has_key(key) + } + + /// Storage `has_key` in posterior state (after tx execution). It will try + /// to check the write log first and if no entry found then the storage. + fn has_key_post( + &'view self, + key: &Key, + ) -> Result<bool, storage_api::Error> { + self.post().has_key(key) + } + + /// Storage prefix iterator for prior state (before tx execution). It will + /// try to read from the storage. + fn iter_pre_next( + &'view self, + iter: &mut Self::PrefixIter, + ) -> Result<Option<(String, Vec<u8>)>, storage_api::Error> { + self.pre().iter_next(iter) + } + + /// Storage prefix iterator next for posterior state (after tx execution). + /// It will try to read from the write log first and if no entry found + /// then from the storage.
+ fn iter_post_next( + &'view self, + iter: &mut Self::PrefixIter, + ) -> Result<Option<(String, Vec<u8>)>, storage_api::Error> { + self.post().iter_next(iter) + } +} + /// These runtime errors will abort VP execution immediately #[allow(missing_docs)] #[derive(Error, Debug)] @@ -37,10 +201,10 @@ pub enum RuntimeError { } /// VP environment function result -pub type Result<T> = std::result::Result<T, RuntimeError>; +pub type EnvResult<T> = std::result::Result<T, RuntimeError>; /// Add a gas cost incurred in a validity predicate -pub fn add_gas(gas_meter: &mut VpGasMeter, used_gas: u64) -> Result<()> { +pub fn add_gas(gas_meter: &mut VpGasMeter, used_gas: u64) -> EnvResult<()> { let result = gas_meter.add(used_gas).map_err(RuntimeError::OutOfGas); if let Err(err) = &result { tracing::info!("Stopping VP execution because of gas error: {}", err); @@ -55,7 +219,7 @@ pub fn read_pre( storage: &Storage<DB, H>, write_log: &WriteLog, key: &Key, -) -> Result<Option<Vec<u8>>> +) -> EnvResult<Option<Vec<u8>>> where DB: storage::DB + for<'iter> storage::DBIter<'iter>, H: StorageHasher, @@ -96,7 +260,7 @@ pub fn read_post( storage: &Storage<DB, H>, write_log: &WriteLog, key: &Key, -) -> Result<Option<Vec<u8>>> +) -> EnvResult<Option<Vec<u8>>> where DB: storage::DB + for<'iter> storage::DBIter<'iter>, H: StorageHasher, @@ -137,7 +301,7 @@ pub fn read_temp( gas_meter: &mut VpGasMeter, write_log: &WriteLog, key: &Key, -) -> Result<Option<Vec<u8>>> { +) -> EnvResult<Option<Vec<u8>>> { // Try to read from the write log first let (log_val, gas) = write_log.read(key); add_gas(gas_meter, gas)?; @@ -156,7 +320,7 @@ pub fn has_key_pre( gas_meter: &mut VpGasMeter, storage: &Storage<DB, H>, key: &Key, -) -> Result<bool> +) -> EnvResult<bool> where DB: storage::DB + for<'iter> storage::DBIter<'iter>, H: StorageHasher, @@ -174,7 +338,7 @@ pub fn has_key_post( storage: &Storage<DB, H>, write_log: &WriteLog, key: &Key, -) -> Result<bool> +) -> EnvResult<bool> where DB: storage::DB + for<'iter> storage::DBIter<'iter>, H: StorageHasher, @@ -204,7 +368,7 @@ where pub fn get_chain_id( gas_meter: &mut VpGasMeter, storage: &Storage<DB, H>, -) -> Result<String> +) -> EnvResult<String> where DB: storage::DB + for<'iter> storage::DBIter<'iter>, H: StorageHasher, @@ -219,7 +383,7 @@ where pub fn get_block_height( gas_meter: &mut VpGasMeter, storage: &Storage<DB, H>, -) -> Result<BlockHeight> +) -> EnvResult<BlockHeight> where DB: storage::DB + for<'iter> storage::DBIter<'iter>, H: StorageHasher, @@ -234,7 +398,7 @@ where pub fn get_block_hash( gas_meter: &mut VpGasMeter, storage: &Storage<DB, H>, -) -> Result<BlockHash> +) -> EnvResult<BlockHash> where DB: storage::DB + for<'iter> storage::DBIter<'iter>, H: StorageHasher, @@ -246,7 +410,10 @@ where /// Getting the transaction code hash. The hash is that of the transaction /// that is currently being applied. -pub fn get_tx_code_hash(gas_meter: &mut VpGasMeter, tx: &Tx) -> Result<Hash> { +pub fn get_tx_code_hash( + gas_meter: &mut VpGasMeter, + tx: &Tx, +) -> EnvResult<Hash> { let hash = Hash(tx.code_hash()); add_gas(gas_meter, MIN_STORAGE_GAS)?; Ok(hash) @@ -257,7 +424,7 @@ pub fn get_block_epoch( gas_meter: &mut VpGasMeter, storage: &Storage<DB, H>, -) -> Result<Epoch> +) -> EnvResult<Epoch> where DB: storage::DB + for<'iter> storage::DBIter<'iter>, H: StorageHasher, @@ -267,12 +434,13 @@ where Ok(epoch) }
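With `pre()`/`post()` both returning `StorageRead` implementations, validation logic can be written once and pointed at either state. A rough sketch; the key and the `token::Amount` value type are illustrative assumptions:

// Sketch only: compare a Borsh-encoded balance across the tx boundary.
fn balance_unchanged<'view, ENV: VpEnv<'view>>(
    env: &'view ENV,
    key: &Key,
) -> Result<bool, storage_api::Error> {
    let pre: Option<token::Amount> = env.pre().read(key)?;
    let post: Option<token::Amount> = env.post().read(key)?;
    Ok(pre == post)
}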
-/// Storage prefix iterator. It will try to get an iterator from the storage. +/// Storage prefix iterator, ordered by storage keys. It will try to get an +/// iterator from the storage. pub fn iter_prefix<'a, DB, H>( gas_meter: &mut VpGasMeter, storage: &'a Storage<DB, H>, prefix: &Key, -) -> Result<<DB as storage::DBIter<'a>>::PrefixIter> +) -> EnvResult<<DB as storage::DBIter<'a>>::PrefixIter> where DB: storage::DB + for<'iter> storage::DBIter<'iter>, H: StorageHasher, @@ -282,12 +450,28 @@ where Ok(iter) } +/// Storage prefix iterator, reverse ordered by storage keys. It will try to get +/// an iterator from the storage. +pub fn rev_iter_prefix<'a, DB, H>( + gas_meter: &mut VpGasMeter, + storage: &'a Storage<DB, H>, + prefix: &Key, +) -> EnvResult<<DB as storage::DBIter<'a>>::PrefixIter> +where + DB: storage::DB + for<'iter> storage::DBIter<'iter>, + H: StorageHasher, +{ + let (iter, gas) = storage.rev_iter_prefix(prefix); + add_gas(gas_meter, gas)?; + Ok(iter) +} + /// Storage prefix iterator for prior state (before tx execution). It will try /// to read from the storage. pub fn iter_pre_next<DB>( gas_meter: &mut VpGasMeter, iter: &mut <DB as storage::DBIter<'_>>::PrefixIter, -) -> Result<Option<(String, Vec<u8>)>> +) -> EnvResult<Option<(String, Vec<u8>)>> where DB: storage::DB + for<'iter> storage::DBIter<'iter>, { @@ -305,7 +489,7 @@ pub fn iter_post_next<DB>( gas_meter: &mut VpGasMeter, write_log: &WriteLog, iter: &mut <DB as storage::DBIter<'_>>::PrefixIter, -) -> Result<Option<(String, Vec<u8>)>> +) -> EnvResult<Option<(String, Vec<u8>)>> where DB: storage::DB + for<'iter> storage::DBIter<'iter>, { diff --git a/shared/src/proto/mod.rs b/shared/src/proto/mod.rs index aa971f0b96..1ee5da63c8 100644 --- a/shared/src/proto/mod.rs +++ b/shared/src/proto/mod.rs @@ -9,6 +9,7 @@ pub use types::{ #[cfg(test)] mod tests { + use data_encoding::HEXLOWER; use generated::types::Tx; use prost::Message; @@ -23,8 +24,8 @@ mod tests { }; let mut tx_bytes = vec![]; tx.encode(&mut tx_bytes).unwrap(); - let tx_hex = hex::encode(tx_bytes); - let tx_from_hex = hex::decode(tx_hex).unwrap(); + let tx_hex = HEXLOWER.encode(&tx_bytes); + let tx_from_hex = HEXLOWER.decode(tx_hex.as_ref()).unwrap(); let tx_from_bytes = Tx::decode(&tx_from_hex[..]).unwrap(); assert_eq!(tx, tx_from_bytes); } diff --git a/shared/src/proto/types.rs b/shared/src/proto/types.rs index 7a936dd58f..6c6aa23234 100644 --- a/shared/src/proto/types.rs +++ b/shared/src/proto/types.rs @@ -4,6 +4,7 @@ use std::fmt::Display; use std::hash::{Hash, Hasher}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use data_encoding::HEXLOWER; use prost::Message; use serde::{Deserialize, Serialize}; use thiserror::Error; @@ -374,7 +375,7 @@ impl<T: Into<Vec<u8>>> From<T> for IntentId { impl Display for IntentId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", hex::encode(&self.0)) + write!(f, "{}", HEXLOWER.encode(&self.0)) } } diff --git a/shared/src/types/key/common.rs b/shared/src/types/key/common.rs index 3cdec73bb9..e928579367 100644 --- a/shared/src/types/key/common.rs +++ b/shared/src/types/key/common.rs @@ -4,6 +4,7 @@ use std::fmt::Display; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use data_encoding::HEXLOWER; #[cfg(feature = "rand")] use rand::{CryptoRng, RngCore}; use serde::{Deserialize, Serialize}; @@ -67,7 +68,7 @@ impl super::PublicKey for PublicKey { impl Display for PublicKey { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", hex::encode(&self.try_to_vec().unwrap())) + write!(f, "{}", HEXLOWER.encode(&self.try_to_vec().unwrap())) } } @@ -75,7 +76,9 @@ impl FromStr for PublicKey { type Err = ParsePublicKeyError; fn from_str(str: &str) -> Result<Self, Self::Err> { - let vec = hex::decode(str).map_err(ParsePublicKeyError::InvalidHex)?; + let vec = HEXLOWER + .decode(str.as_ref()) + .map_err(ParsePublicKeyError::InvalidHex)?; Self::try_from_slice(vec.as_slice())
.map_err(ParsePublicKeyError::InvalidEncoding) } @@ -175,7 +178,7 @@ impl RefTo<PublicKey> for SecretKey { impl Display for SecretKey { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", hex::encode(&self.try_to_vec().unwrap())) + write!(f, "{}", HEXLOWER.encode(&self.try_to_vec().unwrap())) } } @@ -183,7 +186,9 @@ impl FromStr for SecretKey { type Err = ParseSecretKeyError; fn from_str(str: &str) -> Result<Self, Self::Err> { - let vec = hex::decode(str).map_err(ParseSecretKeyError::InvalidHex)?; + let vec = HEXLOWER + .decode(str.as_ref()) + .map_err(ParseSecretKeyError::InvalidHex)?; Self::try_from_slice(vec.as_slice()) .map_err(ParseSecretKeyError::InvalidEncoding) } diff --git a/shared/src/types/key/dkg_session_keys.rs b/shared/src/types/key/dkg_session_keys.rs index 26f6fffa00..f2cafb639c 100644 --- a/shared/src/types/key/dkg_session_keys.rs +++ b/shared/src/types/key/dkg_session_keys.rs @@ -7,6 +7,7 @@ use std::str::FromStr; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use data_encoding::HEXLOWER; use serde::{Deserialize, Serialize}; use crate::types::address::Address; @@ -142,7 +143,7 @@ impl Display for DkgPublicKey { let vec = self .try_to_vec() .expect("Encoding public key shouldn't fail"); - write!(f, "{}", hex::encode(&vec)) + write!(f, "{}", HEXLOWER.encode(&vec)) } } @@ -150,7 +151,9 @@ impl FromStr for DkgPublicKey { type Err = ParsePublicKeyError; fn from_str(s: &str) -> Result<Self, Self::Err> { - let vec = hex::decode(s).map_err(ParsePublicKeyError::InvalidHex)?; + let vec = HEXLOWER + .decode(s.as_ref()) + .map_err(ParsePublicKeyError::InvalidHex)?; BorshDeserialize::try_from_slice(&vec) .map_err(ParsePublicKeyError::InvalidEncoding) } diff --git a/shared/src/types/key/ed25519.rs b/shared/src/types/key/ed25519.rs index dbcf9fe04c..052461de9a 100644 --- a/shared/src/types/key/ed25519.rs +++ b/shared/src/types/key/ed25519.rs @@ -6,6 +6,7 @@ use std::io::Write; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use data_encoding::HEXLOWER; #[cfg(feature = "rand")] use rand::{CryptoRng, RngCore}; use serde::{Deserialize, Serialize}; @@ -106,7 +107,7 @@ impl Ord for PublicKey { impl Display for PublicKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", hex::encode(&self.0.to_bytes())) + write!(f, "{}", HEXLOWER.encode(&self.0.to_bytes())) } } @@ -114,7 +115,9 @@ impl FromStr for PublicKey { type Err = ParsePublicKeyError; fn from_str(s: &str) -> Result<Self, Self::Err> { - let vec = hex::decode(s).map_err(ParsePublicKeyError::InvalidHex)?; + let vec = HEXLOWER + .decode(s.as_ref()) + .map_err(ParsePublicKeyError::InvalidHex)?; BorshDeserialize::try_from_slice(&vec) .map_err(ParsePublicKeyError::InvalidEncoding) } @@ -203,7 +206,7 @@ impl BorshSchema for SecretKey { impl Display for SecretKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", hex::encode(&self.0.to_bytes())) + write!(f, "{}", HEXLOWER.encode(&self.0.to_bytes())) } } @@ -211,7 +214,9 @@ impl FromStr for SecretKey { type Err = ParseSecretKeyError; fn from_str(s: &str) -> Result<Self, Self::Err> { - let vec = hex::decode(s).map_err(ParseSecretKeyError::InvalidHex)?; + let vec = HEXLOWER + .decode(s.as_ref()) + .map_err(ParseSecretKeyError::InvalidHex)?; BorshDeserialize::try_from_slice(&vec) .map_err(ParseSecretKeyError::InvalidEncoding) } diff --git a/shared/src/types/key/mod.rs b/shared/src/types/key/mod.rs index 666cc3fb5e..b010cead5d 100644 ---
a/shared/src/types/key/mod.rs +++ b/shared/src/types/key/mod.rs @@ -80,7 +80,7 @@ pub enum VerifySigError { #[derive(Error, Debug)] pub enum ParsePublicKeyError { #[error("Invalid public key hex: {0}")] - InvalidHex(hex::FromHexError), + InvalidHex(data_encoding::DecodeError), #[error("Invalid public key encoding: {0}")] InvalidEncoding(std::io::Error), #[error("Parsed public key does not belong to desired scheme")] @@ -91,7 +91,7 @@ pub enum ParseSignatureError { #[error("Invalid signature hex: {0}")] - InvalidHex(hex::FromHexError), + InvalidHex(data_encoding::DecodeError), #[error("Invalid signature encoding: {0}")] InvalidEncoding(std::io::Error), #[error("Parsed signature does not belong to desired scheme")] @@ -102,7 +102,7 @@ pub enum ParseSecretKeyError { #[error("Invalid secret key hex: {0}")] - InvalidHex(hex::FromHexError), + InvalidHex(data_encoding::DecodeError), #[error("Invalid secret key encoding: {0}")] InvalidEncoding(std::io::Error), #[error("Parsed secret key does not belong to desired scheme")] diff --git a/shared/src/types/key/secp256k1.rs b/shared/src/types/key/secp256k1.rs index 99bcbb3f67..889b4de258 100644 --- a/shared/src/types/key/secp256k1.rs +++ b/shared/src/types/key/secp256k1.rs @@ -7,6 +7,7 @@ use std::io::{ErrorKind, Write}; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use data_encoding::HEXLOWER; #[cfg(feature = "rand")] use rand::{CryptoRng, RngCore}; use serde::de::{Error, SeqAccess, Visitor}; @@ -113,7 +114,7 @@ impl Ord for PublicKey { impl Display for PublicKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", hex::encode(&self.0.serialize_compressed())) + write!(f, "{}", HEXLOWER.encode(&self.0.serialize_compressed())) } } @@ -121,7 +122,9 @@ impl FromStr for PublicKey { type Err = ParsePublicKeyError; fn from_str(s: &str) -> Result<Self, Self::Err> { - let vec = hex::decode(s).map_err(ParsePublicKeyError::InvalidHex)?; + let vec = HEXLOWER + .decode(s.as_bytes()) + .map_err(ParsePublicKeyError::InvalidHex)?; BorshDeserialize::try_from_slice(&vec) .map_err(ParsePublicKeyError::InvalidEncoding) } @@ -226,7 +229,7 @@ impl BorshSchema for SecretKey { impl Display for SecretKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", hex::encode(&self.0.serialize())) + write!(f, "{}", HEXLOWER.encode(&self.0.serialize())) } } @@ -234,7 +237,9 @@ impl FromStr for SecretKey { type Err = ParseSecretKeyError; fn from_str(s: &str) -> Result<Self, Self::Err> { - let vec = hex::decode(s).map_err(ParseSecretKeyError::InvalidHex)?; + let vec = HEXLOWER + .decode(s.as_bytes()) + .map_err(ParseSecretKeyError::InvalidHex)?; BorshDeserialize::try_from_slice(&vec) .map_err(ParseSecretKeyError::InvalidEncoding) } diff --git a/shared/src/types/storage.rs b/shared/src/types/storage.rs index fc87bc8d51..c9f87908fe 100644 --- a/shared/src/types/storage.rs +++ b/shared/src/types/storage.rs @@ -6,6 +6,7 @@ use std::ops::{Add, Div, Mul, Rem, Sub}; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use data_encoding::BASE32HEX_NOPAD; use serde::{Deserialize, Serialize}; use thiserror::Error; @@ -27,6 +28,8 @@ pub enum Error { ParseAddressFromKey, #[error("Reserved prefix or string is specified: {0}")] InvalidKeySeg(String), + #[error("Error parsing key segment {0}")] + ParseKeySeg(String), } /// Result for functions that may fail @@ -193,6 +196,7 @@ impl Header
{ BorshDeserialize, BorshSchema, Debug, + Default, Eq, PartialEq, Ord, @@ -206,6 +210,13 @@ pub struct Key { pub segments: Vec<DbKeySeg>, } +/// A [`Key`] made of borrowed key segments [`DbKeySeg`]. +#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct KeyRef<'a> { + /// Reference of key segments + pub segments: &'a [DbKeySeg], +} + impl From<DbKeySeg> for Key { fn from(seg: DbKeySeg) -> Self { Self { @@ -274,6 +285,23 @@ impl Key { self.len() == 0 } + /// Returns the first segment of the key, or `None` if it is empty. + pub fn first(&self) -> Option<&DbKeySeg> { + self.segments.first() + } + + /// Returns the last segment of the key, or `None` if it is empty. + pub fn last(&self) -> Option<&DbKeySeg> { + self.segments.last() + } + + /// Returns the prefix before the last segment and the last segment of the + /// key, or `None` if it is empty. + pub fn split_last(&self) -> Option<(KeyRef<'_>, &DbKeySeg)> { + let (last, prefix) = self.segments.split_last()?; + Some((KeyRef { segments: prefix }, last)) + } + /// Returns a key of the validity predicate of the given address /// Only this function can push "?" segment for validity predicate pub fn validity_predicate(addr: &Address) -> Self { @@ -317,8 +345,11 @@ impl Key { .split_off(2) .join(&KEY_SEGMENT_SEPARATOR.to_string()), ) - .map_err(|e| Error::Temporary { - error: format!("Cannot parse key segments {}: {}", db_key, e), + .map_err(|e| { + Error::ParseKeySeg(format!( + "Cannot parse key segments {}: {}", + db_key, e + )) })?, }; Ok(key) @@ -346,6 +377,28 @@ impl Key { }), } } + + /// Check if the key begins with the given prefix and returns: + /// - `Some(Some(suffix))` the suffix after the matched prefix, if any, or + /// - `Some(None)` if the prefix is matched, but it has no suffix, or + /// - `None` if it doesn't match + pub fn split_prefix(&self, prefix: &Self) -> Option<Option<Key>> { + if self.segments.len() < prefix.segments.len() { + return None; + } else if self == prefix { + return Some(None); + } + // This is safe, because we checked above that the number of segments + // in self >= that in prefix + let (self_prefix, rest) = self.segments.split_at(prefix.segments.len()); + if self_prefix == prefix.segments { + Some(Some(Key { + segments: rest.to_vec(), + })) + } else { + None + } + } } impl Display for Key { @@ -360,6 +413,20 @@ impl Display for Key { } } +impl KeyRef<'_> { + /// Check if [`KeyRef`] is equal to a [`Key`]. + pub fn eq_owned(&self, other: &Key) -> bool { + self.segments == other.segments + } + + /// Returns the prefix before the last segment and the last segment of the + /// key, or `None` if it is empty. + pub fn split_last(&self) -> Option<(KeyRef<'_>, &DbKeySeg)> { + let (last, prefix) = self.segments.split_last()?; + Some((KeyRef { segments: prefix }, last)) + } +} + // TODO use std::convert::{TryFrom, Into}?
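A quick illustrative sketch of the three `split_prefix` cases; the string segments and the assumption that `Key::parse`/`Key::push` succeed here are for demonstration only:

// Sketch only: match a storage key against a prefix.
let prefix = Key::parse("balance".to_owned()).unwrap();
let key = prefix.push(&"alice".to_owned()).unwrap();
match key.split_prefix(&prefix) {
    Some(Some(suffix)) => println!("suffix after prefix: {}", suffix),
    Some(None) => println!("key is exactly the prefix"),
    None => println!("prefix doesn't match"),
}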
/// Represents a segment in a path that may be used as a database key pub trait KeySeg { @@ -446,14 +513,17 @@ impl KeySeg for String { impl KeySeg for BlockHeight { fn parse(string: String) -> Result<Self> { - let h = string.parse::<u64>().map_err(|e| Error::Temporary { - error: format!("Unexpected height value {}, {}", string, e), + let h = string.parse::<u64>().map_err(|e| { + Error::ParseKeySeg(format!( + "Unexpected height value {}, {}", + string, e + )) })?; Ok(BlockHeight(h)) } fn raw(&self) -> String { - format!("{}", self.0) + self.0.raw() } fn to_db_key(&self) -> DbKeySeg { @@ -481,6 +551,67 @@ impl KeySeg for Address { } } +/// Implement [`KeySeg`] for a type via base32hex of its BE bytes (using +/// `to_be_bytes()` and `from_be_bytes` methods) that maintains sort order of +/// the original data. +// TODO this could be a bit more efficient without the string conversion (atm +// with base32hex), if we can use bytes for storage key directly (which we can +// with RocksDB, but atm, we're calling `to_string()` using the custom `Display` +// impl from here) +macro_rules! impl_int_key_seg { + ($unsigned:ty, $signed:ty, $len:literal) => { + impl KeySeg for $unsigned { + fn parse(string: String) -> Result<Self> { + let bytes = + BASE32HEX_NOPAD.decode(string.as_ref()).map_err(|err| { + Error::ParseKeySeg(format!( + "Failed parsing {} with {}", + string, err + )) + })?; + let mut fixed_bytes = [0; $len]; + fixed_bytes.copy_from_slice(&bytes); + Ok(<$unsigned>::from_be_bytes(fixed_bytes)) + } + + fn raw(&self) -> String { + BASE32HEX_NOPAD.encode(&self.to_be_bytes()) + } + + fn to_db_key(&self) -> DbKeySeg { + DbKeySeg::StringSeg(self.raw()) + } + } + + impl KeySeg for $signed { + fn parse(string: String) -> Result<Self> { + // get signed int from an unsigned int complemented with a min + // value + let complemented = <$unsigned>::parse(string)?; + let signed = (complemented as $signed) ^ <$signed>::MIN; + Ok(signed) + } + + fn raw(&self) -> String { + // signed int is converted to unsigned int that preserves the + // order by complementing it with a min value + let complemented = (*self ^ <$signed>::MIN) as $unsigned; + complemented.raw() + } + + fn to_db_key(&self) -> DbKeySeg { + DbKeySeg::StringSeg(self.raw()) + } + } + }; +} + +impl_int_key_seg!(u8, i8, 1); +impl_int_key_seg!(u16, i16, 2); +impl_int_key_seg!(u32, i32, 4); +impl_int_key_seg!(u64, i64, 8); +impl_int_key_seg!(u128, i128, 16); +
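A short sketch of why the signed-integer complement trick above preserves ordering, assuming `KeySeg` is in scope for `raw()`:

// Sketch only: XOR with MIN flips the sign bit, mapping i64 monotonically
// onto u64, so the big-endian bytes (and their fixed-width base32hex
// encoding) sort the same way as the original signed values.
let (a, b): (i64, i64) = (-5, 3);
let ua = (a ^ i64::MIN) as u64; // 0x7FFF_FFFF_FFFF_FFFB
let ub = (b ^ i64::MIN) as u64; // 0x8000_0000_0000_0003
assert!(ua < ub);
assert!(a.raw() < b.raw()); // storage-key strings sort consistently too

 /// Epoch identifier. Epochs are identified by consecutive numbers.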
#[derive( Clone, diff --git a/shared/src/vm/host_env.rs b/shared/src/vm/host_env.rs index ac9f89c31e..533a05d94e 100644 --- a/shared/src/vm/host_env.rs +++ b/shared/src/vm/host_env.rs @@ -68,7 +68,7 @@ pub enum TxRuntimeError { type TxResult = std::result::Result; /// A transaction's host environment -pub struct TxEnv<'a, MEM, DB, H, CA> +pub struct TxVmEnv<'a, MEM, DB, H, CA> where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -112,7 +112,7 @@ where pub cache_access: std::marker::PhantomData, } -impl<'a, MEM, DB, H, CA> TxEnv<'a, MEM, DB, H, CA> +impl<'a, MEM, DB, H, CA> TxVmEnv<'a, MEM, DB, H, CA> where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -167,7 +167,7 @@ where } } -impl Clone for TxEnv<'_, MEM, DB, H, CA> +impl Clone for TxVmEnv<'_, MEM, DB, H, CA> where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -207,7 +207,7 @@ where } /// A validity predicate's host environment -pub struct VpEnv<'a, MEM, DB, H, EVAL, CA> +pub struct VpVmEnv<'a, MEM, DB, H, EVAL, CA> where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -282,7 +282,7 @@ pub trait VpEvaluator { ) -> HostEnvResult; } -impl<'a, MEM, DB, H, EVAL, CA> VpEnv<'a, MEM, DB, H, EVAL, CA> +impl<'a, MEM, DB, H, EVAL, CA> VpVmEnv<'a, MEM, DB, H, EVAL, CA> where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -331,7 +331,7 @@ where } } -impl Clone for VpEnv<'_, MEM, DB, H, EVAL, CA> +impl Clone for VpVmEnv<'_, MEM, DB, H, EVAL, CA> where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -435,7 +435,7 @@ where /// Called from tx wasm to request to use the given gas amount pub fn tx_charge_gas( - env: &TxEnv, + env: &TxVmEnv, used_gas: i32, ) -> TxResult<()> where @@ -454,7 +454,7 @@ where /// Add a gas cost incured in a transaction pub fn tx_add_gas( - env: &TxEnv, + env: &TxVmEnv, used_gas: u64, ) -> TxResult<()> where @@ -477,9 +477,9 @@ where /// Called from VP wasm to request to use the given gas amount pub fn vp_charge_gas( - env: &VpEnv, + env: &VpVmEnv, used_gas: i32, -) -> vp_env::Result<()> +) -> vp_env::EnvResult<()> where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -499,7 +499,7 @@ where /// Storage `has_key` function exposed to the wasm VM Tx environment. It will /// try to check the write log first and if no entry found then the storage. pub fn tx_has_key( - env: &TxEnv, + env: &TxVmEnv, key_ptr: u64, key_len: u64, ) -> TxResult @@ -555,7 +555,7 @@ where /// Returns `-1` when the key is not present, or the length of the data when /// the key is present (the length may be `0`). pub fn tx_read( - env: &TxEnv, + env: &TxVmEnv, key_ptr: u64, key_len: u64, ) -> TxResult @@ -645,7 +645,7 @@ where /// any) back to the guest, the second step reads the value from cache into a /// pre-allocated buffer with the obtained size. pub fn tx_result_buffer( - env: &TxEnv, + env: &TxVmEnv, result_ptr: u64, ) -> TxResult<()> where @@ -665,9 +665,9 @@ where /// Storage prefix iterator function exposed to the wasm VM Tx environment. /// It will try to get an iterator from the storage and return the corresponding -/// ID of the iterator. +/// ID of the iterator, ordered by storage keys. pub fn tx_iter_prefix( - env: &TxEnv, + env: &TxVmEnv, prefix_ptr: u64, prefix_len: u64, ) -> TxResult @@ -695,6 +695,38 @@ where Ok(iterators.insert(iter).id()) } +/// Storage prefix iterator function exposed to the wasm VM Tx environment. 
+/// It will try to get an iterator from the storage and return the corresponding +/// ID of the iterator, reverse ordered by storage keys. +pub fn tx_rev_iter_prefix( + env: &TxVmEnv, + prefix_ptr: u64, + prefix_len: u64, +) -> TxResult +where + MEM: VmMemory, + DB: storage::DB + for<'iter> storage::DBIter<'iter>, + H: StorageHasher, + CA: WasmCacheAccess, +{ + let (prefix, gas) = env + .memory + .read_string(prefix_ptr, prefix_len as _) + .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; + tx_add_gas(env, gas)?; + + tracing::debug!("tx_rev_iter_prefix {}, prefix {}", prefix, prefix_ptr); + + let prefix = + Key::parse(prefix).map_err(TxRuntimeError::StorageDataError)?; + + let storage = unsafe { env.ctx.storage.get() }; + let iterators = unsafe { env.ctx.iterators.get() }; + let (iter, gas) = storage.rev_iter_prefix(&prefix); + tx_add_gas(env, gas)?; + Ok(iterators.insert(iter).id()) +} + /// Storage prefix iterator next function exposed to the wasm VM Tx environment. /// It will try to read from the write log first and if no entry found then from /// the storage. @@ -702,7 +734,7 @@ where /// Returns `-1` when the key is not present, or the length of the data when /// the key is present (the length may be `0`). pub fn tx_iter_next( - env: &TxEnv, + env: &TxVmEnv, iter_id: u64, ) -> TxResult where @@ -781,7 +813,7 @@ where /// Storage write function exposed to the wasm VM Tx environment. The given /// key/value will be written to the write log. pub fn tx_write( - env: &TxEnv, + env: &TxVmEnv, key_ptr: u64, key_len: u64, val_ptr: u64, @@ -822,7 +854,7 @@ where /// given key/value will be written only to the write log. It will be never /// written to the storage. pub fn tx_write_temp( - env: &TxEnv, + env: &TxVmEnv, key_ptr: u64, key_len: u64, val_ptr: u64, @@ -860,7 +892,7 @@ where } fn check_address_existence( - env: &TxEnv, + env: &TxVmEnv, key: &Key, ) -> TxResult<()> where @@ -904,7 +936,7 @@ where /// Storage delete function exposed to the wasm VM Tx environment. The given /// key/value will be written as deleted to the write log. pub fn tx_delete( - env: &TxEnv, + env: &TxVmEnv, key_ptr: u64, key_len: u64, ) -> TxResult<()> @@ -938,7 +970,7 @@ where /// Emitting an IBC event function exposed to the wasm VM Tx environment. /// The given IBC event will be set to the write log. pub fn tx_emit_ibc_event( - env: &TxEnv, + env: &TxVmEnv, event_ptr: u64, event_len: u64, ) -> TxResult<()> @@ -966,10 +998,10 @@ where /// Returns `-1` when the key is not present, or the length of the data when /// the key is present (the length may be `0`). pub fn vp_read_pre( - env: &VpEnv, + env: &VpVmEnv, key_ptr: u64, key_len: u64, -) -> vp_env::Result +) -> vp_env::EnvResult where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -1017,10 +1049,10 @@ where /// Returns `-1` when the key is not present, or the length of the data when /// the key is present (the length may be `0`). pub fn vp_read_post( - env: &VpEnv, + env: &VpVmEnv, key_ptr: u64, key_len: u64, -) -> vp_env::Result +) -> vp_env::EnvResult where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -1063,10 +1095,10 @@ where /// Returns `-1` when the key is not present, or the length of the data when /// the key is present (the length may be `0`). 
pub fn vp_read_temp( - env: &VpEnv, + env: &VpVmEnv, key_ptr: u64, key_len: u64, -) -> vp_env::Result +) -> vp_env::EnvResult where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -1111,9 +1143,9 @@ where /// any) back to the guest, the second step reads the value from cache into a /// pre-allocated buffer with the obtained size. pub fn vp_result_buffer( - env: &VpEnv, + env: &VpVmEnv, result_ptr: u64, -) -> vp_env::Result<()> +) -> vp_env::EnvResult<()> where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -1134,10 +1166,10 @@ where /// Storage `has_key` in prior state (before tx execution) function exposed to /// the wasm VM VP environment. It will try to read from the storage. pub fn vp_has_key_pre( - env: &VpEnv, + env: &VpVmEnv, key_ptr: u64, key_len: u64, -) -> vp_env::Result +) -> vp_env::EnvResult where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -1165,10 +1197,10 @@ where /// to the wasm VM VP environment. It will try to check the write log first and /// if no entry found then the storage. pub fn vp_has_key_post( - env: &VpEnv, + env: &VpVmEnv, key_ptr: u64, key_len: u64, -) -> vp_env::Result +) -> vp_env::EnvResult where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -1195,12 +1227,12 @@ where /// Storage prefix iterator function exposed to the wasm VM VP environment. /// It will try to get an iterator from the storage and return the corresponding -/// ID of the iterator. +/// ID of the iterator, ordered by storage keys. pub fn vp_iter_prefix( - env: &VpEnv, + env: &VpVmEnv, prefix_ptr: u64, prefix_len: u64, -) -> vp_env::Result +) -> vp_env::EnvResult where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -1225,15 +1257,47 @@ where Ok(iterators.insert(iter).id()) } +/// Storage prefix iterator function exposed to the wasm VM VP environment. +/// It will try to get an iterator from the storage and return the corresponding +/// ID of the iterator, reverse ordered by storage keys. +pub fn vp_rev_iter_prefix( + env: &VpVmEnv, + prefix_ptr: u64, + prefix_len: u64, +) -> vp_env::EnvResult +where + MEM: VmMemory, + DB: storage::DB + for<'iter> storage::DBIter<'iter>, + H: StorageHasher, + EVAL: VpEvaluator, + CA: WasmCacheAccess, +{ + let (prefix, gas) = env + .memory + .read_string(prefix_ptr, prefix_len as _) + .map_err(|e| vp_env::RuntimeError::MemoryError(Box::new(e)))?; + let gas_meter = unsafe { env.ctx.gas_meter.get() }; + vp_env::add_gas(gas_meter, gas)?; + + let prefix = + Key::parse(prefix).map_err(vp_env::RuntimeError::StorageDataError)?; + tracing::debug!("vp_rev_iter_prefix {}", prefix); + + let storage = unsafe { env.ctx.storage.get() }; + let iter = vp_env::rev_iter_prefix(gas_meter, storage, &prefix)?; + let iterators = unsafe { env.ctx.iterators.get() }; + Ok(iterators.insert(iter).id()) +} + /// Storage prefix iterator for prior state (before tx execution) function /// exposed to the wasm VM VP environment. It will try to read from the storage. /// /// Returns `-1` when the key is not present, or the length of the data when /// the key is present (the length may be `0`). pub fn vp_iter_pre_next( - env: &VpEnv, + env: &VpVmEnv, iter_id: u64, -) -> vp_env::Result +) -> vp_env::EnvResult where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -1271,9 +1335,9 @@ where /// Returns `-1` when the key is not present, or the length of the data when /// the key is present (the length may be `0`). 
pub fn vp_iter_post_next( - env: &VpEnv, + env: &VpVmEnv, iter_id: u64, -) -> vp_env::Result +) -> vp_env::EnvResult where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -1308,7 +1372,7 @@ where /// Verifier insertion function exposed to the wasm VM Tx environment. pub fn tx_insert_verifier( - env: &TxEnv, + env: &TxVmEnv, addr_ptr: u64, addr_len: u64, ) -> TxResult<()> @@ -1335,7 +1399,7 @@ where /// Update a validity predicate function exposed to the wasm VM Tx environment pub fn tx_update_validity_predicate( - env: &TxEnv, + env: &TxVmEnv, addr_ptr: u64, addr_len: u64, code_ptr: u64, @@ -1376,7 +1440,7 @@ where /// Initialize a new account established address. pub fn tx_init_account( - env: &TxEnv, + env: &TxVmEnv, code_ptr: u64, code_len: u64, result_ptr: u64, @@ -1419,7 +1483,7 @@ where /// Getting the chain ID function exposed to the wasm VM Tx environment. pub fn tx_get_chain_id( - env: &TxEnv, + env: &TxVmEnv, result_ptr: u64, ) -> TxResult<()> where @@ -1442,7 +1506,7 @@ where /// environment. The height is that of the block to which the current /// transaction is being applied. pub fn tx_get_block_height( - env: &TxEnv, + env: &TxVmEnv, ) -> TxResult where MEM: VmMemory, @@ -1459,7 +1523,7 @@ where /// Getting the block hash function exposed to the wasm VM Tx environment. The /// hash is that of the block to which the current transaction is being applied. pub fn tx_get_block_hash( - env: &TxEnv, + env: &TxVmEnv, result_ptr: u64, ) -> TxResult<()> where @@ -1482,7 +1546,7 @@ where /// environment. The epoch is that of the block to which the current /// transaction is being applied. pub fn tx_get_block_epoch( - env: &TxEnv, + env: &TxVmEnv, ) -> TxResult where MEM: VmMemory, @@ -1498,9 +1562,9 @@ where /// Getting the chain ID function exposed to the wasm VM VP environment. pub fn vp_get_chain_id( - env: &VpEnv, + env: &VpVmEnv, result_ptr: u64, -) -> vp_env::Result<()> +) -> vp_env::EnvResult<()> where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -1522,8 +1586,8 @@ where /// environment. The height is that of the block to which the current /// transaction is being applied. pub fn vp_get_block_height( - env: &VpEnv, -) -> vp_env::Result + env: &VpVmEnv, +) -> vp_env::EnvResult where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -1541,7 +1605,7 @@ where /// environment. The time is that of the block header to which the current /// transaction is being applied. pub fn tx_get_block_time( - env: &TxEnv, + env: &TxVmEnv, ) -> TxResult where MEM: VmMemory, @@ -1576,9 +1640,9 @@ where /// Getting the block hash function exposed to the wasm VM VP environment. The /// hash is that of the block to which the current transaction is being applied. pub fn vp_get_block_hash( - env: &VpEnv, + env: &VpVmEnv, result_ptr: u64, -) -> vp_env::Result<()> +) -> vp_env::EnvResult<()> where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -1598,9 +1662,9 @@ where /// Getting the transaction hash function exposed to the wasm VM VP environment. pub fn vp_get_tx_code_hash( - env: &VpEnv, + env: &VpVmEnv, result_ptr: u64, -) -> vp_env::Result<()> +) -> vp_env::EnvResult<()> where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -1622,8 +1686,8 @@ where /// environment. The epoch is that of the block to which the current /// transaction is being applied. 
pub fn vp_get_block_epoch( - env: &VpEnv, -) -> vp_env::Result + env: &VpVmEnv, +) -> vp_env::EnvResult where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -1639,12 +1703,12 @@ where /// Verify a transaction signature. pub fn vp_verify_tx_signature( - env: &VpEnv, + env: &VpVmEnv, pk_ptr: u64, pk_len: u64, sig_ptr: u64, sig_len: u64, -) -> vp_env::Result +) -> vp_env::EnvResult where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -1678,7 +1742,7 @@ where /// printed at the [`tracing::Level::INFO`]. This function is for development /// only. pub fn tx_log_string( - env: &TxEnv, + env: &TxVmEnv, str_ptr: u64, str_len: u64, ) -> TxResult<()> @@ -1698,12 +1762,12 @@ where /// Evaluate a validity predicate with the given input data. pub fn vp_eval( - env: &VpEnv<'static, MEM, DB, H, EVAL, CA>, + env: &VpVmEnv<'static, MEM, DB, H, EVAL, CA>, vp_code_ptr: u64, vp_code_len: u64, input_data_ptr: u64, input_data_len: u64, -) -> vp_env::Result +) -> vp_env::EnvResult where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -1734,10 +1798,10 @@ where /// printed at the [`tracing::Level::INFO`]. This function is for development /// only. pub fn vp_log_string( - env: &VpEnv, + env: &VpVmEnv, str_ptr: u64, str_len: u64, -) -> vp_env::Result<()> +) -> vp_env::EnvResult<()> where MEM: VmMemory, DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -1773,13 +1837,13 @@ pub mod testing { result_buffer: &mut Option>, #[cfg(feature = "wasm-runtime")] vp_wasm_cache: &mut VpCache, #[cfg(feature = "wasm-runtime")] tx_wasm_cache: &mut TxCache, - ) -> TxEnv<'static, NativeMemory, DB, H, CA> + ) -> TxVmEnv<'static, NativeMemory, DB, H, CA> where DB: 'static + storage::DB + for<'iter> storage::DBIter<'iter>, H: StorageHasher, CA: WasmCacheAccess, { - TxEnv::new( + TxVmEnv::new( NativeMemory::default(), storage, write_log, @@ -1808,14 +1872,14 @@ pub mod testing { keys_changed: &BTreeSet, eval_runner: &EVAL, #[cfg(feature = "wasm-runtime")] vp_wasm_cache: &mut VpCache, - ) -> VpEnv<'static, NativeMemory, DB, H, EVAL, CA> + ) -> VpVmEnv<'static, NativeMemory, DB, H, EVAL, CA> where DB: 'static + storage::DB + for<'iter> storage::DBIter<'iter>, H: StorageHasher, EVAL: VpEvaluator, CA: WasmCacheAccess, { - VpEnv::new( + VpVmEnv::new( NativeMemory::default(), address, storage, diff --git a/shared/src/vm/wasm/host_env.rs b/shared/src/vm/wasm/host_env.rs index 3b6715f383..06d45fa4f9 100644 --- a/shared/src/vm/wasm/host_env.rs +++ b/shared/src/vm/wasm/host_env.rs @@ -9,11 +9,11 @@ use wasmer::{ }; use crate::ledger::storage::{self, StorageHasher}; -use crate::vm::host_env::{TxEnv, VpEnv, VpEvaluator}; +use crate::vm::host_env::{TxVmEnv, VpEvaluator, VpVmEnv}; use crate::vm::wasm::memory::WasmMemory; use crate::vm::{host_env, WasmCacheAccess}; -impl WasmerEnv for TxEnv<'_, WasmMemory, DB, H, CA> +impl WasmerEnv for TxVmEnv<'_, WasmMemory, DB, H, CA> where DB: storage::DB + for<'iter> storage::DBIter<'iter>, H: StorageHasher, @@ -27,7 +27,7 @@ where } } -impl WasmerEnv for VpEnv<'_, WasmMemory, DB, H, EVAL, CA> +impl WasmerEnv for VpVmEnv<'_, WasmMemory, DB, H, EVAL, CA> where DB: storage::DB + for<'iter> storage::DBIter<'iter>, H: StorageHasher, @@ -48,7 +48,7 @@ where pub fn tx_imports( wasm_store: &Store, initial_memory: Memory, - env: TxEnv<'static, WasmMemory, DB, H, CA>, + env: TxVmEnv<'static, WasmMemory, DB, H, CA>, ) -> ImportObject where DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -67,6 +67,7 @@ where "anoma_tx_write_temp" => 
Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_write_temp), "anoma_tx_delete" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_delete), "anoma_tx_iter_prefix" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_iter_prefix), + "anoma_tx_rev_iter_prefix" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_rev_iter_prefix), "anoma_tx_iter_next" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_iter_next), "anoma_tx_insert_verifier" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_insert_verifier), "anoma_tx_update_validity_predicate" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_update_validity_predicate), @@ -87,7 +88,7 @@ where pub fn vp_imports( wasm_store: &Store, initial_memory: Memory, - env: VpEnv<'static, WasmMemory, DB, H, EVAL, CA>, + env: VpVmEnv<'static, WasmMemory, DB, H, EVAL, CA>, ) -> ImportObject where DB: storage::DB + for<'iter> storage::DBIter<'iter>, @@ -107,6 +108,7 @@ where "anoma_vp_has_key_pre" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_has_key_pre), "anoma_vp_has_key_post" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_has_key_post), "anoma_vp_iter_prefix" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_iter_prefix), + "anoma_vp_rev_iter_prefix" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_rev_iter_prefix), "anoma_vp_iter_pre_next" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_iter_pre_next), "anoma_vp_iter_post_next" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_iter_post_next), "anoma_vp_get_chain_id" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_chain_id), diff --git a/shared/src/vm/wasm/run.rs b/shared/src/vm/wasm/run.rs index 75fbfb4add..d9977393d8 100644 --- a/shared/src/vm/wasm/run.rs +++ b/shared/src/vm/wasm/run.rs @@ -17,7 +17,7 @@ use crate::proto::Tx; use crate::types::address::Address; use crate::types::internal::HostEnvResult; use crate::types::storage::Key; -use crate::vm::host_env::{TxEnv, VpCtx, VpEnv, VpEvaluator}; +use crate::vm::host_env::{TxVmEnv, VpCtx, VpEvaluator, VpVmEnv}; use crate::vm::prefix_iter::PrefixIterators; use crate::vm::types::VpInput; use crate::vm::wasm::host_env::{tx_imports, vp_imports}; @@ -94,7 +94,7 @@ where let mut verifiers = BTreeSet::new(); let mut result_buffer: Option> = None; - let env = TxEnv::new( + let env = TxVmEnv::new( WasmMemory::default(), storage, write_log, @@ -189,7 +189,7 @@ where cache_access: PhantomData, }; - let env = VpEnv::new( + let env = VpVmEnv::new( WasmMemory::default(), address, storage, @@ -344,7 +344,7 @@ where let keys_changed = unsafe { ctx.keys_changed.get() }; let verifiers = unsafe { ctx.verifiers.get() }; let vp_wasm_cache = unsafe { ctx.vp_wasm_cache.get() }; - let env = VpEnv { + let env = VpVmEnv { memory: WasmMemory::default(), ctx, }; diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 4ae17e8fff..7c9127261e 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -13,7 +13,8 @@ wasm-runtime = ["namada/wasm-runtime"] [dependencies] namada = {path = "../shared", features = ["testing", "ibc-mocks"]} -namada_vm_env = {path = "../vm_env"} +namada_vp_prelude = {path = "../vp_prelude"} +namada_tx_prelude = {path = "../tx_prelude"} chrono = "0.4.19" concat-idents = "1.1.2" prost = "0.9.0" @@ -30,13 +31,13 @@ namada_apps = {path = "../apps", default-features = 
false, features = ["testing" assert_cmd = "1.0.7" borsh = "0.9.1" color-eyre = "0.5.11" +data-encoding = "2.3.2" # NOTE: enable "print" feature to see output from builds ran by e2e tests escargot = {version = "0.5.7"} # , features = ["print"]} expectrl = {version = "=0.5.2"} eyre = "0.6.5" file-serve = "0.2.0" fs_extra = "1.2.0" -hex = "0.4.3" itertools = "0.10.0" libp2p = "0.38.0" pretty_assertions = "0.7.2" diff --git a/tests/proptest-regressions/storage_api/collections/lazy_map.txt b/tests/proptest-regressions/storage_api/collections/lazy_map.txt new file mode 100644 index 0000000000..2de7510923 --- /dev/null +++ b/tests/proptest-regressions/storage_api/collections/lazy_map.txt @@ -0,0 +1,7 @@ +# Seeds for failure cases proptest has generated in the past. It is +# automatically read and these particular cases re-run before any +# novel cases are generated. +# +# It is recommended to check this file in to source control so that +# everyone who runs the test benefits from these saved cases. +cc 59b8eaaf5d8e03e58b346ef229a2487f68fea488197420f150682f7275ce2b83 # shrinks to (initial_state, transitions) = (AbstractLazyMapState { valid_transitions: [], committed_transitions: [] }, [Insert(11178241982156558453, TestVal { x: 9618691367534591266, y: true }), CommitTx, Update(11178241982156558453, TestVal { x: 2635377083098935189, y: false }), Update(11178241982156558453, TestVal { x: 11485387163946255361, y: false }), Insert(4380901092919801530, TestVal { x: 17235291421018840542, y: false }), Update(11178241982156558453, TestVal { x: 1936190700145956620, y: false }), Update(11178241982156558453, TestVal { x: 6934621224353358508, y: false }), Update(11178241982156558453, TestVal { x: 16175036327810390362, y: true }), Remove(5606457884982633480), Insert(7124206407862523505, TestVal { x: 5513772825695605555, y: true }), CommitTxAndBlock, CommitTx, Insert(13347045100814804679, TestVal { x: 5157295776286367034, y: false }), Update(7124206407862523505, TestVal { x: 1989909525753197955, y: false }), Update(4380901092919801530, TestVal { x: 13085578877588425331, y: false }), Update(7124206407862523505, TestVal { x: 1620781139263176467, y: true }), Insert(5806457332157050619, TestVal { x: 14632354209749334932, y: true }), Remove(1613213961397167063), Update(7124206407862523505, TestVal { x: 3848976302483310370, y: true }), Update(4380901092919801530, TestVal { x: 15281186775251770467, y: false }), Remove(5303306623647571548), Insert(5905425607805327902, TestVal { x: 1274794101048822414, y: false }), Insert(2305446651611241243, TestVal { x: 7872403441503057017, y: true }), Insert(2843165193114615911, TestVal { x: 13698490566286768452, y: false }), Insert(3364298091459048760, TestVal { x: 8891279000465212397, y: true }), CommitTx, Insert(17278527568142155478, TestVal { x: 8166151895050476136, y: false }), Remove(9206713523174765253), Remove(1148985045479283759), Insert(13346103305566843535, TestVal { x: 13148026974798633058, y: true }), Remove(17185699086139524651), CommitTx, Update(7124206407862523505, TestVal { x: 3047872255943216792, y: false }), CommitTxAndBlock, CommitTxAndBlock, Remove(4672009405538026945), Update(5905425607805327902, TestVal { x: 6635343936644805461, y: false }), Insert(14100441716981493843, TestVal { x: 8068697312326956479, y: true }), Insert(8370580326875672309, TestVal { x: 18416630552728813406, y: false }), Update(2305446651611241243, TestVal { x: 3777718192999015176, y: false }), Remove(1532142753559370584), Remove(10097030807802775125), Insert(10080356901530935857, TestVal { x: 
17171047520093964037, y: false }), Update(3364298091459048760, TestVal { x: 702372485798608773, y: true }), Insert(5504969092734638033, TestVal { x: 314752460808087203, y: true }), Remove(5486040497128339175), Insert(7884678026881625058, TestVal { x: 4313610278903495077, y: true }), CommitTx, Insert(11228024342874184864, TestVal { x: 428512502841968552, y: false }), Insert(4684666745142518471, TestVal { x: 13122515680485564107, y: true }), Remove(14243063045921130600), Remove(4530767959521683042), Insert(10236349778753659715, TestVal { x: 3138294567956031715, y: true }), Update(2305446651611241243, TestVal { x: 8133236604817109805, y: false }), Update(2843165193114615911, TestVal { x: 12001998927296899868, y: false }), CommitTxAndBlock, CommitTx, CommitTxAndBlock]) diff --git a/tests/proptest-regressions/storage_api/collections/lazy_vec.txt b/tests/proptest-regressions/storage_api/collections/lazy_vec.txt new file mode 100644 index 0000000000..97a16dcbeb --- /dev/null +++ b/tests/proptest-regressions/storage_api/collections/lazy_vec.txt @@ -0,0 +1,7 @@ +# Seeds for failure cases proptest has generated in the past. It is +# automatically read and these particular cases re-run before any +# novel cases are generated. +# +# It is recommended to check this file in to source control so that +# everyone who runs the test benefits from these saved cases. +cc 4330a283e32b5ff3f38d0af2298e1e98c30b1901c1027b572070a1af3356688e # shrinks to (initial_state, transitions) = (AbstractLazyVecState { valid_transitions: [], committed_transitions: [] }, [Push(TestVecItem { x: 15352583996758053781, y: true }), Pop, CommitTx, Push(TestVecItem { x: 6904067244182623445, y: false }), CommitTx, Pop, Push(TestVecItem { x: 759762287021483883, y: true }), Push(TestVecItem { x: 7885704082671389345, y: true }), Pop, Pop, Push(TestVecItem { x: 2762344561419437403, y: false }), Push(TestVecItem { x: 11448034977049028254, y: false }), Update { index: 0, value: TestVecItem { x: 7097339541298715775, y: false } }, Pop, Pop, Push(TestVecItem { x: 457884036257686887, y: true }), CommitTx, Push(TestVecItem { x: 17719281119971095810, y: true }), CommitTx, Push(TestVecItem { x: 4612681906563857058, y: false }), CommitTx, CommitTx, Pop, CommitTx, Pop, Push(TestVecItem { x: 4269537158299505726, y: false }), CommitTx, Pop, Pop, CommitTx, CommitTx, CommitTx, CommitTx, Push(TestVecItem { x: 9020889554694833528, y: true }), Push(TestVecItem { x: 4022797489860699620, y: false }), Update { index: 0, value: TestVecItem { x: 6485081152860611495, y: true } }, Pop, CommitTx, Push(TestVecItem { x: 14470031031894733310, y: false }), Push(TestVecItem { x: 1113274973965556867, y: true }), Push(TestVecItem { x: 4122902042678339346, y: false }), Push(TestVecItem { x: 9672639635189564637, y: true }), Pop, Pop, Pop, CommitTx, Update { index: 0, value: TestVecItem { x: 6372193991838429158, y: false } }, Push(TestVecItem { x: 15140852824102579010, y: false }), Pop, Pop, Pop, Push(TestVecItem { x: 4012218522073776592, y: false }), Push(TestVecItem { x: 10637893847792386454, y: true }), Push(TestVecItem { x: 3357788278949652885, y: false }), CommitTx, CommitTx, Pop, Pop, CommitTx, Pop, Push(TestVecItem { x: 11768518086398350214, y: true }), Push(TestVecItem { x: 4361685178396183644, y: true }), Pop, CommitTx, Push(TestVecItem { x: 2450907664540456425, y: false }), Push(TestVecItem { x: 18184919885943118586, y: true }), Update { index: 1, value: TestVecItem { x: 10611906658537706503, y: false } }, Push(TestVecItem { x: 4887827541279511396, y: false }), 
Update { index: 0, value: TestVecItem { x: 13021774003761931172, y: false } }, Push(TestVecItem { x: 3644118228573898014, y: false }), CommitTx, Update { index: 0, value: TestVecItem { x: 1276840798381751183, y: false } }, Pop, Pop]) diff --git a/tests/proptest-regressions/storage_api/collections/nested_lazy_map.txt b/tests/proptest-regressions/storage_api/collections/nested_lazy_map.txt new file mode 100644 index 0000000000..d587a9680e --- /dev/null +++ b/tests/proptest-regressions/storage_api/collections/nested_lazy_map.txt @@ -0,0 +1,7 @@ +# Seeds for failure cases proptest has generated in the past. It is +# automatically read and these particular cases re-run before any +# novel cases are generated. +# +# It is recommended to check this file in to source control so that +# everyone who runs the test benefits from these saved cases. +cc b5ce7502439712f95a4b50de0d5455e0a6788cc95dbd535e749d5717da0ee8e1 # shrinks to (initial_state, transitions) = (AbstractLazyMapState { valid_transitions: [], committed_transitions: [] }, [Insert((22253647846329582, -2060910714, -85), TestVal { x: 16862967849328560500, y: true })]) diff --git a/tests/src/e2e/ledger_tests.rs b/tests/src/e2e/ledger_tests.rs index 22eb4f4ac5..a9049c4675 100644 --- a/tests/src/e2e/ledger_tests.rs +++ b/tests/src/e2e/ledger_tests.rs @@ -17,6 +17,7 @@ use std::time::{Duration, Instant}; use borsh::BorshSerialize; use color_eyre::eyre::Result; +use data_encoding::HEXLOWER; use namada::types::token; use namada_apps::config::genesis::genesis_config::{ GenesisConfig, ParametersConfig, PosParamsConfig, @@ -386,7 +387,7 @@ fn ledger_txs_and_queries() -> Result<()> { &validator_one_rpc, ], // expect hex encoded of borsh encoded bytes - hex::encode(christel_balance.try_to_vec().unwrap()), + HEXLOWER.encode(&christel_balance.try_to_vec().unwrap()), ), ]; for (query_args, expected) in &query_args_and_expected_response { diff --git a/tests/src/lib.rs b/tests/src/lib.rs index 1b75f83bdc..78ebf2473c 100644 --- a/tests/src/lib.rs +++ b/tests/src/lib.rs @@ -12,6 +12,8 @@ mod e2e; #[cfg(test)] mod native_vp; pub mod storage; +#[cfg(test)] +mod storage_api; /// Using this import requires `tracing` and `tracing-subscriber` dependencies. /// Set env var `RUST_LOG=info` to see the logs from a test run (and diff --git a/tests/src/native_vp/mod.rs b/tests/src/native_vp/mod.rs index be450a7086..808299a86d 100644 --- a/tests/src/native_vp/mod.rs +++ b/tests/src/native_vp/mod.rs @@ -1,47 +1,38 @@ mod pos; +use std::collections::BTreeSet; + use namada::ledger::native_vp::{Ctx, NativeVp}; use namada::ledger::storage::mockdb::MockDB; use namada::ledger::storage::Sha256Hasher; -use namada::vm::wasm::compilation_cache; -use namada::vm::wasm::compilation_cache::common::Cache; -use namada::vm::{wasm, WasmCacheRwAccess}; -use tempfile::TempDir; +use namada::types::address::Address; +use namada::types::storage; +use namada::vm::WasmCacheRwAccess; use crate::tx::TestTxEnv; type NativeVpCtx<'a> = Ctx<'a, MockDB, Sha256Hasher, WasmCacheRwAccess>; -type VpCache = Cache; #[derive(Debug)] pub struct TestNativeVpEnv { - pub vp_cache_dir: TempDir, - pub vp_wasm_cache: VpCache, pub tx_env: TestTxEnv, + pub address: Address, + pub verifiers: BTreeSet
, + pub keys_changed: BTreeSet, } impl TestNativeVpEnv { - pub fn new(tx_env: TestTxEnv) -> Self { - let (vp_wasm_cache, vp_cache_dir) = - wasm::compilation_cache::common::testing::cache(); - - Self { - vp_cache_dir, - vp_wasm_cache, - tx_env, - } - } -} + pub fn from_tx_env(tx_env: TestTxEnv, address: Address) -> Self { + // Find the tx verifiers and keys_changes the same way as protocol would + let verifiers = tx_env.get_verifiers(); -impl Default for TestNativeVpEnv { - fn default() -> Self { - let (vp_wasm_cache, vp_cache_dir) = - wasm::compilation_cache::common::testing::cache(); + let keys_changed = tx_env.all_touched_storage_keys(); Self { - vp_cache_dir, - vp_wasm_cache, - tx_env: TestTxEnv::default(), + address, + tx_env, + verifiers, + keys_changed, } } } @@ -51,20 +42,10 @@ impl TestNativeVpEnv { pub fn validate_tx<'a, T>( &'a self, init_native_vp: impl Fn(NativeVpCtx<'a>) -> T, - // The function is applied on the `tx_data` when called - mut apply_tx: impl FnMut(&[u8]), ) -> Result::Error> where T: NativeVp, { - let tx_data = self.tx_env.tx.data.as_ref().cloned().unwrap_or_default(); - apply_tx(&tx_data); - - // Find the tx verifiers and keys_changes the same way as protocol would - let verifiers = self.tx_env.get_verifiers(); - - let keys_changed = self.tx_env.all_touched_storage_keys(); - let ctx = Ctx { iterators: Default::default(), gas_meter: Default::default(), @@ -72,10 +53,13 @@ impl TestNativeVpEnv { write_log: &self.tx_env.write_log, tx: &self.tx_env.tx, vp_wasm_cache: self.tx_env.vp_wasm_cache.clone(), + address: &self.address, + keys_changed: &self.keys_changed, + verifiers: &self.verifiers, }; let tx_data = self.tx_env.tx.data.as_ref().cloned().unwrap_or_default(); let native_vp = init_native_vp(ctx); - native_vp.validate_tx(&tx_data, &keys_changed, &verifiers) + native_vp.validate_tx(&tx_data, &self.keys_changed, &self.verifiers) } } diff --git a/tests/src/native_vp/pos.rs b/tests/src/native_vp/pos.rs index be1844c6cd..1c68ac0269 100644 --- a/tests/src/native_vp/pos.rs +++ b/tests/src/native_vp/pos.rs @@ -105,10 +105,10 @@ mod tests { use namada::ledger::pos::namada_proof_of_stake::PosBase; use namada::ledger::pos::PosParams; use namada::types::storage::Epoch; - use namada::types::token; - use namada_vm_env::proof_of_stake::parameters::testing::arb_pos_params; - use namada_vm_env::proof_of_stake::{staking_token_address, PosVP}; - use namada_vm_env::tx_prelude::Address; + use namada::types::{address, token}; + use namada_tx_prelude::proof_of_stake::parameters::testing::arb_pos_params; + use namada_tx_prelude::proof_of_stake::{staking_token_address, PosVP}; + use namada_tx_prelude::Address; use proptest::prelude::*; use proptest::prop_state_machine; use proptest::state_machine::{AbstractStateMachine, StateMachineTest}; @@ -410,10 +410,13 @@ mod tests { fn validate_transitions(&self) { // Use the tx_env to run PoS VP let tx_env = tx_host_env::take(); - let vp_env = TestNativeVpEnv::new(tx_env); - let result = vp_env.validate_tx(PosVP::new, |_tx_data| {}); + + let vp_env = TestNativeVpEnv::from_tx_env(tx_env, address::POS); + let result = vp_env.validate_tx(PosVP::new); + // Put the tx_env back before checking the result tx_host_env::set(vp_env.tx_env); + let result = result.expect("Validation of valid changes must not fail!"); @@ -538,20 +541,20 @@ pub mod testing { use namada::types::key::RefTo; use namada::types::storage::Epoch; use namada::types::{address, key, token}; - use namada_vm_env::proof_of_stake::epoched::{ + use 
namada_tx_prelude::proof_of_stake::epoched::{ DynEpochOffset, Epoched, EpochedDelta, }; - use namada_vm_env::proof_of_stake::types::{ + use namada_tx_prelude::proof_of_stake::types::{ Bond, Unbond, ValidatorState, VotingPower, VotingPowerDelta, WeightedValidator, }; - use namada_vm_env::proof_of_stake::{ + use namada_tx_prelude::proof_of_stake::{ staking_token_address, BondId, Bonds, PosParams, Unbonds, }; - use namada_vm_env::tx_prelude::{Address, PoS}; + use namada_tx_prelude::{Address, StorageRead, StorageWrite}; use proptest::prelude::*; - use crate::tx::tx_host_env; + use crate::tx::{self, tx_host_env}; #[derive(Clone, Debug, Default)] pub struct TestValidator { @@ -783,8 +786,8 @@ pub mod testing { /// the VP. pub fn apply(self, is_current_tx_valid: bool) { // Read the PoS parameters - use namada_vm_env::tx_prelude::PosRead; - let params = PoS.read_pos_params(); + use namada_tx_prelude::PosRead; + let params = tx::ctx().read_pos_params().unwrap(); let current_epoch = tx_host_env::with(|env| { // Reset the gas meter on each change, so that we never run @@ -811,7 +814,7 @@ pub mod testing { params: &PosParams, current_epoch: Epoch, ) -> PosStorageChanges { - use namada_vm_env::tx_prelude::PosRead; + use namada_tx_prelude::PosRead; match self { ValidPosAction::InitValidator(addr) => { @@ -869,8 +872,10 @@ pub mod testing { // Read the validator's current total deltas (this may be // updated by previous transition(s) within the same // transaction via write log) - let validator_total_deltas = - PoS.read_validator_total_deltas(&validator).unwrap(); + let validator_total_deltas = tx::ctx() + .read_validator_total_deltas(&validator) + .unwrap() + .unwrap(); let total_delta = validator_total_deltas .get_at_offset(current_epoch, offset, params) .unwrap_or_default(); @@ -1007,8 +1012,10 @@ pub mod testing { // Read the validator's current total deltas (this may be // updated by previous transition(s) within the same // transaction via write log) - let validator_total_deltas_cur = - PoS.read_validator_total_deltas(&validator).unwrap(); + let validator_total_deltas_cur = tx::ctx() + .read_validator_total_deltas(&validator) + .unwrap() + .unwrap(); let total_delta_cur = validator_total_deltas_cur .get_at_offset(current_epoch, offset, params) .unwrap_or_default(); @@ -1073,10 +1080,12 @@ pub mod testing { changes } ValidPosAction::Withdraw { owner, validator } => { - let unbonds = PoS.read_unbond(&BondId { - source: owner.clone(), - validator: validator.clone(), - }); + let unbonds = tx::ctx() + .read_unbond(&BondId { + source: owner.clone(), + validator: validator.clone(), + }) + .unwrap(); let token_delta: i128 = unbonds .and_then(|unbonds| unbonds.get(current_epoch)) @@ -1108,7 +1117,7 @@ pub mod testing { // invalid changes is_current_tx_valid: bool, ) { - use namada_vm_env::tx_prelude::{PosRead, PosWrite}; + use namada_tx_prelude::{PosRead, PosWrite}; match change { PosStorageChange::SpawnAccount { address } => { @@ -1126,7 +1135,7 @@ pub mod testing { source: owner, validator, }; - let bonds = PoS.read_bond(&bond_id); + let bonds = tx::ctx().read_bond(&bond_id).unwrap(); let bonds = if delta >= 0 { let amount: u64 = delta.try_into().unwrap(); let amount: token::Amount = amount.into(); @@ -1190,7 +1199,7 @@ pub mod testing { ); bonds }; - PoS.write_bond(&bond_id, bonds); + tx::ctx().write_bond(&bond_id, bonds).unwrap(); } PosStorageChange::Unbond { owner, @@ -1202,8 +1211,8 @@ pub mod testing { source: owner, validator, }; - let bonds = PoS.read_bond(&bond_id).unwrap(); - let unbonds = 
PoS.read_unbond(&bond_id); + let bonds = tx::ctx().read_bond(&bond_id).unwrap().unwrap(); + let unbonds = tx::ctx().read_unbond(&bond_id).unwrap(); let amount: u64 = delta.try_into().unwrap(); let mut to_unbond: token::Amount = amount.into(); let mut value = Unbond { @@ -1260,10 +1269,11 @@ pub mod testing { } None => Unbonds::init(value, current_epoch, params), }; - PoS.write_unbond(&bond_id, unbonds); + tx::ctx().write_unbond(&bond_id, unbonds).unwrap(); } PosStorageChange::TotalVotingPower { vp_delta, offset } => { - let mut total_voting_powers = PoS.read_total_voting_power(); + let mut total_voting_powers = + tx::ctx().read_total_voting_power().unwrap(); let vp_delta: i64 = vp_delta.try_into().unwrap(); match offset { Either::Left(offset) => { @@ -1283,10 +1293,14 @@ pub mod testing { ); } } - PoS.write_total_voting_power(total_voting_powers) + tx::ctx() + .write_total_voting_power(total_voting_powers) + .unwrap() } PosStorageChange::ValidatorAddressRawHash { address } => { - PoS.write_validator_address_raw_hash(&address); + tx::ctx() + .write_validator_address_raw_hash(&address) + .unwrap(); } PosStorageChange::ValidatorSet { validator, @@ -1302,8 +1316,9 @@ pub mod testing { ); } PosStorageChange::ValidatorConsensusKey { validator, pk } => { - let consensus_key = PoS + let consensus_key = tx::ctx() .read_validator_consensus_key(&validator) + .unwrap() .map(|mut consensus_keys| { consensus_keys.set(pk.clone(), current_epoch, params); consensus_keys @@ -1311,21 +1326,26 @@ pub mod testing { .unwrap_or_else(|| { Epoched::init(pk, current_epoch, params) }); - PoS.write_validator_consensus_key(&validator, consensus_key); + tx::ctx() + .write_validator_consensus_key(&validator, consensus_key) + .unwrap(); } PosStorageChange::ValidatorStakingRewardsAddress { validator, address, } => { - PoS.write_validator_staking_reward_address(&validator, address); + tx::ctx() + .write_validator_staking_reward_address(&validator, address) + .unwrap(); } PosStorageChange::ValidatorTotalDeltas { validator, delta, offset, } => { - let total_deltas = PoS + let total_deltas = tx::ctx() .read_validator_total_deltas(&validator) + .unwrap() .map(|mut total_deltas| { total_deltas.add_at_offset( delta, @@ -1343,15 +1363,18 @@ pub mod testing { params, ) }); - PoS.write_validator_total_deltas(&validator, total_deltas); + tx::ctx() + .write_validator_total_deltas(&validator, total_deltas) + .unwrap(); } PosStorageChange::ValidatorVotingPower { validator, vp_delta: delta, offset, } => { - let voting_power = PoS + let voting_power = tx::ctx() .read_validator_voting_power(&validator) + .unwrap() .map(|mut voting_powers| { match offset { Either::Left(offset) => { @@ -1381,11 +1404,14 @@ pub mod testing { params, ) }); - PoS.write_validator_voting_power(&validator, voting_power); + tx::ctx() + .write_validator_voting_power(&validator, voting_power) + .unwrap(); } PosStorageChange::ValidatorState { validator, state } => { - let state = PoS + let state = tx::ctx() .read_validator_state(&validator) + .unwrap() .map(|mut states| { states.set(state, current_epoch, params); states @@ -1393,16 +1419,15 @@ pub mod testing { .unwrap_or_else(|| { Epoched::init_at_genesis(state, current_epoch) }); - PoS.write_validator_state(&validator, state); + tx::ctx().write_validator_state(&validator, state).unwrap(); } PosStorageChange::StakingTokenPosBalance { delta } => { let balance_key = token::balance_key( &staking_token_address(), - &::POS_ADDRESS, - ) - .to_string(); + &::POS_ADDRESS, + ); let mut balance: token::Amount = - 
tx_host_env::read(&balance_key).unwrap_or_default(); + tx::ctx().read(&balance_key).unwrap().unwrap_or_default(); if delta < 0 { let to_spend: u64 = (-delta).try_into().unwrap(); let to_spend: token::Amount = to_spend.into(); @@ -1412,16 +1437,17 @@ pub mod testing { let to_recv: token::Amount = to_recv.into(); balance.receive(&to_recv); } - tx_host_env::write(&balance_key, balance); + tx::ctx().write(&balance_key, balance).unwrap(); } PosStorageChange::WithdrawUnbond { owner, validator } => { let bond_id = BondId { source: owner, validator, }; - let mut unbonds = PoS.read_unbond(&bond_id).unwrap(); + let mut unbonds = + tx::ctx().read_unbond(&bond_id).unwrap().unwrap(); unbonds.delete_current(current_epoch, params); - PoS.write_unbond(&bond_id, unbonds); + tx::ctx().write_unbond(&bond_id, unbonds).unwrap(); } } } @@ -1433,12 +1459,12 @@ pub mod testing { current_epoch: Epoch, params: &PosParams, ) { - use namada_vm_env::tx_prelude::{PosRead, PosWrite}; + use namada_tx_prelude::{PosRead, PosWrite}; let validator_total_deltas = - PoS.read_validator_total_deltas(&validator); + tx::ctx().read_validator_total_deltas(&validator).unwrap(); // println!("Read validator set"); - let mut validator_set = PoS.read_validator_set(); + let mut validator_set = tx::ctx().read_validator_set().unwrap(); // println!("Read validator set: {:#?}", validator_set); validator_set.update_from_offset( |validator_set, epoch| { @@ -1545,7 +1571,7 @@ pub mod testing { params, ); // println!("Write validator set {:#?}", validator_set); - PoS.write_validator_set(validator_set); + tx::ctx().write_validator_set(validator_set).unwrap(); } pub fn arb_invalid_pos_action( @@ -1625,8 +1651,8 @@ pub mod testing { /// Apply an invalid PoS storage action. pub fn apply(self) { // Read the PoS parameters - use namada_vm_env::tx_prelude::PosRead; - let params = PoS.read_pos_params(); + use namada_tx_prelude::PosRead; + let params = tx::ctx().read_pos_params().unwrap(); for (epoch, changes) in self.changes { for change in changes { @@ -1641,9 +1667,9 @@ pub mod testing { params: &PosParams, current_epoch: Epoch, ) -> bool { - use namada_vm_env::tx_prelude::PosRead; + use namada_tx_prelude::PosRead; - let validator_sets = PoS.read_validator_set(); + let validator_sets = tx::ctx().read_validator_set().unwrap(); let validator_set = validator_sets .get_at_offset(current_epoch, DynEpochOffset::PipelineLen, params) .unwrap(); diff --git a/tests/src/storage_api/collections/lazy_map.rs b/tests/src/storage_api/collections/lazy_map.rs new file mode 100644 index 0000000000..afff09bbf1 --- /dev/null +++ b/tests/src/storage_api/collections/lazy_map.rs @@ -0,0 +1,613 @@ +#[cfg(test)] +mod tests { + use std::collections::BTreeMap; + use std::convert::TryInto; + + use borsh::{BorshDeserialize, BorshSerialize}; + use namada::types::address::{self, Address}; + use namada::types::storage; + use namada_tx_prelude::storage::KeySeg; + use namada_tx_prelude::storage_api::collections::{ + lazy_map, LazyCollection, LazyMap, + }; + use proptest::prelude::*; + use proptest::prop_state_machine; + use proptest::state_machine::{AbstractStateMachine, StateMachineTest}; + use proptest::test_runner::Config; + use test_log::test; + + use crate::tx::tx_host_env; + use crate::vp::vp_host_env; + + prop_state_machine! 
{ + #![proptest_config(Config { + // Instead of the default 256, we only run 5 because otherwise it + // takes too long and it's preferable to crank up the number of + // transitions instead, to allow each case to run for more epochs as + // some issues only manifest once the model progresses further. + // Additionally, more cases will be explored every time this test is + // executed in the CI. + cases: 5, + .. Config::default() + })] + #[test] + fn lazy_map_api_state_machine_test(sequential 1..100 => ConcreteLazyMapState); + } + + /// Type of key used in the map + type TestKey = u64; + + /// Some borsh-serializable type with arbitrary fields to be used inside + /// LazyMap state machine test + #[derive( + Clone, + Debug, + BorshSerialize, + BorshDeserialize, + PartialEq, + Eq, + PartialOrd, + Ord, + )] + struct TestVal { + x: u64, + y: bool, + } + + /// A `StateMachineTest` implemented on this struct manipulates it with + /// `Transition`s, which are also being accumulated into + /// `current_transitions`. It then: + /// + /// - checks its state against an in-memory `std::collections::HashMap` + /// - runs validation and checks that the `LazyMap::Action`s reported from + /// validation match with transitions that were applied + /// + /// Additionally, one of the transitions is to commit a block and/or + /// transaction, during which the currently accumulated state changes are + /// persisted, or promoted from transaction write log to block's write log. + #[derive(Debug)] + struct ConcreteLazyMapState { + /// Address is used to prefix the storage key of the `lazy_map` in + /// order to simulate a transaction and a validity predicate + /// check from changes on the `lazy_map` + address: Address, + /// In the test, we apply the same transitions on the `lazy_map` as on + /// `eager_map` to check that `lazy_map`'s state is consistent with + /// `eager_map`. + eager_map: BTreeMap, + /// Handle to a lazy map + lazy_map: LazyMap, + /// Valid LazyMap changes in the current transaction + current_transitions: Vec, + } + + #[derive(Clone, Debug, Default)] + struct AbstractLazyMapState { + /// Valid LazyMap changes in the current transaction + valid_transitions: Vec, + /// Valid LazyMap changes committed to storage + committed_transitions: Vec, + } + + /// Possible transitions that can modify a [`LazyMap`]. 
+ /// This roughly corresponds to the methods that have `StorageWrite` + /// access and is very similar to [`Action`] + #[derive(Clone, Debug)] + enum Transition { + /// Commit all valid transitions in the current transaction + CommitTx, + /// Commit all valid transitions in the current transaction and also + /// commit the current block + CommitTxAndBlock, + /// Insert a key-val into a [`LazyMap`] + Insert(TestKey, TestVal), + /// Remove a key-val from a [`LazyMap`] + Remove(TestKey), + /// Update a value at key from pre to post state in a + /// [`LazyMap`] + Update(TestKey, TestVal), + } + + impl AbstractStateMachine for AbstractLazyMapState { + type State = Self; + type Transition = Transition; + + fn init_state() -> BoxedStrategy { + Just(Self::default()).boxed() + } + + // Apply a random transition to the state + fn transitions(state: &Self::State) -> BoxedStrategy { + let length = state.len(); + if length == 0 { + prop_oneof![ + 1 => Just(Transition::CommitTx), + 1 => Just(Transition::CommitTxAndBlock), + 3 => (arb_map_key(), arb_map_val()).prop_map(|(key, val)| Transition::Insert(key, val)) + ] + .boxed() + } else { + let keys = state.find_existing_keys(); + let arb_existing_map_key = + || proptest::sample::select(keys.clone()); + prop_oneof![ + 1 => Just(Transition::CommitTx), + 1 => Just(Transition::CommitTxAndBlock), + 3 => (arb_existing_map_key(), arb_map_val()).prop_map(|(key, val)| + Transition::Update(key, val) + ), + 3 => arb_existing_map_key().prop_map(Transition::Remove), + 5 => (arb_map_key().prop_filter("insert on non-existing keys only", + move |key| !keys.contains(key)), arb_map_val()) + .prop_map(|(key, val)| Transition::Insert(key, val)) + ] + .boxed() + } + } + + fn apply_abstract( + mut state: Self::State, + transition: &Self::Transition, + ) -> Self::State { + match transition { + Transition::CommitTx | Transition::CommitTxAndBlock => { + let valid_actions_to_commit = + std::mem::take(&mut state.valid_transitions); + state + .committed_transitions + .extend(valid_actions_to_commit.into_iter()); + } + _ => state.valid_transitions.push(transition.clone()), + } + state + } + + fn preconditions( + state: &Self::State, + transition: &Self::Transition, + ) -> bool { + let length = state.len(); + // Ensure that the remove or update transitions are not applied + // to an empty state + if length == 0 + && matches!( + transition, + Transition::Remove(_) | Transition::Update(_, _) + ) + { + return false; + } + match transition { + Transition::Update(key, _) | Transition::Remove(key) => { + let keys = state.find_existing_keys(); + // Ensure that the update/remove key is an existing one + keys.contains(key) + } + Transition::Insert(key, _) => { + let keys = state.find_existing_keys(); + // Ensure that the insert key is not an existing one + !keys.contains(key) + } + _ => true, + } + } + } + + impl StateMachineTest for ConcreteLazyMapState { + type Abstract = AbstractLazyMapState; + type ConcreteState = Self; + + fn init_test( + _initial_state: ::State, + ) -> Self::ConcreteState { + // Init transaction env in which we'll be applying the transitions + tx_host_env::init(); + + // The lazy_map's path must be prefixed by the address to be able + // to trigger a validity predicate on it + let address = address::testing::established_address_1(); + tx_host_env::with(|env| env.spawn_accounts([&address])); + let lazy_map_prefix: storage::Key = address.to_db_key().into(); + + Self { + address, + eager_map: BTreeMap::new(), + lazy_map: LazyMap::open( + 
lazy_map_prefix.push(&"arbitrary".to_string()).unwrap(), + ), + current_transitions: vec![], + } + } + + fn apply_concrete( + mut state: Self::ConcreteState, + transition: ::Transition, + ) -> Self::ConcreteState { + // Apply transitions in transaction env + let ctx = tx_host_env::ctx(); + + // Persist the transitions in the current tx, or clear previous ones + // if we're committing a tx + match &transition { + Transition::CommitTx | Transition::CommitTxAndBlock => { + state.current_transitions = vec![]; + } + _ => { + state.current_transitions.push(transition.clone()); + } + } + + // Transition application on lazy map and post-conditions: + match &transition { + Transition::CommitTx => { + // commit the tx without committing the block + tx_host_env::with(|env| env.write_log.commit_tx()); + } + Transition::CommitTxAndBlock => { + // commit the tx and the block + tx_host_env::commit_tx_and_block(); + } + Transition::Insert(key, value) => { + state.lazy_map.insert(ctx, *key, value.clone()).unwrap(); + + // Post-conditions: + let stored_value = + state.lazy_map.get(ctx, key).unwrap().unwrap(); + assert_eq!( + &stored_value, value, + "the new item must be added to the back" + ); + + state.assert_validation_accepted(); + } + Transition::Remove(key) => { + let removed = + state.lazy_map.remove(ctx, key).unwrap().unwrap(); + + // Post-conditions: + assert_eq!( + &removed, + state.eager_map.get(key).unwrap(), + "removed element matches the value in eager map \ + before it's updated" + ); + + state.assert_validation_accepted(); + } + Transition::Update(key, value) => { + let old_val = + state.lazy_map.get(ctx, key).unwrap().unwrap(); + + state.lazy_map.insert(ctx, *key, value.clone()).unwrap(); + + // Post-conditions: + let new_val = + state.lazy_map.get(ctx, key).unwrap().unwrap(); + assert_eq!( + &old_val, + state.eager_map.get(key).unwrap(), + "old value must match the value at the same key in \ + the eager map before it's updated" + ); + assert_eq!( + &new_val, value, + "new value must match that which was passed into the \ + Transition::Update" + ); + + state.assert_validation_accepted(); + } + } + + // Apply transition in the eager map for comparison + apply_transition_on_eager_map(&mut state.eager_map, &transition); + + // Global post-conditions: + + // All items in eager map must be present in lazy map + for (key, expected_item) in state.eager_map.iter() { + let got = + state.lazy_map.get(ctx, key).unwrap().expect( + "The expected item must be present in lazy map", + ); + assert_eq!(expected_item, &got, "at key {key}"); + } + + // All items in lazy map must be present in eager map + for key_val in state.lazy_map.iter(ctx).unwrap() { + let (key, expected_val) = key_val.unwrap(); + let got = state + .eager_map + .get(&key) + .expect("The expected item must be present in eager map"); + assert_eq!(&expected_val, got, "at key {key}"); + } + + state + } + } + + impl AbstractLazyMapState { + /// Find the length of the map from the applied transitions + fn len(&self) -> u64 { + (map_len_diff_from_transitions(self.committed_transitions.iter()) + + map_len_diff_from_transitions(self.valid_transitions.iter())) + .try_into() + .expect( + "It shouldn't be possible to underflow length from all \ + transactions applied in abstract state", + ) + } + + /// Build an eager map from the committed and current transitions + fn eager_map(&self) -> BTreeMap { + let mut eager_map = BTreeMap::new(); + for transition in &self.committed_transitions { + apply_transition_on_eager_map(&mut eager_map, transition); + } + 
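// Editor's sketch (illustration only, not part of the change above): the
// pair of loops around this point replays committed transitions first and
// the current transaction's transitions second. Assuming the module's
// `Transition`, `TestKey` and `TestVal` types, an equivalent fold over
// both sources would be:
#[allow(dead_code)]
fn eager_map_as_fold(
    committed: &[Transition],
    valid: &[Transition],
) -> std::collections::BTreeMap<TestKey, TestVal> {
    // Committed transitions are chained before the pending ones, so the
    // later (current-tx) writes win on key collisions
    committed.iter().chain(valid.iter()).fold(
        std::collections::BTreeMap::new(),
        |mut map, transition| {
            apply_transition_on_eager_map(&mut map, transition);
            map
        },
    )
}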
for transition in &self.valid_transitions { + apply_transition_on_eager_map(&mut eager_map, transition); + } + eager_map + } + + /// Find the keys currently present in the map + fn find_existing_keys(&self) -> Vec<TestKey> { + self.eager_map().keys().cloned().collect() + } + } + + /// Find the difference in length of the map from the applied transitions + fn map_len_diff_from_transitions<'a>( + transitions: impl Iterator<Item = &'a Transition>, + ) -> i64 { + let mut insert_count: i64 = 0; + let mut remove_count: i64 = 0; + + for trans in transitions { + match trans { + Transition::CommitTx + | Transition::CommitTxAndBlock + | Transition::Update(_, _) => {} + Transition::Insert(_, _) => insert_count += 1, + Transition::Remove(_) => remove_count += 1, + } + } + insert_count - remove_count + } + + impl ConcreteLazyMapState { + fn assert_validation_accepted(&self) { + // Init the VP env from tx env in which we applied the map + // transitions + let tx_env = tx_host_env::take(); + vp_host_env::init_from_tx(self.address.clone(), tx_env, |_| {}); + + // Simulate a validity predicate run using the lazy map's validation + // helpers + let changed_keys = + vp_host_env::with(|env| env.all_touched_storage_keys()); + + let mut validation_builder = None; + + // An insert followed by a remove is a no-op, in which case we'd + // still see the changed keys for these actions, but they wouldn't + // affect the validation result and they never get persisted, yet + // we'd still see them as changed keys here. To guard against this + // case, we check that `map_len_diff_from_transitions` is not zero. + let map_len_diff = + map_len_diff_from_transitions(self.current_transitions.iter()); + + // To help debug validation issues... + dbg!( + &self.current_transitions, + &changed_keys + .iter() + .map(storage::Key::to_string) + .collect::<Vec<_>>() + ); + + for key in &changed_keys { + let is_sub_key = self + .lazy_map + .accumulate( + vp_host_env::ctx(), + &mut validation_builder, + key, + ) + .unwrap(); + + assert!( + is_sub_key, + "We're only modifying the lazy_map's keys here. Key: \ + \"{key}\", map length diff {map_len_diff}" + ); + } + if !changed_keys.is_empty() && map_len_diff != 0 { + assert!( + validation_builder.is_some(), + "If some keys were changed, the builder must get filled in" + ); + let actions = LazyMap::<TestKey, TestVal>::validate( + validation_builder.unwrap(), + ) + .unwrap(); + let mut actions_to_check = actions.clone(); + + // Check that every transition has a corresponding action from + // validation. We drop the found actions to check that all + // actions are matched too.
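// Editor's sketch (illustration only): the matching loop below is a
// multiset match between normalized transitions and the `Action`s
// reported by validation, consuming each matched action. A hypothetical
// standalone helper specialized to `Insert` (assuming `lazy_map::Action`
// is generic over the map's key and value types) could look like:
#[allow(dead_code)]
fn consume_matching_insert(
    actions: &mut Vec<lazy_map::Action<TestKey, TestVal>>,
    expected_key: &TestKey,
    expected_val: &TestVal,
) -> bool {
    // Find the first `Insert` action with a matching key-val and drop it
    if let Some(ix) = actions.iter().position(|action| {
        matches!(
            action,
            lazy_map::Action::Insert(key, val)
                if key == expected_key && val == expected_val
        )
    }) {
        actions.remove(ix);
        true
    } else {
        false
    }
}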
+ let current_transitions = + normalize_transitions(&self.current_transitions); + for transition in &current_transitions { + match transition { + Transition::CommitTx | Transition::CommitTxAndBlock => { + } + Transition::Insert(expected_key, expected_val) => { + for (ix, action) in + actions_to_check.iter().enumerate() + { + if let lazy_map::Action::Insert(key, val) = + action + { + if expected_key == key + && expected_val == val + { + actions_to_check.remove(ix); + break; + } + } + } + } + Transition::Remove(expected_key) => { + for (ix, action) in + actions_to_check.iter().enumerate() + { + if let lazy_map::Action::Remove(key, _val) = + action + { + if expected_key == key { + actions_to_check.remove(ix); + break; + } + } + } + } + Transition::Update(expected_key, value) => { + for (ix, action) in + actions_to_check.iter().enumerate() + { + if let lazy_map::Action::Update { + key, + pre: _, + post, + } = action + { + if expected_key == key && post == value { + actions_to_check.remove(ix); + break; + } + } + } + } + } + } + + assert!( + actions_to_check.is_empty(), + "All the actions reported from validation {actions:#?} \ + should have been matched with SM transitions \ + {current_transitions:#?}, but these actions didn't \ + match: {actions_to_check:#?}", + ) + } + + // Put the tx_env back before checking the result + tx_host_env::set_from_vp_env(vp_host_env::take()); + } + } + + /// Generate an arbitrary `TestKey` + fn arb_map_key() -> impl Strategy<Value = TestKey> { + any::<TestKey>() + } + + /// Generate an arbitrary `TestVal` + fn arb_map_val() -> impl Strategy<Value = TestVal> { + (any::<u64>(), any::<bool>()).prop_map(|(x, y)| TestVal { x, y }) + } + + /// Apply `Transition` on an eager `Map`. + fn apply_transition_on_eager_map( + map: &mut BTreeMap<TestKey, TestVal>, + transition: &Transition, + ) { + match transition { + Transition::CommitTx | Transition::CommitTxAndBlock => {} + Transition::Insert(key, value) => { + map.insert(*key, value.clone()); + } + Transition::Remove(key) => { + let _popped = map.remove(key); + } + Transition::Update(key, value) => { + let entry = map.get_mut(key).unwrap(); + *entry = value.clone(); + } + } + } + + /// Normalize transitions: + /// - remove(key) + insert(key, val) -> update(key, val) + /// - insert(key, val) + update(key, new_val) -> insert(key, new_val) + /// - update(key, val) + update(key, new_val) -> update(key, new_val) + /// + /// Note that the normalizable transition pairs do not have to be directly + /// next to each other, but their order does matter.
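// Editor's worked example (illustrative test, not part of the change
// above; it assumes `Remove(2)` is valid, i.e. that key 2 exists in the
// committed state): a concrete trace of the collapse rules documented
// above, exercised against `normalize_transitions` defined just below.
#[test]
fn normalize_transitions_worked_example() {
    let a = TestVal { x: 1, y: false };
    let b = TestVal { x: 2, y: true };
    let c = TestVal { x: 3, y: false };
    let normalized = normalize_transitions(&[
        Transition::Insert(1, a),
        Transition::Update(1, b.clone()),
        Transition::Remove(2),
        Transition::Insert(2, c.clone()),
    ]);
    // insert(1, a) + update(1, b) collapses to insert(1, b);
    // remove(2) + insert(2, c) collapses to update(2, c)
    assert!(matches!(
        normalized.as_slice(),
        [Transition::Insert(1, inserted), Transition::Update(2, updated)]
            if *inserted == b && *updated == c
    ));
}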
+ fn normalize_transitions(transitions: &[Transition]) -> Vec { + let mut collapsed = vec![]; + 'outer: for transition in transitions { + match transition { + Transition::CommitTx + | Transition::CommitTxAndBlock + | Transition::Remove(_) => collapsed.push(transition.clone()), + Transition::Insert(key, val) => { + for (ix, collapsed_transition) in + collapsed.iter().enumerate() + { + if let Transition::Remove(remove_key) = + collapsed_transition + { + if key == remove_key { + // remove(key) + insert(key, val) -> update(key, + // val) + + // Replace the Remove with an Update instead of + // inserting the Insert + *collapsed.get_mut(ix).unwrap() = + Transition::Update(*key, val.clone()); + continue 'outer; + } + } + } + collapsed.push(transition.clone()); + } + Transition::Update(key, value) => { + for (ix, collapsed_transition) in + collapsed.iter().enumerate() + { + if let Transition::Insert(insert_key, _) = + collapsed_transition + { + if key == insert_key { + // insert(key, val) + update(key, new_val) -> + // insert(key, new_val) + + // Replace the insert with the new update's + // value instead of inserting it + *collapsed.get_mut(ix).unwrap() = + Transition::Insert(*key, value.clone()); + continue 'outer; + } + } else if let Transition::Update(update_key, _) = + collapsed_transition + { + if key == update_key { + // update(key, val) + update(key, new_val) -> + // update(key, new_val) + + // Replace the insert with the new update's + // value instead of inserting it + *collapsed.get_mut(ix).unwrap() = + Transition::Update(*key, value.clone()); + continue 'outer; + } + } + } + collapsed.push(transition.clone()); + } + } + } + collapsed + } +} diff --git a/tests/src/storage_api/collections/lazy_vec.rs b/tests/src/storage_api/collections/lazy_vec.rs new file mode 100644 index 0000000000..65e08b4ca7 --- /dev/null +++ b/tests/src/storage_api/collections/lazy_vec.rs @@ -0,0 +1,634 @@ +#[cfg(test)] +mod tests { + use std::convert::TryInto; + + use borsh::{BorshDeserialize, BorshSerialize}; + use namada::types::address::{self, Address}; + use namada::types::storage; + use namada_tx_prelude::storage::KeySeg; + use namada_tx_prelude::storage_api::collections::{ + lazy_vec, LazyCollection, LazyVec, + }; + use proptest::prelude::*; + use proptest::prop_state_machine; + use proptest::state_machine::{AbstractStateMachine, StateMachineTest}; + use proptest::test_runner::Config; + use test_log::test; + + use crate::tx::tx_host_env; + use crate::vp::vp_host_env; + + prop_state_machine! { + #![proptest_config(Config { + // Instead of the default 256, we only run 5 because otherwise it + // takes too long and it's preferable to crank up the number of + // transitions instead, to allow each case to run for more epochs as + // some issues only manifest once the model progresses further. + // Additionally, more cases will be explored every time this test is + // executed in the CI. + cases: 5, + .. Config::default() + })] + #[test] + fn lazy_vec_api_state_machine_test(sequential 1..100 => ConcreteLazyVecState); + } + + /// Some borsh-serializable type with arbitrary fields to be used inside + /// LazyVec state machine test + #[derive( + Clone, + Debug, + BorshSerialize, + BorshDeserialize, + PartialEq, + Eq, + PartialOrd, + Ord, + )] + struct TestVecItem { + x: u64, + y: bool, + } + + /// A `StateMachineTest` implemented on this struct manipulates it with + /// `Transition`s, which are also being accumulated into + /// `current_transitions`. 
It then: + /// + /// - checks its state against an in-memory `std::collections::Vec` + /// - runs validation and checks that the `LazyVec::Action`s reported from + /// validation match with transitions that were applied + /// + /// Additionally, one of the transitions is to commit a block and/or + /// transaction, during which the currently accumulated state changes are + /// persisted, or promoted from transaction write log to block's write log. + #[derive(Debug)] + struct ConcreteLazyVecState { + /// Address is used to prefix the storage key of the `lazy_vec` in + /// order to simulate a transaction and a validity predicate + /// check from changes on the `lazy_vec` + address: Address, + /// In the test, we apply the same transitions on the `lazy_vec` as on + /// `eager_vec` to check that `lazy_vec`'s state is consistent with + /// `eager_vec`. + eager_vec: Vec, + /// Handle to a lazy vec + lazy_vec: LazyVec, + /// Valid LazyVec changes in the current transaction + current_transitions: Vec>, + } + + #[derive(Clone, Debug)] + struct AbstractLazyVecState { + /// Valid LazyVec changes in the current transaction + valid_transitions: Vec>, + /// Valid LazyVec changes committed to storage + committed_transitions: Vec>, + } + + /// Possible transitions that can modify a [`LazyVec`]. This roughly + /// corresponds to the methods that have `StorageWrite` access and is very + /// similar to [`Action`] + #[derive(Clone, Debug)] + pub enum Transition { + /// Commit all valid transitions in the current transaction + CommitTx, + /// Commit all valid transitions in the current transaction and also + /// commit the current block + CommitTxAndBlock, + /// Push a value `T` into a [`LazyVec`] + Push(T), + /// Pop a value from a [`LazyVec`] + Pop, + /// Update a value `T` at index from pre to post state in a + /// [`LazyVec`] + Update { + /// index at which the value is updated + index: lazy_vec::Index, + /// value to update the element to + value: T, + }, + } + + impl AbstractStateMachine for AbstractLazyVecState { + type State = Self; + type Transition = Transition; + + fn init_state() -> BoxedStrategy { + Just(Self { + valid_transitions: vec![], + committed_transitions: vec![], + }) + .boxed() + } + + // Apply a random transition to the state + fn transitions(state: &Self::State) -> BoxedStrategy { + let length = state.len(); + if length == 0 { + prop_oneof![ + 1 => Just(Transition::CommitTx), + 1 => Just(Transition::CommitTxAndBlock), + 3 => arb_test_vec_item().prop_map(Transition::Push) + ] + .boxed() + } else { + let arb_index = || { + let indices: Vec = (0..length).collect(); + proptest::sample::select(indices) + }; + prop_oneof![ + 1 => Just(Transition::CommitTx), + 1 => Just(Transition::CommitTxAndBlock), + 3 => (arb_index(), arb_test_vec_item()).prop_map( + |(index, value)| Transition::Update { index, value } + ), + 3 => Just(Transition::Pop), + 5 => arb_test_vec_item().prop_map(Transition::Push), + ] + .boxed() + } + } + + fn apply_abstract( + mut state: Self::State, + transition: &Self::Transition, + ) -> Self::State { + match transition { + Transition::CommitTx => { + let valid_actions_to_commit = + std::mem::take(&mut state.valid_transitions); + state + .committed_transitions + .extend(valid_actions_to_commit.into_iter()); + } + _ => state.valid_transitions.push(transition.clone()), + } + state + } + + fn preconditions( + state: &Self::State, + transition: &Self::Transition, + ) -> bool { + let length = state.len(); + if length == 0 { + // Ensure that the pop or update transitions are not 
applied to + // an empty state + !matches!( + transition, + Transition::Pop | Transition::Update { .. } + ) + } else if let Transition::Update { index, .. } = transition { + // Ensure that the update index is a valid one + *index < (length - 1) + } else { + true + } + } + } + + impl StateMachineTest for ConcreteLazyVecState { + type Abstract = AbstractLazyVecState; + type ConcreteState = Self; + + fn init_test( + _initial_state: ::State, + ) -> Self::ConcreteState { + // Init transaction env in which we'll be applying the transitions + tx_host_env::init(); + + // The lazy_vec's path must be prefixed by the address to be able + // to trigger a validity predicate on it + let address = address::testing::established_address_1(); + tx_host_env::with(|env| env.spawn_accounts([&address])); + let lazy_vec_prefix: storage::Key = address.to_db_key().into(); + + Self { + address, + eager_vec: vec![], + lazy_vec: LazyVec::open( + lazy_vec_prefix.push(&"arbitrary".to_string()).unwrap(), + ), + current_transitions: vec![], + } + } + + fn apply_concrete( + mut state: Self::ConcreteState, + transition: ::Transition, + ) -> Self::ConcreteState { + // Apply transitions in transaction env + let ctx = tx_host_env::ctx(); + + // Persist the transitions in the current tx, or clear previous ones + // if we're committing a tx + match &transition { + Transition::CommitTx | Transition::CommitTxAndBlock => { + state.current_transitions = vec![]; + } + _ => { + state.current_transitions.push(transition.clone()); + } + } + + // Transition application on lazy vec and post-conditions: + match &transition { + Transition::CommitTx => { + // commit the tx without committing the block + tx_host_env::with(|env| env.write_log.commit_tx()); + } + Transition::CommitTxAndBlock => { + // commit the tx and the block + tx_host_env::commit_tx_and_block(); + } + Transition::Push(value) => { + let old_len = state.lazy_vec.len(ctx).unwrap(); + + state.lazy_vec.push(ctx, value.clone()).unwrap(); + + // Post-conditions: + let new_len = state.lazy_vec.len(ctx).unwrap(); + let stored_value = + state.lazy_vec.get(ctx, new_len - 1).unwrap().unwrap(); + assert_eq!( + &stored_value, value, + "the new item must be added to the back" + ); + assert_eq!(old_len + 1, new_len, "length must increment"); + + state.assert_validation_accepted(new_len); + } + Transition::Pop => { + let old_len = state.lazy_vec.len(ctx).unwrap(); + + let popped = state.lazy_vec.pop(ctx).unwrap().unwrap(); + + // Post-conditions: + let new_len = state.lazy_vec.len(ctx).unwrap(); + assert_eq!(old_len, new_len + 1, "length must decrement"); + assert_eq!( + &popped, + state.eager_vec.last().unwrap(), + "popped element matches the last element in eager vec \ + before it's updated" + ); + + state.assert_validation_accepted(new_len); + } + Transition::Update { index, value } => { + let old_len = state.lazy_vec.len(ctx).unwrap(); + let old_val = + state.lazy_vec.get(ctx, *index).unwrap().unwrap(); + + state.lazy_vec.update(ctx, *index, value.clone()).unwrap(); + + // Post-conditions: + let new_len = state.lazy_vec.len(ctx).unwrap(); + let new_val = + state.lazy_vec.get(ctx, *index).unwrap().unwrap(); + assert_eq!(old_len, new_len, "length must not change"); + assert_eq!( + &old_val, + state.eager_vec.get(*index as usize).unwrap(), + "old value must match the value at the same index in \ + the eager vec before it's updated" + ); + assert_eq!( + &new_val, value, + "new value must match that which was passed into the \ + Transition::Update" + ); + + 
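// Editor's sketch (illustration only): the same update invariants on a
// plain std Vec, mirroring the post-conditions asserted above.
#[allow(dead_code)]
fn update_invariants_on_plain_vec() {
    let mut plain = vec![TestVecItem { x: 0, y: false }];
    let old_len = plain.len();
    // an in-place update replaces the element but never changes length
    plain[0] = TestVecItem { x: 9, y: true };
    assert_eq!(plain.len(), old_len);
    assert_eq!(plain[0], TestVecItem { x: 9, y: true });
}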
state.assert_validation_accepted(new_len); + } + } + + // Apply transition in the eager vec for comparison + apply_transition_on_eager_vec(&mut state.eager_vec, &transition); + + // Global post-conditions: + + // All items in eager vec must be present in lazy vec + for (ix, expected_item) in state.eager_vec.iter().enumerate() { + let got = state + .lazy_vec + .get(ctx, ix as lazy_vec::Index) + .unwrap() + .expect("The expected item must be present in lazy vec"); + assert_eq!(expected_item, &got, "at index {ix}"); + } + + // All items in lazy vec must be present in eager vec + for (ix, expected_item) in + state.lazy_vec.iter(ctx).unwrap().enumerate() + { + let expected_item = expected_item.unwrap(); + let got = state + .eager_vec + .get(ix) + .expect("The expected item must be present in eager vec"); + assert_eq!(&expected_item, got, "at index {ix}"); + } + + state + } + } + + impl AbstractLazyVecState { + /// Find the length of the vector from the applied transitions + fn len(&self) -> u64 { + (vec_len_diff_from_transitions(self.committed_transitions.iter()) + + vec_len_diff_from_transitions(self.valid_transitions.iter())) + .try_into() + .expect( + "It shouldn't be possible to underflow length from all \ + transactions applied in abstract state", + ) + } + } + + /// Find the difference in length of the vector from the applied transitions + fn vec_len_diff_from_transitions<'a>( + all_transitions: impl Iterator<Item = &'a Transition<TestVecItem>>, + ) -> i64 { + let mut push_count: i64 = 0; + let mut pop_count: i64 = 0; + + for trans in all_transitions { + match trans { + Transition::CommitTx + | Transition::CommitTxAndBlock + | Transition::Update { .. } => {} + Transition::Push(_) => push_count += 1, + Transition::Pop => pop_count += 1, + } + } + push_count - pop_count + } + + impl ConcreteLazyVecState { + fn assert_validation_accepted(&self, new_vec_len: u64) { + // Init the VP env from tx env in which we applied the vec + // transitions + let tx_env = tx_host_env::take(); + vp_host_env::init_from_tx(self.address.clone(), tx_env, |_| {}); + + // Simulate a validity predicate run using the lazy vec's validation + // helpers + let changed_keys = + vp_host_env::with(|env| env.all_touched_storage_keys()); + + let mut validation_builder = None; + + // Push followed by pop is a no-op, in which case we'd still see the + // changed keys for these actions, but they wouldn't affect the + // validation result and they never get persisted, yet we'd still + // see them as changed keys here. To guard against this case, + // we check that `vec_len_diff_from_transitions` is not zero. + let vec_len_diff = + vec_len_diff_from_transitions(self.current_transitions.iter()); + + // To help debug validation issues... + dbg!( + &self.current_transitions, + &changed_keys + .iter() + .map(storage::Key::to_string) + .collect::<Vec<_>>() + ); + + for key in &changed_keys { + let is_sub_key = self + .lazy_vec + .accumulate( + vp_host_env::ctx(), + &mut validation_builder, + key, + ) + .unwrap(); + + assert!( + is_sub_key, + "We're only modifying the lazy_vec's keys here.
Key: \ + \"{key}\", vec length diff {vec_len_diff}" + ); + } + if !changed_keys.is_empty() && vec_len_diff != 0 { + assert!( + validation_builder.is_some(), + "If some keys were changed, the builder must get filled in" + ); + let actions = LazyVec::<TestVecItem>::validate( + validation_builder.unwrap(), + ) + .expect( + "With valid transitions only, validation should always \ + pass", + ); + let mut actions_to_check = actions.clone(); + + // Check that every transition has a corresponding action from + // validation. We drop the found actions to check that all + // actions are matched too. + let current_transitions = normalize_transitions( + &self.current_transitions, + new_vec_len, + ); + for transition in &current_transitions { + match transition { + Transition::CommitTx | Transition::CommitTxAndBlock => { + } + Transition::Push(expected_val) => { + let mut ix = 0; + while ix < actions_to_check.len() { + if let lazy_vec::Action::Push(val) = + &actions_to_check[ix] + { + if expected_val == val { + actions_to_check.remove(ix); + break; + } + } + ix += 1; + } + } + Transition::Pop => { + let mut ix = 0; + while ix < actions_to_check.len() { + if let lazy_vec::Action::Pop(_val) = + &actions_to_check[ix] + { + actions_to_check.remove(ix); + break; + } + ix += 1; + } + } + Transition::Update { + index: expected_index, + value, + } => { + let mut ix = 0; + while ix < actions_to_check.len() { + if let lazy_vec::Action::Update { + index, + pre: _, + post, + } = &actions_to_check[ix] + { + if expected_index == index && post == value + { + actions_to_check.remove(ix); + break; + } + } + ix += 1; + } + } + } + } + + assert!( + actions_to_check.is_empty(), + "All the actions reported from validation {actions:#?} \ + should have been matched with SM transitions \ + {current_transitions:#?}, but these actions didn't \ + match: {actions_to_check:#?}", + ) + } + + // Put the tx_env back before checking the result + tx_host_env::set_from_vp_env(vp_host_env::take()); + } + } + + /// Generate an arbitrary `TestVecItem` + fn arb_test_vec_item() -> impl Strategy<Value = TestVecItem> { + (any::<u64>(), any::<bool>()).prop_map(|(x, y)| TestVecItem { x, y }) + } + + /// Apply `Transition` on an eager `Vec`. + fn apply_transition_on_eager_vec( + vec: &mut Vec<TestVecItem>, + transition: &Transition<TestVecItem>, + ) { + match transition { + Transition::CommitTx | Transition::CommitTxAndBlock => {} + Transition::Push(value) => vec.push(value.clone()), + Transition::Pop => { + let _popped = vec.pop(); + } + Transition::Update { index, value } => { + let entry = vec.get_mut(*index as usize).unwrap(); + *entry = value.clone(); + } + } + } + + /// Normalize transitions: + /// - pop at ix + push(val) at ix -> update(ix, val) + /// - push(val) at ix + update(ix, new_val) -> push(new_val) at ix + /// - update(ix, val) + update(ix, new_val) -> update(ix, new_val) + /// + /// Note that the normalizable transition pairs do not have to be directly + /// next to each other, but their order does matter.
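// Editor's worked example (illustrative test, not part of the change
// above): a concrete trace of the collapse rules documented above,
// exercised against `normalize_transitions` defined just below. It
// starts from a committed vec of length 2; the net length change is +1
// (two pushes, one pop), so the post-state length passed in is 3.
#[test]
fn normalize_transitions_worked_example() {
    let a = TestVecItem { x: 1, y: false };
    let b = TestVecItem { x: 2, y: true };
    let c = TestVecItem { x: 3, y: false };
    let normalized = normalize_transitions(
        &[
            Transition::Pop,
            Transition::Push(a),
            Transition::Push(b.clone()),
            Transition::Update { index: 1, value: c.clone() },
        ],
        3,
    );
    // pop + push(a) collapses to update(1, a), which update(1, c) then
    // overwrites; push(b) stays as-is
    assert!(matches!(
        normalized.as_slice(),
        [Transition::Update { index: 1, value }, Transition::Push(pushed)]
            if *value == c && *pushed == b
    ));
}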
+ fn normalize_transitions( + transitions: &[Transition], + new_vec_len: u64, + ) -> Vec> { + let stack_start_pos = ((new_vec_len as i64) + - vec_len_diff_from_transitions(transitions.iter())) + as u64; + let mut stack_pos = stack_start_pos; + let mut collapsed = vec![]; + 'outer: for transition in transitions { + match transition { + Transition::CommitTx | Transition::CommitTxAndBlock => { + collapsed.push(transition.clone()) + } + Transition::Push(value) => { + // If there are some pops, the last one can be collapsed + // with this push + if stack_pos < stack_start_pos { + // Find the pop from the back + let mut found_ix = None; + for (ix, transition) in + collapsed.iter().enumerate().rev() + { + if let Transition::Pop = transition { + found_ix = Some(ix); + break; + } + } + let ix = found_ix.expect("Pop must be found"); + // pop at ix + push(val) at ix -> update(ix, val) + + // Replace the Pop with an Update and don't insert the + // Push + *collapsed.get_mut(ix).unwrap() = Transition::Update { + index: stack_pos, + value: value.clone(), + }; + } else { + collapsed.push(transition.clone()); + } + stack_pos += 1; + } + Transition::Pop => { + collapsed.push(transition.clone()); + stack_pos -= 1; + } + Transition::Update { index, value } => { + // If there are some pushes, check if one of them is at the + // same index as this update + if stack_pos > stack_start_pos { + let mut current_pos = stack_start_pos; + for (ix, collapsed_transition) in + collapsed.iter().enumerate() + { + match collapsed_transition { + Transition::CommitTx + | Transition::CommitTxAndBlock => {} + Transition::Push(_) => { + if ¤t_pos == index { + // push(val) at `ix` + update(ix, + // new_val) -> + // push(new_val) at `ix` + + // Replace the Push with the new Push of + // Update's + // value and don't insert the Update + *collapsed.get_mut(ix).unwrap() = + Transition::Push(value.clone()); + continue 'outer; + } + current_pos += 1; + } + Transition::Pop => { + current_pos -= 1; + } + Transition::Update { + index: prev_update_index, + value: _, + } => { + if index == prev_update_index { + // update(ix, val) + update(ix, new_val) + // -> update(ix, new_val) + + // Replace the Update with the new + // Update instead of inserting it + *collapsed.get_mut(ix).unwrap() = + transition.clone(); + continue 'outer; + } + } + } + } + } + collapsed.push(transition.clone()) + } + } + } + collapsed + } +} diff --git a/tests/src/storage_api/collections/mod.rs b/tests/src/storage_api/collections/mod.rs new file mode 100644 index 0000000000..f39b880c09 --- /dev/null +++ b/tests/src/storage_api/collections/mod.rs @@ -0,0 +1,3 @@ +mod lazy_map; +mod lazy_vec; +mod nested_lazy_map; diff --git a/tests/src/storage_api/collections/nested_lazy_map.rs b/tests/src/storage_api/collections/nested_lazy_map.rs new file mode 100644 index 0000000000..037decce46 --- /dev/null +++ b/tests/src/storage_api/collections/nested_lazy_map.rs @@ -0,0 +1,723 @@ +#[cfg(test)] +mod tests { + use std::collections::BTreeMap; + use std::convert::TryInto; + + use borsh::{BorshDeserialize, BorshSerialize}; + use namada::types::address::{self, Address}; + use namada::types::storage; + use namada_tx_prelude::storage::KeySeg; + use namada_tx_prelude::storage_api::collections::lazy_map::{ + NestedMap, NestedSubKey, SubKey, + }; + use namada_tx_prelude::storage_api::collections::{ + lazy_map, LazyCollection, LazyMap, + }; + use proptest::prelude::*; + use proptest::prop_state_machine; + use proptest::state_machine::{AbstractStateMachine, StateMachineTest}; + use 
proptest::test_runner::Config; + use test_log::test; + + use crate::tx::tx_host_env; + use crate::vp::vp_host_env; + + prop_state_machine! { + #![proptest_config(Config { + // Instead of the default 256, we only run 5 because otherwise it + // takes too long and it's preferable to crank up the number of + // transitions instead, to allow each case to run for more epochs as + // some issues only manifest once the model progresses further. + // Additionally, more cases will be explored every time this test is + // executed in the CI. + cases: 5, + .. Config::default() + })] + #[test] + fn nested_lazy_map_api_state_machine_test(sequential 1..100 => ConcreteLazyMapState); + } + + /// Some borsh-serializable type with arbitrary fields to be used inside + /// LazyMap state machine test + #[derive( + Clone, + Debug, + BorshSerialize, + BorshDeserialize, + PartialEq, + Eq, + PartialOrd, + Ord, + )] + struct TestVal { + x: u64, + y: bool, + } + + type KeyOuter = u64; + type KeyMiddle = i32; + type KeyInner = i8; + + type NestedTestMap = + NestedMap>>; + + type NestedEagerMap = + BTreeMap>>; + + /// A `StateMachineTest` implemented on this struct manipulates it with + /// `Transition`s, which are also being accumulated into + /// `current_transitions`. It then: + /// + /// - checks its state against an in-memory `std::collections::HashMap` + /// - runs validation and checks that the `LazyMap::Action`s reported from + /// validation match with transitions that were applied + /// + /// Additionally, one of the transitions is to commit a block and/or + /// transaction, during which the currently accumulated state changes are + /// persisted, or promoted from transaction write log to block's write log. + #[derive(Debug)] + struct ConcreteLazyMapState { + /// Address is used to prefix the storage key of the `lazy_map` in + /// order to simulate a transaction and a validity predicate + /// check from changes on the `lazy_map` + address: Address, + /// In the test, we apply the same transitions on the `lazy_map` as on + /// `eager_map` to check that `lazy_map`'s state is consistent with + /// `eager_map`. + eager_map: NestedEagerMap, + /// Handle to a lazy map with nested lazy collections + lazy_map: NestedTestMap, + /// Valid LazyMap changes in the current transaction + current_transitions: Vec, + } + + #[derive(Clone, Debug, Default)] + struct AbstractLazyMapState { + /// Valid LazyMap changes in the current transaction + valid_transitions: Vec, + /// Valid LazyMap changes committed to storage + committed_transitions: Vec, + } + + /// Possible transitions that can modify a [`NestedTestMap`]. 
+ /// This roughly corresponds to the methods that have `StorageWrite` + /// access and is very similar to [`Action`] + #[derive(Clone, Debug)] + enum Transition { + /// Commit all valid transitions in the current transaction + CommitTx, + /// Commit all valid transitions in the current transaction and also + /// commit the current block + CommitTxAndBlock, + /// Insert a key-val into a [`LazyMap`] + Insert(Key, TestVal), + /// Remove a key-val from a [`LazyMap`] + Remove(Key), + /// Update a value at key from pre to post state in a + /// [`LazyMap`] + Update(Key, TestVal), + } + + /// A key for transition + type Key = (KeyOuter, KeyMiddle, KeyInner); + + impl AbstractStateMachine for AbstractLazyMapState { + type State = Self; + type Transition = Transition; + + fn init_state() -> BoxedStrategy { + Just(Self::default()).boxed() + } + + // Apply a random transition to the state + fn transitions(state: &Self::State) -> BoxedStrategy { + let length = state.len(); + if length == 0 { + prop_oneof![ + 1 => Just(Transition::CommitTx), + 1 => Just(Transition::CommitTxAndBlock), + 3 => (arb_map_key(), arb_map_val()).prop_map(|(key, val)| Transition::Insert(key, val)) + ] + .boxed() + } else { + let keys = state.find_existing_keys(); + let arb_existing_map_key = + || proptest::sample::select(keys.clone()); + prop_oneof![ + 1 => Just(Transition::CommitTx), + 1 => Just(Transition::CommitTxAndBlock), + 3 => (arb_existing_map_key(), arb_map_val()).prop_map(|(key, val)| + Transition::Update(key, val)), + 3 => arb_existing_map_key().prop_map(Transition::Remove), + 5 => (arb_map_key().prop_filter( + "insert on non-existing keys only", + move |key| !keys.contains(key)), arb_map_val()) + .prop_map(|(key, val)| Transition::Insert(key, val)) + ] + .boxed() + } + } + + fn apply_abstract( + mut state: Self::State, + transition: &Self::Transition, + ) -> Self::State { + match transition { + Transition::CommitTx | Transition::CommitTxAndBlock => { + let valid_actions_to_commit = + std::mem::take(&mut state.valid_transitions); + state + .committed_transitions + .extend(valid_actions_to_commit.into_iter()); + } + _ => state.valid_transitions.push(transition.clone()), + } + state + } + + fn preconditions( + state: &Self::State, + transition: &Self::Transition, + ) -> bool { + let length = state.len(); + // Ensure that the remove or update transitions are not applied + // to an empty state + if length == 0 + && matches!( + transition, + Transition::Remove(_) | Transition::Update(_, _) + ) + { + return false; + } + match transition { + Transition::Update(key, _) | Transition::Remove(key) => { + let keys = state.find_existing_keys(); + // Ensure that the update/remove key is an existing one + keys.contains(key) + } + Transition::Insert(key, _) => { + let keys = state.find_existing_keys(); + // Ensure that the insert key is not an existing one + !keys.contains(key) + } + _ => true, + } + } + } + + impl StateMachineTest for ConcreteLazyMapState { + type Abstract = AbstractLazyMapState; + type ConcreteState = Self; + + fn init_test( + _initial_state: ::State, + ) -> Self::ConcreteState { + // Init transaction env in which we'll be applying the transitions + tx_host_env::init(); + + // The lazy_map's path must be prefixed by the address to be able + // to trigger a validity predicate on it + let address = address::testing::established_address_1(); + tx_host_env::with(|env| env.spawn_accounts([&address])); + let lazy_map_prefix: storage::Key = address.to_db_key().into(); + + Self { + address, + eager_map: BTreeMap::new(), + 
lazy_map: NestedTestMap::open( + lazy_map_prefix.push(&"arbitrary".to_string()).unwrap(), + ), + current_transitions: vec![], + } + } + + fn apply_concrete( + mut state: Self::ConcreteState, + transition: ::Transition, + ) -> Self::ConcreteState { + // Apply transitions in transaction env + let ctx = tx_host_env::ctx(); + + // Persist the transitions in the current tx, or clear previous ones + // if we're committing a tx + match &transition { + Transition::CommitTx | Transition::CommitTxAndBlock => { + state.current_transitions = vec![]; + } + _ => { + state.current_transitions.push(transition.clone()); + } + } + + // Transition application on lazy map and post-conditions: + match &transition { + Transition::CommitTx => { + // commit the tx without committing the block + tx_host_env::with(|env| env.write_log.commit_tx()); + } + Transition::CommitTxAndBlock => { + // commit the tx and the block + tx_host_env::commit_tx_and_block(); + } + Transition::Insert( + (key_outer, key_middle, key_inner), + value, + ) => { + let inner = state.lazy_map.at(key_outer).at(key_middle); + + inner.insert(ctx, *key_inner, value.clone()).unwrap(); + + // Post-conditions: + let stored_value = + inner.get(ctx, key_inner).unwrap().unwrap(); + assert_eq!( + &stored_value, value, + "the new item must be added to the back" + ); + + state.assert_validation_accepted(); + } + Transition::Remove((key_outer, key_middle, key_inner)) => { + let inner = state.lazy_map.at(key_outer).at(key_middle); + + let removed = + inner.remove(ctx, key_inner).unwrap().unwrap(); + + // Post-conditions: + assert_eq!( + &removed, + state + .eager_map + .get(key_outer) + .unwrap() + .get(key_middle) + .unwrap() + .get(key_inner) + .unwrap(), + "removed element matches the value in eager map \ + before it's updated" + ); + + state.assert_validation_accepted(); + } + Transition::Update( + (key_outer, key_middle, key_inner), + value, + ) => { + let inner = state.lazy_map.at(key_outer).at(key_middle); + + let old_val = inner.get(ctx, key_inner).unwrap().unwrap(); + + inner.insert(ctx, *key_inner, value.clone()).unwrap(); + + // Post-conditions: + let new_val = inner.get(ctx, key_inner).unwrap().unwrap(); + assert_eq!( + &old_val, + state + .eager_map + .get(key_outer) + .unwrap() + .get(key_middle) + .unwrap() + .get(key_inner) + .unwrap(), + "old value must match the value at the same key in \ + the eager map before it's updated" + ); + assert_eq!( + &new_val, value, + "new value must match that which was passed into the \ + Transition::Update" + ); + + state.assert_validation_accepted(); + } + } + + // Apply transition in the eager map for comparison + apply_transition_on_eager_map(&mut state.eager_map, &transition); + + // Global post-conditions: + + // All items in eager map must be present in lazy map + for (key_outer, middle) in state.eager_map.iter() { + for (key_middle, inner) in middle { + for (key_inner, expected_item) in inner { + let got = state + .lazy_map + .at(key_outer) + .at(key_middle) + .get(ctx, key_inner) + .unwrap() + .expect( + "The expected item must be present in lazy map", + ); + assert_eq!( + expected_item, &got, + "at key {key_outer}, {key_middle} {key_inner}" + ); + } + } + } + + // All items in lazy map must be present in eager map + for key_val in state.lazy_map.iter(ctx).unwrap() { + let ( + NestedSubKey::Data { + key: key_outer, + nested_sub_key: + NestedSubKey::Data { + key: key_middle, + nested_sub_key: SubKey::Data(key_inner), + }, + }, + expected_val, + ) = key_val.unwrap(); + let got = state + .eager_map + 
.get(&key_outer) + .unwrap() + .get(&key_middle) + .unwrap() + .get(&key_inner) + .expect("The expected item must be present in eager map"); + assert_eq!( + &expected_val, got, + "at key {key_outer}, {key_middle} {key_inner}" + ); + } + + state + } + } + + impl AbstractLazyMapState { + /// Find the length of the map from the applied transitions + fn len(&self) -> u64 { + (map_len_diff_from_transitions(self.committed_transitions.iter()) + + map_len_diff_from_transitions(self.valid_transitions.iter())) + .try_into() + .expect( + "It shouldn't be possible to underflow length from all \ + transactions applied in abstract state", + ) + } + + /// Build an eager map from the committed and current transitions + fn eager_map(&self) -> NestedEagerMap { + let mut eager_map = BTreeMap::new(); + for transition in &self.committed_transitions { + apply_transition_on_eager_map(&mut eager_map, transition); + } + for transition in &self.valid_transitions { + apply_transition_on_eager_map(&mut eager_map, transition); + } + eager_map + } + + /// Find the keys currently present in the map + fn find_existing_keys(&self) -> Vec<Key> { + let outer_map = self.eager_map(); + outer_map + .into_iter() + .fold(vec![], |acc, (outer, middle_map)| { + middle_map.into_iter().fold( + acc, + |mut acc, (middle, inner_map)| { + acc.extend( + inner_map + .into_iter() + .map(|(inner, _)| (outer, middle, inner)), + ); + acc + }, + ) + }) + } + } + + /// Find the difference in length of the map from the applied transitions + fn map_len_diff_from_transitions<'a>( + transitions: impl Iterator<Item = &'a Transition>, + ) -> i64 { + let mut insert_count: i64 = 0; + let mut remove_count: i64 = 0; + + for trans in transitions { + match trans { + Transition::CommitTx + | Transition::CommitTxAndBlock + | Transition::Update(_, _) => {} + Transition::Insert(_, _) => insert_count += 1, + Transition::Remove(_) => remove_count += 1, + } + } + insert_count - remove_count + } + + impl ConcreteLazyMapState { + fn assert_validation_accepted(&self) { + // Init the VP env from tx env in which we applied the map + // transitions + let tx_env = tx_host_env::take(); + vp_host_env::init_from_tx(self.address.clone(), tx_env, |_| {}); + + // Simulate a validity predicate run using the lazy map's validation + // helpers + let changed_keys = + vp_host_env::with(|env| env.all_touched_storage_keys()); + + let mut validation_builder = None; + + // An insert followed by a remove is a no-op, in which case we'd + // still see the changed keys for these actions, but they wouldn't + // affect the validation result and they never get persisted, yet + // we'd still see them as changed keys here. To guard against this + // case, we check that `map_len_diff_from_transitions` is not zero. + let map_len_diff = + map_len_diff_from_transitions(self.current_transitions.iter()); + + // To help debug validation issues... + dbg!( + &self.current_transitions, + &changed_keys + .iter() + .map(storage::Key::to_string) + .collect::<Vec<_>>() + ); + + for key in &changed_keys { + let is_sub_key = self + .lazy_map + .accumulate( + vp_host_env::ctx(), + &mut validation_builder, + key, + ) + .unwrap(); + + assert!( + is_sub_key, + "We're only modifying the lazy_map's keys here.
Key: \ + \"{key}\", map length diff {map_len_diff}" + ); + } + if !changed_keys.is_empty() && map_len_diff != 0 { + assert!( + validation_builder.is_some(), + "If some keys were changed, the builder must get filled in" + ); + let actions = + NestedTestMap::validate(validation_builder.unwrap()) + .unwrap(); + let mut actions_to_check = actions.clone(); + + // Check that every transition has a corresponding action from + // validation. We drop the found actions to check that all + // actions are matched too. + let current_transitions = + normalize_transitions(&self.current_transitions); + for transition in &current_transitions { + use lazy_map::Action; + use lazy_map::NestedAction::At; + + match transition { + Transition::CommitTx | Transition::CommitTxAndBlock => { + } + Transition::Insert(expected_key, expected_val) => { + for (ix, action) in + actions_to_check.iter().enumerate() + { + if let At( + key_outer, + At( + key_middle, + Action::Insert(key_inner, val), + ), + ) = action + { + let key = + (*key_outer, *key_middle, *key_inner); + if expected_key == &key + && expected_val == val + { + actions_to_check.remove(ix); + break; + } + } + } + } + Transition::Remove(expected_key) => { + for (ix, action) in + actions_to_check.iter().enumerate() + { + if let At( + key_outer, + At( + key_middle, + Action::Remove(key_inner, _val), + ), + ) = action + { + let key = + (*key_outer, *key_middle, *key_inner); + if expected_key == &key { + actions_to_check.remove(ix); + break; + } + } + } + } + Transition::Update(expected_key, value) => { + for (ix, action) in + actions_to_check.iter().enumerate() + { + if let At( + key_outer, + At( + key_middle, + Action::Update { + key: key_inner, + pre: _, + post, + }, + ), + ) = action + { + let key = + (*key_outer, *key_middle, *key_inner); + if expected_key == &key && post == value { + actions_to_check.remove(ix); + break; + } + } + } + } + } + } + + assert!( + actions_to_check.is_empty(), + "All the actions reported from validation {actions:#?} \ + should have been matched with SM transitions \ + {current_transitions:#?}, but these actions didn't \ + match: {actions_to_check:#?}", + ) + } + + // Put the tx_env back before checking the result + tx_host_env::set_from_vp_env(vp_host_env::take()); + } + } + + /// Generate an arbitrary `TestKey` + fn arb_map_key() -> impl Strategy<Value = TestKey> { + (any::<KeyOuter>(), any::<KeyMiddle>(), any::<KeyInner>()) + } + + /// Generate an arbitrary `TestVal` + fn arb_map_val() -> impl Strategy<Value = TestVal> { + (any::<u64>(), any::<bool>()).prop_map(|(x, y)| TestVal { x, y }) + } + + /// Apply `Transition` on an eager `Map`. 
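+    ///
+    /// A sketch of the intended effect on the nested `BTreeMap`s (editor's
+    /// illustration with hypothetical keys and values):
+    ///
+    /// ```text
+    /// Insert((1, 2, 3), v) on {}               => {1: {2: {3: v}}}
+    /// Update((1, 2, 3), w) on {1: {2: {3: v}}} => {1: {2: {3: w}}}
+    /// Remove((1, 2, 3))    on {1: {2: {3: w}}} => {1: {2: {}}}
+    /// ```
+    ///
+    /// Note that `Remove` keeps the now-empty intermediate maps around,
+    /// mirroring the `entry(..).or_insert_with(..)` calls below.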
+ fn apply_transition_on_eager_map( + map: &mut NestedEagerMap, + transition: &Transition, + ) { + match transition { + Transition::CommitTx | Transition::CommitTxAndBlock => {} + Transition::Insert((key_outer, key_middle, key_inner), value) + | Transition::Update((key_outer, key_middle, key_inner), value) => { + let middle = + map.entry(*key_outer).or_insert_with(Default::default); + let inner = + middle.entry(*key_middle).or_insert_with(Default::default); + inner.insert(*key_inner, value.clone()); + } + Transition::Remove((key_outer, key_middle, key_inner)) => { + let middle = + map.entry(*key_outer).or_insert_with(Default::default); + let inner = + middle.entry(*key_middle).or_insert_with(Default::default); + let _popped = inner.remove(key_inner); + } + } + } + + /// Normalize transitions: + /// - remove(key) + insert(key, val) -> update(key, val) + /// - insert(key, val) + update(key, new_val) -> insert(key, new_val) + /// - update(key, val) + update(key, new_val) -> update(key, new_val) + /// + /// Note that the normalizable transition pairs do not have to be directly + /// next to each other, but their order does matter. + fn normalize_transitions(transitions: &[Transition]) -> Vec<Transition> { + let mut collapsed = vec![]; + 'outer: for transition in transitions { + match transition { + Transition::CommitTx + | Transition::CommitTxAndBlock + | Transition::Remove(_) => collapsed.push(transition.clone()), + Transition::Insert(key, val) => { + for (ix, collapsed_transition) in + collapsed.iter().enumerate() + { + if let Transition::Remove(remove_key) = + collapsed_transition + { + if key == remove_key { + // remove(key) + insert(key, val) -> update(key, + // val) + + // Replace the Remove with an Update instead of + // inserting the Insert + *collapsed.get_mut(ix).unwrap() = + Transition::Update(*key, val.clone()); + continue 'outer; + } + } + } + collapsed.push(transition.clone()); + } + Transition::Update(key, value) => { + for (ix, collapsed_transition) in + collapsed.iter().enumerate() + { + if let Transition::Insert(insert_key, _) = + collapsed_transition + { + if key == insert_key { + // insert(key, val) + update(key, new_val) -> + // insert(key, new_val) + + // Replace the insert with the new update's + // value instead of inserting it + *collapsed.get_mut(ix).unwrap() = + Transition::Insert(*key, value.clone()); + continue 'outer; + } + } else if let Transition::Update(update_key, _) = + collapsed_transition + { + if key == update_key { + // update(key, val) + update(key, new_val) -> + // update(key, new_val) + + // Replace the previous update with the new + // value instead of appending another update + *collapsed.get_mut(ix).unwrap() = + Transition::Update(*key, value.clone()); + continue 'outer; + } + } + } + collapsed.push(transition.clone()); + } + } + } + collapsed + } +} diff --git a/tests/src/storage_api/mod.rs b/tests/src/storage_api/mod.rs new file mode 100644 index 0000000000..bc487bd59e --- /dev/null +++ b/tests/src/storage_api/mod.rs @@ -0,0 +1 @@ +mod collections; diff --git a/tests/src/vm_host_env/ibc.rs b/tests/src/vm_host_env/ibc.rs index 13e7bd3882..72b33f9e5e 100644 --- a/tests/src/vm_host_env/ibc.rs +++ b/tests/src/vm_host_env/ibc.rs @@ -1,5 +1,5 @@ use core::time::Duration; -use std::collections::{BTreeSet, HashMap}; +use std::collections::HashMap; use std::str::FromStr; use namada::ibc::applications::ics20_fungible_token_transfer::msgs::transfer::MsgTransfer; @@ -60,24 +60,23 @@ use namada::ledger::ibc::vp::{ use namada::ledger::native_vp::{Ctx, NativeVp}; use 
namada::ledger::storage::mockdb::MockDB; use namada::ledger::storage::Sha256Hasher; +use namada::ledger::tx_env::TxEnv; use namada::proto::Tx; use namada::tendermint_proto::Protobuf; use namada::types::address::{self, Address, InternalAddress}; use namada::types::ibc::data::FungibleTokenPacketData; -use namada::types::ibc::IbcEvent; -use namada::types::storage::{BlockHash, BlockHeight, Key}; -use namada::types::time::Rfc3339String; +use namada::types::storage::{self, BlockHash, BlockHeight}; use namada::types::token::{self, Amount}; use namada::vm::{wasm, WasmCacheRwAccess}; -use tempfile::TempDir; +use namada_tx_prelude::StorageWrite; -use crate::tx::*; +use crate::tx::{self, *}; const VP_ALWAYS_TRUE_WASM: &str = "../wasm_for_tests/vp_always_true.wasm"; +const ADDRESS: Address = Address::Internal(InternalAddress::Ibc); pub struct TestIbcVp<'a> { pub ibc: Ibc<'a, MockDB, Sha256Hasher, WasmCacheRwAccess>, - pub keys_changed: BTreeSet<Key>, } impl<'a> TestIbcVp<'a> { @@ -85,14 +84,16 @@ &self, tx_data: &[u8], ) -> std::result::Result<bool, namada::ledger::ibc::vp::Error> { - self.ibc - .validate_tx(tx_data, &self.keys_changed, &BTreeSet::new()) + self.ibc.validate_tx( + tx_data, + self.ibc.ctx.keys_changed, + self.ibc.ctx.verifiers, + ) } } pub struct TestIbcTokenVp<'a> { pub token: IbcToken<'a, MockDB, Sha256Hasher, WasmCacheRwAccess>, - pub keys_changed: BTreeSet<Key>, } impl<'a> TestIbcTokenVp<'a> { @@ -100,82 +101,19 @@ &self, tx_data: &[u8], ) -> std::result::Result<bool, namada::ledger::ibc::vp::IbcTokenError> { - self.token - .validate_tx(tx_data, &self.keys_changed, &BTreeSet::new()) + self.token.validate_tx( + tx_data, + self.token.ctx.keys_changed, + self.token.ctx.verifiers, + ) } } -pub struct TestIbcActions; - -impl IbcActions for TestIbcActions { - /// Read IBC-related data - fn read_ibc_data(&self, key: &Key) -> Option<Vec<u8>> { - tx_host_env::read_bytes(key.to_string()) - } - - /// Write IBC-related data - fn write_ibc_data(&self, key: &Key, data: impl AsRef<[u8]>) { - tx_host_env::write_bytes(key.to_string(), data) - } - - /// Delete IBC-related data - fn delete_ibc_data(&self, key: &Key) { - tx_host_env::delete(key.to_string()) - } - - /// Emit an IBC event - fn emit_ibc_event(&self, event: IbcEvent) { - tx_host_env::emit_ibc_event(&event) - } - - fn transfer_token( - &self, - src: &Address, - dest: &Address, - token: &Address, - amount: Amount, - ) { - let src_key = token::balance_key(token, src); - let dest_key = token::balance_key(token, dest); - let src_bal: Option<Amount> = tx_host_env::read(&src_key.to_string()); - let mut src_bal = src_bal.unwrap_or_else(|| match src { - Address::Internal(InternalAddress::IbcMint) => Amount::max(), - _ => unreachable!(), - }); - src_bal.spend(&amount); - let mut dest_bal: Amount = - tx_host_env::read(&dest_key.to_string()).unwrap_or_default(); - dest_bal.receive(&amount); - match src { - Address::Internal(InternalAddress::IbcMint) => { - tx_host_env::write_temp(&src_key.to_string(), src_bal) - } - Address::Internal(InternalAddress::IbcBurn) => unreachable!(), - _ => tx_host_env::write(&src_key.to_string(), src_bal), - } - match dest { - Address::Internal(InternalAddress::IbcMint) => unreachable!(), - Address::Internal(InternalAddress::IbcBurn) => { - tx_host_env::write_temp(&dest_key.to_string(), dest_bal) - } - _ => tx_host_env::write(&dest_key.to_string(), dest_bal), - } - } - - fn get_height(&self) -> BlockHeight { - tx_host_env::get_block_height() - } - - fn get_header_time(&self) -> Rfc3339String { - tx_host_env::get_block_time() - } -} - -/// Initialize IBC VP by running a 
transaction. -pub fn init_ibc_vp_from_tx<'a>( +/// Validate an IBC transaction with IBC VP. +pub fn validate_ibc_vp_from_tx<'a>( tx_env: &'a TestTxEnv, tx: &'a Tx, -) -> (TestIbcVp<'a>, TempDir) { +) -> std::result::Result<bool, namada::ledger::ibc::vp::Error> { let (verifiers, keys_changed) = tx_env .write_log .verifiers_and_changed_keys(&tx_env.verifiers); @@ -186,27 +124,30 @@ pub fn init_ibc_vp_from_tx<'a>( addr, verifiers ); } - let (vp_wasm_cache, vp_cache_dir) = + let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let ctx = Ctx::new( + &ADDRESS, &tx_env.storage, &tx_env.write_log, tx, VpGasMeter::new(0), + &keys_changed, + &verifiers, vp_wasm_cache, ); let ibc = Ibc { ctx }; - (TestIbcVp { ibc, keys_changed }, vp_cache_dir) + TestIbcVp { ibc }.validate(tx.data.as_ref().unwrap()) } -/// Initialize the native token VP for the given address -pub fn init_token_vp_from_tx<'a>( +/// Validate the native token VP for the given address +pub fn validate_token_vp_from_tx<'a>( tx_env: &'a TestTxEnv, tx: &'a Tx, addr: &Address, -) -> (TestIbcTokenVp<'a>, TempDir) { +) -> std::result::Result<bool, namada::ledger::ibc::vp::IbcTokenError> { let (verifiers, keys_changed) = tx_env .write_log .verifiers_and_changed_keys(&tx_env.verifiers); @@ -217,26 +158,57 @@ pub fn init_token_vp_from_tx<'a>( addr, verifiers ); } - let (vp_wasm_cache, vp_cache_dir) = + let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); let ctx = Ctx::new( + &ADDRESS, + &tx_env.storage, + &tx_env.write_log, + tx, + VpGasMeter::new(0), + &keys_changed, + &verifiers, + vp_wasm_cache, + ); + let token = IbcToken { ctx }; + + TestIbcTokenVp { token }.validate(tx.data.as_ref().unwrap()) +} /// Initialize the test storage. Requires initialized [`tx_host_env::ENV`]. 
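+///
+/// A usage sketch (editor's illustration; the env must be set up first):
+///
+/// ```text
+/// tx_host_env::init();
+/// let (token, account) = init_storage();
+/// ```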
pub fn init_storage() -> (Address, Address) { @@ -251,17 +223,18 @@ pub fn init_storage() -> (Address, Address) { // initialize a token let code = std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); - let token = tx_host_env::init_account(code.clone()); + let token = tx::ctx().init_account(code.clone()).unwrap(); // initialize an account - let account = tx_host_env::init_account(code); + let account = tx::ctx().init_account(code).unwrap(); let key = token::balance_key(&token, &account); let init_bal = Amount::from(1_000_000_000u64); - tx_host_env::write(key.to_string(), init_bal); + tx::ctx().write(&key, init_bal).unwrap(); (token, account) } -pub fn prepare_client() -> (ClientId, AnyClientState, HashMap<Key, Vec<u8>>) { +pub fn prepare_client() +-> (ClientId, AnyClientState, HashMap<storage::Key, Vec<u8>>) { let mut writes = HashMap::new(); let msg = msg_create_client(); @@ -292,7 +265,7 @@ pub fn prepare_client() -> (ClientId, AnyClientState, HashMap<Key, Vec<u8>>) { pub fn prepare_opened_connection( client_id: &ClientId, -) -> (ConnectionId, HashMap<Key, Vec<u8>>) { +) -> (ConnectionId, HashMap<storage::Key, Vec<u8>>) { let mut writes = HashMap::new(); let conn_id = connection_id(0); @@ -313,7 +286,7 @@ pub fn prepare_opened_connection( pub fn prepare_opened_channel( conn_id: &ConnectionId, is_ordered: bool, -) -> (PortId, ChannelId, HashMap<Key, Vec<u8>>) { +) -> (PortId, ChannelId, HashMap<storage::Key, Vec<u8>>) { let mut writes = HashMap::new(); // port diff --git a/tests/src/vm_host_env/mod.rs b/tests/src/vm_host_env/mod.rs index ce547520f7..e585ab7924 100644 --- a/tests/src/vm_host_env/mod.rs +++ b/tests/src/vm_host_env/mod.rs @@ -26,6 +26,7 @@ mod tests { use namada::ledger::ibc::vp::{ get_dummy_header as tm_dummy_header, Error as IbcError, }; + use namada::ledger::tx_env::TxEnv; use namada::proto::{SignedTxData, Tx}; use namada::tendermint_proto::Protobuf; use namada::types::key::*; @@ -33,16 +34,16 @@ mod tests { use namada::types::time::DateTimeUtc; use namada::types::token::{self, Amount}; use namada::types::{address, key}; - use namada_vm_env::tx_prelude::{ - BorshDeserialize, BorshSerialize, KeyValIterator, + use namada_tx_prelude::{ + BorshDeserialize, BorshSerialize, StorageRead, StorageWrite, }; - use namada_vm_env::vp_prelude::{PostKeyValIterator, PreKeyValIterator}; + use namada_vp_prelude::VpEnv; use prost::Message; use test_log::test; - use super::ibc; - use super::tx::*; - use super::vp::*; + use super::{ibc, tx, vp}; + use crate::tx::{tx_host_env, TestTxEnv}; + use crate::vp::{vp_host_env, TestVpEnv}; // paths to the WASMs used for tests const VP_ALWAYS_TRUE_WASM: &str = "../wasm_for_tests/vp_always_true.wasm"; @@ -53,8 +54,8 @@ mod tests { // The environment must be initialized first tx_host_env::init(); - let key = "key"; - let read_value: Option<String> = tx_host_env::read(key); + let key = storage::Key::parse("key").unwrap(); + let read_value: Option<String> = tx::ctx().read(&key).unwrap(); assert_eq!( None, read_value, "Trying to read a key that doesn't exists shouldn't find any value" @@ -62,9 +63,9 @@ ); // Write some value let value = "test".repeat(4); - tx_host_env::write(key, value.clone()); + tx::ctx().write(&key, value.clone()).unwrap(); - let read_value: Option<String> = tx_host_env::read(key); + let read_value: Option<String> = tx::ctx().read(&key).unwrap(); assert_eq!( Some(value), read_value, @@ -73,8 +74,8 @@ ); let value = vec![1_u8; 1000]; - tx_host_env::write(key, value.clone()); - let read_value: Option<Vec<u8>> = tx_host_env::read(key); + tx::ctx().write(&key, value.clone()).unwrap(); + let read_value: Option<Vec<u8>> = tx::ctx().read(&key).unwrap(); assert_eq!( + Some(value), 
read_value, @@ -87,18 +88,18 @@ // The environment must be initialized first tx_host_env::init(); - let key = "key"; + let key = storage::Key::parse("key").unwrap(); assert!( - !tx_host_env::has_key(key), + !tx::ctx().has_key(&key).unwrap(), "Before a key-value is written, its key shouldn't be found" ); // Write some value let value = "test".to_string(); - tx_host_env::write(key, value); + tx::ctx().write(&key, value).unwrap(); assert!( - tx_host_env::has_key(key), + tx::ctx().has_key(&key).unwrap(), "After a key-value has been written, its key should be found" ); } @@ -112,28 +113,28 @@ tx_host_env::set(env); // Trying to delete a key that doesn't exists should be a no-op - let key = "key"; - tx_host_env::delete(key); + let key = storage::Key::parse("key").unwrap(); + tx::ctx().delete(&key).unwrap(); let value = "test".to_string(); - tx_host_env::write(key, value); + tx::ctx().write(&key, value).unwrap(); assert!( - tx_host_env::has_key(key), + tx::ctx().has_key(&key).unwrap(), "After a key-value has been written, its key should be found" ); // Then delete it - tx_host_env::delete(key); + tx::ctx().delete(&key).unwrap(); assert!( - !tx_host_env::has_key(key), + !tx::ctx().has_key(&key).unwrap(), "After a key has been deleted, its key shouldn't be found" ); // Trying to delete a validity predicate should fail - let key = storage::Key::validity_predicate(&test_account).to_string(); + let key = storage::Key::validity_predicate(&test_account); assert!( - panic::catch_unwind(|| { tx_host_env::delete(key) }) + panic::catch_unwind(|| { tx::ctx().delete(&key).unwrap() }) .err() .map(|a| a.downcast_ref::<String>().cloned().unwrap()) .unwrap() @@ -146,19 +147,24 @@ // The environment must be initialized first tx_host_env::init(); - let iter: KeyValIterator<Vec<u8>> = tx_host_env::iter_prefix("empty"); - assert_eq!( - iter.count(), - 0, + let empty_key = storage::Key::parse("empty").unwrap(); + let mut iter = + namada_tx_prelude::iter_prefix_bytes(tx::ctx(), &empty_key) + .unwrap(); + assert!( + iter.next().is_none(), "Trying to iter a prefix that doesn't have any matching keys \ should yield an empty iterator." 
); - // Write some values directly into the storage first - let prefix = Key::parse("prefix").unwrap(); + let prefix = storage::Key::parse("prefix").unwrap(); + // We'll write sub-keys in some random order to check the prefix iter's order + let sub_keys = [2_i32, 1, i32::MAX, -1, 260, -2, i32::MIN, 5, 0]; + + // Write the values directly into the storage first tx_host_env::with(|env| { - for i in 0..10_i32 { - let key = prefix.join(&Key::parse(i.to_string()).unwrap()); + for i in sub_keys.iter() { + let key = prefix.push(i).unwrap(); let value = i.try_to_vec().unwrap(); env.storage.write(&key, value).unwrap(); } @@ -166,10 +172,29 @@ }); // Then try to iterate over their prefix - let iter: KeyValIterator<i32> = - tx_host_env::iter_prefix(prefix.to_string()); - let expected = (0..10).map(|i| (format!("{}/{}", prefix, i), i)); - itertools::assert_equal(iter.sorted(), expected.sorted()); + let iter = namada_tx_prelude::iter_prefix(tx::ctx(), &prefix) + .unwrap() + .map(Result::unwrap); + + // The order has to be sorted by sub-key value + let expected = sub_keys + .iter() + .sorted() + .map(|i| (prefix.push(i).unwrap(), *i)); + itertools::assert_equal(iter, expected); + + // Try to iterate over their prefix in reverse + let iter = namada_tx_prelude::rev_iter_prefix(tx::ctx(), &prefix) + .unwrap() + .map(Result::unwrap); + + // The order has to be reverse sorted by sub-key value + let expected = sub_keys + .iter() + .sorted() + .rev() + .map(|i| (prefix.push(i).unwrap(), *i)); + itertools::assert_equal(iter, expected); } #[test] @@ -182,7 +207,7 @@ "pre-condition" ); let verifier = address::testing::established_address_1(); - tx_host_env::insert_verifier(&verifier); + tx::ctx().insert_verifier(&verifier).unwrap(); assert!( tx_host_env::with(|env| env.verifiers.contains(&verifier)), "The verifier should have been inserted" @@ -201,7 +226,7 @@ tx_host_env::init(); let code = vec![]; - tx_host_env::init_account(code); + tx::ctx().init_account(code).unwrap(); } #[test] @@ -211,7 +236,7 @@ let code = std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); - tx_host_env::init_account(code); + tx::ctx().init_account(code).unwrap(); } #[test] @@ -220,19 +245,19 @@ tx_host_env::init(); assert_eq!( - tx_host_env::get_chain_id(), + tx::ctx().get_chain_id().unwrap(), tx_host_env::with(|env| env.storage.get_chain_id().0) ); assert_eq!( - tx_host_env::get_block_height(), + tx::ctx().get_block_height().unwrap(), tx_host_env::with(|env| env.storage.get_block_height().0) ); assert_eq!( - tx_host_env::get_block_hash(), + tx::ctx().get_block_hash().unwrap(), tx_host_env::with(|env| env.storage.get_block_hash().0) ); assert_eq!( - tx_host_env::get_block_epoch(), + tx::ctx().get_block_epoch().unwrap(), tx_host_env::with(|env| env.storage.get_current_epoch().0) ); } @@ -245,16 +270,16 @@ // We can add some data to the environment let key_raw = "key"; - let key = Key::parse(key_raw).unwrap(); + let key = storage::Key::parse(key_raw).unwrap(); let value = "test".to_string(); let value_raw = value.try_to_vec().unwrap(); vp_host_env::with(|env| { env.write_log.write(&key, value_raw.clone()).unwrap() }); - let read_pre_value: Option<String> = vp_host_env::read_pre(key_raw); + let read_pre_value: Option<String> = vp::CTX.read_pre(&key).unwrap(); assert_eq!(None, read_pre_value); - let read_post_value: Option<String> = vp_host_env::read_post(key_raw); + let read_post_value: Option<String> = vp::CTX.read_post(&key).unwrap(); assert_eq!(Some(value), read_post_value); } @@ -263,12 -288,11 
@@ mod tests { let mut tx_env = TestTxEnv::default(); let addr = address::testing::established_address_1(); - let addr_key = Key::from(addr.to_db_key()); + let addr_key = storage::Key::from(addr.to_db_key()); // Write some value to storage let existing_key = addr_key.join(&Key::parse("existing_key_raw").unwrap()); - let existing_key_raw = existing_key.to_string(); let existing_value = vec![2_u8; 1000]; // Values written to storage have to be encoded with Borsh let existing_value_encoded = existing_value.try_to_vec().unwrap(); @@ -280,25 +304,24 @@ // In a transaction, write override the existing key's value and add // another key-value let override_value = "override".to_string(); - let new_key = - addr_key.join(&Key::parse("new_key").unwrap()).to_string(); + let new_key = addr_key.join(&Key::parse("new_key").unwrap()); let new_value = "vp".repeat(4); // Initialize the VP environment via a transaction vp_host_env::init_from_tx(addr, tx_env, |_addr| { // Override the existing key - tx_host_env::write(&existing_key_raw, &override_value); + tx::ctx().write(&existing_key, &override_value).unwrap(); // Write the new key-value - tx_host_env::write(&new_key, new_value.clone()); + tx::ctx().write(&new_key, new_value.clone()).unwrap(); }); assert!( - vp_host_env::has_key_pre(&existing_key_raw), + vp::CTX.has_key_pre(&existing_key).unwrap(), "The existing key before transaction should be found" ); let pre_existing_value: Option<Vec<u8>> = - vp_host_env::read_pre(&existing_key_raw); + vp::CTX.read_pre(&existing_key).unwrap(); assert_eq!( Some(existing_value), pre_existing_value, @@ -307,10 +330,11 @@ ); assert!( - !vp_host_env::has_key_pre(&new_key), + !vp::CTX.has_key_pre(&new_key).unwrap(), "The new key before transaction shouldn't be found" ); - let pre_new_value: Option<Vec<u8>> = vp_host_env::read_pre(&new_key); + let pre_new_value: Option<Vec<u8>> = + vp::CTX.read_pre(&new_key).unwrap(); assert_eq!( None, pre_new_value, "The new value read from state before transaction shouldn't yet \ @@ -318,11 +342,11 @@ ); assert!( - vp_host_env::has_key_post(&existing_key_raw), + vp::CTX.has_key_post(&existing_key).unwrap(), "The existing key after transaction should still be found" ); let post_existing_value: Option<String> = - vp_host_env::read_post(&existing_key_raw); + vp::CTX.read_post(&existing_key).unwrap(); assert_eq!( Some(override_value), post_existing_value, @@ -331,10 +355,11 @@ ); assert!( - vp_host_env::has_key_post(&new_key), + vp::CTX.has_key_post(&new_key).unwrap(), "The new key after transaction should be found" ); - let post_new_value: Option<String> = vp_host_env::read_post(&new_key); + let post_new_value: Option<String> = + vp::CTX.read_post(&new_key).unwrap(); assert_eq!( Some(new_value), post_new_value, @@ -348,12 +373,15 @@ let mut tx_env = TestTxEnv::default(); let addr = address::testing::established_address_1(); - let addr_key = Key::from(addr.to_db_key()); + let addr_key = storage::Key::from(addr.to_db_key()); - // Write some value to storage let prefix = addr_key.join(&Key::parse("prefix").unwrap()); - for i in 0..10_i32 { - let key = prefix.join(&Key::parse(i.to_string()).unwrap()); + // We'll write sub-keys in some random order to check the prefix iter's order + let sub_keys = [2_i32, 1, i32::MAX, -1, 260, -2, i32::MIN, 5, 0]; + + // Write some values to storage + for i in sub_keys.iter() { + let key = prefix.push(i).unwrap(); let value = i.try_to_vec().unwrap(); tx_env.storage.write(&key, value).unwrap(); } @@ -361,32 +389,54 @@ // In a transaction, write 
override the existing key's value and add // another key-value - let existing_key = prefix.join(&Key::parse(5.to_string()).unwrap()); - let existing_key_raw = existing_key.to_string(); - let new_key = prefix.join(&Key::parse(11.to_string()).unwrap()); - let new_key_raw = new_key.to_string(); + let existing_key = prefix.push(&5).unwrap(); + let new_key = prefix.push(&11).unwrap(); // Initialize the VP environment via a transaction vp_host_env::init_from_tx(addr, tx_env, |_addr| { // Override one of the existing keys - tx_host_env::write(&existing_key_raw, 100_i32); + tx::ctx().write(&existing_key, 100_i32).unwrap(); // Write the new key-value under the same prefix - tx_host_env::write(&new_key_raw, 11.try_to_vec().unwrap()); + tx::ctx().write(&new_key, 11_i32).unwrap(); }); - let iter_pre: PreKeyValIterator<i32> = - vp_host_env::iter_prefix_pre(prefix.to_string()); - let expected_pre = (0..10).map(|i| (format!("{}/{}", prefix, i), i)); - itertools::assert_equal(iter_pre.sorted(), expected_pre.sorted()); - - let iter_post: PostKeyValIterator<i32> = - vp_host_env::iter_prefix_post(prefix.to_string()); - let expected_post = (0..10).map(|i| { - let val = if i == 5 { 100 } else { i }; - (format!("{}/{}", prefix, i), val) + let ctx_pre = vp::CTX.pre(); + let iter_pre = namada_vp_prelude::iter_prefix(&ctx_pre, &prefix) + .unwrap() + .map(|item| item.unwrap()); + + // The order in pre has to be sorted by sub-key value + let expected_pre = sub_keys + .iter() + .sorted() + .map(|i| (prefix.push(i).unwrap(), *i)); + itertools::assert_equal(iter_pre, expected_pre); + + let ctx_post = vp::CTX.post(); + let iter_post = namada_vp_prelude::iter_prefix(&ctx_post, &prefix) + .unwrap() + .map(|item| item.unwrap()); + + // The order in post also has to be sorted + let expected_post = sub_keys.iter().sorted().map(|i| { + let val = if *i == 5 { 100 } else { *i }; + (prefix.push(i).unwrap(), val) }); - itertools::assert_equal(iter_post.sorted(), expected_post.sorted()); + itertools::assert_equal(iter_post, expected_post); + + // Try to iterate over their prefix in reverse + let iter_pre = namada_vp_prelude::rev_iter_prefix(&ctx_pre, &prefix) + .unwrap() + .map(|item| item.unwrap()); + + // The order in pre has to be reverse sorted by sub-key value + let expected_pre = sub_keys + .iter() + .sorted() + .rev() + .map(|i| (prefix.push(i).unwrap(), *i)); + itertools::assert_equal(iter_pre, expected_pre); } #[test] @@ -421,13 +471,21 @@ .expect("decoding signed data we just signed") }); assert_eq!(&signed_tx_data.data, data); - assert!(vp_host_env::verify_tx_signature(&pk, &signed_tx_data.sig)); + assert!( + vp::CTX + .verify_tx_signature(&pk, &signed_tx_data.sig) + .unwrap() + ); let other_keypair = key::testing::keypair_2(); - assert!(!vp_host_env::verify_tx_signature( - &other_keypair.ref_to(), - &signed_tx_data.sig - )); + assert!( + !vp::CTX + .verify_tx_signature( + &other_keypair.ref_to(), + &signed_tx_data.sig + ) + .unwrap() + ); } } @@ -437,19 +495,19 @@ vp_host_env::init(); assert_eq!( - vp_host_env::get_chain_id(), + vp::CTX.get_chain_id().unwrap(), vp_host_env::with(|env| env.storage.get_chain_id().0) ); assert_eq!( - vp_host_env::get_block_height(), + vp::CTX.get_block_height().unwrap(), vp_host_env::with(|env| env.storage.get_block_height().0) ); assert_eq!( - vp_host_env::get_block_hash(), + vp::CTX.get_block_hash().unwrap(), vp_host_env::with(|env| env.storage.get_block_hash().0) ); assert_eq!( - vp_host_env::get_block_epoch(), + vp::CTX.get_block_epoch().unwrap(), vp_host_env::with(|env| 
env.storage.get_current_epoch().0) ); } @@ -462,14 +520,14 @@ mod tests { // evaluating without any code should fail let empty_code = vec![]; let input_data = vec![]; - let result = vp_host_env::eval(empty_code, input_data); + let result = vp::CTX.eval(empty_code, input_data).unwrap(); assert!(!result); // evaluating the VP template which always returns `true` should pass let code = std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); let input_data = vec![]; - let result = vp_host_env::eval(code, input_data); + let result = vp::CTX.eval(code, input_data).unwrap(); assert!(result); // evaluating the VP template which always returns `false` shouldn't @@ -477,7 +535,7 @@ mod tests { let code = std::fs::read(VP_ALWAYS_FALSE_WASM).expect("cannot load wasm"); let input_data = vec![]; - let result = vp_host_env::eval(code, input_data); + let result = vp::CTX.eval(code, input_data).unwrap(); assert!(!result); } @@ -503,25 +561,25 @@ mod tests { .sign(&key::testing::keypair_1()); // get and increment the connection counter let counter_key = ibc::client_counter_key(); - let counter = ibc::TestIbcActions + let counter = tx::ctx() .get_and_inc_counter(&counter_key) .expect("getting the counter failed"); let client_id = ibc::client_id(msg.client_state.client_type(), counter) .expect("invalid client ID"); // only insert a client type - let client_type_key = ibc::client_type_key(&client_id).to_string(); - tx_host_env::write( - &client_type_key, - msg.client_state.client_type().as_str().as_bytes(), - ); + let client_type_key = ibc::client_type_key(&client_id); + tx::ctx() + .write( + &client_type_key, + msg.client_state.client_type().as_str().as_bytes(), + ) + .unwrap(); // Check should fail due to no client state let mut env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); assert!(matches!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect_err("validation succeeded unexpectedly"), + result.expect_err("validation succeeded unexpectedly"), IbcError::ClientError(_), )); // drop the transaction @@ -540,18 +598,14 @@ mod tests { .sign(&key::testing::keypair_1()); // create a client with the message - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("creating a client failed"); // Check let mut env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); - assert!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect("validation failed unexpectedly") - ); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); + assert!(result.expect("validation failed unexpectedly")); // Commit env.commit_tx_and_block(); @@ -582,27 +636,28 @@ mod tests { let same_client_state = old_data.client_state.clone(); let height = same_client_state.latest_height(); let same_consensus_state = old_data.consensus_state; - let client_state_key = ibc::client_state_key(&client_id).to_string(); - tx_host_env::write_bytes( - &client_state_key, - same_client_state.encode_vec().unwrap(), - ); - let consensus_state_key = - ibc::consensus_state_key(&client_id, height).to_string(); - tx_host_env::write( - &consensus_state_key, - same_consensus_state.encode_vec().unwrap(), - ); + let client_state_key = ibc::client_state_key(&client_id); + tx::ctx() + .write_bytes( + &client_state_key, + same_client_state.encode_vec().unwrap(), + ) + .unwrap(); + let consensus_state_key = ibc::consensus_state_key(&client_id, height); + tx::ctx() + .write( + &consensus_state_key, + 
same_consensus_state.encode_vec().unwrap(), + ) + .unwrap(); let event = ibc::make_update_client_event(&client_id, &msg); - tx_host_env::emit_ibc_event(&event.try_into().unwrap()); + TxEnv::emit_ibc_event(tx::ctx(), &event.try_into().unwrap()).unwrap(); // Check should fail due to the invalid updating let mut env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); assert!(matches!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect_err("validation succeeded unexpectedly"), + result.expect_err("validation succeeded unexpectedly"), IbcError::ClientError(_), )); // drop the transaction @@ -620,18 +675,14 @@ mod tests { } .sign(&key::testing::keypair_1()); // update the client with the message - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("updating the client failed"); // Check let mut env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); - assert!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect("validation failed unexpectedly") - ); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); + assert!(result.expect("validation failed unexpectedly")); // Commit env.commit_tx_and_block(); @@ -653,18 +704,14 @@ mod tests { } .sign(&key::testing::keypair_1()); // upgrade the client with the message - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("upgrading the client failed"); // Check let env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); - assert!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect("validation failed unexpectedly") - ); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); + assert!(result.expect("validation failed unexpectedly")); } #[test] @@ -696,25 +743,25 @@ mod tests { .sign(&key::testing::keypair_1()); // get and increment the connection counter let counter_key = ibc::connection_counter_key(); - let counter = ibc::TestIbcActions + let counter = tx::ctx() .get_and_inc_counter(&counter_key) .expect("getting the counter failed"); // insert a new opened connection let conn_id = ibc::connection_id(counter); - let conn_key = ibc::connection_key(&conn_id).to_string(); + let conn_key = ibc::connection_key(&conn_id); let mut connection = ibc::init_connection(&msg); ibc::open_connection(&mut connection); - tx_host_env::write_bytes(&conn_key, connection.encode_vec().unwrap()); + tx::ctx() + .write_bytes(&conn_key, connection.encode_vec().unwrap()) + .unwrap(); let event = ibc::make_open_init_connection_event(&conn_id, &msg); - tx_host_env::emit_ibc_event(&event.try_into().unwrap()); + TxEnv::emit_ibc_event(tx::ctx(), &event.try_into().unwrap()).unwrap(); // Check should fail due to directly opening a connection let mut env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); assert!(matches!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect_err("validation succeeded unexpectedly"), + result.expect_err("validation succeeded unexpectedly"), IbcError::ConnectionError(_), )); // drop the transaction @@ -732,18 +779,14 @@ mod tests { } .sign(&key::testing::keypair_1()); // init a connection with the message - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("creating a connection failed"); // Check let mut env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, 
&tx); - assert!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect("validation failed unexpectedly") - ); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); + assert!(result.expect("validation failed unexpectedly")); // Commit env.commit_tx_and_block(); @@ -762,18 +805,14 @@ mod tests { } .sign(&key::testing::keypair_1()); // open the connection with the message - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("opening the connection failed"); // Check let env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); - assert!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect("validation failed unexpectedly") - ); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); + assert!(result.expect("validation failed unexpectedly")); } #[test] @@ -802,18 +841,14 @@ mod tests { } .sign(&key::testing::keypair_1()); // open try a connection with the message - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("creating a connection failed"); // Check let mut env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); - assert!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect("validation failed unexpectedly") - ); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); + assert!(result.expect("validation failed unexpectedly")); // Commit env.commit_tx_and_block(); @@ -833,18 +868,14 @@ mod tests { } .sign(&key::testing::keypair_1()); // open the connection with the mssage - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("opening the connection failed"); // Check let env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); - assert!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect("validation failed unexpectedly") - ); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); + assert!(result.expect("validation failed unexpectedly")); } #[test] @@ -880,27 +911,24 @@ mod tests { // not bind a port // get and increment the channel counter let counter_key = ibc::channel_counter_key(); - let counter = ibc::TestIbcActions + let counter = tx::ctx() .get_and_inc_counter(&counter_key) .expect("getting the counter failed"); // channel let channel_id = ibc::channel_id(counter); let port_channel_id = ibc::port_channel_id(port_id, channel_id.clone()); - let channel_key = ibc::channel_key(&port_channel_id).to_string(); - tx_host_env::write_bytes( - &channel_key, - msg.channel.encode_vec().unwrap(), - ); + let channel_key = ibc::channel_key(&port_channel_id); + tx::ctx() + .write_bytes(&channel_key, msg.channel.encode_vec().unwrap()) + .unwrap(); let event = ibc::make_open_init_channel_event(&channel_id, &msg); - tx_host_env::emit_ibc_event(&event.try_into().unwrap()); + TxEnv::emit_ibc_event(tx::ctx(), &event.try_into().unwrap()).unwrap(); // Check should fail due to no port binding let mut env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); assert!(matches!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect_err("validation succeeded unexpectedly"), + result.expect_err("validation succeeded unexpectedly"), IbcError::ChannelError(_), )); // drop the transaction @@ -922,32 +950,32 @@ mod tests { } .sign(&key::testing::keypair_1()); // bind a port - ibc::TestIbcActions + tx::ctx() .bind_port(&port_id) .expect("binding the port failed"); // get and increment the channel 
counter let counter_key = ibc::channel_counter_key(); - let counter = ibc::TestIbcActions + let counter = tx::ctx() .get_and_inc_counter(&counter_key) .expect("getting the counter failed"); // insert a opened channel let channel_id = ibc::channel_id(counter); let port_channel_id = ibc::port_channel_id(port_id, channel_id.clone()); - let channel_key = ibc::channel_key(&port_channel_id).to_string(); + let channel_key = ibc::channel_key(&port_channel_id); let mut channel = msg.channel.clone(); ibc::open_channel(&mut channel); - tx_host_env::write_bytes(&channel_key, channel.encode_vec().unwrap()); + tx::ctx() + .write_bytes(&channel_key, channel.encode_vec().unwrap()) + .unwrap(); let event = ibc::make_open_init_channel_event(&channel_id, &msg); - tx_host_env::emit_ibc_event(&event.try_into().unwrap()); + TxEnv::emit_ibc_event(tx::ctx(), &event.try_into().unwrap()).unwrap(); // Check should fail due to directly opening a channel let mut env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); assert!(matches!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect_err("validation succeeded unexpectedly"), + result.expect_err("validation succeeded unexpectedly"), IbcError::ChannelError(_), )); // drop the transaction @@ -966,18 +994,14 @@ mod tests { } .sign(&key::testing::keypair_1()); // init a channel with the message - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("creating a channel failed"); // Check let mut env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); - assert!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect("validation failed unexpectedly") - ); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); + assert!(result.expect("validation failed unexpectedly")); // Commit env.commit_tx_and_block(); @@ -994,18 +1018,14 @@ mod tests { } .sign(&key::testing::keypair_1()); // open the channle with the message - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("opening the channel failed"); // Check let env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); - assert!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect("validation failed unexpectedly") - ); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); + assert!(result.expect("validation failed unexpectedly")); } #[test] @@ -1036,18 +1056,14 @@ mod tests { } .sign(&key::testing::keypair_1()); // try open a channel with the message - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("creating a channel failed"); // Check let mut env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); - assert!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect("validation failed unexpectedly") - ); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); + assert!(result.expect("validation failed unexpectedly")); // Commit env.commit_tx_and_block(); @@ -1065,18 +1081,14 @@ mod tests { } .sign(&key::testing::keypair_1()); // open a channel with the message - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("opening the channel failed"); // Check let env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); - assert!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect("validation failed unexpectedly") - ); + let result = 
ibc::validate_ibc_vp_from_tx(&env, &tx); + assert!(result.expect("validation failed unexpectedly")); } #[test] @@ -1109,18 +1121,14 @@ mod tests { } .sign(&key::testing::keypair_1()); // close the channel with the message - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("closing the channel failed"); // Check let env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); - assert!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect("validation failed unexpectedly") - ); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); + assert!(result.expect("validation failed unexpectedly")); } #[test] @@ -1154,18 +1162,14 @@ mod tests { .sign(&key::testing::keypair_1()); // close the channel with the message - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("closing the channel failed"); // Check let env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); - assert!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect("validation failed unexpectedly") - ); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); + assert!(result.expect("validation failed unexpectedly")); } #[test] @@ -1202,18 +1206,14 @@ mod tests { } .sign(&key::testing::keypair_1()); // send the token and a packet with the data - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("sending a packet failed"); // Check let mut env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); - assert!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect("validation failed unexpectedly") - ); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); + assert!(result.expect("validation failed unexpectedly")); // Check if the token was escrowed let escrow = address::Address::Internal( address::InternalAddress::ibc_escrow_address( @@ -1221,12 +1221,9 @@ mod tests { msg.source_channel.to_string(), ), ); - let (token_vp, _) = ibc::init_token_vp_from_tx(&env, &tx, &escrow); - assert!( - token_vp - .validate(tx.data.as_ref().unwrap()) - .expect("token validation failed unexpectedly") - ); + let token_vp_result = + ibc::validate_token_vp_from_tx(&env, &tx, &escrow); + assert!(token_vp_result.expect("token validation failed unexpectedly")); // Commit env.commit_tx_and_block(); @@ -1246,18 +1243,14 @@ mod tests { } .sign(&key::testing::keypair_1()); // ack the packet with the message - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("the packet ack failed"); // Check let env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); - assert!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect("validation failed unexpectedly") - ); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); + assert!(result.expect("validation failed unexpectedly")); } #[test] @@ -1292,27 +1285,19 @@ mod tests { } .sign(&key::testing::keypair_1()); // send the token and a packet with the data - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("sending a packet failed"); // Check let env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); - assert!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect("validation failed unexpectedly") - ); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); + assert!(result.expect("validation failed unexpectedly")); // Check if the token was burned let 
burn = address::Address::Internal(address::InternalAddress::IbcBurn); - let (token_vp, _) = ibc::init_token_vp_from_tx(&env, &tx, &burn); - assert!( - token_vp - .validate(tx.data.as_ref().unwrap()) - .expect("token validation failed unexpectedly") - ); + let result = ibc::validate_token_vp_from_tx(&env, &tx, &burn); + assert!(result.expect("token validation failed unexpectedly")); } #[test] @@ -1354,27 +1339,19 @@ mod tests { } .sign(&key::testing::keypair_1()); // receive a packet with the message - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("receiving a packet failed"); // Check let env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); - assert!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect("validation failed unexpectedly") - ); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); + assert!(result.expect("validation failed unexpectedly")); // Check if the token was minted let mint = address::Address::Internal(address::InternalAddress::IbcMint); - let (token_vp, _) = ibc::init_token_vp_from_tx(&env, &tx, &mint); - assert!( - token_vp - .validate(tx.data.as_ref().unwrap()) - .expect("token validation failed unexpectedly") - ); + let result = ibc::validate_token_vp_from_tx(&env, &tx, &mint); + assert!(result.expect("token validation failed unexpectedly")); } #[test] @@ -1436,25 +1413,17 @@ mod tests { } .sign(&key::testing::keypair_1()); // receive a packet with the message - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("receiving a packet failed"); // Check let env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); - assert!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect("validation failed unexpectedly") - ); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); + assert!(result.expect("validation failed unexpectedly")); // Check if the token was unescrowed - let (token_vp, _) = ibc::init_token_vp_from_tx(&env, &tx, &escrow); - assert!( - token_vp - .validate(tx.data.as_ref().unwrap()) - .expect("token validation failed unexpectedly") - ); + let result = ibc::validate_token_vp_from_tx(&env, &tx, &escrow); + assert!(result.expect("token validation failed unexpectedly")); } #[test] @@ -1491,20 +1460,16 @@ mod tests { } .sign(&key::testing::keypair_1()); // send a packet with the message - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("sending a packet failed"); // the transaction does something before senging a packet // Check let mut env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); - assert!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect("validation failed unexpectedly") - ); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); + assert!(result.expect("validation failed unexpectedly")); // Commit env.commit_tx_and_block(); @@ -1524,20 +1489,16 @@ mod tests { } .sign(&key::testing::keypair_1()); // ack the packet with the message - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("the packet ack failed"); // the transaction does something after the ack // Check let env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); - assert!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect("validation failed unexpectedly") - ); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); + assert!(result.expect("validation failed 
unexpectedly")); } #[test] @@ -1579,20 +1540,16 @@ mod tests { } .sign(&key::testing::keypair_1()); // receive a packet with the message - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("receiving a packet failed"); // the transaction does something according to the packet // Check let env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); - assert!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect("validation failed unexpectedly") - ); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); + assert!(result.expect("validation failed unexpectedly")); } #[test] @@ -1624,8 +1581,8 @@ mod tests { .encode(&mut tx_data) .expect("encoding failed"); // send a packet with the message - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("sending apacket failed"); // Commit @@ -1646,18 +1603,14 @@ mod tests { .sign(&key::testing::keypair_1()); // close the channel with the message - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("closing the channel failed"); // Check let env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); - assert!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect("validation failed unexpectedly") - ); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); + assert!(result.expect("validation failed unexpectedly")); // Check if the token was refunded let escrow = address::Address::Internal( address::InternalAddress::ibc_escrow_address( @@ -1665,12 +1618,8 @@ mod tests { packet.source_channel.to_string(), ), ); - let (token_vp, _) = ibc::init_token_vp_from_tx(&env, &tx, &escrow); - assert!( - token_vp - .validate(tx.data.as_ref().unwrap()) - .expect("token validation failed unexpectedly") - ); + let result = ibc::validate_token_vp_from_tx(&env, &tx, &escrow); + assert!(result.expect("token validation failed unexpectedly")); } #[test] @@ -1701,8 +1650,8 @@ mod tests { .encode(&mut tx_data) .expect("encoding failed"); // send a packet with the message - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("sending a packet failed"); // Commit @@ -1723,18 +1672,14 @@ mod tests { .sign(&key::testing::keypair_1()); // close the channel with the message - ibc::TestIbcActions - .dispatch(&tx_data) + tx::ctx() + .dispatch_ibc_action(&tx_data) .expect("closing the channel failed"); // Check let env = tx_host_env::take(); - let (ibc_vp, _) = ibc::init_ibc_vp_from_tx(&env, &tx); - assert!( - ibc_vp - .validate(tx.data.as_ref().unwrap()) - .expect("validation failed unexpectedly") - ); + let result = ibc::validate_ibc_vp_from_tx(&env, &tx); + assert!(result.expect("validation failed unexpectedly")); // Check if the token was refunded let escrow = address::Address::Internal( address::InternalAddress::ibc_escrow_address( @@ -1742,11 +1687,7 @@ mod tests { packet.source_channel.to_string(), ), ); - let (token_vp, _) = ibc::init_token_vp_from_tx(&env, &tx, &escrow); - assert!( - token_vp - .validate(tx.data.as_ref().unwrap()) - .expect("token validation failed unexpectedly") - ); + let result = ibc::validate_token_vp_from_tx(&env, &tx, &escrow); + assert!(result.expect("token validation failed unexpectedly")); } } diff --git a/tests/src/vm_host_env/tx.rs b/tests/src/vm_host_env/tx.rs index 3a684e8382..6e23ced06b 100644 --- a/tests/src/vm_host_env/tx.rs +++ b/tests/src/vm_host_env/tx.rs @@ -15,16 +15,27 @@ use namada::types::{key, token}; use 
namada::vm::prefix_iter::PrefixIterators; use namada::vm::wasm::{self, TxCache, VpCache}; use namada::vm::{self, WasmCacheRwAccess}; -use namada_vm_env::tx_prelude::BorshSerialize; +use namada_tx_prelude::{BorshSerialize, Ctx}; use tempfile::TempDir; +use crate::vp::TestVpEnv; + +/// Tx execution context provides access to host env functions +static mut CTX: Ctx = unsafe { Ctx::new() }; + +/// Tx execution context provides access to host env functions +pub fn ctx() -> &'static mut Ctx { + unsafe { &mut CTX } +} + /// This module combines the native host function implementations from /// `native_tx_host_env` with the functions exposed to the tx wasm /// that will call to the native functions, instead of interfacing via a /// wasm runtime. It can be used for host environment integration tests. pub mod tx_host_env { - pub use namada_vm_env::tx_prelude::*; + pub use namada_tx_prelude::*; + pub use super::ctx; pub use super::native_tx_host_env::*; } @@ -226,6 +237,29 @@ mod native_tx_host_env { with(|env| env.commit_tx_and_block()) } + /// Set the [`TestTxEnv`] back from a [`TestVpEnv`]. This is useful when + /// testing validation with multiple transactions that accumulate some state + /// changes. + pub fn set_from_vp_env(vp_env: TestVpEnv) { + let TestVpEnv { + storage, + write_log, + tx, + vp_wasm_cache, + vp_cache_dir, + .. + } = vp_env; + let tx_env = TestTxEnv { + storage, + write_log, + vp_wasm_cache, + vp_cache_dir, + tx, + ..Default::default() + }; + set(tx_env); + } + /// A helper macro to create implementations of the host environment /// functions exported to wasm, which uses the environment from the /// `ENV` variable. @@ -326,6 +360,7 @@ mod native_tx_host_env { )); native_host_fn!(tx_delete(key_ptr: u64, key_len: u64)); native_host_fn!(tx_iter_prefix(prefix_ptr: u64, prefix_len: u64) -> u64); + native_host_fn!(tx_rev_iter_prefix(prefix_ptr: u64, prefix_len: u64) -> u64); native_host_fn!(tx_iter_next(iter_id: u64) -> i64); native_host_fn!(tx_insert_verifier(addr_ptr: u64, addr_len: u64)); native_host_fn!(tx_update_validity_predicate( diff --git a/tests/src/vm_host_env/vp.rs b/tests/src/vm_host_env/vp.rs index 61b87e1b3b..88aa63d530 100644 --- a/tests/src/vm_host_env/vp.rs +++ b/tests/src/vm_host_env/vp.rs @@ -10,17 +10,27 @@ use namada::types::storage::{self, Key}; use namada::vm::prefix_iter::PrefixIterators; use namada::vm::wasm::{self, VpCache}; use namada::vm::{self, WasmCacheRwAccess}; +use namada_vp_prelude::Ctx; use tempfile::TempDir; use crate::tx::{tx_host_env, TestTxEnv}; +/// VP execution context provides access to host env functions +pub static CTX: Ctx = unsafe { Ctx::new() }; + +/// VP execution context provides access to host env functions +pub fn ctx() -> &'static Ctx { + &CTX +} + /// This module combines the native host function implementations from /// `native_vp_host_env` with the functions exposed to the vp wasm /// that will call to the native functions, instead of interfacing via a /// wasm runtime. It can be used for host environment integration tests. pub mod vp_host_env { - pub use namada_vm_env::vp_prelude::*; + pub use namada_vp_prelude::*; + pub use super::ctx; pub use super::native_vp_host_env::*; } @@ -160,7 +170,7 @@ mod native_vp_host_env { /// Initialize the VP host environment in [`ENV`] by running a transaction. /// The transaction is expected to modify the storage sub-space of the given /// address `addr` or to add it to the set of verifiers using - /// [`tx_host_env::insert_verifier`]. + /// `ctx.insert_verifier`. 
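+    ///
+    /// A usage sketch (editor's illustration, following the call pattern
+    /// used by the tests in this crate):
+    ///
+    /// ```text
+    /// vp_host_env::init_from_tx(addr, tx_env, |_addr| {
+    ///     tx::ctx().write(&key, value).unwrap();
+    /// });
+    /// ```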
pub fn init_from_tx( addr: Address, mut tx_env: TestTxEnv, @@ -316,6 +326,7 @@ mod native_vp_host_env { native_host_fn!(vp_has_key_pre(key_ptr: u64, key_len: u64) -> i64); native_host_fn!(vp_has_key_post(key_ptr: u64, key_len: u64) -> i64); native_host_fn!(vp_iter_prefix(prefix_ptr: u64, prefix_len: u64) -> u64); + native_host_fn!(vp_rev_iter_prefix(prefix_ptr: u64, prefix_len: u64) -> u64); native_host_fn!(vp_iter_pre_next(iter_id: u64) -> i64); native_host_fn!(vp_iter_post_next(iter_id: u64) -> i64); native_host_fn!(vp_get_chain_id(result_ptr: u64)); diff --git a/tx_prelude/Cargo.toml b/tx_prelude/Cargo.toml index 76419f417a..2b3c34e7f9 100644 --- a/tx_prelude/Cargo.toml +++ b/tx_prelude/Cargo.toml @@ -10,5 +10,9 @@ version = "0.7.1" default = [] [dependencies] +namada = {path = "../shared"} namada_vm_env = {path = "../vm_env"} +namada_macros = {path = "../macros"} +borsh = "0.9.0" sha2 = "0.10.1" +thiserror = "1.0.30" diff --git a/tx_prelude/src/governance.rs b/tx_prelude/src/governance.rs new file mode 100644 index 0000000000..a0dfce2c77 --- /dev/null +++ b/tx_prelude/src/governance.rs @@ -0,0 +1,78 @@ +//! Governance + +use namada::ledger::governance::storage; +use namada::ledger::governance::vp::ADDRESS as governance_address; +use namada::types::address::xan as m1t; +use namada::types::token::Amount; +use namada::types::transaction::governance::{ + InitProposalData, VoteProposalData, +}; + +use super::*; +use crate::token::transfer; + +/// A proposal creation transaction. +pub fn init_proposal(ctx: &mut Ctx, data: InitProposalData) -> TxResult { + let counter_key = storage::get_counter_key(); + let proposal_id = if let Some(id) = data.id { + id + } else { + ctx.read(&counter_key)?.unwrap() + }; + + let content_key = storage::get_content_key(proposal_id); + ctx.write_bytes(&content_key, data.content)?; + + let author_key = storage::get_author_key(proposal_id); + ctx.write(&author_key, data.author.clone())?; + + let voting_start_epoch_key = + storage::get_voting_start_epoch_key(proposal_id); + ctx.write(&voting_start_epoch_key, data.voting_start_epoch)?; + + let voting_end_epoch_key = storage::get_voting_end_epoch_key(proposal_id); + ctx.write(&voting_end_epoch_key, data.voting_end_epoch)?; + + let grace_epoch_key = storage::get_grace_epoch_key(proposal_id); + ctx.write(&grace_epoch_key, data.grace_epoch)?; + + if let Some(proposal_code) = data.proposal_code { + let proposal_code_key = storage::get_proposal_code_key(proposal_id); + ctx.write_bytes(&proposal_code_key, proposal_code)?; + } + + ctx.write(&counter_key, proposal_id + 1)?; + + let min_proposal_funds_key = storage::get_min_proposal_fund_key(); + let min_proposal_funds: Amount = + ctx.read(&min_proposal_funds_key)?.unwrap(); + + let funds_key = storage::get_funds_key(proposal_id); + ctx.write(&funds_key, min_proposal_funds)?; + + // this key must always be written for each proposal + let committing_proposals_key = + storage::get_committing_proposals_key(proposal_id, data.grace_epoch.0); + ctx.write(&committing_proposals_key, ())?; + + transfer( + ctx, + &data.author, + &governance_address, + &m1t(), + min_proposal_funds, + ) +} + +/// A proposal vote transaction. 
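+///
+/// For example (a sketch, not from this changeset; assumes the tx data was
+/// Borsh-decoded into [`VoteProposalData`]):
+///
+/// ```ignore
+/// let data = VoteProposalData::try_from_slice(&tx_data[..]).unwrap();
+/// vote_proposal(ctx, data)?;
+/// ```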
+pub fn vote_proposal(ctx: &mut Ctx, data: VoteProposalData) -> TxResult { + for delegation in data.delegations { + let vote_key = storage::get_vote_proposal_key( + data.id, + data.voter.clone(), + delegation, + ); + ctx.write(&vote_key, data.vote.clone())?; + } + Ok(()) +} diff --git a/tx_prelude/src/ibc.rs b/tx_prelude/src/ibc.rs new file mode 100644 index 0000000000..494d5e7cd3 --- /dev/null +++ b/tx_prelude/src/ibc.rs @@ -0,0 +1,73 @@ +//! IBC lower-level functions for transactions. + +pub use namada::ledger::ibc::handler::{Error, IbcActions, Result}; +use namada::ledger::storage_api::{StorageRead, StorageWrite}; +use namada::ledger::tx_env::TxEnv; +use namada::types::address::Address; +pub use namada::types::ibc::IbcEvent; +use namada::types::storage::{BlockHeight, Key}; +use namada::types::time::Rfc3339String; +use namada::types::token::Amount; + +use crate::token::transfer; +use crate::Ctx; + +impl IbcActions for Ctx { + type Error = crate::Error; + + fn read_ibc_data( + &self, + key: &Key, + ) -> std::result::Result<Option<Vec<u8>>, Self::Error> { + let data = self.read_bytes(key)?; + Ok(data) + } + + fn write_ibc_data( + &mut self, + key: &Key, + data: impl AsRef<[u8]>, + ) -> std::result::Result<(), Self::Error> { + self.write_bytes(key, data)?; + Ok(()) + } + + fn delete_ibc_data( + &mut self, + key: &Key, + ) -> std::result::Result<(), Self::Error> { + self.delete(key)?; + Ok(()) + } + + fn emit_ibc_event( + &mut self, + event: IbcEvent, + ) -> std::result::Result<(), Self::Error> { + <Self as TxEnv>::emit_ibc_event(self, &event)?; + Ok(()) + } + + fn transfer_token( + &mut self, + src: &Address, + dest: &Address, + token: &Address, + amount: Amount, + ) -> std::result::Result<(), Self::Error> { + transfer(self, src, dest, token, amount)?; + Ok(()) + } + + fn get_height(&self) -> std::result::Result<BlockHeight, Self::Error> { + let val = self.get_block_height()?; + Ok(val) + } + + fn get_header_time( + &self, + ) -> std::result::Result<Rfc3339String, Self::Error> { + let val = self.get_block_time()?; + Ok(val) + } +} diff --git a/tx_prelude/src/intent.rs b/tx_prelude/src/intent.rs new file mode 100644 index 0000000000..05f7cede91 --- /dev/null +++ b/tx_prelude/src/intent.rs @@ -0,0 +1,19 @@ +use std::collections::HashSet; + +use namada::proto::Signed; +use namada::types::intent; +pub use namada::types::intent::*; +use namada::types::key::*; + +use super::*; +pub fn invalidate_exchange( + ctx: &mut Ctx, + intent: &Signed<Exchange>, +) -> TxResult { + let key = intent::invalid_intent_key(&intent.data.addr); + let mut invalid_intent: HashSet<common::Signature> = + ctx.read(&key)?.unwrap_or_default(); + invalid_intent.insert(intent.sig.clone()); + ctx.write(&key, &invalid_intent)?; + Ok(()) +} diff --git a/tx_prelude/src/lib.rs b/tx_prelude/src/lib.rs index 315c68384e..4b44bda10d 100644 --- a/tx_prelude/src/lib.rs +++ b/tx_prelude/src/lib.rs @@ -6,7 +6,50 @@ #![deny(rustdoc::broken_intra_doc_links)] #![deny(rustdoc::private_intra_doc_links)] -pub use namada_vm_env::tx_prelude::*; +pub mod governance; +pub mod ibc; +pub mod intent; +pub mod nft; +pub mod proof_of_stake; +pub mod token; + +use core::slice; +use std::marker::PhantomData; + +pub use borsh::{BorshDeserialize, BorshSerialize}; +pub use namada::ledger::governance::storage as gov_storage; +pub use namada::ledger::parameters::storage as parameters_storage; +pub use namada::ledger::storage::types::encode; +pub use namada::ledger::storage_api::{ + self, iter_prefix, iter_prefix_bytes, rev_iter_prefix, + rev_iter_prefix_bytes, Error, OptionExt, ResultExt, StorageRead, + StorageWrite, +}; +pub use namada::ledger::treasury::storage as
treasury_storage; +pub use namada::ledger::tx_env::TxEnv; +pub use namada::proto::{Signed, SignedTxData}; +pub use namada::types::address::Address; +use namada::types::chain::CHAIN_ID_LENGTH; +use namada::types::internal::HostEnvResult; +use namada::types::storage::{ + BlockHash, BlockHeight, Epoch, BLOCK_HASH_LENGTH, +}; +use namada::types::time::Rfc3339String; +pub use namada::types::*; +pub use namada_macros::transaction; +use namada_vm_env::tx::*; +use namada_vm_env::{read_from_buffer, read_key_val_bytes_from_buffer}; + +pub use crate::ibc::IbcActions; +pub use crate::proof_of_stake::{PosRead, PosWrite}; + +/// Log a string. The message will be printed at the `tracing::Level::Info`. +pub fn log_string<T: AsRef<str>>(msg: T) { + let msg = msg.as_ref(); + unsafe { + anoma_tx_log_string(msg.as_ptr() as _, msg.len() as _); + } +} /// Log a string in a debug build. The message will be printed at the /// `tracing::Level::Info`. Any `debug_log!` statements are only enabled in @@ -19,3 +62,252 @@ macro_rules! debug_log { (if cfg!(debug_assertions) { log_string(format!($($arg)*)) }) }} } + +/// Execution context provides access to the host environment functions +pub struct Ctx(()); + +impl Ctx { + /// Create a host context. The context on the WASM side is only provided by + /// the VM once it's being executed (in here it's implicit). But + /// because we want to have an interface identical with the native + /// VPs, in which the context is explicit, in here we're just + /// using an empty `Ctx` to "fake" it. + /// + /// # Safety + /// + /// When using the `#[transaction]` macro from `namada_macros`, + /// the constructor should not be called from transaction and validity + /// predicate implementations directly - they receive `&Self` as + /// an argument provided by the macro that wraps the low-level WASM + /// interface with Rust native types. + /// + /// Otherwise, this should only be called once to initialize this "fake" + /// context in order to benefit from type-safety of the host environment + /// methods implemented on the context.
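+    ///
+    /// With the macro, a transaction is typically declared as follows (a
+    /// sketch of the intended usage, not part of this change):
+    ///
+    /// ```ignore
+    /// #[transaction]
+    /// fn apply_tx(ctx: &mut Ctx, tx_data: Vec<u8>) -> TxResult {
+    ///     debug_log!("apply_tx called with {} bytes", tx_data.len());
+    ///     Ok(())
+    /// }
+    /// ```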
+ #[allow(clippy::new_without_default)] + pub const unsafe fn new() -> Self { + Self(()) + } +} + +/// Result of `TxEnv`, `storage_api::StorageRead` or `storage_api::StorageWrite` +/// method call +pub type EnvResult<T> = Result<T, Error>; + +/// Transaction result +pub type TxResult = EnvResult<()>; + +#[derive(Debug)] +pub struct KeyValIterator<T>(pub u64, pub PhantomData<T>); + +impl StorageRead<'_> for Ctx { + type PrefixIter = KeyValIterator<(String, Vec<u8>)>; + + fn read_bytes( + &self, + key: &namada::types::storage::Key, + ) -> Result<Option<Vec<u8>>, Error> { + let key = key.to_string(); + let read_result = + unsafe { anoma_tx_read(key.as_ptr() as _, key.len() as _) }; + Ok(read_from_buffer(read_result, anoma_tx_result_buffer)) + } + + fn has_key( + &self, + key: &namada::types::storage::Key, + ) -> Result<bool, Error> { + let key = key.to_string(); + let found = + unsafe { anoma_tx_has_key(key.as_ptr() as _, key.len() as _) }; + Ok(HostEnvResult::is_success(found)) + } + + fn get_chain_id(&self) -> Result<String, Error> { + let result = Vec::with_capacity(CHAIN_ID_LENGTH); + unsafe { + anoma_tx_get_chain_id(result.as_ptr() as _); + } + let slice = + unsafe { slice::from_raw_parts(result.as_ptr(), CHAIN_ID_LENGTH) }; + Ok(String::from_utf8(slice.to_vec()) + .expect("Cannot convert the ID string")) + } + + fn get_block_height( + &self, + ) -> Result<BlockHeight, Error> { + Ok(BlockHeight(unsafe { anoma_tx_get_block_height() })) + } + + fn get_block_hash( + &self, + ) -> Result<BlockHash, Error> { + let result = Vec::with_capacity(BLOCK_HASH_LENGTH); + unsafe { + anoma_tx_get_block_hash(result.as_ptr() as _); + } + let slice = unsafe { + slice::from_raw_parts(result.as_ptr(), BLOCK_HASH_LENGTH) + }; + Ok(BlockHash::try_from(slice).expect("Cannot convert the hash")) + } + + fn get_block_epoch(&self) -> Result<Epoch, Error> { + Ok(Epoch(unsafe { anoma_tx_get_block_epoch() })) + } + + fn iter_prefix( + &self, + prefix: &namada::types::storage::Key, + ) -> Result<Self::PrefixIter, Error> { + let prefix = prefix.to_string(); + let iter_id = unsafe { + anoma_tx_iter_prefix(prefix.as_ptr() as _, prefix.len() as _) + }; + Ok(KeyValIterator(iter_id, PhantomData)) + } + + fn rev_iter_prefix( + &self, + prefix: &storage::Key, + ) -> Result<Self::PrefixIter, Error> { + let prefix = prefix.to_string(); + let iter_id = unsafe { + anoma_tx_rev_iter_prefix(prefix.as_ptr() as _, prefix.len() as _) + }; + Ok(KeyValIterator(iter_id, PhantomData)) + } + + fn iter_next( + &self, + iter: &mut Self::PrefixIter, + ) -> Result<Option<(String, Vec<u8>)>, Error> { + let read_result = unsafe { anoma_tx_iter_next(iter.0) }; + Ok(read_key_val_bytes_from_buffer( + read_result, + anoma_tx_result_buffer, + )) + } +} + +impl StorageWrite for Ctx { + fn write_bytes( + &mut self, + key: &namada::types::storage::Key, + val: impl AsRef<[u8]>, + ) -> storage_api::Result<()> { + let key = key.to_string(); + unsafe { + anoma_tx_write( + key.as_ptr() as _, + key.len() as _, + val.as_ref().as_ptr() as _, + val.as_ref().len() as _, + ) + }; + Ok(()) + } + + fn delete( + &mut self, + key: &namada::types::storage::Key, + ) -> storage_api::Result<()> { + let key = key.to_string(); + unsafe { anoma_tx_delete(key.as_ptr() as _, key.len() as _) }; + Ok(()) + } +} + +impl TxEnv<'_> for Ctx { + fn get_block_time(&self) -> Result<Rfc3339String, Error> { + let read_result = unsafe { anoma_tx_get_block_time() }; + let time_value = read_from_buffer(read_result, anoma_tx_result_buffer) + .expect("The block time should exist"); + Ok(Rfc3339String( + String::try_from_slice(&time_value[..]) + .expect("The conversion shouldn't fail"), + )) + } + + fn write_temp<T: BorshSerialize>( + &mut self, + key: &namada::types::storage::Key, + val: T, + ) -> Result<(), Error> { +
let buf = val.try_to_vec().unwrap(); + self.write_bytes_temp(key, buf) + } + + fn write_bytes_temp( + &mut self, + key: &namada::types::storage::Key, + val: impl AsRef<[u8]>, + ) -> Result<(), Error> { + let key = key.to_string(); + unsafe { + anoma_tx_write_temp( + key.as_ptr() as _, + key.len() as _, + val.as_ref().as_ptr() as _, + val.as_ref().len() as _, + ) + }; + Ok(()) + } + + fn insert_verifier(&mut self, addr: &Address) -> Result<(), Error> { + let addr = addr.encode(); + unsafe { anoma_tx_insert_verifier(addr.as_ptr() as _, addr.len() as _) } + Ok(()) + } + + fn init_account( + &mut self, + code: impl AsRef<[u8]>, + ) -> Result<Address, Error> { + let code = code.as_ref(); + let result = Vec::with_capacity(address::ESTABLISHED_ADDRESS_BYTES_LEN); + unsafe { + anoma_tx_init_account( + code.as_ptr() as _, + code.len() as _, + result.as_ptr() as _, + ) + }; + let slice = unsafe { + slice::from_raw_parts( + result.as_ptr(), + address::ESTABLISHED_ADDRESS_BYTES_LEN, + ) + }; + Ok(Address::try_from_slice(slice) + .expect("Decoding address created by the ledger shouldn't fail")) + } + + fn update_validity_predicate( + &mut self, + addr: &Address, + code: impl AsRef<[u8]>, + ) -> Result<(), Error> { + let addr = addr.encode(); + let code = code.as_ref(); + unsafe { + anoma_tx_update_validity_predicate( + addr.as_ptr() as _, + addr.len() as _, + code.as_ptr() as _, + code.len() as _, + ) + }; + Ok(()) + } + + fn emit_ibc_event(&mut self, event: &ibc::IbcEvent) -> Result<(), Error> { + let event = BorshSerialize::try_to_vec(event).unwrap(); + unsafe { + anoma_tx_emit_ibc_event(event.as_ptr() as _, event.len() as _) + }; + Ok(()) + } +} diff --git a/tx_prelude/src/nft.rs b/tx_prelude/src/nft.rs new file mode 100644 index 0000000000..4ed179fe27 --- /dev/null +++ b/tx_prelude/src/nft.rs @@ -0,0 +1,89 @@ +use namada::types::address::Address; +use namada::types::nft; +use namada::types::nft::NftToken; +use namada::types::transaction::nft::{CreateNft, MintNft}; + +use super::*; + +/// Initialize a new NFT token address. +pub fn init_nft(ctx: &mut Ctx, nft: CreateNft) -> EnvResult<Address>
{ + let address = ctx.init_account(&nft.vp_code)?; + + // write tag + let tag_key = nft::get_tag_key(&address); + ctx.write(&tag_key, &nft.tag)?; + + // write creator + let creator_key = nft::get_creator_key(&address); + ctx.write(&creator_key, &nft.creator)?; + + // write keys + let keys_key = nft::get_keys_key(&address); + ctx.write(&keys_key, &nft.keys)?; + + // write optional keys + let optional_keys_key = nft::get_optional_keys_key(&address); + ctx.write(&optional_keys_key, nft.opt_keys)?; + + // mint tokens + aux_mint_token(ctx, &address, &nft.creator, nft.tokens, &nft.creator)?; + + ctx.insert_verifier(&nft.creator)?; + + Ok(address) +} + +pub fn mint_tokens(ctx: &mut Ctx, nft: MintNft) -> TxResult { + aux_mint_token(ctx, &nft.address, &nft.creator, nft.tokens, &nft.creator) +} + +fn aux_mint_token( + ctx: &mut Ctx, + nft_address: &Address, + creator_address: &Address, + tokens: Vec<NftToken>, + verifier: &Address, +) -> TxResult { + for token in tokens { + // write token metadata + let metadata_key = + nft::get_token_metadata_key(nft_address, &token.id.to_string()); + ctx.write(&metadata_key, &token.metadata)?; + + // write current owner token as creator + let current_owner_key = nft::get_token_current_owner_key( + nft_address, + &token.id.to_string(), + ); + ctx.write( + &current_owner_key, + &token + .current_owner + .unwrap_or_else(|| creator_address.clone()), + )?; + + // write value key + let value_key = + nft::get_token_value_key(nft_address, &token.id.to_string()); + ctx.write(&value_key, &token.values)?; + + // write optional value keys + let optional_value_key = nft::get_token_optional_value_key( + nft_address, + &token.id.to_string(), + ); + ctx.write(&optional_value_key, &token.opt_values)?; + + // write approval addresses + let approval_key = + nft::get_token_approval_key(nft_address, &token.id.to_string()); + ctx.write(&approval_key, &token.approvals)?; + + // write burnt property + let burnt_key = + nft::get_token_burnt_key(nft_address, &token.id.to_string()); + ctx.write(&burnt_key, token.burnt)?; + } + ctx.insert_verifier(verifier)?; + Ok(()) +} diff --git a/tx_prelude/src/proof_of_stake.rs b/tx_prelude/src/proof_of_stake.rs new file mode 100644 index 0000000000..97a258365c --- /dev/null +++ b/tx_prelude/src/proof_of_stake.rs @@ -0,0 +1,229 @@ +//! Proof of Stake system integration with functions for transactions + +pub use namada::ledger::pos::*; +use namada::ledger::pos::{ + bond_key, namada_proof_of_stake, params_key, total_voting_power_key, + unbond_key, validator_address_raw_hash_key, validator_consensus_key_key, + validator_set_key, validator_slashes_key, + validator_staking_reward_address_key, validator_state_key, + validator_total_deltas_key, validator_voting_power_key, +}; +use namada::types::address::Address; +use namada::types::transaction::InitValidator; +use namada::types::{key, token}; +pub use namada_proof_of_stake::{ + epoched, parameters, types, PosActions as PosWrite, PosReadOnly as PosRead, +}; + +use super::*; + +impl Ctx { + /// Self-bond tokens to a validator when `source` is `None` or equal to + /// the `validator` address, or delegate tokens from the `source` to the + /// `validator`.
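+    ///
+    /// For example, a self-bond might look like this (a sketch; the
+    /// validator address and amount are illustrative, assuming
+    /// `token::Amount` converts from micro-token units via `From<u64>`):
+    ///
+    /// ```ignore
+    /// ctx.bond_tokens(None, &validator, token::Amount::from(1_000_000))?;
+    /// ```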
+ pub fn bond_tokens( + &mut self, + source: Option<&Address>, + validator: &Address, + amount: token::Amount, + ) -> TxResult { + let current_epoch = self.get_block_epoch()?; + namada_proof_of_stake::PosActions::bond_tokens( + self, + source, + validator, + amount, + current_epoch, + ) + } + + /// Unbond self-bonded tokens from a validator when `source` is `None` or + /// equal to the `validator` address, or unbond delegated tokens from + /// the `source` to the `validator`. + pub fn unbond_tokens( + &mut self, + source: Option<&Address>, + validator: &Address, + amount: token::Amount, + ) -> TxResult { + let current_epoch = self.get_block_epoch()?; + namada_proof_of_stake::PosActions::unbond_tokens( + self, + source, + validator, + amount, + current_epoch, + ) + } + + /// Withdraw unbonded tokens from a self-bond to a validator when `source` + /// is `None` or equal to the `validator` address, or withdraw unbonded + /// tokens delegated to the `validator` to the `source`. + pub fn withdraw_tokens( + &mut self, + source: Option<&Address>, + validator: &Address, + ) -> EnvResult<token::Amount> { + let current_epoch = self.get_block_epoch()?; + namada_proof_of_stake::PosActions::withdraw_tokens( + self, + source, + validator, + current_epoch, + ) + } + + /// Attempt to initialize a validator account. On success, returns the + /// initialized validator account's address and its staking reward address. + pub fn init_validator( + &mut self, + InitValidator { + account_key, + consensus_key, + rewards_account_key, + protocol_key, + dkg_key, + validator_vp_code, + rewards_vp_code, + }: InitValidator, + ) -> EnvResult<(Address, Address)> { + let current_epoch = self.get_block_epoch()?; + // Init validator account + let validator_address = self.init_account(&validator_vp_code)?; + let pk_key = key::pk_key(&validator_address); + self.write(&pk_key, &account_key)?; + let protocol_pk_key = key::protocol_pk_key(&validator_address); + self.write(&protocol_pk_key, &protocol_key)?; + let dkg_pk_key = key::dkg_session_keys::dkg_pk_key(&validator_address); + self.write(&dkg_pk_key, &dkg_key)?; + + // Init staking reward account + let rewards_address = self.init_account(&rewards_vp_code)?; + let pk_key = key::pk_key(&rewards_address); + self.write(&pk_key, &rewards_account_key)?; + + self.become_validator( + &validator_address, + &rewards_address, + &consensus_key, + current_epoch, + )?; + + Ok((validator_address, rewards_address)) + } +} + +namada::impl_pos_read_only!
{ + type Error = crate::Error; + impl namada_proof_of_stake::PosReadOnly for Ctx +} + +impl namada_proof_of_stake::PosActions for Ctx { + type BecomeValidatorError = crate::Error; + type BondError = crate::Error; + type UnbondError = crate::Error; + type WithdrawError = crate::Error; + + fn write_pos_params( + &mut self, + params: &PosParams, + ) -> Result<(), Self::Error> { + self.write(¶ms_key(), params) + } + + fn write_validator_address_raw_hash( + &mut self, + address: &Self::Address, + ) -> Result<(), Self::Error> { + let raw_hash = address.raw_hash().unwrap().to_owned(); + self.write(&validator_address_raw_hash_key(raw_hash), address) + } + + fn write_validator_staking_reward_address( + &mut self, + key: &Self::Address, + value: Self::Address, + ) -> Result<(), Self::Error> { + self.write(&validator_staking_reward_address_key(key), &value) + } + + fn write_validator_consensus_key( + &mut self, + key: &Self::Address, + value: ValidatorConsensusKeys, + ) -> Result<(), Self::Error> { + self.write(&validator_consensus_key_key(key), &value) + } + + fn write_validator_state( + &mut self, + key: &Self::Address, + value: ValidatorStates, + ) -> Result<(), Self::Error> { + self.write(&validator_state_key(key), &value) + } + + fn write_validator_total_deltas( + &mut self, + key: &Self::Address, + value: ValidatorTotalDeltas, + ) -> Result<(), Self::Error> { + self.write(&validator_total_deltas_key(key), &value) + } + + fn write_validator_voting_power( + &mut self, + key: &Self::Address, + value: ValidatorVotingPowers, + ) -> Result<(), Self::Error> { + self.write(&validator_voting_power_key(key), &value) + } + + fn write_bond( + &mut self, + key: &BondId, + value: Bonds, + ) -> Result<(), Self::Error> { + self.write(&bond_key(key), &value) + } + + fn write_unbond( + &mut self, + key: &BondId, + value: Unbonds, + ) -> Result<(), Self::Error> { + self.write(&unbond_key(key), &value) + } + + fn write_validator_set( + &mut self, + value: ValidatorSets, + ) -> Result<(), Self::Error> { + self.write(&validator_set_key(), &value) + } + + fn write_total_voting_power( + &mut self, + value: TotalVotingPowers, + ) -> Result<(), Self::Error> { + self.write(&total_voting_power_key(), &value) + } + + fn delete_bond(&mut self, key: &BondId) -> Result<(), Self::Error> { + self.delete(&bond_key(key)) + } + + fn delete_unbond(&mut self, key: &BondId) -> Result<(), Self::Error> { + self.delete(&unbond_key(key)) + } + + fn transfer( + &mut self, + token: &Self::Address, + amount: Self::TokenAmount, + src: &Self::Address, + dest: &Self::Address, + ) -> Result<(), Self::Error> { + crate::token::transfer(self, src, dest, token, amount) + } +} diff --git a/tx_prelude/src/token.rs b/tx_prelude/src/token.rs new file mode 100644 index 0000000000..2fa86efd45 --- /dev/null +++ b/tx_prelude/src/token.rs @@ -0,0 +1,53 @@ +use namada::types::address::{Address, InternalAddress}; +use namada::types::token; +pub use namada::types::token::*; + +use super::*; + +/// A token transfer that can be used in a transaction. 
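+///
+/// For example (a sketch; `src`, `dest` and `token` are assumed to be
+/// existing addresses and `src` to hold a sufficient balance, with
+/// `Amount: From<u64>` assumed for the illustrative amount):
+///
+/// ```ignore
+/// transfer(ctx, &src, &dest, &token, Amount::from(10_000))?;
+/// ```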
+pub fn transfer( + ctx: &mut Ctx, + src: &Address, + dest: &Address, + token: &Address, + amount: Amount, +) -> TxResult { + let src_key = token::balance_key(token, src); + let dest_key = token::balance_key(token, dest); + let src_bal: Option<Amount> = ctx.read(&src_key)?; + let mut src_bal = src_bal.unwrap_or_else(|| match src { + Address::Internal(InternalAddress::IbcMint) => Amount::max(), + _ => { + log_string(format!("src {} has no balance", src)); + unreachable!() + } + }); + src_bal.spend(&amount); + let mut dest_bal: Amount = ctx.read(&dest_key)?.unwrap_or_default(); + dest_bal.receive(&amount); + match src { + Address::Internal(InternalAddress::IbcMint) => { + ctx.write_temp(&src_key, src_bal)?; + } + Address::Internal(InternalAddress::IbcBurn) => { + log_string("invalid transfer from the burn address"); + unreachable!() + } + _ => { + ctx.write(&src_key, src_bal)?; + } + } + match dest { + Address::Internal(InternalAddress::IbcMint) => { + log_string("invalid transfer to the mint address"); + unreachable!() + } + Address::Internal(InternalAddress::IbcBurn) => { + ctx.write_temp(&dest_key, dest_bal)?; + } + _ => { + ctx.write(&dest_key, dest_bal)?; + } + } + Ok(()) +} diff --git a/vm_env/Cargo.toml b/vm_env/Cargo.toml index cebcd4c0ca..0e48665e5a 100644 --- a/vm_env/Cargo.toml +++ b/vm_env/Cargo.toml @@ -11,6 +11,4 @@ default = [] [dependencies] namada = {path = "../shared"} -namada_macros = {path = "../macros"} borsh = "0.9.0" -hex = "0.4.3" diff --git a/vm_env/src/governance.rs b/vm_env/src/governance.rs deleted file mode 100644 index db4ea7916f..0000000000 --- a/vm_env/src/governance.rs +++ /dev/null @@ -1,81 +0,0 @@ -/// Tx imports and functions. -pub mod tx { - - use namada::ledger::governance::storage; - use namada::ledger::governance::vp::ADDRESS as governance_address; - use namada::types::address::xan as m1t; - use namada::types::token::Amount; - use namada::types::transaction::governance::{ - InitProposalData, VoteProposalData, - }; - - use crate::imports::tx; - use crate::token::tx::transfer; - - /// A proposal creation transaction.
- pub fn init_proposal(data: InitProposalData) { - let counter_key = storage::get_counter_key(); - let proposal_id = if let Some(id) = data.id { - id - } else { - tx::read(&counter_key.to_string()).unwrap() - }; - - let content_key = storage::get_content_key(proposal_id); - tx::write_bytes(&content_key.to_string(), data.content); - - let author_key = storage::get_author_key(proposal_id); - tx::write(&author_key.to_string(), data.author.clone()); - - let voting_start_epoch_key = - storage::get_voting_start_epoch_key(proposal_id); - tx::write(&voting_start_epoch_key.to_string(), data.voting_start_epoch); - - let voting_end_epoch_key = - storage::get_voting_end_epoch_key(proposal_id); - tx::write(&voting_end_epoch_key.to_string(), data.voting_end_epoch); - - let grace_epoch_key = storage::get_grace_epoch_key(proposal_id); - tx::write(&grace_epoch_key.to_string(), data.grace_epoch); - - if let Some(proposal_code) = data.proposal_code { - let proposal_code_key = storage::get_proposal_code_key(proposal_id); - tx::write_bytes(&proposal_code_key.to_string(), proposal_code); - } - - tx::write(&counter_key.to_string(), proposal_id + 1); - - let min_proposal_funds_key = storage::get_min_proposal_fund_key(); - let min_proposal_funds: Amount = - tx::read(&min_proposal_funds_key.to_string()).unwrap(); - - let funds_key = storage::get_funds_key(proposal_id); - tx::write(&funds_key.to_string(), min_proposal_funds); - - // this key must always be written for each proposal - let committing_proposals_key = storage::get_committing_proposals_key( - proposal_id, - data.grace_epoch.0, - ); - tx::write(&committing_proposals_key.to_string(), ()); - - transfer( - &data.author, - &governance_address, - &m1t(), - min_proposal_funds, - ); - } - - /// A proposal vote transaction. - pub fn vote_proposal(data: VoteProposalData) { - for delegation in data.delegations { - let vote_key = storage::get_vote_proposal_key( - data.id, - data.voter.clone(), - delegation, - ); - tx::write(&vote_key.to_string(), data.vote.clone()); - } - } -} diff --git a/vm_env/src/ibc.rs b/vm_env/src/ibc.rs deleted file mode 100644 index febaa78560..0000000000 --- a/vm_env/src/ibc.rs +++ /dev/null @@ -1,50 +0,0 @@ -//! IBC functions for transactions. - -pub use namada::ledger::ibc::handler::IbcActions; -use namada::types::address::Address; -use namada::types::ibc::IbcEvent; -use namada::types::storage::{BlockHeight, Key}; -use namada::types::time::Rfc3339String; -use namada::types::token::Amount; - -use crate::imports::tx; -use crate::token::tx::transfer; - -/// This struct integrates and gives access to lower-level IBC functions. 
-pub struct Ibc; - -impl IbcActions for Ibc { - fn read_ibc_data(&self, key: &Key) -> Option> { - tx::read_bytes(key.to_string()) - } - - fn write_ibc_data(&self, key: &Key, data: impl AsRef<[u8]>) { - tx::write_bytes(key.to_string(), data) - } - - fn delete_ibc_data(&self, key: &Key) { - tx::delete(key.to_string()) - } - - fn emit_ibc_event(&self, event: IbcEvent) { - tx::emit_ibc_event(&event) - } - - fn transfer_token( - &self, - src: &Address, - dest: &Address, - token: &Address, - amount: Amount, - ) { - transfer(src, dest, token, amount) - } - - fn get_height(&self) -> BlockHeight { - tx::get_block_height() - } - - fn get_header_time(&self) -> Rfc3339String { - tx::get_block_time() - } -} diff --git a/vm_env/src/imports.rs b/vm_env/src/imports.rs deleted file mode 100644 index 2eabe77e54..0000000000 --- a/vm_env/src/imports.rs +++ /dev/null @@ -1,665 +0,0 @@ -use std::mem::ManuallyDrop; - -use borsh::BorshDeserialize; -use namada::types::internal::HostEnvResult; -use namada::vm::types::KeyVal; - -/// This function is a helper to handle the second step of reading var-len -/// values from the host. -/// -/// In cases where we're reading a value from the host in the guest and -/// we don't know the byte size up-front, we have to read it in 2-steps. The -/// first step reads the value into a result buffer and returns the size (if -/// any) back to the guest, the second step reads the value from cache into a -/// pre-allocated buffer with the obtained size. -fn read_from_buffer( - read_result: i64, - result_buffer: unsafe extern "C" fn(u64), -) -> Option> { - if HostEnvResult::is_fail(read_result) { - None - } else { - let result: Vec = Vec::with_capacity(read_result as _); - // The `result` will be dropped from the `target`, which is - // reconstructed from the same memory - let result = ManuallyDrop::new(result); - let offset = result.as_slice().as_ptr() as u64; - unsafe { result_buffer(offset) }; - let target = unsafe { - Vec::from_raw_parts(offset as _, read_result as _, read_result as _) - }; - Some(target) - } -} - -/// This function is a helper to handle the second step of reading var-len -/// values in a key-value pair from the host. -fn read_key_val_from_buffer( - read_result: i64, - result_buffer: unsafe extern "C" fn(u64), -) -> Option<(String, T)> { - let key_val = read_from_buffer(read_result, result_buffer) - .and_then(|t| KeyVal::try_from_slice(&t[..]).ok()); - key_val.and_then(|key_val| { - // decode the value - T::try_from_slice(&key_val.val) - .map(|val| (key_val.key, val)) - .ok() - }) -} - -/// Transaction environment imports -pub mod tx { - use core::slice; - use std::convert::TryFrom; - use std::marker::PhantomData; - - pub use borsh::{BorshDeserialize, BorshSerialize}; - use namada::types::address; - use namada::types::address::Address; - use namada::types::chain::CHAIN_ID_LENGTH; - use namada::types::ibc::IbcEvent; - use namada::types::internal::HostEnvResult; - use namada::types::storage::{ - BlockHash, BlockHeight, Epoch, BLOCK_HASH_LENGTH, - }; - use namada::types::time::Rfc3339String; - - #[derive(Debug)] - pub struct KeyValIterator(pub u64, pub PhantomData); - - /// Try to read a Borsh encoded variable-length value at the given key from - /// storage. 
- pub fn read(key: impl AsRef) -> Option { - let key = key.as_ref(); - let read_result = - unsafe { anoma_tx_read(key.as_ptr() as _, key.len() as _) }; - super::read_from_buffer(read_result, anoma_tx_result_buffer) - .and_then(|t| T::try_from_slice(&t[..]).ok()) - } - - /// Try to read a variable-length value as bytes at the given key from - /// storage. - pub fn read_bytes(key: impl AsRef) -> Option> { - let key = key.as_ref(); - let read_result = - unsafe { anoma_tx_read(key.as_ptr() as _, key.len() as _) }; - super::read_from_buffer(read_result, anoma_tx_result_buffer) - } - - /// Check if the given key is present in storage. - pub fn has_key(key: impl AsRef) -> bool { - let key = key.as_ref(); - let found = - unsafe { anoma_tx_has_key(key.as_ptr() as _, key.len() as _) }; - HostEnvResult::is_success(found) - } - - /// Write a value to be encoded with Borsh at the given key to storage. - pub fn write(key: impl AsRef, val: T) { - let buf = val.try_to_vec().unwrap(); - write_bytes(key, buf); - } - - /// Write a value as bytes at the given key to storage. - pub fn write_bytes(key: impl AsRef, val: impl AsRef<[u8]>) { - let key = key.as_ref(); - unsafe { - anoma_tx_write( - key.as_ptr() as _, - key.len() as _, - val.as_ref().as_ptr() as _, - val.as_ref().len() as _, - ) - }; - } - - /// Write a temporary value to be encoded with Borsh at the given key to - /// storage. - pub fn write_temp(key: impl AsRef, val: T) { - let buf = val.try_to_vec().unwrap(); - write_bytes_temp(key, buf); - } - - /// Write a temporary value as bytes at the given key to storage. - pub fn write_bytes_temp(key: impl AsRef, val: impl AsRef<[u8]>) { - let key = key.as_ref(); - unsafe { - anoma_tx_write_temp( - key.as_ptr() as _, - key.len() as _, - val.as_ref().as_ptr() as _, - val.as_ref().len() as _, - ) - }; - } - - /// Delete a value at the given key from storage. - pub fn delete(key: impl AsRef) { - let key = key.as_ref(); - unsafe { anoma_tx_delete(key.as_ptr() as _, key.len() as _) }; - } - - /// Get an iterator with the given prefix. - /// - /// Important note: The prefix iterator will ignore keys that are not yet - /// committed to storage from the block in which this transaction is being - /// applied. It will only find keys that are already committed to - /// storage (i.e. from predecessor blocks). However, it will provide the - /// most up-to-date value for such keys. - pub fn iter_prefix( - prefix: impl AsRef, - ) -> KeyValIterator { - let prefix = prefix.as_ref(); - let iter_id = unsafe { - anoma_tx_iter_prefix(prefix.as_ptr() as _, prefix.len() as _) - }; - KeyValIterator(iter_id, PhantomData) - } - - impl Iterator for KeyValIterator { - type Item = (String, T); - - fn next(&mut self) -> Option<(String, T)> { - let read_result = unsafe { anoma_tx_iter_next(self.0) }; - super::read_key_val_from_buffer(read_result, anoma_tx_result_buffer) - } - } - - /// Insert a verifier address. This address must exist on chain, otherwise - /// the transaction will be rejected. - /// - /// Validity predicates of each verifier addresses inserted in the - /// transaction will validate the transaction and will receive all the - /// changed storage keys and initialized accounts in their inputs. 
- pub fn insert_verifier(addr: &Address) { - let addr = addr.encode(); - unsafe { anoma_tx_insert_verifier(addr.as_ptr() as _, addr.len() as _) } - } - - /// Update a validity predicate - pub fn update_validity_predicate(addr: &Address, code: impl AsRef<[u8]>) { - let addr = addr.encode(); - let code = code.as_ref(); - unsafe { - anoma_tx_update_validity_predicate( - addr.as_ptr() as _, - addr.len() as _, - code.as_ptr() as _, - code.len() as _, - ) - }; - } - - // Initialize a new account - pub fn init_account(code: impl AsRef<[u8]>) -> Address { - let code = code.as_ref(); - let result = Vec::with_capacity(address::ESTABLISHED_ADDRESS_BYTES_LEN); - unsafe { - anoma_tx_init_account( - code.as_ptr() as _, - code.len() as _, - result.as_ptr() as _, - ) - }; - let slice = unsafe { - slice::from_raw_parts( - result.as_ptr(), - address::ESTABLISHED_ADDRESS_BYTES_LEN, - ) - }; - Address::try_from_slice(slice) - .expect("Decoding address created by the ledger shouldn't fail") - } - - /// Emit an IBC event. There can be only one event per transaction. On - /// multiple calls, only the last emitted event will be used. - pub fn emit_ibc_event(event: &IbcEvent) { - let event = BorshSerialize::try_to_vec(event).unwrap(); - unsafe { - anoma_tx_emit_ibc_event(event.as_ptr() as _, event.len() as _) - }; - } - - /// Get the chain ID - pub fn get_chain_id() -> String { - let result = Vec::with_capacity(CHAIN_ID_LENGTH); - unsafe { - anoma_tx_get_chain_id(result.as_ptr() as _); - } - let slice = - unsafe { slice::from_raw_parts(result.as_ptr(), CHAIN_ID_LENGTH) }; - String::from_utf8(slice.to_vec()).expect("Cannot convert the ID string") - } - - /// Get height of the current block - pub fn get_block_height() -> BlockHeight { - BlockHeight(unsafe { anoma_tx_get_block_height() }) - } - - /// Get time of the current block header as rfc 3339 string - pub fn get_block_time() -> Rfc3339String { - let read_result = unsafe { anoma_tx_get_block_time() }; - let time_value = - super::read_from_buffer(read_result, anoma_tx_result_buffer) - .expect("The block time should exist"); - Rfc3339String( - String::try_from_slice(&time_value[..]) - .expect("The conversion shouldn't fail"), - ) - } - - /// Get hash of the current block - pub fn get_block_hash() -> BlockHash { - let result = Vec::with_capacity(BLOCK_HASH_LENGTH); - unsafe { - anoma_tx_get_block_hash(result.as_ptr() as _); - } - let slice = unsafe { - slice::from_raw_parts(result.as_ptr(), BLOCK_HASH_LENGTH) - }; - BlockHash::try_from(slice).expect("Cannot convert the hash") - } - - /// Get epoch of the current block - pub fn get_block_epoch() -> Epoch { - Epoch(unsafe { anoma_tx_get_block_epoch() }) - } - - /// Log a string. The message will be printed at the `tracing::Level::Info`. - pub fn log_string>(msg: T) { - let msg = msg.as_ref(); - unsafe { - anoma_tx_log_string(msg.as_ptr() as _, msg.len() as _); - } - } - - // These host functions are implemented in the Anoma's [`host_env`] - // module. The environment provides calls to them via this C interface. - extern "C" { - // Read variable-length data when we don't know the size up-front, - // returns the size of the value (can be 0), or -1 if the key is - // not present. If a value is found, it will be placed in the read - // cache, because we cannot allocate a buffer for it before we know - // its size. - fn anoma_tx_read(key_ptr: u64, key_len: u64) -> i64; - - // Read a value from result buffer. - fn anoma_tx_result_buffer(result_ptr: u64); - - // Returns 1 if the key is present, -1 otherwise. 
- fn anoma_tx_has_key(key_ptr: u64, key_len: u64) -> i64; - - // Write key/value - fn anoma_tx_write( - key_ptr: u64, - key_len: u64, - val_ptr: u64, - val_len: u64, - ); - - // Write a temporary key/value - fn anoma_tx_write_temp( - key_ptr: u64, - key_len: u64, - val_ptr: u64, - val_len: u64, - ); - - // Delete the given key and its value - fn anoma_tx_delete(key_ptr: u64, key_len: u64); - - // Get an ID of a data iterator with key prefix - fn anoma_tx_iter_prefix(prefix_ptr: u64, prefix_len: u64) -> u64; - - // Returns the size of the value (can be 0), or -1 if there's no next - // value. If a value is found, it will be placed in the read - // cache, because we cannot allocate a buffer for it before we know - // its size. - fn anoma_tx_iter_next(iter_id: u64) -> i64; - - // Insert a verifier - fn anoma_tx_insert_verifier(addr_ptr: u64, addr_len: u64); - - // Update a validity predicate - fn anoma_tx_update_validity_predicate( - addr_ptr: u64, - addr_len: u64, - code_ptr: u64, - code_len: u64, - ); - - // Initialize a new account - fn anoma_tx_init_account(code_ptr: u64, code_len: u64, result_ptr: u64); - - // Emit an IBC event - fn anoma_tx_emit_ibc_event(event_ptr: u64, event_len: u64); - - // Get the chain ID - fn anoma_tx_get_chain_id(result_ptr: u64); - - // Get the current block height - fn anoma_tx_get_block_height() -> u64; - - // Get the time of the current block header - fn anoma_tx_get_block_time() -> i64; - - // Get the current block hash - fn anoma_tx_get_block_hash(result_ptr: u64); - - // Get the current block epoch - fn anoma_tx_get_block_epoch() -> u64; - - // Requires a node running with "Info" log level - fn anoma_tx_log_string(str_ptr: u64, str_len: u64); - } -} - -/// Validity predicate environment imports -pub mod vp { - use core::slice; - use std::convert::TryFrom; - use std::marker::PhantomData; - - pub use borsh::{BorshDeserialize, BorshSerialize}; - use namada::types::chain::CHAIN_ID_LENGTH; - use namada::types::hash::{Hash, HASH_LENGTH}; - use namada::types::internal::HostEnvResult; - use namada::types::key::*; - use namada::types::storage::{ - BlockHash, BlockHeight, Epoch, BLOCK_HASH_LENGTH, - }; - - pub struct PreKeyValIterator(pub u64, pub PhantomData); - - pub struct PostKeyValIterator(pub u64, pub PhantomData); - - /// Try to read a Borsh encoded variable-length value at the given key from - /// storage before transaction execution. - pub fn read_pre(key: impl AsRef) -> Option { - let key = key.as_ref(); - let read_result = - unsafe { anoma_vp_read_pre(key.as_ptr() as _, key.len() as _) }; - super::read_from_buffer(read_result, anoma_vp_result_buffer) - .and_then(|t| T::try_from_slice(&t[..]).ok()) - } - - /// Try to read a variable-length value as bytesat the given key from - /// storage before transaction execution. - pub fn read_bytes_pre(key: impl AsRef) -> Option> { - let key = key.as_ref(); - let read_result = - unsafe { anoma_vp_read_pre(key.as_ptr() as _, key.len() as _) }; - super::read_from_buffer(read_result, anoma_vp_result_buffer) - } - - /// Try to read a Borsh encoded variable-length value at the given key from - /// storage after transaction execution. - pub fn read_post(key: impl AsRef) -> Option { - let key = key.as_ref(); - let read_result = - unsafe { anoma_vp_read_post(key.as_ptr() as _, key.len() as _) }; - super::read_from_buffer(read_result, anoma_vp_result_buffer) - .and_then(|t| T::try_from_slice(&t[..]).ok()) - } - - /// Try to read a variable-length value as bytes at the given key from - /// storage after transaction execution. 
- pub fn read_bytes_post(key: impl AsRef) -> Option> { - let key = key.as_ref(); - let read_result = - unsafe { anoma_vp_read_post(key.as_ptr() as _, key.len() as _) }; - super::read_from_buffer(read_result, anoma_vp_result_buffer) - } - - /// Try to read a Borsh encoded variable-length value at the given key from - /// storage before transaction execution. - pub fn read_temp(key: impl AsRef) -> Option { - let key = key.as_ref(); - let read_result = - unsafe { anoma_vp_read_temp(key.as_ptr() as _, key.len() as _) }; - super::read_from_buffer(read_result, anoma_vp_result_buffer) - .and_then(|t| T::try_from_slice(&t[..]).ok()) - } - - /// Try to read a variable-length value as bytes at the given key from - /// storage before transaction execution. - pub fn read_bytes_temp(key: impl AsRef) -> Option> { - let key = key.as_ref(); - let read_result = - unsafe { anoma_vp_read_temp(key.as_ptr() as _, key.len() as _) }; - super::read_from_buffer(read_result, anoma_vp_result_buffer) - } - - /// Check if the given key was present in storage before transaction - /// execution. - pub fn has_key_pre(key: impl AsRef) -> bool { - let key = key.as_ref(); - let found = - unsafe { anoma_vp_has_key_pre(key.as_ptr() as _, key.len() as _) }; - HostEnvResult::is_success(found) - } - - /// Check if the given key is present in storage after transaction - /// execution. - pub fn has_key_post(key: impl AsRef) -> bool { - let key = key.as_ref(); - let found = - unsafe { anoma_vp_has_key_post(key.as_ptr() as _, key.len() as _) }; - HostEnvResult::is_success(found) - } - - /// Get an iterator with the given prefix before transaction execution - pub fn iter_prefix_pre( - prefix: impl AsRef, - ) -> PreKeyValIterator { - let prefix = prefix.as_ref(); - let iter_id = unsafe { - anoma_vp_iter_prefix(prefix.as_ptr() as _, prefix.len() as _) - }; - PreKeyValIterator(iter_id, PhantomData) - } - - impl Iterator for PreKeyValIterator { - type Item = (String, T); - - fn next(&mut self) -> Option<(String, T)> { - let read_result = unsafe { anoma_vp_iter_pre_next(self.0) }; - super::read_key_val_from_buffer(read_result, anoma_vp_result_buffer) - } - } - - /// Get an iterator with the given prefix after transaction execution - pub fn iter_prefix_post( - prefix: impl AsRef, - ) -> PostKeyValIterator { - let prefix = prefix.as_ref(); - let iter_id = unsafe { - anoma_vp_iter_prefix(prefix.as_ptr() as _, prefix.len() as _) - }; - PostKeyValIterator(iter_id, PhantomData) - } - - impl Iterator for PostKeyValIterator { - type Item = (String, T); - - fn next(&mut self) -> Option<(String, T)> { - let read_result = unsafe { anoma_vp_iter_post_next(self.0) }; - super::read_key_val_from_buffer(read_result, anoma_vp_result_buffer) - } - } - - /// Get the chain ID - pub fn get_chain_id() -> String { - let result = Vec::with_capacity(CHAIN_ID_LENGTH); - unsafe { - anoma_vp_get_chain_id(result.as_ptr() as _); - } - let slice = - unsafe { slice::from_raw_parts(result.as_ptr(), CHAIN_ID_LENGTH) }; - String::from_utf8(slice.to_vec()).expect("Cannot convert the ID string") - } - - /// Get height of the current block - pub fn get_block_height() -> BlockHeight { - BlockHeight(unsafe { anoma_vp_get_block_height() }) - } - - /// Get a block hash - pub fn get_block_hash() -> BlockHash { - let result = Vec::with_capacity(BLOCK_HASH_LENGTH); - unsafe { - anoma_vp_get_block_hash(result.as_ptr() as _); - } - let slice = unsafe { - slice::from_raw_parts(result.as_ptr(), BLOCK_HASH_LENGTH) - }; - BlockHash::try_from(slice).expect("Cannot convert the hash") - } - 
- /// Get a tx hash - pub fn get_tx_code_hash() -> Hash { - let result = Vec::with_capacity(HASH_LENGTH); - unsafe { - anoma_vp_get_tx_code_hash(result.as_ptr() as _); - } - let slice = - unsafe { slice::from_raw_parts(result.as_ptr(), HASH_LENGTH) }; - Hash::try_from(slice).expect("Cannot convert the hash") - } - - /// Get epoch of the current block - pub fn get_block_epoch() -> Epoch { - Epoch(unsafe { anoma_vp_get_block_epoch() }) - } - - /// Verify a transaction signature. The signature is expected to have been - /// produced on the encoded transaction [`namada::proto::Tx`] - /// using [`namada::proto::Tx::sign`]. - pub fn verify_tx_signature( - pk: &common::PublicKey, - sig: &common::Signature, - ) -> bool { - let pk = BorshSerialize::try_to_vec(pk).unwrap(); - let sig = BorshSerialize::try_to_vec(sig).unwrap(); - let valid = unsafe { - anoma_vp_verify_tx_signature( - pk.as_ptr() as _, - pk.len() as _, - sig.as_ptr() as _, - sig.len() as _, - ) - }; - HostEnvResult::is_success(valid) - } - - /// Log a string. The message will be printed at the `tracing::Level::Info`. - pub fn log_string>(msg: T) { - let msg = msg.as_ref(); - unsafe { - anoma_vp_log_string(msg.as_ptr() as _, msg.len() as _); - } - } - - /// Evaluate a validity predicate with given data. The address, changed - /// storage keys and verifiers will have the same values as the input to - /// caller's validity predicate. - /// - /// If the execution fails for whatever reason, this will return `false`. - /// Otherwise returns the result of evaluation. - pub fn eval(vp_code: Vec, input_data: Vec) -> bool { - let result = unsafe { - anoma_vp_eval( - vp_code.as_ptr() as _, - vp_code.len() as _, - input_data.as_ptr() as _, - input_data.len() as _, - ) - }; - HostEnvResult::is_success(result) - } - - // These host functions are implemented in the Anoma's [`host_env`] - // module. The environment provides calls to them via this C interface. - extern "C" { - // Read variable-length prior state when we don't know the size - // up-front, returns the size of the value (can be 0), or -1 if - // the key is not present. If a value is found, it will be placed in the - // result buffer, because we cannot allocate a buffer for it before - // we know its size. - fn anoma_vp_read_pre(key_ptr: u64, key_len: u64) -> i64; - - // Read variable-length posterior state when we don't know the size - // up-front, returns the size of the value (can be 0), or -1 if - // the key is not present. If a value is found, it will be placed in the - // result buffer, because we cannot allocate a buffer for it before - // we know its size. - fn anoma_vp_read_post(key_ptr: u64, key_len: u64) -> i64; - - // Read variable-length temporary state when we don't know the size - // up-front, returns the size of the value (can be 0), or -1 if - // the key is not present. If a value is found, it will be placed in the - // result buffer, because we cannot allocate a buffer for it before - // we know its size. - fn anoma_vp_read_temp(key_ptr: u64, key_len: u64) -> i64; - - // Read a value from result buffer. - fn anoma_vp_result_buffer(result_ptr: u64); - - // Returns 1 if the key is present in prior state, -1 otherwise. - fn anoma_vp_has_key_pre(key_ptr: u64, key_len: u64) -> i64; - - // Returns 1 if the key is present in posterior state, -1 otherwise. 
- fn anoma_vp_has_key_post(key_ptr: u64, key_len: u64) -> i64; - - // Get an ID of a data iterator with key prefix - fn anoma_vp_iter_prefix(prefix_ptr: u64, prefix_len: u64) -> u64; - - // Read variable-length prior state when we don't know the size - // up-front, returns the size of the value (can be 0), or -1 if - // the key is not present. If a value is found, it will be placed in the - // result buffer, because we cannot allocate a buffer for it before - // we know its size. - fn anoma_vp_iter_pre_next(iter_id: u64) -> i64; - - // Read variable-length posterior state when we don't know the size - // up-front, returns the size of the value (can be 0), or -1 if the - // key is not present. If a value is found, it will be placed in the - // result buffer, because we cannot allocate a buffer for it before - // we know its size. - fn anoma_vp_iter_post_next(iter_id: u64) -> i64; - - // Get the chain ID - fn anoma_vp_get_chain_id(result_ptr: u64); - - // Get the current block height - fn anoma_vp_get_block_height() -> u64; - - // Get the current block hash - fn anoma_vp_get_block_hash(result_ptr: u64); - - // Get the current tx hash - fn anoma_vp_get_tx_code_hash(result_ptr: u64); - - // Get the current block epoch - fn anoma_vp_get_block_epoch() -> u64; - - // Verify a transaction signature - fn anoma_vp_verify_tx_signature( - pk_ptr: u64, - pk_len: u64, - sig_ptr: u64, - sig_len: u64, - ) -> i64; - - // Requires a node running with "Info" log level - fn anoma_vp_log_string(str_ptr: u64, str_len: u64); - - fn anoma_vp_eval( - vp_code_ptr: u64, - vp_code_len: u64, - input_data_ptr: u64, - input_data_len: u64, - ) -> i64; - } -} diff --git a/vm_env/src/intent.rs b/vm_env/src/intent.rs deleted file mode 100644 index 226cb708db..0000000000 --- a/vm_env/src/intent.rs +++ /dev/null @@ -1,39 +0,0 @@ -use std::collections::HashSet; - -use namada::proto::Signed; -use namada::types::intent; -use namada::types::key::*; - -/// Tx imports and functions. -pub mod tx { - pub use namada::types::intent::*; - - use super::*; - pub fn invalidate_exchange(intent: &Signed) { - use crate::imports::tx; - let key = intent::invalid_intent_key(&intent.data.addr); - let mut invalid_intent: HashSet = - tx::read(&key.to_string()).unwrap_or_default(); - invalid_intent.insert(intent.sig.clone()); - tx::write(&key.to_string(), &invalid_intent) - } -} - -/// Vp imports and functions. -pub mod vp { - pub use namada::types::intent::*; - - use super::*; - - pub fn vp_exchange(intent: &Signed) -> bool { - use crate::imports::vp; - let key = intent::invalid_intent_key(&intent.data.addr); - - let invalid_intent_pre: HashSet = - vp::read_pre(&key.to_string()).unwrap_or_default(); - let invalid_intent_post: HashSet = - vp::read_post(&key.to_string()).unwrap_or_default(); - !invalid_intent_pre.contains(&intent.sig) - && invalid_intent_post.contains(&intent.sig) - } -} diff --git a/vm_env/src/key/ed25519.rs b/vm_env/src/key/ed25519.rs deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/vm_env/src/key/mod.rs b/vm_env/src/key/mod.rs deleted file mode 100644 index 30aea96c46..0000000000 --- a/vm_env/src/key/mod.rs +++ /dev/null @@ -1,16 +0,0 @@ -use namada::types::address::Address; - -/// Vp imports and functions. -pub mod vp { - pub use namada::types::key::*; - - use super::*; - use crate::imports::vp; - - /// Get the public key associated with the given address. Panics if not - /// found. 
- pub fn get(owner: &Address) -> Option { - let key = pk_key(owner).to_string(); - vp::read_pre(&key) - } -} diff --git a/vm_env/src/lib.rs b/vm_env/src/lib.rs index 578079d695..1421dbde48 100644 --- a/vm_env/src/lib.rs +++ b/vm_env/src/lib.rs @@ -1,55 +1,241 @@ -//! This crate contains library code for wasm. Some of the code is re-exported -//! from the `shared` crate. +//! This crate contains the WASM VM low-level interface. #![doc(html_favicon_url = "https://dev.anoma.net/master/favicon.png")] #![doc(html_logo_url = "https://dev.anoma.net/master/rustdoc-logo.png")] #![deny(rustdoc::broken_intra_doc_links)] #![deny(rustdoc::private_intra_doc_links)] -pub mod governance; -pub mod ibc; -pub mod imports; -pub mod intent; -pub mod key; -pub mod nft; -pub mod proof_of_stake; -pub mod token; - -pub mod tx_prelude { - pub use namada::ledger::governance::storage; - pub use namada::ledger::parameters::storage as parameters_storage; - pub use namada::ledger::storage::types::encode; - pub use namada::ledger::treasury::storage as treasury_storage; - pub use namada::proto::{Signed, SignedTxData}; - pub use namada::types::address::Address; - pub use namada::types::storage::Key; - pub use namada::types::*; - pub use namada_macros::transaction; - - pub use crate::governance::tx as governance; - pub use crate::ibc::{Ibc, IbcActions}; - pub use crate::imports::tx::*; - pub use crate::intent::tx as intent; - pub use crate::nft::tx as nft; - pub use crate::proof_of_stake::{self, PoS, PosRead, PosWrite}; - pub use crate::token::tx as token; +use std::mem::ManuallyDrop; + +use borsh::BorshDeserialize; +use namada::types::internal::HostEnvResult; +use namada::vm::types::KeyVal; + +/// Transaction environment imports +pub mod tx { + // These host functions are implemented in the Anoma's [`host_env`] + // module. The environment provides calls to them via this C interface. + extern "C" { + // Read variable-length data when we don't know the size up-front, + // returns the size of the value (can be 0), or -1 if the key is + // not present. If a value is found, it will be placed in the read + // cache, because we cannot allocate a buffer for it before we know + // its size. + pub fn anoma_tx_read(key_ptr: u64, key_len: u64) -> i64; + + // Read a value from result buffer. + pub fn anoma_tx_result_buffer(result_ptr: u64); + + // Returns 1 if the key is present, -1 otherwise. + pub fn anoma_tx_has_key(key_ptr: u64, key_len: u64) -> i64; + + // Write key/value + pub fn anoma_tx_write( + key_ptr: u64, + key_len: u64, + val_ptr: u64, + val_len: u64, + ); + + // Write a temporary key/value + pub fn anoma_tx_write_temp( + key_ptr: u64, + key_len: u64, + val_ptr: u64, + val_len: u64, + ); + + // Delete the given key and its value + pub fn anoma_tx_delete(key_ptr: u64, key_len: u64); + + // Get an ID of a data iterator with key prefix, ordered by storage + // keys. + pub fn anoma_tx_iter_prefix(prefix_ptr: u64, prefix_len: u64) -> u64; + + // Get an ID of a data iterator with key prefix, reverse ordered by + // storage keys. + pub fn anoma_tx_rev_iter_prefix( + prefix_ptr: u64, + prefix_len: u64, + ) -> u64; + + // Returns the size of the value (can be 0), or -1 if there's no next + // value. If a value is found, it will be placed in the read + // cache, because we cannot allocate a buffer for it before we know + // its size. 
+ pub fn anoma_tx_iter_next(iter_id: u64) -> i64; + + // Insert a verifier + pub fn anoma_tx_insert_verifier(addr_ptr: u64, addr_len: u64); + + // Update a validity predicate + pub fn anoma_tx_update_validity_predicate( + addr_ptr: u64, + addr_len: u64, + code_ptr: u64, + code_len: u64, + ); + + // Initialize a new account + pub fn anoma_tx_init_account( + code_ptr: u64, + code_len: u64, + result_ptr: u64, + ); + + // Emit an IBC event + pub fn anoma_tx_emit_ibc_event(event_ptr: u64, event_len: u64); + + // Get the chain ID + pub fn anoma_tx_get_chain_id(result_ptr: u64); + + // Get the current block height + pub fn anoma_tx_get_block_height() -> u64; + + // Get the time of the current block header + pub fn anoma_tx_get_block_time() -> i64; + + // Get the current block hash + pub fn anoma_tx_get_block_hash(result_ptr: u64); + + // Get the current block epoch + pub fn anoma_tx_get_block_epoch() -> u64; + + // Requires a node running with "Info" log level + pub fn anoma_tx_log_string(str_ptr: u64, str_len: u64); + } +} + +/// Validity predicate environment imports +pub mod vp { + // These host functions are implemented in the Anoma's [`host_env`] + // module. The environment provides calls to them via this C interface. + extern "C" { + // Read variable-length prior state when we don't know the size + // up-front, returns the size of the value (can be 0), or -1 if + // the key is not present. If a value is found, it will be placed in the + // result buffer, because we cannot allocate a buffer for it before + // we know its size. + pub fn anoma_vp_read_pre(key_ptr: u64, key_len: u64) -> i64; + + // Read variable-length posterior state when we don't know the size + // up-front, returns the size of the value (can be 0), or -1 if + // the key is not present. If a value is found, it will be placed in the + // result buffer, because we cannot allocate a buffer for it before + // we know its size. + pub fn anoma_vp_read_post(key_ptr: u64, key_len: u64) -> i64; + + // Read variable-length temporary state when we don't know the size + // up-front, returns the size of the value (can be 0), or -1 if + // the key is not present. If a value is found, it will be placed in the + // result buffer, because we cannot allocate a buffer for it before + // we know its size. + pub fn anoma_vp_read_temp(key_ptr: u64, key_len: u64) -> i64; + + // Read a value from result buffer. + pub fn anoma_vp_result_buffer(result_ptr: u64); + + // Returns 1 if the key is present in prior state, -1 otherwise. + pub fn anoma_vp_has_key_pre(key_ptr: u64, key_len: u64) -> i64; + + // Returns 1 if the key is present in posterior state, -1 otherwise. + pub fn anoma_vp_has_key_post(key_ptr: u64, key_len: u64) -> i64; + + // Get an ID of a data iterator with key prefix, ordered by storage + // keys. + pub fn anoma_vp_iter_prefix(prefix_ptr: u64, prefix_len: u64) -> u64; + + // Get an ID of a data iterator with key prefix, reverse ordered by + // storage keys. + pub fn anoma_vp_rev_iter_prefix( + prefix_ptr: u64, + prefix_len: u64, + ) -> u64; + + // Read variable-length prior state when we don't know the size + // up-front, returns the size of the value (can be 0), or -1 if + // the key is not present. If a value is found, it will be placed in the + // result buffer, because we cannot allocate a buffer for it before + // we know its size. 
+        pub fn anoma_vp_iter_pre_next(iter_id: u64) -> i64;
+
+        // Read variable-length posterior state when we don't know the size
+        // up-front, returns the size of the value (can be 0), or -1 if the
+        // key is not present. If a value is found, it will be placed in the
+        // result buffer, because we cannot allocate a buffer for it before
+        // we know its size.
+        pub fn anoma_vp_iter_post_next(iter_id: u64) -> i64;
+
+        // Get the chain ID
+        pub fn anoma_vp_get_chain_id(result_ptr: u64);
+
+        // Get the current block height
+        pub fn anoma_vp_get_block_height() -> u64;
+
+        // Get the current block hash
+        pub fn anoma_vp_get_block_hash(result_ptr: u64);
+
+        // Get the current tx hash
+        pub fn anoma_vp_get_tx_code_hash(result_ptr: u64);
+
+        // Get the current block epoch
+        pub fn anoma_vp_get_block_epoch() -> u64;
+
+        // Verify a transaction signature
+        pub fn anoma_vp_verify_tx_signature(
+            pk_ptr: u64,
+            pk_len: u64,
+            sig_ptr: u64,
+            sig_len: u64,
+        ) -> i64;
+
+        // Requires a node running with "Info" log level
+        pub fn anoma_vp_log_string(str_ptr: u64, str_len: u64);
+
+        pub fn anoma_vp_eval(
+            vp_code_ptr: u64,
+            vp_code_len: u64,
+            input_data_ptr: u64,
+            input_data_len: u64,
+        ) -> i64;
+    }
+}
+
+/// This function is a helper to handle the second step of reading var-len
+/// values from the host.
+///
+/// In cases where we're reading a value from the host in the guest and
+/// we don't know the byte size up-front, we have to read it in 2-steps. The
+/// first step reads the value into a result buffer and returns the size (if
+/// any) back to the guest, the second step reads the value from cache into a
+/// pre-allocated buffer with the obtained size.
+pub fn read_from_buffer(
+    read_result: i64,
+    result_buffer: unsafe extern "C" fn(u64),
+) -> Option<Vec<u8>> {
+    if HostEnvResult::is_fail(read_result) {
+        None
+    } else {
+        let result: Vec<u8> = Vec::with_capacity(read_result as _);
+        // The `result` will be dropped from the `target`, which is
+        // reconstructed from the same memory
+        let result = ManuallyDrop::new(result);
+        let offset = result.as_slice().as_ptr() as u64;
+        unsafe { result_buffer(offset) };
+        let target = unsafe {
+            Vec::from_raw_parts(offset as _, read_result as _, read_result as _)
+        };
+        Some(target)
+    }
 }
-pub mod vp_prelude {
-    // used in the VP input
-    pub use std::collections::{BTreeSet, HashSet};
-
-    pub use namada::ledger::governance::storage as gov_storage;
-    pub use namada::ledger::{parameters, pos as proof_of_stake};
-    pub use namada::proto::{Signed, SignedTxData};
-    pub use namada::types::address::Address;
-    pub use namada::types::storage::Key;
-    pub use namada::types::*;
-    pub use namada_macros::validity_predicate;
-
-    pub use crate::imports::vp::*;
-    pub use crate::intent::vp as intent;
-    pub use crate::key::vp as key;
-    pub use crate::nft::vp as nft;
-    pub use crate::token::vp as token;
+
+/// This function is a helper to handle the second step of reading var-len
+/// values in a key-value pair from the host.
+pub fn read_key_val_bytes_from_buffer(
+    read_result: i64,
+    result_buffer: unsafe extern "C" fn(u64),
+) -> Option<(String, Vec<u8>)> {
+    let key_val = read_from_buffer(read_result, result_buffer)
+        .and_then(|t| KeyVal::try_from_slice(&t[..]).ok());
+    key_val.map(|key_val| (key_val.key, key_val.val))
 }
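The two-step protocol above is easiest to see from the guest side. A minimal sketch (illustrative only, not part of this changeset; the helper name `tx_read_bytes` is hypothetical) of reading a key's raw bytes inside a WASM transaction via the `anoma_tx_read` and `anoma_tx_result_buffer` imports together with `read_from_buffer`:

```rust
// Hypothetical guest-side helper built on the imports declared above.
fn tx_read_bytes(key: &str) -> Option<Vec<u8>> {
    // Step 1: the host looks the key up, caches the value in its result
    // buffer and returns the value's byte size (or -1 if the key is absent).
    let read_result =
        unsafe { tx::anoma_tx_read(key.as_ptr() as _, key.len() as _) };
    // Step 2: copy the cached value into a guest-allocated vector of the
    // size learned in step 1.
    read_from_buffer(read_result, tx::anoma_tx_result_buffer)
}
```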
diff --git a/vm_env/src/nft.rs b/vm_env/src/nft.rs
deleted file mode 100644
index 4a685acd72..0000000000
--- a/vm_env/src/nft.rs
+++ /dev/null
@@ -1,194 +0,0 @@
-use namada::types::nft;
-
-/// Tx imports and functions.
-pub mod tx {
-    use namada::types::address::Address;
-    use namada::types::nft::NftToken;
-    use namada::types::transaction::nft::{CreateNft, MintNft};
-
-    use super::*;
-    use crate::imports::tx;
-    pub fn init_nft(nft: CreateNft) -> Address {
-        let address = tx::init_account(&nft.vp_code);
-
-        // write tag
-        let tag_key = nft::get_tag_key(&address);
-        tx::write(&tag_key.to_string(), &nft.tag);
-
-        // write creator
-        let creator_key = nft::get_creator_key(&address);
-        tx::write(&creator_key.to_string(), &nft.creator);
-
-        // write keys
-        let keys_key = nft::get_keys_key(&address);
-        tx::write(&keys_key.to_string(), &nft.keys);
-
-        // write optional keys
-        let optional_keys_key = nft::get_optional_keys_key(&address);
-        tx::write(&optional_keys_key.to_string(), nft.opt_keys);
-
-        // mint tokens
-        aux_mint_token(&address, &nft.creator, nft.tokens, &nft.creator);
-
-        tx::insert_verifier(&nft.creator);
-
-        address
-    }
-
-    pub fn mint_tokens(nft: MintNft) {
-        aux_mint_token(&nft.address, &nft.creator, nft.tokens, &nft.creator);
-    }
-
-    fn aux_mint_token(
-        nft_address: &Address,
-        creator_address: &Address,
-        tokens: Vec<NftToken>,
-        verifier: &Address,
-    ) {
-        for token in tokens {
-            // write token metadata
-            let metadata_key =
-                nft::get_token_metadata_key(nft_address, &token.id.to_string());
-            tx::write(&metadata_key.to_string(), &token.metadata);
-
-            // write current owner token as creator
-            let current_owner_key = nft::get_token_current_owner_key(
-                nft_address,
-                &token.id.to_string(),
-            );
-            tx::write(
-                &current_owner_key.to_string(),
-                &token
-                    .current_owner
-                    .unwrap_or_else(|| creator_address.clone()),
-            );
-
-            // write value key
-            let value_key =
-                nft::get_token_value_key(nft_address, &token.id.to_string());
-            tx::write(&value_key.to_string(), &token.values);
-
-            // write optional value keys
-            let optional_value_key = nft::get_token_optional_value_key(
-                nft_address,
-                &token.id.to_string(),
-            );
-            tx::write(&optional_value_key.to_string(), &token.opt_values);
-
-            // write approval addresses
-            let approval_key =
-                nft::get_token_approval_key(nft_address, &token.id.to_string());
-            tx::write(&approval_key.to_string(), &token.approvals);
-
-            // write burnt propriety
-            let burnt_key =
-                nft::get_token_burnt_key(nft_address, &token.id.to_string());
-            tx::write(&burnt_key.to_string(), token.burnt);
-        }
-        tx::insert_verifier(verifier);
-    }
-}
-
-/// A Nft validity predicate
-pub mod vp {
-    use std::collections::BTreeSet;
-
-    use namada::types::address::Address;
-    pub use namada::types::nft::*;
-    use namada::types::storage::Key;
-
-    use crate::imports::vp;
-
-    enum KeyType {
-        Metadata(Address, String),
-        Approval(Address, String),
-        CurrentOwner(Address, String),
-        Creator(Address),
-        PastOwners(Address, String),
-        Unknown,
-    }
-
-    pub fn vp(
-        _tx_da_ta: Vec<u8>,
-        nft_address: &Address,
-        keys_changed: &BTreeSet<Key>,
-        verifiers: &BTreeSet<Address>
, - ) -> bool { - keys_changed - .iter() - .all(|key| match get_key_type(key, nft_address) { - KeyType::Creator(_creator_addr) => { - vp::log_string("creator cannot be changed."); - false - } - KeyType::Approval(nft_address, token_id) => { - vp::log_string(format!( - "nft vp, checking approvals with token id: {}", - token_id - )); - - is_creator(&nft_address, verifiers) - || is_approved( - &nft_address, - token_id.as_ref(), - verifiers, - ) - } - KeyType::Metadata(nft_address, token_id) => { - vp::log_string(format!( - "nft vp, checking if metadata changed: {}", - token_id - )); - is_creator(&nft_address, verifiers) - } - _ => is_creator(nft_address, verifiers), - }) - } - - fn is_approved( - nft_address: &Address, - nft_token_id: &str, - verifiers: &BTreeSet
<Address>,
-    ) -> bool {
-        let approvals_key =
-            get_token_approval_key(nft_address, nft_token_id).to_string();
-        let approval_addresses: Vec<Address>
= - vp::read_pre(approvals_key).unwrap_or_default(); - return approval_addresses - .iter() - .any(|addr| verifiers.contains(addr)); - } - - fn is_creator( - nft_address: &Address, - verifiers: &BTreeSet
<Address>,
-    ) -> bool {
-        let creator_key = get_creator_key(nft_address).to_string();
-        let creator_address: Address = vp::read_pre(creator_key).unwrap();
-        verifiers.contains(&creator_address)
-    }
-
-    fn get_key_type(key: &Key, nft_address: &Address) -> KeyType {
-        let is_creator_key = is_nft_creator_key(key, nft_address);
-        let is_metadata_key = is_nft_metadata_key(key, nft_address);
-        let is_approval_key = is_nft_approval_key(key, nft_address);
-        let is_current_owner_key = is_nft_current_owner_key(key, nft_address);
-        let is_past_owner_key = is_nft_past_owners_key(key, nft_address);
-        if let Some(nft_address) = is_creator_key {
-            return KeyType::Creator(nft_address);
-        }
-        if let Some((nft_address, token_id)) = is_metadata_key {
-            return KeyType::Metadata(nft_address, token_id);
-        }
-        if let Some((nft_address, token_id)) = is_approval_key {
-            return KeyType::Approval(nft_address, token_id);
-        }
-        if let Some((nft_address, token_id)) = is_current_owner_key {
-            return KeyType::CurrentOwner(nft_address, token_id);
-        }
-        if let Some((nft_address, token_id)) = is_past_owner_key {
-            return KeyType::PastOwners(nft_address, token_id);
-        }
-        KeyType::Unknown
-    }
-}
diff --git a/vm_env/src/proof_of_stake.rs b/vm_env/src/proof_of_stake.rs
deleted file mode 100644
index 8e4bba4223..0000000000
--- a/vm_env/src/proof_of_stake.rs
+++ /dev/null
@@ -1,261 +0,0 @@
-//! Proof of Stake system integration with functions for transactions
-
-use namada::ledger::pos::namada_proof_of_stake::{
-    BecomeValidatorError, BondError, UnbondError, WithdrawError,
-};
-use namada::ledger::pos::types::Slash;
-pub use namada::ledger::pos::*;
-use namada::ledger::pos::{
-    bond_key, namada_proof_of_stake, params_key, total_voting_power_key,
-    unbond_key, validator_address_raw_hash_key, validator_consensus_key_key,
-    validator_set_key, validator_slashes_key,
-    validator_staking_reward_address_key, validator_state_key,
-    validator_total_deltas_key, validator_voting_power_key,
-};
-use namada::types::address::{self, Address, InternalAddress};
-use namada::types::transaction::InitValidator;
-use namada::types::{key, token};
-pub use namada_proof_of_stake::{
-    epoched, parameters, types, PosActions as PosWrite, PosReadOnly as PosRead,
-};
-
-use crate::imports::tx;
-
-/// Self-bond tokens to a validator when `source` is `None` or equal to
-/// the `validator` address, or delegate tokens from the `source` to the
-/// `validator`.
-pub fn bond_tokens(
-    source: Option<&Address>,
-    validator: &Address,
-    amount: token::Amount,
-) -> Result<(), BondError<Address>> {
-    let current_epoch = tx::get_block_epoch();
-    PoS.bond_tokens(source, validator, amount, current_epoch)
-}
-
-/// Unbond self-bonded tokens from a validator when `source` is `None` or
-/// equal to the `validator` address, or unbond delegated tokens from
-/// the `source` to the `validator`.
-pub fn unbond_tokens(
-    source: Option<&Address>,
-    validator: &Address,
-    amount: token::Amount,
-) -> Result<(), UnbondError<Address>> {
-    let current_epoch = tx::get_block_epoch();
-    PoS.unbond_tokens(source, validator, amount, current_epoch)
-}
-
-/// Withdraw unbonded tokens from a self-bond to a validator when `source`
-/// is `None` or equal to the `validator` address, or withdraw unbonded
-/// tokens delegated to the `validator` to the `source`.
-pub fn withdraw_tokens(
-    source: Option<&Address>,
-    validator: &Address,
-) -> Result<token::Amount, WithdrawError<Address>> {
-    let current_epoch = tx::get_block_epoch();
-    PoS.withdraw_tokens(source, validator, current_epoch)
-}
-
-/// Attempt to initialize a validator account. On success, returns the
-/// initialized validator account's address and its staking reward address.
-pub fn init_validator(
-    InitValidator {
-        account_key,
-        consensus_key,
-        rewards_account_key,
-        protocol_key,
-        dkg_key,
-        validator_vp_code,
-        rewards_vp_code,
-    }: InitValidator,
-) -> Result<(Address, Address), BecomeValidatorError<Address>> {
-    let current_epoch = tx::get_block_epoch();
-    // Init validator account
-    let validator_address = tx::init_account(&validator_vp_code);
-    let pk_key = key::pk_key(&validator_address);
-    tx::write(&pk_key.to_string(), &account_key);
-    let protocol_pk_key = key::protocol_pk_key(&validator_address);
-    tx::write(&protocol_pk_key.to_string(), &protocol_key);
-    let dkg_pk_key = key::dkg_session_keys::dkg_pk_key(&validator_address);
-    tx::write(&dkg_pk_key.to_string(), &dkg_key);
-
-    // Init staking reward account
-    let rewards_address = tx::init_account(&rewards_vp_code);
-    let pk_key = key::pk_key(&rewards_address);
-    tx::write(&pk_key.to_string(), &rewards_account_key);
-
-    PoS.become_validator(
-        &validator_address,
-        &rewards_address,
-        &consensus_key,
-        current_epoch,
-    )?;
-    Ok((validator_address, rewards_address))
-}
-
-/// Proof of Stake system. This struct integrates and gives access to
-/// lower-level PoS functions.
-pub struct PoS;
-
-impl namada_proof_of_stake::PosReadOnly for PoS {
-    type Address = Address;
-    type PublicKey = key::common::PublicKey;
-    type TokenAmount = token::Amount;
-    type TokenChange = token::Change;
-
-    const POS_ADDRESS: Self::Address = Address::Internal(InternalAddress::PoS);
-
-    fn staking_token_address() -> Self::Address {
-        address::xan()
-    }
-
-    fn read_pos_params(&self) -> PosParams {
-        tx::read(params_key().to_string()).unwrap()
-    }
-
-    fn read_validator_staking_reward_address(
-        &self,
-        key: &Self::Address,
-    ) -> Option<Self::Address> {
-        tx::read(validator_staking_reward_address_key(key).to_string())
-    }
-
-    fn read_validator_consensus_key(
-        &self,
-        key: &Self::Address,
-    ) -> Option<ValidatorConsensusKeys> {
-        tx::read(validator_consensus_key_key(key).to_string())
-    }
-
-    fn read_validator_state(
-        &self,
-        key: &Self::Address,
-    ) -> Option<ValidatorStates> {
-        tx::read(validator_state_key(key).to_string())
-    }
-
-    fn read_validator_total_deltas(
-        &self,
-        key: &Self::Address,
-    ) -> Option<ValidatorTotalDeltas> {
-        tx::read(validator_total_deltas_key(key).to_string())
-    }
-
-    fn read_validator_voting_power(
-        &self,
-        key: &Self::Address,
-    ) -> Option<ValidatorVotingPowers> {
-        tx::read(validator_voting_power_key(key).to_string())
-    }
-
-    fn read_validator_slashes(&self, key: &Self::Address) -> Vec<Slash> {
-        tx::read(validator_slashes_key(key).to_string()).unwrap_or_default()
-    }
-
-    fn read_bond(&self, key: &BondId) -> Option<Bonds> {
-        tx::read(bond_key(key).to_string())
-    }
-
-    fn read_unbond(&self, key: &BondId) -> Option<Unbonds> {
-        tx::read(unbond_key(key).to_string())
-    }
-
-    fn read_validator_set(&self) -> ValidatorSets {
-        tx::read(validator_set_key().to_string()).unwrap()
-    }
-
-    fn read_total_voting_power(&self) -> TotalVotingPowers {
-        tx::read(total_voting_power_key().to_string()).unwrap()
-    }
-}
-
-impl namada_proof_of_stake::PosActions for PoS {
-    fn write_pos_params(&mut self, params: &PosParams) {
-        tx::write(params_key().to_string(), params)
-    }
-
-    fn write_validator_address_raw_hash(&mut self, address: &Self::Address) {
-        let raw_hash = address.raw_hash().unwrap().to_owned();
-        tx::write(
-            validator_address_raw_hash_key(raw_hash).to_string(),
-            address,
-        )
-    }
-
-    fn write_validator_staking_reward_address(
-        &mut self,
-        key: &Self::Address,
-        value: Self::Address,
-    ) {
-        tx::write(
-            validator_staking_reward_address_key(key).to_string(),
-            &value,
-        )
-    }
-
-    fn write_validator_consensus_key(
-        &mut self,
-        key: &Self::Address,
-        value: ValidatorConsensusKeys,
-    ) {
-        tx::write(validator_consensus_key_key(key).to_string(), &value)
-    }
-
-    fn write_validator_state(
-        &mut self,
-        key: &Self::Address,
-        value: ValidatorStates,
-    ) {
-        tx::write(validator_state_key(key).to_string(), &value)
-    }
-
-    fn write_validator_total_deltas(
-        &mut self,
-        key: &Self::Address,
-        value: ValidatorTotalDeltas,
-    ) {
-        tx::write(validator_total_deltas_key(key).to_string(), &value)
-    }
-
-    fn write_validator_voting_power(
-        &mut self,
-        key: &Self::Address,
-        value: ValidatorVotingPowers,
-    ) {
-        tx::write(validator_voting_power_key(key).to_string(), &value)
-    }
-
-    fn write_bond(&mut self, key: &BondId, value: Bonds) {
-        tx::write(bond_key(key).to_string(), &value)
-    }
-
-    fn write_unbond(&mut self, key: &BondId, value: Unbonds) {
-        tx::write(unbond_key(key).to_string(), &value)
-    }
-
-    fn write_validator_set(&mut self, value: ValidatorSets) {
-        tx::write(validator_set_key().to_string(), &value)
-    }
-
-    fn write_total_voting_power(&mut self, value: TotalVotingPowers) {
-        tx::write(total_voting_power_key().to_string(), &value)
-    }
-
-    fn delete_bond(&mut self, key: &BondId) {
-        tx::delete(bond_key(key).to_string())
-    }
-
-    fn delete_unbond(&mut self, key: &BondId) {
-        tx::delete(unbond_key(key).to_string())
-    }
-
-    fn transfer(
-        &mut self,
-        token: &Self::Address,
-        amount: Self::TokenAmount,
-        src: &Self::Address,
-        dest: &Self::Address,
-    ) {
-        crate::token::tx::transfer(src, dest, token, amount)
-    }
-}
diff --git a/vm_env/src/token.rs b/vm_env/src/token.rs
deleted file mode 100644
index 8a7367afb9..0000000000
--- a/vm_env/src/token.rs
+++ /dev/null
@@ -1,113 +0,0 @@
-use std::collections::BTreeSet;
-
-use namada::types::address::{Address, InternalAddress};
-use namada::types::storage::Key;
-use namada::types::token;
-
-/// Vp imports and functions.
-pub mod vp {
-    use namada::types::storage::KeySeg;
-    pub use namada::types::token::*;
-
-    use super::*;
-    use crate::imports::vp;
-
-    /// A token validity predicate.
-    pub fn vp(
-        token: &Address,
-        keys_changed: &BTreeSet<Key>,
-        verifiers: &BTreeSet<Address>,
-    ) -> bool {
-        let mut change: Change = 0;
-        let all_checked = keys_changed.iter().all(|key| {
-            match token::is_balance_key(token, key) {
-                None => {
-                    // Unknown changes to this address space are disallowed, but
-                    // unknown changes anywhere else are permitted
-                    key.segments.get(0) != Some(&token.to_db_key())
-                }
-                Some(owner) => {
-                    // accumulate the change
-                    let key = key.to_string();
-                    let pre: Amount = match owner {
-                        Address::Internal(InternalAddress::IbcMint) => {
-                            Amount::max()
-                        }
-                        Address::Internal(InternalAddress::IbcBurn) => {
-                            Amount::default()
-                        }
-                        _ => vp::read_pre(&key).unwrap_or_default(),
-                    };
-                    let post: Amount = match owner {
-                        Address::Internal(InternalAddress::IbcMint) => {
-                            vp::read_temp(&key).unwrap_or_else(Amount::max)
-                        }
-                        Address::Internal(InternalAddress::IbcBurn) => {
-                            vp::read_temp(&key).unwrap_or_default()
-                        }
-                        _ => vp::read_post(&key).unwrap_or_default(),
-                    };
-                    let this_change = post.change() - pre.change();
-                    change += this_change;
-                    // make sure that the spender approved the transaction
-                    if this_change < 0 {
-                        return verifiers.contains(owner);
-                    }
-                    true
-                }
-            }
-        });
-        all_checked && change == 0
-    }
-}
-
-/// Tx imports and functions.
-pub mod tx {
-    pub use namada::types::token::*;
-
-    use super::*;
-    use crate::imports::tx;
-
-    /// A token transfer that can be used in a transaction.
-    pub fn transfer(
-        src: &Address,
-        dest: &Address,
-        token: &Address,
-        amount: Amount,
-    ) {
-        let src_key = token::balance_key(token, src);
-        let dest_key = token::balance_key(token, dest);
-        let src_bal: Option<Amount> = tx::read(&src_key.to_string());
-        let mut src_bal = src_bal.unwrap_or_else(|| match src {
-            Address::Internal(InternalAddress::IbcMint) => Amount::max(),
-            _ => {
-                tx::log_string(format!("src {} has no balance", src));
-                unreachable!()
-            }
-        });
-        src_bal.spend(&amount);
-        let mut dest_bal: Amount =
-            tx::read(&dest_key.to_string()).unwrap_or_default();
-        dest_bal.receive(&amount);
-        match src {
-            Address::Internal(InternalAddress::IbcMint) => {
-                tx::write_temp(&src_key.to_string(), src_bal)
-            }
-            Address::Internal(InternalAddress::IbcBurn) => {
-                tx::log_string("invalid transfer from the burn address");
-                unreachable!()
-            }
-            _ => tx::write(&src_key.to_string(), src_bal),
-        }
-        match dest {
-            Address::Internal(InternalAddress::IbcMint) => {
-                tx::log_string("invalid transfer to the mint address");
-                unreachable!()
-            }
-            Address::Internal(InternalAddress::IbcBurn) => {
-                tx::write_temp(&dest_key.to_string(), dest_bal)
-            }
-            _ => tx::write(&dest_key.to_string(), dest_bal),
-        }
-    }
-}
diff --git a/vp_prelude/Cargo.toml b/vp_prelude/Cargo.toml
index d915826e49..00273c6622 100644
--- a/vp_prelude/Cargo.toml
+++ b/vp_prelude/Cargo.toml
@@ -10,5 +10,9 @@ version = "0.7.1"
 default = []
 
 [dependencies]
+namada = {path = "../shared"}
 namada_vm_env = {path = "../vm_env"}
+namada_macros = {path = "../macros"}
+borsh = "0.9.0"
 sha2 = "0.10.1"
+thiserror = "1.0.30"
diff --git a/vp_prelude/src/intent.rs b/vp_prelude/src/intent.rs
new file mode 100644
index 0000000000..93a93e1183
--- /dev/null
+++ b/vp_prelude/src/intent.rs
@@ -0,0 +1,19 @@
+use std::collections::HashSet;
+
+use namada::proto::Signed;
+use namada::types::intent;
+pub use namada::types::intent::*;
+use namada::types::key::*;
+
+use super::*;
+
+pub fn vp_exchange(ctx: &Ctx, intent: &Signed<Exchange>) -> EnvResult<bool> {
+    let key = intent::invalid_intent_key(&intent.data.addr);
+
+    let invalid_intent_pre: HashSet<common::Signature> =
+        ctx.read_pre(&key)?.unwrap_or_default();
+    let invalid_intent_post: HashSet<common::Signature> =
+        ctx.read_post(&key)?.unwrap_or_default();
+    Ok(!invalid_intent_pre.contains(&intent.sig)
+        && invalid_intent_post.contains(&intent.sig))
+}
diff --git a/vp_prelude/src/key.rs b/vp_prelude/src/key.rs
new file mode 100644
index 0000000000..5ef2a5e28c
--- /dev/null
+++ b/vp_prelude/src/key.rs
@@ -0,0 +1,13 @@
+//! Cryptographic signature keys
+
+use namada::types::address::Address;
+pub use namada::types::key::*;
+
+use super::*;
+
+/// Get the public key associated with the given address. Panics if not
+/// found.
+pub fn get(ctx: &Ctx, owner: &Address) -> EnvResult<Option<common::PublicKey>> {
+    let key = pk_key(owner);
+    ctx.read_pre(&key)
+}
diff --git a/vp_prelude/src/lib.rs b/vp_prelude/src/lib.rs
index 957354d848..fa14a25989 100644
--- a/vp_prelude/src/lib.rs
+++ b/vp_prelude/src/lib.rs
@@ -6,10 +6,39 @@
 #![deny(rustdoc::broken_intra_doc_links)]
 #![deny(rustdoc::private_intra_doc_links)]
 
+pub mod intent;
+pub mod key;
+pub mod nft;
+pub mod token;
+
+// used in the VP input
 use core::convert::AsRef;
+use core::slice;
+pub use std::collections::{BTreeSet, HashSet};
+use std::convert::TryFrom;
+use std::marker::PhantomData;
 
-use namada_vm_env::vp_prelude::hash::Hash;
-pub use namada_vm_env::vp_prelude::*;
+pub use borsh::{BorshDeserialize, BorshSerialize};
+pub use namada::ledger::governance::storage as gov_storage;
+pub use namada::ledger::storage_api::{
+    self, iter_prefix, iter_prefix_bytes, rev_iter_prefix,
+    rev_iter_prefix_bytes, Error, OptionExt, ResultExt, StorageRead,
+};
+pub use namada::ledger::vp_env::VpEnv;
+pub use namada::ledger::{parameters, pos as proof_of_stake};
+pub use namada::proto::{Signed, SignedTxData};
+pub use namada::types::address::Address;
+use namada::types::chain::CHAIN_ID_LENGTH;
+use namada::types::hash::{Hash, HASH_LENGTH};
+use namada::types::internal::HostEnvResult;
+use namada::types::key::*;
+use namada::types::storage::{
+    BlockHash, BlockHeight, Epoch, BLOCK_HASH_LENGTH,
+};
+pub use namada::types::*;
+pub use namada_macros::validity_predicate;
+use namada_vm_env::vp::*;
+use namada_vm_env::{read_from_buffer, read_key_val_bytes_from_buffer};
 pub use sha2::{Digest, Sha256, Sha384, Sha512};
 
 pub fn sha256(bytes: &[u8]) -> Hash {
@@ -17,20 +46,28 @@ pub fn sha256(bytes: &[u8]) -> Hash {
     Hash(*digest.as_ref())
 }
 
-pub fn is_tx_whitelisted() -> bool {
-    let tx_hash = get_tx_code_hash();
+pub fn is_tx_whitelisted(ctx: &Ctx) -> VpResult {
+    let tx_hash = ctx.get_tx_code_hash()?;
     let key = parameters::storage::get_tx_whitelist_storage_key();
-    let whitelist: Vec<String> = read_pre(&key.to_string()).unwrap_or_default();
+    let whitelist: Vec<String> = ctx.read_pre(&key)?.unwrap_or_default();
     // if whitelist is empty, allow any transaction
-    whitelist.is_empty() || whitelist.contains(&tx_hash.to_string())
+    Ok(whitelist.is_empty() || whitelist.contains(&tx_hash.to_string()))
 }
 
-pub fn is_vp_whitelisted(vp_bytes: &[u8]) -> bool {
+pub fn is_vp_whitelisted(ctx: &Ctx, vp_bytes: &[u8]) -> VpResult {
     let vp_hash = sha256(vp_bytes);
     let key = parameters::storage::get_vp_whitelist_storage_key();
-    let whitelist: Vec<String> = read_pre(&key.to_string()).unwrap_or_default();
+    let whitelist: Vec<String> = ctx.read_pre(&key)?.unwrap_or_default();
     // if whitelist is empty, allow any transaction
-    whitelist.is_empty() || whitelist.contains(&vp_hash.to_string())
+    Ok(whitelist.is_empty() || whitelist.contains(&vp_hash.to_string()))
+}
+
+/// Log a string. The message will be printed at the `tracing::Level::Info`.
+pub fn log_string<T: AsRef<str>>(msg: T) {
+    let msg = msg.as_ref();
+    unsafe {
+        anoma_vp_log_string(msg.as_ptr() as _, msg.len() as _);
+    }
 }
 
 /// Log a string in a debug build. The message will be printed at the
@@ -44,3 +81,363 @@ macro_rules! debug_log {
     (if cfg!(debug_assertions) { log_string(format!($($arg)*)) }) }}
 }
+
+#[derive(Debug)]
+pub struct Ctx(());
+
+impl Ctx {
+    /// Create a host context. The context on WASM side is only provided by
+    /// the VM once its being executed (in here it's implicit). But
+    /// because we want to have interface identical with the native
+    /// VPs, in which the context is explicit, in here we're just
+    /// using an empty `Ctx` to "fake" it.
+    ///
+    /// # Safety
+    ///
+    /// When using `#[validity_predicate]` macro from `anoma_macros`,
+    /// the constructor should not be called from transactions and validity
+    /// predicates implementation directly - they receive `&Self` as
+    /// an argument provided by the macro that wrap the low-level WASM
+    /// interface with Rust native types.
+    ///
+    /// Otherwise, this should only be called once to initialize this "fake"
+    /// context in order to benefit from type-safety of the host environment
+    /// methods implemented on the context.
+    #[allow(clippy::new_without_default)]
+    pub const unsafe fn new() -> Self {
+        Self(())
+    }
+
+    /// Read access to the prior storage (state before tx execution)
+    /// via [`trait@StorageRead`].
+    pub fn pre(&self) -> CtxPreStorageRead<'_> {
+        CtxPreStorageRead { _ctx: self }
+    }
+
+    /// Read access to the posterior storage (state after tx execution)
+    /// via [`trait@StorageRead`].
+    pub fn post(&self) -> CtxPostStorageRead<'_> {
+        CtxPostStorageRead { _ctx: self }
+    }
+}
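To illustrate the `pre()`/`post()` views, here is a sketch under assumptions (not code from this changeset; the key and the `u64` encoding are hypothetical) of a VP helper comparing prior and posterior state through `Ctx`:

```rust
// Illustrative only: check that a Borsh-encoded `u64` counter at `key`
// never decreases across the transaction.
fn counter_did_not_decrease(ctx: &Ctx, key: &storage::Key) -> VpResult {
    let pre: u64 = ctx.read_pre(key)?.unwrap_or_default();
    let post: u64 = ctx.read_post(key)?.unwrap_or_default();
    Ok(post >= pre)
}
```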
+
+/// Read access to the prior storage (state before tx execution) via
+/// [`trait@StorageRead`].
+#[derive(Debug)]
+pub struct CtxPreStorageRead<'a> {
+    _ctx: &'a Ctx,
+}
+
+/// Read access to the posterior storage (state after tx execution) via
+/// [`trait@StorageRead`].
+#[derive(Debug)]
+pub struct CtxPostStorageRead<'a> {
+    _ctx: &'a Ctx,
+}
+
+/// Result of `VpEnv` or `storage_api::StorageRead` method call
+pub type EnvResult<T> = Result<T, Error>;
+
+/// Validity predicate result
+pub type VpResult = EnvResult<bool>;
+
+/// Accept a transaction
+pub fn accept() -> VpResult {
+    Ok(true)
+}
+
+/// Reject a transaction
+pub fn reject() -> VpResult {
+    Ok(false)
+}
+
+#[derive(Debug)]
+pub struct KeyValIterator<T>(pub u64, pub PhantomData<T>);
+
+impl<'view> VpEnv<'view> for Ctx {
+    type Post = CtxPostStorageRead<'view>;
+    type Pre = CtxPreStorageRead<'view>;
+    type PrefixIter = KeyValIterator<(String, Vec<u8>)>;
+
+    fn pre(&'view self) -> Self::Pre {
+        CtxPreStorageRead { _ctx: self }
+    }
+
+    fn post(&'view self) -> Self::Post {
+        CtxPostStorageRead { _ctx: self }
+    }
+
+    fn read_temp<T: BorshDeserialize>(
+        &self,
+        key: &storage::Key,
+    ) -> Result<Option<T>, Error> {
+        let key = key.to_string();
+        let read_result =
+            unsafe { anoma_vp_read_temp(key.as_ptr() as _, key.len() as _) };
+        Ok(read_from_buffer(read_result, anoma_vp_result_buffer)
+            .and_then(|t| T::try_from_slice(&t[..]).ok()))
+    }
+
+    fn read_bytes_temp(
+        &self,
+        key: &storage::Key,
+    ) -> Result<Option<Vec<u8>>, Error> {
+        let key = key.to_string();
+        let read_result =
+            unsafe { anoma_vp_read_temp(key.as_ptr() as _, key.len() as _) };
+        Ok(read_from_buffer(read_result, anoma_vp_result_buffer))
+    }
+
+    fn get_chain_id(&'view self) -> Result<String, Error> {
+        // Both `CtxPreStorageRead` and `CtxPostStorageRead` have the same impl
+        get_chain_id()
+    }
+
+    fn get_block_height(&'view self) -> Result<BlockHeight, Error> {
+        // Both `CtxPreStorageRead` and `CtxPostStorageRead` have the same impl
+        get_block_height()
+    }
+
+    fn get_block_hash(&'view self) -> Result<BlockHash, Error> {
+        // Both `CtxPreStorageRead` and `CtxPostStorageRead` have the same impl
+        get_block_hash()
+    }
+
+    fn get_block_epoch(&'view self) -> Result<Epoch, Error> {
+        // Both `CtxPreStorageRead` and `CtxPostStorageRead` have the same impl
+        get_block_epoch()
+    }
+
+    fn iter_prefix(
+        &self,
+        prefix: &storage::Key,
+    ) -> Result<Self::PrefixIter, Error> {
+        // Both `CtxPreStorageRead` and `CtxPostStorageRead` have the same impl
+        self.pre().iter_prefix(prefix)
+    }
+
+    fn rev_iter_prefix(
+        &self,
+        prefix: &storage::Key,
+    ) -> Result<Self::PrefixIter, Error> {
+        // Both `CtxPreStorageRead` and `CtxPostStorageRead` have the same impl
+        self.pre().rev_iter_prefix(prefix)
+    }
+
+    fn eval(
+        &self,
+        vp_code: Vec<u8>,
+        input_data: Vec<u8>,
+    ) -> Result<bool, Error> {
+        let result = unsafe {
+            anoma_vp_eval(
+                vp_code.as_ptr() as _,
+                vp_code.len() as _,
+                input_data.as_ptr() as _,
+                input_data.len() as _,
+            )
+        };
+        Ok(HostEnvResult::is_success(result))
+    }
+
+    fn verify_tx_signature(
+        &self,
+        pk: &common::PublicKey,
+        sig: &common::Signature,
+    ) -> Result<bool, Error> {
+        let pk = BorshSerialize::try_to_vec(pk).unwrap();
+        let sig = BorshSerialize::try_to_vec(sig).unwrap();
+        let valid = unsafe {
+            anoma_vp_verify_tx_signature(
+                pk.as_ptr() as _,
+                pk.len() as _,
+                sig.as_ptr() as _,
+                sig.len() as _,
+            )
+        };
+        Ok(HostEnvResult::is_success(valid))
+    }
+
+    fn get_tx_code_hash(&self) -> Result<Hash, Error> {
+        let result = Vec::with_capacity(HASH_LENGTH);
+        unsafe {
+            anoma_vp_get_tx_code_hash(result.as_ptr() as _);
+        }
+        let slice =
+            unsafe { slice::from_raw_parts(result.as_ptr(), HASH_LENGTH) };
+        Ok(Hash::try_from(slice).expect("Cannot convert the hash"))
+    }
+}
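As a usage sketch for `verify_tx_signature` (assumption-laden, not from this changeset; the helper name is hypothetical), the common owner-signature check a VP can perform:

```rust
// Hypothetical helper: accept only if `tx_data` decodes to a `SignedTxData`
// carrying a valid signature by `pk`; verification is delegated to the host
// via `anoma_vp_verify_tx_signature`.
fn signed_by(ctx: &Ctx, pk: &common::PublicKey, tx_data: &[u8]) -> VpResult {
    match SignedTxData::try_from_slice(tx_data) {
        Ok(signed) => ctx.verify_tx_signature(pk, &signed.sig),
        Err(_) => reject(),
    }
}
```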
+
+impl StorageRead<'_> for CtxPreStorageRead<'_> {
+    type PrefixIter = KeyValIterator<(String, Vec<u8>)>;
+
+    fn read_bytes(&self, key: &storage::Key) -> Result<Option<Vec<u8>>, Error> {
+        let key = key.to_string();
+        let read_result =
+            unsafe { anoma_vp_read_pre(key.as_ptr() as _, key.len() as _) };
+        Ok(read_from_buffer(read_result, anoma_vp_result_buffer))
+    }
+
+    fn has_key(&self, key: &storage::Key) -> Result<bool, Error> {
+        let key = key.to_string();
+        let found =
+            unsafe { anoma_vp_has_key_pre(key.as_ptr() as _, key.len() as _) };
+        Ok(HostEnvResult::is_success(found))
+    }
+
+    fn iter_next(
+        &self,
+        iter: &mut Self::PrefixIter,
+    ) -> Result<Option<(String, Vec<u8>)>, Error> {
+        let read_result = unsafe { anoma_vp_iter_pre_next(iter.0) };
+        Ok(read_key_val_bytes_from_buffer(
+            read_result,
+            anoma_vp_result_buffer,
+        ))
+    }
+
+    // ---- Methods below share the same implementation in `pre/post` ----
+
+    fn iter_prefix(
+        &self,
+        prefix: &storage::Key,
+    ) -> Result<Self::PrefixIter, Error> {
+        iter_prefix_impl(prefix)
+    }
+
+    fn rev_iter_prefix(
+        &self,
+        prefix: &storage::Key,
+    ) -> Result<Self::PrefixIter, Error> {
+        rev_iter_prefix_impl(prefix)
+    }
+
+    fn get_chain_id(&self) -> Result<String, Error> {
+        get_chain_id()
+    }
+
+    fn get_block_height(&self) -> Result<BlockHeight, Error> {
+        get_block_height()
+    }
+
+    fn get_block_hash(&self) -> Result<BlockHash, Error> {
+        get_block_hash()
+    }
+
+    fn get_block_epoch(&self) -> Result<Epoch, Error> {
+        get_block_epoch()
+    }
+}
+
+impl StorageRead<'_> for CtxPostStorageRead<'_> {
+    type PrefixIter = KeyValIterator<(String, Vec<u8>)>;
+
+    fn read_bytes(&self, key: &storage::Key) -> Result<Option<Vec<u8>>, Error> {
+        let key = key.to_string();
+        let read_result =
+            unsafe { anoma_vp_read_post(key.as_ptr() as _, key.len() as _) };
+        Ok(read_from_buffer(read_result, anoma_vp_result_buffer))
+    }
+
+    fn has_key(&self, key: &storage::Key) -> Result<bool, Error> {
+        let key = key.to_string();
+        let found =
+            unsafe { anoma_vp_has_key_post(key.as_ptr() as _, key.len() as _) };
+        Ok(HostEnvResult::is_success(found))
+    }
+
+    fn iter_next(
+        &self,
+        iter: &mut Self::PrefixIter,
+    ) -> Result<Option<(String, Vec<u8>)>, Error> {
+        let read_result = unsafe { anoma_vp_iter_post_next(iter.0) };
+        Ok(read_key_val_bytes_from_buffer(
+            read_result,
+            anoma_vp_result_buffer,
+        ))
+    }
+
+    // ---- Methods below share the same implementation in `pre/post` ----
+
+    fn iter_prefix(
+        &self,
+        prefix: &storage::Key,
+    ) -> Result<Self::PrefixIter, Error> {
+        iter_prefix_impl(prefix)
+    }
+
+    fn rev_iter_prefix(
+        &self,
+        prefix: &storage::Key,
+    ) -> storage_api::Result<Self::PrefixIter> {
+        rev_iter_prefix_impl(prefix)
+    }
+
+    fn get_chain_id(&self) -> Result<String, Error> {
+        get_chain_id()
+    }
+
+    fn get_block_height(&self) -> Result<BlockHeight, Error> {
+        get_block_height()
+    }
+
+    fn get_block_hash(&self) -> Result<BlockHash, Error> {
+        get_block_hash()
+    }
+
+    fn get_block_epoch(&self) -> Result<Epoch, Error> {
+        get_block_epoch()
+    }
+}
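A sketch of driving these iterators (illustrative only; it assumes the values under `prefix` are Borsh-encoded `token::Amount`s, and the helper name is hypothetical):

```rust
// Sum all amounts stored under `prefix` in the prior state. `iter_prefix`
// creates a host-side iterator; `iter_next` advances it one key-value pair
// at a time.
fn sum_pre_amounts(ctx: &Ctx, prefix: &storage::Key) -> EnvResult<token::Amount> {
    let pre = ctx.pre();
    let mut iter = pre.iter_prefix(prefix)?;
    let mut total = token::Amount::default();
    while let Some((_key, val)) = pre.iter_next(&mut iter)? {
        let amount = token::Amount::try_from_slice(&val[..])
            .ok()
            .ok_or_err_msg("expected a Borsh-encoded Amount")?;
        total.receive(&amount);
    }
    Ok(total)
}
```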
+
+fn iter_prefix_impl(
+    prefix: &storage::Key,
+) -> Result<KeyValIterator<(String, Vec<u8>)>, Error> {
+    let prefix = prefix.to_string();
+    let iter_id = unsafe {
+        anoma_vp_iter_prefix(prefix.as_ptr() as _, prefix.len() as _)
+    };
+    Ok(KeyValIterator(iter_id, PhantomData))
+}
+
+fn rev_iter_prefix_impl(
+    prefix: &storage::Key,
+) -> Result<KeyValIterator<(String, Vec<u8>)>, Error> {
+    let prefix = prefix.to_string();
+    let iter_id = unsafe {
+        anoma_vp_rev_iter_prefix(prefix.as_ptr() as _, prefix.len() as _)
+    };
+    Ok(KeyValIterator(iter_id, PhantomData))
+}
+
+fn get_chain_id() -> Result<String, Error> {
+    let result = Vec::with_capacity(CHAIN_ID_LENGTH);
+    unsafe {
+        anoma_vp_get_chain_id(result.as_ptr() as _);
+    }
+    let slice =
+        unsafe { slice::from_raw_parts(result.as_ptr(), CHAIN_ID_LENGTH) };
+    Ok(
+        String::from_utf8(slice.to_vec())
+            .expect("Cannot convert the ID string"),
+    )
+}
+
+fn get_block_height() -> Result<BlockHeight, Error> {
+    Ok(BlockHeight(unsafe { anoma_vp_get_block_height() }))
+}
+
+fn get_block_hash() -> Result<BlockHash, Error> {
+    let result = Vec::with_capacity(BLOCK_HASH_LENGTH);
+    unsafe {
+        anoma_vp_get_block_hash(result.as_ptr() as _);
+    }
+    let slice =
+        unsafe { slice::from_raw_parts(result.as_ptr(), BLOCK_HASH_LENGTH) };
+    Ok(BlockHash::try_from(slice).expect("Cannot convert the hash"))
+}
+
+fn get_block_epoch() -> Result<Epoch, Error> {
+    Ok(Epoch(unsafe { anoma_vp_get_block_epoch() }))
+}
diff --git a/vp_prelude/src/nft.rs b/vp_prelude/src/nft.rs
new file mode 100644
index 0000000000..1d5d019169
--- /dev/null
+++ b/vp_prelude/src/nft.rs
@@ -0,0 +1,116 @@
+//! NFT validity predicate
+
+use std::collections::BTreeSet;
+
+use namada::ledger::native_vp::VpEnv;
+use namada::types::address::Address;
+pub use namada::types::nft::*;
+use namada::types::storage::Key;
+
+use super::{accept, reject, Ctx, EnvResult, VpResult};
+
+enum KeyType {
+    Metadata(Address, String),
+    Approval(Address, String),
+    CurrentOwner(Address, String),
+    Creator(Address),
+    PastOwners(Address, String),
+    Unknown,
+}
+
+pub fn vp(
+    ctx: &Ctx,
+    _tx_da_ta: Vec<u8>,
+    nft_address: &Address,
+    keys_changed: &BTreeSet<Key>,
+    verifiers: &BTreeSet<Address>
, +) -> VpResult { + for key in keys_changed { + match get_key_type(key, nft_address) { + KeyType::Creator(_creator_addr) => { + super::log_string("creator cannot be changed."); + return reject(); + } + KeyType::Approval(nft_address, token_id) => { + super::log_string(format!( + "nft vp, checking approvals with token id: {}", + token_id + )); + + if !(is_creator(ctx, &nft_address, verifiers)? + || is_approved( + ctx, + &nft_address, + token_id.as_ref(), + verifiers, + )?) + { + return reject(); + } + } + KeyType::Metadata(nft_address, token_id) => { + super::log_string(format!( + "nft vp, checking if metadata changed: {}", + token_id + )); + if !is_creator(ctx, &nft_address, verifiers)? { + return reject(); + } + } + _ => { + if !is_creator(ctx, nft_address, verifiers)? { + return reject(); + } + } + } + } + accept() +} + +fn is_approved( + ctx: &Ctx, + nft_address: &Address, + nft_token_id: &str, + verifiers: &BTreeSet
<Address>,
+) -> EnvResult<bool> {
+    let approvals_key = get_token_approval_key(nft_address, nft_token_id);
+    let approval_addresses: Vec<Address>
= + ctx.read_pre(&approvals_key)?.unwrap_or_default(); + return Ok(approval_addresses + .iter() + .any(|addr| verifiers.contains(addr))); +} + +fn is_creator( + ctx: &Ctx, + nft_address: &Address, + verifiers: &BTreeSet
<Address>,
+) -> EnvResult<bool> {
+    let creator_key = get_creator_key(nft_address);
+    let creator_address: Address = ctx.read_pre(&creator_key)?.unwrap();
+    Ok(verifiers.contains(&creator_address))
+}
+
+fn get_key_type(key: &Key, nft_address: &Address) -> KeyType {
+    let is_creator_key = is_nft_creator_key(key, nft_address);
+    let is_metadata_key = is_nft_metadata_key(key, nft_address);
+    let is_approval_key = is_nft_approval_key(key, nft_address);
+    let is_current_owner_key = is_nft_current_owner_key(key, nft_address);
+    let is_past_owner_key = is_nft_past_owners_key(key, nft_address);
+    if let Some(nft_address) = is_creator_key {
+        return KeyType::Creator(nft_address);
+    }
+    if let Some((nft_address, token_id)) = is_metadata_key {
+        return KeyType::Metadata(nft_address, token_id);
+    }
+    if let Some((nft_address, token_id)) = is_approval_key {
+        return KeyType::Approval(nft_address, token_id);
+    }
+    if let Some((nft_address, token_id)) = is_current_owner_key {
+        return KeyType::CurrentOwner(nft_address, token_id);
+    }
+    if let Some((nft_address, token_id)) = is_past_owner_key {
+        return KeyType::PastOwners(nft_address, token_id);
+    }
+    KeyType::Unknown
+}
diff --git a/vp_prelude/src/token.rs b/vp_prelude/src/token.rs
new file mode 100644
index 0000000000..6670386c4a
--- /dev/null
+++ b/vp_prelude/src/token.rs
@@ -0,0 +1,61 @@
+//! A fungible token validity predicate.
+
+use std::collections::BTreeSet;
+
+use namada::types::address::{Address, InternalAddress};
+use namada::types::storage::Key;
+/// Vp imports and functions.
+use namada::types::storage::KeySeg;
+use namada::types::token;
+pub use namada::types::token::*;
+
+use super::*;
+
+/// A token validity predicate.
+pub fn vp(
+    ctx: &Ctx,
+    token: &Address,
+    keys_changed: &BTreeSet<Key>,
+    verifiers: &BTreeSet<Address>
, +) -> VpResult { + let mut change: Change = 0; + for key in keys_changed.iter() { + match token::is_balance_key(token, key) { + None => { + // Unknown changes to this address space are disallowed, but + // unknown changes anywhere else are permitted + if key.segments.get(0) == Some(&token.to_db_key()) { + return reject(); + } + } + Some(owner) => { + // accumulate the change + let pre: Amount = match owner { + Address::Internal(InternalAddress::IbcMint) => { + Amount::max() + } + Address::Internal(InternalAddress::IbcBurn) => { + Amount::default() + } + _ => ctx.read_pre(key)?.unwrap_or_default(), + }; + let post: Amount = match owner { + Address::Internal(InternalAddress::IbcMint) => { + ctx.read_temp(key)?.unwrap_or_else(Amount::max) + } + Address::Internal(InternalAddress::IbcBurn) => { + ctx.read_temp(key)?.unwrap_or_default() + } + _ => ctx.read_post(key)?.unwrap_or_default(), + }; + let this_change = post.change() - pre.change(); + change += this_change; + // make sure that the spender approved the transaction + if this_change < 0 && !verifiers.contains(owner) { + return reject(); + } + } + } + } + Ok(change == 0) +} diff --git a/wasm/checksums.json b/wasm/checksums.json index 898f6e9763..2247dd1683 100644 --- a/wasm/checksums.json +++ b/wasm/checksums.json @@ -1,19 +1,19 @@ { - "tx_bond.wasm": "tx_bond.16097490afa7378c79e6216751b20796cde3a9026c34255c3f1e5ec5a4c9482e.wasm", - "tx_from_intent.wasm": "tx_from_intent.f8d1937b17a3abaf7ea595526c870b3d57ddef8e0c1bc96f8e0a448864b186c7.wasm", - "tx_ibc.wasm": "tx_ibc.378b10551c0b22c2c892d24e2676ee5160d654e2e53a50e7925e0f2c6321497b.wasm", - "tx_init_account.wasm": "tx_init_account.adab66c2b4d635e9c42133936aafb143363f91dddff2a60f94df504ffec951a6.wasm", - "tx_init_nft.wasm": "tx_init_nft.d1065ebd80ba6ea97f29bc2268becf9ba3ba2952641992464f3e9e868df17447.wasm", - "tx_init_proposal.wasm": "tx_init_proposal.184131576a579f9ece96460d1eb20e5970fcd149b0527c8e56b711e5c535aa5f.wasm", - "tx_init_validator.wasm": "tx_init_validator.2990747d24d467b56e19724c5d13df826a3aab83f7e1bf26558dbdf44e260f8a.wasm", - "tx_mint_nft.wasm": "tx_mint_nft.33db14dea4a03ff7508ca44f3ae956d83c0abceb3dae5be844668e54ac22b273.wasm", - "tx_transfer.wasm": "tx_transfer.a601d62296f56f6b4dabb0a2ad082478d195e667c7469f363bdfd5fe41349bd8.wasm", - "tx_unbond.wasm": "tx_unbond.014cbf5b0aa3ac592c0a6940dd502ec8569a3af4d12782e3a5931c15dc13042f.wasm", - "tx_update_vp.wasm": "tx_update_vp.83d4caeb5a9ca3009cd899810493a6b87b4c07fa9ed36f297db99dc881fb9a1c.wasm", - "tx_vote_proposal.wasm": "tx_vote_proposal.bcb5280be9dfeed0a7650ba5e4a3cebc2c19b76780fd74dcb345be3da766b64a.wasm", - "tx_withdraw.wasm": "tx_withdraw.8fc0a3439ee9ae66047c520519877bc1f540e0cb02abfa31afa8cce8cd069b6f.wasm", - "vp_nft.wasm": "vp_nft.2c820c728d241b82bf0ed3c552ee9e7c046bceaa4f7b6f12d3236a1a3d7c1589.wasm", - "vp_testnet_faucet.wasm": "vp_testnet_faucet.6e762f3fda8aa7a252e2b29a4a312db91ded062d6c18b8b489883733c89dc227.wasm", - "vp_token.wasm": "vp_token.c45cc3848f12fc47713702dc206d1312ad740a6bbee7f141556714f6f89d4985.wasm", - "vp_user.wasm": "vp_user.d6cd2f4b5bc26f96df6aa300fddf4d25e1656920d59896209bd54ae8d407ecde.wasm" + "tx_bond.wasm": "tx_bond.7bfc18f1969d0ba119e7ca2fe00e7ca156ff2b60277a380e97b4cdefeb20ea51.wasm", + "tx_from_intent.wasm": "tx_from_intent.229e4c974480899212c1c30374c1344aa95c0366109ff56b316fcfcc0120c732.wasm", + "tx_ibc.wasm": "tx_ibc.7341c8abc400599cbe4787f7f9fbd84f2ca18959f70d61c2a389f3a4a3ef34d3.wasm", + "tx_init_account.wasm": 
"tx_init_account.ced4788ea1837d0cacd6ba691f09181555c846f420cb46d32e76cccae9cad0e5.wasm", + "tx_init_nft.wasm": "tx_init_nft.411a1fb5c2f5ef8a9484443fa3f2affddb6b11779c961abc274288e3cd4aba28.wasm", + "tx_init_proposal.wasm": "tx_init_proposal.c3e13023ae8444a10c0fcc50fa27344d6c35367966e41cd1f95a609add0aa26a.wasm", + "tx_init_validator.wasm": "tx_init_validator.707d0798265ba501a813321b5d9a1222bda8448650460e78518e2796f0b42c30.wasm", + "tx_mint_nft.wasm": "tx_mint_nft.0910d261e037e3c0633a3898fc00a08a953d87c8a4b9db2b4041877b91f8317e.wasm", + "tx_transfer.wasm": "tx_transfer.486ffcee9265df25b01751d6007b7f07a083288b985396a8b6fd2aeaacd3e7a8.wasm", + "tx_unbond.wasm": "tx_unbond.dbc10595136c99949a86567561857bb7c465a7a1ea6e21a2b9d261510867ec63.wasm", + "tx_update_vp.wasm": "tx_update_vp.a04692ad8b2715c6262b4e3342591ab7bbb3e6577458979c33d196e8d80146fc.wasm", + "tx_vote_proposal.wasm": "tx_vote_proposal.25dbae81da4255cae3ffeab6857646af46ef76d70984acfc7c89a3aeb8249679.wasm", + "tx_withdraw.wasm": "tx_withdraw.599ecc125b197b22b27127ce61bc17138a4dd05eb1598a64862496f301c0bc28.wasm", + "vp_nft.wasm": "vp_nft.a7f25944fba3d9a2b00e98482535ed4591282bbf794d64cad18d3c7d15a6318c.wasm", + "vp_testnet_faucet.wasm": "vp_testnet_faucet.e28867d79578ce4a32d027b6e50580e8e5e0a19b44c60dc10cacb41dfe07e28c.wasm", + "vp_token.wasm": "vp_token.d24443f5683d0d7d0259dab878f811a56c3d19f3158aecfaae6ce7627cb40884.wasm", + "vp_user.wasm": "vp_user.1aa3756e386a883f523534ac76fb4b75e01c06fcde647c2b6fcca807ba683497.wasm" } \ No newline at end of file diff --git a/wasm/tx_template/Cargo.lock b/wasm/tx_template/Cargo.lock index 4e85d68f2c..f4eb47bcf9 100644 --- a/wasm/tx_template/Cargo.lock +++ b/wasm/tx_template/Cargo.lock @@ -642,6 +642,12 @@ dependencies = [ "syn", ] +[[package]] +name = "data-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" + [[package]] name = "der" version = "0.5.1" @@ -1524,10 +1530,10 @@ dependencies = [ "borsh", "chrono", "clru", + "data-encoding", "derivative", "ed25519-consensus", "ferveo-common", - "hex", "ibc", "ibc-proto", "ics23", @@ -1588,7 +1594,8 @@ dependencies = [ "concat-idents", "derivative", "namada", - "namada_vm_env", + "namada_tx_prelude", + "namada_vp_prelude", "prost", "serde_json", "sha2 0.9.9", @@ -1602,8 +1609,12 @@ dependencies = [ name = "namada_tx_prelude" version = "0.7.1" dependencies = [ + "borsh", + "namada", + "namada_macros", "namada_vm_env", "sha2 0.10.2", + "thiserror", ] [[package]] @@ -1611,9 +1622,19 @@ name = "namada_vm_env" version = "0.7.1" dependencies = [ "borsh", - "hex", + "namada", +] + +[[package]] +name = "namada_vp_prelude" +version = "0.7.1" +dependencies = [ + "borsh", "namada", "namada_macros", + "namada_vm_env", + "sha2 0.10.2", + "thiserror", ] [[package]] diff --git a/wasm/tx_template/src/lib.rs b/wasm/tx_template/src/lib.rs index f507e90bed..473984aa31 100644 --- a/wasm/tx_template/src/lib.rs +++ b/wasm/tx_template/src/lib.rs @@ -1,8 +1,9 @@ use namada_tx_prelude::*; #[transaction] -fn apply_tx(tx_data: Vec) { +fn apply_tx(_ctx: &mut Ctx, tx_data: Vec) -> TxResult { log_string(format!("apply_tx called with data: {:#?}", tx_data)); + Ok(()) } #[cfg(test)] @@ -19,7 +20,7 @@ mod tests { tx_host_env::init(); let tx_data = vec![]; - apply_tx(tx_data); + apply_tx(ctx(), tx_data).unwrap(); let env = tx_host_env::take(); assert!(env.all_touched_storage_keys().is_empty()); diff --git a/wasm/vp_template/Cargo.lock 
diff --git a/wasm/vp_template/Cargo.lock b/wasm/vp_template/Cargo.lock
index 25070a9319..0210a75d4f 100644
--- a/wasm/vp_template/Cargo.lock
+++ b/wasm/vp_template/Cargo.lock
@@ -642,6 +642,12 @@ dependencies = [
  "syn",
 ]
 
+[[package]]
+name = "data-encoding"
+version = "2.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57"
+
 [[package]]
 name = "der"
 version = "0.5.1"
@@ -1524,10 +1530,10 @@ dependencies = [
  "borsh",
  "chrono",
  "clru",
+ "data-encoding",
  "derivative",
  "ed25519-consensus",
  "ferveo-common",
- "hex",
  "ibc",
  "ibc-proto",
  "ics23",
@@ -1588,7 +1594,8 @@ dependencies = [
  "concat-idents",
  "derivative",
  "namada",
- "namada_vm_env",
+ "namada_tx_prelude",
+ "namada_vp_prelude",
  "prost",
  "serde_json",
  "sha2 0.9.9",
@@ -1599,21 +1606,35 @@ dependencies = [
 ]
 
 [[package]]
-name = "namada_vm_env"
+name = "namada_tx_prelude"
 version = "0.7.1"
 dependencies = [
  "borsh",
- "hex",
  "namada",
  "namada_macros",
+ "namada_vm_env",
+ "sha2 0.10.2",
+ "thiserror",
+]
+
+[[package]]
+name = "namada_vm_env"
+version = "0.7.1"
+dependencies = [
+ "borsh",
+ "namada",
 ]
 
 [[package]]
 name = "namada_vp_prelude"
 version = "0.7.1"
 dependencies = [
+ "borsh",
+ "namada",
+ "namada_macros",
  "namada_vm_env",
  "sha2 0.10.2",
+ "thiserror",
 ]
 
 [[package]]
diff --git a/wasm/vp_template/src/lib.rs b/wasm/vp_template/src/lib.rs
index 7918072266..35cdabd1c5 100644
--- a/wasm/vp_template/src/lib.rs
+++ b/wasm/vp_template/src/lib.rs
@@ -2,25 +2,25 @@
 use namada_vp_prelude::*;
 
 #[validity_predicate]
 fn validate_tx(
+    ctx: &Ctx,
     tx_data: Vec<u8>,
     addr: Address,
     keys_changed: BTreeSet<storage::Key>,
     verifiers: BTreeSet<Address>,
-) -> bool {
+) -> VpResult {
     log_string(format!(
         "validate_tx called with addr: {}, key_changed: {:#?}, tx_data: \
          {:#?}, verifiers: {:?}",
         addr, keys_changed, tx_data, verifiers
     ));
 
-    for key in keys_changed.iter() {
-        let key = key.to_string();
-        let pre: Option<token::Amount> = read_pre(&key);
-        let post: Option<token::Amount> = read_post(&key);
+    for key in keys_changed {
+        let pre: Option<token::Amount> = ctx.read_pre(&key)?;
+        let post: Option<token::Amount> = ctx.read_post(&key)?;
         log_string(format!(
             "validate_tx key: {}, pre: {:#?}, post: {:#?}",
             key, pre, post,
         ));
     }
-    true
+    accept()
 }
diff --git a/wasm/wasm_source/Cargo.lock b/wasm/wasm_source/Cargo.lock
index 269535735e..05251166aa 100644
--- a/wasm/wasm_source/Cargo.lock
+++ b/wasm/wasm_source/Cargo.lock
@@ -642,6 +642,12 @@ dependencies = [
  "syn",
 ]
 
+[[package]]
+name = "data-encoding"
+version = "2.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57"
+
 [[package]]
 name = "der"
 version = "0.5.1"
@@ -1524,10 +1530,10 @@ dependencies = [
  "borsh",
  "chrono",
  "clru",
+ "data-encoding",
  "derivative",
  "ed25519-consensus",
  "ferveo-common",
- "hex",
  "ibc",
  "ibc-proto",
  "ics23",
@@ -1588,7 +1594,8 @@ dependencies = [
  "concat-idents",
  "derivative",
  "namada",
- "namada_vm_env",
+ "namada_tx_prelude",
+ "namada_vp_prelude",
  "prost",
  "serde_json",
  "sha2 0.9.9",
@@ -1602,8 +1609,12 @@ dependencies = [
 name = "namada_tx_prelude"
 version = "0.7.1"
 dependencies = [
+ "borsh",
+ "namada",
+ "namada_macros",
  "namada_vm_env",
  "sha2 0.10.2",
+ "thiserror",
 ]
 
 [[package]]
@@ -1611,17 +1622,19 @@ name = "namada_vm_env"
 version = "0.7.1"
 dependencies = [
  "borsh",
- "hex",
  "namada",
- "namada_macros",
 ]
 
 [[package]]
 name = "namada_vp_prelude"
 version = "0.7.1"
 dependencies = [
+ "borsh",
+ "namada",
+ "namada_macros",
  "namada_vm_env",
  "sha2 0.10.2",
+ "thiserror",
 ]
 
 [[package]]
diff --git a/wasm/wasm_source/src/tx_bond.rs b/wasm/wasm_source/src/tx_bond.rs
index 9a5309f927..e880380802 100644
--- a/wasm/wasm_source/src/tx_bond.rs
+++ b/wasm/wasm_source/src/tx_bond.rs
@@ -1,19 +1,14 @@
 //! A tx for a PoS bond that stakes tokens via a self-bond or delegation.
 
-use namada_tx_prelude::proof_of_stake::bond_tokens;
 use namada_tx_prelude::*;
 
 #[transaction]
-fn apply_tx(tx_data: Vec<u8>) {
-    let signed = SignedTxData::try_from_slice(&tx_data[..]).unwrap();
-    let bond =
-        transaction::pos::Bond::try_from_slice(&signed.data.unwrap()[..])
-            .unwrap();
+fn apply_tx(ctx: &mut Ctx, tx_data: Vec<u8>) -> TxResult {
+    let signed = SignedTxData::try_from_slice(&tx_data[..])
+        .wrap_err("failed to decode SignedTxData")?;
+    let data = signed.data.ok_or_err_msg("Missing data")?;
+    let bond = transaction::pos::Bond::try_from_slice(&data[..])
+        .wrap_err("failed to decode Bond")?;
 
-    if let Err(err) =
-        bond_tokens(bond.source.as_ref(), &bond.validator, bond.amount)
-    {
-        debug_log!("Bond failed with: {}", err);
-        panic!()
-    }
+    ctx.bond_tokens(bond.source.as_ref(), &bond.validator, bond.amount)
 }
diff --git a/wasm/wasm_source/src/tx_from_intent.rs b/wasm/wasm_source/src/tx_from_intent.rs
index e39963fae7..deeb5f3eb0 100644
--- a/wasm/wasm_source/src/tx_from_intent.rs
+++ b/wasm/wasm_source/src/tx_from_intent.rs
@@ -5,16 +5,15 @@
 use namada_tx_prelude::*;
 
 #[transaction]
-fn apply_tx(tx_data: Vec<u8>) {
-    let signed = SignedTxData::try_from_slice(&tx_data[..]).unwrap();
-
-    let tx_data =
-        intent::IntentTransfers::try_from_slice(&signed.data.unwrap()[..]);
-
-    let tx_data = tx_data.unwrap();
+fn apply_tx(ctx: &mut Ctx, tx_data: Vec<u8>) -> TxResult {
+    let signed = SignedTxData::try_from_slice(&tx_data[..])
+        .wrap_err("failed to decode SignedTxData")?;
+    let data = signed.data.ok_or_err_msg("Missing data")?;
+    let tx_data = intent::IntentTransfers::try_from_slice(&data[..])
+        .wrap_err("failed to decode IntentTransfers")?;
 
     // make sure that the matchmaker has to validate this tx
-    insert_verifier(&tx_data.source);
+    ctx.insert_verifier(&tx_data.source)?;
 
     for token::Transfer {
         source,
@@ -23,13 +22,11 @@ fn apply_tx(tx_data: Vec<u8>) {
         amount,
     } in tx_data.matches.transfers
     {
-        token::transfer(&source, &target, &token, amount);
+        token::transfer(ctx, &source, &target, &token, amount)?;
     }
 
-    tx_data
-        .matches
-        .exchanges
-        .values()
-        .into_iter()
-        .for_each(intent::invalidate_exchange);
+    for intent in tx_data.matches.exchanges.values() {
+        intent::invalidate_exchange(ctx, intent)?;
+    }
+    Ok(())
 }
diff --git a/wasm/wasm_source/src/tx_ibc.rs b/wasm/wasm_source/src/tx_ibc.rs
index e38aa2f856..79cbc6cf96 100644
--- a/wasm/wasm_source/src/tx_ibc.rs
+++ b/wasm/wasm_source/src/tx_ibc.rs
@@ -6,7 +6,9 @@
 use namada_tx_prelude::*;
 
 #[transaction]
-fn apply_tx(tx_data: Vec<u8>) {
-    let signed = SignedTxData::try_from_slice(&tx_data[..]).unwrap();
-    Ibc.dispatch(&signed.data.unwrap()).unwrap()
+fn apply_tx(ctx: &mut Ctx, tx_data: Vec<u8>) -> TxResult {
+    let signed = SignedTxData::try_from_slice(&tx_data[..])
+        .wrap_err("failed to decode SignedTxData")?;
+    let data = signed.data.ok_or_err_msg("Missing data")?;
+    ctx.dispatch_ibc_action(&data)
 }
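The rewritten txs in this changeset all follow the same skeleton; a condensed sketch (illustrative only, with a hypothetical payload type `MyPayload` standing in for `Bond`, `InitAccount`, etc.):

```rust
// Illustrative only: decoding failures now surface as `TxResult` errors
// (via `wrap_err`/`ok_or_err_msg`) instead of panics.
#[derive(BorshDeserialize)]
struct MyPayload {
    amount: token::Amount,
}

#[transaction]
fn apply_tx(ctx: &mut Ctx, tx_data: Vec<u8>) -> TxResult {
    let signed = SignedTxData::try_from_slice(&tx_data[..])
        .wrap_err("failed to decode SignedTxData")?;
    let data = signed.data.ok_or_err_msg("Missing data")?;
    let _payload = MyPayload::try_from_slice(&data[..])
        .wrap_err("failed to decode MyPayload")?;
    // ... act on the payload through `ctx`, propagating errors with `?`
    Ok(())
}
```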
diff --git a/wasm/wasm_source/src/tx_init_account.rs b/wasm/wasm_source/src/tx_init_account.rs
index e976c38941..e0fe700d63 100644
--- a/wasm/wasm_source/src/tx_init_account.rs
+++ b/wasm/wasm_source/src/tx_init_account.rs
@@ -4,14 +4,16 @@
 use namada_tx_prelude::*;
 
 #[transaction]
-fn apply_tx(tx_data: Vec<u8>) {
-    let signed = SignedTxData::try_from_slice(&tx_data[..]).unwrap();
-    let tx_data =
-        transaction::InitAccount::try_from_slice(&signed.data.unwrap()[..])
-            .unwrap();
+fn apply_tx(ctx: &mut Ctx, tx_data: Vec<u8>) -> TxResult {
+    let signed = SignedTxData::try_from_slice(&tx_data[..])
+        .wrap_err("failed to decode SignedTxData")?;
+    let data = signed.data.ok_or_err_msg("Missing data")?;
+    let tx_data = transaction::InitAccount::try_from_slice(&data[..])
+        .wrap_err("failed to decode InitAccount")?;
     debug_log!("apply_tx called to init a new established account");
 
-    let address = init_account(&tx_data.vp_code);
+    let address = ctx.init_account(&tx_data.vp_code)?;
     let pk_key = key::pk_key(&address);
-    write(&pk_key.to_string(), &tx_data.public_key);
+    ctx.write(&pk_key, &tx_data.public_key)?;
+    Ok(())
 }
diff --git a/wasm/wasm_source/src/tx_init_nft.rs b/wasm/wasm_source/src/tx_init_nft.rs
index e26d656b57..de67dfbb53 100644
--- a/wasm/wasm_source/src/tx_init_nft.rs
+++ b/wasm/wasm_source/src/tx_init_nft.rs
@@ -3,12 +3,14 @@
 use namada_tx_prelude::*;
 
 #[transaction]
-fn apply_tx(tx_data: Vec<u8>) {
-    let signed = SignedTxData::try_from_slice(&tx_data[..]).unwrap();
-    let tx_data =
-        transaction::nft::CreateNft::try_from_slice(&signed.data.unwrap()[..])
-            .unwrap();
+fn apply_tx(ctx: &mut Ctx, tx_data: Vec<u8>) -> TxResult {
+    let signed = SignedTxData::try_from_slice(&tx_data[..])
+        .wrap_err("failed to decode SignedTxData")?;
+    let data = signed.data.ok_or_err_msg("Missing data")?;
+    let tx_data = transaction::nft::CreateNft::try_from_slice(&data[..])
+        .wrap_err("failed to decode CreateNft")?;
 
     log_string("apply_tx called to create a new NFT");
 
-    nft::init_nft(tx_data);
+    let _address = nft::init_nft(ctx, tx_data)?;
+    Ok(())
 }
diff --git a/wasm/wasm_source/src/tx_init_proposal.rs b/wasm/wasm_source/src/tx_init_proposal.rs
index 3cb1c3d5de..cb7fe9ffbb 100644
--- a/wasm/wasm_source/src/tx_init_proposal.rs
+++ b/wasm/wasm_source/src/tx_init_proposal.rs
@@ -3,13 +3,14 @@
 use namada_tx_prelude::*;
 
 #[transaction]
-fn apply_tx(tx_data: Vec<u8>) {
-    let signed = SignedTxData::try_from_slice(&tx_data[..]).unwrap();
-    let tx_data = transaction::governance::InitProposalData::try_from_slice(
-        &signed.data.unwrap()[..],
-    )
-    .unwrap();
+fn apply_tx(ctx: &mut Ctx, tx_data: Vec<u8>) -> TxResult {
+    let signed = SignedTxData::try_from_slice(&tx_data[..])
+        .wrap_err("failed to decode SignedTxData")?;
+    let data = signed.data.ok_or_err_msg("Missing data")?;
+    let tx_data =
+        transaction::governance::InitProposalData::try_from_slice(&data[..])
+            .wrap_err("failed to decode InitProposalData")?;
 
     log_string("apply_tx called to create a new governance proposal");
 
-    governance::init_proposal(tx_data);
+    governance::init_proposal(ctx, tx_data)
 }
diff --git a/wasm/wasm_source/src/tx_init_validator.rs b/wasm/wasm_source/src/tx_init_validator.rs
index 79dfedad56..2d5f1a6256 100644
--- a/wasm/wasm_source/src/tx_init_validator.rs
+++ b/wasm/wasm_source/src/tx_init_validator.rs
@@ -5,14 +5,16 @@
 use namada_tx_prelude::transaction::InitValidator;
 use namada_tx_prelude::*;
 
 #[transaction]
-fn apply_tx(tx_data: Vec<u8>) {
-    let signed = SignedTxData::try_from_slice(&tx_data[..]).unwrap();
-    let init_validator =
-        InitValidator::try_from_slice(&signed.data.unwrap()[..]).unwrap();
+fn apply_tx(ctx: &mut Ctx, tx_data: Vec<u8>) -> TxResult {
+    let signed = SignedTxData::try_from_slice(&tx_data[..])
+        .wrap_err("failed to decode SignedTxData")?;
+    let data = signed.data.ok_or_err_msg("Missing data")?;
+    let init_validator = InitValidator::try_from_slice(&data[..])
+        .wrap_err("failed to decode InitValidator")?;
     debug_log!("apply_tx called to init a new validator account");
 
     // Register the validator in PoS
-    match proof_of_stake::init_validator(init_validator) {
+    match ctx.init_validator(init_validator) {
         Ok((validator_address, staking_reward_address)) => {
             debug_log!(
                 "Created validator {} and staking reward account {}",
@@ -25,4 +27,5 @@ fn apply_tx(tx_data: Vec<u8>) {
             panic!()
         }
     }
+    Ok(())
 }
diff --git a/wasm/wasm_source/src/tx_mint_nft.rs b/wasm/wasm_source/src/tx_mint_nft.rs
index 692155432c..d3ab17e7ad 100644
--- a/wasm/wasm_source/src/tx_mint_nft.rs
+++ b/wasm/wasm_source/src/tx_mint_nft.rs
@@ -3,12 +3,13 @@
 use namada_tx_prelude::*;
 
 #[transaction]
-fn apply_tx(tx_data: Vec<u8>) {
-    let signed = SignedTxData::try_from_slice(&tx_data[..]).unwrap();
-    let tx_data =
-        transaction::nft::MintNft::try_from_slice(&signed.data.unwrap()[..])
-            .unwrap();
+fn apply_tx(ctx: &mut Ctx, tx_data: Vec<u8>) -> TxResult {
+    let signed = SignedTxData::try_from_slice(&tx_data[..])
+        .wrap_err("failed to decode SignedTxData")?;
+    let data = signed.data.ok_or_err_msg("Missing data")?;
+    let tx_data = transaction::nft::MintNft::try_from_slice(&data[..])
+        .wrap_err("failed to decode MintNft")?;
 
     log_string("apply_tx called to mint a new NFT tokens");
 
-    nft::mint_tokens(tx_data);
+    nft::mint_tokens(ctx, tx_data)
 }
diff --git a/wasm/wasm_source/src/tx_transfer.rs b/wasm/wasm_source/src/tx_transfer.rs
index f0ab0ad2d0..eccddee2f0 100644
--- a/wasm/wasm_source/src/tx_transfer.rs
+++ b/wasm/wasm_source/src/tx_transfer.rs
@@ -5,10 +5,12 @@
 use namada_tx_prelude::*;
 
 #[transaction]
-fn apply_tx(tx_data: Vec<u8>) {
-    let signed = SignedTxData::try_from_slice(&tx_data[..]).unwrap();
-    let transfer =
-        token::Transfer::try_from_slice(&signed.data.unwrap()[..]).unwrap();
+fn apply_tx(ctx: &mut Ctx, tx_data: Vec<u8>) -> TxResult {
+    let signed = SignedTxData::try_from_slice(&tx_data[..])
+        .wrap_err("failed to decode SignedTxData")?;
+    let data = signed.data.ok_or_err_msg("Missing data")?;
+    let transfer = token::Transfer::try_from_slice(&data[..])
+        .wrap_err("failed to decode token::Transfer")?;
     debug_log!("apply_tx called with transfer: {:#?}", transfer);
     let token::Transfer {
         source,
@@ -16,5 +18,5 @@ fn apply_tx(tx_data: Vec<u8>) {
         token,
         amount,
     } = transfer;
-    token::transfer(&source, &target, &token, amount)
+    token::transfer(ctx, &source, &target, &token, amount)
 }
diff --git a/wasm/wasm_source/src/tx_unbond.rs b/wasm/wasm_source/src/tx_unbond.rs
index 5d2662ed5c..5d1765bb38 100644
--- a/wasm/wasm_source/src/tx_unbond.rs
+++ b/wasm/wasm_source/src/tx_unbond.rs
@@ -1,20 +1,15 @@
 //! A tx for a PoS unbond that removes staked tokens from a self-bond or a
 //! delegation to be withdrawn in or after unbonding epoch.
-use namada_tx_prelude::proof_of_stake::unbond_tokens; use namada_tx_prelude::*; #[transaction] -fn apply_tx(tx_data: Vec) { - let signed = SignedTxData::try_from_slice(&tx_data[..]).unwrap(); - let unbond = - transaction::pos::Unbond::try_from_slice(&signed.data.unwrap()[..]) - .unwrap(); +fn apply_tx(ctx: &mut Ctx, tx_data: Vec) -> TxResult { + let signed = SignedTxData::try_from_slice(&tx_data[..]) + .wrap_err("failed to decode SignedTxData")?; + let data = signed.data.ok_or_err_msg("Missing data")?; + let unbond = transaction::pos::Unbond::try_from_slice(&data[..]) + .wrap_err("failed to decode Unbond")?; - if let Err(err) = - unbond_tokens(unbond.source.as_ref(), &unbond.validator, unbond.amount) - { - debug_log!("Unbonding failed with: {}", err); - panic!() - } + ctx.unbond_tokens(unbond.source.as_ref(), &unbond.validator, unbond.amount) } diff --git a/wasm/wasm_source/src/tx_update_vp.rs b/wasm/wasm_source/src/tx_update_vp.rs index 4b68f11170..0bb819f026 100644 --- a/wasm/wasm_source/src/tx_update_vp.rs +++ b/wasm/wasm_source/src/tx_update_vp.rs @@ -5,11 +5,14 @@ use namada_tx_prelude::*; #[transaction] -fn apply_tx(tx_data: Vec) { - let signed = SignedTxData::try_from_slice(&tx_data[..]).unwrap(); - let update_vp = - transaction::UpdateVp::try_from_slice(&signed.data.unwrap()[..]) - .unwrap(); +fn apply_tx(ctx: &mut Ctx, tx_data: Vec) -> TxResult { + let signed = SignedTxData::try_from_slice(&tx_data[..]) + .wrap_err("failed to decode SignedTxData")?; + let data = signed.data.ok_or_err_msg("Missing data")?; + let update_vp = transaction::UpdateVp::try_from_slice(&data[..]) + .wrap_err("failed to decode UpdateVp")?; + debug_log!("update VP for: {:#?}", update_vp.addr); - update_validity_predicate(&update_vp.addr, update_vp.vp_code) + + ctx.update_validity_predicate(&update_vp.addr, update_vp.vp_code) } diff --git a/wasm/wasm_source/src/tx_vote_proposal.rs b/wasm/wasm_source/src/tx_vote_proposal.rs index cae8c4ef33..92c7af4c7f 100644 --- a/wasm/wasm_source/src/tx_vote_proposal.rs +++ b/wasm/wasm_source/src/tx_vote_proposal.rs @@ -3,13 +3,15 @@ use namada_tx_prelude::*; #[transaction] -fn apply_tx(tx_data: Vec) { - let signed = SignedTxData::try_from_slice(&tx_data[..]).unwrap(); - let tx_data = transaction::governance::VoteProposalData::try_from_slice( - &signed.data.unwrap()[..], - ) - .unwrap(); - log_string("apply_tx called to vote a governance proposal"); - - governance::vote_proposal(tx_data); +fn apply_tx(ctx: &mut Ctx, tx_data: Vec) -> TxResult { + let signed = SignedTxData::try_from_slice(&tx_data[..]) + .wrap_err("failed to decode SignedTxData")?; + let data = signed.data.ok_or_err_msg("Missing data")?; + let tx_data = + transaction::governance::VoteProposalData::try_from_slice(&data[..]) + .wrap_err("failed to decode VoteProposalData")?; + + debug_log!("apply_tx called to vote on a governance proposal"); + + governance::vote_proposal(ctx, tx_data) } diff --git a/wasm/wasm_source/src/tx_withdraw.rs b/wasm/wasm_source/src/tx_withdraw.rs index 27bd984a66..3c841d88b0 100644 --- a/wasm/wasm_source/src/tx_withdraw.rs +++ b/wasm/wasm_source/src/tx_withdraw.rs @@ -1,23 +1,20 @@ //! A tx for a PoS withdrawal of staked tokens previously unbonded from a //! self-bond or a delegation, available in or after the unbonding epoch.
-use namada_tx_prelude::proof_of_stake::withdraw_tokens; use namada_tx_prelude::*; #[transaction] -fn apply_tx(tx_data: Vec) { - let signed = SignedTxData::try_from_slice(&tx_data[..]).unwrap(); - let withdraw = - transaction::pos::Withdraw::try_from_slice(&signed.data.unwrap()[..]) - .unwrap(); +fn apply_tx(ctx: &mut Ctx, tx_data: Vec) -> TxResult { + let signed = SignedTxData::try_from_slice(&tx_data[..]) + .wrap_err("failed to decode SignedTxData")?; + let data = signed.data.ok_or_err_msg("Missing data")?; + let withdraw = transaction::pos::Withdraw::try_from_slice(&data[..]) + .wrap_err("failed to decode Withdraw")?; - match withdraw_tokens(withdraw.source.as_ref(), &withdraw.validator) { - Ok(slashed) => { - debug_log!("Withdrawal slashed for {}", slashed); - } - Err(err) => { - debug_log!("Withdrawal failed with: {}", err); - panic!() - } + let slashed = + ctx.withdraw_tokens(withdraw.source.as_ref(), &withdraw.validator)?; + if slashed != token::Amount::default() { + debug_log!("Withdrawal slashed for {}", slashed); } + Ok(()) } diff --git a/wasm/wasm_source/src/vp_nft.rs b/wasm/wasm_source/src/vp_nft.rs index f1e6dd587b..956a0a5d42 100644 --- a/wasm/wasm_source/src/vp_nft.rs +++ b/wasm/wasm_source/src/vp_nft.rs @@ -4,33 +4,36 @@ use namada_vp_prelude::*; #[validity_predicate] fn validate_tx( + ctx: &Ctx, tx_data: Vec, addr: Address, keys_changed: BTreeSet, verifiers: BTreeSet
, -) -> bool { +) -> VpResult { log_string(format!( "validate_tx called with token addr: {}, key_changed: {:#?}, \ verifiers: {:?}", addr, keys_changed, verifiers )); - if !is_tx_whitelisted() { - return false; + if !is_tx_whitelisted(ctx)? { + return reject(); } - let vp_check = - keys_changed - .iter() - .all(|key| match key.is_validity_predicate() { - Some(_) => { - let vp: Vec = read_bytes_post(key.to_string()).unwrap(); - is_vp_whitelisted(&vp) + let vp_check = keys_changed.iter().all(|key| { + if key.is_validity_predicate().is_some() { + match ctx.read_bytes_post(key) { + Ok(Some(vp)) => { + matches!(is_vp_whitelisted(ctx, &vp), Ok(true)) } - None => true, - }); - - vp_check && nft::vp(tx_data, &addr, &keys_changed, &verifiers) + _ => false, + } + } else { + true + } + }); + + Ok(vp_check && nft::vp(ctx, tx_data, &addr, &keys_changed, &verifiers)?) } #[cfg(test)] @@ -38,8 +41,9 @@ mod tests { use namada::types::nft::{self, NftToken}; use namada::types::transaction::nft::{CreateNft, MintNft}; use namada_tests::log::test; - use namada_tests::tx::{tx_host_env, TestTxEnv}; + use namada_tests::tx::{self, tx_host_env, TestTxEnv}; use namada_tests::vp::*; + use namada_tx_prelude::{StorageWrite, TxEnv}; use super::*; @@ -59,21 +63,25 @@ mod tests { std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); tx_host_env::set(tx_env); - let nft_address = tx_host_env::nft::init_nft(CreateNft { - tag: "v1".to_string(), - creator: nft_creator.clone(), - vp_code, - keys: vec![], - opt_keys: vec![], - tokens: vec![], - }); + let nft_address = tx_host_env::nft::init_nft( + tx::ctx(), + CreateNft { + tag: "v1".to_string(), + creator: nft_creator.clone(), + vp_code, + keys: vec![], + opt_keys: vec![], + tokens: vec![], + }, + ) + .unwrap(); let mut tx_env = tx_host_env::take(); tx_env.write_log.commit_tx(); vp_host_env::init_from_tx(nft_address.clone(), tx_env, |address| { // Apply transfer in a transaction - tx_host_env::insert_verifier(address) + tx::ctx().insert_verifier(address).unwrap() }); let vp_env = vp_host_env::take(); @@ -82,7 +90,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= vp_env.get_verifiers(); vp_host_env::set(vp_env); - assert!(validate_tx(tx_data, nft_address, keys_changed, verifiers)); + assert!( + validate_tx(&CTX, tx_data, nft_address, keys_changed, verifiers) + .unwrap() + ); } /// Test that you can create an nft without tokens @@ -98,26 +109,34 @@ mod tests { std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); tx_host_env::set(tx_env); - let nft_address = tx_host_env::nft::init_nft(CreateNft { - tag: "v1".to_string(), - creator: nft_creator.clone(), - vp_code, - keys: vec![], - opt_keys: vec![], - tokens: vec![], - }); + let nft_address = tx_host_env::nft::init_nft( + tx::ctx(), + CreateNft { + tag: "v1".to_string(), + creator: nft_creator.clone(), + vp_code, + keys: vec![], + opt_keys: vec![], + tokens: vec![], + }, + ) + .unwrap(); let mut tx_env = tx_host_env::take(); tx_env.write_log.commit_tx(); vp_host_env::init_from_tx(nft_address.clone(), tx_env, |address| { // Apply transfer in a transaction - tx_host_env::nft::mint_tokens(MintNft { - address: nft_address.clone(), - tokens: vec![], - creator: nft_creator.clone(), - }); - tx_host_env::insert_verifier(address) + tx_host_env::nft::mint_tokens( + tx::ctx(), + MintNft { + address: nft_address.clone(), + tokens: vec![], + creator: nft_creator.clone(), + }, + ) + .unwrap(); + tx::ctx().insert_verifier(address).unwrap() }); let vp_env = vp_host_env::take(); @@ -127,7 +146,10 @@ mod tests { let verifiers: BTreeSet
= vp_env.get_verifiers(); vp_host_env::set(vp_env); - assert!(validate_tx(tx_data, nft_address, keys_changed, verifiers)); + assert!( + validate_tx(&CTX, tx_data, nft_address, keys_changed, verifiers) + .unwrap() + ); } /// Test that you can create an nft with tokens @@ -144,34 +166,42 @@ mod tests { std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); tx_host_env::set(tx_env); - let nft_address = tx_host_env::nft::init_nft(CreateNft { - tag: "v1".to_string(), - creator: nft_creator.clone(), - vp_code, - keys: vec![], - opt_keys: vec![], - tokens: vec![], - }); + let nft_address = tx_host_env::nft::init_nft( + tx::ctx(), + CreateNft { + tag: "v1".to_string(), + creator: nft_creator.clone(), + vp_code, + keys: vec![], + opt_keys: vec![], + tokens: vec![], + }, + ) + .unwrap(); let mut tx_env = tx_host_env::take(); tx_env.commit_tx_and_block(); vp_host_env::init_from_tx(nft_address.clone(), tx_env, |_| { // Apply transfer in a transaction - tx_host_env::nft::mint_tokens(MintNft { - address: nft_address.clone(), - creator: nft_creator.clone(), - tokens: vec![NftToken { - id: 1, - values: vec![], - opt_values: vec![], - metadata: "".to_string(), - approvals: vec![], - current_owner: Some(nft_token_owner.clone()), - past_owners: vec![], - burnt: false, - }], - }); + tx_host_env::nft::mint_tokens( + tx::ctx(), + MintNft { + address: nft_address.clone(), + creator: nft_creator.clone(), + tokens: vec![NftToken { + id: 1, + values: vec![], + opt_values: vec![], + metadata: "".to_string(), + approvals: vec![], + current_owner: Some(nft_token_owner.clone()), + past_owners: vec![], + burnt: false, + }], + }, + ) + .unwrap(); }); let vp_env = vp_host_env::take(); @@ -181,7 +211,10 @@ mod tests { let verifiers: BTreeSet
= vp_env.get_verifiers(); vp_host_env::set(vp_env); - assert!(validate_tx(tx_data, nft_address, keys_changed, verifiers)); + assert!( + validate_tx(&CTX, tx_data, nft_address, keys_changed, verifiers) + .unwrap() + ); } /// Test that only owner can mint new tokens @@ -198,34 +231,42 @@ mod tests { std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); tx_host_env::set(tx_env); - let nft_address = tx_host_env::nft::init_nft(CreateNft { - tag: "v1".to_string(), - creator: nft_creator.clone(), - vp_code, - keys: vec![], - opt_keys: vec![], - tokens: vec![], - }); + let nft_address = tx_host_env::nft::init_nft( + tx::ctx(), + CreateNft { + tag: "v1".to_string(), + creator: nft_creator.clone(), + vp_code, + keys: vec![], + opt_keys: vec![], + tokens: vec![], + }, + ) + .unwrap(); let mut tx_env = tx_host_env::take(); tx_env.commit_tx_and_block(); vp_host_env::init_from_tx(nft_address.clone(), tx_env, |_| { // Apply transfer in a transaction - tx_host_env::nft::mint_tokens(MintNft { - address: nft_address.clone(), - creator: nft_token_owner.clone(), - tokens: vec![NftToken { - id: 1, - values: vec![], - opt_values: vec![], - metadata: "".to_string(), - approvals: vec![], - current_owner: Some(nft_token_owner.clone()), - past_owners: vec![], - burnt: false, - }], - }); + tx_host_env::nft::mint_tokens( + tx::ctx(), + MintNft { + address: nft_address.clone(), + creator: nft_token_owner.clone(), + tokens: vec![NftToken { + id: 1, + values: vec![], + opt_values: vec![], + metadata: "".to_string(), + approvals: vec![], + current_owner: Some(nft_token_owner.clone()), + past_owners: vec![], + burnt: false, + }], + }, + ) + .unwrap(); }); let vp_env = vp_host_env::take(); @@ -235,7 +276,10 @@ mod tests { let verifiers: BTreeSet
= vp_env.get_verifiers(); vp_host_env::set(vp_env); - assert!(!validate_tx(tx_data, nft_address, keys_changed, verifiers)); + assert!( + !validate_tx(&CTX, tx_data, nft_address, keys_changed, verifiers) + .unwrap() + ); } /// Test that an approval can add another approval @@ -259,45 +303,54 @@ mod tests { std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); tx_host_env::set(tx_env); - let nft_address = tx_host_env::nft::init_nft(CreateNft { - tag: "v1".to_string(), - creator: nft_creator.clone(), - vp_code, - keys: vec![], - opt_keys: vec![], - tokens: vec![], - }); + let nft_address = tx_host_env::nft::init_nft( + tx::ctx(), + CreateNft { + tag: "v1".to_string(), + creator: nft_creator.clone(), + vp_code, + keys: vec![], + opt_keys: vec![], + tokens: vec![], + }, + ) + .unwrap(); let mut tx_env = tx_host_env::take(); tx_env.commit_tx_and_block(); tx_host_env::set(tx_env); - tx_host_env::nft::mint_tokens(MintNft { - address: nft_address.clone(), - creator: nft_creator.clone(), - tokens: vec![NftToken { - id: 1, - values: vec![], - opt_values: vec![], - metadata: "".to_string(), - approvals: vec![nft_token_approval.clone()], - current_owner: None, - past_owners: vec![], - burnt: false, - }], - }); + tx_host_env::nft::mint_tokens( + tx::ctx(), + MintNft { + address: nft_address.clone(), + creator: nft_creator.clone(), + tokens: vec![NftToken { + id: 1, + values: vec![], + opt_values: vec![], + metadata: "".to_string(), + approvals: vec![nft_token_approval.clone()], + current_owner: None, + past_owners: vec![], + burnt: false, + }], + }, + ) + .unwrap(); let mut tx_env = tx_host_env::take(); tx_env.commit_tx_and_block(); vp_host_env::init_from_tx(nft_address.clone(), tx_env, |_| { - let approval_key = - nft::get_token_approval_key(&nft_address, "1").to_string(); - tx_host_env::write( - approval_key, - [&nft_token_approval_2, &nft_token_approval], - ); - tx_host_env::insert_verifier(&nft_token_approval); + let approval_key = nft::get_token_approval_key(&nft_address, "1"); + tx::ctx() + .write( + &approval_key, + [&nft_token_approval_2, &nft_token_approval], + ) + .unwrap(); + tx::ctx().insert_verifier(&nft_token_approval).unwrap(); }); let vp_env = vp_host_env::take(); @@ -307,7 +360,10 @@ mod tests { let verifiers: BTreeSet
= vp_env.get_verifiers(); vp_host_env::set(vp_env); - assert!(validate_tx(tx_data, nft_address, keys_changed, verifiers)); + assert!( + validate_tx(&CTX, tx_data, nft_address, keys_changed, verifiers) + .unwrap() + ); } /// Test that an approval can add another approval @@ -331,45 +387,54 @@ mod tests { std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); tx_host_env::set(tx_env); - let nft_address = tx_host_env::nft::init_nft(CreateNft { - tag: "v1".to_string(), - creator: nft_creator.clone(), - vp_code, - keys: vec![], - opt_keys: vec![], - tokens: vec![], - }); + let nft_address = tx_host_env::nft::init_nft( + tx::ctx(), + CreateNft { + tag: "v1".to_string(), + creator: nft_creator.clone(), + vp_code, + keys: vec![], + opt_keys: vec![], + tokens: vec![], + }, + ) + .unwrap(); let mut tx_env = tx_host_env::take(); tx_env.commit_tx_and_block(); tx_host_env::set(tx_env); - tx_host_env::nft::mint_tokens(MintNft { - address: nft_address.clone(), - creator: nft_creator.clone(), - tokens: vec![NftToken { - id: 1, - values: vec![], - opt_values: vec![], - metadata: "".to_string(), - approvals: vec![nft_token_approval.clone()], - current_owner: None, - past_owners: vec![], - burnt: false, - }], - }); + tx_host_env::nft::mint_tokens( + tx::ctx(), + MintNft { + address: nft_address.clone(), + creator: nft_creator.clone(), + tokens: vec![NftToken { + id: 1, + values: vec![], + opt_values: vec![], + metadata: "".to_string(), + approvals: vec![nft_token_approval.clone()], + current_owner: None, + past_owners: vec![], + burnt: false, + }], + }, + ) + .unwrap(); let mut tx_env = tx_host_env::take(); tx_env.commit_tx_and_block(); vp_host_env::init_from_tx(nft_address.clone(), tx_env, |_| { - let approval_key = - nft::get_token_approval_key(&nft_address, "1").to_string(); - tx_host_env::write( - approval_key, - [&nft_token_approval_2, &nft_token_approval], - ); - tx_host_env::insert_verifier(&nft_token_approval_2); + let approval_key = nft::get_token_approval_key(&nft_address, "1"); + tx::ctx() + .write( + &approval_key, + [&nft_token_approval_2, &nft_token_approval], + ) + .unwrap(); + tx::ctx().insert_verifier(&nft_token_approval_2).unwrap(); }); let vp_env = vp_host_env::take(); @@ -379,7 +444,10 @@ mod tests { let verifiers: BTreeSet
= vp_env.get_verifiers(); vp_host_env::set(vp_env); - assert!(!validate_tx(tx_data, nft_address, keys_changed, verifiers)); + assert!( + !validate_tx(&CTX, tx_data, nft_address, keys_changed, verifiers) + .unwrap() + ); } /// Test nft address cannot be changed @@ -396,21 +464,25 @@ mod tests { std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); tx_host_env::set(tx_env); - let nft_address = tx_host_env::nft::init_nft(CreateNft { - tag: "v1".to_string(), - creator: nft_owner.clone(), - vp_code, - keys: vec![], - opt_keys: vec![], - tokens: vec![], - }); + let nft_address = tx_host_env::nft::init_nft( + tx::ctx(), + CreateNft { + tag: "v1".to_string(), + creator: nft_owner.clone(), + vp_code, + keys: vec![], + opt_keys: vec![], + tokens: vec![], + }, + ) + .unwrap(); let mut tx_env = tx_host_env::take(); tx_env.commit_tx_and_block(); vp_host_env::init_from_tx(nft_address.clone(), tx_env, |_| { - let creator_key = nft::get_creator_key(&nft_address).to_string(); - tx_host_env::write(creator_key, &another_address); + let creator_key = nft::get_creator_key(&nft_address); + tx::ctx().write(&creator_key, &another_address).unwrap(); }); let vp_env = vp_host_env::take(); @@ -420,6 +492,9 @@ mod tests { let verifiers: BTreeSet
= vp_env.get_verifiers(); vp_host_env::set(vp_env); - assert!(!validate_tx(tx_data, nft_address, keys_changed, verifiers)); + assert!( + !validate_tx(&CTX, tx_data, nft_address, keys_changed, verifiers) + .unwrap() + ); } } diff --git a/wasm/wasm_source/src/vp_testnet_faucet.rs b/wasm/wasm_source/src/vp_testnet_faucet.rs index 553288926e..b599f82251 100644 --- a/wasm/wasm_source/src/vp_testnet_faucet.rs +++ b/wasm/wasm_source/src/vp_testnet_faucet.rs @@ -13,11 +13,12 @@ pub const MAX_FREE_DEBIT: i128 = 1_000_000_000; // in micro units #[validity_predicate] fn validate_tx( + ctx: &Ctx, tx_data: Vec, addr: Address, keys_changed: BTreeSet, verifiers: BTreeSet
, -) -> bool { +) -> VpResult { debug_log!( "vp_testnet_faucet called with user addr: {}, key_changed: {:?}, \ verifiers: {:?}", @@ -31,26 +32,31 @@ fn validate_tx( let valid_sig = Lazy::new(|| match &*signed_tx_data { Ok(signed_tx_data) => { - let pk = key::get(&addr); + let pk = key::get(ctx, &addr); match pk { - Some(pk) => verify_tx_signature(&pk, &signed_tx_data.sig), - None => false, + Ok(Some(pk)) => { + matches!( + ctx.verify_tx_signature(&pk, &signed_tx_data.sig), + Ok(true) + ) + } + _ => false, } } _ => false, }); - if !is_tx_whitelisted() { - return false; + if !is_tx_whitelisted(ctx)? { + return reject(); } for key in keys_changed.iter() { let is_valid = if let Some(owner) = token::is_any_token_balance_key(key) { if owner == &addr { - let key = key.to_string(); - let pre: token::Amount = read_pre(&key).unwrap_or_default(); - let post: token::Amount = read_post(&key).unwrap_or_default(); + let pre: token::Amount = ctx.read_pre(key)?.unwrap_or_default(); + let post: token::Amount = + ctx.read_post(key)?.unwrap_or_default(); let change = post.change() - pre.change(); // Debit over `MAX_FREE_DEBIT` has to be signed, credit doesn't change >= -MAX_FREE_DEBIT || change >= 0 || *valid_sig @@ -59,18 +65,17 @@ fn validate_tx( true } } else if let Some(owner) = key.is_validity_predicate() { - let key = key.to_string(); - let has_post: bool = has_key_post(&key); + let has_post: bool = ctx.has_key_post(key)?; if owner == &addr { if has_post { - let vp: Vec = read_bytes_post(&key).unwrap(); - return *valid_sig && is_vp_whitelisted(&vp); + let vp: Vec = ctx.read_bytes_post(key)?.unwrap(); + return Ok(*valid_sig && is_vp_whitelisted(ctx, &vp)?); } else { - return false; + return reject(); } } else { - let vp: Vec = read_bytes_post(&key).unwrap(); - return is_vp_whitelisted(&vp); + let vp: Vec = ctx.read_bytes_post(key)?.unwrap(); + return is_vp_whitelisted(ctx, &vp); } } else { // Allow any other key change if authorized by a signature @@ -78,10 +83,10 @@ fn validate_tx( }; if !is_valid { debug_log!("key {} modification failed vp", key); - return false; + return reject(); } } - true + accept() } #[cfg(test)] @@ -89,9 +94,10 @@ mod tests { use address::testing::arb_non_internal_address; // Use this as `#[test]` annotation to enable logging use namada_tests::log::test; - use namada_tests::tx::{tx_host_env, TestTxEnv}; + use namada_tests::tx::{self, tx_host_env, TestTxEnv}; use namada_tests::vp::vp_host_env::storage::Key; use namada_tests::vp::*; + use namada_tx_prelude::{StorageWrite, TxEnv}; use namada_vp_prelude::key::RefTo; use proptest::prelude::*; use storage::testing::arb_account_storage_key_no_vp; @@ -112,7 +118,9 @@ mod tests { // The VP env must be initialized before calling `validate_tx` vp_host_env::init(); - assert!(validate_tx(tx_data, addr, keys_changed, verifiers)); + assert!( + validate_tx(&CTX, tx_data, addr, keys_changed, verifiers).unwrap() + ); } /// Test that a credit transfer is accepted. @@ -136,7 +144,14 @@ mod tests { // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Apply transfer in a transaction - tx_host_env::token::transfer(&source, address, &token, amount); + tx_host_env::token::transfer( + tx_host_env::ctx(), + &source, + address, + &token, + amount, + ) + .unwrap(); }); let vp_env = vp_host_env::take(); @@ -145,7 +160,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx(tx_data, vp_owner, keys_changed, verifiers)); + assert!( + validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) + .unwrap() + ); } /// Test that a validity predicate update without a valid signature is @@ -165,7 +183,9 @@ mod tests { // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction - tx_host_env::update_validity_predicate(address, &vp_code); + tx::ctx() + .update_validity_predicate(address, &vp_code) + .unwrap(); }); let vp_env = vp_host_env::take(); @@ -174,7 +194,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(!validate_tx(tx_data, vp_owner, keys_changed, verifiers)); + assert!( + !validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) + .unwrap() + ); } /// Test that a validity predicate update with a valid signature is @@ -198,7 +221,9 @@ mod tests { // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction - tx_host_env::update_validity_predicate(address, &vp_code); + tx::ctx() + .update_validity_predicate(address, &vp_code) + .unwrap(); }); let mut vp_env = vp_host_env::take(); @@ -210,7 +235,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx(tx_data, vp_owner, keys_changed, verifiers)); + assert!( + validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) + .unwrap() + ); } prop_compose! { @@ -251,7 +279,7 @@ mod tests { // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Apply transfer in a transaction - tx_host_env::token::transfer(address, &target, &token, amount); + tx_host_env::token::transfer(tx::ctx(), address, &target, &token, amount).unwrap(); }); let vp_env = vp_host_env::take(); @@ -260,7 +288,7 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(!validate_tx(tx_data, vp_owner, keys_changed, verifiers)); + assert!(!validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers).unwrap()); } /// Test that a debit of less than or equal to [`MAX_FREE_DEBIT`] tokens without a valid signature is accepted. @@ -284,7 +312,7 @@ mod tests { // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Apply transfer in a transaction - tx_host_env::token::transfer(address, &target, &token, amount); + tx_host_env::token::transfer(tx::ctx(), address, &target, &token, amount).unwrap(); }); let vp_env = vp_host_env::take(); @@ -293,7 +321,7 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx(tx_data, vp_owner, keys_changed, verifiers)); + assert!(validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers).unwrap()); } /// Test that a signed tx that performs arbitrary storage writes or @@ -321,9 +349,9 @@ mod tests { vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |_address| { // Write or delete some data in the transaction if let Some(value) = &storage_value { - tx_host_env::write(storage_key.to_string(), value); + tx::ctx().write(&storage_key, value).unwrap(); } else { - tx_host_env::delete(storage_key.to_string()); + tx::ctx().delete(&storage_key).unwrap(); } }); @@ -336,7 +364,7 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx(tx_data, vp_owner, keys_changed, verifiers)); + assert!(validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers).unwrap()); } } } diff --git a/wasm/wasm_source/src/vp_token.rs b/wasm/wasm_source/src/vp_token.rs index 60513ce808..b9d3de8f7d 100644 --- a/wasm/wasm_source/src/vp_token.rs +++ b/wasm/wasm_source/src/vp_token.rs @@ -5,11 +5,12 @@ use namada_vp_prelude::*; #[validity_predicate] fn validate_tx( + ctx: &Ctx, _tx_data: Vec, addr: Address, keys_changed: BTreeSet, verifiers: BTreeSet
, -) -> bool { +) -> VpResult { debug_log!( "validate_tx called with token addr: {}, key_changed: {:?}, \ verifiers: {:?}", @@ -18,20 +19,18 @@ fn validate_tx( verifiers ); - if !is_tx_whitelisted() { - return false; + if !is_tx_whitelisted(ctx)? { + return reject(); } - let vp_check = - keys_changed - .iter() - .all(|key| match key.is_validity_predicate() { - Some(_) => { - let vp: Vec = read_bytes_post(key.to_string()).unwrap(); - is_vp_whitelisted(&vp) - } - None => true, - }); + for key in keys_changed.iter() { + if key.is_validity_predicate().is_some() { + let vp: Vec = ctx.read_bytes_post(key)?.unwrap(); + if !is_vp_whitelisted(ctx, &vp)? { + return reject(); + } + } + } - vp_check && token::vp(&addr, &keys_changed, &verifiers) + token::vp(ctx, &addr, &keys_changed, &verifiers) } diff --git a/wasm/wasm_source/src/vp_user.rs b/wasm/wasm_source/src/vp_user.rs index a222a344ef..d20472d9ec 100644 --- a/wasm/wasm_source/src/vp_user.rs +++ b/wasm/wasm_source/src/vp_user.rs @@ -57,11 +57,12 @@ impl<'a> From<&'a storage::Key> for KeyType<'a> { #[validity_predicate] fn validate_tx( + ctx: &Ctx, tx_data: Vec, addr: Address, keys_changed: BTreeSet, verifiers: BTreeSet
, -) -> bool { +) -> VpResult { debug_log!( "vp_user called with user addr: {}, key_changed: {:?}, verifiers: {:?}", addr, @@ -74,22 +75,32 @@ fn validate_tx( let valid_sig = Lazy::new(|| match &*signed_tx_data { Ok(signed_tx_data) => { - let pk = key::get(&addr); + let pk = key::get(ctx, &addr); match pk { - Some(pk) => verify_tx_signature(&pk, &signed_tx_data.sig), - None => false, + Ok(Some(pk)) => { + matches!( + ctx.verify_tx_signature(&pk, &signed_tx_data.sig), + Ok(true) + ) + } + _ => false, } } _ => false, }); let valid_intent = Lazy::new(|| match &*signed_tx_data { - Ok(signed_tx_data) => check_intent_transfers(&addr, signed_tx_data), + Ok(signed_tx_data) => { + matches!( + check_intent_transfers(ctx, &addr, signed_tx_data), + Ok(true) + ) + } _ => false, }); - if !is_tx_whitelisted() { - return false; + if !is_tx_whitelisted(ctx)? { + return reject(); } for key in keys_changed.iter() { @@ -97,10 +108,10 @@ fn validate_tx( let is_valid = match key_type { KeyType::Token(owner) => { if owner == &addr { - let key = key.to_string(); - let pre: token::Amount = read_pre(&key).unwrap_or_default(); + let pre: token::Amount = + ctx.read_pre(key)?.unwrap_or_default(); let post: token::Amount = - read_post(&key).unwrap_or_default(); + ctx.read_post(key)?.unwrap_or_default(); let change = post.change() - pre.change(); // debit has to be signed, credit doesn't let valid = change >= 0 || *valid_sig || *valid_intent; @@ -150,11 +161,10 @@ fn validate_tx( } KeyType::InvalidIntentSet(owner) => { if owner == &addr { - let key = key.to_string(); let pre: HashSet = - read_pre(&key).unwrap_or_default(); + ctx.read_pre(key)?.unwrap_or_default(); let post: HashSet = - read_post(&key).unwrap_or_default(); + ctx.read_post(key)?.unwrap_or_default(); // A new invalid intent must have been added pre.len() + 1 == post.len() } else { @@ -184,18 +194,17 @@ fn validate_tx( } } KeyType::Vp(owner) => { - let key = key.to_string(); - let has_post: bool = has_key_post(&key); + let has_post: bool = ctx.has_key_post(key)?; if owner == &addr { if has_post { - let vp: Vec = read_bytes_post(&key).unwrap(); - return *valid_sig && is_vp_whitelisted(&vp); + let vp: Vec = ctx.read_bytes_post(key)?.unwrap(); + *valid_sig && is_vp_whitelisted(ctx, &vp)? } else { - return false; + false } } else { - let vp: Vec = read_bytes_post(&key).unwrap(); - return is_vp_whitelisted(&vp); + let vp: Vec = ctx.read_bytes_post(key)?.unwrap(); + is_vp_whitelisted(ctx, &vp)?
} } KeyType::Unknown => { @@ -211,24 +220,25 @@ fn validate_tx( }; if !is_valid { debug_log!("key {} modification failed vp", key); - return false; + return reject(); } } - true + accept() } fn check_intent_transfers( + ctx: &Ctx, addr: &Address, signed_tx_data: &SignedTxData, -) -> bool { +) -> EnvResult { if let Some((raw_intent_transfers, exchange, intent)) = try_decode_intent(addr, signed_tx_data) { log_string("check intent"); - return check_intent(addr, exchange, intent, raw_intent_transfers); + return check_intent(ctx, addr, exchange, intent, raw_intent_transfers); } - false + reject() } fn try_decode_intent( @@ -259,25 +269,26 @@ fn try_decode_intent( } fn check_intent( + ctx: &Ctx, addr: &Address, exchange: namada_vp_prelude::Signed, intent: namada_vp_prelude::Signed, raw_intent_transfers: Vec, -) -> bool { +) -> EnvResult { // verify signature - let pk = key::get(addr); + let pk = key::get(ctx, addr)?; if let Some(pk) = pk { if intent.verify(&pk).is_err() { log_string("invalid sig"); - return false; + return reject(); } } else { - return false; + return reject(); } // verify the intent has not already been used - if !intent::vp_exchange(&exchange) { - return false; + if !intent::vp_exchange(ctx, &exchange)? { + return reject(); } // verify the intent is fulfilled @@ -294,10 +305,10 @@ fn check_intent( debug_log!("vp is: {}", vp.is_some()); if let Some(code) = vp { - let eval_result = eval(code.to_vec(), raw_intent_transfers); + let eval_result = ctx.eval(code.to_vec(), raw_intent_transfers)?; debug_log!("eval result: {}", eval_result); if !eval_result { - return false; + return reject(); } } @@ -310,18 +321,19 @@ fn check_intent( rate_min.0 ); - let token_sell_key = token::balance_key(token_sell, addr).to_string(); + let token_sell_key = token::balance_key(token_sell, addr); let mut sell_difference: token::Amount = - read_pre(&token_sell_key).unwrap_or_default(); + ctx.read_pre(&token_sell_key)?.unwrap_or_default(); let sell_post: token::Amount = - read_post(token_sell_key).unwrap_or_default(); + ctx.read_post(&token_sell_key)?.unwrap_or_default(); sell_difference.spend(&sell_post); - let token_buy_key = token::balance_key(token_buy, addr).to_string(); - let buy_pre: token::Amount = read_pre(&token_buy_key).unwrap_or_default(); + let token_buy_key = token::balance_key(token_buy, addr); + let buy_pre: token::Amount = + ctx.read_pre(&token_buy_key)?.unwrap_or_default(); let mut buy_difference: token::Amount = - read_post(token_buy_key).unwrap_or_default(); + ctx.read_post(&token_buy_key)?.unwrap_or_default(); buy_difference.spend(&buy_pre); @@ -354,9 +366,9 @@ fn check_intent( min_buy.change(), buy_diff / sell_diff ); - false + reject() } else { - true + accept() } } @@ -365,9 +377,10 @@ mod tests { use address::testing::arb_non_internal_address; // Use this as `#[test]` annotation to enable logging use namada_tests::log::test; - use namada_tests::tx::{tx_host_env, TestTxEnv}; + use namada_tests::tx::{self, tx_host_env, TestTxEnv}; use namada_tests::vp::vp_host_env::storage::Key; use namada_tests::vp::*; + use namada_tx_prelude::{StorageWrite, TxEnv}; use namada_vp_prelude::key::RefTo; use proptest::prelude::*; use storage::testing::arb_account_storage_key_no_vp; @@ -388,7 +401,9 @@ mod tests { // The VP env must be initialized before calling `validate_tx` vp_host_env::init(); - assert!(validate_tx(tx_data, addr, keys_changed, verifiers)); + assert!( + validate_tx(&CTX, tx_data, addr, keys_changed, verifiers).unwrap() + ); } /// Test that a credit transfer is accepted.
@@ -412,7 +427,14 @@ mod tests { // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Apply transfer in a transaction - tx_host_env::token::transfer(&source, address, &token, amount); + tx_host_env::token::transfer( + tx::ctx(), + &source, + address, + &token, + amount, + ) + .unwrap(); }); let vp_env = vp_host_env::take(); @@ -421,7 +443,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx(tx_data, vp_owner, keys_changed, verifiers)); + assert!( + validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) + .unwrap() + ); } /// Test that a debit transfer without a valid signature is rejected. @@ -445,7 +470,14 @@ mod tests { // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Apply transfer in a transaction - tx_host_env::token::transfer(address, &target, &token, amount); + tx_host_env::token::transfer( + tx::ctx(), + address, + &target, + &token, + amount, + ) + .unwrap(); }); let vp_env = vp_host_env::take(); @@ -454,7 +486,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(!validate_tx(tx_data, vp_owner, keys_changed, verifiers)); + assert!( + !validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) + .unwrap() + ); } /// Test that a debit transfer with a valid signature is accepted. @@ -482,7 +517,14 @@ mod tests { // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Apply transfer in a transaction - tx_host_env::token::transfer(address, &target, &token, amount); + tx_host_env::token::transfer( + tx::ctx(), + address, + &target, + &token, + amount, + ) + .unwrap(); }); let mut vp_env = vp_host_env::take(); @@ -494,7 +536,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx(tx_data, vp_owner, keys_changed, verifiers)); + assert!( + validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) + .unwrap() + ); } /// Test that a transfer between accounts other than self is accepted. @@ -518,9 +563,16 @@ mod tests { // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { - tx_host_env::insert_verifier(address); + tx::ctx().insert_verifier(address).unwrap(); // Apply transfer in a transaction - tx_host_env::token::transfer(&source, &target, &token, amount); + tx_host_env::token::transfer( + tx::ctx(), + &source, + &target, + &token, + amount, + ) + .unwrap(); }); let vp_env = vp_host_env::take(); @@ -529,7 +581,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx(tx_data, vp_owner, keys_changed, verifiers)); + assert!( + validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) + .unwrap() + ); } prop_compose! { @@ -569,9 +624,9 @@ mod tests { vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |_address| { // Write or delete some data in the transaction if let Some(value) = &storage_value { - tx_host_env::write(storage_key.to_string(), value); + tx::ctx().write(&storage_key, value).unwrap(); } else { - tx_host_env::delete(storage_key.to_string()); + tx::ctx().delete(&storage_key).unwrap(); } }); @@ -581,7 +636,7 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(!validate_tx(tx_data, vp_owner, keys_changed, verifiers)); + assert!(!validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers).unwrap()); } } @@ -611,9 +666,9 @@ mod tests { vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |_address| { // Write or delete some data in the transaction if let Some(value) = &storage_value { - tx_host_env::write(storage_key.to_string(), value); + tx::ctx().write(&storage_key, value).unwrap(); } else { - tx_host_env::delete(storage_key.to_string()); + tx::ctx().delete(&storage_key).unwrap(); } }); @@ -626,7 +681,7 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx(tx_data, vp_owner, keys_changed, verifiers)); + assert!(validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers).unwrap()); } } @@ -647,7 +702,9 @@ mod tests { // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction - tx_host_env::update_validity_predicate(address, &vp_code); + tx::ctx() + .update_validity_predicate(address, &vp_code) + .unwrap(); }); let vp_env = vp_host_env::take(); @@ -656,7 +713,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(!validate_tx(tx_data, vp_owner, keys_changed, verifiers)); + assert!( + !validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) + .unwrap() + ); } /// Test that a validity predicate update with a valid signature is @@ -681,7 +741,9 @@ mod tests { // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction - tx_host_env::update_validity_predicate(address, &vp_code); + tx::ctx() + .update_validity_predicate(address, &vp_code) + .unwrap(); }); let mut vp_env = vp_host_env::take(); @@ -693,7 +755,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx(tx_data, vp_owner, keys_changed, verifiers)); + assert!( + validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) + .unwrap() + ); } /// Test that a validity predicate update is rejected if not whitelisted @@ -717,7 +782,9 @@ mod tests { // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction - tx_host_env::update_validity_predicate(address, &vp_code); + tx::ctx() + .update_validity_predicate(address, &vp_code) + .unwrap(); }); let mut vp_env = vp_host_env::take(); @@ -729,7 +796,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(!validate_tx(tx_data, vp_owner, keys_changed, verifiers)); + assert!( + !validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) + .unwrap() + ); } /// Test that a validity predicate update is accepted if whitelisted @@ -755,7 +825,9 @@ mod tests { // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction - tx_host_env::update_validity_predicate(address, &vp_code); + tx::ctx() + .update_validity_predicate(address, &vp_code) + .unwrap(); }); let mut vp_env = vp_host_env::take(); @@ -767,7 +839,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx(tx_data, vp_owner, keys_changed, verifiers)); + assert!( + validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) + .unwrap() + ); } /// Test that a tx is rejected if not whitelisted @@ -797,7 +872,9 @@ mod tests { // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction - tx_host_env::update_validity_predicate(address, &vp_code); + tx::ctx() + .update_validity_predicate(address, &vp_code) + .unwrap(); }); let mut vp_env = vp_host_env::take(); @@ -809,7 +886,10 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(!validate_tx(tx_data, vp_owner, keys_changed, verifiers)); + assert!( + !validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) + .unwrap() + ); } #[test] @@ -834,7 +914,9 @@ mod tests { // Initialize VP environment from a transaction vp_host_env::init_from_tx(vp_owner.clone(), tx_env, |address| { // Update VP in a transaction - tx_host_env::update_validity_predicate(address, &vp_code); + tx::ctx() + .update_validity_predicate(address, &vp_code) + .unwrap(); }); let mut vp_env = vp_host_env::take(); @@ -846,6 +928,9 @@ mod tests { vp_env.all_touched_storage_keys(); let verifiers: BTreeSet
= BTreeSet::default(); vp_host_env::set(vp_env); - assert!(validate_tx(tx_data, vp_owner, keys_changed, verifiers)); + assert!( + validate_tx(&CTX, tx_data, vp_owner, keys_changed, verifiers) + .unwrap() + ); } } diff --git a/wasm_for_tests/tx_memory_limit.wasm b/wasm_for_tests/tx_memory_limit.wasm index 88c8ef0ada..c31821bdf4 100755 Binary files a/wasm_for_tests/tx_memory_limit.wasm and b/wasm_for_tests/tx_memory_limit.wasm differ diff --git a/wasm_for_tests/tx_mint_tokens.wasm b/wasm_for_tests/tx_mint_tokens.wasm index c9a102773e..d9c8586901 100755 Binary files a/wasm_for_tests/tx_mint_tokens.wasm and b/wasm_for_tests/tx_mint_tokens.wasm differ diff --git a/wasm_for_tests/tx_no_op.wasm b/wasm_for_tests/tx_no_op.wasm index bdab4054d9..649a1b72f1 100755 Binary files a/wasm_for_tests/tx_no_op.wasm and b/wasm_for_tests/tx_no_op.wasm differ diff --git a/wasm_for_tests/tx_proposal_code.wasm b/wasm_for_tests/tx_proposal_code.wasm index 3c753377ed..4f19c38cdb 100755 Binary files a/wasm_for_tests/tx_proposal_code.wasm and b/wasm_for_tests/tx_proposal_code.wasm differ diff --git a/wasm_for_tests/tx_read_storage_key.wasm b/wasm_for_tests/tx_read_storage_key.wasm index b4266e6940..15ea698e74 100755 Binary files a/wasm_for_tests/tx_read_storage_key.wasm and b/wasm_for_tests/tx_read_storage_key.wasm differ diff --git a/wasm_for_tests/tx_write_storage_key.wasm b/wasm_for_tests/tx_write_storage_key.wasm index 89b669f6af..a7db45733e 100755 Binary files a/wasm_for_tests/tx_write_storage_key.wasm and b/wasm_for_tests/tx_write_storage_key.wasm differ diff --git a/wasm_for_tests/vp_always_false.wasm b/wasm_for_tests/vp_always_false.wasm index 57b061685c..7f7bb42473 100755 Binary files a/wasm_for_tests/vp_always_false.wasm and b/wasm_for_tests/vp_always_false.wasm differ diff --git a/wasm_for_tests/vp_always_true.wasm b/wasm_for_tests/vp_always_true.wasm index 8990ed651f..1317469042 100755 Binary files a/wasm_for_tests/vp_always_true.wasm and b/wasm_for_tests/vp_always_true.wasm differ diff --git a/wasm_for_tests/vp_eval.wasm b/wasm_for_tests/vp_eval.wasm index 15b9ad7d67..58dff40f04 100755 Binary files a/wasm_for_tests/vp_eval.wasm and b/wasm_for_tests/vp_eval.wasm differ diff --git a/wasm_for_tests/vp_memory_limit.wasm b/wasm_for_tests/vp_memory_limit.wasm index 9d445b88fd..3e476d5ec4 100755 Binary files a/wasm_for_tests/vp_memory_limit.wasm and b/wasm_for_tests/vp_memory_limit.wasm differ diff --git a/wasm_for_tests/vp_read_storage_key.wasm b/wasm_for_tests/vp_read_storage_key.wasm index fa5e6bcb3a..4a91dfb6fa 100755 Binary files a/wasm_for_tests/vp_read_storage_key.wasm and b/wasm_for_tests/vp_read_storage_key.wasm differ diff --git a/wasm_for_tests/wasm_source/Cargo.lock b/wasm_for_tests/wasm_source/Cargo.lock index 9df5195b2f..a851da5f13 100644 --- a/wasm_for_tests/wasm_source/Cargo.lock +++ b/wasm_for_tests/wasm_source/Cargo.lock @@ -643,6 +643,12 @@ dependencies = [ "syn", ] +[[package]] +name = "data-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" + [[package]] name = "der" version = "0.5.1" @@ -1357,7 +1363,7 @@ dependencies = [ [[package]] name = "libsecp256k1" version = "0.7.0" -source = "git+https://github.com/brentstone/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" +source = 
"git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" dependencies = [ "arrayref", "base64", @@ -1373,7 +1379,7 @@ dependencies = [ [[package]] name = "libsecp256k1-core" version = "0.3.0" -source = "git+https://github.com/brentstone/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" +source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" dependencies = [ "crunchy", "digest 0.9.0", @@ -1383,7 +1389,7 @@ dependencies = [ [[package]] name = "libsecp256k1-gen-ecmult" version = "0.3.0" -source = "git+https://github.com/brentstone/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" +source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" dependencies = [ "libsecp256k1-core", ] @@ -1391,7 +1397,7 @@ dependencies = [ [[package]] name = "libsecp256k1-gen-genmult" version = "0.3.0" -source = "git+https://github.com/brentstone/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" +source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" dependencies = [ "libsecp256k1-core", ] @@ -1527,7 +1533,7 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "namada" -version = "0.7.0" +version = "0.7.1" dependencies = [ "ark-bls12-381", "ark-serialize", @@ -1535,10 +1541,10 @@ dependencies = [ "borsh", "chrono", "clru", + "data-encoding", "derivative", "ed25519-consensus", "ferveo-common", - "hex", "ibc", "ibc-proto", "ics23", @@ -1576,7 +1582,7 @@ dependencies = [ [[package]] name = "namada_macros" -version = "0.7.0" +version = "0.7.1" dependencies = [ "quote", "syn", @@ -1584,7 +1590,7 @@ dependencies = [ [[package]] name = "namada_proof_of_stake" -version = "0.7.0" +version = "0.7.1" dependencies = [ "borsh", "proptest", @@ -1593,13 +1599,14 @@ dependencies = [ [[package]] name = "namada_tests" -version = "0.7.0" +version = "0.7.1" dependencies = [ "chrono", "concat-idents", "derivative", "namada", - "namada_vm_env", + "namada_tx_prelude", + "namada_vp_prelude", "prost", "serde_json", "sha2 0.9.9", @@ -1611,39 +1618,44 @@ dependencies = [ [[package]] name = "namada_tx_prelude" -version = "0.7.0" +version = "0.7.1" dependencies = [ + "borsh", + "namada", + "namada_macros", "namada_vm_env", "sha2 0.10.2", + "thiserror", ] [[package]] name = "namada_vm_env" -version = "0.7.0" +version = "0.7.1" dependencies = [ "borsh", - "hex", "namada", - "namada_macros", ] [[package]] name = "namada_vp_prelude" -version = "0.7.0" +version = "0.7.1" dependencies = [ + "borsh", + "namada", + "namada_macros", "namada_vm_env", "sha2 0.10.2", + "thiserror", ] [[package]] name = "namada_wasm_for_tests" -version = "0.7.0" +version = "0.7.1" dependencies = [ "borsh", "getrandom", "namada_tests", "namada_tx_prelude", - "namada_vm_env", "namada_vp_prelude", "wee_alloc", ] diff --git a/wasm_for_tests/wasm_source/Cargo.toml b/wasm_for_tests/wasm_source/Cargo.toml index d4df7a2e5c..18de2af010 100644 --- a/wasm_for_tests/wasm_source/Cargo.toml +++ b/wasm_for_tests/wasm_source/Cargo.toml @@ -26,7 +26,6 @@ tx_proposal_code = [] [dependencies] namada_tx_prelude = 
{path = "../../tx_prelude"} -namada_vm_env = {path = "../../vm_env"} namada_vp_prelude = {path = "../../vp_prelude"} borsh = "0.9.1" wee_alloc = "0.4.5" diff --git a/wasm_for_tests/wasm_source/src/lib.rs b/wasm_for_tests/wasm_source/src/lib.rs index 674fb2a2d9..4731ef60be 100644 --- a/wasm_for_tests/wasm_source/src/lib.rs +++ b/wasm_for_tests/wasm_source/src/lib.rs @@ -1,66 +1,71 @@ /// A tx that doesn't do anything. #[cfg(feature = "tx_no_op")] pub mod main { - use namada_vm_env::tx_prelude::*; + use namada_tx_prelude::*; #[transaction] - fn apply_tx(_tx_data: Vec) {} + fn apply_tx(_ctx: &mut Ctx, _tx_data: Vec) -> TxResult { + Ok(()) + } } /// A tx that allocates a memory of size given from the `tx_data: usize`. #[cfg(feature = "tx_memory_limit")] pub mod main { - use namada_vm_env::tx_prelude::*; + use namada_tx_prelude::*; #[transaction] - fn apply_tx(tx_data: Vec) { + fn apply_tx(_ctx: &mut Ctx, tx_data: Vec) -> TxResult { let len = usize::try_from_slice(&tx_data[..]).unwrap(); log_string(format!("allocate len {}", len)); let bytes: Vec = vec![6_u8; len]; // use the variable to prevent it from compiler optimizing it away log_string(format!("{:?}", &bytes[..8])); + Ok(()) } } /// A tx to be used as proposal_code #[cfg(feature = "tx_proposal_code")] pub mod main { - use namada_vm_env::tx_prelude::*; + use namada_tx_prelude::*; #[transaction] - fn apply_tx(_tx_data: Vec) { + fn apply_tx(ctx: &mut Ctx, _tx_data: Vec) -> TxResult { // governance - let target_key = storage::get_min_proposal_grace_epoch_key(); - write(&target_key.to_string(), 9_u64); + let target_key = gov_storage::get_min_proposal_grace_epoch_key(); + ctx.write(&target_key, 9_u64)?; // treasury let target_key = treasury_storage::get_max_transferable_fund_key(); - write(&target_key.to_string(), token::Amount::whole(20_000)); + ctx.write(&target_key, token::Amount::whole(20_000))?; // parameters let target_key = parameters_storage::get_tx_whitelist_storage_key(); - write(&target_key.to_string(), vec!["hash"]); + ctx.write(&target_key, vec!["hash"])?; + Ok(()) } } /// A tx that attempts to read the given key from storage. 
#[cfg(feature = "tx_read_storage_key")] pub mod main { - use namada_vm_env::tx_prelude::*; + use namada_tx_prelude::*; #[transaction] - fn apply_tx(tx_data: Vec) { + fn apply_tx(ctx: &mut Ctx, tx_data: Vec) -> TxResult { // Allocates a memory of size given from the `tx_data (usize)` - let key = Key::try_from_slice(&tx_data[..]).unwrap(); + let key = storage::Key::try_from_slice(&tx_data[..]).unwrap(); log_string(format!("key {}", key)); - let _result: Vec = read(key.to_string()).unwrap(); + let _result: Vec = ctx.read(&key)?.unwrap(); + Ok(()) } } /// A tx that attempts to write arbitrary data to the given key #[cfg(feature = "tx_write_storage_key")] pub mod main { - use namada_vm_env::tx_prelude::*; + use namada_tx_prelude::*; const TX_NAME: &str = "tx_write"; @@ -81,14 +86,9 @@ pub mod main { } #[transaction] - fn apply_tx(tx_data: Vec) { - let signed = match SignedTxData::try_from_slice(&tx_data[..]) { - Ok(signed) => { - log("got signed data"); - signed - } - Err(error) => fatal("getting signed data", error), - }; + fn apply_tx(ctx: &mut Ctx, tx_data: Vec) -> TxResult { + let signed = SignedTxData::try_from_slice(&tx_data[..]) + .wrap_err("failed to decode SignedTxData")?; let data = match signed.data { Some(data) => { log(&format!("got data ({} bytes)", data.len())); @@ -100,15 +100,15 @@ pub mod main { }; let key = match String::from_utf8(data) { Ok(key) => { + let key = storage::Key::parse(key).unwrap(); log(&format!("parsed key from data: {}", key)); key } Err(error) => fatal("getting key", error), }; - let val: Option> = read(key.as_str()); + let val: Option = ctx.read(&key)?; match val { Some(val) => { - let val = String::from_utf8(val).unwrap(); log(&format!("preexisting val is {}", val)); } None => { @@ -119,7 +119,8 @@ pub mod main { "attempting to write new value {} to key {}", ARBITRARY_VALUE, key )); - write(key.as_str(), ARBITRARY_VALUE); + ctx.write(&key, ARBITRARY_VALUE)?; + Ok(()) } } @@ -128,11 +129,12 @@ pub mod main { /// token's VP. #[cfg(feature = "tx_mint_tokens")] pub mod main { - use namada_vm_env::tx_prelude::*; + use namada_tx_prelude::*; #[transaction] - fn apply_tx(tx_data: Vec) { - let signed = SignedTxData::try_from_slice(&tx_data[..]).unwrap(); + fn apply_tx(ctx: &mut Ctx, tx_data: Vec) -> TxResult { + let signed = SignedTxData::try_from_slice(&tx_data[..]) + .wrap_err("failed to decode SignedTxData")?; let transfer = token::Transfer::try_from_slice(&signed.data.unwrap()[..]).unwrap(); log_string(format!("apply_tx called to mint tokens: {:#?}", transfer)); @@ -144,41 +146,44 @@ pub mod main { } = transfer; let target_key = token::balance_key(&token, &target); let mut target_bal: token::Amount = - read(&target_key.to_string()).unwrap_or_default(); + ctx.read(&target_key)?.unwrap_or_default(); target_bal.receive(&amount); - write(&target_key.to_string(), target_bal); + ctx.write(&target_key, target_bal)?; + Ok(()) } } /// A VP that always returns `true`. #[cfg(feature = "vp_always_true")] pub mod main { - use namada_vm_env::vp_prelude::*; + use namada_vp_prelude::*; #[validity_predicate] fn validate_tx( + _ctx: &Ctx, _tx_data: Vec, _addr: Address, _keys_changed: BTreeSet, _verifiers: BTreeSet
, - ) -> bool { - true + ) -> VpResult { + accept() } } /// A VP that always returns `false`. #[cfg(feature = "vp_always_false")] pub mod main { - use namada_vm_env::vp_prelude::*; + use namada_vp_prelude::*; #[validity_predicate] fn validate_tx( + _ctx: &Ctx, _tx_data: Vec, _addr: Address, _keys_changed: BTreeSet, _verifiers: BTreeSet
, - ) -> bool { - false + ) -> VpResult { + reject() } } @@ -186,19 +191,20 @@ pub mod main { /// of `eval`. #[cfg(feature = "vp_eval")] pub mod main { - use namada_vm_env::vp_prelude::*; + use namada_vp_prelude::*; #[validity_predicate] fn validate_tx( + ctx: &Ctx, tx_data: Vec, _addr: Address, _keys_changed: BTreeSet, _verifiers: BTreeSet
, - ) -> bool { + ) -> VpResult { use validity_predicate::EvalVp; let EvalVp { vp_code, input }: EvalVp = EvalVp::try_from_slice(&tx_data[..]).unwrap(); - eval(vp_code, input) + ctx.eval(vp_code, input) } } @@ -206,21 +212,22 @@ pub mod main { // Returns `true`, if the allocation is within memory limits. #[cfg(feature = "vp_memory_limit")] pub mod main { - use namada_vm_env::vp_prelude::*; + use namada_vp_prelude::*; #[validity_predicate] fn validate_tx( + _ctx: &Ctx, tx_data: Vec, _addr: Address, _keys_changed: BTreeSet, _verifiers: BTreeSet
, - ) -> bool { + ) -> VpResult { let len = usize::try_from_slice(&tx_data[..]).unwrap(); log_string(format!("allocate len {}", len)); let bytes: Vec = vec![6_u8; len]; // use the variable to prevent it from compiler optimizing it away log_string(format!("{:?}", &bytes[..8])); - true + accept() } } @@ -228,19 +235,20 @@ pub mod main { /// execution). Returns `true`, if the allocation is within memory limits. #[cfg(feature = "vp_read_storage_key")] pub mod main { - use namada_vm_env::vp_prelude::*; + use namada_vp_prelude::*; #[validity_predicate] fn validate_tx( + ctx: &Ctx, tx_data: Vec, _addr: Address, _keys_changed: BTreeSet, _verifiers: BTreeSet
, -) -> bool { + ) -> VpResult { // Reads the value at the storage key given in the `tx_data` - let key = Key::try_from_slice(&tx_data[..]).unwrap(); + let key = storage::Key::try_from_slice(&tx_data[..]).unwrap(); log_string(format!("key {}", key)); - let _result: Vec = read_pre(key.to_string()).unwrap(); - true + let _result: Vec = ctx.read_pre(&key)?.unwrap(); + accept() } }
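For orientation, the host-interface pattern that every WASM entry point above migrates to can be summarized in one minimal, self-contained sketch. This is illustrative only and not part of the changeset: the `counter` storage key and the increment logic are invented for this example, while the `#[transaction]` attribute, the `TxResult` return type, `storage::Key::parse`, and the `ctx.read`/`ctx.write` methods follow the signatures visible in the hunks above.

    use namada_tx_prelude::*;

    #[transaction]
    fn apply_tx(ctx: &mut Ctx, _tx_data: Vec<u8>) -> TxResult {
        // Hypothetical key, for illustration only; real txs derive keys with
        // helpers such as `token::balance_key` or `key::pk_key`.
        let key = storage::Key::parse("counter".to_string()).unwrap();
        // Reads Borsh-decode the stored value; storage errors propagate with
        // `?` instead of aborting the tx via `unwrap` as the old free
        // functions did.
        let current: u64 = ctx.read(&key)?.unwrap_or_default();
        // Writes Borsh-encode the value and may likewise fail gracefully.
        ctx.write(&key, current + 1)?;
        Ok(())
    }

Validity predicates follow the same shape, except that they take `ctx: &Ctx` by shared reference and return `VpResult` via `accept()`/`reject()`.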