diff --git a/.changelog/unreleased/improvements/553-rpc-queries-router.md b/.changelog/unreleased/improvements/553-rpc-queries-router.md new file mode 100644 index 0000000000..877ac77c20 --- /dev/null +++ b/.changelog/unreleased/improvements/553-rpc-queries-router.md @@ -0,0 +1,4 @@ +- Replace the handcrafted RPC paths with a new `router!` macro RPC queries + definition that handles dynamic path matching, type-safe handler function + dispatch and also generates type-safe client methods for the queries. + ([#553](https://github.com/anoma/namada/pull/553)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/569-rpc-sub-shell.md b/.changelog/unreleased/improvements/569-rpc-sub-shell.md new file mode 100644 index 0000000000..96f0a8bd3b --- /dev/null +++ b/.changelog/unreleased/improvements/569-rpc-sub-shell.md @@ -0,0 +1,2 @@ +- Move all shell RPC endpoints under the /shell path. This is a breaking change + to RPC consumers. ([#569](https://github.com/anoma/namada/pull/569)) \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 34ab8ab550..445e824d8e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2917,6 +2917,7 @@ dependencies = [ "ark-ec", "ark-serialize", "assert_matches", + "async-trait", "bech32", "borsh", "byte-unit", @@ -2938,6 +2939,7 @@ dependencies = [ "loupe", "namada_proof_of_stake", "parity-wasm", + "paste", "pretty_assertions", "proptest", "prost", @@ -2945,6 +2947,7 @@ dependencies = [ "pwasm-utils", "rand 0.8.5", "rand_core 0.6.4", + "rayon", "rust_decimal", "serde 1.0.145", "serde_json", @@ -2955,8 +2958,11 @@ dependencies = [ "tendermint 0.23.6", "tendermint-proto 0.23.5", "tendermint-proto 0.23.6", + "tendermint-rpc 0.23.5", + "tendermint-rpc 0.23.6", "test-log", "thiserror", + "tokio", "tonic-build", "tracing 0.1.37", "tracing-subscriber 0.3.16", @@ -4083,9 +4089,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.5.1" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90" +checksum = "bd99e5772ead8baa5215278c9b15bf92087709e9c1b2d1f97cdb5a183c933a7d" dependencies = [ "autocfg 1.1.0", "crossbeam-deque", diff --git a/apps/Cargo.toml b/apps/Cargo.toml index 45cdb9aa5e..50c1f5b065 100644 --- a/apps/Cargo.toml +++ b/apps/Cargo.toml @@ -65,7 +65,7 @@ abciplus = [ ] [dependencies] -namada = {path = "../shared", features = ["wasm-runtime", "ferveo-tpke", "rand", "secp256k1-sign-verify"]} +namada = {path = "../shared", features = ["wasm-runtime", "ferveo-tpke", "rand", "tendermint-rpc", "secp256k1-sign-verify"]} ark-serialize = "0.3.0" ark-std = "0.3.0" # branch = "bat/arse-merkle-tree" @@ -104,7 +104,7 @@ prost = "0.9.0" prost-types = "0.9.0" rand = {version = "0.8", default-features = false} rand_core = {version = "0.6", default-features = false} -rayon = "=1.5.1" +rayon = "=1.5.3" regex = "1.4.5" reqwest = "0.11.4" rlimit = "0.5.4" diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index 6c1e3fb5f3..d3f1303f41 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -23,6 +23,7 @@ use namada::ledger::pos::types::{ use namada::ledger::pos::{ self, is_validator_slashes_key, BondId, Bonds, PosParams, Slash, Unbonds, }; +use namada::ledger::queries::{self, RPC}; use namada::types::address::Address; use namada::types::governance::{ OfflineProposal, OfflineVote, ProposalResult, ProposalVote, TallyResult, @@ -35,63 +36,33 @@ use namada::types::{address, storage, token}; 
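// --- Illustrative sketch, not part of the diff ---
// The hunks below replace hand-rolled `abci_query` calls with the type-safe
// client methods generated by the new `router!` macro. This hypothetical
// function shows the general shape of such a call, using only names that
// appear in this file (`TendermintAddress`, `HttpClient`, `RPC`, `Epoch`).
async fn example_query_epoch(ledger_address: TendermintAddress) -> Epoch {
    // Any `queries::Client` implementation works; the CLI reuses the
    // Tendermint HTTP client, as in `query_epoch` below.
    let client = HttpClient::new(ledger_address).unwrap();
    // `RPC.shell()` selects the sub-router mounted under the `/shell` prefix;
    // `epoch` is one of the generated client methods and returns a decoded
    // `Epoch` wrapped in a `Result`.
    RPC.shell().epoch(&client).await.unwrap()
}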
use crate::cli::{self, args, Context}; use crate::client::tendermint_rpc_types::TxResponse; -use crate::facade::tendermint::abci::Code; use crate::facade::tendermint_config::net::Address as TendermintAddress; use crate::facade::tendermint_rpc::error::Error as TError; use crate::facade::tendermint_rpc::query::Query; use crate::facade::tendermint_rpc::{ Client, HttpClient, Order, SubscriptionClient, WebSocketClient, }; -use crate::node::ledger::rpc::Path; /// Query the epoch of the last committed block pub async fn query_epoch(args: args::Query) -> Epoch { let client = HttpClient::new(args.ledger_address).unwrap(); - let path = Path::Epoch; - let data = vec![]; - let response = client - .abci_query(Some(path.into()), data, None, false) - .await - .unwrap(); - match response.code { - Code::Ok => match Epoch::try_from_slice(&response.value[..]) { - Ok(epoch) => { - println!("Last committed epoch: {}", epoch); - return epoch; - } - - Err(err) => { - eprintln!("Error decoding the epoch value: {}", err) - } - }, - Code::Err(err) => eprintln!( - "Error in the query {} (error code {})", - response.info, err - ), - } - cli::safe_exit(1) + let epoch = unwrap_client_response(RPC.shell().epoch(&client).await); + println!("Last committed epoch: {}", epoch); + epoch } /// Query the raw bytes of given storage key pub async fn query_raw_bytes(_ctx: Context, args: args::QueryRawBytes) { let client = HttpClient::new(args.query.ledger_address).unwrap(); - let path = Path::Value(args.storage_key); - let data = vec![]; - let response = client - .abci_query(Some(path.into()), data, None, false) - .await - .unwrap(); - match response.code { - Code::Ok => { - println!("{}", HEXLOWER.encode(&response.value)); - } - Code::Err(err) => { - eprintln!( - "Error in the query {} (error code {})", - response.info, err - ); - cli::safe_exit(1) - } + let response = unwrap_client_response( + RPC.shell() + .storage_value(&client, None, None, false, &args.storage_key) + .await, + ); + if !response.data.is_empty() { + println!("Found data: 0x{}", HEXLOWER.encode(&response.data)); + } else { + println!("No data found for key {}", args.storage_key); } } @@ -135,11 +106,9 @@ pub async fn query_balance(ctx: Context, args: args::QueryBalance) { let owner = ctx.get(&owner); for (token, _) in tokens { let prefix = token.to_db_key().into(); - let balances = query_storage_prefix::( - client.clone(), - prefix, - ) - .await; + let balances = + query_storage_prefix::(&client, &prefix) + .await; if let Some(balances) = balances { print_balances(&ctx, balances, &token, Some(&owner)); } @@ -149,7 +118,7 @@ pub async fn query_balance(ctx: Context, args: args::QueryBalance) { let token = ctx.get(&token); let prefix = token.to_db_key().into(); let balances = - query_storage_prefix::(client, prefix).await; + query_storage_prefix::(&client, &prefix).await; if let Some(balances) = balances { print_balances(&ctx, balances, &token, None); } @@ -158,8 +127,7 @@ pub async fn query_balance(ctx: Context, args: args::QueryBalance) { for (token, _) in tokens { let key = token::balance_prefix(&token); let balances = - query_storage_prefix::(client.clone(), key) - .await; + query_storage_prefix::(&client, &key).await; if let Some(balances) = balances { print_balances(&ctx, balances, &token, None); } @@ -660,18 +628,14 @@ pub async fn query_bonds(ctx: Context, args: args::QueryBonds) { let owner = ctx.get(&owner); // Find owner's bonds to any validator let bonds_prefix = pos::bonds_for_source_prefix(&owner); - let bonds = query_storage_prefix::( - 
client.clone(), - bonds_prefix, - ) - .await; + let bonds = + query_storage_prefix::(&client, &bonds_prefix) + .await; // Find owner's unbonds to any validator let unbonds_prefix = pos::unbonds_for_source_prefix(&owner); - let unbonds = query_storage_prefix::( - client.clone(), - unbonds_prefix, - ) - .await; + let unbonds = + query_storage_prefix::(&client, &unbonds_prefix) + .await; let mut total: token::Amount = 0.into(); let mut total_active: token::Amount = 0.into(); @@ -780,18 +744,14 @@ pub async fn query_bonds(ctx: Context, args: args::QueryBonds) { (None, None) => { // Find all the bonds let bonds_prefix = pos::bonds_prefix(); - let bonds = query_storage_prefix::( - client.clone(), - bonds_prefix, - ) - .await; + let bonds = + query_storage_prefix::(&client, &bonds_prefix) + .await; // Find all the unbonds let unbonds_prefix = pos::unbonds_prefix(); - let unbonds = query_storage_prefix::( - client.clone(), - unbonds_prefix, - ) - .await; + let unbonds = + query_storage_prefix::(&client, &unbonds_prefix) + .await; let mut total: token::Amount = 0.into(); let mut total_active: token::Amount = 0.into(); @@ -1032,11 +992,9 @@ pub async fn query_slashes(ctx: Context, args: args::QuerySlashes) { None => { // Iterate slashes for all validators let slashes_prefix = pos::slashes_prefix(); - let slashes = query_storage_prefix::( - client.clone(), - slashes_prefix, - ) - .await; + let slashes = + query_storage_prefix::(&client, &slashes_prefix) + .await; match slashes { Some(slashes) => { @@ -1075,12 +1033,12 @@ pub async fn query_slashes(ctx: Context, args: args::QuerySlashes) { /// Dry run a transaction pub async fn dry_run_tx(ledger_address: &TendermintAddress, tx_bytes: Vec) { let client = HttpClient::new(ledger_address.clone()).unwrap(); - let path = Path::DryRunTx; - let response = client - .abci_query(Some(path.into()), tx_bytes, None, false) - .await - .unwrap(); - println!("{:#?}", response); + let (data, height, prove) = (Some(tx_bytes), None, false); + let result = unwrap_client_response( + RPC.shell().dry_run_tx(&client, data, height, prove).await, + ) + .data; + println!("Dry-run result: {}", result); } /// Get account's public key stored in its storage sub-space @@ -1113,7 +1071,7 @@ pub async fn is_delegator( let client = HttpClient::new(ledger_address).unwrap(); let bonds_prefix = pos::bonds_for_source_prefix(address); let bonds = - query_storage_prefix::(client.clone(), bonds_prefix).await; + query_storage_prefix::(&client, &bonds_prefix).await; bonds.is_some() && bonds.unwrap().count() > 0 } @@ -1123,8 +1081,7 @@ pub async fn is_delegator_at( epoch: Epoch, ) -> bool { let key = pos::bonds_for_source_prefix(address); - let bonds_iter = - query_storage_prefix::(client.clone(), key).await; + let bonds_iter = query_storage_prefix::(client, &key).await; if let Some(mut bonds) = bonds_iter { bonds.any(|(_, bond)| bond.get(epoch).is_some()) } else { @@ -1144,7 +1101,7 @@ pub async fn known_address( Address::Established(_) => { // Established account exists if it has a VP let key = storage::Key::validity_predicate(address); - query_has_storage_key(client, key).await + query_has_storage_key(&client, &key).await } Address::Implicit(_) | Address::Internal(_) => true, } @@ -1293,100 +1250,77 @@ pub async fn query_storage_value( where T: BorshDeserialize, { - let path = Path::Value(key.to_owned()); - let data = vec![]; - let response = client - .abci_query(Some(path.into()), data, None, false) - .await - .unwrap(); - match response.code { - Code::Ok => match 
T::try_from_slice(&response.value[..]) { - Ok(value) => return Some(value), - Err(err) => eprintln!("Error decoding the value: {}", err), - }, - Code::Err(err) => { - if err == 1 { - return None; - } else { - eprintln!( - "Error in the query {} (error code {})", - response.info, err - ) - } - } + // In case `T` is a unit (only thing that encodes to 0 bytes), we have to + // use `storage_has_key` instead of `storage_value`, because `storage_value` + // returns 0 bytes when the key is not found. + let maybe_unit = T::try_from_slice(&[]); + if let Ok(unit) = maybe_unit { + return if unwrap_client_response( + RPC.shell().storage_has_key(client, key).await, + ) { + Some(unit) + } else { + None + }; } - cli::safe_exit(1) + + let response = unwrap_client_response( + RPC.shell() + .storage_value(client, None, None, false, key) + .await, + ); + if response.data.is_empty() { + return None; + } + T::try_from_slice(&response.data[..]) + .map(Some) + .unwrap_or_else(|err| { + eprintln!("Error decoding the value: {}", err); + cli::safe_exit(1) + }) } /// Query a range of storage values with a matching prefix and decode them with /// [`BorshDeserialize`]. Returns an iterator of the storage keys paired with /// their associated values. pub async fn query_storage_prefix( - client: HttpClient, - key: storage::Key, + client: &HttpClient, + key: &storage::Key, ) -> Option> where T: BorshDeserialize, { - let path = Path::Prefix(key); - let data = vec![]; - let response = client - .abci_query(Some(path.into()), data, None, false) - .await - .unwrap(); - match response.code { - Code::Ok => { - match Vec::::try_from_slice(&response.value[..]) { - Ok(values) => { - let decode = |PrefixValue { key, value }: PrefixValue| { - match T::try_from_slice(&value[..]) { - Err(_) => None, - Ok(value) => Some((key, value)), - } - }; - return Some(values.into_iter().filter_map(decode)); - } - Err(err) => eprintln!("Error decoding the values: {}", err), - } - } - Code::Err(err) => { - if err == 1 { - return None; - } else { + let values = unwrap_client_response( + RPC.shell() + .storage_prefix(client, None, None, false, key) + .await, + ); + let decode = + |PrefixValue { key, value }: PrefixValue| match T::try_from_slice( + &value[..], + ) { + Err(err) => { eprintln!( - "Error in the query {} (error code {})", - response.info, err - ) + "Skipping a value for key {}. Error in decoding: {}", + key, err + ); + None } - } + Ok(value) => Some((key, value)), + }; + if values.data.is_empty() { + None + } else { + Some(values.data.into_iter().filter_map(decode)) } - cli::safe_exit(1) } /// Query to check if the given storage key exists. 
pub async fn query_has_storage_key( - client: HttpClient, - key: storage::Key, + client: &HttpClient, + key: &storage::Key, ) -> bool { - let path = Path::HasKey(key); - let data = vec![]; - let response = client - .abci_query(Some(path.into()), data, None, false) - .await - .unwrap(); - match response.code { - Code::Ok => match bool::try_from_slice(&response.value[..]) { - Ok(value) => return value, - Err(err) => eprintln!("Error decoding the value: {}", err), - }, - Code::Err(err) => { - eprintln!( - "Error in the query {} (error code {})", - response.info, err - ) - } - } - cli::safe_exit(1) + unwrap_client_response(RPC.shell().storage_has_key(client, key).await) } /// Represents a query for an event pertaining to the specified transaction @@ -1556,8 +1490,7 @@ pub async fn get_proposal_votes( let vote_prefix_key = gov_storage::get_proposal_vote_prefix_key(proposal_id); let vote_iter = - query_storage_prefix::(client.clone(), vote_prefix_key) - .await; + query_storage_prefix::(client, &vote_prefix_key).await; let mut yay_validators: HashMap = HashMap::new(); let mut yay_delegators: HashMap> = @@ -1662,7 +1595,7 @@ pub async fn get_proposal_offline_votes( { let key = pos::bonds_for_source_prefix(&proposal_vote.address); let bonds_iter = - query_storage_prefix::(client.clone(), key).await; + query_storage_prefix::(client, &key).await; if let Some(bonds) = bonds_iter { for (key, epoched_bonds) in bonds { // Look-up slashes for the validator in this key and @@ -1907,8 +1840,7 @@ pub async fn get_delegators_delegation( _epoch: Epoch, ) -> Vec
<Address> { let key = pos::bonds_for_source_prefix(address); - let bonds_iter = - query_storage_prefix::<pos::Bonds>(client.clone(), key).await; + let bonds_iter = query_storage_prefix::<pos::Bonds>(client, &key).await; let mut delegation_addresses: Vec<Address>
= Vec::new(); if let Some(bonds) = bonds_iter { @@ -1970,3 +1902,11 @@ fn lookup_alias(ctx: &Context, addr: &Address) -> String { None => format!("{}", addr), } } + +/// A helper to unwrap client's response. Will shut down process on error. +fn unwrap_client_response(response: Result) -> T { + response.unwrap_or_else(|err| { + eprintln!("Error in the query {}", err); + cli::safe_exit(1) + }) +} diff --git a/apps/src/lib/node/ledger/mod.rs b/apps/src/lib/node/ledger/mod.rs index dc0dfc184c..9796c18fce 100644 --- a/apps/src/lib/node/ledger/mod.rs +++ b/apps/src/lib/node/ledger/mod.rs @@ -1,8 +1,6 @@ mod abortable; mod broadcaster; pub mod events; -pub mod protocol; -pub mod rpc; mod shell; mod shims; pub mod storage; diff --git a/apps/src/lib/node/ledger/rpc.rs b/apps/src/lib/node/ledger/rpc.rs deleted file mode 100644 index ad3d2f5fcb..0000000000 --- a/apps/src/lib/node/ledger/rpc.rs +++ /dev/null @@ -1,104 +0,0 @@ -//! RPC endpoint is used for ledger state queries - -use std::fmt::Display; -use std::str::FromStr; - -use namada::types::address::Address; -use namada::types::storage; -use thiserror::Error; - -use crate::facade::tendermint::abci::Path as AbciPath; - -/// RPC query path -#[derive(Debug, Clone)] -pub enum Path { - /// Dry run a transaction - DryRunTx, - /// Epoch of the last committed block - Epoch, - /// Read a storage value with exact storage key - Value(storage::Key), - /// Read a range of storage values with a matching key prefix - Prefix(storage::Key), - /// Check if the given storage key exists - HasKey(storage::Key), -} - -#[derive(Debug, Clone)] -pub struct BalanceQuery { - #[allow(dead_code)] - owner: Option
, - #[allow(dead_code)] - token: Option
, -} - -const DRY_RUN_TX_PATH: &str = "dry_run_tx"; -const EPOCH_PATH: &str = "epoch"; -const VALUE_PREFIX: &str = "value"; -const PREFIX_PREFIX: &str = "prefix"; -const HAS_KEY_PREFIX: &str = "has_key"; - -impl Display for Path { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Path::DryRunTx => write!(f, "{}", DRY_RUN_TX_PATH), - Path::Epoch => write!(f, "{}", EPOCH_PATH), - Path::Value(storage_key) => { - write!(f, "{}/{}", VALUE_PREFIX, storage_key) - } - Path::Prefix(storage_key) => { - write!(f, "{}/{}", PREFIX_PREFIX, storage_key) - } - Path::HasKey(storage_key) => { - write!(f, "{}/{}", HAS_KEY_PREFIX, storage_key) - } - } - } -} - -impl FromStr for Path { - type Err = PathParseError; - - fn from_str(s: &str) -> Result { - match s { - DRY_RUN_TX_PATH => Ok(Self::DryRunTx), - EPOCH_PATH => Ok(Self::Epoch), - _ => match s.split_once('/') { - Some((VALUE_PREFIX, storage_key)) => { - let key = storage::Key::parse(storage_key) - .map_err(PathParseError::InvalidStorageKey)?; - Ok(Self::Value(key)) - } - Some((PREFIX_PREFIX, storage_key)) => { - let key = storage::Key::parse(storage_key) - .map_err(PathParseError::InvalidStorageKey)?; - Ok(Self::Prefix(key)) - } - Some((HAS_KEY_PREFIX, storage_key)) => { - let key = storage::Key::parse(storage_key) - .map_err(PathParseError::InvalidStorageKey)?; - Ok(Self::HasKey(key)) - } - _ => Err(PathParseError::InvalidPath(s.to_string())), - }, - } - } -} - -impl From for AbciPath { - fn from(path: Path) -> Self { - let path = path.to_string(); - // TODO: update in tendermint-rs to allow to construct this from owned - // string. It's what `from_str` does anyway - AbciPath::from_str(&path).unwrap() - } -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum PathParseError { - #[error("Unrecognized query path: {0}")] - InvalidPath(String), - #[error("Invalid storage key: {0}")] - InvalidStorageKey(storage::Error), -} diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 2080b8d23d..92e06c96b9 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -1,5 +1,6 @@ //! 
Implementation of the `FinalizeBlock` ABCI++ method for the Shell +use namada::ledger::protocol; use namada::types::storage::{BlockHash, Header}; use super::governance::execute_governance_proposals; diff --git a/apps/src/lib/node/ledger/shell/governance.rs b/apps/src/lib/node/ledger/shell/governance.rs index 979c3c0df1..5ddcaf1673 100644 --- a/apps/src/lib/node/ledger/shell/governance.rs +++ b/apps/src/lib/node/ledger/shell/governance.rs @@ -3,6 +3,7 @@ use namada::ledger::governance::utils::{ compute_tally, get_proposal_votes, ProposalEvent, }; use namada::ledger::governance::vp::ADDRESS as gov_address; +use namada::ledger::protocol; use namada::ledger::slash_fund::ADDRESS as slash_fund_address; use namada::ledger::storage::types::encode; use namada::ledger::storage::{DBIter, StorageHasher, DB}; diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 28167d4eba..52026f40d1 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -18,7 +18,6 @@ use std::mem; use std::path::{Path, PathBuf}; #[allow(unused_imports)] use std::rc::Rc; -use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; use namada::ledger::gas::BlockGasMeter; @@ -30,7 +29,7 @@ use namada::ledger::storage::write_log::WriteLog; use namada::ledger::storage::{ DBIter, Sha256Hasher, Storage, StorageHasher, DB, }; -use namada::ledger::{ibc, pos}; +use namada::ledger::{ibc, pos, protocol}; use namada::proto::{self, Tx}; use namada::types::chain::ChainId; use namada::types::key::*; @@ -48,7 +47,6 @@ use num_traits::{FromPrimitive, ToPrimitive}; use thiserror::Error; use tokio::sync::mpsc::UnboundedSender; -use super::rpc; use crate::config::{genesis, TendermintMode}; #[cfg(feature = "abcipp")] use crate::facade::tendermint_proto::abci::response_verify_vote_extension::VerifyStatus; @@ -60,7 +58,7 @@ use crate::facade::tower_abci::{request, response}; use crate::node::ledger::events::Event; use crate::node::ledger::shims::abcipp_shim_types::shim; use crate::node::ledger::shims::abcipp_shim_types::shim::response::TxResult; -use crate::node::ledger::{protocol, storage, tendermint_node}; +use crate::node::ledger::{storage, tendermint_node}; #[allow(unused_imports)] use crate::wallet::ValidatorData; use crate::{config, wallet}; @@ -559,43 +557,6 @@ where response } - /// Simulate validation and application of a transaction. - fn dry_run_tx(&self, tx_bytes: &[u8]) -> response::Query { - let mut response = response::Query::default(); - let mut gas_meter = BlockGasMeter::default(); - let mut write_log = WriteLog::default(); - let mut vp_wasm_cache = self.vp_wasm_cache.read_only(); - let mut tx_wasm_cache = self.tx_wasm_cache.read_only(); - match Tx::try_from(tx_bytes) { - Ok(tx) => { - let tx = TxType::Decrypted(DecryptedTx::Decrypted(tx)); - match protocol::apply_tx( - tx, - tx_bytes.len(), - &mut gas_meter, - &mut write_log, - &self.storage, - &mut vp_wasm_cache, - &mut tx_wasm_cache, - ) - .map_err(Error::TxApply) - { - Ok(result) => response.info = result.to_string(), - Err(error) => { - response.code = 1; - response.log = format!("{}", error); - } - } - response - } - Err(err) => { - response.code = 1; - response.log = format!("{}", Error::TxDecoding(err)); - response - } - } - } - /// Lookup a validator's keypair for their established account from their /// wallet. 
If the node is not validator, this function returns None #[allow(dead_code)] diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 5572e00495..5fa3e92763 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -150,9 +150,8 @@ where } } else { // check that the fee payer has sufficient balance - let balance = self - .get_balance(&tx.fee.token, &tx.fee_payer()) - .unwrap_or_default(); + let balance = + self.get_balance(&tx.fee.token, &tx.fee_payer()); if tx.fee.amount <= balance { TxResult { diff --git a/apps/src/lib/node/ledger/shell/queries.rs b/apps/src/lib/node/ledger/shell/queries.rs index 53587d5ebf..e53ea91417 100644 --- a/apps/src/lib/node/ledger/shell/queries.rs +++ b/apps/src/lib/node/ledger/shell/queries.rs @@ -2,14 +2,13 @@ use borsh::{BorshDeserialize, BorshSerialize}; use ferveo_common::TendermintValidator; +use namada::ledger::queries::{RequestCtx, ResponseQuery}; +use namada::ledger::storage_api; use namada::types::address::Address; -use namada::types::key; use namada::types::key::dkg_session_keys::DkgPublicKey; -use namada::types::storage::{Key, PrefixValue}; -use namada::types::token::{self, Amount}; +use namada::types::{key, token}; use super::*; -use crate::facade::tendermint_proto::crypto::{ProofOp, ProofOps}; use crate::node::ledger::response; impl Shell @@ -22,39 +21,39 @@ where /// the default if `path` is not a supported string. /// INVARIANT: This method must be stateless. pub fn query(&self, query: request::Query) -> response::Query { - use rpc::Path; - let height = match query.height { - 0 => self.storage.get_block_height().0, - 1.. => BlockHeight(query.height as u64), - _ => { + let ctx = RequestCtx { + storage: &self.storage, + vp_wasm_cache: self.vp_wasm_cache.read_only(), + tx_wasm_cache: self.tx_wasm_cache.read_only(), + }; + + // Convert request to domain-type + let request = match namada::ledger::queries::RequestQuery::try_from_tm( + &self.storage, + query, + ) { + Ok(request) => request, + Err(err) => { return response::Query { code: 1, - info: format!( - "The query height is invalid: {}", - query.height - ), + info: format!("Unexpected query: {}", err), ..Default::default() }; } }; - match Path::from_str(&query.path) { - Ok(path) => match path { - Path::DryRunTx => self.dry_run_tx(&query.data), - Path::Epoch => { - let (epoch, _gas) = self.storage.get_last_epoch(); - let value = namada::ledger::storage::types::encode(&epoch); - response::Query { - value, - ..Default::default() - } - } - Path::Value(storage_key) => { - self.read_storage_value(&storage_key, height, query.prove) - } - Path::Prefix(storage_key) => { - self.read_storage_prefix(&storage_key, height, query.prove) - } - Path::HasKey(storage_key) => self.has_storage_key(&storage_key), + + // Invoke the root RPC handler - returns borsh-encoded data on success + let result = namada::ledger::queries::handle_path(ctx, &request); + match result { + Ok(ResponseQuery { + data, + info, + proof_ops, + }) => response::Query { + value: data, + info, + proof_ops, + ..Default::default() }, Err(err) => response::Query { code: 1, @@ -70,205 +69,16 @@ where &self, token: &Address, owner: &Address, - ) -> std::result::Result { - let height = self.storage.get_block_height().0; - let query_resp = self.read_storage_value( + ) -> token::Amount { + let balance = storage_api::StorageRead::read( + &self.storage, &token::balance_key(token, owner), - height, - false, ); - if 
query_resp.code != 0 { - Err(format!( - "Unable to read token {} balance of the given address {}", - token, owner - )) - } else { - BorshDeserialize::try_from_slice(&query_resp.value[..]).map_err( - |_| { - "Unable to deserialize the balance of the given address" - .into() - }, - ) - } - } - - /// Query to read a value from storage - pub fn read_storage_value( - &self, - key: &Key, - height: BlockHeight, - is_proven: bool, - ) -> response::Query { - match self.storage.read_with_height(key, height) { - Ok((Some(value), _gas)) => { - let proof_ops = if is_proven { - match self.storage.get_existence_proof( - key, - value.clone().into(), - height, - ) { - Ok(proof) => Some(proof.into()), - Err(err) => { - return response::Query { - code: 2, - info: format!("Storage error: {}", err), - ..Default::default() - }; - } - } - } else { - None - }; - response::Query { - value, - proof_ops, - ..Default::default() - } - } - Ok((None, _gas)) => { - let proof_ops = if is_proven { - match self.storage.get_non_existence_proof(key, height) { - Ok(proof) => Some(proof.into()), - Err(err) => { - return response::Query { - code: 2, - info: format!("Storage error: {}", err), - ..Default::default() - }; - } - } - } else { - None - }; - response::Query { - code: 1, - info: format!("No value found for key: {}", key), - proof_ops, - ..Default::default() - } - } - Err(err) => response::Query { - code: 2, - info: format!("Storage error: {}", err), - ..Default::default() - }, - } - } - - /// Query to read a range of values from storage with a matching prefix. The - /// value in successful response is a [`Vec`] encoded with - /// [`BorshSerialize`]. - pub fn read_storage_prefix( - &self, - key: &Key, - height: BlockHeight, - is_proven: bool, - ) -> response::Query { - if height != self.storage.get_block_height().0 { - return response::Query { - code: 2, - info: format!( - "Prefix read works with only the latest height: height {}", - height - ), - ..Default::default() - }; - } - let (iter, _gas) = self.storage.iter_prefix(key); - let mut iter = iter.peekable(); - if iter.peek().is_none() { - response::Query { - code: 1, - info: format!("No value found for key: {}", key), - ..Default::default() - } - } else { - let values: std::result::Result< - Vec, - namada::types::storage::Error, - > = iter - .map(|(key, value, _gas)| { - let key = Key::parse(key)?; - Ok(PrefixValue { key, value }) - }) - .collect(); - match values { - Ok(values) => { - let proof_ops = if is_proven { - let mut ops = vec![]; - for PrefixValue { key, value } in &values { - match self.storage.get_existence_proof( - key, - value.clone().into(), - height, - ) { - Ok(p) => { - let mut cur_ops: Vec = p - .ops - .into_iter() - .map(|op| { - #[cfg(feature = "abcipp")] - { - ProofOp { - r#type: op.field_type, - key: op.key, - data: op.data, - } - } - #[cfg(not(feature = "abcipp"))] - { - op.into() - } - }) - .collect(); - ops.append(&mut cur_ops); - } - Err(err) => { - return response::Query { - code: 2, - info: format!("Storage error: {}", err), - ..Default::default() - }; - } - } - } - // ops is not empty in this case - Some(ProofOps { ops }) - } else { - None - }; - let value = values.try_to_vec().unwrap(); - response::Query { - value, - proof_ops, - ..Default::default() - } - } - Err(err) => response::Query { - code: 1, - info: format!( - "Error parsing a storage key {}: {}", - key, err - ), - ..Default::default() - }, - } - } - } - - /// Query to check if a storage key exists. 
- fn has_storage_key(&self, key: &Key) -> response::Query { - match self.storage.has_key(key) { - Ok((has_key, _gas)) => response::Query { - value: has_key.try_to_vec().unwrap(), - ..Default::default() - }, - Err(err) => response::Query { - code: 2, - info: format!("Storage error: {}", err), - ..Default::default() - }, - } + // Storage read must not fail, but there might be no value, in which + // case default (0) is returned + balance + .expect("Storage read in the protocol must not fail") + .unwrap_or_default() } /// Lookup data about a validator from their protocol signing key diff --git a/shared/Cargo.toml b/shared/Cargo.toml index 6469694ea9..d05e002230 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -27,6 +27,7 @@ ibc-mocks-abcipp = [ ] # for integration tests and test utilies testing = [ + "async-client", "proptest", "rand", "rand_core", @@ -37,6 +38,7 @@ wasm-runtime = [ "loupe", "parity-wasm", "pwasm-utils", + "rayon", "wasmer-cache", "wasmer-compiler-singlepass", "wasmer-engine-dylib", @@ -49,6 +51,15 @@ wasm-runtime = [ secp256k1-sign-verify = [ "libsecp256k1/hmac", ] +# Enable queries support for an async client +async-client = [ + "async-trait", +] +# tendermint-rpc support +tendermint-rpc = [ + "async-client", + "dep:tendermint-rpc", +] abcipp = [ "ibc-proto-abcipp", @@ -72,6 +83,7 @@ ark-serialize = "0.3" # We switch off "blake2b" because it cannot be compiled to wasm # branch = "bat/arse-merkle-tree" arse-merkle-tree = {package = "sparse-merkle-tree", git = "https://github.com/heliaxdev/sparse-merkle-tree", rev = "04ad1eeb28901b57a7599bbe433b3822965dabe8", default-features = false, features = ["std", "borsh"]} +async-trait = {version = "0.1.51", optional = true} bech32 = "0.8.0" borsh = "0.9.0" chrono = {version = "0.4.22", default-features = false, features = ["clock", "std"]} @@ -93,6 +105,7 @@ itertools = "0.10.0" loupe = {version = "0.1.3", optional = true} libsecp256k1 = {git = "https://github.com/heliaxdev/libsecp256k1", rev = "bbb3bd44a49db361f21d9db80f9a087c194c0ae9", default-features = false, features = ["std", "static-context"]} parity-wasm = {version = "0.42.2", optional = true} +paste = "1.0.9" # A fork with state machine testing proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm", optional = true} prost = "0.9.0" @@ -101,6 +114,7 @@ pwasm-utils = {version = "0.18.0", optional = true} rand = {version = "0.8", optional = true} # TODO proptest rexports the RngCore trait but the re-implementations only work for version `0.8`. 
*sigh* rand_core = {version = "0.6", optional = true} +rayon = {version = "=1.5.3", optional = true} rust_decimal = "1.14.3" serde = {version = "1.0.125", features = ["derive"]} serde_json = "1.0.62" @@ -109,8 +123,10 @@ sha2 = "0.9.3" tempfile = {version = "3.2.0", optional = true} # temporarily using fork work-around for https://github.com/informalsystems/tendermint-rs/issues/971 tendermint-abcipp = {package = "tendermint", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", optional = true} +tendermint-rpc-abcipp = {package = "tendermint-rpc", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", features = ["http-client"], optional = true} tendermint-proto-abcipp = {package = "tendermint-proto", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", optional = true} tendermint = {version = "0.23.6", optional = true} +tendermint-rpc = {version = "0.23.6", features = ["http-client"], optional = true} tendermint-proto = {version = "0.23.6", optional = true} thiserror = "1.0.30" tracing = "0.1.30" @@ -125,12 +141,14 @@ zeroize = "1.5.5" [dev-dependencies] assert_matches = "1.5.0" +async-trait = {version = "0.1.51"} byte-unit = "4.0.13" libsecp256k1 = {git = "https://github.com/heliaxdev/libsecp256k1", rev = "bbb3bd44a49db361f21d9db80f9a087c194c0ae9"} pretty_assertions = "0.7.2" # A fork with state machine testing proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm"} test-log = {version = "0.2.7", default-features = false, features = ["trace"]} +tokio = {version = "1.8.2", default-features = false, features = ["rt", "macros"]} tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} [build-dependencies] diff --git a/shared/src/ledger/mod.rs b/shared/src/ledger/mod.rs index ef92b1e2d9..cbe2528b76 100644 --- a/shared/src/ledger/mod.rs +++ b/shared/src/ledger/mod.rs @@ -7,6 +7,9 @@ pub mod ibc; pub mod native_vp; pub mod parameters; pub mod pos; +#[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] +pub mod protocol; +pub mod queries; pub mod slash_fund; pub mod storage; pub mod storage_api; diff --git a/apps/src/lib/node/ledger/protocol/mod.rs b/shared/src/ledger/protocol/mod.rs similarity index 92% rename from apps/src/lib/node/ledger/protocol/mod.rs rename to shared/src/ledger/protocol/mod.rs index ed776fe21a..86b2a30290 100644 --- a/apps/src/lib/node/ledger/protocol/mod.rs +++ b/shared/src/ledger/protocol/mod.rs @@ -2,29 +2,31 @@ use std::collections::BTreeSet; use std::panic; -use namada::ledger::eth_bridge::vp::EthBridge; -use namada::ledger::gas::{self, BlockGasMeter, VpGasMeter}; -use namada::ledger::governance::GovernanceVp; -use namada::ledger::ibc::vp::{Ibc, IbcToken}; -use namada::ledger::native_vp::{self, NativeVp}; -use namada::ledger::parameters::{self, ParametersVp}; -use namada::ledger::pos::{self, PosVP}; -use namada::ledger::slash_fund::SlashFundVp; -use namada::ledger::storage::write_log::WriteLog; -use namada::ledger::storage::{DBIter, Storage, StorageHasher, DB}; -use namada::proto::{self, Tx}; -use namada::types::address::{Address, InternalAddress}; -use namada::types::storage; -use namada::types::transaction::{DecryptedTx, TxResult, TxType, VpsResult}; -use namada::vm::wasm::{TxCache, VpCache}; -use namada::vm::{self, wasm, WasmCacheAccess}; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; 
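// --- Illustrative sketch, not part of the diff ---
// The `async-client` feature added to `shared/Cargo.toml` above exposes the
// `queries::Client` trait, so the generated client methods can run over any
// transport. Both implementations in this PR (`tendermint_rpc::HttpClient` in
// the `tm` module and the test-only `TestClient`, shown further below) supply
// just an `Error` type and a `request` method; this hypothetical transport
// mirrors that shape and is not part of the PR.
struct ExampleTransport;

#[async_trait::async_trait]
impl namada::ledger::queries::Client for ExampleTransport {
    type Error = std::io::Error;

    async fn request(
        &self,
        path: String,
        data: Option<Vec<u8>>,
        height: Option<namada::types::storage::BlockHeight>,
        prove: bool,
    ) -> Result<namada::ledger::queries::EncodedResponseQuery, Self::Error> {
        // Forward the query path, optional request data, optional block
        // height and proof flag to the backing transport and return its raw
        // (borsh-encoded) response; left unimplemented in this sketch.
        let _ = (path, data, height, prove);
        unimplemented!("hypothetical transport backend")
    }
}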
use thiserror::Error; +use crate::ledger::eth_bridge::vp::EthBridge; +use crate::ledger::gas::{self, BlockGasMeter, VpGasMeter}; +use crate::ledger::governance::GovernanceVp; +use crate::ledger::ibc::vp::{Ibc, IbcToken}; +use crate::ledger::native_vp::{self, NativeVp}; +use crate::ledger::parameters::{self, ParametersVp}; +use crate::ledger::pos::{self, PosVP}; +use crate::ledger::slash_fund::SlashFundVp; +use crate::ledger::storage::write_log::WriteLog; +use crate::ledger::storage::{DBIter, Storage, StorageHasher, DB}; +use crate::proto::{self, Tx}; +use crate::types::address::{Address, InternalAddress}; +use crate::types::storage; +use crate::types::transaction::{DecryptedTx, TxResult, TxType, VpsResult}; +use crate::vm::wasm::{TxCache, VpCache}; +use crate::vm::{self, wasm, WasmCacheAccess}; + +#[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { #[error("Storage error: {0}")] - StorageError(namada::ledger::storage::Error), + StorageError(crate::ledger::storage::Error), #[error("Error decoding a transaction from bytes: {0}")] TxDecodingError(proto::Error), #[error("Transaction runner error: {0}")] @@ -38,7 +40,7 @@ pub enum Error { #[error("The address {0} doesn't exist")] MissingAddress(Address), #[error("IBC native VP: {0}")] - IbcNativeVpError(namada::ledger::ibc::vp::Error), + IbcNativeVpError(crate::ledger::ibc::vp::Error), #[error("PoS native VP: {0}")] PosNativeVpError(pos::vp::Error), #[error("PoS native VP panicked")] @@ -46,17 +48,18 @@ pub enum Error { #[error("Parameters native VP: {0}")] ParametersNativeVpError(parameters::Error), #[error("IBC Token native VP: {0}")] - IbcTokenNativeVpError(namada::ledger::ibc::vp::IbcTokenError), + IbcTokenNativeVpError(crate::ledger::ibc::vp::IbcTokenError), #[error("Governance native VP error: {0}")] - GovernanceNativeVpError(namada::ledger::governance::vp::Error), + GovernanceNativeVpError(crate::ledger::governance::vp::Error), #[error("SlashFund native VP error: {0}")] - SlashFundNativeVpError(namada::ledger::slash_fund::Error), + SlashFundNativeVpError(crate::ledger::slash_fund::Error), #[error("Ethereum bridge native VP error: {0}")] - EthBridgeNativeVpError(namada::ledger::eth_bridge::vp::Error), + EthBridgeNativeVpError(crate::ledger::eth_bridge::vp::Error), #[error("Access to an internal address {0} is forbidden")] AccessForbidden(InternalAddress), } +/// Result of applying a transaction pub type Result = std::result::Result; /// Apply a given transaction diff --git a/shared/src/ledger/queries/mod.rs b/shared/src/ledger/queries/mod.rs new file mode 100644 index 0000000000..8b31376be4 --- /dev/null +++ b/shared/src/ledger/queries/mod.rs @@ -0,0 +1,231 @@ +//! Ledger read-only queries can be handled and dispatched via the [`RPC`] +//! defined via `router!` macro. + +use shell::{Shell, SHELL}; +#[cfg(any(test, feature = "async-client"))] +pub use types::Client; +pub use types::{ + EncodedResponseQuery, RequestCtx, RequestQuery, ResponseQuery, Router, +}; + +use super::storage::{DBIter, StorageHasher, DB}; +use super::storage_api; + +#[macro_use] +mod router; +mod shell; +mod types; + +// Most commonly expected patterns should be declared first +router! {RPC, + // Shell provides storage read access, block metadata and can dry-run a tx + ( "shell" ) = (sub SHELL), +} + +/// Handle RPC query request in the ledger. On success, returns response with +/// borsh-encoded data. 
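// --- Illustrative sketch, not part of the diff ---
// The `router!` invocation above only mounts the `SHELL` sub-router; the
// concrete shell routes live in `shell.rs`, which is not part of this
// excerpt. The router below is hypothetical and only illustrates the segment
// forms accepted by the macro in `router.rs`: string literals, typed
// `[arg: Type]` segments, `(sub _)` nesting and `(with_options _)` handlers.
router! {EXAMPLE,
    // literal segment dispatched to a plain handler; the ledger borsh-encodes
    // the returned `Epoch` and the generated client method decodes it
    ( "epoch" ) -> Epoch = example_epoch_handler,
    // typed dynamic segment; `storage::Key` may itself contain '/', which the
    // macro handles by matching the remainder of the path
    ( "value" / [storage_key: storage::Key] )
        -> Vec<u8> = (with_options example_storage_value_handler),
    // nested sub-router mounted under a literal prefix
    ( "example_sub" ) = (sub EXAMPLE_SUB),
}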
+pub fn handle_path( + ctx: RequestCtx<'_, D, H>, + request: &RequestQuery, +) -> storage_api::Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + RPC.handle(ctx, request) +} + +// Handler helpers: + +/// For queries that only support latest height, check that the given height is +/// not different from latest height, otherwise return an error. +pub fn require_latest_height( + ctx: &RequestCtx<'_, D, H>, + request: &RequestQuery, +) -> storage_api::Result<()> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + if request.height != ctx.storage.last_height { + return Err(storage_api::Error::new_const( + "This query doesn't support arbitrary block heights, only the \ + latest committed block height ('0' can be used as a special \ + value that means the latest block height)", + )); + } + Ok(()) +} + +/// For queries that do not support proofs, check that proof is not requested, +/// otherwise return an error. +pub fn require_no_proof(request: &RequestQuery) -> storage_api::Result<()> { + if request.prove { + return Err(storage_api::Error::new_const( + "This query doesn't support proofs", + )); + } + Ok(()) +} + +/// For queries that don't use request data, require that there are no data +/// attached. +pub fn require_no_data(request: &RequestQuery) -> storage_api::Result<()> { + if !request.data.is_empty() { + return Err(storage_api::Error::new_const( + "This query doesn't accept request data", + )); + } + Ok(()) +} + +#[cfg(any(test, feature = "tendermint-rpc"))] +/// Provides [`Client`] implementation for Tendermint RPC client +pub mod tm { + use thiserror::Error; + + use super::*; + use crate::types::storage::BlockHeight; + + #[allow(missing_docs)] + #[derive(Error, Debug)] + pub enum Error { + #[error("{0}")] + Tendermint(#[from] tendermint_rpc::Error), + #[error("Decoding error: {0}")] + Decoding(#[from] std::io::Error), + #[error("Info log: {0}, error code: {1}")] + Query(String, u32), + #[error("Invalid block height: {0} (overflown i64)")] + InvalidHeight(BlockHeight), + } + + #[async_trait::async_trait] + impl Client for tendermint_rpc::HttpClient { + type Error = Error; + + async fn request( + &self, + path: String, + data: Option>, + height: Option, + prove: bool, + ) -> Result { + let data = data.unwrap_or_default(); + let height = height + .map(|height| { + tendermint::block::Height::try_from(height.0) + .map_err(|_err| Error::InvalidHeight(height)) + }) + .transpose()?; + let response = tendermint_rpc::Client::abci_query( + self, + // TODO open the private Path constructor in tendermint-rpc + Some(std::str::FromStr::from_str(&path).unwrap()), + data, + height, + prove, + ) + .await?; + match response.code { + tendermint::abci::Code::Ok => Ok(EncodedResponseQuery { + data: response.value, + info: response.info, + proof_ops: response.proof.map(Into::into), + }), + tendermint::abci::Code::Err(code) => { + Err(Error::Query(response.info, code)) + } + } + } + } +} + +/// Queries testing helpers +#[cfg(any(test, feature = "testing"))] +mod testing { + use tempfile::TempDir; + + use super::*; + use crate::ledger::storage::testing::TestStorage; + use crate::types::storage::BlockHeight; + use crate::vm::wasm::{self, TxCache, VpCache}; + use crate::vm::WasmCacheRoAccess; + + /// A test client that has direct access to the storage + pub struct TestClient + where + RPC: Router, + { + /// RPC router + pub rpc: RPC, + /// storage + pub storage: TestStorage, + /// VP wasm compilation cache 
+ pub vp_wasm_cache: VpCache, + /// tx wasm compilation cache + pub tx_wasm_cache: TxCache, + /// VP wasm compilation cache directory + pub vp_cache_dir: TempDir, + /// tx wasm compilation cache directory + pub tx_cache_dir: TempDir, + } + + impl TestClient + where + RPC: Router, + { + #[allow(dead_code)] + /// Initialize a test client for the given root RPC router + pub fn new(rpc: RPC) -> Self { + // Initialize the `TestClient` + let storage = TestStorage::default(); + let (vp_wasm_cache, vp_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + let (tx_wasm_cache, tx_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + Self { + rpc, + storage, + vp_wasm_cache: vp_wasm_cache.read_only(), + tx_wasm_cache: tx_wasm_cache.read_only(), + vp_cache_dir, + tx_cache_dir, + } + } + } + + #[async_trait::async_trait] + impl Client for TestClient + where + RPC: Router + Sync, + { + type Error = std::io::Error; + + async fn request( + &self, + path: String, + data: Option>, + height: Option, + prove: bool, + ) -> Result { + let data = data.unwrap_or_default(); + let height = height.unwrap_or_default(); + // Handle a path by invoking the `RPC.handle` directly with the + // borrowed storage + let request = RequestQuery { + data, + path, + height, + prove, + }; + let ctx = RequestCtx { + storage: &self.storage, + vp_wasm_cache: self.vp_wasm_cache.clone(), + tx_wasm_cache: self.tx_wasm_cache.clone(), + }; + let response = self.rpc.handle(ctx, &request).unwrap(); + Ok(response) + } + } +} diff --git a/shared/src/ledger/queries/router.rs b/shared/src/ledger/queries/router.rs new file mode 100644 index 0000000000..e4823e5ad7 --- /dev/null +++ b/shared/src/ledger/queries/router.rs @@ -0,0 +1,1090 @@ +//! The main export of this module is the `router!` macro, which can be used to +//! define compile time tree patterns for a router in which the terminal leaves +//! are connected to the given handler functions. +//! +//! Note that for debugging pattern matching issue, you can uncomment +//! all the `println!`s in this module. + +use thiserror::Error; + +/// Router error. +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum Error { + #[error("Found no matching pattern for the given path {0}")] + WrongPath(String), +} + +/// Find the index of a next forward slash after the given `start` index in the +/// path. When there are no more slashes, returns the index after the end of the +/// path. +/// +/// # Panics +/// The given `start` must be < `path.len()`. +pub fn find_next_slash_index(path: &str, start: usize) -> usize { + path[start..] + .find('/') + // Offset by the starting position + .map(|i| start + i) + // If not found, go to the end of path + .unwrap_or(path.len()) +} + +/// Invoke the sub-handler or call the handler function with the matched +/// arguments generated by `try_match_segments`. +macro_rules! handle_match { + // Nested router + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, + (sub $router:tt), ( $( $matched_args:ident, )* ), + ) => { + // not used anymore - silence the warning + let _ = $end; + // Undo last '/' advance, the next pattern has to start with `/`. 
+ // This cannot underflow because path cannot be empty and must start + // with `/` + $start -= 1; + // Invoke `handle` on the sub router + return $router.internal_handle($ctx, $request, $start) + }; + + // Handler function that uses a request (`with_options`) + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, + (with_options $handle:tt), ( $( $matched_args:ident, )* ), + ) => { + // check that we're at the end of the path - trailing slash is optional + if !($end == $request.path.len() || + // ignore trailing slashes + $end == $request.path.len() - 1 && &$request.path[$end..] == "/") { + // we're not at the end, no match + println!("Not fully matched"); + break + } + let result = $handle($ctx, $request, $( $matched_args ),* )?; + // The handle must take care of encoding if needed and return `Vec`. + // This is because for `storage_value` the bytes are returned verbatim + // as read from storage. + return Ok(result); + }; + + // Handler function that doesn't use the request, just the path args, if any + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, + $handle:tt, ( $( $matched_args:ident, )* ), + ) => { + // check that we're at the end of the path - trailing slash is optional + if !($end == $request.path.len() || + // ignore trailing slashes + $end == $request.path.len() - 1 && &$request.path[$end..] == "/") { + // we're not at the end, no match + // println!("Not fully matched"); + break + } + // Check that the request is not sent with unsupported non-default + $crate::ledger::queries::require_latest_height(&$ctx, $request)?; + $crate::ledger::queries::require_no_proof($request)?; + $crate::ledger::queries::require_no_data($request)?; + + // If you get a compile error from here with `expected function, found + // queries::Storage`, you're probably missing the marker `(sub _)` + let data = $handle($ctx, $( $matched_args ),* )?; + // Encode the returned data with borsh + let data = borsh::BorshSerialize::try_to_vec(&data).into_storage_result()?; + return Ok($crate::ledger::queries::EncodedResponseQuery { + data, + info: Default::default(), + proof_ops: None, + }); + }; +} + +/// Using TT muncher pattern on the `$tail` pattern, this macro recursively +/// generates path matching logic that `break`s if some parts are unmatched. +macro_rules! try_match_segments { + // sub-pattern handle - this should only be invoked if the current + // $pattern is already matched + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, + { $( $sub_pattern:tt $( -> $_sub_return_ty:path )? 
= $handle:tt, )* }, + $matched_args:tt, + () + ) => { + // Try to match each sub-patten + $( + // This loop never repeats, it's only used for a breaking + // mechanism when a $pattern is not matched to skip to the + // next one, if any + loop { + #[allow(unused_mut)] + let mut $start = $start; + let mut $end = $end; + // Try to match, parse args and invoke $handle, will + // break the `loop` not matched + try_match_segments!($ctx, $request, $start, $end, + $handle, $matched_args, $sub_pattern + ); + } + )* + }; + + // Terminal tail call, invoked after when all the args in the current + // pattern are matched and the $handle is not sub-pattern + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, $handle:tt, + ( $( $matched_args:ident, )* ), + () + ) => { + handle_match!($ctx, $request, $start, $end, $handle, ( $( $matched_args, )* ), ); + }; + + // Try to match an untyped argument, declares the expected $arg as &str + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, $handle:ident, + ( $( $matched_args:ident, )* ), + ( + [$arg:ident] + $( / $( $tail:tt)/ * )? + ) + ) => { + let $arg = &$request.path[$start..$end]; + // Advanced index past the matched arg + $start = $end; + // advance past next '/', if any + if $start + 1 < $request.path.len() { + $start += 1; + } + $end = find_next_slash_index(&$request.path, $start); + try_match_segments!($ctx, $request, $start, $end, $handle, + ( $( $matched_args, )* $arg, ), ( $( $( $tail )/ * )? ) ); + }; + + // Try to match and parse a typed argument like the case below, but with + // the argument optional. + // Declares the expected $arg into type $t, if it can be parsed. + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, $handle:tt, + ( $( $matched_args:ident, )* ), + ( + [$arg:ident : opt $arg_ty:ty] + $( / $( $tail:tt)/ * )? + ) + ) => { + let $arg: Option<$arg_ty> = match $request.path[$start..$end].parse::<$arg_ty>() { + Ok(parsed) => { + // Only advance if optional argument is present, otherwise stay + // in the same position for the next match, if any. + + $start = $end; + // advance past next '/', if any + if $start + 1 < $request.path.len() { + $start += 1; + } + $end = find_next_slash_index(&$request.path, $start); + + Some(parsed) + }, + Err(_) => + { + // If arg cannot be parsed, ignore it because it's optional + None + } + }; + try_match_segments!($ctx, $request, $start, $end, $handle, + ( $( $matched_args, )* $arg, ), ( $( $( $tail )/ * )? ) ); + }; + + // Special case of the typed argument pattern below. When there are no more + // args in the tail and the handle isn't a sub-router (its handler is + // ident), we try to match the rest of the path till the end. + // + // This is specifically needed for storage methods, which have + // `storage::Key` param that includes path-like slashes. 
+ // + // Try to match and parse a typed argument, declares the expected $arg into + // type $t, if it can be parsed + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, + $handle:ident, + ( $( $matched_args:ident, )* ), + ( + [$arg:ident : $arg_ty:ty] + ) + ) => { + let $arg: $arg_ty; + $end = $request.path.len(); + match $request.path[$start..$end].parse::<$arg_ty>() { + Ok(parsed) => { + // println!("Parsed {}", parsed); + $arg = parsed + }, + Err(_) => + { + // println!("Cannot parse {} from {}", stringify!($arg_ty), &$request.path[$start..$end]); + // If arg cannot be parsed, try to skip to next pattern + break + } + } + // Invoke the terminal pattern + try_match_segments!($ctx, $request, $start, $end, $handle, + ( $( $matched_args, )* $arg, ), () ); + }; + + // One more special case of the typed argument pattern below for a handler + // `with_options`, where we try to match the rest of the path till the end. + // + // This is specifically needed for storage methods, which have + // `storage::Key` param that includes path-like slashes. + // + // Try to match and parse a typed argument, declares the expected $arg into + // type $t, if it can be parsed + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, + (with_options $handle:ident), + ( $( $matched_args:ident, )* ), + ( + [$arg:ident : $arg_ty:ty] + ) + ) => { + let $arg: $arg_ty; + $end = $request.path.len(); + match $request.path[$start..$end].parse::<$arg_ty>() { + Ok(parsed) => { + println!("Parsed {}", parsed); + $arg = parsed + }, + Err(_) => + { + println!("Cannot parse {} from {}", stringify!($arg_ty), &$request.path[$start..$end]); + // If arg cannot be parsed, try to skip to next pattern + break + } + } + // Invoke the terminal pattern + try_match_segments!($ctx, $request, $start, $end, (with_options $handle), + ( $( $matched_args, )* $arg, ), () ); + }; + + // Try to match and parse a typed argument, declares the expected $arg into + // type $t, if it can be parsed + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, $handle:tt, + ( $( $matched_args:ident, )* ), + ( + [$arg:ident : $arg_ty:ty] + $( / $( $tail:tt)/ * )? + ) + ) => { + let $arg: $arg_ty; + match $request.path[$start..$end].parse::<$arg_ty>() { + Ok(parsed) => { + $arg = parsed + }, + Err(_) => + { + // println!("Cannot parse {} from {}", stringify!($arg_ty), &$request.path[$start..$end]); + // If arg cannot be parsed, try to skip to next pattern + break + } + } + $start = $end; + // advance past next '/', if any + if $start + 1 < $request.path.len() { + $start += 1; + } + $end = find_next_slash_index(&$request.path, $start); + try_match_segments!($ctx, $request, $start, $end, $handle, + ( $( $matched_args, )* $arg, ), ( $( $( $tail )/ * )? ) ); + }; + + // Try to match an expected string literal + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, $handle:tt, + ( $( $matched_args:ident, )* ), + ( + $expected:literal + $( / $( $tail:tt)/ * )? + ) + ) => { + if &$request.path[$start..$end] == $expected { + // Advanced index past the matched arg + // println!("Matched literal {}", $expected); + $start = $end; + } else { + // println!("{} doesn't match literal {}", &$request.path[$start..$end], $expected); + // Try to skip to next pattern + break; + } + // advance past next '/', if any + if $start + 1 < $request.path.len() { + $start += 1; + } + $end = find_next_slash_index(&$request.path, $start); + try_match_segments!($ctx, $request, $start, $end, $handle, + ( $( $matched_args, )* ), ( $( $( $tail )/ * )? 
) ); + }; +} + +/// Generate a function that tries to match the given pattern and `break`s if +/// any of its parts are unmatched. This layer will check that the path starts +/// with `/` and then invoke `try_match_segments` TT muncher that goes through +/// the patterns. +macro_rules! try_match { + ($ctx:ident, $request:ident, $start:ident, $handle:tt, $segments:tt) => { + // check that the initial char is '/' + if $request.path.is_empty() || &$request.path[..1] != "/" { + // println!("Missing initial slash"); + break; + } + // advance past initial '/' + $start += 1; + // Path is too short to match + if $start >= $request.path.len() { + // println!("Path is too short"); + break; + } + let mut end = find_next_slash_index(&$request.path, $start); + try_match_segments!( + $ctx, + $request, + $start, + end, + $handle, + (), + $segments + ); + }; +} + +/// Convert literal pattern into a `&[&'static str]` +// TODO sub router pattern is not yet used +#[allow(unused_macros)] +macro_rules! pattern_to_prefix { + ( ( $( $pattern:literal )/ * ) ) => { + &[$( $pattern ),*] + }; + ( $pattern:tt ) => { + compile_error!("sub-router cannot have non-literal prefix patterns") + }; +} + +/// Turn patterns and their handlers into methods for the router, where each +/// dynamic pattern is turned into a parameter for the method. +macro_rules! pattern_and_handler_to_method { + // Special terminal rule for `storage_value` handle from + // `shared/src/ledger/queries/shell.rs` that returns `Vec` which should + // not be decoded from response.data, but instead return as is + ( + ( $( $param:tt: $param_ty:ty ),* ) + [ $( { $prefix:expr } ),* ] + $return_type:path, + (with_options storage_value), + () + ) => { + // paste! used to construct the `fn $handle_path`'s name. + paste::paste! { + #[allow(dead_code)] + #[doc = "Get a path to query `storage_value`."] + pub fn storage_value_path(&self, $( $param: &$param_ty ),* ) -> String { + itertools::join( + [ Some(std::borrow::Cow::from(&self.prefix)), $( $prefix ),* ] + .into_iter() + .filter_map(|x| x), "/") + } + + #[allow(dead_code)] + #[allow(clippy::too_many_arguments)] + #[cfg(any(test, feature = "async-client"))] + #[doc = "Request value with optional data (used for e.g. \ + `dry_run_tx`), optionally specified height (supported for \ + `storage_value`) and optional proof (supported for \ + `storage_value` and `storage_prefix`) from `storage_value`."] + pub async fn storage_value(&self, client: &CLIENT, + data: Option>, + height: Option<$crate::types::storage::BlockHeight>, + prove: bool, + $( $param: &$param_ty ),* + ) + -> std::result::Result< + $crate::ledger::queries::ResponseQuery>, + ::Error + > + where CLIENT: $crate::ledger::queries::Client + std::marker::Sync { + println!("IMMA VEC!!!!!!"); + let path = self.storage_value_path( $( $param ),* ); + + let $crate::ledger::queries::ResponseQuery { + data, info, proof_ops + } = client.request(path, data, height, prove).await?; + + Ok($crate::ledger::queries::ResponseQuery { + data, + info, + proof_ops, + }) + } + } + }; + + // terminal rule for $handle that uses request (`with_options`) + ( + ( $( $param:tt: $param_ty:ty ),* ) + [ $( { $prefix:expr } ),* ] + $return_type:path, + (with_options $handle:tt), + () + ) => { + // paste! used to construct the `fn $handle_path`'s name. + paste::paste! 
{ + #[allow(dead_code)] + #[doc = "Get a path to query `" $handle "`."] + pub fn [<$handle _path>](&self, $( $param: &$param_ty ),* ) -> String { + itertools::join( + [ Some(std::borrow::Cow::from(&self.prefix)), $( $prefix ),* ] + .into_iter() + .filter_map(|x| x), "/") + } + + #[allow(dead_code)] + #[allow(clippy::too_many_arguments)] + #[cfg(any(test, feature = "async-client"))] + #[doc = "Request value with optional data (used for e.g. \ + `dry_run_tx`), optionally specified height (supported for \ + `storage_value`) and optional proof (supported for \ + `storage_value` and `storage_prefix`) from `" $handle "`."] + pub async fn $handle(&self, client: &CLIENT, + data: Option>, + height: Option<$crate::types::storage::BlockHeight>, + prove: bool, + $( $param: &$param_ty ),* + ) + -> std::result::Result< + $crate::ledger::queries::ResponseQuery<$return_type>, + ::Error + > + where CLIENT: $crate::ledger::queries::Client + std::marker::Sync { + println!("IMMA not a VEC!!!!!!"); + let path = self.[<$handle _path>]( $( $param ),* ); + + let $crate::ledger::queries::ResponseQuery { + data, info, proof_ops + } = client.request(path, data, height, prove).await?; + + let decoded: $return_type = + borsh::BorshDeserialize::try_from_slice(&data[..])?; + + Ok($crate::ledger::queries::ResponseQuery { + data: decoded, + info, + proof_ops, + }) + } + } + }; + + // terminal rule that $handle that doesn't use request + ( + ( $( $param:tt: $param_ty:ty ),* ) + [ $( { $prefix:expr } ),* ] + $return_type:path, + $handle:tt, + () + ) => { + // paste! used to construct the `fn $handle_path`'s name. + paste::paste! { + #[allow(dead_code)] + #[doc = "Get a path to query `" $handle "`."] + pub fn [<$handle _path>](&self, $( $param: &$param_ty ),* ) -> String { + itertools::join( + [ Some(std::borrow::Cow::from(&self.prefix)), $( $prefix ),* ] + .into_iter() + .filter_map(|x| x), "/") + } + + #[allow(dead_code)] + #[allow(clippy::too_many_arguments)] + #[cfg(any(test, feature = "async-client"))] + #[doc = "Request a simple borsh-encoded value from `" $handle "`, \ + without any additional request data, specified block height or \ + proof."] + pub async fn $handle(&self, client: &CLIENT, + $( $param: &$param_ty ),* + ) + -> std::result::Result< + $return_type, + ::Error + > + where CLIENT: $crate::ledger::queries::Client + std::marker::Sync { + let path = self.[<$handle _path>]( $( $param ),* ); + + let data = client.simple_request(path).await?; + + let decoded: $return_type = + borsh::BorshDeserialize::try_from_slice(&data[..])?; + Ok(decoded) + } + } + }; + + // sub-pattern + ( + $param:tt + $prefix:tt + $( $_return_type:path )?, + { $( $sub_pattern:tt $( -> $sub_return_ty:path )? 
= $handle:tt, )* }, + $pattern:tt + ) => { + $( + // join pattern with each sub-pattern + pattern_and_handler_to_method!( + $param + $prefix + $( $sub_return_ty )?, $handle, $pattern, $sub_pattern + ); + )* + }; + + // literal string arg + ( + ( $( $param:tt: $param_ty:ty ),* ) + [ $( { $prefix:expr } ),* ] + $( $return_type:path )?, + $handle:tt, + ( $pattern:literal $( / $tail:tt )* ) + ) => { + pattern_and_handler_to_method!( + ( $( $param: $param_ty ),* ) + [ $( { $prefix }, )* { std::option::Option::Some(std::borrow::Cow::from($pattern)) } ] + $( $return_type )?, $handle, ( $( $tail )/ * ) + ); + }; + + // untyped arg + ( + ( $( $param:tt: $param_ty:ty ),* ) + [ $( { $prefix:expr } ),* ] + $( $return_type:path )?, + $handle:tt, + ( [$name:tt] $( / $tail:tt )* ) + ) => { + pattern_and_handler_to_method!( + ( $( $param: $param_ty, )* $name: str ) + [ $( { $prefix }, )* { std::option::Option::Some(std::borrow::Cow::from($name)) } ] + $( $return_type )?, $handle, ( $( $tail )/ * ) + ); + }; + + // typed arg + ( + ( $( $param:tt: $param_ty:ty ),* ) + [ $( { $prefix:expr } ),* ] + $( $return_type:path )?, + $handle:tt, + ( [$name:tt: $type:ty] $( / $tail:tt )* ) + ) => { + pattern_and_handler_to_method!( + ( $( $param: $param_ty, )* $name: $type ) + [ $( { $prefix }, )* { std::option::Option::Some(std::borrow::Cow::from($name.to_string())) } ] + $( $return_type )?, $handle, ( $( $tail )/ * ) + ); + }; + + // opt typed arg + ( + ( $( $param:tt: $param_ty:ty ),* ) + [ $( { $prefix:expr } ),* ] + $( $return_type:path )?, + $handle:tt, + ( [$name:tt: opt $type:ty] $( / $tail:tt )* ) + ) => { + pattern_and_handler_to_method!( + ( $( $param: $param_ty, )* $name: std::option::Option<$type> ) + [ $( { $prefix }, )* { $name.map(|arg| std::borrow::Cow::from(arg.to_string())) } ] + $( $return_type )?, $handle, ( $( $tail )/ * ) + ); + }; + + // join pattern with sub-pattern + ( + ( $( $param:tt: $param_ty:ty ),* ) + [ $( { $prefix:expr } ),* ] + $( $return_type:path )?, + $handle:tt, + ( $( $pattern:tt )/ * ), ( $( $sub_pattern:tt )/ * ) + ) => { + pattern_and_handler_to_method!( + ( $( $param: $param_ty ),* ) + [ $( { $prefix }, )* ] + $( $return_type )?, + $handle, ( $( $pattern / )* $( $sub_pattern )/ * ) + ); + }; +} + +/// TT muncher macro that generates a `struct $name` with methods for all its +/// handlers. +macro_rules! router_type { + // terminal rule + ($name:ident { $( $methods:item )* }, ) => { + paste::paste! { + #[doc = "`" $name "`path router type"] + pub struct $name { + prefix: String, + } + + impl $name { + #[doc = "Construct this router as a root router"] + pub const fn new() -> Self { + Self { + prefix: String::new(), + } + } + + #[allow(dead_code)] + #[doc = "Construct this router as a sub-router at the given prefix path"] + pub const fn sub(prefix: String) -> Self { + Self { + prefix, + } + } + + // paste the generated methods + $( $methods )* + } + } + }; + + // a sub router - recursion + ( + $name:ident { $( $methods:item )* }, + $pattern:tt = (sub $router:ident) + $( ,$tail_pattern:tt $( -> $tail_return_type:path )? = $tail:tt )* + ) => { + paste::paste! { + router_type!{ + $name { + #[doc = "`" $name "` sub-router"] + pub fn [<$router:camel:snake>](&self) -> [<$router:camel>] { + // prefix for a sub can only contain literals + let current_prefix: &[&'static str] = pattern_to_prefix!($pattern); + let path = [&[self.prefix.as_str()][..], current_prefix].concat().join("/"); + [<$router:camel>]::sub(path) + } + $( $methods )* + }, + $( $tail_pattern $( -> $tail_return_type )? 
= $tail ),* + } + } + }; + + // a sub-pattern - add a method for each handle inside it + ( + $name:ident + { $( $methods:item )* }, + $pattern:tt = { $( $sub_pattern:tt $( -> $sub_return_ty:path )? = $handle:tt, )* } + $( ,$tail_pattern:tt $( -> $tail_return_type:path )? = $tail:tt )* + ) => { + router_type!{ + $name { + $( + // join pattern with each sub-pattern + pattern_and_handler_to_method!( () [] $( $sub_return_ty )?, $handle, + $pattern, $sub_pattern + ); + )* + $( $methods )* + }, + $( $tail_pattern $( -> $tail_return_type )? = $tail ),* + } + }; + + // pattern with a handle - add a method for the handle + ( + $name:ident + { $( $methods:item )* }, + $pattern:tt -> $return_type:path = $handle:tt + $( ,$tail_pattern:tt $( -> $tail_return_type:path )? = $tail:tt )* + ) => { + router_type!{ + $name { + pattern_and_handler_to_method!( () [] $return_type, $handle, $pattern ); + $( $methods )* + }, + $( $tail_pattern $( -> $tail_return_type )? = $tail ),* + } + }; +} + +/// Compile time tree patterns router with type-safe dynamic parameter parsing, +/// automatic routing, type-safe path constructors and optional client query +/// methods (enabled with `feature = "async-client"`). +/// +/// The `router!` macro implements greedy matching algorithm. +/// +/// ## Examples +/// +/// ```rust,ignore +/// router! {ROOT, +/// // This pattern matches `/pattern_a/something`, where `something` can be +/// // parsed with `FromStr` into `ArgType`. +/// ( "pattern_a" / [typed_dynamic_arg: ArgType] ) -> ReturnType = handler, +/// +/// ( "pattern_b" / [optional_dynamic_arg: opt ArgType] ) -> ReturnType = +/// handler, +/// +/// // Untyped dynamic arg is a string slice `&str` +/// ( "pattern_c" / [untyped_dynamic_arg] ) -> ReturnType = handler, +/// +/// // The handler additionally receives the `RequestQuery`, which can have +/// // some data attached, specified block height and ask for a proof. It +/// // returns `EncodedResponseQuery` (the `data` must be encoded, if +/// // necessary), which can have some `info` string and a proof. +/// ( "pattern_d" ) -> ReturnType = (with_options handler), +/// +/// ( "another" / "pattern" / "that" / "goes" / "deep" ) -> ReturnType = handler, +/// +/// // Inlined sub-tree +/// ( "subtree" / [this_is_fine: ArgType] ) = { +/// ( "a" ) -> u64 = a_handler, +/// ( "b" / [another_arg] ) -> u64 = b_handler, +/// } +/// +/// // Imported sub-router - The prefix can only have literal segments +/// ( "sub" / "no_dynamic_args" ) = (sub SUB_ROUTER), +/// } +/// +/// router! {SUB_ROUTER, +/// ( "pattern" ) -> ReturnType = handler, +/// } +/// ``` +/// +/// Handler functions used in the patterns should have the expected signature: +/// ```rust,ignore +/// fn handler(ctx: RequestCtx<'_, D, H>, args ...) +/// -> storage_api::Result +/// where +/// D: 'static + DB + for<'iter> DBIter<'iter> + Sync, +/// H: 'static + StorageHasher + Sync; +/// ``` +/// +/// If the handler wants to support request options, it can be defined as +/// `(with_options $handler)` and then the expected signature is: +/// ```rust,ignore +/// fn handler(ctx: RequestCtx<'_, D, H>, request: &RequestQuery, args +/// ...) -> storage_api::Result> +/// where +/// D: 'static + DB + for<'iter> DBIter<'iter> + Sync, +/// H: 'static + StorageHasher + Sync; +/// ``` +#[macro_export] +macro_rules! router { + { $name:ident, $( $pattern:tt $( -> $return_type:path )? = $handle:tt , )* } => ( + + // `paste!` is used to convert the $name cases for a derived type and function name + paste::paste! 
{ + + router_type!{[<$name:camel>] {}, $( $pattern $( -> $return_type )? = $handle ),* } + + impl $crate::ledger::queries::Router for [<$name:camel>] { + // TODO: for some patterns, there's unused assignment of `$end` + #[allow(unused_assignments)] + fn internal_handle( + &self, + ctx: $crate::ledger::queries::RequestCtx<'_, D, H>, + request: &$crate::ledger::queries::RequestQuery, + start: usize + ) -> $crate::ledger::storage_api::Result<$crate::ledger::queries::EncodedResponseQuery> + where + D: 'static + $crate::ledger::storage::DB + for<'iter> $crate::ledger::storage::DBIter<'iter> + Sync, + H: 'static + $crate::ledger::storage::StorageHasher + Sync, + { + + // Import for `.into_storage_result()` + use $crate::ledger::storage_api::ResultExt; + + // Import helper from this crate used inside the macros + use $crate::ledger::queries::router::find_next_slash_index; + + $( + // This loop never repeats, it's only used for a breaking + // mechanism when a $pattern is not matched to skip to the + // next one, if any + loop { + let mut start = start; + // Try to match, parse args and invoke $handle, will + // break the `loop` not matched + try_match!(ctx, request, start, $handle, $pattern); + } + )* + + return Err( + $crate::ledger::queries::router::Error::WrongPath(request.path.clone())) + .into_storage_result(); + } + } + + #[doc = "`" $name "` path router"] + pub const $name: [<$name:camel>] = [<$name:camel>]::new(); + } + + ); +} + +/// You can expand the `handlers!` macro invocation with e.g.: +/// ```shell +/// cargo expand ledger::queries::router::test_rpc_handlers --features "ferveo-tpke, ibc-mocks, testing, wasm-runtime, tendermint-rpc" --tests --lib +/// ``` +#[cfg(test)] +mod test_rpc_handlers { + use borsh::BorshSerialize; + + use crate::ledger::queries::{ + EncodedResponseQuery, RequestCtx, RequestQuery, ResponseQuery, + }; + use crate::ledger::storage::{DBIter, StorageHasher, DB}; + use crate::ledger::storage_api::{self, ResultExt}; + use crate::types::storage::Epoch; + use crate::types::token; + + /// A little macro to generate boilerplate for RPC handler functions. + /// These are implemented to return their name as a String, joined by + /// slashes with their argument values turned `to_string()`, if any. + macro_rules! handlers { + ( + // name and params, if any + $( $name:ident $( ( $( $param:ident: $param_ty:ty ),* ) )? ),* + // optional trailing comma + $(,)? ) => { + $( + pub fn $name( + _ctx: RequestCtx<'_, D, H>, + $( $( $param: $param_ty ),* )? + ) -> storage_api::Result + where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + { + let data = stringify!($name).to_owned(); + $( $( + let data = format!("{data}/{}", $param); + )* )? + Ok(data) + } + )* + }; + } + + // Generate handler functions for the router below + handlers!( + a, + b0i, + b0ii, + b1, + b2i(balance: token::Amount), + b3(a1: token::Amount, a2: token::Amount, a3: token::Amount), + b3i(a1: token::Amount, a2: token::Amount, a3: token::Amount), + b3ii(a1: token::Amount, a2: token::Amount, a3: token::Amount), + x, + y(untyped_arg: &str), + z(untyped_arg: &str), + ); + + /// This handler is hand-written, because the test helper macro doesn't + /// support optional args. 
+ pub fn b3iii( + _ctx: RequestCtx<'_, D, H>, + a1: token::Amount, + a2: token::Amount, + a3: Option, + ) -> storage_api::Result + where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + { + let data = "b3iii".to_owned(); + let data = format!("{data}/{}", a1); + let data = format!("{data}/{}", a2); + let data = a3.map(|a3| format!("{data}/{}", a3)).unwrap_or(data); + Ok(data) + } + + /// This handler is hand-written, because the test helper macro doesn't + /// support optional args. + pub fn b3iiii( + _ctx: RequestCtx<'_, D, H>, + a1: token::Amount, + a2: token::Amount, + a3: Option, + a4: Option, + ) -> storage_api::Result + where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + { + let data = "b3iiii".to_owned(); + let data = format!("{data}/{}", a1); + let data = format!("{data}/{}", a2); + let data = a3.map(|a3| format!("{data}/{}", a3)).unwrap_or(data); + let data = a4.map(|a4| format!("{data}/{}", a4)).unwrap_or(data); + Ok(data) + } + + /// This handler is hand-written, because the test helper macro doesn't + /// support handlers with `with_options`. + pub fn c( + _ctx: RequestCtx<'_, D, H>, + _request: &RequestQuery, + ) -> storage_api::Result + where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + { + let data = "c".to_owned().try_to_vec().into_storage_result()?; + Ok(ResponseQuery { + data, + ..ResponseQuery::default() + }) + } +} + +/// You can expand the `router!` macro invocation with e.g.: +/// ```shell +/// cargo expand ledger::queries::router::test_rpc --features "ferveo-tpke, ibc-mocks, testing, wasm-runtime, tendermint-rpc" --tests --lib +/// ``` +#[cfg(test)] +mod test_rpc { + use super::test_rpc_handlers::*; + use crate::types::storage::Epoch; + use crate::types::token; + + // Setup an RPC router for testing + router! {TEST_RPC, + ( "sub" ) = (sub TEST_SUB_RPC), + ( "a" ) -> String = a, + ( "b" ) = { + ( "0" ) = { + ( "i" ) -> String = b0i, + ( "ii" ) -> String = b0ii, + }, + ( "1" ) -> String = b1, + ( "2" ) = { + ( "i" / [balance: token::Amount] ) -> String = b2i, + }, + ( "3" / [a1: token::Amount] / [a2: token::Amount] ) = { + ( "i" / [a3: token:: Amount] ) -> String = b3i, + ( [a3: token:: Amount] ) -> String = b3, + ( [a3: token:: Amount] / "ii" ) -> String = b3ii, + ( [a3: opt token::Amount] / "iii" ) -> String = b3iii, + ( "iiii" / [a3: opt token::Amount] / "xyz" / [a4: opt Epoch] ) -> String = b3iiii, + }, + }, + ( "c" ) -> String = (with_options c), + } + + router! {TEST_SUB_RPC, + ( "x" ) -> String = x, + ( "y" / [untyped_arg] ) -> String = y, + ( "z" / [untyped_arg] ) -> String = z, + } +} + +#[cfg(test)] +mod test { + use super::test_rpc::TEST_RPC; + use crate::ledger::queries::testing::TestClient; + use crate::ledger::queries::{RequestCtx, RequestQuery, Router}; + use crate::ledger::storage_api; + use crate::types::storage::Epoch; + use crate::types::token; + + /// Test all the possible paths in `TEST_RPC` router. 
+ #[tokio::test] + async fn test_router_macro() -> storage_api::Result<()> { + let client = TestClient::new(TEST_RPC); + + // Test request with an invalid path + let request = RequestQuery { + path: "/invalid".to_owned(), + ..RequestQuery::default() + }; + let ctx = RequestCtx { + storage: &client.storage, + vp_wasm_cache: client.vp_wasm_cache.clone(), + tx_wasm_cache: client.tx_wasm_cache.clone(), + }; + let result = TEST_RPC.handle(ctx, &request); + assert!(result.is_err()); + + // Test requests to valid paths using the router's methods + + let result = TEST_RPC.a(&client).await.unwrap(); + assert_eq!(result, "a"); + + let result = TEST_RPC.b0i(&client).await.unwrap(); + assert_eq!(result, "b0i"); + + let result = TEST_RPC.b0ii(&client).await.unwrap(); + assert_eq!(result, "b0ii"); + + let result = TEST_RPC.b1(&client).await.unwrap(); + assert_eq!(result, "b1"); + + let balance = token::Amount::from(123_000_000); + let result = TEST_RPC.b2i(&client, &balance).await.unwrap(); + assert_eq!(result, format!("b2i/{balance}")); + + let a1 = token::Amount::from(345); + let a2 = token::Amount::from(123_000); + let a3 = token::Amount::from(1_000_999); + let result = TEST_RPC.b3(&client, &a1, &a2, &a3).await.unwrap(); + assert_eq!(result, format!("b3/{a1}/{a2}/{a3}")); + + let result = TEST_RPC.b3i(&client, &a1, &a2, &a3).await.unwrap(); + assert_eq!(result, format!("b3i/{a1}/{a2}/{a3}")); + + let result = TEST_RPC.b3ii(&client, &a1, &a2, &a3).await.unwrap(); + assert_eq!(result, format!("b3ii/{a1}/{a2}/{a3}")); + + let result = + TEST_RPC.b3iii(&client, &a1, &a2, &Some(a3)).await.unwrap(); + assert_eq!(result, format!("b3iii/{a1}/{a2}/{a3}")); + + let result = TEST_RPC.b3iii(&client, &a1, &a2, &None).await.unwrap(); + assert_eq!(result, format!("b3iii/{a1}/{a2}")); + + let result = TEST_RPC + .b3iiii(&client, &a1, &a2, &Some(a3), &None) + .await + .unwrap(); + assert_eq!(result, format!("b3iiii/{a1}/{a2}/{a3}")); + + let a4 = Epoch::from(10); + let result = TEST_RPC + .b3iiii(&client, &a1, &a2, &Some(a3), &Some(a4)) + .await + .unwrap(); + assert_eq!(result, format!("b3iiii/{a1}/{a2}/{a3}/{a4}")); + + let result = TEST_RPC + .b3iiii(&client, &a1, &a2, &None, &None) + .await + .unwrap(); + assert_eq!(result, format!("b3iiii/{a1}/{a2}")); + + let result = TEST_RPC.c(&client, None, None, false).await.unwrap(); + assert_eq!(result.data, format!("c")); + + let result = TEST_RPC.test_sub_rpc().x(&client).await.unwrap(); + assert_eq!(result, format!("x")); + + let arg = "test123"; + let result = TEST_RPC.test_sub_rpc().y(&client, arg).await.unwrap(); + assert_eq!(result, format!("y/{arg}")); + + let arg = "test321"; + let result = TEST_RPC.test_sub_rpc().z(&client, arg).await.unwrap(); + assert_eq!(result, format!("z/{arg}")); + + Ok(()) + } +} diff --git a/shared/src/ledger/queries/shell.rs b/shared/src/ledger/queries/shell.rs new file mode 100644 index 0000000000..7491af4945 --- /dev/null +++ b/shared/src/ledger/queries/shell.rs @@ -0,0 +1,329 @@ +use borsh::BorshSerialize; +use tendermint_proto::crypto::{ProofOp, ProofOps}; + +use crate::ledger::queries::types::{RequestCtx, RequestQuery}; +use crate::ledger::queries::{require_latest_height, EncodedResponseQuery}; +use crate::ledger::storage::{DBIter, StorageHasher, DB}; +use crate::ledger::storage_api::{self, ResultExt, StorageRead}; +use crate::types::storage::{self, Epoch, PrefixValue}; +#[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] +use crate::types::transaction::TxResult; +#[cfg(all(feature = "wasm-runtime", feature = 
"ferveo-tpke"))] +use crate::types::transaction::{DecryptedTx, TxType}; + +#[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] +router! {SHELL, + // Epoch of the last committed block + ( "epoch" ) -> Epoch = epoch, + + // Raw storage access - read value + ( "value" / [storage_key: storage::Key] ) + -> Vec = (with_options storage_value), + + // Dry run a transaction + ( "dry_run_tx" ) -> TxResult = (with_options dry_run_tx), + + // Raw storage access - prefix iterator + ( "prefix" / [storage_key: storage::Key] ) + -> Vec = (with_options storage_prefix), + + // Raw storage access - is given storage key present? + ( "has_key" / [storage_key: storage::Key] ) + -> bool = storage_has_key, +} + +#[cfg(not(all(feature = "wasm-runtime", feature = "ferveo-tpke")))] +router! {SHELL, + // Epoch of the last committed block + ( "epoch" ) -> Epoch = epoch, + + // Raw storage access - read value + ( "value" / [storage_key: storage::Key] ) + -> Vec = (with_options storage_value), + + // Raw storage access - prefix iterator + ( "prefix" / [storage_key: storage::Key] ) + -> Vec = (with_options storage_prefix), + + // Raw storage access - is given storage key present? + ( "has_key" / [storage_key: storage::Key] ) + -> bool = storage_has_key, +} + +// Handlers: + +#[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] +fn dry_run_tx( + mut ctx: RequestCtx<'_, D, H>, + request: &RequestQuery, +) -> storage_api::Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + use crate::ledger::gas::BlockGasMeter; + use crate::ledger::protocol; + use crate::ledger::storage::write_log::WriteLog; + use crate::proto::Tx; + + let mut gas_meter = BlockGasMeter::default(); + let mut write_log = WriteLog::default(); + let tx = Tx::try_from(&request.data[..]).into_storage_result()?; + let tx = TxType::Decrypted(DecryptedTx::Decrypted(tx)); + let data = protocol::apply_tx( + tx, + request.data.len(), + &mut gas_meter, + &mut write_log, + ctx.storage, + &mut ctx.vp_wasm_cache, + &mut ctx.tx_wasm_cache, + ) + .into_storage_result()?; + let data = data.try_to_vec().into_storage_result()?; + Ok(EncodedResponseQuery { + data, + proof_ops: None, + info: Default::default(), + }) +} + +fn epoch(ctx: RequestCtx<'_, D, H>) -> storage_api::Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + let data = ctx.storage.last_epoch; + Ok(data) +} + +/// Returns data with `vec![]` when the storage key is not found. For all +/// borsh-encoded types, it is safe to check `data.is_empty()` to see if the +/// value was found, except for unit - see `fn query_storage_value` in +/// `apps/src/lib/client/rpc.rs` for unit type handling via `storage_has_key`. +fn storage_value( + ctx: RequestCtx<'_, D, H>, + request: &RequestQuery, + storage_key: storage::Key, +) -> storage_api::Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + match ctx + .storage + .read_with_height(&storage_key, request.height) + .into_storage_result()? 
+ { + (Some(value), _gas) => { + let proof = if request.prove { + let proof = ctx + .storage + .get_existence_proof( + &storage_key, + value.clone().into(), + request.height, + ) + .into_storage_result()?; + Some(proof.into()) + } else { + None + }; + Ok(EncodedResponseQuery { + data: value, + proof_ops: proof, + info: Default::default(), + }) + } + (None, _gas) => { + let proof = if request.prove { + let proof = ctx + .storage + .get_non_existence_proof(&storage_key, request.height) + .into_storage_result()?; + Some(proof.into()) + } else { + None + }; + Ok(EncodedResponseQuery { + data: vec![], + proof_ops: proof, + info: format!("No value found for key: {}", storage_key), + }) + } + } +} + +fn storage_prefix( + ctx: RequestCtx<'_, D, H>, + request: &RequestQuery, + storage_key: storage::Key, +) -> storage_api::Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + require_latest_height(&ctx, request)?; + + let (iter, _gas) = ctx.storage.iter_prefix(&storage_key); + let data: storage_api::Result> = iter + .map(|(key, value, _gas)| { + let key = storage::Key::parse(key).into_storage_result()?; + Ok(PrefixValue { key, value }) + }) + .collect(); + let data = data?; + let proof_ops = if request.prove { + let mut ops = vec![]; + for PrefixValue { key, value } in &data { + let proof = ctx + .storage + .get_existence_proof(key, value.clone().into(), request.height) + .into_storage_result()?; + let mut cur_ops: Vec = + proof.ops.into_iter().map(|op| op.into()).collect(); + ops.append(&mut cur_ops); + } + // ops is not empty in this case + Some(ProofOps { ops }) + } else { + None + }; + let data = data.try_to_vec().into_storage_result()?; + Ok(EncodedResponseQuery { + data, + proof_ops, + ..Default::default() + }) +} + +fn storage_has_key( + ctx: RequestCtx<'_, D, H>, + storage_key: storage::Key, +) -> storage_api::Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + let data = StorageRead::has_key(ctx.storage, &storage_key)?; + Ok(data) +} + +#[cfg(test)] +mod test { + use borsh::BorshDeserialize; + + use crate::ledger::queries::testing::TestClient; + use crate::ledger::queries::RPC; + use crate::ledger::storage_api::{self, StorageWrite}; + use crate::proto::Tx; + use crate::types::{address, token}; + + const TX_NO_OP_WASM: &str = "../wasm_for_tests/tx_no_op.wasm"; + + #[test] + fn test_shell_queries_router_paths() { + let path = RPC.shell().epoch_path(); + assert_eq!("/shell/epoch", path); + + let token_addr = address::testing::established_address_1(); + let owner = address::testing::established_address_2(); + let key = token::balance_key(&token_addr, &owner); + let path = RPC.shell().storage_value_path(&key); + assert_eq!(format!("/shell/value/{}", key), path); + + let path = RPC.shell().dry_run_tx_path(); + assert_eq!("/shell/dry_run_tx", path); + + let path = RPC.shell().storage_prefix_path(&key); + assert_eq!(format!("/shell/prefix/{}", key), path); + + let path = RPC.shell().storage_has_key_path(&key); + assert_eq!(format!("/shell/has_key/{}", key), path); + } + + #[tokio::test] + async fn test_shell_queries_router_with_client() -> storage_api::Result<()> + { + // Initialize the `TestClient` + let mut client = TestClient::new(RPC); + + // Request last committed epoch + let read_epoch = RPC.shell().epoch(&client).await.unwrap(); + let current_epoch = client.storage.last_epoch; + assert_eq!(current_epoch, read_epoch); + + // Request dry run tx + let tx_no_op = 
std::fs::read(TX_NO_OP_WASM).expect("cannot load wasm"); + let tx = Tx::new(tx_no_op, None); + let tx_bytes = tx.to_bytes(); + let result = RPC + .shell() + .dry_run_tx(&client, Some(tx_bytes), None, false) + .await + .unwrap(); + assert!(result.data.is_accepted()); + + // Request storage value for a balance key ... + let token_addr = address::testing::established_address_1(); + let owner = address::testing::established_address_2(); + let balance_key = token::balance_key(&token_addr, &owner); + // ... there should be no value yet. + let read_balance = RPC + .shell() + .storage_value(&client, None, None, false, &balance_key) + .await + .unwrap(); + assert!(read_balance.data.is_empty()); + + // Request storage prefix iterator + let balance_prefix = token::balance_prefix(&token_addr); + let read_balances = RPC + .shell() + .storage_prefix(&client, None, None, false, &balance_prefix) + .await + .unwrap(); + assert!(read_balances.data.is_empty()); + + // Request storage has key + let has_balance_key = RPC + .shell() + .storage_has_key(&client, &balance_key) + .await + .unwrap(); + assert!(!has_balance_key); + + // Then write some balance ... + let balance = token::Amount::from(1000); + StorageWrite::write(&mut client.storage, &balance_key, balance)?; + // ... there should be the same value now + let read_balance = RPC + .shell() + .storage_value(&client, None, None, false, &balance_key) + .await + .unwrap(); + assert_eq!( + balance, + token::Amount::try_from_slice(&read_balance.data).unwrap() + ); + + // Request storage prefix iterator + let balance_prefix = token::balance_prefix(&token_addr); + let read_balances = RPC + .shell() + .storage_prefix(&client, None, None, false, &balance_prefix) + .await + .unwrap(); + assert_eq!(read_balances.data.len(), 1); + + // Request storage has key + let has_balance_key = RPC + .shell() + .storage_has_key(&client, &balance_key) + .await + .unwrap(); + assert!(has_balance_key); + + Ok(()) + } +} diff --git a/shared/src/ledger/queries/types.rs b/shared/src/ledger/queries/types.rs new file mode 100644 index 0000000000..c7b349ddc0 --- /dev/null +++ b/shared/src/ledger/queries/types.rs @@ -0,0 +1,171 @@ +use tendermint_proto::crypto::ProofOps; + +use crate::ledger::storage::{DBIter, Storage, StorageHasher, DB}; +use crate::ledger::storage_api; +use crate::types::storage::BlockHeight; +#[cfg(feature = "wasm-runtime")] +use crate::vm::wasm::{TxCache, VpCache}; +#[cfg(feature = "wasm-runtime")] +use crate::vm::WasmCacheRoAccess; + +/// A request context provides read-only access to storage and WASM compilation +/// caches to request handlers. +#[derive(Debug, Clone)] +pub struct RequestCtx<'a, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + /// Storage access + pub storage: &'a Storage, + /// VP WASM compilation cache + #[cfg(feature = "wasm-runtime")] + pub vp_wasm_cache: VpCache, + /// tx WASM compilation cache + #[cfg(feature = "wasm-runtime")] + pub tx_wasm_cache: TxCache, +} + +/// A `Router` handles parsing read-only query requests and dispatching them to +/// their handler functions. A valid query returns a borsh-encoded result. +pub trait Router { + /// Handle a given request using the provided context. This must be invoked + /// on the root `Router` to be able to match the `request.path` fully. 
+    fn handle<D, H>(
+        &self,
+        ctx: RequestCtx<'_, D, H>,
+        request: &RequestQuery,
+    ) -> storage_api::Result<EncodedResponseQuery>
+    where
+        D: 'static + DB + for<'iter> DBIter<'iter> + Sync,
+        H: 'static + StorageHasher + Sync,
+    {
+        self.internal_handle(ctx, request, 0)
+    }
+
+    /// Internal method which shouldn't be invoked directly. Instead, you may
+    /// want to call `self.handle()`.
+    ///
+    /// Handle a given request using the provided context, starting to
+    /// try to match `request.path` against the `Router`'s patterns at the
+    /// given `start` offset.
+    fn internal_handle<D, H>(
+        &self,
+        ctx: RequestCtx<'_, D, H>,
+        request: &RequestQuery,
+        start: usize,
+    ) -> storage_api::Result<EncodedResponseQuery>
+    where
+        D: 'static + DB + for<'iter> DBIter<'iter> + Sync,
+        H: 'static + StorageHasher + Sync;
+}
+
+/// A client with async request dispatcher method, which can be used to invoke
+/// type-safe methods from a root [`Router`], generated via `router!` macro.
+#[cfg(any(test, feature = "async-client"))]
+#[async_trait::async_trait]
+pub trait Client {
+    /// `std::io::Error` can happen in decoding with
+    /// `BorshDeserialize::try_from_slice`
+    type Error: From<std::io::Error>;
+
+    /// Send a simple query request at the given path. For more options, use
+    /// the `request` method.
+    async fn simple_request(
+        &self,
+        path: String,
+    ) -> Result<Vec<u8>, Self::Error> {
+        self.request(path, None, None, false)
+            .await
+            .map(|response| response.data)
+    }
+
+    /// Send a query request at the given path.
+    async fn request(
+        &self,
+        path: String,
+        data: Option<Vec<u8>>,
+        height: Option<BlockHeight>,
+        prove: bool,
+    ) -> Result<EncodedResponseQuery, Self::Error>;
+}
+
+/// Temporary domain-type for `tendermint_proto::abci::RequestQuery`, copied
+/// from
+/// until we are on a branch that has it included.
+#[derive(Clone, PartialEq, Eq, Debug, Default)]
+pub struct RequestQuery {
+    /// Raw query bytes.
+    ///
+    /// Can be used with or in lieu of `path`.
+    pub data: Vec<u8>,
+    /// Path of the request, like an HTTP `GET` path.
+    ///
+    /// Can be used with or in lieu of `data`.
+    ///
+    /// Applications MUST interpret `/store` as a query by key on the
+    /// underlying store. The key SHOULD be specified in the Data field.
+    /// Applications SHOULD allow queries over specific types like
+    /// `/accounts/...` or `/votes/...`.
+    pub path: String,
+    /// The block height for which the query should be executed.
+    ///
+    /// The default `0` returns data for the latest committed block. Note that
+    /// this is the height of the block containing the application's Merkle
+    /// root hash, which represents the state as it was after committing
+    /// the block at `height - 1`.
+    pub height: BlockHeight,
+    /// Whether to return a Merkle proof with the response, if possible.
+    pub prove: bool,
+}
+
+/// Generic response from a query
+#[derive(Clone, Debug, Default)]
+pub struct ResponseQuery<T> {
+    /// Response data to be borsh encoded
+    pub data: T,
+    /// Non-deterministic log of the request execution
+    pub info: String,
+    /// Optional proof - used for storage value reads which request `prove`
+    pub proof_ops: Option<ProofOps>,
+}
+
+/// [`ResponseQuery`] with borsh-encoded `data` field
+pub type EncodedResponseQuery = ResponseQuery<Vec<u8>>;
+
+impl RequestQuery {
+    /// Try to convert tendermint RequestQuery into our [`RequestQuery`]
+    /// domain type. This tries to convert the block height into our
+    /// [`BlockHeight`] type, where `0` is treated as a special value to signal
+    /// to use the latest committed block height as per tendermint ABCI Query
+    /// spec. A negative block height will cause an error.
+ pub fn try_from_tm( + storage: &Storage, + tendermint_proto::abci::RequestQuery { + data, + path, + height, + prove, + }: tendermint_proto::abci::RequestQuery, + ) -> Result + where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, + { + let height = match height { + 0 => { + // `0` means last committed height + storage.last_height + } + _ => BlockHeight(height.try_into().map_err(|_| { + format!("Query height cannot be negative, got: {}", height) + })?), + }; + Ok(Self { + data, + path, + height, + prove, + }) + } +} diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index f9030a471a..b83a53b664 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -1357,6 +1357,7 @@ version = "0.8.1" dependencies = [ "ark-bls12-381", "ark-serialize", + "async-trait", "bech32", "borsh", "chrono", @@ -1373,12 +1374,14 @@ dependencies = [ "loupe", "namada_proof_of_stake", "parity-wasm", + "paste", "proptest", "prost", "prost-types", "pwasm-utils", "rand", "rand_core 0.6.4", + "rayon", "rust_decimal", "serde", "serde_json", diff --git a/wasm/checksums.json b/wasm/checksums.json index 01140354ba..98d4134fd3 100644 --- a/wasm/checksums.json +++ b/wasm/checksums.json @@ -1,18 +1,18 @@ { - "tx_bond.wasm": "tx_bond.38c037a51f9215c2be9c1b01f647251ffdc96a02a0c958c5d3db4ee36ccde43b.wasm", - "tx_ibc.wasm": "tx_ibc.5f86477029d987073ebfec66019dc991b0bb8b80717d4885b860f910916cbcdd.wasm", - "tx_init_account.wasm": "tx_init_account.8d901bce15d1ab63a591def00421183a651d4d5e09ace4291bf0a9044692741d.wasm", - "tx_init_nft.wasm": "tx_init_nft.1991808f44c1c24d4376a3d46b602bed27575f6c0359095c53f37b9225050ffc.wasm", - "tx_init_proposal.wasm": "tx_init_proposal.716cd08d59b26bd75815511f03e141e6ac27bc0b7d7be10a71b04559244722c2.wasm", - "tx_init_validator.wasm": "tx_init_validator.611edff2746f71cdaa7547a84a96676b555821f00af8375a28f8dab7ae9fc9fa.wasm", - "tx_mint_nft.wasm": "tx_mint_nft.3f20f1a86da43cc475ccc127428944bd177d40fbe2d2d1588c6fadd069cbe4b2.wasm", - "tx_transfer.wasm": "tx_transfer.5653340103a32e6685f9668ec24855f65ae17bcc43035c2559a13f5c47bb67af.wasm", - "tx_unbond.wasm": "tx_unbond.71e66ac6f792123a2aaafd60b3892d74a7d0e7a03c3ea34f15fea9089010b810.wasm", - "tx_update_vp.wasm": "tx_update_vp.6d291dadb43545a809ba33fe26582b7984c67c65f05e363a93dbc62e06a33484.wasm", - "tx_vote_proposal.wasm": "tx_vote_proposal.ff3def7b4bb0c46635bd6d544ac1745362757ce063feb8142d2ed9ab207f2a12.wasm", - "tx_withdraw.wasm": "tx_withdraw.ba1a743cf8914a353d7706777e0b1a37e20cd271b16e022fd3b50ad28971291f.wasm", - "vp_nft.wasm": "vp_nft.4471284b5c5f3e28c973f0a2ad2dde52ebe4a1dcd5dc15e93b380706fd0e35ea.wasm", - "vp_testnet_faucet.wasm": "vp_testnet_faucet.7d7eb09cddc7ae348417da623e21ec4a4f8c78f15ae12de5abe7087eeab1e0db.wasm", - "vp_token.wasm": "vp_token.4a5436f7519de15c80103557add57e8d06e766e1ec1f7a642ffca252be01c5d0.wasm", - "vp_user.wasm": "vp_user.729b18aab60e8ae09b75b5f067658f30459a5ccfcd34f909b88da96523681019.wasm" + "tx_bond.wasm": "tx_bond.72f7ca706910728e7cd2699d225147634981f2bd82fa5c5e1800f33dd7a9268f.wasm", + "tx_ibc.wasm": "tx_ibc.cf61f60f726b00c4e4e26a2bfbb54d5a9fb0503aeb7ae46d9cfcd269417c6de4.wasm", + "tx_init_account.wasm": "tx_init_account.be35e9136ce7c62236ef40a0ec3a4fbfdd1c1c5999b0943c0495895c574ac01b.wasm", + "tx_init_nft.wasm": "tx_init_nft.b8dd99751cf701dcc04ccdd795a37c84ad6e37c833cf2d83ca674b1a5b8b7246.wasm", + "tx_init_proposal.wasm": "tx_init_proposal.da041bb1412b6d4bb303232aaf6bec9369138d4a94b70e5b2e2b87dadb0a47b9.wasm", + "tx_init_validator.wasm": 
"tx_init_validator.628232b3c034a63d11bb6b75be4f4ed831c41cacf1b710ee7cb6fd94d889d12e.wasm", + "tx_mint_nft.wasm": "tx_mint_nft.9bccf7930e21c59a03ff0aa7c85210bec8a320a87ed3d9c4bf000f98ade0cea2.wasm", + "tx_transfer.wasm": "tx_transfer.22f49259ce8c1534473959d699bbbfecb5b42499e9752785aa597c54f059e54b.wasm", + "tx_unbond.wasm": "tx_unbond.197405a2903fc1bf4a1b8f4bb2d901b9b0c455443d567907bd317d756afb16a5.wasm", + "tx_update_vp.wasm": "tx_update_vp.bb01d77ae24013ba7652c723bb4e446607b34dff10e4f01de4a6640aa80d282a.wasm", + "tx_vote_proposal.wasm": "tx_vote_proposal.55f84360fc7f4cec4542e53272017ecae22e004bac0faf62550c8711895bbae5.wasm", + "tx_withdraw.wasm": "tx_withdraw.69dfa7f299a28ce25190402b231d2dd184431c5c3b9a691aae7b77a366c6d78b.wasm", + "vp_nft.wasm": "vp_nft.8234618f0a3de3d7a6dd75d1463d42a50a357b9783a83525c0093297a0b69738.wasm", + "vp_testnet_faucet.wasm": "vp_testnet_faucet.92e4bb1ac583963ebe69a818d670c72e0db2370fe7a5ab2216060603f8e18440.wasm", + "vp_token.wasm": "vp_token.34405f1e1568f6478606de9cd8bb3ff1ffb78f1aa14cfc32861b1c2cf4b6eddd.wasm", + "vp_user.wasm": "vp_user.b70ceb1616f51aae27672c1d4c1705392716dca185e0503d61b3457c4e773f78.wasm" } \ No newline at end of file diff --git a/wasm_for_tests/wasm_source/Cargo.lock b/wasm_for_tests/wasm_source/Cargo.lock index b82f3b3d59..1259bd5cc4 100644 --- a/wasm_for_tests/wasm_source/Cargo.lock +++ b/wasm_for_tests/wasm_source/Cargo.lock @@ -1357,6 +1357,7 @@ version = "0.8.1" dependencies = [ "ark-bls12-381", "ark-serialize", + "async-trait", "bech32", "borsh", "chrono", @@ -1373,12 +1374,14 @@ dependencies = [ "loupe", "namada_proof_of_stake", "parity-wasm", + "paste", "proptest", "prost", "prost-types", "pwasm-utils", "rand", "rand_core 0.6.4", + "rayon", "rust_decimal", "serde", "serde_json",