From 1368c191038b700ea897ea28b1793846f30b3cfb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Thu, 19 Jan 2023 16:14:43 +0100 Subject: [PATCH 01/58] core: added `TempWlStorage` for ABCI++ prepare/process proposal --- core/src/ledger/storage/masp_conversions.rs | 4 +- core/src/ledger/storage/mod.rs | 2 +- core/src/ledger/storage/wl_storage.rs | 138 +++++++++++++++++--- 3 files changed, 123 insertions(+), 21 deletions(-) diff --git a/core/src/ledger/storage/masp_conversions.rs b/core/src/ledger/storage/masp_conversions.rs index 0834d7bb53..3945ba936a 100644 --- a/core/src/ledger/storage/masp_conversions.rs +++ b/core/src/ledger/storage/masp_conversions.rs @@ -29,8 +29,8 @@ pub fn update_allowed_conversions( wl_storage: &mut super::WlStorage, ) -> crate::ledger::storage_api::Result<()> where - D: super::DB + for<'iter> super::DBIter<'iter>, - H: super::StorageHasher, + D: 'static + super::DB + for<'iter> super::DBIter<'iter>, + H: 'static + super::StorageHasher, { use masp_primitives::ff::PrimeField; use masp_primitives::transaction::components::Amount as MaspAmount; diff --git a/core/src/ledger/storage/mod.rs b/core/src/ledger/storage/mod.rs index e2ac4da235..768e335a6d 100644 --- a/core/src/ledger/storage/mod.rs +++ b/core/src/ledger/storage/mod.rs @@ -20,7 +20,7 @@ pub use merkle_tree::{ use thiserror::Error; pub use traits::{Sha256Hasher, StorageHasher}; pub use wl_storage::{ - iter_prefix_post, iter_prefix_pre, PrefixIter, WlStorage, + iter_prefix_post, iter_prefix_pre, PrefixIter, TempWlStorage, WlStorage, }; #[cfg(feature = "wasm-runtime")] diff --git a/core/src/ledger/storage/wl_storage.rs b/core/src/ledger/storage/wl_storage.rs index 8c89d3e6c4..4f328cdef1 100644 --- a/core/src/ledger/storage/wl_storage.rs +++ b/core/src/ledger/storage/wl_storage.rs @@ -23,6 +23,97 @@ where pub storage: Storage, } +/// Temporary storage that can be used for changes that will never be committed +/// to the DB. This is useful for the shell `PrepareProposal` and +/// `ProcessProposal` handlers that should not change state, but need to apply +/// storage changes for replay protection to validate the proposal. +#[derive(Debug)] +pub struct TempWlStorage<'a, D, H> +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + /// Write log + pub write_log: WriteLog, + /// Storage provides access to DB + pub storage: &'a Storage, +} + +impl<'a, D, H> TempWlStorage<'a, D, H> +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + /// Create a temp storage that can mutated in memory, but never committed to + /// DB. + pub fn new(storage: &'a Storage) -> Self { + Self { + write_log: WriteLog::default(), + storage, + } + } +} + +/// Common trait for [`WlStorage`] and [`TempWlStorage`], used to implement +/// storage_api traits. 
+trait WriteLogAndStorage {
+    // DB type
+    type D: DB + for<'iter> DBIter<'iter>;
+    // DB hasher type
+    type H: StorageHasher;
+
+    /// Borrow `WriteLog`
+    fn write_log(&self) -> &WriteLog;
+
+    /// Borrow mutable `WriteLog`
+    fn write_log_mut(&mut self) -> &mut WriteLog;
+
+    /// Borrow `Storage`
+    fn storage(&self) -> &Storage<Self::D, Self::H>;
+}
+
+impl<D, H> WriteLogAndStorage for WlStorage<D, H>
+where
+    D: DB + for<'iter> DBIter<'iter>,
+    H: StorageHasher,
+{
+    type D = D;
+    type H = H;
+
+    fn write_log(&self) -> &WriteLog {
+        &self.write_log
+    }
+
+    fn write_log_mut(&mut self) -> &mut WriteLog {
+        &mut self.write_log
+    }
+
+    fn storage(&self) -> &Storage<D, H> {
+        &self.storage
+    }
+}
+
+impl<D, H> WriteLogAndStorage for TempWlStorage<'_, D, H>
+where
+    D: DB + for<'iter> DBIter<'iter>,
+    H: StorageHasher,
+{
+    type D = D;
+    type H = H;
+
+    fn write_log(&self) -> &WriteLog {
+        &self.write_log
+    }
+
+    fn write_log_mut(&mut self) -> &mut WriteLog {
+        &mut self.write_log
+    }
+
+    fn storage(&self) -> &Storage<D, H> {
+        self.storage
+    }
+}
+
 impl<D, H> WlStorage<D, H>
 where
     D: 'static + DB + for<'iter> DBIter<'iter>,
     H: 'static + StorageHasher,
@@ -204,10 +295,11 @@ where
     }
 }
 
-impl<D, H> StorageRead for WlStorage<D, H>
+impl<T, D, H> StorageRead for T
 where
-    D: DB + for<'iter> DBIter<'iter>,
-    H: StorageHasher,
+    T: WriteLogAndStorage<D = D, H = H>,
+    D: 'static + DB + for<'iter> DBIter<'iter>,
+    H: 'static + StorageHasher,
 {
     type PrefixIter<'iter> = PrefixIter<'iter, D> where Self: 'iter;
 
@@ -216,7 +308,7 @@ where
         key: &storage::Key,
     ) -> storage_api::Result<Option<Vec<u8>>> {
         // try to read from the write log first
-        let (log_val, _gas) = self.write_log.read(key);
+        let (log_val, _gas) = self.write_log().read(key);
         match log_val {
             Some(&write_log::StorageModification::Write { ref value }) => {
                 Ok(Some(value.clone()))
@@ -231,14 +323,17 @@ where
             }
             None => {
                 // when not found in write log, try to read from the storage
-                self.storage.db.read_subspace_val(key).into_storage_result()
+                self.storage()
+                    .db
+                    .read_subspace_val(key)
+                    .into_storage_result()
             }
         }
     }
 
     fn has_key(&self, key: &storage::Key) -> storage_api::Result<bool> {
         // try to read from the write log first
-        let (log_val, _gas) = self.write_log.read(key);
+        let (log_val, _gas) = self.write_log().read(key);
         match log_val {
             Some(&write_log::StorageModification::Write { .. })
             | Some(&write_log::StorageModification::InitAccount { .. })
@@ -249,7 +344,7 @@ where
             }
             None => {
                 // when not found in write log, try to check the storage
-                self.storage.block.tree.has_key(key).into_storage_result()
+                self.storage().block.tree.has_key(key).into_storage_result()
             }
         }
     }
 
@@ -259,7 +354,7 @@ where
         prefix: &storage::Key,
     ) -> storage_api::Result<Self::PrefixIter<'_>> {
         let (iter, _gas) =
-            iter_prefix_post(&self.write_log, &self.storage, prefix);
+            iter_prefix_post(self.write_log(), self.storage(), prefix);
         Ok(iter)
     }
 
@@ -271,40 +366,41 @@ where
     }
 
     fn get_chain_id(&self) -> std::result::Result<String, storage_api::Error> {
-        Ok(self.storage.chain_id.to_string())
+        Ok(self.storage().chain_id.to_string())
     }
 
     fn get_block_height(
         &self,
     ) -> std::result::Result<BlockHeight, storage_api::Error> {
-        Ok(self.storage.block.height)
+        Ok(self.storage().block.height)
     }
 
     fn get_block_hash(
         &self,
     ) -> std::result::Result<BlockHash, storage_api::Error> {
-        Ok(self.storage.block.hash.clone())
+        Ok(self.storage().block.hash.clone())
     }
 
     fn get_block_epoch(
         &self,
     ) -> std::result::Result<Epoch, storage_api::Error> {
-        Ok(self.storage.block.epoch)
+        Ok(self.storage().block.epoch)
     }
 
     fn get_tx_index(
         &self,
     ) -> std::result::Result<TxIndex, storage_api::Error> {
-        Ok(self.storage.tx_index)
+        Ok(self.storage().tx_index)
     }
 
     fn get_native_token(&self) -> storage_api::Result<Address>
{ - Ok(self.storage.native_token.clone()) + Ok(self.storage().native_token.clone()) } } -impl StorageWrite for WlStorage +impl StorageWrite for T where + T: WriteLogAndStorage, D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, { @@ -313,12 +409,18 @@ where key: &storage::Key, val: impl AsRef<[u8]>, ) -> storage_api::Result<()> { - self.write_log + let _ = self + .write_log_mut() .protocol_write(key, val.as_ref().to_vec()) - .into_storage_result() + .into_storage_result(); + Ok(()) } fn delete(&mut self, key: &storage::Key) -> storage_api::Result<()> { - self.write_log.protocol_delete(key).into_storage_result() + let _ = self + .write_log_mut() + .protocol_delete(key) + .into_storage_result(); + Ok(()) } } From 2bb8eadd994e376d99955da60904a255155c9720 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Fri, 10 Feb 2023 13:05:31 +0100 Subject: [PATCH 02/58] changelog: add #1051 --- .changelog/unreleased/improvements/1051-temp-wl-storage.md | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/unreleased/improvements/1051-temp-wl-storage.md diff --git a/.changelog/unreleased/improvements/1051-temp-wl-storage.md b/.changelog/unreleased/improvements/1051-temp-wl-storage.md new file mode 100644 index 0000000000..5be4294bd6 --- /dev/null +++ b/.changelog/unreleased/improvements/1051-temp-wl-storage.md @@ -0,0 +1,3 @@ +- Added a TempWlStorage for storage_api::StorageRead/Write + in ABCI++ prepare/process proposal handler. + ([#1051](https://github.com/anoma/namada/pull/1051)) \ No newline at end of file From 0b1aa744b4d9359dbb1360af3c3a9c26fd7d3413 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 29 Dec 2022 15:41:21 +0100 Subject: [PATCH 03/58] Updates replay protection specs --- .../src/base-ledger/replay-protection.md | 1116 +++++++++++++---- 1 file changed, 846 insertions(+), 270 deletions(-) diff --git a/documentation/specs/src/base-ledger/replay-protection.md b/documentation/specs/src/base-ledger/replay-protection.md index 1094460cad..71a5581e38 100644 --- a/documentation/specs/src/base-ledger/replay-protection.md +++ b/documentation/specs/src/base-ledger/replay-protection.md @@ -1,232 +1,523 @@ # Replay Protection -Replay protection is a mechanism to prevent _replay attacks_, which consist of a malicious user resubmitting an already executed transaction (also mentioned as tx in this document) to the ledger. +Replay protection is a mechanism to prevent _replay attacks_, which consist of a +malicious user resubmitting an already executed transaction (also mentioned as +tx in this document) to the ledger. -A replay attack causes the state of the machine to deviate from the intended one (from the perspective of the parties involved in the original transaction) and causes economic damage to the fee payer of the original transaction, who finds himself paying more than once. Further economic damage is caused if the transaction involved the moving of value in some form (e.g. a transfer of tokens) with the sender being deprived of more value than intended. +A replay attack causes the state of the machine to deviate from the intended one +(from the perspective of the parties involved in the original transaction) and +causes economic damage to the fee payer of the original transaction, who finds +himself paying more than once. Further economic damage is caused if the +transaction involved the moving of value in some form (e.g. a transfer of +tokens) with the sender being deprived of more value than intended. 
-Since the original transaction was already well formatted for the protocol's rules, the attacker doesn't need to rework it, making this attack relatively easy. +Since the original transaction was already well formatted for the protocol's +rules, the attacker doesn't need to rework it, making this attack relatively +easy. -Of course, a replay attack makes sense only if the attacker differs from the _source_ of the original transaction, as a user will always be able to generate another semantically identical transaction to submit without the need to replay the same one. +Of course, a replay attack makes sense only if the attacker differs from the +_source_ of the original transaction, as a user will always be able to generate +another semantically identical transaction to submit without the need to replay +the same one. + +To prevent this scenario, Namada supports a replay protection mechanism to +prevent the execution of already processed transactions. -To prevent this scenario, Namada supports a replay protection mechanism to prevent the execution of already processed transactions. - ## Context -This section will illustrate the pre-existing context in which we are going to implement the replay protection mechanism. +This section will illustrate the pre-existing context in which we are going to +implement the replay protection mechanism. ### Encryption-Authentication -The current implementation of Namada is built on top of Tendermint which provides an encrypted and authenticated communication channel between every two nodes to prevent a _man-in-the-middle_ attack (see the detailed [spec](https://github.com/tendermint/tendermint/blob/29e5fbcc648510e4763bd0af0b461aed92c21f30/spec/p2p/peer.md)). +The current implementation of Namada is built on top of Tendermint which +provides an encrypted and authenticated communication channel between every two +nodes to prevent a _man-in-the-middle_ attack (see the detailed +[spec](https://github.com/tendermint/tendermint/blob/29e5fbcc648510e4763bd0af0b461aed92c21f30/spec/p2p/peer.md)). -The Namada protocol relies on this substrate to exchange transactions (messages) that will define the state transition of the ledger. More specifically, a transaction is composed of two parts: a `WrapperTx` and an inner `Tx` +The Namada protocol relies on this substrate to exchange transactions (messages) +that will define the state transition of the ledger. More specifically, a +transaction is composed of two parts: a `WrapperTx` and an inner `Tx` ```rust pub struct WrapperTx { - /// The fee to be payed for including the tx - pub fee: Fee, - /// Used to determine an implicit account of the fee payer - pub pk: common::PublicKey, - /// The epoch in which the tx is to be submitted. This determines - /// which decryption key will be used - pub epoch: Epoch, - /// Max amount of gas that can be used when executing the inner tx - pub gas_limit: GasLimit, - /// the encrypted payload - pub inner_tx: EncryptedTx, - /// sha-2 hash of the inner transaction acting as a commitment - /// the contents of the encrypted payload - pub tx_hash: Hash, + /// The fee to be payed for including the tx + pub fee: Fee, + /// Used to determine an implicit account of the fee payer + pub pk: common::PublicKey, + /// The epoch in which the tx is to be submitted. 
This determines + /// which decryption key will be used + pub epoch: Epoch, + /// Max amount of gas that can be used when executing the inner tx + pub gas_limit: GasLimit, + /// The optional unshielding tx for fee payment + pub unshield: Option, + /// the encrypted payload + pub inner_tx: EncryptedTx, + /// sha-2 hash of the inner transaction acting as a commitment + /// the contents of the encrypted payload + pub tx_hash: Hash, } pub struct Tx { - pub code: Vec, - pub data: Option>, - pub timestamp: DateTimeUtc, + pub code: Vec, + pub data: Option>, + pub timestamp: DateTimeUtc, } -``` +``` -The wrapper transaction is composed of some metadata, the encrypted inner transaction itself and the hash of this. The inner `Tx` transaction carries the Wasm code to be executed and the associated data. +The wrapper transaction is composed of some metadata, an optional unshielding tx +for fee payment (see [fee specs](../economics/fee-system.md)), the encrypted +inner transaction itself and the hash of this. The inner `Tx` transaction +carries the Wasm code to be executed and the associated data. A transaction is constructed as follows: 1. The struct `Tx` is produced -2. The hash of this transaction gets signed by the author, producing another `Tx` where the data field holds the concatenation of the original data and the signature (`SignedTxData`) -3. The produced transaction is encrypted and embedded in a `WrapperTx`. The encryption step is there for a future implementation of DKG (see [Ferveo](https://github.com/anoma/ferveo)) -4. Finally, the `WrapperTx` gets converted to a `Tx` struct, signed over its hash (same as step 2, relying on `SignedTxData`), and submitted to the network - -Note that the signer of the `WrapperTx` and that of the inner one don't need to coincide, but the signer of the wrapper will be charged with gas and fees. -In the execution steps: +2. The hash of this transaction gets signed by the author, producing another + `Tx` where the data field holds the concatenation of the original data and + the signature (`SignedTxData`) +3. The produced transaction is encrypted and embedded in a `WrapperTx`. The + encryption step is there for a future implementation of DKG (see + [Ferveo](https://github.com/anoma/ferveo)) +4. Finally, the `WrapperTx` gets converted to a `Tx` struct, signed over its + hash (same as step 2, relying on `SignedTxData`), and submitted to the + network + +Note that the signer of the `WrapperTx` and that of the inner one don't need to +coincide, but the signer of the wrapper will be charged with gas and fees. In +the execution steps: 1. The `WrapperTx` signature is verified and, only if valid, the tx is processed -2. In the following height the proposer decrypts the inner tx, checks that the hash matches that of the `tx_hash` field and, if everything went well, includes the decrypted tx in the proposed block +2. In the following height the proposer decrypts the inner tx, checks that the + hash matches that of the `tx_hash` field and, if everything went well, + includes the decrypted tx in the proposed block 3. The inner tx will then be executed by the Wasm runtime -4. After the execution, the affected validity predicates (also mentioned as VP in this document) will check the storage changes and (if relevant) the signature of the transaction: if the signature is not valid, the VP will deem the transaction invalid and the changes won't be applied to the storage +4. 
After the execution, the affected validity predicates (also mentioned as VP + in this document) will check the storage changes and (if relevant) the + signature of the transaction: if the signature is not valid, the VP will deem + the transaction invalid and the changes won't be applied to the storage -The signature checks effectively prevent any tampering with the transaction data because that would cause the checks to fail and the transaction to be rejected. -For a more in-depth view, please refer to the [Namada execution spec](./execution.md). +The signature checks effectively prevent any tampering with the transaction data +because that would cause the checks to fail and the transaction to be rejected. +For a more in-depth view, please refer to the +[Namada execution spec](./execution.md). ### Tendermint replay protection -The underlying consensus engine, [Tendermint](https://github.com/tendermint/tendermint/blob/29e5fbcc648510e4763bd0af0b461aed92c21f30/spec/abci/apps.md), provides a first layer of protection in its mempool which is based on a cache of previously seen transactions. This mechanism is actually aimed at preventing a block proposer from including an already processed transaction in the next block, which can happen when the transaction has been received late. Of course, this also acts as a countermeasure against intentional replay attacks. This check though, like all the checks performed in `CheckTx`, is weak, since a malicious validator could always propose a block containing invalid transactions. There's therefore the need for a more robust replay protection mechanism implemented directly in the application. +The underlying consensus engine, +[Tendermint](https://github.com/tendermint/tendermint/blob/29e5fbcc648510e4763bd0af0b461aed92c21f30/spec/abci/apps.md), +provides a first layer of protection in its mempool which is based on a cache of +previously seen transactions. This mechanism is actually aimed at preventing a +block proposer from including an already processed transaction in the next +block, which can happen when the transaction has been received late. Of course, +this also acts as a countermeasure against intentional replay attacks. This +check though, like all the checks performed in `CheckTx`, is weak, since a +malicious validator could always propose a block containing invalid +transactions. There's therefore the need for a more robust replay protection +mechanism implemented directly in the application. ## Implementation -Namada replay protection consists of three parts: the hash-based solution for both `EncryptedTx` (also called the `InnerTx`) and `WrapperTx`, a way to mitigate replay attacks in case of a fork and a concept of a lifetime for the transactions. +Namada replay protection consists of three parts: the hash-based solution for +both `EncryptedTx` (also called the `InnerTx`) and `WrapperTx`, a way to +mitigate replay attacks in case of a fork and a concept of a lifetime for the +transactions. ### Hash register -The actual Wasm code and data for the transaction are encapsulated inside a struct `Tx`, which gets encrypted as an `EncryptedTx` and wrapped inside a `WrapperTx` (see the [relative](#encryption-authentication) section). This inner transaction must be protected from replay attacks because it carries the actual semantics of the state transition. Moreover, even if the wrapper transaction was protected from replay attacks, an attacker could extract the inner transaction, rewrap it, and replay it. 
Note that for this attack to work, the attacker will need to sign the outer transaction himself and pay gas and fees for that, but this could still cause much greater damage to the parties involved in the inner transaction. - -`WrapperTx` is the only type of transaction currently accepted by the ledger. It must be protected from replay attacks because, if it wasn't, a malicious user could replay the transaction as is. Even if the inner transaction implemented replay protection or, for any reason, wasn't accepted, the signer of the wrapper would still pay for gas and fees, effectively suffering economic damage. - -To prevent the replay of both these transactions we will rely on a set of already processed transactions' digests that will be kept in storage. These digests will be computed on the **unsigned** transactions, to support replay protection even for [multisigned](multisignature.md) transactions: in this case, if hashes were taken from the signed transactions, a different set of signatures on the same tx would produce a different hash, effectively allowing for a replay. To support this, we'll need a subspace in storage headed by a `ReplayProtection` internal address: +The actual Wasm code and data for the transaction are encapsulated inside a +struct `Tx`, which gets encrypted as an `EncryptedTx` and wrapped inside a +`WrapperTx` (see the [relative](#encryption-authentication) section). This inner +transaction must be protected from replay attacks because it carries the actual +semantics of the state transition. Moreover, even if the wrapper transaction was +protected from replay attacks, an attacker could extract the inner transaction, +rewrap it, and replay it. Note that for this attack to work, the attacker will +need to sign the outer transaction himself and pay gas and fees for that, but +this could still cause much greater damage to the parties involved in the inner +transaction. + +`WrapperTx` is the only type of transaction currently accepted by the ledger. It +must be protected from replay attacks because, if it wasn't, a malicious user +could replay the transaction as is. Even if the inner transaction implemented +replay protection or, for any reason, wasn't accepted, the signer of the wrapper +would still pay for gas and fees, effectively suffering economic damage. + +To prevent the replay of both these transactions we will rely on a set of +already processed transactions' digests that will be kept in storage. These +digests will be computed on the **unsigned** transactions, to support replay +protection even for [multisigned](multisignature.md) transactions: in this case, +if hashes were taken from the signed transactions, a different set of signatures +on the same tx would produce a different hash, effectively allowing for a +replay. To support this, we'll need a subspace in storage headed by a +`ReplayProtection` internal address: ``` -/$ReplayProtectionAddress/$tx0_hash: None -/$ReplayProtectionAddress/$tx1_hash: None -/$ReplayProtectionAddress/$tx2_hash: None +/\$ReplayProtectionAddress/\$tx0_hash: None +/\$ReplayProtectionAddress/\$tx1_hash: None +/\$ReplayProtectionAddress/\$tx2_hash: None ... ``` -The hashes will form the last part of the path to allow for a fast storage lookup. - -The consistency of the storage subspace is of critical importance for the correct working of the replay protection mechanism. 
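As a rough, self-contained illustration of the layout above (not the actual Namada
implementation: the address string, the hex encoding and the in-memory set are
stand-ins for the real internal address, hash type and storage backend), deriving
the replay-protection key from the hash of an **unsigned** transaction and
rejecting an already-registered digest could look like this:

```rust
use std::collections::HashSet;

/// Stand-in for the `ReplayProtection` internal address (illustrative only).
const REPLAY_PROTECTION_ADDRESS: &str = "ReplayProtectionAddress";

/// Build the storage key `/$ReplayProtectionAddress/$tx_hash` for a digest
/// computed over the *unsigned* transaction bytes, so that attaching a
/// different set of signatures cannot change the key.
fn replay_protection_key(unsigned_tx_hash: &[u8; 32]) -> String {
    let hex: String = unsigned_tx_hash
        .iter()
        .map(|byte| format!("{:02x}", byte))
        .collect();
    format!("/{}/{}", REPLAY_PROTECTION_ADDRESS, hex)
}

/// Reject a digest that was already registered, otherwise record it.
fn check_and_register(
    seen: &mut HashSet<String>,
    unsigned_tx_hash: &[u8; 32],
) -> Result<(), String> {
    let key = replay_protection_key(unsigned_tx_hash);
    if seen.contains(&key) {
        return Err(format!("replay detected for {}", key));
    }
    seen.insert(key);
    Ok(())
}

fn main() {
    let mut seen = HashSet::new();
    let digest = [0xab; 32];
    assert!(check_and_register(&mut seen, &digest).is_ok());
    // Resubmitting the same (unsigned) transaction is rejected.
    assert!(check_and_register(&mut seen, &digest).is_err());
}
```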
To protect it, a validity predicate will check that no changes to this subspace are applied by any wasm transaction, as those should only be available from protocol. - -Both in `mempool_validation` and `process_proposal` we will perform a check (together with others, see the [relative](#wrapper-checks) section) on both the digests against the storage to check that neither of the transactions has already been executed: if this doesn't hold, the `WrapperTx` will not be included into the mempool/block respectively. If both checks pass then the transaction is included in the block and executed. In the `finalize_block` function we will add the transaction's hash to storage to prevent re-executions. We will first add the hash of the wrapper transaction. After that, in the following block, we deserialize the inner transaction, check the correct order of the transactions in the block and execute the tx: if it runs out of gas then we'll avoid storing its hash to allow rewrapping and executing the transaction, otherwise we'll add the hash in storage (both in case of success or failure of the tx). +The hashes will form the last part of the path to allow for a fast storage +lookup. + +The consistency of the storage subspace is of critical importance for the +correct working of the replay protection mechanism. To protect it, a validity +predicate will check that no changes to this subspace are applied by any wasm +transaction, as those should only be available from protocol. + +Both in `mempool_validation` and `process_proposal` we will perform a check +(together with others, see the [relative](#wrapper-checks) section) on both the +digests against the storage to check that neither of the transactions has +already been executed: if this doesn't hold, the `WrapperTx` will not be +included into the mempool/block respectively. If both checks pass then the +transaction is included in the block and executed. In the `finalize_block` +function we will add the transaction's hash to storage to prevent re-executions. +We will first add the hash of the wrapper transaction. After that, in the +following block, we deserialize the inner transaction, check the correct order +of the transactions in the block and execute the tx: if it runs out of gas then +we'll avoid storing its hash to allow rewrapping and executing the transaction, +otherwise we'll add the hash in storage (both in case of success or failure of +the tx). + +#### Optional unshielding + +The optional `unshield` field is supposed to carry an unshielding masp +`Transfer`. Given this assumption, there's no need to manage it since masp has +an internal replay protection mechanism. + +Still, since this field represents a valid, signed `Tx`, there are three +possible attacks that can be run by leveraging this field: + +1. If the wrapper signer constructs an `unshield` tx that actually encodes + another type of transaction, then this one can be extracted and executed + separately +2. A malicious user could extract this tx before it makes it to a block and play + it in advance +3. A combination of the previous two + +In the first case, the unshielding operation would fail because of the checks +run in protocol, but the tx itself could be extracted, wrapped and submitted to +the network. This issue could be solved with the mechanism explained in the +previous section. + +The second attack, instead, is performed before the original tx is placed in a +block and, therefore, cannot be prevented with a replay protection mechanism. 
+The only result of this attack would be that the original wrapper transaction +would fail since it would attempt to replay a masp transfer: in this case, the +submitter of the original tx can recreate it without the need for the +unshielding operation since the attacker has already performed it. + +In the last case the unshielding transaction (which is not a masp transfer) +could be encrypted, wrapped and executed before the original transaction is +inserted in a block. When the latter gets executed the protocol checks detect +that this is not a masp unshielding transfer and reject it. + +Given that saving the hash of the unshielding transaction is redundant in case +of a proper masp transfer and it doesn't prevent the second scenario in case of +non-masp transaction, Namada does not implement the replay protection mechanism +on the unshielding transaction, whose correctness is left to the wrapper signer +and the masp validity predicate (in case the unshielding tx was indeed a correct +masp unshield transfer). The combination of the fee system, the validity +predicates set and the protocol checks on the unshielding operation guarantees +that even if one of the attacks explained in this section is performed: + +- The original wrapper signer doesn't suffer economic damage (the wrapper + containing the invalid unshielding forces the block rejection without fee + collection) +- The attacker has to pay fees on the rewrapped tx preventing him to submit + these transactions for free +- The invalid unshielding transaction must still be a valid transaction per the + VPs triggered ### Forks -In the case of a fork, the transaction hash is not enough to prevent replay attacks. Transactions, in fact, could still be replayed on the other branch as long as their format is kept unchanged and the counters in storage match. +In the case of a fork, the transaction hash is not enough to prevent replay +attacks. Transactions, in fact, could still be replayed on the other branch as +long as their format is kept unchanged and the counters in storage match. -To mitigate this problem, transactions will need to carry a `ChainId` identifier to tie them to a specific fork. This field needs to be added to the `Tx` struct so that it applies to both `WrapperTx` and `EncryptedTx`: +To mitigate this problem, transactions will need to carry a `ChainId` identifier +to tie them to a specific fork. This field needs to be added to the `Tx` struct +so that it applies to both `WrapperTx` and `EncryptedTx`: ```rust pub struct Tx { - pub code: Vec, - pub data: Option>, - pub timestamp: DateTimeUtc, - pub chain_id: ChainId + pub code: Vec, + pub data: Option>, + pub timestamp: DateTimeUtc, + pub chain_id: ChainId } ``` -This new field will be signed just like the other ones and is therefore subject to the same guarantees explained in the [initial](#encryption-authentication) section. The validity of this identifier will be checked in `process_proposal` for both the outer and inner tx: if a transaction carries an unexpected chain id, it won't be applied, meaning that no modifications will be applied to storage. +This new field will be signed just like the other ones and is therefore subject +to the same guarantees explained in the [initial](#encryption-authentication) +section. The validity of this identifier will be checked in `process_proposal` +for both the outer and inner tx: if a transaction carries an unexpected chain +id, it won't be applied, meaning that no modifications will be applied to +storage. 
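The sketch below illustrates the kind of check described in this section; the
types are simplified stand-ins and this is not the actual `process_proposal`
code:

```rust
/// Simplified stand-in for the `ChainId` carried by every transaction.
#[derive(Debug, Clone, PartialEq, Eq)]
struct ChainId(String);

/// Only the field relevant to this check is kept in the sketch.
struct Tx {
    chain_id: ChainId,
}

/// A tx tied to another chain (e.g. the other branch of a fork) is rejected
/// before it can modify storage.
fn check_chain_id(node_chain_id: &ChainId, tx: &Tx) -> Result<(), String> {
    if tx.chain_id != *node_chain_id {
        return Err(format!(
            "expected chain id {:?}, got {:?}",
            node_chain_id, tx.chain_id
        ));
    }
    Ok(())
}

fn main() {
    let local = ChainId("chain-a".to_string());
    let forked = Tx { chain_id: ChainId("chain-b".to_string()) };
    assert!(check_chain_id(&local, &forked).is_err());
    let ok = Tx { chain_id: local.clone() };
    assert!(check_chain_id(&local, &ok).is_ok());
}
```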
### Transaction lifetime -In general, a transaction is valid at the moment of submission, but after that, a series of external factors (ledger state, etc.) might change the mind of the submitter who's now not interested in the execution of the transaction anymore. - -We have to introduce the concept of a lifetime (or timeout) for the transactions: basically, the `Tx` struct will hold an extra field called `expiration` stating the maximum `DateTimeUtc` up until which the submitter is willing to see the transaction executed. After the specified time, the transaction will be considered invalid and discarded regardless of all the other checks. - -By introducing this new field we are setting a new constraint in the transaction's contract, where the ledger will make sure to prevent the execution of the transaction after the deadline and, on the other side, the submitter commits himself to the result of the execution at least until its expiration. If the expiration is reached and the transaction has not been executed the submitter can decide to submit a new transaction if he's still interested in the changes carried by it. - -In our design, the `expiration` will hold until the transaction is executed: once it's executed, either in case of success or failure, the tx hash will be written to storage and the transaction will not be replayable. In essence, the transaction submitter commits himself to one of these three conditions: +In general, a transaction is valid at the moment of submission, but after that, +a series of external factors (ledger state, etc.) might change the mind of the +submitter who's now not interested in the execution of the transaction anymore. + +We have to introduce the concept of a lifetime (or timeout) for the +transactions: basically, the `Tx` struct will hold an extra field called +`expiration` stating the maximum `DateTimeUtc` up until which the submitter is +willing to see the transaction executed. After the specified time, the +transaction will be considered invalid and discarded regardless of all the other +checks. + +By introducing this new field we are setting a new constraint in the +transaction's contract, where the ledger will make sure to prevent the execution +of the transaction after the deadline and, on the other side, the submitter +commits himself to the result of the execution at least until its expiration. If +the expiration is reached and the transaction has not been executed the +submitter can decide to submit a new transaction if he's still interested in the +changes carried by it. + +In our design, the `expiration` will hold until the transaction is executed: +once it's executed, either in case of success or failure, the tx hash will be +written to storage and the transaction will not be replayable. In essence, the +transaction submitter commits himself to one of these three conditions: - Transaction is invalid regardless of the specific state -- Transaction is executed (either with success or not) and the transaction hash is saved in the storage +- Transaction is executed (either with success or not) and the transaction hash + is saved in the storage - Expiration time has passed The first condition satisfied will invalidate further executions of the same tx. -In anticipation of DKG implementation, the current struct `WrapperTx` holds a field `epoch` stating the epoch in which the tx should be executed. 
This is because Ferveo will produce a new public key each epoch, effectively limiting the lifetime of the transaction (see section 2.2.2 of the [documentation](https://eprint.iacr.org/2022/898.pdf)). Unfortunately, for replay protection, a resolution of 1 epoch (~ 1 day) is too low for the possible needs of the submitters, therefore we need the `expiration` field to hold a maximum `DateTimeUtc` to increase resolution down to a single block (~ 10 seconds). +In anticipation of DKG implementation, the current struct `WrapperTx` holds a +field `epoch` stating the epoch in which the tx should be executed. This is +because Ferveo will produce a new public key each epoch, effectively limiting +the lifetime of the transaction (see section 2.2.2 of the +[documentation](https://eprint.iacr.org/2022/898.pdf)). Unfortunately, for +replay protection, a resolution of 1 epoch (~ 1 day) is too low for the possible +needs of the submitters, therefore we need the `expiration` field to hold a +maximum `DateTimeUtc` to increase resolution down to a single block (~ 10 +seconds). ```rust pub struct Tx { - pub code: Vec, - pub data: Option>, - pub timestamp: DateTimeUtc, - pub chain_id: ChainId, - /// Lifetime of the transaction, also determines which decryption key will be used - pub expiration: DateTimeUtc, + pub code: Vec, + pub data: Option>, + pub timestamp: DateTimeUtc, + pub chain_id: ChainId, + /// Lifetime of the transaction, also determines which decryption key will be used + pub expiration: DateTimeUtc, } pub struct WrapperTx { - /// The fee to be payed for including the tx - pub fee: Fee, - /// Used to determine an implicit account of the fee payer - pub pk: common::PublicKey, - /// Max amount of gas that can be used when executing the inner tx - pub gas_limit: GasLimit, - /// the encrypted payload - pub inner_tx: EncryptedTx, - /// sha-2 hash of the inner transaction acting as a commitment - /// the contents of the encrypted payload - pub tx_hash: Hash, + /// The fee to be payed for including the tx + pub fee: Fee, + /// Used to determine an implicit account of the fee payer + pub pk: common::PublicKey, + /// Max amount of gas that can be used when executing the inner tx + pub gas_limit: GasLimit, + /// the encrypted payload + pub inner_tx: EncryptedTx, + /// sha-2 hash of the inner transaction acting as a commitment + /// the contents of the encrypted payload + pub tx_hash: Hash, } ``` -Since we now have more detailed information about the desired lifetime of the transaction, we can remove the `epoch` field and rely solely on `expiration`. Now, the producer of the inner transaction should make sure to set a sensible value for this field, in the sense that it should not span more than one epoch. If this happens, then the transaction will be correctly decrypted only in a subset of the desired lifetime (the one expecting the actual key used for the encryption), while, in the following epochs, the transaction will fail decryption and won't be executed. In essence, the `expiration` parameter can only restrict the implicit lifetime within the current epoch, it can not surpass it as that would make the transaction fail in the decryption phase. - -The subject encrypting the inner transaction will also be responsible for using the appropriate public key for encryption relative to the targeted time. - -The wrapper transaction will match the `expiration` of the inner for correct execution. 
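As a minimal sketch of the expiration check described above, using
`std::time::SystemTime` in place of `DateTimeUtc` (this is not the actual
ledger code):

```rust
use std::time::{Duration, SystemTime};

/// Simplified stand-in for a transaction carrying the `expiration` field.
struct Tx {
    expiration: SystemTime,
}

/// Past the expiration the tx is invalid regardless of any other check.
fn check_expiration(block_time: SystemTime, tx: &Tx) -> Result<(), String> {
    if block_time > tx.expiration {
        Err("transaction expired".to_string())
    } else {
        Ok(())
    }
}

fn main() {
    let now = SystemTime::now();
    let tx = Tx { expiration: now + Duration::from_secs(60) };
    assert!(check_expiration(now, &tx).is_ok());
    assert!(check_expiration(now + Duration::from_secs(120), &tx).is_err());
}
```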
Note that we need this field also for the wrapper to anticipate the check at mempool/proposal evaluation time, but also to prevent someone from inserting a wrapper transaction after the corresponding inner has expired forcing the wrapper signer to pay for the fees. +Since we now have more detailed information about the desired lifetime of the +transaction, we can remove the `epoch` field and rely solely on `expiration`. +Now, the producer of the inner transaction should make sure to set a sensible +value for this field, in the sense that it should not span more than one epoch. +If this happens, then the transaction will be correctly decrypted only in a +subset of the desired lifetime (the one expecting the actual key used for the +encryption), while, in the following epochs, the transaction will fail +decryption and won't be executed. In essence, the `expiration` parameter can +only restrict the implicit lifetime within the current epoch, it can not surpass +it as that would make the transaction fail in the decryption phase. + +The subject encrypting the inner transaction will also be responsible for using +the appropriate public key for encryption relative to the targeted time. + +The wrapper transaction will match the `expiration` of the inner for correct +execution. Note that we need this field also for the wrapper to anticipate the +check at mempool/proposal evaluation time, but also to prevent someone from +inserting a wrapper transaction after the corresponding inner has expired +forcing the wrapper signer to pay for the fees. ### Wrapper checks -In `mempool_validation` and `process_proposal` we will perform some checks on the wrapper tx to validate it. These will involve: - -- Valid signature -- Enough funds to pay the fee -- Valid chainId -- Valid transaction hash -- Valid expiration - -These checks can all be done before executing the transactions themselves (the check on the gas cannot be done ahead of time). If any of these fails, the transaction should be considered invalid and the action to take will be one of the followings: - -1. If the checks fail on the signature, chainId, expiration or transaction hash, then this transaction will be forever invalid, regardless of the possible evolution of the ledger's state. There's no need to include the transaction in the block. Moreover, we **cannot** include this transaction in the block to charge a fee (as a sort of punishment) because these errors may not depend on the signer of the tx (could be due to malicious users or simply a delay in the tx inclusion in the block) -2. If the checks fail _only_ because of an insufficient balance, the wrapper should be kept in mempool for a future play in case the funds should become available -3. If all the checks pass validation we will include the transaction in the block to store the hash and charge the fee - -The `expiration` parameter also justifies step 2 of the previous bullet points which states that if the validity checks fail only because of an insufficient balance to pay for fees then the transaction should be kept in mempool for future execution. Without it, the transaction could be potentially executed at any future moment, possibly going against the mutated interests of the submitter. With the expiration parameter, now, the submitter commits himself to accept the execution of the transaction up to the specified time: it's going to be his responsibility to provide a sensible value for this parameter. 
Given this constraint the transaction will be kept in memepool up until the expiration (since it would become invalid after that in any case), to prevent the mempool from increasing too much in size. - -This mechanism can also be applied to another scenario. Suppose a transaction was not propagated to the network by a node (or a group of colluding nodes). Now, this tx might be valid, but it doesn't get inserted into a block. Without an expiration, this tx can be replayed (better, applied, since it was never executed in the first place) at a future moment in time when the submitter might not be willing to execute it anymore. +In `mempool_validation` we will perform some checks on the wrapper tx to +validate it. These will involve: + +- Signature +- `GasLimit` is below the block gas limit +- `Fees` are paid with an accepted token and match the minimum amount required +- `ChainId` +- Transaction hash +- Expiration +- Unshielding tx (if present), is indeed a masp unshielding transfer + +For gas, fee and the unshielding tx more details can be found in the +[fee specs](../economics/fee-system.md). + +These checks can all be done before executing the transactions themselves. If +any of these fails, the transaction should be considered invalid and the action +to take will be one of the followings: + +1. If the checks fail on the signature, chainId, expiration, transaction hash or + the unshielding tx, then this transaction will be forever invalid, regardless + of the possible evolution of the ledger's state. There's no need to include + the transaction in the block. Moreover, we **cannot** include this + transaction in the block to charge a fee (as a sort of punishment) because + these errors may not depend on the signer of the tx (could be due to + malicious users or simply a delay in the tx inclusion in the block) +2. If the checks fail on `Fee` or `GasLimit` the transaction should be + discarded. In theory the gas limit of a block is a Namada parameter + controlled by governance, so there's a chance that the transaction could + become valid in the future should this limit be raised. The same applies to + the token whitelist and the minimum fee required. However we can expect a + slow rate of change of these parameters so we can reject the tx (the + submitter can always resubmit it at a future time) + +If instead all the checks pass validation we will include the transaction in the +block to store the hash and charge the fee. + +All these checks are also run in `process_proposal` with a few additions: + +- Wrapper signer has enough funds to pay the fee. This check should not be done + in mempool because the funds available for a certain address are variable in + time and should only be checked at block inclusion time. If any of the checks + fail here, the entire block is rejected forcing a new Tendermint round to + begin (see a better explanation of this choice in the + [relative](#block-rejection) section) +- The unshielding tx (if present) releases the minimum amount of tokens required + to pay fees +- The unshielding tx (if present) runs succesffuly + +The `expiration` parameter also justifies that the check on funds is only done +in `process_proposal` and not in mempool. Without it, the transaction could be +potentially executed at any future moment, possibly going against the mutated +interests of the submitter. 
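To illustrate how the proposal-time checks above can be run without committing
any state, the following sketch uses a throw-away overlay in the spirit of the
`TempWlStorage` introduced earlier in this patch series; the types are
simplified stand-ins, not the real shell code:

```rust
use std::collections::HashSet;

/// Committed state: hex digests already written under the replay protection
/// subspace (a plain set stands in for the real storage).
struct CommittedStorage {
    seen_hashes: HashSet<String>,
}

/// A throw-away overlay: writes made while validating a proposal stay in
/// memory and are never committed.
struct TempOverlay<'a> {
    committed: &'a CommittedStorage,
    pending: HashSet<String>,
}

impl<'a> TempOverlay<'a> {
    fn new(committed: &'a CommittedStorage) -> Self {
        Self {
            committed,
            pending: HashSet::new(),
        }
    }

    /// True if the digest was committed earlier or already appeared in this
    /// same proposed block.
    fn contains(&self, tx_hash: &str) -> bool {
        self.committed.seen_hashes.contains(tx_hash) || self.pending.contains(tx_hash)
    }

    fn insert(&mut self, tx_hash: &str) {
        self.pending.insert(tx_hash.to_string());
    }
}

/// Validate the wrapper digests of a proposed block without touching the
/// committed state.
fn validate_proposal(committed: &CommittedStorage, wrapper_hashes: &[&str]) -> bool {
    let mut overlay = TempOverlay::new(committed);
    for &hash in wrapper_hashes {
        if overlay.contains(hash) {
            return false;
        }
        overlay.insert(hash);
    }
    true
}

fn main() {
    let committed = CommittedStorage {
        seen_hashes: HashSet::from(["aa".to_string()]),
    };
    assert!(validate_proposal(&committed, &["bb", "cc"]));
    // Replaying a committed tx, or repeating one inside the block, fails.
    assert!(!validate_proposal(&committed, &["aa"]));
    assert!(!validate_proposal(&committed, &["bb", "bb"]));
}
```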
With the expiration parameter, now, the submitter +commits himself to accept the execution of the transaction up to the specified +time: it's going to be his responsibility to provide a sensible value for this +parameter. Given this constraint the transaction will be kept in mempool up +until the expiration (since it would become invalid after that in any case), to +prevent the mempool from increasing too much in size. + +This mechanism can also be applied to another scenario. Suppose a transaction +was not propagated to the network by a node (or a group of colluding nodes). +Now, this tx might be valid, but it doesn't get inserted into a block. Without +an expiration, this tx can be replayed (better, applied, since it was never +executed in the first place) at a future moment in time when the submitter might +not be willing to execute it any more. + +### Block rejection + +To prevent a block proposer from including invalid transactions in a block, the +validators will reject the entire block in case they find a single invalid +wrapper transaction. + +Rejecting the single invalid transaction while still accepting the block is not +a valid solution. In this case, in fact, the block proposer has no incentive to +include invalid transactions in the block because these would gain him no fees +but, at the same time, he doesn't really have a disincentive to not include +them, since in this case the validators will simply discard the invalid tx but +accept the rest of the block granting the proposer his fees on all the other +transactions. This, of course, applies in case the proposer has no other valid +tx to include. A malicious proposer could act like this to spam the block +without suffering any penalty. + +To recap, a block is rejected when at least one of the following conditions is +met: + +- At least one `WrapperTx` is invalid with respect to the checks listed in the + [relative section](#wrapper-checks) +- The order/number of decrypted txs differs from the order/number committed in + the previous block ## Possible optimizations -In this section we describe two alternative solutions that come with some optimizations. +In this section we describe two alternative solutions that come with some +optimizations. ### Transaction counter -Instead of relying on a hash (32 bytes) we could use a 64 bits (8 bytes) transaction counter as nonce for the wrapper and inner transactions. The advantage is that the space required would be much less since we only need two 8 bytes values in storage for every address which is signing transactions. On the other hand, the handling of the counter for the inner transaction will be performed entirely in wasm (transactions and VPs) making it a bit less efficient. This solution also imposes a strict ordering on the transactions issued by a same address. +Instead of relying on a hash (32 bytes) we could use a 64 bits (8 bytes) +transaction counter as nonce for the wrapper and inner transactions. The +advantage is that the space required would be much less since we only need two 8 +bytes values in storage for every address which is signing transactions. On the +other hand, the handling of the counter for the inner transaction will be +performed entirely in wasm (transactions and VPs) making it a bit less +efficient. This solution also imposes a strict ordering on the transactions +issued by a same address. -**NOTE**: this solution requires the ability to [yield](https://github.com/wasmerio/wasmer/issues/1127) execution from Wasmer which is not implemented yet. 
+**NOTE**: this solution requires the ability to +[yield](https://github.com/wasmerio/wasmer/issues/1127) execution from Wasmer +which is not implemented yet. #### InnerTx -We will implement the protection entirely in Wasm: the check of the counter will be carried out by the validity predicates while the actual writing of the counter in storage will be done by the transactions themselves. +We will implement the protection entirely in Wasm: the check of the counter will +be carried out by the validity predicates while the actual writing of the +counter in storage will be done by the transactions themselves. -To do so, the `SignedTxData` attached to the transaction will hold the current value of the counter in storage: +To do so, the `SignedTxData` attached to the transaction will hold the current +value of the counter in storage: ```rust pub struct SignedTxData { - /// The original tx data bytes, if any - pub data: Option>, - /// The optional transaction counter for replay protection - pub tx_counter: Option, - /// The signature is produced on the tx data concatenated with the tx code - /// and the timestamp. - pub sig: common::Signature, + /// The original tx data bytes, if any + pub data: Option>, + /// The optional transaction counter for replay protection + pub tx_counter: Option, + /// The signature is produced on the tx data concatenated with the tx code + /// and the timestamp. + pub sig: common::Signature, } ``` -The counter must reside in `SignedTxData` and not in the data itself because this must be checked by the validity predicate which is not aware of the specific transaction that took place but only of the changes in the storage; therefore, the VP is not able to correctly deserialize the data of the transactions since it doesn't know what type of data the bytes represent. +The counter must reside in `SignedTxData` and not in the data itself because +this must be checked by the validity predicate which is not aware of the +specific transaction that took place but only of the changes in the storage; +therefore, the VP is not able to correctly deserialize the data of the +transactions since it doesn't know what type of data the bytes represent. -The counter will be signed as well to protect it from tampering and grant it the same guarantees explained at the [beginning](#encryption-authentication) of this document. +The counter will be signed as well to protect it from tampering and grant it the +same guarantees explained at the [beginning](#encryption-authentication) of this +document. -The wasm transaction will simply read the value from storage and increase its value by one. The target key in storage will be the following: +The wasm transaction will simply read the value from storage and increase its +value by one. The target key in storage will be the following: ``` /$Address/inner_tx_counter: u64 ``` -The VP of the _source_ address will then check the validity of the signature and, if it's deemed valid, will proceed to check if the pre-value of the counter in storage was equal to the one contained in the `SignedTxData` struct and if the post-value of the key in storage has been incremented by one: if any of these conditions doesn't hold the VP will discard the transactions and prevent the changes from being applied to the storage. 
+The VP of the _source_ address will then check the validity of the signature +and, if it's deemed valid, will proceed to check if the pre-value of the counter +in storage was equal to the one contained in the `SignedTxData` struct and if +the post-value of the key in storage has been incremented by one: if any of +these conditions doesn't hold the VP will discard the transactions and prevent +the changes from being applied to the storage. -In the specific case of a shielded transfer, since MASP already comes with replay protection as part of the Zcash design (see the [MASP specs](../masp.md) and [Zcash protocol specs](https://zips.z.cash/protocol/protocol.pdf)), the counter in `SignedTxData` is not required and therefore should be optional. +In the specific case of a shielded transfer, since MASP already comes with +replay protection as part of the Zcash design (see the [MASP specs](../masp.md) +and [Zcash protocol specs](https://zips.z.cash/protocol/protocol.pdf)), the +counter in `SignedTxData` is not required and therefore should be optional. -To implement replay protection for the inner transaction we will need to update all the VPs checking the transaction's signature to include the check on the transaction counter: at the moment the `vp_user` validity predicate is the only one to update. In addition, all the transactions involving `SignedTxData` should increment the counter. +To implement replay protection for the inner transaction we will need to update +all the VPs checking the transaction's signature to include the check on the +transaction counter: at the moment the `vp_user` validity predicate is the only +one to update. In addition, all the transactions involving `SignedTxData` should +increment the counter. #### WrapperTx -To protect this transaction we can implement an in-protocol mechanism. Since the wrapper transaction gets signed before being submitted to the network, we can leverage the `tx_counter` field of the `SignedTxData` already introduced for the inner tx. +To protect this transaction we can implement an in-protocol mechanism. Since the +wrapper transaction gets signed before being submitted to the network, we can +leverage the `tx_counter` field of the `SignedTxData` already introduced for the +inner tx. In addition, we need another counter in the storage subspace of every address: @@ -234,109 +525,229 @@ In addition, we need another counter in the storage subspace of every address: /$Address/wrapper_tx_counter: u64 ``` -where `$Address` is the one signing the transaction (the same implied by the `pk` field of the `WrapperTx` struct). +where `$Address` is the one signing the transaction (the same implied by the +`pk` field of the `WrapperTx` struct). -The check will consist of a signature check first followed by a check on the counter that will make sure that the counter attached to the transaction matches the one in storage for the signing address. This will be done in the `process_proposal` function so that validators can decide whether the transaction is valid or not; if it's not, then they will discard the transaction and skip to the following one. +The check will consist of a signature check first followed by a check on the +counter that will make sure that the counter attached to the transaction matches +the one in storage for the signing address. This will be done in the +`process_proposal` function so that validators can decide whether the +transaction is valid or not; if it's not, then they will discard the transaction +and skip to the following one. 
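A minimal sketch of this counter check follows (simplified stand-in types, not
the actual protocol code; in the real flow the check happens in
`process_proposal` and the bump in `finalize_block`):

```rust
use std::collections::HashMap;

/// Stand-in for the per-address `/$Address/wrapper_tx_counter` keys.
struct Storage {
    wrapper_tx_counter: HashMap<String, u64>,
}

/// The counter carried in `SignedTxData` by the wrapper signer.
struct SignedWrapper {
    signer: String,
    tx_counter: u64,
}

/// Accept the wrapper only if its counter matches the one in storage; the
/// stored value is then bumped with an explicit overflow check.
fn check_and_bump(storage: &mut Storage, wrapper: &SignedWrapper) -> Result<(), String> {
    let current = storage
        .wrapper_tx_counter
        .entry(wrapper.signer.clone())
        .or_insert(0);
    if wrapper.tx_counter != *current {
        return Err(format!(
            "expected counter {}, got {}",
            *current, wrapper.tx_counter
        ));
    }
    *current = current
        .checked_add(1)
        .ok_or_else(|| "wrapper_tx_counter overflow".to_string())?;
    Ok(())
}

fn main() {
    let mut storage = Storage {
        wrapper_tx_counter: HashMap::new(),
    };
    let wrapper = SignedWrapper {
        signer: "address".to_string(),
        tx_counter: 0,
    };
    assert!(check_and_bump(&mut storage, &wrapper).is_ok());
    // Replaying the same wrapper now fails: the stored counter moved to 1.
    assert!(check_and_bump(&mut storage, &wrapper).is_err());
}
```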
-At last, in `finalize_block`, the ledger will update the counter key in storage, increasing its value by one. This will happen when the following conditions are met: +At last, in `finalize_block`, the ledger will update the counter key in storage, +increasing its value by one. This will happen when the following conditions are +met: -- `process_proposal` has accepted the tx by validating its signature and transaction counter -- The tx was correctly applied in `finalize_block` (for `WrapperTx` this simply means inclusion in the block and gas accounting) +- `process_proposal` has accepted the tx by validating its signature and + transaction counter +- The tx was correctly applied in `finalize_block` (for `WrapperTx` this simply + means inclusion in the block and gas accounting) -Now, if a malicious user tried to replay this transaction, the `tx_counter` in the struct would no longer be equal to the one in storage and the transaction would be deemed invalid. +Now, if a malicious user tried to replay this transaction, the `tx_counter` in +the struct would no longer be equal to the one in storage and the transaction +would be deemed invalid. #### Implementation details -In this section we'll talk about some details of the replay protection mechanism that derive from the solution proposed in this section. +In this section we'll talk about some details of the replay protection mechanism +that derive from the solution proposed in this section. ##### Storage counters -Replay protection will require interaction with the storage from both the protocol and Wasm. To do so we can take advantage of the `StorageRead` and `StorageWrite` traits to work with a single interface. +Replay protection will require interaction with the storage from both the +protocol and Wasm. To do so we can take advantage of the `StorageRead` and +`StorageWrite` traits to work with a single interface. -This implementation requires two transaction counters in storage for every address, so that the storage subspace of a given address looks like the following: +This implementation requires two transaction counters in storage for every +address, so that the storage subspace of a given address looks like the +following: ``` /$Address/wrapper_tx_counter: u64 /$Address/inner_tx_counter: u64 ``` -An implementation requiring a single counter in storage has been taken into consideration and discarded because that would not support batching; see the [relative section](#single-counter-in-storage) for a more in-depth explanation. +An implementation requiring a single counter in storage has been taken into +consideration and discarded because that would not support batching; see the +[relative section](#single-counter-in-storage) for a more in-depth explanation. -For both the wrapper and inner transaction, the increase of the counter in storage is an important step that must be correctly executed. First, the implementation will return an error in case of a counter overflow to prevent wrapping, since this would allow for the replay of previous transactions. Also, we want to increase the counter as soon as we verify that the signature, the chain id and the passed-in transaction counter are valid. The increase should happen immediately after the checks because of two reasons: +For both the wrapper and inner transaction, the increase of the counter in +storage is an important step that must be correctly executed. 
First, the
+implementation will return an error in case of a counter overflow to prevent
+wrapping, since this would allow for the replay of previous transactions. Also,
+we want to increase the counter as soon as we verify that the signature, the
+chain id and the passed-in transaction counter are valid. The increase should
+happen immediately after the checks because of two reasons:

 - Prevent replay attack of a transaction in the same block
-- Update the transaction counter even in case the transaction fails, to prevent a possible replay attack in the future (since a transaction invalid at state Sx could become valid at state Sn where `n > x`)
-
-For `WrapperTx`, the counter increase and fee accounting will per performed in `finalize_block` (as stated in the [relative](#wrappertx) section).
-
-For `InnerTx`, instead, the logic is not straightforward. The transaction code will be executed in a Wasm environment ([Wasmer](https://wasmer.io)) till it eventually completes or raises an exception. In case of success, the counter in storage will be updated correctly but, in case of failure, the protocol will discard all of the changes brought by the transactions to the write-ahead-log, including the updated transaction counter. This is a problem because the transaction could be successfully replayed in the future if it will become valid.
-
-The ideal solution would be to interrupt the execution of the Wasm code after the transaction counter (if any) has been increased. This would allow performing a first run of the involved VPs and, if all of them accept the changes, let the protocol commit these changes before any possible failure. After that, the protocol would resume the execution of the transaction from the previous interrupt point until completion or failure, after which a second pass of the VPs is initiated to validate the remaining state modifications. In case of a VP rejection after the counter increase there would be no need to resume execution and the transaction could be immediately deemed invalid so that the protocol could skip to the next tx to be executed. With this solution, the counter update would be committed to storage regardless of a failure of the transaction itself.
-
-Unfortunately, at the moment, Wasmer doesn't allow [yielding](https://github.com/wasmerio/wasmer/issues/1127) from the execution.
-
-In case the transaction went out of gas (given the `gas_limit` field of the wrapper), all the changes applied will be discarded from the WAL and will not affect the state of the storage. The inner transaction could then be rewrapped with a correct gas limit and replayed until the `expiration` time has been reached.
+- Update the transaction counter even in case the transaction fails, to prevent
+  a possible replay attack in the future (since a transaction invalid at state
+  Sx could become valid at state Sn where `n > x`)
+
+For `WrapperTx`, the counter increase and fee accounting will be performed in
+`finalize_block` (as stated in the [relative](#wrappertx) section).
+
+For `InnerTx`, instead, the logic is not straightforward. The transaction code
+will be executed in a Wasm environment ([Wasmer](https://wasmer.io)) till it
+eventually completes or raises an exception. In case of success, the counter in
+storage will be updated correctly but, in case of failure, the protocol will
+discard all of the changes brought by the transactions to the write-ahead-log,
+including the updated transaction counter.
This is a problem because the +transaction could be successfully replayed in the future if it will become +valid. + +The ideal solution would be to interrupt the execution of the Wasm code after +the transaction counter (if any) has been increased. This would allow performing +a first run of the involved VPs and, if all of them accept the changes, let the +protocol commit these changes before any possible failure. After that, the +protocol would resume the execution of the transaction from the previous +interrupt point until completion or failure, after which a second pass of the +VPs is initiated to validate the remaining state modifications. In case of a VP +rejection after the counter increase there would be no need to resume execution +and the transaction could be immediately deemed invalid so that the protocol +could skip to the next tx to be executed. With this solution, the counter update +would be committed to storage regardless of a failure of the transaction itself. + +Unfortunately, at the moment, Wasmer doesn't allow +[yielding](https://github.com/wasmerio/wasmer/issues/1127) from the execution. + +In case the transaction went out of gas (given the `gas_limit` field of the +wrapper), all the changes applied will be discarded from the WAL and will not +affect the state of the storage. The inner transaction could then be rewrapped +with a correct gas limit and replayed until the `expiration` time has been +reached. ##### Batching and transaction ordering -This replay protection technique supports the execution of multiple transactions with the same address as _source_ in a single block. Actually, the presence of the transaction counters and the checks performed on them now impose a strict ordering on the execution sequence (which can be an added value for some use cases). The correct execution of more than one transaction per source address in the same block is preserved as long as: +This replay protection technique supports the execution of multiple transactions +with the same address as _source_ in a single block. Actually, the presence of +the transaction counters and the checks performed on them now impose a strict +ordering on the execution sequence (which can be an added value for some use +cases). The correct execution of more than one transaction per source address in +the same block is preserved as long as: -1. The wrapper transactions are inserted in the block with the correct ascending order +1. The wrapper transactions are inserted in the block with the correct ascending + order 2. No hole is present in the counters' sequence -3. The counter of the first transaction included in the block matches the expected one in storage - -The conditions are enforced by the block proposer who has an interest in maximizing the amount of fees extracted by the proposed block. To support this incentive, we will charge gas and fees at the same moment in which we perform the counter increase explained in the [storage counters](#storage-counters) section: this way we can avoid charging fees and gas if the transaction is invalid (invalid signature, wrong counter or wrong chain id), effectively incentivizing the block proposer to include only valid transactions and correctly reorder them to maximize the fees (see the [block rejection](#block-rejection) section for an alternative solution that was discarded in favor of this). 
- -In case of a missing transaction causes a hole in the sequence of transaction counters, the block proposer will include in the block all the transactions up to the missing one and discard all the ones following that one, effectively preserving the correct ordering. - -Correctly ordering the transactions is not enough to guarantee the correct execution. As already mentioned in the [WrapperTx](#wrappertx) section, the block proposer and the validators also need to access the storage to check that the first transaction counter of a sequence is actually the expected one. - -The entire counter ordering is only done on the `WrapperTx`: if the inner counter is wrong then the inner transaction will fail and the signer of the corresponding wrapper will be charged with fees. This incentivizes submitters to produce valid transactions and discourages malicious user from rewrapping and resubmitting old transactions. +3. The counter of the first transaction included in the block matches the + expected one in storage + +The conditions are enforced by the block proposer who has an interest in +maximizing the amount of fees extracted by the proposed block. To support this +incentive, validators will reject the block proposed if any of the included +wrapper transactions are invalid, effectively incentivizing the block proposer +to include only valid transactions and correctly reorder them to gain the fees. + +In case of a missing transaction causes a hole in the sequence of transaction +counters, the block proposer will include in the block all the transactions up +to the missing one and discard all the ones following that one, effectively +preserving the correct ordering. + +Correctly ordering the transactions is not enough to guarantee the correct +execution. As already mentioned in the [WrapperTx](#wrappertx) section, the +block proposer and the validators also need to access the storage to check that +the first transaction counter of a sequence is actually the expected one. + +The entire counter ordering is only done on the `WrapperTx`: if the inner +counter is wrong then the inner transaction will fail and the signer of the +corresponding wrapper will be charged with fees. This incentivizes submitters to +produce valid transactions and discourages malicious user from rewrapping and +resubmitting old transactions. ##### Mempool checks -As a form of optimization to prevent mempool spamming, some of the checks that have been introduced in this document will also be brought to the `mempool_validate` function. Of course, we always refer to checks on the `WrapperTx` only. More specifically: +As a form of optimization to prevent mempool spamming, some of the checks that +have been introduced in this document will also be brought to the +`mempool_validate` function. Of course, we always refer to checks on the +`WrapperTx` only. More specifically: - Check the `ChainId` field -- Check the signature of the transaction against the `pk` field of the `WrapperTx` +- Check the signature of the transaction against the `pk` field of the + `WrapperTx` - Perform a limited check on the transaction counter -Regarding the last point, `mempool_validate` will check if the counter in the transaction is `>=` than the one in storage for the address signing the `WrapperTx`. A complete check (checking for strict equality) is not feasible, as described in the [relative](#mempool-counter-validation) section. 
+Regarding the last point, `mempool_validate` will check if the counter in the +transaction is `>=` than the one in storage for the address signing the +`WrapperTx`. A complete check (checking for strict equality) is not feasible, as +described in the [relative](#mempool-counter-validation) section. #### Alternatives considered -In this section we list some possible solutions that were taken into consideration during the writing of this solution but were eventually discarded. +In this section we list some possible solutions that were taken into +consideration during the writing of this solution but were eventually discarded. ##### Mempool counter validation -The idea of performing a complete validation of the transaction counters in the `mempool_validate` function was discarded because of a possible flaw. - -Suppose a client sends five transactions (counters from 1 to 5). The mempool of the next block proposer is not guaranteed to receive them in order: something on the network could shuffle the transactions up so that they arrive in the following order: 2-3-4-5-1. Now, since we validate every single transaction to be included in the mempool in the exact order in which we receive them, we would discard the first four transactions and only accept the last one, that with counter 1. Now the next block proposer might have the four discarded transactions in its mempool (since those were not added to the previous block and therefore not evicted from the other mempools, at least they shouldn't, see [block rejection](#block-rejection)) and could therefore include them in the following block. But still, a process that could have ended in a single block actually took two blocks. Moreover, there are two more issues: - -- The next block proposer might have the remaining transactions out of order in his mempool as well, effectively propagating the same issue down to the next block proposer -- The next block proposer might not have these transactions in his mempool at all - -Finally, transactions that are not allowed into the mempool don't get propagated to the other peers, making their inclusion in a block even harder. -It is instead better to avoid a complete filter on the transactions based on their order in the mempool: instead we are going to perform a simpler check and then let the block proposer rearrange them correctly when proposing the block. +The idea of performing a complete validation of the transaction counters in the +`mempool_validate` function was discarded because of a possible flaw. + +Suppose a client sends five transactions (counters from 1 to 5). The mempool of +the next block proposer is not guaranteed to receive them in order: something on +the network could shuffle the transactions up so that they arrive in the +following order: 2-3-4-5-1. Now, since we validate every single transaction to +be included in the mempool in the exact order in which we receive them, we would +discard the first four transactions and only accept the last one, that with +counter 1. Now the next block proposer might have the four discarded +transactions in its mempool (since those were not added to the previous block +and therefore not evicted from the other mempools, at least they shouldn't, see +[block rejection](#block-rejection)) and could therefore include them in the +following block. But still, a process that could have ended in a single block +actually took two blocks. 
Moreover, there are two more issues: + +- The next block proposer might have the remaining transactions out of order in + his mempool as well, effectively propagating the same issue down to the next + block proposer +- The next block proposer might not have these transactions in his mempool at + all + +Finally, transactions that are not allowed into the mempool don't get propagated +to the other peers, making their inclusion in a block even harder. It is instead +better to avoid a complete filter on the transactions based on their order in +the mempool: instead we are going to perform a simpler check and then let the +block proposer rearrange them correctly when proposing the block. ##### In-protocol protection for InnerTx -An alternative implementation could place the protection for the inner tx in protocol, just like the wrapper one, based on the transaction counter inside `SignedTxData`. The check would run in `process_proposal` and the update in `finalize_block`, just like for the wrapper transaction. This implementation, though, shows two drawbacks: - -- it implies the need for an hard fork in case of a modification of the replay protection mechanism -- it's not clear who's the source of the inner transaction from the outside, as that depends on the specific code of the transaction itself. We could use specific whitelisted txs set to define when it requires a counter (would not work for future programmable transactions), but still, we have no way to define which address should be targeted for replay protection (**blocking issue**) +An alternative implementation could place the protection for the inner tx in +protocol, just like the wrapper one, based on the transaction counter inside +`SignedTxData`. The check would run in `process_proposal` and the update in +`finalize_block`, just like for the wrapper transaction. This implementation, +though, shows two drawbacks: + +- it implies the need for an hard fork in case of a modification of the replay + protection mechanism +- it's not clear who's the source of the inner transaction from the outside, as + that depends on the specific code of the transaction itself. We could use + specific whitelisted txs set to define when it requires a counter (would not + work for future programmable transactions), but still, we have no way to + define which address should be targeted for replay protection (**blocking + issue**) ##### In-protocol counter increase for InnerTx -In the [storage counter](#storage-counters) section we mentioned the issue of increasing the transaction counter for an inner tx even in case of failure. A possible solution that we took in consideration and discarded was to increase the counter from protocol in case of a failure. +In the [storage counter](#storage-counters) section we mentioned the issue of +increasing the transaction counter for an inner tx even in case of failure. A +possible solution that we took in consideration and discarded was to increase +the counter from protocol in case of a failure. -This is technically feasible since the protocol is aware of the keys modified by the transaction and also of the results of the validity predicates (useful in case the transaction updated more than one counter in storage). It is then possible to recover the value and reapply the change directly from protocol. This logic though, is quite dispersive, since it effectively splits the management of the counter for the `InnerTx` among Wasm and protocol, while our initial intent was to keep it completely in Wasm. 
+This is technically feasible since the protocol is aware of the keys modified by +the transaction and also of the results of the validity predicates (useful in +case the transaction updated more than one counter in storage). It is then +possible to recover the value and reapply the change directly from protocol. +This logic though, is quite dispersive, since it effectively splits the +management of the counter for the `InnerTx` among Wasm and protocol, while our +initial intent was to keep it completely in Wasm. ##### Single counter in storage -We can't use a single transaction counter in storage because this would prevent batching. +We can't use a single transaction counter in storage because this would prevent +batching. -As an example, if a client (with a current counter in storage holding value 5) generates two transactions to be included in the same block, signing both the outer and the inner (default behavior of the client), it would need to generate the following transaction counters: +As an example, if a client (with a current counter in storage holding value 5) +generates two transactions to be included in the same block, signing both the +outer and the inner (default behavior of the client), it would need to generate +the following transaction counters: ``` [ @@ -345,9 +756,15 @@ As an example, if a client (with a current counter in storage holding value 5) g ] ``` -Now, the current execution model of Namada includes the `WrapperTx` in a block first to then decrypt and execute the inner tx in the following block (respecting the committed order of the transactions). That would mean that the outer tx of `T1` would pass validation and immediately increase the counter to 6 to prevent a replay attack in the same block. Now, the outer tx of `T2` will be processed but it won't pass validation because it carries a counter with value 7 while the ledger expects 6. +Now, the current execution model of Namada includes the `WrapperTx` in a block +first to then decrypt and execute the inner tx in the following block +(respecting the committed order of the transactions). That would mean that the +outer tx of `T1` would pass validation and immediately increase the counter to 6 +to prevent a replay attack in the same block. Now, the outer tx of `T2` will be +processed but it won't pass validation because it carries a counter with value 7 +while the ledger expects 6. -To fix this, one could think to set the counters as follows: +To fix this, one could think to set the counters as follows: ``` [ @@ -356,11 +773,23 @@ To fix this, one could think to set the counters as follows: ] ``` -This way both the transactions will be considered valid and executed. The issue is that, if the second transaction is not included in the block (for any reason), than the first transaction (the only one remaining at this point) will fail. In fact, after the outer tx has correctly increased the counter in storage to value 6 the block will be accepted. In the next block the inner transaction will be decrypted and executed but this last step will fail since the counter in `SignedTxData` carries a value of 7 and the counter in storage has a value of 6. +This way both the transactions will be considered valid and executed. The issue +is that, if the second transaction is not included in the block (for any +reason), than the first transaction (the only one remaining at this point) will +fail. In fact, after the outer tx has correctly increased the counter in storage +to value 6 the block will be accepted. 
In the next block the inner transaction +will be decrypted and executed but this last step will fail since the counter in +`SignedTxData` carries a value of 7 and the counter in storage has a value of 6. -To cope with this there are two possible ways. The first one is that, instead of checking the exact value of the counter in storage and increasing its value by one, we could check that the transaction carries a counter `>=` than the one in storage and write this one (not increase) to storage. The problem with this is that it the lack of support for strict ordering of execution. +To cope with this there are two possible ways. The first one is that, instead of +checking the exact value of the counter in storage and increasing its value by +one, we could check that the transaction carries a counter `>=` than the one in +storage and write this one (not increase) to storage. The problem with this is +that it the lack of support for strict ordering of execution. -The second option is to keep the usual increase strategy of the counter (increase by one and check for strict equality) and simply use two different counters in storage for each address. The transaction will then look like this: +The second option is to keep the usual increase strategy of the counter +(increase by one and check for strict equality) and simply use two different +counters in storage for each address. The transaction will then look like this: ``` [ @@ -369,135 +798,282 @@ The second option is to keep the usual increase strategy of the counter (increas ] ``` -Since the order of inclusion of the `WrapperTxs` forces the same order of the execution for the inner ones, both transactions can be correctly executed and the correctness will be maintained even in case `T2` didn't make it to the block (note that the counter for an inner tx and the corresponding wrapper one don't need to coincide). - -##### Block rejection - -The implementation proposed in this document has one flaw when it comes to discontinuous transactions. If, for example, for a given address, the counter in storage for the `WrapperTx` is 5 and the block proposer receives, in order, transactions 6, 5 and 8, the proposer will have an incentive to correctly order transactions 5 and 6 to gain the fees that he would otherwise lose. Transaction 8 will never be accepted by the validators no matter the ordering (since they will expect tx 7 which got lost): this effectively means that the block proposer has no incentive to include this transaction in the block because it would gain him no fees but, at the same time, he doesn't really have a disincentive to not include it, since in this case the validators will simply discard the invalid tx but accept the rest of the block granting the proposer his fees on all the other transactions. - -A similar scenario happens in the case of a single transaction that is not the expected one (e.g. tx 5 when 4 is expected), or for a different type of inconsistencies, like a wrong `ChainId` or an invalid signature. - -It is up to the block proposer then, whether to include or not these kinds of transactions: a malicious proposer could do so to spam the block without suffering any penalty. The lack of fees could be a strong enough measure to prevent proposers from applying this behavior, together with the fact that the only damage caused to the chain would be spamming the blocks. 
- -If one wanted to completely prevent this scenario, the solution would be to reject the entire block: this way the proposer would have an incentive to behave correctly (by not including these transactions into the block) to gain the block fees. This would allow to shrink the size of the blocks in case of unfair block proposers but it would also cause the slow down of the block creation process, since after a block rejection a new Tendermint round has to be initiated. +Since the order of inclusion of the `WrapperTxs` forces the same order of the +execution for the inner ones, both transactions can be correctly executed and +the correctness will be maintained even in case `T2` didn't make it to the block +(note that the counter for an inner tx and the corresponding wrapper one don't +need to coincide). ### Wrapper-bound InnerTx -The solution is to tie an `InnerTx` to the corresponding `WrapperTx`. By doing so, it becomes impossible to rewrap an inner transaction and, therefore, all the attacks related to this practice would be unfeasible. This mechanism requires even less space in storage (only a 64 bit counter for every address signing wrapper transactions) and only one check on the wrapper counter in protocol. As a con, it requires communication between the signer of the inner transaction and that of the wrapper during the transaction construction. This solution also imposes a strict ordering on the wrapper transactions issued by a same address. +The solution is to tie an `InnerTx` to the corresponding `WrapperTx`. By doing +so, it becomes impossible to rewrap an inner transaction and, therefore, all the +attacks related to this practice would be unfeasible. This mechanism requires +even less space in storage (only a 64 bit counter for every address signing +wrapper transactions) and only one check on the wrapper counter in protocol. As +a con, it requires communication between the signer of the inner transaction and +that of the wrapper during the transaction construction. This solution also +imposes a strict ordering on the wrapper transactions issued by a same address. 
-To do so we will have to change the current definition of the two tx structs to the following: +To do so we will have to change the current definition of the two tx structs to +the following: ```rust pub struct WrapperTx { - /// The fee to be payed for including the tx - pub fee: Fee, - /// Used to determine an implicit account of the fee payer - pub pk: common::PublicKey, - /// Max amount of gas that can be used when executing the inner tx - pub gas_limit: GasLimit, - /// Lifetime of the transaction, also determines which decryption key will be used - pub expiration: DateTimeUtc, - /// Chain identifier for replay protection - pub chain_id: ChainId, - /// Transaction counter for replay protection - pub tx_counter: u64, - /// the encrypted payload - pub inner_tx: EncryptedTx, + /// The fee to be payed for including the tx + pub fee: Fee, + /// Used to determine an implicit account of the fee payer + pub pk: common::PublicKey, + /// Max amount of gas that can be used when executing the inner tx + pub gas_limit: GasLimit, + /// Lifetime of the transaction, also determines which decryption key will be used + pub expiration: DateTimeUtc, + /// Chain identifier for replay protection + pub chain_id: ChainId, + /// Transaction counter for replay protection + pub tx_counter: u64, + /// the encrypted payload + pub inner_tx: EncryptedTx, } pub struct Tx { - pub code: Vec, - pub data: Option>, - pub timestamp: DateTimeUtc, - pub wrapper_commit: Option, + pub code: Vec, + pub data: Option>, + pub timestamp: DateTimeUtc, + pub wrapper_commit: Option, } -``` +``` -The Wrapper transaction no longer holds the inner transaction hash while the inner one now holds a commit to the corresponding wrapper tx in the form of the hash of a `WrapperCommit` struct, defined as: +The Wrapper transaction no longer holds the inner transaction hash while the +inner one now holds a commit to the corresponding wrapper tx in the form of the +hash of a `WrapperCommit` struct, defined as: ```rust pub struct WrapperCommit { - pub pk: common::PublicKey, - pub tx_counter: u64, - pub expiration: DateTimeUtc, - pub chain_id: ChainId, + pub pk: common::PublicKey, + pub tx_counter: u64, + pub expiration: DateTimeUtc, + pub chain_id: ChainId, } ``` -The `pk-tx_counter` couple contained in this struct, uniquely identifies a single `WrapperTx` (since a valid tx_counter is unique given the address) so that the inner one is now bound to this specific wrapper. The remaining fields, `expiration` and `chain_id`, will tie these two values given their importance in terms of safety (see the [relative](#wrappertx-checks) section). Note that the `wrapper_commit` field must be optional because the `WrapperTx` struct itself gets converted to a `Tx` struct before submission but it doesn't need any commitment. - -Both the inner and wrapper tx get signed on their hash, as usual, to prevent tampering with data. When a wrapper gets processed by the ledger, we first check the validity of the signature, checking that none of the fields were modified: this means that the inner tx embedded within the wrapper is, in fact, the intended one. This last statement means that no external attacker has tampered data, but the tampering could still have been performed by the signer of the wrapper before signing the wrapper transaction. - -If this check (and others, explained later in the [checks](#wrappertx-checks) section) passes, then the inner tx gets decrypted in the following block proposal process. 
At this time we check that the order in which the inner txs are inserted in the block matches that of the corresponding wrapper txs in the previous block. To do so, we rely on an in-storage queue holding the hash of the `WrapperCommit` struct computed from the wrapper tx. From the inner tx we extract the `WrapperCommit` hash and check that it matches that in the queue: if they don't it means that the inner tx has been reordered or rewrapped and we reject the block. Note that, since we have already checked the wrapper at this point, the only way to rewrap the inner tx would be to also modify its commitment (need to change at least the `tx_counter` field), otherwise the checks on the wrapper would have spotted the inconsistency and rejected the tx. - -If this check passes then we can send the inner transaction to the wasm environment for execution: if the transaction is signed, then at least one VP will check its signature to spot possible tampering of the data (especially by the wrapper signer, since this specific case cannot be checked before this step) and, if this is the case, will reject this transaction and no storage modifications will be applied. +The `pk-tx_counter` couple contained in this struct, uniquely identifies a +single `WrapperTx` (since a valid tx_counter is unique given the address) so +that the inner one is now bound to this specific wrapper. The remaining fields, +`expiration` and `chain_id`, will tie these two values given their importance in +terms of safety (see the [relative](#wrappertx-checks) section). Note that the +`wrapper_commit` field must be optional because the `WrapperTx` struct itself +gets converted to a `Tx` struct before submission but it doesn't need any +commitment. + +Both the inner and wrapper tx get signed on their hash, as usual, to prevent +tampering with data. When a wrapper gets processed by the ledger, we first check +the validity of the signature, checking that none of the fields were modified: +this means that the inner tx embedded within the wrapper is, in fact, the +intended one. This last statement means that no external attacker has tampered +data, but the tampering could still have been performed by the signer of the +wrapper before signing the wrapper transaction. + +If this check (and others, explained later in the [checks](#wrappertx-checks) +section) passes, then the inner tx gets decrypted in the following block +proposal process. At this time we check that the order in which the inner txs +are inserted in the block matches that of the corresponding wrapper txs in the +previous block. To do so, we rely on an in-storage queue holding the hash of the +`WrapperCommit` struct computed from the wrapper tx. From the inner tx we +extract the `WrapperCommit` hash and check that it matches that in the queue: if +they don't it means that the inner tx has been reordered and we reject the +block. + +If this check passes then we can send the inner transaction to the wasm +environment for execution: if the transaction is signed, then at least one VP +will check its signature to spot possible tampering of the data (especially by +the wrapper signer, since this specific case cannot be checked before this step) +and, if this is the case, will reject this transaction and no storage +modifications will be applied. 
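The ordering check can be sketched as follows; this is a simplified
illustration in which placeholder types and std hashing stand in for the
Borsh-serialized `WrapperCommit` and its cryptographic hash, and the in-storage
queue is modeled with a `VecDeque`:

```rust
use std::collections::hash_map::DefaultHasher;
use std::collections::VecDeque;
use std::hash::{Hash, Hasher};

/// Simplified stand-in for the `WrapperCommit` struct defined above.
#[derive(Hash)]
struct WrapperCommit {
    pk: String,      // placeholder for common::PublicKey
    tx_counter: u64,
    expiration: u64, // placeholder for DateTimeUtc
    chain_id: String,
}

/// Placeholder commitment; the real implementation would hash the
/// Borsh-serialized struct with a cryptographic hash function.
fn commit_hash(commit: &WrapperCommit) -> u64 {
    let mut hasher = DefaultHasher::new();
    commit.hash(&mut hasher);
    hasher.finish()
}

/// Pop the next expected commitment from the queue and compare it with the one
/// referenced by the decrypted inner tx: a mismatch means the inner tx was
/// reordered and the block must be rejected.
fn inner_tx_matches_wrapper(queue: &mut VecDeque<u64>, inner_commit_hash: u64) -> bool {
    queue.pop_front() == Some(inner_commit_hash)
}

fn main() {
    let commit = WrapperCommit {
        pk: "pk_a".into(),
        tx_counter: 5,
        expiration: 1_700_000_000,
        chain_id: "namada-test".into(),
    };
    let mut queue = VecDeque::from([commit_hash(&commit)]);
    assert!(inner_tx_matches_wrapper(&mut queue, commit_hash(&commit)));
}
```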
In summary: - The `InnerTx` carries a unique identifier of the `WrapperTx` embedding it - Both the inner and wrapper txs are signed on all of their data -- The signature check on the wrapper tx ensures that the inner transaction is the intended one and that this wrapper has not been used to wrap a different inner tx. It also verifies that no tampering happened with the inner transaction by a third party. Finally, it ensures that the public key is the one of the signer -- The check on the `WrapperCommit` ensures that the inner tx has not been reordered nor rewrapped (this last one is a non-exhaustive check, inner tx data could have been tampered with by the wrapper signer) -- The signature check of the inner tx performed in Vp grants that no data of the inner tx has been tampered with, effectively verifying the correctness of the previous check (`WrapperCommit`) - -This sequence of controls makes it no longer possible to rewrap an `InnerTx` which is now bound to its wrapper. This implies that replay protection is only needed on the `WrapperTx` since there's no way to extract the inner one, rewrap it and replay it. +- The signature check on the wrapper tx ensures that the inner transaction is + the intended one and that this wrapper has not been used to wrap a different + inner tx. It also verifies that no tampering happened with the inner + transaction by a third party. Finally, it ensures that the public key is the + one of the signer +- The check on the `WrapperCommit` ensures that the inner tx has not been + reordered nor rewrapped (this last one is a non-exhaustive check, inner tx + data could have been tampered with by the wrapper signer) +- The signature check of the inner tx performed in Vp grants that no data of the + inner tx has been tampered with, effectively verifying the correctness of the + previous check (`WrapperCommit`) + +This sequence of controls makes it no longer possible to rewrap an `InnerTx` +which is now bound to its wrapper. This implies that replay protection is only +needed on the `WrapperTx` since there's no way to extract the inner one, rewrap +it and replay it. #### WrapperTx checks -In `mempool_validation` and `process_proposal` we will perform some checks on the wrapper tx to validate it. These will involve: +In `mempool_validation` we will perform some checks on the wrapper tx to +validate it. These will involve: - Valid signature -- Enough funds to pay for the fee +- `GasLimit` is below the block gas limit (see the + [fee specs](../economics/fee-system.md) for more details) +- `Fees` are paid with an accepted token and match the minimum amount required + (see the [fee specs](../economics/fee-system.md) for more details) - Valid chainId - Valid transaction counter - Valid expiration -These checks can all be done before executing the transactions themselves. The check on the gas cannot be done ahead of time and we'll deal with it later. If any of these fails, the transaction should be considered invalid and the action to take will be one of the followings: - -1. If the checks fail on the signature, chainId, expiration or transaction counter, then this transaction will be forever invalid, regardless of the possible evolution of the ledger's state. There's no need to include the transaction in the block nor to increase the transaction counter. 
Moreover, we **cannot** include this transaction in the block to charge a fee (as a sort of punishment) because these errors may not depend on the signer of the tx (could be due to malicious users or simply a delay in the tx inclusion in the block) -2. If the checks fail _only_ because of an insufficient balance, the wrapper should be kept in mempool for a future play in case the funds should become available -3. If all the checks pass validation we will include the transaction in the block to increase the counter and charge the fee - -Note that, regarding point one, there's a distinction to be made about an invalid `tx_counter` which could be invalid because of being old or being in advance. To solve this last issue (counter greater than the expected one), we have to introduce the concept of a lifetime (or timeout) for the transactions: basically, the `WrapperTx` will hold an extra field called `expiration` stating the maximum time up until which the submitter is willing to see the transaction executed. After the specified time the transaction will be considered invalid and discarded regardless of all the other checks. This way, in case of a transaction with a counter greater than expected, it is sufficient to wait till after the expiration to submit more transactions, so that the counter in storage is not modified (kept invalid for the transaction under observation) and replaying that tx would result in a rejection. - -This actually generalizes to a more broad concept. In general, a transaction is valid at the moment of submission, but after that, a series of external factors (ledger state, etc.) might change the mind of the submitter who's now not interested in the execution of the transaction anymore. By introducing this new field we are introducing a new constraint in the transaction's contract, where the ledger will make sure to prevent the execution of the transaction after the deadline and, on the other side, the submitter commits himself to the result of the execution at least until its expiration. If the expiration is reached and the transaction has not been executed the submitter can decide to submit a new, identical transaction if he's still interested in the changes carried by it. - -In our design, the `expiration` will hold until the transaction is executed, once it's executed, either in case of success or failure, the `tx_counter` will be increased and the transaction will not be replayable. In essence, the transaction submitter commits himself to one of these three conditions: +These checks can all be done before executing the transactions themselves. If +any of these fails, the transaction should be considered invalid and the action +to take will be one of the followings: + +1. If the checks fail on the signature, chainId, expiration or transaction + counter, then this transaction will be forever invalid, regardless of the + possible evolution of the ledger's state. There's no need to include the + transaction in the block nor to increase the transaction counter. Moreover, + we **cannot** include this transaction in the block to charge a fee (as a + sort of punishment) because these errors may not depend on the signer of the + tx (could be due to malicious users or simply a delay in the tx inclusion in + the block) +2. If the checks fail on `Fee` or `GasLimit` the transaction should be + discarded. 
In theory the gas limit of a block is a Namada parameter + controlled by governance, so there's a chance that the transaction could + become valid in the future should this limit be raised. The same applies to + the token whitelist and the minimum fee required. However we can expect a + slow rate of change of these parameters so we can reject the tx (the + submitter can always resubmit it at a future time) +3. If all the checks pass validation we will include the transaction in the + block to increase the counter and charge the fee + +Note that, regarding point one, there's a distinction to be made about an +invalid `tx_counter` which could be invalid because of being old or being in +advance. To solve this last issue (counter greater than the expected one), we +have to introduce the concept of a lifetime (or timeout) for the transactions: +basically, the `WrapperTx` will hold an extra field called `expiration` stating +the maximum time up until which the submitter is willing to see the transaction +executed. After the specified time the transaction will be considered invalid +and discarded regardless of all the other checks. This way, in case of a +transaction with a counter greater than expected, it is sufficient to wait till +after the expiration to submit more transactions, so that the counter in storage +is not modified (kept invalid for the transaction under observation) and +replaying that tx would result in a rejection. + +This actually generalizes to a more broad concept. In general, a transaction is +valid at the moment of submission, but after that, a series of external factors +(ledger state, etc.) might change the mind of the submitter who's now not +interested in the execution of the transaction anymore. By introducing this new +field we are introducing a new constraint in the transaction's contract, where +the ledger will make sure to prevent the execution of the transaction after the +deadline and, on the other side, the submitter commits himself to the result of +the execution at least until its expiration. If the expiration is reached and +the transaction has not been executed the submitter can decide to submit a new, +identical transaction if he's still interested in the changes carried by it. + +In our design, the `expiration` will hold until the transaction is executed, +once it's executed, either in case of success or failure, the `tx_counter` will +be increased and the transaction will not be replayable. In essence, the +transaction submitter commits himself to one of these three conditions: - Transaction is invalid regardless of the specific state -- Transaction is executed (either with success or not) and the transaction counter is increased +- Transaction is executed (either with success or not) and the transaction + counter is increased - Expiration time has passed The first condition satisfied will invalidate further executions of the same tx. -The `expiration` parameter also justifies step 2 of the previous bullet points which states that if the validity checks fail only because of an insufficient balance to pay for fees than the transaction should be kept in mempool for a future execution. Without it, the transaction could be potentially executed at any future moment (provided that the counter is still valid), possibily going against the mutated interests of the submitter. 
With the expiration parameter, now, the submitter commits himself to accepting the execution of the transaction up to the specified time: it's going to be his responsibility to provide a sensible value for this parameter. Given this constraint the transaction will be kept in memepool up until the expiration (since it would become invalid after that in any case), to prevent the mempool from increasing too much in size. - -This mechanism can also be applied to another scenario. Suppose a transaction was not propagated to the network by a node (or a group of colluding nodes). Now, this tx might be valid, but it doesn't get inserted into a block. Without an expiration, if the submitter doesn't submit any other transaction (which gets included in a block to increase the transaction counter), this tx can be replayed (better, applied, since it was never executed in the first place) at a future moment in time when the submitter might not be willing to execute it any more. - -Since the signer of the wrapper may be different from the one of the inner we also need to include this `expiration` field in the `WrapperCommit` struct, to prevent the signer of the wrapper from setting a lifetime which is in conflict with the interests of the inner signer. Note that adding a separate lifetime for the wrapper alone (which would require two separate checks) doesn't carry any benefit: a wrapper with a lifetime greater than the inner would have no sense since the inner would fail. Restricting the lifetime would work but it also means that the wrapper could prevent a valid inner transaction from being executed. We will then keep a single `expiration` field specifying the wrapper tx max time (the inner one will actually be executed one block later because of the execution mechanism of Namada). - -To prevent the signer of the wrapper from submitting the transaction to a different chain, the `ChainId` field should also be included in the commit. - -Finally, in case the transaction run out of gas (based on the provided `gas_limit` field of the wrapper) we don't need to take any action: by this time the transaction counter will have already been incremented and the tx is not replayable anymore. In theory, we don't even need to increment the counter since the only way this transaction could become valid is a change in the way gas is accounted, which might require a fork anyway, and consequently a change in the required `ChainId`. However, since we can't tell the gas consumption before the inner tx has been executed, we cannot anticipate this check. +Since the signer of the wrapper may be different from the one of the inner we +also need to include this `expiration` field in the `WrapperCommit` struct, to +prevent the signer of the wrapper from setting a lifetime which is in conflict +with the interests of the inner signer. Note that adding a separate lifetime for +the wrapper alone (which would require two separate checks) doesn't carry any +benefit: a wrapper with a lifetime greater than the inner would have no sense +since the inner would fail. Restricting the lifetime would work but it also +means that the wrapper could prevent a valid inner transaction from being +executed. We will then keep a single `expiration` field specifying the wrapper +tx max time (the inner one will actually be executed one block later because of +the execution mechanism of Namada). + +To prevent the signer of the wrapper from submitting the transaction to a +different chain, the `ChainId` field should also be included in the commit. 
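As a small illustration of these two commitments (field types are simplified
here; the actual `WrapperTx` uses `ChainId` and `DateTimeUtc`), a
validator-side helper checking them might look roughly like this:

```rust
/// Simplified view of the wrapper metadata relevant to these checks.
struct WrapperMeta {
    chain_id: String,
    /// Seconds since the Unix epoch; stand-in for `DateTimeUtc`.
    expiration: u64,
}

/// A wrapper is acceptable only if it targets this chain and its expiration
/// has not passed relative to the current block time.
fn chain_and_expiration_ok(wrapper: &WrapperMeta, local_chain_id: &str, block_time: u64) -> bool {
    wrapper.chain_id == local_chain_id && block_time <= wrapper.expiration
}

fn main() {
    let wrapper = WrapperMeta {
        chain_id: "namada-test".to_string(),
        expiration: 1_700_000_000,
    };
    // Valid on the right chain before the deadline...
    assert!(chain_and_expiration_ok(&wrapper, "namada-test", 1_699_999_999));
    // ...but rejected on another chain or after the expiration.
    assert!(!chain_and_expiration_ok(&wrapper, "other-chain", 1_699_999_999));
    assert!(!chain_and_expiration_ok(&wrapper, "namada-test", 1_700_000_001));
}
```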
+ +Finally, in case the transaction run out of gas (based on the provided +`GasLimit` field of the wrapper) we don't need to take any action: by this time +the transaction counter will have already been incremented and the tx is not +replayable anymore. In theory, we don't even need to increment the counter since +the only way this transaction could become valid is a change in the way gas is +accounted, which might require a fork anyway, and consequently a change in the +required `ChainId`. However, since we can't tell the gas consumption before the +inner tx has been executed, we cannot anticipate this check. + +All these checks are also run in `process_proposal` with an addition: validators +also check that the wrapper signer has enough funds to pay the fee. This check +should not be done in mempool because the funds available for a certain address +are variable in time and should only be checked at block inclusion time. If any +of the checks fail here, the entire block is rejected forcing a new Tendermint +round to begin (see a better explanation of this choice in the +[relative](#block-rejection) section). + +The `expiration` parameter also justifies that the check on funds is only done +in `process_proposal` and not in mempool. Without it, the transaction could be +potentially executed at any future moment, possibly going against the mutated +interests of the submitter. With the expiration parameter, now, the submitter +commits himself to accept the execution of the transaction up to the specified +time: it's going to be his responsibility to provide a sensible value for this +parameter. Given this constraint the transaction will be kept in mempool up +until the expiration (since it would become invalid after that in any case), to +prevent the mempool from increasing too much in size. + +This mechanism can also be applied to another scenario. Suppose a transaction +was not propagated to the network by a node (or a group of colluding nodes). +Now, this tx might be valid, but it doesn't get inserted into a block. Without +an expiration, if the submitter doesn't submit any other transaction (which gets +included in a block to increase the transaction counter), this tx can be +replayed (better, applied, since it was never executed in the first place) at a +future moment in time when the submitter might not be willing to execute it any +more. #### WrapperCommit -The fields of `WrapperTx` not included in `WrapperCommit` are at the discretion of the `WrapperTx` producer. These fields are not included in the commit because of one of these two reasons: +The fields of `WrapperTx` not included in `WrapperCommit` are at the discretion +of the `WrapperTx` producer. 
These fields are not included in the commit because
+of one of these two reasons:

-- They depend on the specific state of the wrapper signer and cannot be forced (like `fee`, since the wrapper signer must have enough funds to pay for those)
-- They are not a threat (in terms of replay attacks) to the signer of the inner transaction in case of failure of the transaction
+- They depend on the specific state of the wrapper signer and cannot be forced
+  (like `fee`, since the wrapper signer must have enough funds to pay for those)
+- They are not a threat (in terms of replay attacks) to the signer of the inner
+  transaction in case of failure of the transaction

-In a certain way, the `WrapperCommit` not only binds an `InnerTx` no a wrapper, but effectively allows the inner to control the wrapper by requesting some specific parameters for its creation and bind these parameters among the two transactions: this allows us to apply the same constraints to both txs while performing the checks on the wrapper only.
+In a certain way, the `WrapperCommit` not only binds an `InnerTx` to a wrapper,
+but effectively allows the inner to control the wrapper by requesting some
+specific parameters for its creation and binds these parameters among the two
+transactions: this allows us to apply the same constraints to both txs while
+performing the checks on the wrapper only.

#### Transaction creation process

-To craft a transaction, the process will now be the following (optional steps are only required if the signer of the inner differs from that of the wrapper):
-
-- (**Optional**) the `InnerTx` constructor request, to the wrapper signer, his public key and the `tx_counter` to be used
-- The `InnerTx` is constructed in its entirety with also the `wrapper_commit` field to define the constraints of the future wrapper
-- The produced `Tx` struct get signed over all of its data (with `SignedTxData`) producing a new struct `Tx`
-- (**Optional**) The inner tx produced is sent to the `WrapperTx` producer together with the `WrapperCommit` struct (required since the inner tx only holds the hash of it)
-- The signer of the wrapper constructs a `WrapperTx` compliant with the `WrapperCommit` fields
+To craft a transaction, the process will now be the following (optional steps
+are only required if the signer of the inner differs from that of the wrapper):
+
+- (**Optional**) the `InnerTx` constructor requests, from the wrapper signer,
+  his public key and the `tx_counter` to be used
+- The `InnerTx` is constructed in its entirety, including the `wrapper_commit`
+  field to define the constraints of the future wrapper
+- The produced `Tx` struct gets signed over all of its data (with `SignedTxData`),
+  producing a new, signed `Tx` struct
+- (**Optional**) The inner tx produced is sent to the `WrapperTx` producer
+  together with the `WrapperCommit` struct (required since the inner tx only
+  holds the hash of it)
+- The signer of the wrapper constructs a `WrapperTx` compliant with the
+  `WrapperCommit` fields
 - The produced `WrapperTx` gets signed over all of its fields

-Compared to a solution not binding the inner tx to the wrapper one, this solution requires the exchange of 3 messages (request `tx_counter`, receive `tx_counter`, send `InnerTx`) between the two signers (in case they differ), instead of one.
However, it allows the signer of the inner to send the `InnerTx` to the wrapper signer already encrypted, guaranteeing a higher level of safety: only the `WrapperCommit` struct should be sent clear, but this doesn't reveal any sensitive information about the inner transaction itself. +Compared to a solution not binding the inner tx to the wrapper one, this +solution requires the exchange of 3 messages (request `tx_counter`, receive +`tx_counter`, send `InnerTx`) between the two signers (in case they differ), +instead of one. However, it allows the signer of the inner to send the `InnerTx` +to the wrapper signer already encrypted, guaranteeing a higher level of safety: +only the `WrapperCommit` struct should be sent clear, but this doesn't reveal +any sensitive information about the inner transaction itself. From 77e162787bba918f86c9874144d8e62118281143 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 29 Dec 2022 18:18:03 +0100 Subject: [PATCH 04/58] Adds replay protection internal address and vp --- core/src/ledger/mod.rs | 1 + core/src/ledger/replay_protection.rs | 13 +++ core/src/types/address.rs | 17 +++- shared/src/ledger/native_vp/mod.rs | 1 + .../src/ledger/native_vp/replay_protection.rs | 81 +++++++++++++++++++ shared/src/ledger/protocol/mod.rs | 15 ++++ 6 files changed, 126 insertions(+), 2 deletions(-) create mode 100644 core/src/ledger/replay_protection.rs create mode 100644 shared/src/ledger/native_vp/replay_protection.rs diff --git a/core/src/ledger/mod.rs b/core/src/ledger/mod.rs index 31879e9b99..f472c5e321 100644 --- a/core/src/ledger/mod.rs +++ b/core/src/ledger/mod.rs @@ -5,6 +5,7 @@ pub mod governance; #[cfg(any(feature = "abciplus", feature = "abcipp"))] pub mod ibc; pub mod parameters; +pub mod replay_protection; pub mod slash_fund; pub mod storage; pub mod storage_api; diff --git a/core/src/ledger/replay_protection.rs b/core/src/ledger/replay_protection.rs new file mode 100644 index 0000000000..13941d3570 --- /dev/null +++ b/core/src/ledger/replay_protection.rs @@ -0,0 +1,13 @@ +//! Replay protection storage + +use crate::types::address::{Address, InternalAddress}; +use crate::types::storage::{DbKeySeg, Key}; + +/// Internal replay protection address +pub const ADDRESS: Address = + Address::Internal(InternalAddress::ReplayProtection); + +/// Check if a key is a replay protection key +pub fn is_tx_hash_key(key: &Key) -> bool { + matches!(&key.segments[0], DbKeySeg::AddressSeg(addr) if addr == &ADDRESS) +} diff --git a/core/src/types/address.rs b/core/src/types/address.rs index 5104615cb7..a17298130a 100644 --- a/core/src/types/address.rs +++ b/core/src/types/address.rs @@ -69,6 +69,8 @@ mod internal { "ibc::IBC Mint Address "; pub const ETH_BRIDGE: &str = "ano::ETH Bridge Address "; + pub const REPLAY_PROTECTION: &str = + "ano::Replay Protection "; } /// Fixed-length address strings prefix for established addresses. 
@@ -198,6 +200,9 @@ impl Address { InternalAddress::EthBridge => { internal::ETH_BRIDGE.to_string() } + InternalAddress::ReplayProtection => { + internal::REPLAY_PROTECTION.to_string() + } }; debug_assert_eq!(string.len(), FIXED_LEN_STRING_BYTES); string @@ -251,6 +256,9 @@ impl Address { internal::ETH_BRIDGE => { Ok(Address::Internal(InternalAddress::EthBridge)) } + internal::REPLAY_PROTECTION => { + Ok(Address::Internal(InternalAddress::ReplayProtection)) + } _ => Err(Error::new( ErrorKind::InvalidData, "Invalid internal address", @@ -466,6 +474,8 @@ pub enum InternalAddress { SlashFund, /// Bridge to Ethereum EthBridge, + /// Replay protection contains transactions' hash + ReplayProtection, } impl InternalAddress { @@ -500,6 +510,7 @@ impl Display for InternalAddress { Self::IbcBurn => "IbcBurn".to_string(), Self::IbcMint => "IbcMint".to_string(), Self::EthBridge => "EthBridge".to_string(), + Self::ReplayProtection => "ReplayProtection".to_string(), } ) } @@ -776,8 +787,9 @@ pub mod testing { InternalAddress::IbcEscrow => {} InternalAddress::IbcBurn => {} InternalAddress::IbcMint => {} - InternalAddress::EthBridge => {} /* Add new addresses in the - * `prop_oneof` below. */ + InternalAddress::EthBridge => {} + InternalAddress::ReplayProtection => {} /* Add new addresses in the + * `prop_oneof` below. */ }; prop_oneof![ Just(InternalAddress::PoS), @@ -792,6 +804,7 @@ pub mod testing { Just(InternalAddress::Governance), Just(InternalAddress::SlashFund), Just(InternalAddress::EthBridge), + Just(InternalAddress::ReplayProtection) ] } diff --git a/shared/src/ledger/native_vp/mod.rs b/shared/src/ledger/native_vp/mod.rs index 231405dde5..fa3e319533 100644 --- a/shared/src/ledger/native_vp/mod.rs +++ b/shared/src/ledger/native_vp/mod.rs @@ -3,6 +3,7 @@ pub mod governance; pub mod parameters; +pub mod replay_protection; pub mod slash_fund; use std::cell::RefCell; diff --git a/shared/src/ledger/native_vp/replay_protection.rs b/shared/src/ledger/native_vp/replay_protection.rs new file mode 100644 index 0000000000..b764870957 --- /dev/null +++ b/shared/src/ledger/native_vp/replay_protection.rs @@ -0,0 +1,81 @@ +//! Native VP for replay protection + +use std::collections::BTreeSet; + +use thiserror::Error; + +use namada_core::ledger::{replay_protection, storage}; +use namada_core::types::address::{Address, InternalAddress}; +use namada_core::types::storage::Key; + +use crate::ledger::native_vp::{self, Ctx, NativeVp}; +use crate::vm::WasmCacheAccess; + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum Error { + #[error("Native VP error: {0}")] + NativeVpError(#[from] native_vp::Error), +} + +/// ReplayProtection functions result +pub type Result = std::result::Result; + +/// Replay Protection VP +pub struct ReplayProtectionVp<'a, DB, H, CA> +where + DB: storage::DB + for<'iter> storage::DBIter<'iter>, + H: storage::StorageHasher, + CA: WasmCacheAccess, +{ + /// Context to interact with the host structures. + pub ctx: Ctx<'a, DB, H, CA>, +} + +impl<'a, DB, H, CA> NativeVp for ReplayProtectionVp<'a, DB, H, CA> +where + DB: 'static + storage::DB + for<'iter> storage::DBIter<'iter>, + H: 'static + storage::StorageHasher, + CA: 'static + WasmCacheAccess, +{ + const ADDR: InternalAddress = InternalAddress::ReplayProtection; + + type Error = Error; + + fn validate_tx( + &self, + _tx_data: &[u8], + keys_changed: &BTreeSet, + _verifiers: &BTreeSet
, + ) -> Result { + // VP should prevent any modification of the subspace. + // Changes are only allowed from protocol + let result = keys_changed.iter().all(|key| { + let key_type: KeyType = key.into(); + match key_type { + KeyType::TX_HASH => false, + KeyType::UNKNOWN => true, + } + }); + + Ok(result) + } +} + +enum KeyType { + #[allow(clippy::upper_case_acronyms)] + #[allow(non_camel_case_types)] + TX_HASH, + #[allow(clippy::upper_case_acronyms)] + UNKNOWN, +} + +impl From<&Key> for KeyType { + fn from(value: &Key) -> Self { + if replay_protection::is_tx_hash_key(value) { + KeyType::TX_HASH + } else { + KeyType::UNKNOWN + } + } +} diff --git a/shared/src/ledger/protocol/mod.rs b/shared/src/ledger/protocol/mod.rs index cfd416c14d..ab78a851c5 100644 --- a/shared/src/ledger/protocol/mod.rs +++ b/shared/src/ledger/protocol/mod.rs @@ -10,6 +10,7 @@ use crate::ledger::gas::{self, BlockGasMeter, VpGasMeter}; use crate::ledger::ibc::vp::{Ibc, IbcToken}; use crate::ledger::native_vp::governance::GovernanceVp; use crate::ledger::native_vp::parameters::{self, ParametersVp}; +use crate::ledger::native_vp::replay_protection::ReplayProtectionVp; use crate::ledger::native_vp::slash_fund::SlashFundVp; use crate::ledger::native_vp::{self, NativeVp}; use crate::ledger::pos::{self, PosVP}; @@ -56,6 +57,10 @@ pub enum Error { SlashFundNativeVpError(crate::ledger::native_vp::slash_fund::Error), #[error("Ethereum bridge native VP error: {0}")] EthBridgeNativeVpError(crate::ledger::eth_bridge::vp::Error), + #[error("Replay protection native VP error: {0}")] + ReplayProtectionNativeVpError( + crate::ledger::native_vp::replay_protection::Error, + ), #[error("Access to an internal address {0} is forbidden")] AccessForbidden(InternalAddress), } @@ -389,6 +394,16 @@ where gas_meter = bridge.ctx.gas_meter.into_inner(); result } + InternalAddress::ReplayProtection => { + let replay_protection_vp = + ReplayProtectionVp { ctx }; + let result = replay_protection_vp + .validate_tx(tx_data, &keys_changed, &verifiers) + .map_err(Error::ReplayProtectionNativeVpError); + gas_meter = + replay_protection_vp.ctx.gas_meter.into_inner(); + result + } }; accepted From e028f9ff5e0a4ce307fda778f7f55febda1af289 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Wed, 4 Jan 2023 17:15:24 +0100 Subject: [PATCH 05/58] Updates replay protections specs with governance and unsigned inner hash --- .../src/base-ledger/replay-protection.md | 22 +++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/documentation/specs/src/base-ledger/replay-protection.md b/documentation/specs/src/base-ledger/replay-protection.md index 71a5581e38..aa7b634a58 100644 --- a/documentation/specs/src/base-ledger/replay-protection.md +++ b/documentation/specs/src/base-ledger/replay-protection.md @@ -149,8 +149,13 @@ digests will be computed on the **unsigned** transactions, to support replay protection even for [multisigned](multisignature.md) transactions: in this case, if hashes were taken from the signed transactions, a different set of signatures on the same tx would produce a different hash, effectively allowing for a -replay. To support this, we'll need a subspace in storage headed by a -`ReplayProtection` internal address: +replay. 
To support this, we'll first need to update the `WrapperTx` hash field +to contain the hash of the unsigned inner tx, instead of the signed one: this +doesn't affect the overall safety of Namada (since the wrapper is still signed +over all of its bytes, including the inner signature) and allows for early +replay attack checks in mempool and at wrapper block-inclusion time. +Additionally, we need a subspace in storage headed by a `ReplayProtection` +internal address: ``` /\$ReplayProtectionAddress/\$tx0_hash: None @@ -231,6 +236,19 @@ that even if one of the attacks explained in this section is performed: - The invalid unshielding transaction must still be a valid transaction per the VPs triggered +#### Governance proposals + +Governance [proposals](../base-ledger/governance.md) may carry some wasm code to +be executed in case the proposal passed. This code is embedded into a +`DecryptedTx` directly by the validators at block processing time and is not +inserted into the block itself. + +Given that the wasm code is attached to the transaction initiating the proposal, +it could be extracted from here and inserted in a transaction before the +proposal is executed. Therefore, replay protection is not a solution to prevent +attacks on governance proposals' code. Instead, to protect these transactions, +Namada relies on its proposal id mechanism in conjunction with the VP set. + ### Forks In the case of a fork, the transaction hash is not enough to prevent replay From 14019dad348a695406e4c800de5b9fee3b065ce9 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Wed, 4 Jan 2023 17:57:38 +0100 Subject: [PATCH 06/58] Adds tx hash check in mempool validate --- apps/src/lib/node/ledger/shell/mod.rs | 115 ++++++++++++++++++++------ core/src/ledger/replay_protection.rs | 10 ++- 2 files changed, 97 insertions(+), 28 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 6b4b05b5ad..804076381e 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -21,6 +21,7 @@ use std::path::{Path, PathBuf}; use std::rc::Rc; use borsh::{BorshDeserialize, BorshSerialize}; +use namada::core::ledger::replay_protection; use namada::ledger::events::log::EventLog; use namada::ledger::events::Event; use namada::ledger::gas::BlockGasMeter; @@ -578,49 +579,109 @@ where /// Validate a transaction request. On success, the transaction will /// included in the mempool and propagated to peers, otherwise it will be /// rejected. 
+ /// + /// Error codes: + /// 1 - Tx format + /// 2 - Wrapper Tx signature + /// 3 - Tx type + /// 4 - wrapper tx hash + /// 5 - inner tx hash pub fn mempool_validate( &self, tx_bytes: &[u8], r#_type: MempoolTxType, ) -> response::CheckTx { let mut response = response::CheckTx::default(); - match Tx::try_from(tx_bytes).map_err(Error::TxDecoding) { - Ok(tx) => { - // Check balance for fee - if let Ok(TxType::Wrapper(wrapper)) = process_tx(tx) { - let fee_payer = if wrapper.pk != masp_tx_key().ref_to() { - wrapper.fee_payer() - } else { - masp() - }; - // check that the fee payer has sufficient balance - let balance = - self.get_balance(&wrapper.fee.token, &fee_payer); - // In testnets with a faucet, tx is allowed to skip fees if - // it includes a valid PoW - #[cfg(not(feature = "mainnet"))] - let has_valid_pow = self.has_valid_pow_solution(&wrapper); - #[cfg(feature = "mainnet")] - let has_valid_pow = false; + // Tx format check + let tx = match Tx::try_from(tx_bytes).map_err(Error::TxDecoding) { + Ok(t) => t, + Err(msg) => { + response.code = 1; + response.log = msg.to_string(); + return response; + } + }; - if !has_valid_pow && self.get_wrapper_tx_fees() > balance { - response.code = 1; - response.log = String::from( - "The address given does not have sufficient \ - balance to pay fee", - ); + // Tx signature check + let tx_type = match process_tx(tx) { + Ok(ty) => ty, + Err(msg) => { + response.code = 2; + response.log = msg.to_string(); + return response; + } + }; + + // Tx type check + if let TxType::Wrapper(wrapper) = tx_type { + // Replay protection check + let inner_hash_key = + replay_protection::get_tx_hash_key(&wrapper.tx_hash); + match self.storage.has_key(&inner_hash_key) { + Ok((found, _)) => { + if found { + response.code = 4; + response.log = "Wrapper transaction hash already in storage, replay attempt".to_string(); return response; } } + Err(msg) => { + response.code = 4; + response.log = msg.to_string(); + return response; + } + } - response.log = String::from("Mempool validation passed"); + let wrapper_hash_key = + replay_protection::get_tx_hash_key(&hash_tx(tx_bytes)); + match self.storage.has_key(&wrapper_hash_key) { + Ok((found, _)) => { + if found { + response.code = 5; + response.log = "Inner transaction hash already in storage, replay attempt".to_string(); + return response; + } + } + Err(msg) => { + response.code = 5; + response.log = msg.to_string(); + return response; + } } - Err(msg) => { + + // Check balance for fee + let fee_payer = if wrapper.pk != masp_tx_key().ref_to() { + wrapper.fee_payer() + } else { + masp() + }; + // check that the fee payer has sufficient balance + let balance = self.get_balance(&wrapper.fee.token, &fee_payer); + + // In testnets with a faucet, tx is allowed to skip fees if + // it includes a valid PoW + #[cfg(not(feature = "mainnet"))] + let has_valid_pow = self.has_valid_pow_solution(&wrapper); + #[cfg(feature = "mainnet")] + let has_valid_pow = false; + + if !has_valid_pow && self.get_wrapper_tx_fees() > balance { response.code = 1; - response.log = msg.to_string(); + response.log = String::from( + "The address given does not have sufficient \ + balance to pay fee", + ); + return response; } + } else { + response.code = 3; + response.log = "Unsupported tx type".to_string(); + return response; } + + response.log = "Mempool validation passed".to_string(); + response } diff --git a/core/src/ledger/replay_protection.rs b/core/src/ledger/replay_protection.rs index 13941d3570..cee54ef06f 100644 --- 
a/core/src/ledger/replay_protection.rs +++ b/core/src/ledger/replay_protection.rs @@ -1,7 +1,8 @@ //! Replay protection storage use crate::types::address::{Address, InternalAddress}; -use crate::types::storage::{DbKeySeg, Key}; +use crate::types::hash::Hash; +use crate::types::storage::{DbKeySeg, Key, KeySeg}; /// Internal replay protection address pub const ADDRESS: Address = @@ -11,3 +12,10 @@ pub const ADDRESS: Address = pub fn is_tx_hash_key(key: &Key) -> bool { matches!(&key.segments[0], DbKeySeg::AddressSeg(addr) if addr == &ADDRESS) } + +/// Get the transaction hash key +pub fn get_tx_hash_key(hash: &Hash) -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&hash.to_string()) + .expect("Cannot obtain a valid db key") +} From c8710d7f49f68bd084d1e613e60731dc5cb75a33 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Mon, 9 Jan 2023 15:19:19 +0100 Subject: [PATCH 07/58] Wrapper commit hash on unsigned inner tx --- core/src/proto/types.rs | 13 +++++++++++++ core/src/types/transaction/decrypted.rs | 7 ++++--- core/src/types/transaction/wrapper.rs | 22 +++++++++++----------- 3 files changed, 28 insertions(+), 14 deletions(-) diff --git a/core/src/proto/types.rs b/core/src/proto/types.rs index 40e343d1bf..276cf4af04 100644 --- a/core/src/proto/types.rs +++ b/core/src/proto/types.rs @@ -9,6 +9,7 @@ use thiserror::Error; use super::generated::types; #[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] use crate::tendermint_proto::abci::ResponseDeliverTx; +use crate::types::hash; use crate::types::key::*; use crate::types::time::DateTimeUtc; #[cfg(feature = "ferveo-tpke")] @@ -362,6 +363,18 @@ impl Tx { SigningTx::from(self.clone()).hash() } + /// Returns the hash of the unsigned transaction (if signed), otherwise the hash of + /// entire tx. + pub fn unsigned_hash(&self) -> hash::Hash { + match SignedTxData::try_from_slice(&self.to_bytes()) { + Ok(signed) => { + // Exclude the signature from the digest computation + hash_tx(signed.data.unwrap_or_default().as_ref()) + } + Err(_) => hash_tx(&self.to_bytes()), + } + } + pub fn code_hash(&self) -> [u8; 32] { SigningTx::from(self.clone()).code_hash } diff --git a/core/src/types/transaction/decrypted.rs b/core/src/types/transaction/decrypted.rs index 3ac49efc77..6d1565f8ff 100644 --- a/core/src/types/transaction/decrypted.rs +++ b/core/src/types/transaction/decrypted.rs @@ -11,7 +11,7 @@ pub mod decrypted_tx { use super::EllipticCurve; use crate::proto::Tx; - use crate::types::transaction::{hash_tx, Hash, TxType, WrapperTx}; + use crate::types::transaction::{Hash, TxType, WrapperTx}; #[derive(Clone, Debug, BorshSerialize, BorshDeserialize, BorshSchema)] #[allow(clippy::large_enum_variant)] @@ -56,14 +56,15 @@ pub mod decrypted_tx { } /// Return the hash used as a commitment to the tx's contents in the - /// wrapper tx that includes this tx as an encrypted payload. + /// wrapper tx that includes this tx as an encrypted payload. 
The commitment + /// is computed on the unsigned tx if tx is signed pub fn hash_commitment(&self) -> Hash { match self { DecryptedTx::Decrypted { tx, #[cfg(not(feature = "mainnet"))] has_valid_pow: _, - } => hash_tx(&tx.to_bytes()), + } => tx.unsigned_hash(), DecryptedTx::Undecryptable(wrapper) => wrapper.tx_hash.clone(), } } diff --git a/core/src/types/transaction/wrapper.rs b/core/src/types/transaction/wrapper.rs index 70ef2827bc..5c014ed8a9 100644 --- a/core/src/types/transaction/wrapper.rs +++ b/core/src/types/transaction/wrapper.rs @@ -17,9 +17,7 @@ pub mod wrapper_tx { use crate::types::storage::Epoch; use crate::types::token::Amount; use crate::types::transaction::encrypted::EncryptedTx; - use crate::types::transaction::{ - hash_tx, EncryptionKey, Hash, TxError, TxType, - }; + use crate::types::transaction::{EncryptionKey, Hash, TxError, TxType}; /// Minimum fee amount in micro NAMs pub const MIN_FEE: u64 = 100; @@ -206,7 +204,7 @@ pub mod wrapper_tx { epoch, gas_limit, inner_tx, - tx_hash: hash_tx(&tx.to_bytes()), + tx_hash: tx.unsigned_hash(), #[cfg(not(feature = "mainnet"))] pow_solution, } @@ -227,7 +225,7 @@ pub mod wrapper_tx { /// Decrypt the wrapped transaction. /// - /// Will fail if the inner transaction does match the + /// Will fail if the inner transaction doesn't match the /// hash commitment or we are unable to recover a /// valid Tx from the decoded byte stream. pub fn decrypt( @@ -236,14 +234,15 @@ pub mod wrapper_tx { ) -> Result { // decrypt the inner tx let decrypted = self.inner_tx.decrypt(privkey); + let decrypted_tx = Tx::try_from(decrypted.as_ref()) + .map_err(|_| WrapperTxErr::InvalidTx)?; + // check that the hash equals commitment - if hash_tx(&decrypted) != self.tx_hash { - Err(WrapperTxErr::DecryptedHash) - } else { - // convert back to Tx type - Tx::try_from(decrypted.as_ref()) - .map_err(|_| WrapperTxErr::InvalidTx) + if decrypted_tx.unsigned_hash() != self.tx_hash { + return Err(WrapperTxErr::DecryptedHash); } + + Ok(decrypted_tx) } /// Sign the wrapper transaction and convert to a normal Tx type @@ -348,6 +347,7 @@ pub mod wrapper_tx { use super::*; use crate::proto::SignedTxData; use crate::types::address::nam; + use crate::types::transaction::hash_tx; fn gen_keypair() -> common::SecretKey { use rand::prelude::ThreadRng; From 78f1333be96f8ba3768e9cfd5193947d21b51275 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Tue, 10 Jan 2023 15:38:48 +0100 Subject: [PATCH 08/58] Unit test `mempool_validate` --- apps/src/lib/node/ledger/shell/mod.rs | 235 +++++++++++++++++++++++++- 1 file changed, 231 insertions(+), 4 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 804076381e..71c5aab759 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -584,8 +584,8 @@ where /// 1 - Tx format /// 2 - Wrapper Tx signature /// 3 - Tx type - /// 4 - wrapper tx hash - /// 5 - inner tx hash + /// 4 - Inner tx hash + /// 5 - Wrapper tx hash pub fn mempool_validate( &self, tx_bytes: &[u8], @@ -604,7 +604,7 @@ where }; // Tx signature check - let tx_type = match process_tx(tx) { + let tx_type = match process_tx(tx.clone()) { Ok(ty) => ty, Err(msg) => { response.code = 2; @@ -634,7 +634,7 @@ where } let wrapper_hash_key = - replay_protection::get_tx_hash_key(&hash_tx(tx_bytes)); + replay_protection::get_tx_hash_key(&tx.unsigned_hash()); match self.storage.has_key(&wrapper_hash_key) { Ok((found, _)) => { if found { @@ -1125,3 +1125,230 @@ mod test_utils { 
assert!(!shell.wl_storage.storage.tx_queue.is_empty()); } } + +/// Test the faliure cases of [`mempool_validate`] +#[cfg(test)] +mod test_mempool_validate { + use super::test_utils::TestShell; + use super::MempoolTxType; + use super::*; + use namada::proto::SignedTxData; + use namada::types::storage::Epoch; + use namada::types::transaction::Fee; + + /// Mempool validation must reject unsigned wrappers + #[test] + fn test_missing_signature() { + let (shell, _) = TestShell::new(); + + let keypair = super::test_utils::gen_keypair(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + ); + + let mut wrapper = WrapperTx::new( + Fee { + amount: 100.into(), + token: shell.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx, + Default::default(), + ) + .sign(&keypair) + .expect("Wrapper signing failed"); + + let unsigned_wrapper = if let Some(Ok(SignedTxData { + data: Some(data), + sig: _, + })) = wrapper + .data + .take() + .map(|data| SignedTxData::try_from_slice(&data[..])) + { + Tx::new(vec![], Some(data)) + } else { + panic!("Test failed") + }; + + let mut result = shell.mempool_validate( + unsigned_wrapper.to_bytes().as_ref(), + MempoolTxType::NewTransaction, + ); + assert_eq!(result.code, 2); + result = shell.mempool_validate( + unsigned_wrapper.to_bytes().as_ref(), + MempoolTxType::RecheckTransaction, + ); + assert_eq!(result.code, 2); + } + + /// Mempool validation must reject wrappers with an invalid signature + #[test] + fn test_invalid_signature() { + let (shell, _) = TestShell::new(); + + let keypair = super::test_utils::gen_keypair(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + ); + + let mut wrapper = WrapperTx::new( + Fee { + amount: 100.into(), + token: shell.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx, + Default::default(), + ) + .sign(&keypair) + .expect("Wrapper signing failed"); + + let invalid_wrapper = if let Some(Ok(SignedTxData { + data: Some(data), + sig, + })) = wrapper + .data + .take() + .map(|data| SignedTxData::try_from_slice(&data[..])) + { + let mut new_wrapper = if let TxType::Wrapper(wrapper) = + ::deserialize(&mut data.as_ref()) + .expect("Test failed") + { + wrapper + } else { + panic!("Test failed") + }; + + // we mount a malleability attack to try and remove the fee + new_wrapper.fee.amount = 0.into(); + let new_data = TxType::Wrapper(new_wrapper) + .try_to_vec() + .expect("Test failed"); + Tx::new( + vec![], + Some( + SignedTxData { + sig, + data: Some(new_data), + } + .try_to_vec() + .expect("Test failed"), + ), + ) + } else { + panic!("Test failed"); + }; + + let mut result = shell.mempool_validate( + invalid_wrapper.to_bytes().as_ref(), + MempoolTxType::NewTransaction, + ); + assert_eq!(result.code, 2); + result = shell.mempool_validate( + invalid_wrapper.to_bytes().as_ref(), + MempoolTxType::RecheckTransaction, + ); + assert_eq!(result.code, 2); + } + + /// Mempool validation must reject non-wrapper txs + #[test] + fn test_wrong_tx_type() { + let (shell, _) = TestShell::new(); + + // Test Raw TxType + let tx = Tx::new("wasm_code".as_bytes().to_owned(), None); + + let result = shell.mempool_validate( + tx.to_bytes().as_ref(), + MempoolTxType::NewTransaction, + ); + assert_eq!(result.code, 3); + } + + /// Mempool validation must reject already applied wrapper and decrypted transactions + #[test] + fn test_replay_attack() { + let (mut shell, _) = TestShell::new(); + + let 
keypair = super::test_utils::gen_keypair(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + ); + + let wrapper = WrapperTx::new( + Fee { + amount: 100.into(), + token: shell.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx, + Default::default(), + ) + .sign(&keypair) + .expect("Wrapper signing failed"); + + let tx_type = match process_tx(wrapper.clone()).expect("Test failed") { + TxType::Wrapper(t) => t, + _ => panic!("Test failed"), + }; + + // Write wrapper hash to storage + let wrapper_hash = wrapper.unsigned_hash(); + let wrapper_hash_key = + replay_protection::get_tx_hash_key(&wrapper_hash); + shell + .storage + .write(&wrapper_hash_key, &wrapper_hash) + .expect("Test failed"); + + // Try wrapper tx replay attack + let result = shell.mempool_validate( + wrapper.to_bytes().as_ref(), + MempoolTxType::NewTransaction, + ); + assert_eq!(result.code, 5); + + let result = shell.mempool_validate( + wrapper.to_bytes().as_ref(), + MempoolTxType::RecheckTransaction, + ); + assert_eq!(result.code, 5); + + // Write inner hash in storage + let inner_hash_key = + replay_protection::get_tx_hash_key(&tx_type.tx_hash); + shell + .storage + .write(&inner_hash_key, &tx_type.tx_hash) + .expect("Test failed"); + + // Try inner tx replay attack + let result = shell.mempool_validate( + wrapper.to_bytes().as_ref(), + MempoolTxType::NewTransaction, + ); + assert_eq!(result.code, 4); + + let result = shell.mempool_validate( + wrapper.to_bytes().as_ref(), + MempoolTxType::RecheckTransaction, + ); + assert_eq!(result.code, 4); + } +} From a5962d178af614f2f943e7b27495c0f77d68c0a6 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Tue, 10 Jan 2023 16:04:31 +0100 Subject: [PATCH 09/58] Refactors `unsigned_hash_tx` --- apps/src/lib/node/ledger/shell/mod.rs | 22 +++++++++++++++------- core/src/proto/types.rs | 13 ------------- core/src/types/transaction/decrypted.rs | 4 ++-- core/src/types/transaction/mod.rs | 13 +++++++++++++ core/src/types/transaction/wrapper.rs | 8 +++++--- 5 files changed, 35 insertions(+), 25 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 71c5aab759..d2ff84d529 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -40,7 +40,6 @@ use namada::types::address; use namada::types::address::{masp, masp_tx_key, Address}; use namada::types::chain::ChainId; use namada::types::internal::WrapperTxInQueue; -use namada::types::key::*; use namada::types::storage::{BlockHeight, Key, TxIndex}; use namada::types::time::{DateTimeUtc, TimeZone, Utc}; use namada::types::token::{self}; @@ -48,6 +47,7 @@ use namada::types::transaction::{ hash_tx, process_tx, verify_decrypted_correctly, AffineCurve, DecryptedTx, EllipticCurve, PairingEngine, TxType, MIN_FEE, }; +use namada::types::{key::*, transaction}; use namada::vm::wasm::{TxCache, VpCache}; use namada::vm::WasmCacheRwAccess; use num_derive::{FromPrimitive, ToPrimitive}; @@ -604,7 +604,7 @@ where }; // Tx signature check - let tx_type = match process_tx(tx.clone()) { + let tx_type = match process_tx(tx) { Ok(ty) => ty, Err(msg) => { response.code = 2; @@ -633,8 +633,9 @@ where } } - let wrapper_hash_key = - replay_protection::get_tx_hash_key(&tx.unsigned_hash()); + let wrapper_hash_key = replay_protection::get_tx_hash_key( + &transaction::unsigned_hash_tx(tx_bytes), + ); match self.storage.has_key(&wrapper_hash_key) { Ok((found, _)) => { if found { @@ -1126,7 +1127,7 @@ 
mod test_utils { } } -/// Test the faliure cases of [`mempool_validate`] +/// Test the failure cases of [`mempool_validate`] #[cfg(test)] mod test_mempool_validate { use super::test_utils::TestShell; @@ -1134,7 +1135,7 @@ mod test_mempool_validate { use super::*; use namada::proto::SignedTxData; use namada::types::storage::Epoch; - use namada::types::transaction::Fee; + use namada::types::transaction::{Fee, WrapperTx}; /// Mempool validation must reject unsigned wrappers #[test] @@ -1158,6 +1159,8 @@ mod test_mempool_validate { 0.into(), tx, Default::default(), + #[cfg(not(feature = "mainnet"))] + None, ) .sign(&keypair) .expect("Wrapper signing failed"); @@ -1209,6 +1212,8 @@ mod test_mempool_validate { 0.into(), tx, Default::default(), + #[cfg(not(feature = "mainnet"))] + None, ) .sign(&keypair) .expect("Wrapper signing failed"); @@ -1299,6 +1304,8 @@ mod test_mempool_validate { 0.into(), tx, Default::default(), + #[cfg(not(feature = "mainnet"))] + None, ) .sign(&keypair) .expect("Wrapper signing failed"); @@ -1309,7 +1316,8 @@ mod test_mempool_validate { }; // Write wrapper hash to storage - let wrapper_hash = wrapper.unsigned_hash(); + let wrapper_hash = + super::transaction::unsigned_hash_tx(&wrapper.to_bytes()); let wrapper_hash_key = replay_protection::get_tx_hash_key(&wrapper_hash); shell diff --git a/core/src/proto/types.rs b/core/src/proto/types.rs index 276cf4af04..40e343d1bf 100644 --- a/core/src/proto/types.rs +++ b/core/src/proto/types.rs @@ -9,7 +9,6 @@ use thiserror::Error; use super::generated::types; #[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] use crate::tendermint_proto::abci::ResponseDeliverTx; -use crate::types::hash; use crate::types::key::*; use crate::types::time::DateTimeUtc; #[cfg(feature = "ferveo-tpke")] @@ -363,18 +362,6 @@ impl Tx { SigningTx::from(self.clone()).hash() } - /// Returns the hash of the unsigned transaction (if signed), otherwise the hash of - /// entire tx. 
- pub fn unsigned_hash(&self) -> hash::Hash { - match SignedTxData::try_from_slice(&self.to_bytes()) { - Ok(signed) => { - // Exclude the signature from the digest computation - hash_tx(signed.data.unwrap_or_default().as_ref()) - } - Err(_) => hash_tx(&self.to_bytes()), - } - } - pub fn code_hash(&self) -> [u8; 32] { SigningTx::from(self.clone()).code_hash } diff --git a/core/src/types/transaction/decrypted.rs b/core/src/types/transaction/decrypted.rs index 6d1565f8ff..42367524ec 100644 --- a/core/src/types/transaction/decrypted.rs +++ b/core/src/types/transaction/decrypted.rs @@ -11,7 +11,7 @@ pub mod decrypted_tx { use super::EllipticCurve; use crate::proto::Tx; - use crate::types::transaction::{Hash, TxType, WrapperTx}; + use crate::types::transaction::{self, Hash, TxType, WrapperTx}; #[derive(Clone, Debug, BorshSerialize, BorshDeserialize, BorshSchema)] #[allow(clippy::large_enum_variant)] @@ -64,7 +64,7 @@ pub mod decrypted_tx { tx, #[cfg(not(feature = "mainnet"))] has_valid_pow: _, - } => tx.unsigned_hash(), + } => transaction::unsigned_hash_tx(tx.to_bytes().as_ref()), DecryptedTx::Undecryptable(wrapper) => wrapper.tx_hash.clone(), } } diff --git a/core/src/types/transaction/mod.rs b/core/src/types/transaction/mod.rs index 0e0a5e980e..3a6822777a 100644 --- a/core/src/types/transaction/mod.rs +++ b/core/src/types/transaction/mod.rs @@ -27,6 +27,7 @@ use sha2::{Digest, Sha256}; pub use wrapper::*; use crate::ledger::gas::VpsGas; +use crate::proto::SignedTxData; use crate::types::address::Address; use crate::types::hash::Hash; use crate::types::ibc::IbcEvent; @@ -39,6 +40,18 @@ pub fn hash_tx(tx_bytes: &[u8]) -> Hash { Hash(*digest.as_ref()) } +/// Get the hash of the unsigned transaction (if signed), otherwise the hash of +/// entire tx. +pub fn unsigned_hash_tx(tx_bytes: &[u8]) -> Hash { + match SignedTxData::try_from_slice(tx_bytes) { + Ok(signed) => { + // Exclude the signature from the digest computation + hash_tx(signed.data.unwrap_or_default().as_ref()) + } + Err(_) => hash_tx(tx_bytes), + } +} + /// Transaction application result // TODO derive BorshSchema after #[derive(Clone, Debug, Default, BorshSerialize, BorshDeserialize)] diff --git a/core/src/types/transaction/wrapper.rs b/core/src/types/transaction/wrapper.rs index 5c014ed8a9..d9b9b0d157 100644 --- a/core/src/types/transaction/wrapper.rs +++ b/core/src/types/transaction/wrapper.rs @@ -17,7 +17,9 @@ pub mod wrapper_tx { use crate::types::storage::Epoch; use crate::types::token::Amount; use crate::types::transaction::encrypted::EncryptedTx; - use crate::types::transaction::{EncryptionKey, Hash, TxError, TxType}; + use crate::types::transaction::{ + self, EncryptionKey, Hash, TxError, TxType, + }; /// Minimum fee amount in micro NAMs pub const MIN_FEE: u64 = 100; @@ -204,7 +206,7 @@ pub mod wrapper_tx { epoch, gas_limit, inner_tx, - tx_hash: tx.unsigned_hash(), + tx_hash: transaction::unsigned_hash_tx(&tx.to_bytes()), #[cfg(not(feature = "mainnet"))] pow_solution, } @@ -238,7 +240,7 @@ pub mod wrapper_tx { .map_err(|_| WrapperTxErr::InvalidTx)?; // check that the hash equals commitment - if decrypted_tx.unsigned_hash() != self.tx_hash { + if transaction::unsigned_hash_tx(&decrypted) != self.tx_hash { return Err(WrapperTxErr::DecryptedHash); } From 8cbccf79def334196e86ca3778ebc2cf0fa610d7 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Tue, 10 Jan 2023 19:12:54 +0100 Subject: [PATCH 10/58] Fixes replay protection specs --- .../src/base-ledger/replay-protection.md | 25 +++++++++++-------- 1 file changed, 15 insertions(+), 
10 deletions(-) diff --git a/documentation/specs/src/base-ledger/replay-protection.md b/documentation/specs/src/base-ledger/replay-protection.md index aa7b634a58..613fe21e25 100644 --- a/documentation/specs/src/base-ledger/replay-protection.md +++ b/documentation/specs/src/base-ledger/replay-protection.md @@ -176,15 +176,20 @@ Both in `mempool_validation` and `process_proposal` we will perform a check (together with others, see the [relative](#wrapper-checks) section) on both the digests against the storage to check that neither of the transactions has already been executed: if this doesn't hold, the `WrapperTx` will not be -included into the mempool/block respectively. If both checks pass then the -transaction is included in the block and executed. In the `finalize_block` -function we will add the transaction's hash to storage to prevent re-executions. -We will first add the hash of the wrapper transaction. After that, in the -following block, we deserialize the inner transaction, check the correct order -of the transactions in the block and execute the tx: if it runs out of gas then -we'll avoid storing its hash to allow rewrapping and executing the transaction, -otherwise we'll add the hash in storage (both in case of success or failure of -the tx). +included into the mempool/block respectively. If both checks pass then both of +the hashes are added to the write ahead log in `process_proposal` to be then +committed to storage: using the WAL allows us to prevent a replay of a +transaction in the same block. The transaction is then included in the block and +executed. + +In the next block we deserialize the inner transaction, check the validity of +the decrypted txs and their correct order: if the order is off a new round of +tendermint will start. If instead an error is found in any single decrypted tx, +we remove from storage the previously inserted hash of the inner tx to allow it +to be rewrapped, and discard the tx itself. Finally, in `finalize_block` we +execute the tx: if it runs out of gas then we'll remove its hash from storage, +again to allow rewrapping and executing the transaction, otherwise we'll keep +the hash in storage (both in case of success or failure of the tx). #### Optional unshielding @@ -411,7 +416,7 @@ All these checks are also run in `process_proposal` with a few additions: [relative](#block-rejection) section) - The unshielding tx (if present) releases the minimum amount of tokens required to pay fees -- The unshielding tx (if present) runs succesffuly +- The unshielding tx (if present) runs succesfully The `expiration` parameter also justifies that the check on funds is only done in `process_proposal` and not in mempool. 
Without it, the transaction could be From 5decfc96cecfd2bf6ad4dde4c22566d5e197cbd1 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Wed, 11 Jan 2023 16:13:55 +0100 Subject: [PATCH 11/58] Replay protection checks in `process_proposal` --- apps/src/lib/node/ledger/shell/mod.rs | 12 +- .../lib/node/ledger/shell/process_proposal.rs | 116 ++++++++++++++++-- 2 files changed, 115 insertions(+), 13 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index d2ff84d529..62af959f26 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -618,7 +618,7 @@ where // Replay protection check let inner_hash_key = replay_protection::get_tx_hash_key(&wrapper.tx_hash); - match self.storage.has_key(&inner_hash_key) { + match self.wl_storage.storage.has_key(&inner_hash_key) { Ok((found, _)) => { if found { response.code = 4; @@ -636,7 +636,7 @@ where let wrapper_hash_key = replay_protection::get_tx_hash_key( &transaction::unsigned_hash_tx(tx_bytes), ); - match self.storage.has_key(&wrapper_hash_key) { + match self.wl_storage.storage.has_key(&wrapper_hash_key) { Ok((found, _)) => { if found { response.code = 5; @@ -1152,7 +1152,7 @@ mod test_mempool_validate { let mut wrapper = WrapperTx::new( Fee { amount: 100.into(), - token: shell.storage.native_token.clone(), + token: shell.wl_storage.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -1205,7 +1205,7 @@ mod test_mempool_validate { let mut wrapper = WrapperTx::new( Fee { amount: 100.into(), - token: shell.storage.native_token.clone(), + token: shell.wl_storage.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -1297,7 +1297,7 @@ mod test_mempool_validate { let wrapper = WrapperTx::new( Fee { amount: 100.into(), - token: shell.storage.native_token.clone(), + token: shell.wl_storage.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -1321,6 +1321,7 @@ mod test_mempool_validate { let wrapper_hash_key = replay_protection::get_tx_hash_key(&wrapper_hash); shell + .wl_storage .storage .write(&wrapper_hash_key, &wrapper_hash) .expect("Test failed"); @@ -1342,6 +1343,7 @@ mod test_mempool_validate { let inner_hash_key = replay_protection::get_tx_hash_key(&tx_type.tx_hash); shell + .wl_storage .storage .write(&inner_hash_key, &tx_type.tx_hash) .expect("Test failed"); diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 11deec9e13..11ba8edd8e 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -1,8 +1,11 @@ //! Implementation of the ['VerifyHeader`], [`ProcessProposal`], //! and [`RevertProposal`] ABCI++ methods for the Shell +use namada::ledger::storage::TempWlStorage; use namada::types::internal::WrapperTxInQueue; +use namada::ledger::storage::write_log::StorageModification; + use super::*; use crate::facade::tendermint_proto::abci::response_process_proposal::ProposalStatus; use crate::facade::tendermint_proto::abci::RequestProcessProposal; @@ -21,18 +24,26 @@ where Default::default() } - /// Check all the txs in a block. Some txs may be incorrect, - /// but we only reject the entire block if the order of the - /// included txs violates the order decided upon in the previous - /// block. + /// Check all the txs in a block. 
+ /// We reject the entire block when: + /// - decrypted txs violate the committed order + /// - more decrypted txs than expected + /// - checks on wrapper tx fail + /// + /// We cannot reject the block for failed checks on the decrypted txs since + /// their order has already been committed in storage, so we simply discard + /// the single invalid inner tx pub fn process_proposal( - &self, + &mut self, req: RequestProcessProposal, ) -> ProcessProposal { let tx_results = self.process_txs(&req.txs); ProcessProposal { - status: if tx_results.iter().any(|res| res.code > 3) { + status: if tx_results.iter().any(|res| match res.code { + 1 | 2 | 4 | 5 => true, + _ => false, + }) { ProposalStatus::Reject as i32 } else { ProposalStatus::Accept as i32 @@ -44,9 +55,23 @@ where /// Check all the given txs. pub fn process_txs(&self, txs: &[Vec]) -> Vec { let mut tx_queue_iter = self.wl_storage.storage.tx_queue.iter(); + let mut temp_wl_storage = TempWlStorage::new(&self.wl_storage.storage); txs.iter() .map(|tx_bytes| { - self.process_single_tx(tx_bytes, &mut tx_queue_iter) + let result = self.process_single_tx( + tx_bytes, + &mut tx_queue_iter, + &mut temp_wl_storage, + ); + if result.code == 0 || result.code == 9 { + // Commit write log in case of success or if the decrypted + // tx was invalid to remove its hash from storage + //FIXME: better to the case 9 in finalize_block? + temp_wl_storage.write_log.commit_tx(); + } else { + temp_wl_storage.write_log.drop_tx(); + } + result }) .collect() } @@ -73,7 +98,9 @@ where &self, tx_bytes: &[u8], tx_queue_iter: &mut impl Iterator, + temp_wl_storage: &mut TempWlStorage, ) -> TxResult { + //FIXME: unit test let tx = match Tx::try_from(tx_bytes) { Ok(tx) => tx, Err(_) => { @@ -129,8 +156,17 @@ where .into(), } } else { + // Remove decrypted transaction hash from storage + let inner_hash_key = + replay_protection::get_tx_hash_key( + &wrapper.tx_hash, + ); + temp_wl_storage.write_log.delete(&inner_hash_key).expect( + "Couldn't delete transaction hash from write log", + ); + TxResult { - code: ErrorCodes::InvalidTx.into(), + code: ErrorCodes::Undecryptable.into(), info: "The encrypted payload of tx was \ incorrectly marked as un-decryptable" .into(), @@ -155,6 +191,70 @@ where ), } } else { + // Replay protection checks + // Decrypted txs hash may be removed from storage in + // case the tx was invalid. 
Txs in the block, though, + // are listed with the Wrapper txs before the decrypted + // ones, so there's no need to check the WAL before the + // storage + let inner_hash_key = + replay_protection::get_tx_hash_key(&tx.tx_hash); + if temp_wl_storage + .storage + .has_key(&inner_hash_key) + .expect("Error while checking inner tx hash key in storage") + .0 + { + return TxResult { + code: ErrorCodes::InvalidTx.into(), + info: format!("Inner transaction hash {} already in storage, replay attempt", &tx.tx_hash) + }; + } + if let (Some(m), _) = + temp_wl_storage.write_log.read(&inner_hash_key) + { + // Check in WAL for replay attack in the same block + if let StorageModification::Write { value: _ } = m { + return TxResult { + code: ErrorCodes::InvalidTx.into(), + info: format!("Inner transaction hash {} already in storage, replay attempt", &tx.tx_hash) + }; + } + } + + // Write inner hash to WAL + temp_wl_storage.write_log.write(&inner_hash_key, vec![]).expect("Couldn't write inner tranasction hash to write log"); + + let wrapper_hash = + transaction::unsigned_hash_tx(tx_bytes); + let wrapper_hash_key = + replay_protection::get_tx_hash_key(&wrapper_hash); + if temp_wl_storage.storage.has_key(&wrapper_hash_key).expect("Error while checking wrapper tx hash key in storage").0 { + return TxResult { + code: ErrorCodes::InvalidTx.into(), + info: format!("Wrapper transaction hash {} already in storage, replay attempt", wrapper_hash) + }; + } + if let (Some(m), _) = + temp_wl_storage.write_log.read(&wrapper_hash_key) + { + // Check in WAL for replay attack in the same block + if let StorageModification::Write { value: _ } = m { + return TxResult { + code: ErrorCodes::InvalidTx.into(), + info: format!("Wrapper transaction hash {} already in storage, replay attempt", wrapper_hash) + }; + } + } + + // Write wrapper hash to WAL + temp_wl_storage + .write_log + .write(&wrapper_hash_key, vec![]) + .expect( + "Couldn't write wrapper tx hash to write log", + ); + // If the public key corresponds to the MASP sentinel // transaction key, then the fee payer is effectively // the MASP, otherwise derive From c795dce95be1ac32f4d2e1dce526e672ea14c8d8 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Wed, 11 Jan 2023 17:16:18 +0100 Subject: [PATCH 12/58] Refactors `process_proposal` --- .../lib/node/ledger/shell/process_proposal.rs | 72 +++++++++---------- core/src/types/internal.rs | 12 +++- 2 files changed, 46 insertions(+), 38 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 11ba8edd8e..e982098b2a 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -1,11 +1,10 @@ //! Implementation of the ['VerifyHeader`], [`ProcessProposal`], //! and [`RevertProposal`] ABCI++ methods for the Shell +use namada::ledger::storage::write_log::StorageModification; use namada::ledger::storage::TempWlStorage; use namada::types::internal::WrapperTxInQueue; -use namada::ledger::storage::write_log::StorageModification; - use super::*; use crate::facade::tendermint_proto::abci::response_process_proposal::ProposalStatus; use crate::facade::tendermint_proto::abci::RequestProcessProposal; @@ -134,51 +133,50 @@ where is coming soon to a blockchain near you. Patience." 
.into(), }, - TxType::Decrypted(tx) => match tx_queue_iter.next() { - Some(WrapperTxInQueue { - tx: wrapper, - #[cfg(not(feature = "mainnet"))] - has_valid_pow: _, - }) => { - if wrapper.tx_hash != tx.hash_commitment() { - TxResult { - code: ErrorCodes::InvalidOrder.into(), - info: "Process proposal rejected a decrypted \ + TxType::Decrypted(tx) => { + match tx_queue_iter.next() { + Some(wrapper) => { + if wrapper.tx.tx_hash != tx.hash_commitment() { + TxResult { + code: ErrorCodes::InvalidOrder.into(), + info: + "Process proposal rejected a decrypted \ transaction that violated the tx order \ determined in the previous block" - .into(), - } - } else if verify_decrypted_correctly(&tx, privkey) { - TxResult { - code: ErrorCodes::Ok.into(), - info: "Process Proposal accepted this \ + .into(), + } + } else if verify_decrypted_correctly(&tx, privkey) { + TxResult { + code: ErrorCodes::Ok.into(), + info: "Process Proposal accepted this \ transaction" - .into(), - } - } else { - // Remove decrypted transaction hash from storage - let inner_hash_key = - replay_protection::get_tx_hash_key( - &wrapper.tx_hash, - ); - temp_wl_storage.write_log.delete(&inner_hash_key).expect( + .into(), + } + } else { + // Remove decrypted transaction hash from storage + let inner_hash_key = + replay_protection::get_tx_hash_key( + &wrapper.tx.tx_hash, + ); + temp_wl_storage.write_log.delete(&inner_hash_key).expect( "Couldn't delete transaction hash from write log", ); - TxResult { - code: ErrorCodes::Undecryptable.into(), - info: "The encrypted payload of tx was \ + TxResult { + code: ErrorCodes::Undecryptable.into(), + info: "The encrypted payload of tx was \ incorrectly marked as un-decryptable" - .into(), + .into(), + } } } + None => TxResult { + code: ErrorCodes::ExtraTxs.into(), + info: "Received more decrypted txs than expected" + .into(), + }, } - None => TxResult { - code: ErrorCodes::ExtraTxs.into(), - info: "Received more decrypted txs than expected" - .into(), - }, - }, + } TxType::Wrapper(tx) => { // validate the ciphertext via Ferveo if !tx.validate_ciphertext() { diff --git a/core/src/types/internal.rs b/core/src/types/internal.rs index 8c85a4236e..ebaaa43ed8 100644 --- a/core/src/types/internal.rs +++ b/core/src/types/internal.rs @@ -40,7 +40,11 @@ impl HostEnvResult { impl From for HostEnvResult { fn from(success: bool) -> Self { - if success { Self::Success } else { Self::Fail } + if success { + Self::Success + } else { + Self::Fail + } } } @@ -89,6 +93,12 @@ mod tx_queue { pub fn is_empty(&self) -> bool { self.0.is_empty() } + + /// Get reference to the element at the given index. + /// Returns [`None`] if index exceeds the queue lenght. + pub fn get(&self, index: usize) -> Option<&WrapperTxInQueue> { + self.0.get(index) + } } } From c00cb14a6a76505b31f9aa07e26663d684459652 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Wed, 11 Jan 2023 18:07:24 +0100 Subject: [PATCH 13/58] Fixes error codes --- apps/src/lib/node/ledger/shell/mod.rs | 103 ++++++++++-------- .../lib/node/ledger/shell/process_proposal.rs | 17 +-- 2 files changed, 66 insertions(+), 54 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 62af959f26..314e4c5cef 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -130,6 +130,7 @@ pub enum ErrorCodes { InvalidOrder = 4, ExtraTxs = 5, Undecryptable = 6, + ReplayTx = 7, } impl From for u32 { @@ -581,11 +582,10 @@ where /// rejected. 
/// /// Error codes: - /// 1 - Tx format - /// 2 - Wrapper Tx signature - /// 3 - Tx type - /// 4 - Inner tx hash - /// 5 - Wrapper tx hash + /// 0: Ok + /// 1: Invalid tx + /// 2: Tx is invalidly signed + /// 7: Replay attack pub fn mempool_validate( &self, tx_bytes: &[u8], @@ -597,7 +597,7 @@ where let tx = match Tx::try_from(tx_bytes).map_err(Error::TxDecoding) { Ok(t) => t, Err(msg) => { - response.code = 1; + response.code = ErrorCodes::InvalidTx.into(); response.log = msg.to_string(); return response; } @@ -607,7 +607,7 @@ where let tx_type = match process_tx(tx) { Ok(ty) => ty, Err(msg) => { - response.code = 2; + response.code = ErrorCodes::InvalidSig.into(); response.log = msg.to_string(); return response; } @@ -618,37 +618,31 @@ where // Replay protection check let inner_hash_key = replay_protection::get_tx_hash_key(&wrapper.tx_hash); - match self.wl_storage.storage.has_key(&inner_hash_key) { - Ok((found, _)) => { - if found { - response.code = 4; - response.log = "Wrapper transaction hash already in storage, replay attempt".to_string(); - return response; - } - } - Err(msg) => { - response.code = 4; - response.log = msg.to_string(); - return response; - } + if self + .wl_storage + .storage + .has_key(&inner_hash_key) + .expect("Error while checking inner tx hash key in storage") + .0 + { + response.code = ErrorCodes::ReplayTx.into(); + response.log = format!("Inner transaction hash {} already in storage, replay attempt", wrapper.tx_hash); + return response; } - let wrapper_hash_key = replay_protection::get_tx_hash_key( - &transaction::unsigned_hash_tx(tx_bytes), - ); - match self.wl_storage.storage.has_key(&wrapper_hash_key) { - Ok((found, _)) => { - if found { - response.code = 5; - response.log = "Inner transaction hash already in storage, replay attempt".to_string(); - return response; - } - } - Err(msg) => { - response.code = 5; - response.log = msg.to_string(); - return response; - } + let wrapper_hash = transaction::unsigned_hash_tx(tx_bytes); + let wrapper_hash_key = + replay_protection::get_tx_hash_key(&wrapper_hash); + if self + .wl_storage + .storage + .has_key(&wrapper_hash_key) + .expect("Error while checking wrapper tx hash key in storage") + .0 + { + response.code = ErrorCodes::ReplayTx.into(); + response.log = format!("Wrapper transaction hash {} already in storage, replay attempt", wrapper_hash); + return response; } // Check balance for fee @@ -676,7 +670,7 @@ where return response; } } else { - response.code = 3; + response.code = ErrorCodes::InvalidTx.into(); response.log = "Unsupported tx type".to_string(); return response; } @@ -1182,12 +1176,12 @@ mod test_mempool_validate { unsigned_wrapper.to_bytes().as_ref(), MempoolTxType::NewTransaction, ); - assert_eq!(result.code, 2); + assert_eq!(result.code, u32::from(ErrorCodes::InvalidSig)); result = shell.mempool_validate( unsigned_wrapper.to_bytes().as_ref(), MempoolTxType::RecheckTransaction, ); - assert_eq!(result.code, 2); + assert_eq!(result.code, u32::from(ErrorCodes::InvalidSig)); } /// Mempool validation must reject wrappers with an invalid signature @@ -1259,12 +1253,12 @@ mod test_mempool_validate { invalid_wrapper.to_bytes().as_ref(), MempoolTxType::NewTransaction, ); - assert_eq!(result.code, 2); + assert_eq!(result.code, u32::from(ErrorCodes::InvalidSig)); result = shell.mempool_validate( invalid_wrapper.to_bytes().as_ref(), MempoolTxType::RecheckTransaction, ); - assert_eq!(result.code, 2); + assert_eq!(result.code, u32::from(ErrorCodes::InvalidSig)); } /// Mempool validation must reject 
non-wrapper txs @@ -1279,7 +1273,8 @@ mod test_mempool_validate { tx.to_bytes().as_ref(), MempoolTxType::NewTransaction, ); - assert_eq!(result.code, 3); + assert_eq!(result.code, u32::from(ErrorCodes::InvalidTx)); + assert_eq!(result.log, "Unsupported tx type") } /// Mempool validation must reject already applied wrapper and decrypted transactions @@ -1331,13 +1326,15 @@ mod test_mempool_validate { wrapper.to_bytes().as_ref(), MempoolTxType::NewTransaction, ); - assert_eq!(result.code, 5); + assert_eq!(result.code, u32::from(ErrorCodes::ReplayTx)); + assert_eq!(result.log, format!("Wrapper transaction hash {} already in storage, replay attempt", wrapper_hash)); let result = shell.mempool_validate( wrapper.to_bytes().as_ref(), MempoolTxType::RecheckTransaction, ); - assert_eq!(result.code, 5); + assert_eq!(result.code, u32::from(ErrorCodes::ReplayTx)); + assert_eq!(result.log, format!("Wrapper transaction hash {} already in storage, replay attempt", wrapper_hash)); // Write inner hash in storage let inner_hash_key = @@ -1353,12 +1350,26 @@ mod test_mempool_validate { wrapper.to_bytes().as_ref(), MempoolTxType::NewTransaction, ); - assert_eq!(result.code, 4); + assert_eq!(result.code, u32::from(ErrorCodes::ReplayTx)); + assert_eq!( + result.log, + format!( + "Inner transaction hash {} already in storage, replay attempt", + tx_type.tx_hash + ) + ); let result = shell.mempool_validate( wrapper.to_bytes().as_ref(), MempoolTxType::RecheckTransaction, ); - assert_eq!(result.code, 4); + assert_eq!(result.code, u32::from(ErrorCodes::ReplayTx)); + assert_eq!( + result.log, + format!( + "Inner transaction hash {} already in storage, replay attempt", + tx_type.tx_hash + ) + ) } } diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index e982098b2a..8174fcbb30 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -40,7 +40,7 @@ where ProcessProposal { status: if tx_results.iter().any(|res| match res.code { - 1 | 2 | 4 | 5 => true, + 1 | 2 | 4 | 5 | 7 => true, _ => false, }) { ProposalStatus::Reject as i32 @@ -62,10 +62,9 @@ where &mut tx_queue_iter, &mut temp_wl_storage, ); - if result.code == 0 || result.code == 9 { + if result.code == 0 || result.code == 6 { // Commit write log in case of success or if the decrypted // tx was invalid to remove its hash from storage - //FIXME: better to the case 9 in finalize_block? temp_wl_storage.write_log.commit_tx(); } else { temp_wl_storage.write_log.drop_tx(); @@ -88,7 +87,9 @@ where /// 2: Tx is invalidly signed /// 3: Wasm runtime error /// 4: Invalid order of decrypted txs - /// 5. 
More decrypted txs than expected + /// 5: More decrypted txs than expected + /// 6: Undecryptable inner tx + /// 7: Replay attack /// /// INVARIANT: Any changes applied in this method must be reverted if the /// proposal is rejected (unless we can simply overwrite them in the @@ -204,7 +205,7 @@ where .0 { return TxResult { - code: ErrorCodes::InvalidTx.into(), + code: ErrorCodes::ReplayTx.into(), info: format!("Inner transaction hash {} already in storage, replay attempt", &tx.tx_hash) }; } @@ -214,7 +215,7 @@ where // Check in WAL for replay attack in the same block if let StorageModification::Write { value: _ } = m { return TxResult { - code: ErrorCodes::InvalidTx.into(), + code: ErrorCodes::ReplayTx.into(), info: format!("Inner transaction hash {} already in storage, replay attempt", &tx.tx_hash) }; } @@ -229,7 +230,7 @@ where replay_protection::get_tx_hash_key(&wrapper_hash); if temp_wl_storage.storage.has_key(&wrapper_hash_key).expect("Error while checking wrapper tx hash key in storage").0 { return TxResult { - code: ErrorCodes::InvalidTx.into(), + code: ErrorCodes::ReplayTx.into(), info: format!("Wrapper transaction hash {} already in storage, replay attempt", wrapper_hash) }; } @@ -239,7 +240,7 @@ where // Check in WAL for replay attack in the same block if let StorageModification::Write { value: _ } = m { return TxResult { - code: ErrorCodes::InvalidTx.into(), + code: ErrorCodes::ReplayTx.into(), info: format!("Wrapper transaction hash {} already in storage, replay attempt", wrapper_hash) }; } From b592344ecbb13eeaaa195a5410bafc34611109e6 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Wed, 11 Jan 2023 18:10:27 +0100 Subject: [PATCH 14/58] Removes tx hash from storage in `finalize_block` --- .../lib/node/ledger/shell/finalize_block.rs | 34 ++++++++++++++++--- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index e37f7bb27b..c4a4e8e1e4 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -146,13 +146,14 @@ where response.events.push(tx_event); // if the rejected tx was decrypted, remove it // from the queue of txs to be processed + // Tx hash has already been removed from storage in process_proposal if let TxType::Decrypted(_) = &tx_type { self.wl_storage.storage.tx_queue.pop(); } continue; } - let mut tx_event = match &tx_type { + let (mut tx_event, tx_unsigned_hash) = match &tx_type { TxType::Wrapper(wrapper) => { let mut tx_event = Event::new_tx_event(&tx_type, height.0); @@ -216,11 +217,16 @@ where #[cfg(not(feature = "mainnet"))] has_valid_pow, }); - tx_event + (tx_event, None) } TxType::Decrypted(inner) => { // We remove the corresponding wrapper tx from the queue - self.wl_storage.storage.tx_queue.pop(); + let wrapper = self + .wl_storage + .storage + .tx_queue + .pop() + .expect("Missing wrapper tx in queue"); let mut event = Event::new_tx_event(&tx_type, height.0); match inner { @@ -239,8 +245,7 @@ where event["code"] = ErrorCodes::Undecryptable.into(); } } - - event + (event, Some(wrapper.tx.tx_hash)) } TxType::Raw(_) => { tracing::error!( @@ -333,6 +338,25 @@ where msg ); stats.increment_errored_txs(); + self.wl_storage.drop_tx(); + // FIXME: unit test + + // If transaction type is Decrypted and failed because of + // out of gas, remove its hash from storage to allow + // repwrapping it + if let Some(hash) = tx_unsigned_hash { + if let Error::GasOverflow = msg { + let tx_hash_key = + 
replay_protection::get_tx_hash_key(&hash); + self.wl_storage + .storage + .delete(&tx_hash_key) + .expect( + "Error while deleting tx hash key from storage", + ); + } + } + self.wl_storage.drop_tx(); tx_event["gas_used"] = self .gas_meter From f720b0aec5f32bec0fa5be60158d73e79d199088 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 12 Jan 2023 15:18:22 +0100 Subject: [PATCH 15/58] Updates `process_proposal` unit tests --- .../lib/node/ledger/shell/process_proposal.rs | 246 ++++++++++++------ 1 file changed, 172 insertions(+), 74 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 8174fcbb30..fe2cd18c30 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -100,7 +100,6 @@ where tx_queue_iter: &mut impl Iterator, temp_wl_storage: &mut TempWlStorage, ) -> TxResult { - //FIXME: unit test let tx = match Tx::try_from(tx_bytes) { Ok(tx) => tx, Err(_) => { @@ -325,7 +324,7 @@ mod test_process_proposal { gen_keypair, ProcessProposal, TestError, TestShell, }; - /// Test that if a wrapper tx is not signed, it is rejected + /// Test that if a wrapper tx is not signed, the block is rejected /// by [`process_proposal`]. #[test] fn test_unsigned_wrapper_rejected() { @@ -358,23 +357,22 @@ mod test_process_proposal { txs: vec![tx.clone()], }; - let response = if let [resp] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") - }; - assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidSig)); - assert_eq!( - response.result.info, - String::from("Wrapper transactions must be signed") - ); + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::InvalidSig) + ); + assert_eq!( + response[0].result.info, + String::from("Wrapper transactions must be signed") + ); + } + } } - /// Test that a wrapper tx with invalid signature is rejected + /// Test that a block including a wrapper tx with invalid signature is rejected #[test] fn test_wrapper_bad_signature_rejected() { let (mut shell, _) = TestShell::new(); @@ -439,27 +437,28 @@ mod test_process_proposal { let request = ProcessProposal { txs: vec![new_tx.to_bytes()], }; - let response = if let [response] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - response.clone() - } else { - panic!("Test failed") - }; - let expected_error = "Signature verification failed: Invalid signature"; - assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidSig)); - assert!( - response.result.info.contains(expected_error), - "Result info {} doesn't contain the expected error {}", - response.result.info, - expected_error - ); + + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + let expected_error = + "Signature verification failed: Invalid signature"; + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::InvalidSig) + ); + assert!( + response[0].result.info.contains(expected_error), + "Result info {} doesn't contain the expected error {}", + response[0].result.info, + expected_error + ); + } + } } /// Test that if the account submitting the tx is not known and the fee is - /// non-zero, [`process_proposal`] rejects that tx + /// non-zero, [`process_proposal`] rejects that block #[test] fn 
test_wrapper_unknown_address() { let (mut shell, _) = TestShell::new(); @@ -486,26 +485,26 @@ mod test_process_proposal { let request = ProcessProposal { txs: vec![wrapper.to_bytes()], }; - let response = if let [resp] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") - }; - assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidTx)); - assert_eq!( - response.result.info, + + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::InvalidTx) + ); + assert_eq!( + response[0].result.info, "The address given does not have sufficient balance to pay fee" .to_string(), ); + } + } } /// Test that if the account submitting the tx does /// not have sufficient balance to pay the fee, - /// [`process_proposal`] rejects that tx + /// [`process_proposal`] rejects the entire block #[test] fn test_wrapper_insufficient_balance_address() { let (mut shell, _) = TestShell::new(); @@ -545,22 +544,21 @@ mod test_process_proposal { txs: vec![wrapper.to_bytes()], }; - let response = if let [resp] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") - }; - assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidTx)); - assert_eq!( - response.result.info, + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::InvalidTx) + ); + assert_eq!( + response[0].result.info, String::from( "The address given does not have sufficient balance to pay fee" ) ); + } + } } /// Test that if the expected order of decrypted txs is @@ -676,7 +674,7 @@ mod test_process_proposal { } else { panic!("Test failed") }; - assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidTx)); + assert_eq!(response.result.code, u32::from(ErrorCodes::Undecryptable)); assert_eq!( response.result.info, String::from( @@ -833,7 +831,7 @@ mod test_process_proposal { ); } - /// Process Proposal should reject a RawTx, but not panic + /// Process Proposal should reject a block containing a RawTx, but not panic #[test] fn test_raw_tx_rejected() { let (mut shell, _) = TestShell::new(); @@ -846,22 +844,122 @@ mod test_process_proposal { let request = ProcessProposal { txs: vec![tx.to_bytes()], }; - let response = if let [resp] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") - }; - assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidTx)); - assert_eq!( - response.result.info, + + match shell.process_proposal(request) { + Ok(_) => panic!("Test failes"), + Err(TestError::RejectProposal(response)) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::InvalidTx) + ); + assert_eq!( + response[0].result.info, String::from( "Transaction rejected: Non-encrypted transactions are not \ supported" ), ); + } + } + } + + /// Test that if the unsigned wrapper tx hash is known (replay attack), the + /// block is rejected + #[test] + fn test_wrapper_tx_hash() { + let (mut shell, _) = TestShell::new(); + + let keypair = crate::wallet::defaults::daewon_keypair(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + ); + let wrapper = WrapperTx::new( + Fee { + amount: 0.into(), + token: 
shell.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx, + Default::default(), + ); + let signed = wrapper.sign(&keypair).expect("Test failed"); + + // Write wrapper hash to storage + let wrapper_unsigned_hash = + transaction::unsigned_hash_tx(&signed.to_bytes()); + let hash_key = + replay_protection::get_tx_hash_key(&wrapper_unsigned_hash); + shell.storage.write(&hash_key, vec![]).expect("Test failed"); + + // Run validation + let request = ProcessProposal { + txs: vec![signed.to_bytes()], + }; + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::ReplayTx) + ); + assert_eq!( + response[0].result.info, +format!("Wrapper transaction hash {} already in storage, replay attempt", wrapper_unsigned_hash) + ); + } + } + } + + /// Test that if the unsigned inner tx hash is known (replay attack), the + /// block is rejected + #[test] + fn test_inner_tx_hash() { + let (mut shell, _) = TestShell::new(); + + let keypair = crate::wallet::defaults::daewon_keypair(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + ); + let wrapper = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx, + Default::default(), + ); + let inner_unsigned_hash = wrapper.tx_hash.clone(); + let signed = wrapper.sign(&keypair).expect("Test failed"); + + // Write inner hash to storage + let hash_key = replay_protection::get_tx_hash_key(&inner_unsigned_hash); + shell.storage.write(&hash_key, vec![]).expect("Test failed"); + + // Run validation + let request = ProcessProposal { + txs: vec![signed.to_bytes()], + }; + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::ReplayTx) + ); + assert_eq!( + response[0].result.info, +format!("Inner transaction hash {} already in storage, replay attempt", inner_unsigned_hash) + ); + } + } } } From a764bdb9d83d1a8f06ee5e0746c8ad684b442db1 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 12 Jan 2023 15:21:12 +0100 Subject: [PATCH 16/58] Updates replay protection specs with protocol txs --- .../specs/src/base-ledger/replay-protection.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/documentation/specs/src/base-ledger/replay-protection.md b/documentation/specs/src/base-ledger/replay-protection.md index 613fe21e25..041619edb7 100644 --- a/documentation/specs/src/base-ledger/replay-protection.md +++ b/documentation/specs/src/base-ledger/replay-protection.md @@ -254,6 +254,16 @@ proposal is executed. Therefore, replay protection is not a solution to prevent attacks on governance proposals' code. Instead, to protect these transactions, Namada relies on its proposal id mechanism in conjunction with the VP set. +#### Protocol transactions + +At the moment, protocol transactions are only used for ETH bridge related +operations. The current implementation already takes care of replay attempts by +keeping track of the validators' signature on the events: this also includes +replay attacks in the same block. + +In the future, new types of protocol transactions may be supported: in this +case, a review of the replay protection mechanism might be required. 
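The hash-based scheme discussed in this section can be pictured with a small, self-contained sketch. This is an illustrative model only: `ReplayGuard`, its in-memory `HashSet`, and the raw `[u8; 32]` hash type are assumptions made for the example, not the actual storage API, which records hashes under the dedicated replay-protection internal address via `replay_protection::get_tx_hash_key`.

```rust
use std::collections::HashSet;

/// Toy model of hash-based replay protection: a transaction is admitted only
/// if neither its wrapper hash nor its inner hash has been seen before, and
/// both hashes are recorded once it is accepted.
struct ReplayGuard {
    seen: HashSet<[u8; 32]>,
}

impl ReplayGuard {
    fn new() -> Self {
        Self { seen: HashSet::new() }
    }

    /// Returns `false` (replay attempt) if either hash is already recorded.
    fn check_and_record(
        &mut self,
        wrapper_hash: [u8; 32],
        inner_hash: [u8; 32],
    ) -> bool {
        if self.seen.contains(&wrapper_hash) || self.seen.contains(&inner_hash) {
            return false;
        }
        self.seen.insert(wrapper_hash);
        self.seen.insert(inner_hash);
        true
    }
}

fn main() {
    let mut guard = ReplayGuard::new();
    assert!(guard.check_and_record([1; 32], [2; 32]));
    // Re-submitting the same inner tx under a fresh wrapper is rejected.
    assert!(!guard.check_and_record([3; 32], [2; 32]));
}
```

In the actual design the recorded hashes live in on-chain storage, with a temporary per-block write log used during proposal processing, so the check both survives restarts and catches duplicates within a single block.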
+ ### Forks In the case of a fork, the transaction hash is not enough to prevent replay From 1f834347475c097daf2c53b51a55a7a22a6bdde3 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 12 Jan 2023 16:31:50 +0100 Subject: [PATCH 17/58] Fixes `finalize_block` and adds unit test --- .../lib/node/ledger/shell/finalize_block.rs | 79 ++++++++++++++++++- .../lib/node/ledger/shell/process_proposal.rs | 4 + 2 files changed, 81 insertions(+), 2 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index c4a4e8e1e4..e0c04a09eb 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -345,7 +345,9 @@ where // out of gas, remove its hash from storage to allow // repwrapping it if let Some(hash) = tx_unsigned_hash { - if let Error::GasOverflow = msg { + if let Error::TxApply(protocol::Error::GasError(namada::ledger::gas::Error::TransactionGasExceededError)) = + msg + { let tx_hash_key = replay_protection::get_tx_hash_key(&hash); self.wl_storage @@ -715,7 +717,7 @@ mod test_finalize_block { let mut processed_txs = vec![]; let mut valid_txs = vec![]; - // Add unshielded balance for fee paymenty + // Add unshielded balance for fee payment let balance_key = token::balance_key( &shell.wl_storage.storage.native_token, &Address::from(&keypair.ref_to()), @@ -939,4 +941,77 @@ mod test_finalize_block { last_storage_state = store_block_state(&shell); } } + + /// Test that if a decrypted transaction fails because of out-of-gas, its + /// hash is removed from storage to allow rewrapping it + #[test] + fn test_remove_tx_hash() { + let (mut shell, _) = setup(); + let keypair = gen_keypair(); + + let mut wasm_path = top_level_directory(); + wasm_path.push("wasm_for_tests/tx_no_op.wasm"); + let tx_code = std::fs::read(wasm_path) + .expect("Expected a file at given code path"); + let raw_tx = Tx::new( + tx_code.clone(), + Some("Encrypted transaction data".as_bytes().to_owned()), + ); + let wrapper_tx = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + raw_tx.clone(), + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ); + + // Write inner hash in storage + let inner_hash_key = + replay_protection::get_tx_hash_key(&wrapper_tx.tx_hash); + shell + .storage + .write(&inner_hash_key, vec![]) + .expect("Test failed"); + + let processed_tx = ProcessedTx { + tx: Tx::from(TxType::Decrypted(DecryptedTx::Decrypted { + tx: raw_tx.clone(), + #[cfg(not(feature = "mainnet"))] + has_valid_pow: false, + })) + .to_bytes(), + result: TxResult { + code: ErrorCodes::Ok.into(), + info: "".into(), + }, + }; + shell.enqueue_tx(wrapper_tx); + + let _event = &shell + .finalize_block(FinalizeBlock { + txs: vec![processed_tx], + ..Default::default() + }) + .expect("Test failed")[0]; + + //FIXME: @grarco, uncomment when proper gas metering is in place + // // Check inner tx hash has been removed from storage + // assert_eq!(event.event_type.to_string(), String::from("applied")); + // let code = event.attributes.get("code").expect("Test failed").as_str(); + // assert_eq!(code, String::from(ErrorCodes::WasmRuntimeError).as_str()); + + // assert!( + // !shell + // .storage + // .has_key(&inner_hash_key) + // .expect("Test failed") + // .0 + // ) + } } diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index fe2cd18c30..decf6d62ef 100644 --- 
a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -885,6 +885,8 @@ mod test_process_proposal { 0.into(), tx, Default::default(), + #[cfg(not(feature = "mainnet"))] + None, ); let signed = wrapper.sign(&keypair).expect("Test failed"); @@ -936,6 +938,8 @@ format!("Wrapper transaction hash {} already in storage, replay attempt", wrappe 0.into(), tx, Default::default(), + #[cfg(not(feature = "mainnet"))] + None, ); let inner_unsigned_hash = wrapper.tx_hash.clone(); let signed = wrapper.sign(&keypair).expect("Test failed"); From ada51c6ddb76d0cff49d814493253f6bc12c682c Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 12 Jan 2023 16:59:53 +0100 Subject: [PATCH 18/58] Updates `process_proposal` unit tests --- .../lib/node/ledger/shell/process_proposal.rs | 105 ++++++++++++++++++ 1 file changed, 105 insertions(+) diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index decf6d62ef..1d4f4ccbf1 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -916,6 +916,52 @@ format!("Wrapper transaction hash {} already in storage, replay attempt", wrappe } } + /// Test that a block containing two identical wrapper txs is rejected + #[test] + fn test_wrapper_tx_hash_same_block() { + let (mut shell, _) = TestShell::new(); + + let keypair = crate::wallet::defaults::daewon_keypair(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + ); + let wrapper = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx, + Default::default(), + ); + let signed = wrapper.sign(&keypair).expect("Test failed"); + + // Run validation + let request = ProcessProposal { + txs: vec![signed.to_bytes(); 2], + }; + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!(response[0].result.code, u32::from(ErrorCodes::Ok)); + assert_eq!( + response[1].result.code, + u32::from(ErrorCodes::ReplayTx) + ); + // The checks happens on the inner hash first, do the tx is rejected because of this + // hash, not the wrapper one + assert_eq!( + response[1].result.info, +format!("Inner transaction hash {} already in storage, replay attempt", wrapper.tx_hash) + ); + } + } + } + /// Test that if the unsigned inner tx hash is known (replay attack), the /// block is rejected #[test] @@ -961,6 +1007,65 @@ format!("Wrapper transaction hash {} already in storage, replay attempt", wrappe ); assert_eq!( response[0].result.info, +format!("Inner transaction hash {} already in storage, replay attempt", inner_unsigned_hash) + ); + } + } + } + + /// Test that a block containing two identical inner transactions is rejected + #[test] + fn test_inner_tx_hash_same_block() { + let (mut shell, _) = TestShell::new(); + + let keypair = crate::wallet::defaults::daewon_keypair(); + let keypair_2 = crate::wallet::defaults::daewon_keypair(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + ); + let wrapper = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx.clone(), + Default::default(), + ); + let inner_unsigned_hash = wrapper.tx_hash.clone(); + let signed = wrapper.sign(&keypair).expect("Test failed"); + + let 
new_wrapper = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.storage.native_token.clone(), + }, + &keypair_2, + Epoch(0), + 0.into(), + tx, + Default::default(), + ); + let new_signed = new_wrapper.sign(&keypair).expect("Test failed"); + + // Run validation + let request = ProcessProposal { + txs: vec![signed.to_bytes(), new_signed.to_bytes()], + }; + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!(response[0].result.code, u32::from(ErrorCodes::Ok)); + assert_eq!( + response[1].result.code, + u32::from(ErrorCodes::ReplayTx) + ); + assert_eq!( + response[1].result.info, format!("Inner transaction hash {} already in storage, replay attempt", inner_unsigned_hash) ); } From 15eaf8dc7bbecfef0fc86915182967ab89a4ba45 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 12 Jan 2023 17:29:12 +0100 Subject: [PATCH 19/58] Fmt --- .../lib/node/ledger/shell/finalize_block.rs | 13 +- apps/src/lib/node/ledger/shell/mod.rs | 39 +++- .../lib/node/ledger/shell/process_proposal.rs | 217 +++++++++++++----- core/src/types/address.rs | 3 +- core/src/types/internal.rs | 6 +- core/src/types/transaction/decrypted.rs | 4 +- .../src/ledger/native_vp/replay_protection.rs | 7 +- 7 files changed, 207 insertions(+), 82 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index e0c04a09eb..793cf56cff 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -146,7 +146,8 @@ where response.events.push(tx_event); // if the rejected tx was decrypted, remove it // from the queue of txs to be processed - // Tx hash has already been removed from storage in process_proposal + // Tx hash has already been removed from storage in + // process_proposal if let TxType::Decrypted(_) = &tx_type { self.wl_storage.storage.tx_queue.pop(); } @@ -960,7 +961,7 @@ mod test_finalize_block { let wrapper_tx = WrapperTx::new( Fee { amount: 0.into(), - token: shell.storage.native_token.clone(), + token: shell.wl_storage.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -975,6 +976,7 @@ mod test_finalize_block { let inner_hash_key = replay_protection::get_tx_hash_key(&wrapper_tx.tx_hash); shell + .wl_storage .storage .write(&inner_hash_key, vec![]) .expect("Test failed"); @@ -1000,11 +1002,12 @@ mod test_finalize_block { }) .expect("Test failed")[0]; - //FIXME: @grarco, uncomment when proper gas metering is in place + // FIXME: @grarco, uncomment when proper gas metering is in place // // Check inner tx hash has been removed from storage // assert_eq!(event.event_type.to_string(), String::from("applied")); - // let code = event.attributes.get("code").expect("Test failed").as_str(); - // assert_eq!(code, String::from(ErrorCodes::WasmRuntimeError).as_str()); + // let code = event.attributes.get("code").expect("Test + // failed").as_str(); assert_eq!(code, + // String::from(ErrorCodes::WasmRuntimeError).as_str()); // assert!( // !shell diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 314e4c5cef..cbe07dad88 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -626,7 +626,11 @@ where .0 { response.code = ErrorCodes::ReplayTx.into(); - response.log = format!("Inner transaction hash {} already in storage, replay attempt", wrapper.tx_hash); + response.log = format!( + "Inner transaction hash {} already in storage, replay \ + 
attempt", + wrapper.tx_hash + ); return response; } @@ -641,7 +645,11 @@ where .0 { response.code = ErrorCodes::ReplayTx.into(); - response.log = format!("Wrapper transaction hash {} already in storage, replay attempt", wrapper_hash); + response.log = format!( + "Wrapper transaction hash {} already in storage, replay \ + attempt", + wrapper_hash + ); return response; } @@ -1124,13 +1132,13 @@ mod test_utils { /// Test the failure cases of [`mempool_validate`] #[cfg(test)] mod test_mempool_validate { - use super::test_utils::TestShell; - use super::MempoolTxType; - use super::*; use namada::proto::SignedTxData; use namada::types::storage::Epoch; use namada::types::transaction::{Fee, WrapperTx}; + use super::test_utils::TestShell; + use super::{MempoolTxType, *}; + /// Mempool validation must reject unsigned wrappers #[test] fn test_missing_signature() { @@ -1277,7 +1285,8 @@ mod test_mempool_validate { assert_eq!(result.log, "Unsupported tx type") } - /// Mempool validation must reject already applied wrapper and decrypted transactions + /// Mempool validation must reject already applied wrapper and decrypted + /// transactions #[test] fn test_replay_attack() { let (mut shell, _) = TestShell::new(); @@ -1327,14 +1336,28 @@ mod test_mempool_validate { MempoolTxType::NewTransaction, ); assert_eq!(result.code, u32::from(ErrorCodes::ReplayTx)); - assert_eq!(result.log, format!("Wrapper transaction hash {} already in storage, replay attempt", wrapper_hash)); + assert_eq!( + result.log, + format!( + "Wrapper transaction hash {} already in storage, replay \ + attempt", + wrapper_hash + ) + ); let result = shell.mempool_validate( wrapper.to_bytes().as_ref(), MempoolTxType::RecheckTransaction, ); assert_eq!(result.code, u32::from(ErrorCodes::ReplayTx)); - assert_eq!(result.log, format!("Wrapper transaction hash {} already in storage, replay attempt", wrapper_hash)); + assert_eq!( + result.log, + format!( + "Wrapper transaction hash {} already in storage, replay \ + attempt", + wrapper_hash + ) + ); // Write inner hash in storage let inner_hash_key = diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 1d4f4ccbf1..2322b48d08 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -139,21 +139,22 @@ where if wrapper.tx.tx_hash != tx.hash_commitment() { TxResult { code: ErrorCodes::InvalidOrder.into(), - info: - "Process proposal rejected a decrypted \ - transaction that violated the tx order \ - determined in the previous block" - .into(), + info: "Process proposal rejected a \ + decrypted transaction that \ + violated the tx order determined \ + in the previous block" + .into(), } } else if verify_decrypted_correctly(&tx, privkey) { TxResult { code: ErrorCodes::Ok.into(), info: "Process Proposal accepted this \ - transaction" + transaction" .into(), } } else { - // Remove decrypted transaction hash from storage + // Remove decrypted transaction hash from + // storage let inner_hash_key = replay_protection::get_tx_hash_key( &wrapper.tx.tx_hash, @@ -165,7 +166,8 @@ where TxResult { code: ErrorCodes::Undecryptable.into(), info: "The encrypted payload of tx was \ - incorrectly marked as un-decryptable" + incorrectly marked as \ + un-decryptable" .into(), } } @@ -200,13 +202,20 @@ where if temp_wl_storage .storage .has_key(&inner_hash_key) - .expect("Error while checking inner tx hash key in storage") + .expect( + "Error while checking inner tx hash key in \ + storage", 
+ ) .0 { return TxResult { - code: ErrorCodes::ReplayTx.into(), - info: format!("Inner transaction hash {} already in storage, replay attempt", &tx.tx_hash) - }; + code: ErrorCodes::ReplayTx.into(), + info: format!( + "Inner transaction hash {} already in \ + storage, replay attempt", + &tx.tx_hash + ), + }; } if let (Some(m), _) = temp_wl_storage.write_log.read(&inner_hash_key) @@ -214,14 +223,18 @@ where // Check in WAL for replay attack in the same block if let StorageModification::Write { value: _ } = m { return TxResult { - code: ErrorCodes::ReplayTx.into(), - info: format!("Inner transaction hash {} already in storage, replay attempt", &tx.tx_hash) - }; + code: ErrorCodes::ReplayTx.into(), + info: format!( + "Inner transaction hash {} already in \ + storage, replay attempt", + &tx.tx_hash + ), + }; } } // Write inner hash to WAL - temp_wl_storage.write_log.write(&inner_hash_key, vec![]).expect("Couldn't write inner tranasction hash to write log"); + temp_wl_storage.write_log.write(&inner_hash_key, vec![]).expect("Couldn't write inner transaction hash to write log"); let wrapper_hash = transaction::unsigned_hash_tx(tx_bytes); @@ -233,15 +246,37 @@ where info: format!("Wrapper transaction hash {} already in storage, replay attempt", wrapper_hash) }; } + if temp_wl_storage + .storage + .has_key(&wrapper_hash_key) + .expect( + "Error while checking wrapper tx hash key in \ + storage", + ) + .0 + { + return TxResult { + code: ErrorCodes::ReplayTx.into(), + info: format!( + "Wrapper transaction hash {} already in \ + storage, replay attempt", + wrapper_hash + ), + }; + } if let (Some(m), _) = temp_wl_storage.write_log.read(&wrapper_hash_key) { // Check in WAL for replay attack in the same block if let StorageModification::Write { value: _ } = m { return TxResult { - code: ErrorCodes::ReplayTx.into(), - info: format!("Wrapper transaction hash {} already in storage, replay attempt", wrapper_hash) - }; + code: ErrorCodes::ReplayTx.into(), + info: format!( + "Wrapper transaction hash {} already \ + in storage, replay attempt", + wrapper_hash + ), + }; } } @@ -372,7 +407,8 @@ mod test_process_proposal { } } - /// Test that a block including a wrapper tx with invalid signature is rejected + /// Test that a block including a wrapper tx with invalid signature is + /// rejected #[test] fn test_wrapper_bad_signature_rejected() { let (mut shell, _) = TestShell::new(); @@ -494,10 +530,11 @@ mod test_process_proposal { u32::from(ErrorCodes::InvalidTx) ); assert_eq!( - response[0].result.info, - "The address given does not have sufficient balance to pay fee" - .to_string(), - ); + response[0].result.info, + "The address given does not have sufficient balance to \ + pay fee" + .to_string(), + ); } } } @@ -552,11 +589,12 @@ mod test_process_proposal { u32::from(ErrorCodes::InvalidTx) ); assert_eq!( - response[0].result.info, - String::from( - "The address given does not have sufficient balance to pay fee" - ) - ); + response[0].result.info, + String::from( + "The address given does not have sufficient balance \ + to pay fee" + ) + ); } } } @@ -853,12 +891,12 @@ mod test_process_proposal { u32::from(ErrorCodes::InvalidTx) ); assert_eq!( - response[0].result.info, - String::from( - "Transaction rejected: Non-encrypted transactions are not \ - supported" - ), - ); + response[0].result.info, + String::from( + "Transaction rejected: Non-encrypted transactions are \ + not supported" + ), + ); } } } @@ -878,7 +916,7 @@ mod test_process_proposal { let wrapper = WrapperTx::new( Fee { amount: 0.into(), - token: 
shell.storage.native_token.clone(), + token: shell.wl_storage.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -895,7 +933,11 @@ mod test_process_proposal { transaction::unsigned_hash_tx(&signed.to_bytes()); let hash_key = replay_protection::get_tx_hash_key(&wrapper_unsigned_hash); - shell.storage.write(&hash_key, vec![]).expect("Test failed"); + shell + .wl_storage + .storage + .write(&hash_key, vec![]) + .expect("Test failed"); // Run validation let request = ProcessProposal { @@ -909,9 +951,13 @@ mod test_process_proposal { u32::from(ErrorCodes::ReplayTx) ); assert_eq!( - response[0].result.info, -format!("Wrapper transaction hash {} already in storage, replay attempt", wrapper_unsigned_hash) - ); + response[0].result.info, + format!( + "Wrapper transaction hash {} already in storage, \ + replay attempt", + wrapper_unsigned_hash + ) + ); } } } @@ -923,6 +969,17 @@ format!("Wrapper transaction hash {} already in storage, replay attempt", wrappe let keypair = crate::wallet::defaults::daewon_keypair(); + // Add unshielded balance for fee payment + let balance_key = token::balance_key( + &shell.wl_storage.storage.native_token, + &Address::from(&keypair.ref_to()), + ); + shell + .wl_storage + .storage + .write(&balance_key, Amount::from(1000).try_to_vec().unwrap()) + .unwrap(); + let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), @@ -930,13 +987,15 @@ format!("Wrapper transaction hash {} already in storage, replay attempt", wrappe let wrapper = WrapperTx::new( Fee { amount: 0.into(), - token: shell.storage.native_token.clone(), + token: shell.wl_storage.storage.native_token.clone(), }, &keypair, Epoch(0), 0.into(), tx, Default::default(), + #[cfg(not(feature = "mainnet"))] + None, ); let signed = wrapper.sign(&keypair).expect("Test failed"); @@ -952,12 +1011,17 @@ format!("Wrapper transaction hash {} already in storage, replay attempt", wrappe response[1].result.code, u32::from(ErrorCodes::ReplayTx) ); - // The checks happens on the inner hash first, do the tx is rejected because of this - // hash, not the wrapper one + // The checks happens on the inner hash first, so the tx is + // rejected because of this hash, not the + // wrapper one assert_eq!( - response[1].result.info, -format!("Inner transaction hash {} already in storage, replay attempt", wrapper.tx_hash) - ); + response[1].result.info, + format!( + "Inner transaction hash {} already in storage, replay \ + attempt", + wrapper.tx_hash + ) + ); } } } @@ -977,7 +1041,7 @@ format!("Inner transaction hash {} already in storage, replay attempt", wrapper. let wrapper = WrapperTx::new( Fee { amount: 0.into(), - token: shell.storage.native_token.clone(), + token: shell.wl_storage.storage.native_token.clone(), }, &keypair, Epoch(0), @@ -992,7 +1056,11 @@ format!("Inner transaction hash {} already in storage, replay attempt", wrapper. // Write inner hash to storage let hash_key = replay_protection::get_tx_hash_key(&inner_unsigned_hash); - shell.storage.write(&hash_key, vec![]).expect("Test failed"); + shell + .wl_storage + .storage + .write(&hash_key, vec![]) + .expect("Test failed"); // Run validation let request = ProcessProposal { @@ -1006,14 +1074,19 @@ format!("Inner transaction hash {} already in storage, replay attempt", wrapper. 
u32::from(ErrorCodes::ReplayTx) ); assert_eq!( - response[0].result.info, -format!("Inner transaction hash {} already in storage, replay attempt", inner_unsigned_hash) - ); + response[0].result.info, + format!( + "Inner transaction hash {} already in storage, replay \ + attempt", + inner_unsigned_hash + ) + ); } } } - /// Test that a block containing two identical inner transactions is rejected + /// Test that a block containing two identical inner transactions is + /// rejected #[test] fn test_inner_tx_hash_same_block() { let (mut shell, _) = TestShell::new(); @@ -1021,6 +1094,28 @@ format!("Inner transaction hash {} already in storage, replay attempt", inner_un let keypair = crate::wallet::defaults::daewon_keypair(); let keypair_2 = crate::wallet::defaults::daewon_keypair(); + // Add unshielded balance for fee payment + let balance_key = token::balance_key( + &shell.wl_storage.storage.native_token, + &Address::from(&keypair.ref_to()), + ); + shell + .wl_storage + .storage + .write(&balance_key, Amount::from(1000).try_to_vec().unwrap()) + .unwrap(); + + // Add unshielded balance for fee payment + let balance_key = token::balance_key( + &shell.wl_storage.storage.native_token, + &Address::from(&keypair_2.ref_to()), + ); + shell + .wl_storage + .storage + .write(&balance_key, Amount::from(1000).try_to_vec().unwrap()) + .unwrap(); + let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), @@ -1028,13 +1123,15 @@ format!("Inner transaction hash {} already in storage, replay attempt", inner_un let wrapper = WrapperTx::new( Fee { amount: 0.into(), - token: shell.storage.native_token.clone(), + token: shell.wl_storage.storage.native_token.clone(), }, &keypair, Epoch(0), 0.into(), tx.clone(), Default::default(), + #[cfg(not(feature = "mainnet"))] + None, ); let inner_unsigned_hash = wrapper.tx_hash.clone(); let signed = wrapper.sign(&keypair).expect("Test failed"); @@ -1042,13 +1139,15 @@ format!("Inner transaction hash {} already in storage, replay attempt", inner_un let new_wrapper = WrapperTx::new( Fee { amount: 0.into(), - token: shell.storage.native_token.clone(), + token: shell.wl_storage.storage.native_token.clone(), }, &keypair_2, Epoch(0), 0.into(), tx, Default::default(), + #[cfg(not(feature = "mainnet"))] + None, ); let new_signed = new_wrapper.sign(&keypair).expect("Test failed"); @@ -1065,9 +1164,13 @@ format!("Inner transaction hash {} already in storage, replay attempt", inner_un u32::from(ErrorCodes::ReplayTx) ); assert_eq!( - response[1].result.info, -format!("Inner transaction hash {} already in storage, replay attempt", inner_unsigned_hash) - ); + response[1].result.info, + format!( + "Inner transaction hash {} already in storage, replay \ + attempt", + inner_unsigned_hash + ) + ); } } } diff --git a/core/src/types/address.rs b/core/src/types/address.rs index a17298130a..1547b70e22 100644 --- a/core/src/types/address.rs +++ b/core/src/types/address.rs @@ -788,7 +788,8 @@ pub mod testing { InternalAddress::IbcBurn => {} InternalAddress::IbcMint => {} InternalAddress::EthBridge => {} - InternalAddress::ReplayProtection => {} /* Add new addresses in the + InternalAddress::ReplayProtection => {} /* Add new addresses in + * the * `prop_oneof` below. 
*/ }; prop_oneof![ diff --git a/core/src/types/internal.rs b/core/src/types/internal.rs index ebaaa43ed8..d13d392381 100644 --- a/core/src/types/internal.rs +++ b/core/src/types/internal.rs @@ -40,11 +40,7 @@ impl HostEnvResult { impl From for HostEnvResult { fn from(success: bool) -> Self { - if success { - Self::Success - } else { - Self::Fail - } + if success { Self::Success } else { Self::Fail } } } diff --git a/core/src/types/transaction/decrypted.rs b/core/src/types/transaction/decrypted.rs index 42367524ec..60c9ef5231 100644 --- a/core/src/types/transaction/decrypted.rs +++ b/core/src/types/transaction/decrypted.rs @@ -56,8 +56,8 @@ pub mod decrypted_tx { } /// Return the hash used as a commitment to the tx's contents in the - /// wrapper tx that includes this tx as an encrypted payload. The commitment - /// is computed on the unsigned tx if tx is signed + /// wrapper tx that includes this tx as an encrypted payload. The + /// commitment is computed on the unsigned tx if tx is signed pub fn hash_commitment(&self) -> Hash { match self { DecryptedTx::Decrypted { diff --git a/shared/src/ledger/native_vp/replay_protection.rs b/shared/src/ledger/native_vp/replay_protection.rs index b764870957..13bd35903a 100644 --- a/shared/src/ledger/native_vp/replay_protection.rs +++ b/shared/src/ledger/native_vp/replay_protection.rs @@ -2,11 +2,10 @@ use std::collections::BTreeSet; -use thiserror::Error; - use namada_core::ledger::{replay_protection, storage}; use namada_core::types::address::{Address, InternalAddress}; use namada_core::types::storage::Key; +use thiserror::Error; use crate::ledger::native_vp::{self, Ctx, NativeVp}; use crate::vm::WasmCacheAccess; @@ -38,10 +37,10 @@ where H: 'static + storage::StorageHasher, CA: 'static + WasmCacheAccess, { - const ADDR: InternalAddress = InternalAddress::ReplayProtection; - type Error = Error; + const ADDR: InternalAddress = InternalAddress::ReplayProtection; + fn validate_tx( &self, _tx_data: &[u8], From fbb2bf5385d8a90491a0cdf37be7ae6d92c7ebef Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 13 Jan 2023 10:50:37 +0100 Subject: [PATCH 20/58] Clippy --- .../lib/node/ledger/shell/finalize_block.rs | 6 +- apps/src/lib/node/ledger/shell/mod.rs | 8 +-- .../lib/node/ledger/shell/process_proposal.rs | 60 +++++++++---------- .../src/ledger/native_vp/replay_protection.rs | 3 +- 4 files changed, 38 insertions(+), 39 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 793cf56cff..2699b14512 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -955,7 +955,7 @@ mod test_finalize_block { let tx_code = std::fs::read(wasm_path) .expect("Expected a file at given code path"); let raw_tx = Tx::new( - tx_code.clone(), + tx_code, Some("Encrypted transaction data".as_bytes().to_owned()), ); let wrapper_tx = WrapperTx::new( @@ -983,7 +983,7 @@ mod test_finalize_block { let processed_tx = ProcessedTx { tx: Tx::from(TxType::Decrypted(DecryptedTx::Decrypted { - tx: raw_tx.clone(), + tx: raw_tx, #[cfg(not(feature = "mainnet"))] has_valid_pow: false, })) @@ -1002,7 +1002,7 @@ mod test_finalize_block { }) .expect("Test failed")[0]; - // FIXME: @grarco, uncomment when proper gas metering is in place + // FIXME: uncomment when proper gas metering is in place // // Check inner tx hash has been removed from storage // assert_eq!(event.event_type.to_string(), String::from("applied")); // let code = 
event.attributes.get("code").expect("Test diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index cbe07dad88..d6fd256bf9 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -36,10 +36,10 @@ use namada::ledger::storage_api::{self, StorageRead}; use namada::ledger::{ibc, pos, protocol}; use namada::proof_of_stake::{self, read_pos_params, slash}; use namada::proto::{self, Tx}; -use namada::types::address; use namada::types::address::{masp, masp_tx_key, Address}; use namada::types::chain::ChainId; use namada::types::internal::WrapperTxInQueue; +use namada::types::key::*; use namada::types::storage::{BlockHeight, Key, TxIndex}; use namada::types::time::{DateTimeUtc, TimeZone, Utc}; use namada::types::token::{self}; @@ -47,7 +47,7 @@ use namada::types::transaction::{ hash_tx, process_tx, verify_decrypted_correctly, AffineCurve, DecryptedTx, EllipticCurve, PairingEngine, TxType, MIN_FEE, }; -use namada::types::{key::*, transaction}; +use namada::types::{address, transaction}; use namada::vm::wasm::{TxCache, VpCache}; use namada::vm::WasmCacheRwAccess; use num_derive::{FromPrimitive, ToPrimitive}; @@ -672,8 +672,8 @@ where if !has_valid_pow && self.get_wrapper_tx_fees() > balance { response.code = 1; response.log = String::from( - "The address given does not have sufficient \ - balance to pay fee", + "The address given does not have sufficient balance to \ + pay fee", ); return response; } diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 2322b48d08..28e2d0fa29 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -39,10 +39,10 @@ where let tx_results = self.process_txs(&req.txs); ProcessProposal { - status: if tx_results.iter().any(|res| match res.code { - 1 | 2 | 4 | 5 | 7 => true, - _ => false, - }) { + status: if tx_results + .iter() + .any(|res| matches!(res.code, 1 | 2 | 4 | 5 | 7)) + { ProposalStatus::Reject as i32 } else { ProposalStatus::Accept as i32 @@ -217,20 +217,20 @@ where ), }; } - if let (Some(m), _) = - temp_wl_storage.write_log.read(&inner_hash_key) + // Check in WAL for replay attack in the same block + if let ( + Some(StorageModification::Write { value: _ }), + _, + ) = temp_wl_storage.write_log.read(&inner_hash_key) { - // Check in WAL for replay attack in the same block - if let StorageModification::Write { value: _ } = m { - return TxResult { - code: ErrorCodes::ReplayTx.into(), - info: format!( - "Inner transaction hash {} already in \ - storage, replay attempt", - &tx.tx_hash - ), - }; - } + return TxResult { + code: ErrorCodes::ReplayTx.into(), + info: format!( + "Inner transaction hash {} already in \ + storage, replay attempt", + &tx.tx_hash + ), + }; } // Write inner hash to WAL @@ -264,20 +264,20 @@ where ), }; } - if let (Some(m), _) = - temp_wl_storage.write_log.read(&wrapper_hash_key) + // Check in WAL for replay attack in the same block + if let ( + Some(StorageModification::Write { value: _ }), + _, + ) = temp_wl_storage.write_log.read(&wrapper_hash_key) { - // Check in WAL for replay attack in the same block - if let StorageModification::Write { value: _ } = m { - return TxResult { - code: ErrorCodes::ReplayTx.into(), - info: format!( - "Wrapper transaction hash {} already \ - in storage, replay attempt", - wrapper_hash - ), - }; - } + return TxResult { + code: ErrorCodes::ReplayTx.into(), + info: format!( + "Wrapper transaction hash 
{} already in \ + storage, replay attempt", + wrapper_hash + ), + }; } // Write wrapper hash to WAL diff --git a/shared/src/ledger/native_vp/replay_protection.rs b/shared/src/ledger/native_vp/replay_protection.rs index 13bd35903a..949ff70fdc 100644 --- a/shared/src/ledger/native_vp/replay_protection.rs +++ b/shared/src/ledger/native_vp/replay_protection.rs @@ -61,11 +61,10 @@ where } } +#[allow(clippy::upper_case_acronyms)] enum KeyType { - #[allow(clippy::upper_case_acronyms)] #[allow(non_camel_case_types)] TX_HASH, - #[allow(clippy::upper_case_acronyms)] UNKNOWN, } From ef04e1c3b54e9affec76e1e87f3771a3e59ea42e Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Fri, 13 Jan 2023 10:18:58 +0000 Subject: [PATCH 21/58] [ci] wasm checksums update --- wasm/checksums.json | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/wasm/checksums.json b/wasm/checksums.json index 7c2b824921..b5f60d67b1 100644 --- a/wasm/checksums.json +++ b/wasm/checksums.json @@ -1,20 +1,20 @@ { - "tx_bond.wasm": "tx_bond.f0094b887c57565472bede01d98fb77f6faac2f72597e2efb2ebfe9b1bf7c234.wasm", - "tx_change_validator_commission.wasm": "tx_change_validator_commission.02dca468021b1ec811d0f35cc4b55a24f7c3f7b5e51f16399709257421f4a1f4.wasm", - "tx_ibc.wasm": "tx_ibc.a1735e3221f1ae055c74bb52327765dd37e8676e15fab496f9ab0ed4d0628f51.wasm", - "tx_init_account.wasm": "tx_init_account.7b6eafeceb81b679c382279a5d9c40dfd81fcf37e5a1940340355c9f55af1543.wasm", - "tx_init_proposal.wasm": "tx_init_proposal.f2ed71fe70fc564e1d67e4e7d2ea25466327b62ba2eee18ece0021abff9e2c82.wasm", - "tx_init_validator.wasm": "tx_init_validator.fedcfaecaf37e3e7d050c76a4512baa399fc528710a27038573df53596613a2c.wasm", - "tx_reveal_pk.wasm": "tx_reveal_pk.3e5417561e8108d4045775bf6d095cbaad22c73ff17a5ba2ad11a1821665a58a.wasm", - "tx_transfer.wasm": "tx_transfer.833a3849ca2c417f4e907c95c6eb15e6b52827458cf603e1c4f5511ab3e4fe76.wasm", - "tx_unbond.wasm": "tx_unbond.d4fd6c94abb947533a2728940b43fb021a008ad593c7df7a3886c4260cac39b5.wasm", - "tx_update_vp.wasm": "tx_update_vp.6d1eabab15dc6d04eec5b25ad687f026a4d6c3f118a1d7aca9232394496bd845.wasm", - "tx_vote_proposal.wasm": "tx_vote_proposal.54b594f686a72869b0d7f15492591854f26e287a1cf3b6e543b0246d5ac61003.wasm", - "tx_withdraw.wasm": "tx_withdraw.342c222d0707eb5b5a44b89fc1245f527be3fdf841af64152a9ab35a4766e1b5.wasm", - "vp_implicit.wasm": "vp_implicit.73678ac01aa009ac4e0d4a49eecaa19b49cdd3d95f6862a9558c9b175ae68260.wasm", - "vp_masp.wasm": "vp_masp.85446251f8e1defed81549dab37edfe8e640339c7230e678b65340cf71ce1369.wasm", - "vp_testnet_faucet.wasm": "vp_testnet_faucet.573b882a806266d6cdfa635fe803e46d6ce89c99321196c231c61d05193a086d.wasm", - "vp_token.wasm": "vp_token.8c6e5a86f047e7b1f1004f0d8a4e91fad1b1c0226a6e42d7fe350f98dc84359b.wasm", - "vp_user.wasm": "vp_user.75c68f018f163d18d398cb4082b261323d115aae43ec021c868d1128e4b0ee29.wasm", - "vp_validator.wasm": "vp_validator.2dc9f1c8f106deeef5ee988955733955444d16b400ebb16a25e7d71e4b1be874.wasm" + "tx_bond.wasm": "tx_bond.ce4056ba250c6c8ab7db69e92ebdf6a4e55bf0025ab34eb1722daf805c068c13.wasm", + "tx_change_validator_commission.wasm": "tx_change_validator_commission.9435458f53464264cb5baae9c785abae0407883efea49c7b31d23ec020552dbb.wasm", + "tx_ibc.wasm": "tx_ibc.676c792fbaef8e1b343232ab0d21aef813b58634fc8608ca912a30f5ed1c70ae.wasm", + "tx_init_account.wasm": "tx_init_account.97c1bb239724ef47220e118af6e165e0305fda1029cd11acb2c6115eb9a52b15.wasm", + "tx_init_proposal.wasm": 
"tx_init_proposal.172aba92f957a845e0da72e17ffa2fa1ef792351d1dd5efef87eea7564b523d5.wasm", + "tx_init_validator.wasm": "tx_init_validator.d17556e476ee3da9b49517c6e95d489d158bd747150c40ac841b65ba6cd4fb9b.wasm", + "tx_reveal_pk.wasm": "tx_reveal_pk.32cfb9013faba82110e7e2c89b85ab670398443b02273de24a93e23cd9c15dcd.wasm", + "tx_transfer.wasm": "tx_transfer.87323ef9752a9a1c401f40adb25ee24446a1c7e923bd9f8824ff43a6f062c2c8.wasm", + "tx_unbond.wasm": "tx_unbond.68580ee3680cc7510fd11e619ba85fa256ae6c92b98b67f0013543df699ebdd9.wasm", + "tx_update_vp.wasm": "tx_update_vp.5db8fed8deed5496ed948ac7926e66d704ceb1a86c97df85c9ead2496a8e7bdb.wasm", + "tx_vote_proposal.wasm": "tx_vote_proposal.0717fc649ac9f8fc2c1a9ffc09fe63530e3f917f067f59600cf1c7b888b62232.wasm", + "tx_withdraw.wasm": "tx_withdraw.671caf342d11b7b83a75f9dca2a0e9a698db21c95f1dcc106348cb2c5028d4c9.wasm", + "vp_implicit.wasm": "vp_implicit.5d63bdb0500742684f4dfb0aa2c1e483c64f578bf660b73846cb7f96407420a5.wasm", + "vp_masp.wasm": "vp_masp.5d4f60fe5c5d7e3736d6a6e37e08bd95ab49a9400202c592ec01fe6b7f11f2a7.wasm", + "vp_testnet_faucet.wasm": "vp_testnet_faucet.2334ed514ed765df8db42a19091696da3734a66bf58f367a36319840f3202f78.wasm", + "vp_token.wasm": "vp_token.31cbc8236f5912a704bc1796b9af3c049693ab9cceaf673865d6d43912f789d4.wasm", + "vp_user.wasm": "vp_user.49802e857c1b955e4901b370738190e31d5cb465fe0f3858082bc074339a0491.wasm", + "vp_validator.wasm": "vp_validator.57b64ff60678d16a0dbe2c55ede96b4e65437a97ddaf9edfba313c4eb15f8f4f.wasm" } \ No newline at end of file From ec6a5703189e0b906c2d35742ff824d20f563b07 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 13 Jan 2023 11:38:23 +0100 Subject: [PATCH 22/58] changelog: add #1017 --- .../unreleased/improvements/1017-replay-protection-impl.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/improvements/1017-replay-protection-impl.md diff --git a/.changelog/unreleased/improvements/1017-replay-protection-impl.md b/.changelog/unreleased/improvements/1017-replay-protection-impl.md new file mode 100644 index 0000000000..1783a89251 --- /dev/null +++ b/.changelog/unreleased/improvements/1017-replay-protection-impl.md @@ -0,0 +1,2 @@ +- Adds hash-based replay protection + ([#1017](https://github.com/anoma/namada/pull/1017)) \ No newline at end of file From 83915773c9f53824aed1eebca2ffbb582e0fc2b8 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 19 Jan 2023 18:21:13 +0100 Subject: [PATCH 23/58] Fixes typos --- apps/src/lib/node/ledger/shell/finalize_block.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 2699b14512..b031327bb5 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -339,12 +339,10 @@ where msg ); stats.increment_errored_txs(); - self.wl_storage.drop_tx(); - // FIXME: unit test // If transaction type is Decrypted and failed because of // out of gas, remove its hash from storage to allow - // repwrapping it + // rewrapping it if let Some(hash) = tx_unsigned_hash { if let Error::TxApply(protocol::Error::GasError(namada::ledger::gas::Error::TransactionGasExceededError)) = msg From 8b21bfd39ec30badc1043c6a0b06923fac007720 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 19 Jan 2023 18:28:24 +0100 Subject: [PATCH 24/58] Replay protection VP always rejects --- .../src/ledger/native_vp/replay_protection.rs | 31 ++----------------- 1 file changed, 3 
insertions(+), 28 deletions(-) diff --git a/shared/src/ledger/native_vp/replay_protection.rs b/shared/src/ledger/native_vp/replay_protection.rs index 949ff70fdc..3e3c4b7ca0 100644 --- a/shared/src/ledger/native_vp/replay_protection.rs +++ b/shared/src/ledger/native_vp/replay_protection.rs @@ -2,7 +2,7 @@ use std::collections::BTreeSet; -use namada_core::ledger::{replay_protection, storage}; +use namada_core::ledger::storage; use namada_core::types::address::{Address, InternalAddress}; use namada_core::types::storage::Key; use thiserror::Error; @@ -44,36 +44,11 @@ where fn validate_tx( &self, _tx_data: &[u8], - keys_changed: &BTreeSet, + _keys_changed: &BTreeSet, _verifiers: &BTreeSet
, ) -> Result { // VP should prevent any modification of the subspace. // Changes are only allowed from protocol - let result = keys_changed.iter().all(|key| { - let key_type: KeyType = key.into(); - match key_type { - KeyType::TX_HASH => false, - KeyType::UNKNOWN => true, - } - }); - - Ok(result) - } -} - -#[allow(clippy::upper_case_acronyms)] -enum KeyType { - #[allow(non_camel_case_types)] - TX_HASH, - UNKNOWN, -} - -impl From<&Key> for KeyType { - fn from(value: &Key) -> Self { - if replay_protection::is_tx_hash_key(value) { - KeyType::TX_HASH - } else { - KeyType::UNKNOWN - } + Ok(false) } } From f3c3cdbf391c88a2290fad7ccf0e5dcf87e261c6 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 20 Jan 2023 15:25:14 +0100 Subject: [PATCH 25/58] Fixes tx unsigned hash --- apps/src/lib/node/ledger/shell/mod.rs | 10 +++--- .../lib/node/ledger/shell/process_proposal.rs | 32 ++++++++++--------- core/src/proto/types.rs | 26 +++++++++++++++ core/src/types/transaction/decrypted.rs | 4 +-- core/src/types/transaction/mod.rs | 13 -------- core/src/types/transaction/wrapper.rs | 13 +++----- 6 files changed, 55 insertions(+), 43 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index d6fd256bf9..aac4b51fbe 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -36,8 +36,10 @@ use namada::ledger::storage_api::{self, StorageRead}; use namada::ledger::{ibc, pos, protocol}; use namada::proof_of_stake::{self, read_pos_params, slash}; use namada::proto::{self, Tx}; +use namada::types::address; use namada::types::address::{masp, masp_tx_key, Address}; use namada::types::chain::ChainId; +use namada::types::hash; use namada::types::internal::WrapperTxInQueue; use namada::types::key::*; use namada::types::storage::{BlockHeight, Key, TxIndex}; @@ -47,7 +49,6 @@ use namada::types::transaction::{ hash_tx, process_tx, verify_decrypted_correctly, AffineCurve, DecryptedTx, EllipticCurve, PairingEngine, TxType, MIN_FEE, }; -use namada::types::{address, transaction}; use namada::vm::wasm::{TxCache, VpCache}; use namada::vm::WasmCacheRwAccess; use num_derive::{FromPrimitive, ToPrimitive}; @@ -604,7 +605,7 @@ where }; // Tx signature check - let tx_type = match process_tx(tx) { + let tx_type = match process_tx(tx.clone()) { Ok(ty) => ty, Err(msg) => { response.code = ErrorCodes::InvalidSig.into(); @@ -634,7 +635,7 @@ where return response; } - let wrapper_hash = transaction::unsigned_hash_tx(tx_bytes); + let wrapper_hash = hash::Hash(tx.unsigned_hash()); let wrapper_hash_key = replay_protection::get_tx_hash_key(&wrapper_hash); if self @@ -1320,8 +1321,7 @@ mod test_mempool_validate { }; // Write wrapper hash to storage - let wrapper_hash = - super::transaction::unsigned_hash_tx(&wrapper.to_bytes()); + let wrapper_hash = hash::Hash(wrapper.unsigned_hash()); let wrapper_hash_key = replay_protection::get_tx_hash_key(&wrapper_hash); shell diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 28e2d0fa29..f9765fb8ef 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -1,6 +1,7 @@ //! Implementation of the ['VerifyHeader`], [`ProcessProposal`], //! 
and [`RevertProposal`] ABCI++ methods for the Shell +use namada::core::types::hash::Hash; use namada::ledger::storage::write_log::StorageModification; use namada::ledger::storage::TempWlStorage; use namada::types::internal::WrapperTxInQueue; @@ -113,7 +114,7 @@ where // TODO: This should not be hardcoded let privkey = ::G2Affine::prime_subgroup_generator(); - match process_tx(tx) { + match process_tx(tx.clone()) { // This occurs if the wrapper / protocol tx signature is invalid Err(err) => TxResult { code: ErrorCodes::InvalidSig.into(), @@ -179,9 +180,9 @@ where }, } } - TxType::Wrapper(tx) => { + TxType::Wrapper(wrapper) => { // validate the ciphertext via Ferveo - if !tx.validate_ciphertext() { + if !wrapper.validate_ciphertext() { TxResult { code: ErrorCodes::InvalidTx.into(), info: format!( @@ -197,8 +198,9 @@ where // are listed with the Wrapper txs before the decrypted // ones, so there's no need to check the WAL before the // storage - let inner_hash_key = - replay_protection::get_tx_hash_key(&tx.tx_hash); + let inner_hash_key = replay_protection::get_tx_hash_key( + &wrapper.tx_hash, + ); if temp_wl_storage .storage .has_key(&inner_hash_key) @@ -213,7 +215,7 @@ where info: format!( "Inner transaction hash {} already in \ storage, replay attempt", - &tx.tx_hash + &wrapper.tx_hash ), }; } @@ -228,7 +230,7 @@ where info: format!( "Inner transaction hash {} already in \ storage, replay attempt", - &tx.tx_hash + &wrapper.tx_hash ), }; } @@ -236,8 +238,7 @@ where // Write inner hash to WAL temp_wl_storage.write_log.write(&inner_hash_key, vec![]).expect("Couldn't write inner transaction hash to write log"); - let wrapper_hash = - transaction::unsigned_hash_tx(tx_bytes); + let wrapper_hash = Hash(tx.unsigned_hash()); let wrapper_hash_key = replay_protection::get_tx_hash_key(&wrapper_hash); if temp_wl_storage.storage.has_key(&wrapper_hash_key).expect("Error while checking wrapper tx hash key in storage").0 { @@ -292,19 +293,21 @@ where // transaction key, then the fee payer is effectively // the MASP, otherwise derive // they payer from public key. 
- let fee_payer = if tx.pk != masp_tx_key().ref_to() { - tx.fee_payer() + let fee_payer = if wrapper.pk != masp_tx_key().ref_to() + { + wrapper.fee_payer() } else { masp() }; // check that the fee payer has sufficient balance let balance = - self.get_balance(&tx.fee.token, &fee_payer); + self.get_balance(&wrapper.fee.token, &fee_payer); // In testnets, tx is allowed to skip fees if it // includes a valid PoW #[cfg(not(feature = "mainnet"))] - let has_valid_pow = self.has_valid_pow_solution(&tx); + let has_valid_pow = + self.has_valid_pow_solution(&wrapper); #[cfg(feature = "mainnet")] let has_valid_pow = false; @@ -929,8 +932,7 @@ mod test_process_proposal { let signed = wrapper.sign(&keypair).expect("Test failed"); // Write wrapper hash to storage - let wrapper_unsigned_hash = - transaction::unsigned_hash_tx(&signed.to_bytes()); + let wrapper_unsigned_hash = Hash(signed.unsigned_hash()); let hash_key = replay_protection::get_tx_hash_key(&wrapper_unsigned_hash); shell diff --git a/core/src/proto/types.rs b/core/src/proto/types.rs index 40e343d1bf..4b73644e46 100644 --- a/core/src/proto/types.rs +++ b/core/src/proto/types.rs @@ -362,6 +362,32 @@ impl Tx { SigningTx::from(self.clone()).hash() } + pub fn unsigned_hash(&self) -> [u8; 32] { + match self.data { + Some(ref data) => { + match SignedTxData::try_from_slice(data) { + Ok(signed_data) => { + // Reconstruct unsigned tx + let unsigned_tx = Tx { + code: self.code.clone(), + data: signed_data.data, + timestamp: self.timestamp, + }; + unsigned_tx.hash() + } + Err(_) => { + // Unsigned tx + self.hash() + } + } + } + None => { + // Unsigned tx + self.hash() + } + } + } + pub fn code_hash(&self) -> [u8; 32] { SigningTx::from(self.clone()).code_hash } diff --git a/core/src/types/transaction/decrypted.rs b/core/src/types/transaction/decrypted.rs index 60c9ef5231..7f76bf9e49 100644 --- a/core/src/types/transaction/decrypted.rs +++ b/core/src/types/transaction/decrypted.rs @@ -11,7 +11,7 @@ pub mod decrypted_tx { use super::EllipticCurve; use crate::proto::Tx; - use crate::types::transaction::{self, Hash, TxType, WrapperTx}; + use crate::types::transaction::{Hash, TxType, WrapperTx}; #[derive(Clone, Debug, BorshSerialize, BorshDeserialize, BorshSchema)] #[allow(clippy::large_enum_variant)] @@ -64,7 +64,7 @@ pub mod decrypted_tx { tx, #[cfg(not(feature = "mainnet"))] has_valid_pow: _, - } => transaction::unsigned_hash_tx(tx.to_bytes().as_ref()), + } => Hash(tx.unsigned_hash()), DecryptedTx::Undecryptable(wrapper) => wrapper.tx_hash.clone(), } } diff --git a/core/src/types/transaction/mod.rs b/core/src/types/transaction/mod.rs index 3a6822777a..0e0a5e980e 100644 --- a/core/src/types/transaction/mod.rs +++ b/core/src/types/transaction/mod.rs @@ -27,7 +27,6 @@ use sha2::{Digest, Sha256}; pub use wrapper::*; use crate::ledger::gas::VpsGas; -use crate::proto::SignedTxData; use crate::types::address::Address; use crate::types::hash::Hash; use crate::types::ibc::IbcEvent; @@ -40,18 +39,6 @@ pub fn hash_tx(tx_bytes: &[u8]) -> Hash { Hash(*digest.as_ref()) } -/// Get the hash of the unsigned transaction (if signed), otherwise the hash of -/// entire tx. 
-pub fn unsigned_hash_tx(tx_bytes: &[u8]) -> Hash { - match SignedTxData::try_from_slice(tx_bytes) { - Ok(signed) => { - // Exclude the signature from the digest computation - hash_tx(signed.data.unwrap_or_default().as_ref()) - } - Err(_) => hash_tx(tx_bytes), - } -} - /// Transaction application result // TODO derive BorshSchema after #[derive(Clone, Debug, Default, BorshSerialize, BorshDeserialize)] diff --git a/core/src/types/transaction/wrapper.rs b/core/src/types/transaction/wrapper.rs index d9b9b0d157..a87b3e1fff 100644 --- a/core/src/types/transaction/wrapper.rs +++ b/core/src/types/transaction/wrapper.rs @@ -17,9 +17,7 @@ pub mod wrapper_tx { use crate::types::storage::Epoch; use crate::types::token::Amount; use crate::types::transaction::encrypted::EncryptedTx; - use crate::types::transaction::{ - self, EncryptionKey, Hash, TxError, TxType, - }; + use crate::types::transaction::{EncryptionKey, Hash, TxError, TxType}; /// Minimum fee amount in micro NAMs pub const MIN_FEE: u64 = 100; @@ -206,7 +204,7 @@ pub mod wrapper_tx { epoch, gas_limit, inner_tx, - tx_hash: transaction::unsigned_hash_tx(&tx.to_bytes()), + tx_hash: Hash(tx.unsigned_hash()), #[cfg(not(feature = "mainnet"))] pow_solution, } @@ -240,7 +238,7 @@ pub mod wrapper_tx { .map_err(|_| WrapperTxErr::InvalidTx)?; // check that the hash equals commitment - if transaction::unsigned_hash_tx(&decrypted) != self.tx_hash { + if decrypted_tx.unsigned_hash() != self.tx_hash.0 { return Err(WrapperTxErr::DecryptedHash); } @@ -349,7 +347,6 @@ pub mod wrapper_tx { use super::*; use crate::proto::SignedTxData; use crate::types::address::nam; - use crate::types::transaction::hash_tx; fn gen_keypair() -> common::SecretKey { use rand::prelude::ThreadRng; @@ -418,7 +415,7 @@ pub mod wrapper_tx { assert_matches!(err, WrapperTxErr::DecryptedHash); } - /// We check that even if the encrypted payload and has of its + /// We check that even if the encrypted payload and hash of its /// contents are correctly changed, we detect fraudulent activity /// via the signature. 
#[test] @@ -472,7 +469,7 @@ pub mod wrapper_tx { ); // We change the commitment appropriately - wrapper.tx_hash = hash_tx(&malicious.to_bytes()); + wrapper.tx_hash = Hash(malicious.unsigned_hash()); // we check ciphertext validity still passes assert!(wrapper.validate_ciphertext()); From 9f7ff17fb69f9cc884d8a2abc085a16efdddf767 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 20 Jan 2023 15:46:38 +0100 Subject: [PATCH 26/58] Removes unnecessary clones --- apps/src/lib/node/ledger/shell/mod.rs | 4 +++- apps/src/lib/node/ledger/shell/process_proposal.rs | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index aac4b51fbe..6ffd85e040 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -605,7 +605,7 @@ where }; // Tx signature check - let tx_type = match process_tx(tx.clone()) { + let tx_type = match process_tx(tx) { Ok(ty) => ty, Err(msg) => { response.code = ErrorCodes::InvalidSig.into(); @@ -635,6 +635,8 @@ where return response; } + let tx = + Tx::try_from(tx_bytes).expect("Deserialization shouldn't fail"); let wrapper_hash = hash::Hash(tx.unsigned_hash()); let wrapper_hash_key = replay_protection::get_tx_hash_key(&wrapper_hash); diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index f9765fb8ef..4e0e6e23ec 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -114,7 +114,7 @@ where // TODO: This should not be hardcoded let privkey = ::G2Affine::prime_subgroup_generator(); - match process_tx(tx.clone()) { + match process_tx(tx) { // This occurs if the wrapper / protocol tx signature is invalid Err(err) => TxResult { code: ErrorCodes::InvalidSig.into(), @@ -238,6 +238,8 @@ where // Write inner hash to WAL temp_wl_storage.write_log.write(&inner_hash_key, vec![]).expect("Couldn't write inner transaction hash to write log"); + let tx = Tx::try_from(tx_bytes) + .expect("Deserialization shouldn't fail"); let wrapper_hash = Hash(tx.unsigned_hash()); let wrapper_hash_key = replay_protection::get_tx_hash_key(&wrapper_hash); From 906c74214c4b5af45d9ce7602594c20072830b2e Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 20 Jan 2023 23:52:12 +0100 Subject: [PATCH 27/58] Removes wal from replay protection specs --- documentation/specs/src/base-ledger/replay-protection.md | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/documentation/specs/src/base-ledger/replay-protection.md b/documentation/specs/src/base-ledger/replay-protection.md index 041619edb7..e0754523a0 100644 --- a/documentation/specs/src/base-ledger/replay-protection.md +++ b/documentation/specs/src/base-ledger/replay-protection.md @@ -176,11 +176,10 @@ Both in `mempool_validation` and `process_proposal` we will perform a check (together with others, see the [relative](#wrapper-checks) section) on both the digests against the storage to check that neither of the transactions has already been executed: if this doesn't hold, the `WrapperTx` will not be -included into the mempool/block respectively. If both checks pass then both of -the hashes are added to the write ahead log in `process_proposal` to be then -committed to storage: using the WAL allows us to prevent a replay of a -transaction in the same block. The transaction is then included in the block and -executed. +included into the mempool/block respectively. 
In `process_proposal` we'll use a +temporary cache to prevent a replay of a transaction in the same block. If both +checks pass then the transaction is included in the block. The hashes are +committed to storage in `finalize_block` and the transaction is executed. In the next block we deserialize the inner transaction, check the validity of the decrypted txs and their correct order: if the order is off a new round of From b824f4e12f41c6f93dc8c39b21315d112a50c8e2 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Mon, 23 Jan 2023 18:53:31 +0100 Subject: [PATCH 28/58] Refactors replay protection logic --- .../lib/node/ledger/shell/finalize_block.rs | 80 +++++++-------- apps/src/lib/node/ledger/shell/mod.rs | 2 +- .../lib/node/ledger/shell/process_proposal.rs | 99 ++++--------------- shared/src/ledger/mod.rs | 2 +- 4 files changed, 59 insertions(+), 124 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index b031327bb5..b52f544e8d 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -3,7 +3,9 @@ use namada::ledger::pos::namada_proof_of_stake; use namada::ledger::pos::types::into_tm_voting_power; use namada::ledger::protocol; +use namada::ledger::replay_protection; use namada::ledger::storage_api::StorageRead; +use namada::types::hash; use namada::types::storage::{BlockHash, BlockResults, Header}; use namada::types::token::Amount; @@ -90,39 +92,6 @@ where continue; }; let tx_length = processed_tx.tx.len(); - // If [`process_proposal`] rejected a Tx due to invalid signature, - // emit an event here and move on to next tx. - if ErrorCodes::from_u32(processed_tx.result.code).unwrap() - == ErrorCodes::InvalidSig - { - let mut tx_event = match process_tx(tx.clone()) { - Ok(tx @ TxType::Wrapper(_)) - | Ok(tx @ TxType::Protocol(_)) => { - Event::new_tx_event(&tx, height.0) - } - _ => match TxType::try_from(tx) { - Ok(tx @ TxType::Wrapper(_)) - | Ok(tx @ TxType::Protocol(_)) => { - Event::new_tx_event(&tx, height.0) - } - _ => { - tracing::error!( - "Internal logic error: FinalizeBlock received \ - a tx with an invalid signature error code \ - that could not be deserialized to a \ - WrapperTx / ProtocolTx type" - ); - continue; - } - }, - }; - tx_event["code"] = processed_tx.result.code.to_string(); - tx_event["info"] = - format!("Tx rejected: {}", &processed_tx.result.info); - tx_event["gas_used"] = "0".into(); - response.events.push(tx_event); - continue; - } let tx_type = if let Ok(tx_type) = process_tx(tx) { tx_type @@ -145,11 +114,22 @@ where tx_event["gas_used"] = "0".into(); response.events.push(tx_event); // if the rejected tx was decrypted, remove it - // from the queue of txs to be processed - // Tx hash has already been removed from storage in - // process_proposal + // from the queue of txs to be processed and remove the hash from storage if let TxType::Decrypted(_) = &tx_type { - self.wl_storage.storage.tx_queue.pop(); + let tx_hash = self + .wl_storage + .storage + .tx_queue + .pop() + .expect("Missing wrapper tx in queue") + .tx + .tx_hash; + let tx_hash_key = + replay_protection::get_tx_hash_key(&tx_hash); + self.wl_storage + .storage + .delete(&tx_hash_key) + .expect("Error while deleting tx hash from storage"); } continue; } @@ -158,6 +138,24 @@ where TxType::Wrapper(wrapper) => { let mut tx_event = Event::new_tx_event(&tx_type, height.0); + // Writes both txs hash to storage + let tx = Tx::try_from(processed_tx.tx.as_ref()).unwrap(); + let 
wrapper_tx_hash_key = + replay_protection::get_tx_hash_key(&hash::Hash( + tx.unsigned_hash(), + )); + self.wl_storage + .storage + .write(&wrapper_tx_hash_key, vec![]) + .expect("Error while writing tx hash to storage"); + + let inner_tx_hash_key = + replay_protection::get_tx_hash_key(&wrapper.tx_hash); + self.wl_storage + .storage + .write(&inner_tx_hash_key, vec![]) + .expect("Error while writing tx hash to storage"); + #[cfg(not(feature = "mainnet"))] let has_valid_pow = self.invalidate_pow_solution_if_valid(wrapper); @@ -222,12 +220,14 @@ where } TxType::Decrypted(inner) => { // We remove the corresponding wrapper tx from the queue - let wrapper = self + let wrapper_hash = self .wl_storage .storage .tx_queue .pop() - .expect("Missing wrapper tx in queue"); + .expect("Missing wrapper tx in queue") + .tx + .tx_hash; let mut event = Event::new_tx_event(&tx_type, height.0); match inner { @@ -246,7 +246,7 @@ where event["code"] = ErrorCodes::Undecryptable.into(); } } - (event, Some(wrapper.tx.tx_hash)) + (event, Some(wrapper_hash)) } TxType::Raw(_) => { tracing::error!( diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 6ffd85e040..4a2702fd38 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -21,13 +21,13 @@ use std::path::{Path, PathBuf}; use std::rc::Rc; use borsh::{BorshDeserialize, BorshSerialize}; -use namada::core::ledger::replay_protection; use namada::ledger::events::log::EventLog; use namada::ledger::events::Event; use namada::ledger::gas::BlockGasMeter; use namada::ledger::pos::namada_proof_of_stake::types::{ ConsensusValidator, ValidatorSetUpdate, }; +use namada::ledger::replay_protection; use namada::ledger::storage::write_log::WriteLog; use namada::ledger::storage::{ DBIter, Sha256Hasher, Storage, StorageHasher, WlStorage, DB, diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 4e0e6e23ec..c84144f02f 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -40,14 +40,17 @@ where let tx_results = self.process_txs(&req.txs); ProcessProposal { - status: if tx_results - .iter() - .any(|res| matches!(res.code, 1 | 2 | 4 | 5 | 7)) - { - ProposalStatus::Reject as i32 - } else { + status: if tx_results.iter().all(|res| { + matches!( + ErrorCodes::from_u32(res.code).unwrap(), + ErrorCodes::Ok | ErrorCodes::Undecryptable + ) + }) { ProposalStatus::Accept as i32 + } else { + ProposalStatus::Reject as i32 }, + tx_results, } } @@ -63,9 +66,9 @@ where &mut tx_queue_iter, &mut temp_wl_storage, ); - if result.code == 0 || result.code == 6 { - // Commit write log in case of success or if the decrypted - // tx was invalid to remove its hash from storage + if let ErrorCodes::Ok = + ErrorCodes::from_u32(result.code).unwrap() + { temp_wl_storage.write_log.commit_tx(); } else { temp_wl_storage.write_log.drop_tx(); @@ -154,16 +157,7 @@ where .into(), } } else { - // Remove decrypted transaction hash from - // storage - let inner_hash_key = - replay_protection::get_tx_hash_key( - &wrapper.tx.tx_hash, - ); - temp_wl_storage.write_log.delete(&inner_hash_key).expect( - "Couldn't delete transaction hash from write log", - ); - + // Wrong inner tx commitment TxResult { code: ErrorCodes::Undecryptable.into(), info: "The encrypted payload of tx was \ @@ -193,38 +187,12 @@ where } } else { // Replay protection checks - // Decrypted txs hash may be removed from storage in - // case 
the tx was invalid. Txs in the block, though, - // are listed with the Wrapper txs before the decrypted - // ones, so there's no need to check the WAL before the - // storage let inner_hash_key = replay_protection::get_tx_hash_key( &wrapper.tx_hash, ); - if temp_wl_storage - .storage - .has_key(&inner_hash_key) - .expect( - "Error while checking inner tx hash key in \ - storage", - ) - .0 - { - return TxResult { - code: ErrorCodes::ReplayTx.into(), - info: format!( - "Inner transaction hash {} already in \ - storage, replay attempt", - &wrapper.tx_hash - ), - }; - } - // Check in WAL for replay attack in the same block - if let ( - Some(StorageModification::Write { value: _ }), - _, - ) = temp_wl_storage.write_log.read(&inner_hash_key) - { + if temp_wl_storage.has_key(&inner_hash_key).expect( + "Error while checking inner tx hash key in storage", + ) { return TxResult { code: ErrorCodes::ReplayTx.into(), info: format!( @@ -243,45 +211,12 @@ where let wrapper_hash = Hash(tx.unsigned_hash()); let wrapper_hash_key = replay_protection::get_tx_hash_key(&wrapper_hash); - if temp_wl_storage.storage.has_key(&wrapper_hash_key).expect("Error while checking wrapper tx hash key in storage").0 { + if temp_wl_storage.has_key(&wrapper_hash_key).expect("Error while checking wrapper tx hash key in storage"){ return TxResult { code: ErrorCodes::ReplayTx.into(), info: format!("Wrapper transaction hash {} already in storage, replay attempt", wrapper_hash) }; } - if temp_wl_storage - .storage - .has_key(&wrapper_hash_key) - .expect( - "Error while checking wrapper tx hash key in \ - storage", - ) - .0 - { - return TxResult { - code: ErrorCodes::ReplayTx.into(), - info: format!( - "Wrapper transaction hash {} already in \ - storage, replay attempt", - wrapper_hash - ), - }; - } - // Check in WAL for replay attack in the same block - if let ( - Some(StorageModification::Write { value: _ }), - _, - ) = temp_wl_storage.write_log.read(&wrapper_hash_key) - { - return TxResult { - code: ErrorCodes::ReplayTx.into(), - info: format!( - "Wrapper transaction hash {} already in \ - storage, replay attempt", - wrapper_hash - ), - }; - } // Write wrapper hash to WAL temp_wl_storage diff --git a/shared/src/ledger/mod.rs b/shared/src/ledger/mod.rs index 73f39dda05..fb0d2197ee 100644 --- a/shared/src/ledger/mod.rs +++ b/shared/src/ledger/mod.rs @@ -13,5 +13,5 @@ pub mod storage; pub mod vp_host_fns; pub use namada_core::ledger::{ - gas, governance, parameters, storage_api, tx_env, vp_env, + gas, governance, parameters, replay_protection, storage_api, tx_env, vp_env, }; From 8222615cb46e322c22ac6148ead75d728f6fa3c7 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Mon, 23 Jan 2023 19:04:48 +0100 Subject: [PATCH 29/58] Fmt --- .../lib/node/ledger/shell/finalize_block.rs | 6 ++-- apps/src/lib/node/ledger/shell/mod.rs | 6 ++-- .../lib/node/ledger/shell/process_proposal.rs | 28 +++++++++++++------ 3 files changed, 25 insertions(+), 15 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index b52f544e8d..dfa9c326d6 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -2,9 +2,8 @@ use namada::ledger::pos::namada_proof_of_stake; use namada::ledger::pos::types::into_tm_voting_power; -use namada::ledger::protocol; -use namada::ledger::replay_protection; use namada::ledger::storage_api::StorageRead; +use namada::ledger::{protocol, replay_protection}; use namada::types::hash; use 
namada::types::storage::{BlockHash, BlockResults, Header}; use namada::types::token::Amount; @@ -114,7 +113,8 @@ where tx_event["gas_used"] = "0".into(); response.events.push(tx_event); // if the rejected tx was decrypted, remove it - // from the queue of txs to be processed and remove the hash from storage + // from the queue of txs to be processed and remove the hash + // from storage if let TxType::Decrypted(_) = &tx_type { let tx_hash = self .wl_storage diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 4a2702fd38..62e5d01602 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -27,19 +27,16 @@ use namada::ledger::gas::BlockGasMeter; use namada::ledger::pos::namada_proof_of_stake::types::{ ConsensusValidator, ValidatorSetUpdate, }; -use namada::ledger::replay_protection; use namada::ledger::storage::write_log::WriteLog; use namada::ledger::storage::{ DBIter, Sha256Hasher, Storage, StorageHasher, WlStorage, DB, }; use namada::ledger::storage_api::{self, StorageRead}; -use namada::ledger::{ibc, pos, protocol}; +use namada::ledger::{ibc, pos, protocol, replay_protection}; use namada::proof_of_stake::{self, read_pos_params, slash}; use namada::proto::{self, Tx}; -use namada::types::address; use namada::types::address::{masp, masp_tx_key, Address}; use namada::types::chain::ChainId; -use namada::types::hash; use namada::types::internal::WrapperTxInQueue; use namada::types::key::*; use namada::types::storage::{BlockHeight, Key, TxIndex}; @@ -49,6 +46,7 @@ use namada::types::transaction::{ hash_tx, process_tx, verify_decrypted_correctly, AffineCurve, DecryptedTx, EllipticCurve, PairingEngine, TxType, MIN_FEE, }; +use namada::types::{address, hash}; use namada::vm::wasm::{TxCache, VpCache}; use namada::vm::WasmCacheRwAccess; use num_derive::{FromPrimitive, ToPrimitive}; diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index c84144f02f..06611dd3b0 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -2,7 +2,6 @@ //! 
and [`RevertProposal`] ABCI++ methods for the Shell use namada::core::types::hash::Hash; -use namada::ledger::storage::write_log::StorageModification; use namada::ledger::storage::TempWlStorage; use namada::types::internal::WrapperTxInQueue; @@ -204,19 +203,32 @@ where } // Write inner hash to WAL - temp_wl_storage.write_log.write(&inner_hash_key, vec![]).expect("Couldn't write inner transaction hash to write log"); + temp_wl_storage + .write_log + .write(&inner_hash_key, vec![]) + .expect( + "Couldn't write inner transaction hash to \ + write log", + ); let tx = Tx::try_from(tx_bytes) .expect("Deserialization shouldn't fail"); let wrapper_hash = Hash(tx.unsigned_hash()); let wrapper_hash_key = replay_protection::get_tx_hash_key(&wrapper_hash); - if temp_wl_storage.has_key(&wrapper_hash_key).expect("Error while checking wrapper tx hash key in storage"){ - return TxResult { - code: ErrorCodes::ReplayTx.into(), - info: format!("Wrapper transaction hash {} already in storage, replay attempt", wrapper_hash) - }; - } + if temp_wl_storage.has_key(&wrapper_hash_key).expect( + "Error while checking wrapper tx hash key in \ + storage", + ) { + return TxResult { + code: ErrorCodes::ReplayTx.into(), + info: format!( + "Wrapper transaction hash {} already in \ + storage, replay attempt", + wrapper_hash + ), + }; + } // Write wrapper hash to WAL temp_wl_storage From e1f17ee6f9efa04c74ce9dcbe5185e6636f0081e Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Tue, 24 Jan 2023 11:52:14 +0100 Subject: [PATCH 30/58] Fixes fee in unit tests --- apps/src/lib/node/ledger/shell/process_proposal.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 06611dd3b0..5a5a55ff63 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -928,7 +928,7 @@ mod test_process_proposal { shell .wl_storage .storage - .write(&balance_key, Amount::from(1000).try_to_vec().unwrap()) + .write(&balance_key, Amount::whole(1000).try_to_vec().unwrap()) .unwrap(); let tx = Tx::new( @@ -1053,7 +1053,7 @@ mod test_process_proposal { shell .wl_storage .storage - .write(&balance_key, Amount::from(1000).try_to_vec().unwrap()) + .write(&balance_key, Amount::whole(1000).try_to_vec().unwrap()) .unwrap(); // Add unshielded balance for fee payment @@ -1064,7 +1064,7 @@ mod test_process_proposal { shell .wl_storage .storage - .write(&balance_key, Amount::from(1000).try_to_vec().unwrap()) + .write(&balance_key, Amount::whole(1000).try_to_vec().unwrap()) .unwrap(); let tx = Tx::new( From 519be43438259ccc77f65f85146d227e2879b345 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Tue, 24 Jan 2023 11:27:46 +0000 Subject: [PATCH 31/58] [ci] wasm checksums update --- wasm/checksums.json | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/wasm/checksums.json b/wasm/checksums.json index b5f60d67b1..311461e202 100644 --- a/wasm/checksums.json +++ b/wasm/checksums.json @@ -1,20 +1,20 @@ { - "tx_bond.wasm": "tx_bond.ce4056ba250c6c8ab7db69e92ebdf6a4e55bf0025ab34eb1722daf805c068c13.wasm", - "tx_change_validator_commission.wasm": "tx_change_validator_commission.9435458f53464264cb5baae9c785abae0407883efea49c7b31d23ec020552dbb.wasm", - "tx_ibc.wasm": "tx_ibc.676c792fbaef8e1b343232ab0d21aef813b58634fc8608ca912a30f5ed1c70ae.wasm", - "tx_init_account.wasm": 
"tx_init_account.97c1bb239724ef47220e118af6e165e0305fda1029cd11acb2c6115eb9a52b15.wasm", - "tx_init_proposal.wasm": "tx_init_proposal.172aba92f957a845e0da72e17ffa2fa1ef792351d1dd5efef87eea7564b523d5.wasm", - "tx_init_validator.wasm": "tx_init_validator.d17556e476ee3da9b49517c6e95d489d158bd747150c40ac841b65ba6cd4fb9b.wasm", - "tx_reveal_pk.wasm": "tx_reveal_pk.32cfb9013faba82110e7e2c89b85ab670398443b02273de24a93e23cd9c15dcd.wasm", - "tx_transfer.wasm": "tx_transfer.87323ef9752a9a1c401f40adb25ee24446a1c7e923bd9f8824ff43a6f062c2c8.wasm", - "tx_unbond.wasm": "tx_unbond.68580ee3680cc7510fd11e619ba85fa256ae6c92b98b67f0013543df699ebdd9.wasm", - "tx_update_vp.wasm": "tx_update_vp.5db8fed8deed5496ed948ac7926e66d704ceb1a86c97df85c9ead2496a8e7bdb.wasm", - "tx_vote_proposal.wasm": "tx_vote_proposal.0717fc649ac9f8fc2c1a9ffc09fe63530e3f917f067f59600cf1c7b888b62232.wasm", - "tx_withdraw.wasm": "tx_withdraw.671caf342d11b7b83a75f9dca2a0e9a698db21c95f1dcc106348cb2c5028d4c9.wasm", - "vp_implicit.wasm": "vp_implicit.5d63bdb0500742684f4dfb0aa2c1e483c64f578bf660b73846cb7f96407420a5.wasm", - "vp_masp.wasm": "vp_masp.5d4f60fe5c5d7e3736d6a6e37e08bd95ab49a9400202c592ec01fe6b7f11f2a7.wasm", - "vp_testnet_faucet.wasm": "vp_testnet_faucet.2334ed514ed765df8db42a19091696da3734a66bf58f367a36319840f3202f78.wasm", - "vp_token.wasm": "vp_token.31cbc8236f5912a704bc1796b9af3c049693ab9cceaf673865d6d43912f789d4.wasm", - "vp_user.wasm": "vp_user.49802e857c1b955e4901b370738190e31d5cb465fe0f3858082bc074339a0491.wasm", - "vp_validator.wasm": "vp_validator.57b64ff60678d16a0dbe2c55ede96b4e65437a97ddaf9edfba313c4eb15f8f4f.wasm" + "tx_bond.wasm": "tx_bond.41f89d674bfd38d65ecd630ada23f798102059d6a5790132cd552fbb76e3f1c6.wasm", + "tx_change_validator_commission.wasm": "tx_change_validator_commission.e2642c4ed666fd18b822c7af216db49e405e5478a0df2027b2f6577a6aef3819.wasm", + "tx_ibc.wasm": "tx_ibc.93ec9d0307c0f63e45617f3576eccaa2915728cde61cde9a7b17f6d5c423d38b.wasm", + "tx_init_account.wasm": "tx_init_account.98c5a7b3a692b68416e52277a92370b221259c95bdcd175041f3b067db19355b.wasm", + "tx_init_proposal.wasm": "tx_init_proposal.fc86a9c2ae53a3c8c3eaeb1b277d2f18da26232b6b9e8c6aaf5847d599d68f65.wasm", + "tx_init_validator.wasm": "tx_init_validator.5c2f64a892568605a0c84e1617dfa79637e019fc47e5555843434cab1fb079ef.wasm", + "tx_reveal_pk.wasm": "tx_reveal_pk.d4f959a8ed4e0a8af0178ce420bec6d1cecc33f0dd6ef64f12980fddcad78a2e.wasm", + "tx_transfer.wasm": "tx_transfer.8c98b588df55c7399b1ea7dfdd100f15e073c4f84037842af044ef76b7964098.wasm", + "tx_unbond.wasm": "tx_unbond.c50f105eba94a4cddd23adaaacbe1107145380cd62b28df197c78f5e578740bc.wasm", + "tx_update_vp.wasm": "tx_update_vp.2d358a47e5f4ffb6755e953997e62d627380edc5d70ce6b098ad9186fe4e7dd1.wasm", + "tx_vote_proposal.wasm": "tx_vote_proposal.f930edbd93d0e8b5b0cc49ae6eb62ff585d22f11c78ff056a4cb8d62002f92fc.wasm", + "tx_withdraw.wasm": "tx_withdraw.91375a1ba8d23b67fbc9fe9a4f635f9c6ee0f91214bdaae29ef1a7f99a80c1c9.wasm", + "vp_implicit.wasm": "vp_implicit.4f557277d8cd91babf66d4b0d6c8ce9fa578cfaf10b5baccfbf22289dc144443.wasm", + "vp_masp.wasm": "vp_masp.648d2df14b877a2c62d8b4c630ed5ffd3ae643329358a8d1b8a92c79a938bc8a.wasm", + "vp_testnet_faucet.wasm": "vp_testnet_faucet.f4dcab4d010e5e47f7e0143d4516dbf4f01da77e778321fe6c1046500c5493bf.wasm", + "vp_token.wasm": "vp_token.442fa2cd6d9e8d195ab526d354cab69326ceba756bf701b38a163bb6f5b8a360.wasm", + "vp_user.wasm": "vp_user.b90d64e6b82d247c38809cdc05fca35f479af613a8d65c8a90fb3065c1cfa181.wasm", + "vp_validator.wasm": 
"vp_validator.56b35e3f271a10a90de874bedc308ea53ac87f51e22e3b3b1c2b34ecb3303fee.wasm" } \ No newline at end of file From b19e3860a50e3d4b96aed7c66fac41f338c40bfd Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Mon, 6 Feb 2023 12:32:33 +0100 Subject: [PATCH 32/58] Fixes fee error code --- apps/src/lib/node/ledger/shell/mod.rs | 4 ++-- apps/src/lib/node/ledger/shell/process_proposal.rs | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 62e5d01602..3d7a694901 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -671,9 +671,9 @@ where let has_valid_pow = false; if !has_valid_pow && self.get_wrapper_tx_fees() > balance { - response.code = 1; + response.code = ErrorCodes::InvalidTx.into(); response.log = String::from( - "The address given does not have sufficient balance to \ + "The given address does not have a sufficient balance to \ pay fee", ); return response; diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 5a5a55ff63..7df477d8ef 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -272,7 +272,7 @@ where } else { TxResult { code: ErrorCodes::InvalidTx.into(), - info: "The address given does not have \ + info: "The given address does not have a \ sufficient balance to pay fee" .into(), } @@ -483,7 +483,7 @@ mod test_process_proposal { ); assert_eq!( response[0].result.info, - "The address given does not have sufficient balance to \ + "The given address does not have a sufficient balance to \ pay fee" .to_string(), ); @@ -543,7 +543,7 @@ mod test_process_proposal { assert_eq!( response[0].result.info, String::from( - "The address given does not have sufficient balance \ + "The given address does not have a sufficient balance \ to pay fee" ) ); From 1d8e1dabcbc42ddcbd2a10e7e9d05b820971d017 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Wed, 8 Feb 2023 11:08:30 +0100 Subject: [PATCH 33/58] Brings back sig check in `finalize_block` --- .../lib/node/ledger/shell/finalize_block.rs | 33 +++++++++++++++++++ .../lib/node/ledger/shell/process_proposal.rs | 2 +- 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index dfa9c326d6..944bc6fd4c 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -91,6 +91,39 @@ where continue; }; let tx_length = processed_tx.tx.len(); + // If [`process_proposal`] rejected a Tx due to invalid signature, + // emit an event here and move on to next tx. 
+ if ErrorCodes::from_u32(processed_tx.result.code).unwrap() + == ErrorCodes::InvalidSig + { + let mut tx_event = match process_tx(tx.clone()) { + Ok(tx @ TxType::Wrapper(_)) + | Ok(tx @ TxType::Protocol(_)) => { + Event::new_tx_event(&tx, height.0) + } + _ => match TxType::try_from(tx) { + Ok(tx @ TxType::Wrapper(_)) + | Ok(tx @ TxType::Protocol(_)) => { + Event::new_tx_event(&tx, height.0) + } + _ => { + tracing::error!( + "Internal logic error: FinalizeBlock received \ + a tx with an invalid signature error code \ + that could not be deserialized to a \ + WrapperTx / ProtocolTx type" + ); + continue; + } + }, + }; + tx_event["code"] = processed_tx.result.code.to_string(); + tx_event["info"] = + format!("Tx rejected: {}", &processed_tx.result.info); + tx_event["gas_used"] = "0".into(); + response.events.push(tx_event); + continue; + } let tx_type = if let Ok(tx_type) = process_tx(tx) { tx_type diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 7df477d8ef..5a3a2ebc59 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -33,7 +33,7 @@ where /// their order has already been committed in storage, so we simply discard /// the single invalid inner tx pub fn process_proposal( - &mut self, + &self, req: RequestProcessProposal, ) -> ProcessProposal { let tx_results = self.process_txs(&req.txs); From 42e056d5aba4feed03fcd4506097fae9e0ee5a5f Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Wed, 8 Feb 2023 11:29:22 +0100 Subject: [PATCH 34/58] Updates fees in replay protection specs --- .../src/base-ledger/replay-protection.md | 34 +++++-------------- 1 file changed, 9 insertions(+), 25 deletions(-) diff --git a/documentation/specs/src/base-ledger/replay-protection.md b/documentation/specs/src/base-ledger/replay-protection.md index e0754523a0..85001729a5 100644 --- a/documentation/specs/src/base-ledger/replay-protection.md +++ b/documentation/specs/src/base-ledger/replay-protection.md @@ -388,7 +388,11 @@ validate it. These will involve: - `ChainId` - Transaction hash - Expiration +- Wrapper signer has enough funds to pay the fee - Unshielding tx (if present), is indeed a masp unshielding transfer +- The unshielding tx (if present) releases the minimum amount of tokens required + to pay fees +- The unshielding tx (if present) runs succesfully For gas, fee and the unshielding tx more details can be found in the [fee specs](../economics/fee-system.md). @@ -397,10 +401,10 @@ These checks can all be done before executing the transactions themselves. If any of these fails, the transaction should be considered invalid and the action to take will be one of the followings: -1. If the checks fail on the signature, chainId, expiration, transaction hash or - the unshielding tx, then this transaction will be forever invalid, regardless - of the possible evolution of the ledger's state. There's no need to include - the transaction in the block. Moreover, we **cannot** include this +1. If the checks fail on the signature, chainId, expiration, transaction hash, + balance or the unshielding tx, then this transaction will be forever invalid, + regardless of the possible evolution of the ledger's state. There's no need + to include the transaction in the block. 
Moreover, we **cannot** include this transaction in the block to charge a fee (as a sort of punishment) because these errors may not depend on the signer of the tx (could be due to malicious users or simply a delay in the tx inclusion in the block) @@ -415,27 +419,7 @@ to take will be one of the followings: If instead all the checks pass validation we will include the transaction in the block to store the hash and charge the fee. -All these checks are also run in `process_proposal` with a few additions: - -- Wrapper signer has enough funds to pay the fee. This check should not be done - in mempool because the funds available for a certain address are variable in - time and should only be checked at block inclusion time. If any of the checks - fail here, the entire block is rejected forcing a new Tendermint round to - begin (see a better explanation of this choice in the - [relative](#block-rejection) section) -- The unshielding tx (if present) releases the minimum amount of tokens required - to pay fees -- The unshielding tx (if present) runs succesfully - -The `expiration` parameter also justifies that the check on funds is only done -in `process_proposal` and not in mempool. Without it, the transaction could be -potentially executed at any future moment, possibly going against the mutated -interests of the submitter. With the expiration parameter, now, the submitter -commits himself to accept the execution of the transaction up to the specified -time: it's going to be his responsibility to provide a sensible value for this -parameter. Given this constraint the transaction will be kept in mempool up -until the expiration (since it would become invalid after that in any case), to -prevent the mempool from increasing too much in size. +All these checks are also run in `process_proposal`. This mechanism can also be applied to another scenario. Suppose a transaction was not propagated to the network by a node (or a group of colluding nodes). 
From 48bcb8c140323d39d00c10e4136570dfdafdebdb Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Fri, 10 Feb 2023 21:24:49 +0000 Subject: [PATCH 35/58] [ci] wasm checksums update --- wasm/checksums.json | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/wasm/checksums.json b/wasm/checksums.json index 311461e202..0e50d4c7aa 100644 --- a/wasm/checksums.json +++ b/wasm/checksums.json @@ -1,20 +1,20 @@ { - "tx_bond.wasm": "tx_bond.41f89d674bfd38d65ecd630ada23f798102059d6a5790132cd552fbb76e3f1c6.wasm", - "tx_change_validator_commission.wasm": "tx_change_validator_commission.e2642c4ed666fd18b822c7af216db49e405e5478a0df2027b2f6577a6aef3819.wasm", - "tx_ibc.wasm": "tx_ibc.93ec9d0307c0f63e45617f3576eccaa2915728cde61cde9a7b17f6d5c423d38b.wasm", - "tx_init_account.wasm": "tx_init_account.98c5a7b3a692b68416e52277a92370b221259c95bdcd175041f3b067db19355b.wasm", - "tx_init_proposal.wasm": "tx_init_proposal.fc86a9c2ae53a3c8c3eaeb1b277d2f18da26232b6b9e8c6aaf5847d599d68f65.wasm", - "tx_init_validator.wasm": "tx_init_validator.5c2f64a892568605a0c84e1617dfa79637e019fc47e5555843434cab1fb079ef.wasm", - "tx_reveal_pk.wasm": "tx_reveal_pk.d4f959a8ed4e0a8af0178ce420bec6d1cecc33f0dd6ef64f12980fddcad78a2e.wasm", - "tx_transfer.wasm": "tx_transfer.8c98b588df55c7399b1ea7dfdd100f15e073c4f84037842af044ef76b7964098.wasm", - "tx_unbond.wasm": "tx_unbond.c50f105eba94a4cddd23adaaacbe1107145380cd62b28df197c78f5e578740bc.wasm", - "tx_update_vp.wasm": "tx_update_vp.2d358a47e5f4ffb6755e953997e62d627380edc5d70ce6b098ad9186fe4e7dd1.wasm", - "tx_vote_proposal.wasm": "tx_vote_proposal.f930edbd93d0e8b5b0cc49ae6eb62ff585d22f11c78ff056a4cb8d62002f92fc.wasm", - "tx_withdraw.wasm": "tx_withdraw.91375a1ba8d23b67fbc9fe9a4f635f9c6ee0f91214bdaae29ef1a7f99a80c1c9.wasm", - "vp_implicit.wasm": "vp_implicit.4f557277d8cd91babf66d4b0d6c8ce9fa578cfaf10b5baccfbf22289dc144443.wasm", - "vp_masp.wasm": "vp_masp.648d2df14b877a2c62d8b4c630ed5ffd3ae643329358a8d1b8a92c79a938bc8a.wasm", - "vp_testnet_faucet.wasm": "vp_testnet_faucet.f4dcab4d010e5e47f7e0143d4516dbf4f01da77e778321fe6c1046500c5493bf.wasm", - "vp_token.wasm": "vp_token.442fa2cd6d9e8d195ab526d354cab69326ceba756bf701b38a163bb6f5b8a360.wasm", - "vp_user.wasm": "vp_user.b90d64e6b82d247c38809cdc05fca35f479af613a8d65c8a90fb3065c1cfa181.wasm", - "vp_validator.wasm": "vp_validator.56b35e3f271a10a90de874bedc308ea53ac87f51e22e3b3b1c2b34ecb3303fee.wasm" + "tx_bond.wasm": "tx_bond.6be00c580c78034f0ff2fb74f26b3f79d61abb77f27b63162e6f3c2cd8dabcdf.wasm", + "tx_change_validator_commission.wasm": "tx_change_validator_commission.ed1d1cdfbf9abe3644719a37d3041f05e7eee718236124480a6e969bb9b245aa.wasm", + "tx_ibc.wasm": "tx_ibc.6b52ff9f1c9b4266f614d24bd41a949cc803a312fce6cb017ee73f68390bf39f.wasm", + "tx_init_account.wasm": "tx_init_account.0f6113d881e9762c62d97b7cc9841da5f7fe5feef3b7739192391f029d10ebcd.wasm", + "tx_init_proposal.wasm": "tx_init_proposal.6355ce198ca39e982de7bd71c009d4a71a536c7f1c987f9b56e326be5d69f702.wasm", + "tx_init_validator.wasm": "tx_init_validator.285957b0ba5111251d4447f5cffe0e03632f0940f5658d27f59592bd7a29d64f.wasm", + "tx_reveal_pk.wasm": "tx_reveal_pk.fc5cb0ef1d45a1ff475d67f98c0dd2f0e6a34fbbc83716f5975c6e62733adfe1.wasm", + "tx_transfer.wasm": "tx_transfer.bdf43ccce2603c528482b6b09da41b814017bf0d776d04cf7caad82b18bb0a09.wasm", + "tx_unbond.wasm": "tx_unbond.c53cf3bbe8c7ac3c03f0b3b8d3dee837aa84f04a1227d4c560946454ef232383.wasm", + "tx_update_vp.wasm": 
"tx_update_vp.b78247f292a7e2423204f3a29961e2783938f110d53e31bf858094b03e1c92ac.wasm", + "tx_vote_proposal.wasm": "tx_vote_proposal.403456a31ccbe5fc6df98c3eab77abd89cbdabcb78bb16a6255ad487859b0f53.wasm", + "tx_withdraw.wasm": "tx_withdraw.6b2d90f3cc024d8930bca9965a010d4c879e4b88698168625d49d245096afa74.wasm", + "vp_implicit.wasm": "vp_implicit.9824a09d636fb9af1840352b2de3fb04fa90e5fd2dfbe86d1c7664a7dbeeec06.wasm", + "vp_masp.wasm": "vp_masp.70f3b9de71e4fbfb5a06a01cf7e8667ab112bb56f9efbb2bfc2fa8094e66c323.wasm", + "vp_testnet_faucet.wasm": "vp_testnet_faucet.6c8f0e1ac279cb0f66b2fade5854f50a7c48418db3643da45961a98ea300db6f.wasm", + "vp_token.wasm": "vp_token.dc0ac90117a834f86a006279a03b8530a100008efc0480fee797e0142fa25cca.wasm", + "vp_user.wasm": "vp_user.e625762fc14007b08a62036b2ec4a473777af7f9ba26ffa9416d6fb465fcbb08.wasm", + "vp_validator.wasm": "vp_validator.3033c78aa87107e50dd3eadfd18dbf0ff3b320ac796fd225f44d944bde111c74.wasm" } \ No newline at end of file From b23b7ebf42962e0b5fcb26fadfc40f494ae56fab Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 27 Jan 2023 19:20:49 +0100 Subject: [PATCH 36/58] Adds `ChainId` to struct `Tx` --- core/src/proto/types.rs | 24 +++++++++++++++++-- core/src/types/chain.rs | 3 ++- core/src/types/transaction/decrypted.rs | 2 ++ core/src/types/transaction/mod.rs | 31 +++++++++++++++++++++---- core/src/types/transaction/protocol.rs | 5 ++++ core/src/types/transaction/wrapper.rs | 15 +++++++++--- proto/types.proto | 1 + 7 files changed, 71 insertions(+), 10 deletions(-) diff --git a/core/src/proto/types.rs b/core/src/proto/types.rs index 4b73644e46..538737f2c9 100644 --- a/core/src/proto/types.rs +++ b/core/src/proto/types.rs @@ -9,6 +9,7 @@ use thiserror::Error; use super::generated::types; #[cfg(any(feature = "tendermint", feature = "tendermint-abcipp"))] use crate::tendermint_proto::abci::ResponseDeliverTx; +use crate::types::chain::ChainId; use crate::types::key::*; use crate::types::time::DateTimeUtc; #[cfg(feature = "ferveo-tpke")] @@ -136,6 +137,7 @@ pub struct SigningTx { pub code_hash: [u8; 32], pub data: Option>, pub timestamp: DateTimeUtc, + pub chain_id: ChainId, } impl SigningTx { @@ -146,6 +148,7 @@ impl SigningTx { code: self.code_hash.to_vec(), data: self.data.clone(), timestamp, + chain_id: self.chain_id.as_str().to_owned(), } .encode(&mut bytes) .expect("encoding a transaction failed"); @@ -166,6 +169,7 @@ impl SigningTx { code_hash: self.code_hash, data: Some(signed), timestamp: self.timestamp, + chain_id: self.chain_id, } } @@ -185,6 +189,7 @@ impl SigningTx { code_hash: self.code_hash, data, timestamp: self.timestamp, + chain_id: self.chain_id, }; let signed_data = tx.hash(); common::SigScheme::verify_signature_raw(pk, &signed_data, sig) @@ -198,6 +203,7 @@ impl SigningTx { code, data: self.data, timestamp: self.timestamp, + chain_id: self.chain_id, }) } else { None @@ -211,6 +217,7 @@ impl From for SigningTx { code_hash: hash_tx(&tx.code).0, data: tx.data, timestamp: tx.timestamp, + chain_id: tx.chain_id, } } } @@ -225,6 +232,7 @@ pub struct Tx { pub code: Vec, pub data: Option>, pub timestamp: DateTimeUtc, + pub chain_id: ChainId, } impl TryFrom<&[u8]> for Tx { @@ -236,10 +244,13 @@ impl TryFrom<&[u8]> for Tx { Some(t) => t.try_into().map_err(Error::InvalidTimestamp)?, None => return Err(Error::NoTimestampError), }; + let chain_id = ChainId(tx.chain_id); + Ok(Tx { code: tx.code, data: tx.data, timestamp, + chain_id, }) } } @@ -251,6 +262,7 @@ impl From for types::Tx { code: tx.code, data: tx.data, timestamp, + chain_id: 
tx.chain_id.as_str().to_owned(), } } } @@ -342,11 +354,16 @@ impl From for ResponseDeliverTx { } impl Tx { - pub fn new(code: Vec, data: Option>) -> Self { + pub fn new( + code: Vec, + data: Option>, + chain_id: ChainId, + ) -> Self { Tx { code, data, timestamp: DateTimeUtc::now(), + chain_id, } } @@ -372,6 +389,7 @@ impl Tx { code: self.code.clone(), data: signed_data.data, timestamp: self.timestamp, + chain_id: self.chain_id, }; unsigned_tx.hash() } @@ -494,7 +512,8 @@ mod tests { fn test_tx() { let code = "wasm code".as_bytes().to_owned(); let data = "arbitrary data".as_bytes().to_owned(); - let tx = Tx::new(code.clone(), Some(data.clone())); + let chain_id = ChainId("This chain".to_string()); + let tx = Tx::new(code.clone(), Some(data.clone()), chain_id.clone()); let bytes = tx.to_bytes(); let tx_from_bytes = @@ -505,6 +524,7 @@ mod tests { code, data: Some(data), timestamp: None, + chain_id, }; let mut bytes = vec![]; types_tx.encode(&mut bytes).expect("encoding failed"); diff --git a/core/src/types/chain.rs b/core/src/types/chain.rs index 7437793cfc..b14fdbbef2 100644 --- a/core/src/types/chain.rs +++ b/core/src/types/chain.rs @@ -192,6 +192,7 @@ pub const DEFAULT_CHAIN_ID: &str = "namada-internal.00000000000000"; Deserialize, BorshSerialize, BorshDeserialize, + BorshSchema, PartialOrd, Ord, PartialEq, @@ -199,7 +200,7 @@ pub const DEFAULT_CHAIN_ID: &str = "namada-internal.00000000000000"; Hash, )] #[serde(transparent)] -pub struct ChainId(String); +pub struct ChainId(pub String); impl ChainId { /// Extracts a string slice containing the entire chain ID. diff --git a/core/src/types/transaction/decrypted.rs b/core/src/types/transaction/decrypted.rs index 7f76bf9e49..9a1404f865 100644 --- a/core/src/types/transaction/decrypted.rs +++ b/core/src/types/transaction/decrypted.rs @@ -11,6 +11,7 @@ pub mod decrypted_tx { use super::EllipticCurve; use crate::proto::Tx; + use crate::types::chain::ChainId; use crate::types::transaction::{Hash, TxType, WrapperTx}; #[derive(Clone, Debug, BorshSerialize, BorshDeserialize, BorshSchema)] @@ -92,6 +93,7 @@ pub mod decrypted_tx { .try_to_vec() .expect("Encrypting transaction should not fail"), ), + ChainId("".to_string()), ) } } diff --git a/core/src/types/transaction/mod.rs b/core/src/types/transaction/mod.rs index 0e0a5e980e..a9141ae93e 100644 --- a/core/src/types/transaction/mod.rs +++ b/core/src/types/transaction/mod.rs @@ -210,6 +210,7 @@ pub mod tx_types { use super::*; use crate::proto::{SignedTxData, Tx}; + use crate::types::chain::ChainId; use crate::types::transaction::protocol::ProtocolTx; /// Errors relating to decrypting a wrapper tx and its @@ -241,7 +242,11 @@ pub mod tx_types { impl From for Tx { fn from(ty: TxType) -> Self { - Tx::new(vec![], Some(ty.try_to_vec().unwrap())) + Tx::new( + vec![], + Some(ty.try_to_vec().unwrap()), + ChainId("".to_string()), + ) } } @@ -296,12 +301,14 @@ pub mod tx_types { code: tx.code, data: Some(data.clone()), timestamp: tx.timestamp, + chain_id: tx.chain_id, } .hash(); match TxType::try_from(Tx { code: vec![], data: Some(data), timestamp: tx.timestamp, + chain_id: tx.chain_id, }) .map_err(|err| TxError::Deserialization(err.to_string()))? 
{ @@ -355,7 +362,11 @@ pub mod tx_types { /// data and returns an identical copy #[test] fn test_process_tx_raw_tx_no_data() { - let tx = Tx::new("wasm code".as_bytes().to_owned(), None); + let tx = Tx::new( + "wasm code".as_bytes().to_owned(), + None, + ChainId("this chain".to_string()), + ); match process_tx(tx.clone()).expect("Test failed") { TxType::Raw(raw) => assert_eq!(tx, raw), @@ -371,6 +382,7 @@ pub mod tx_types { let inner = Tx::new( "code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + ChainId("this chain".to_string()), ); let tx = Tx::new( "wasm code".as_bytes().to_owned(), @@ -379,6 +391,7 @@ pub mod tx_types { .try_to_vec() .expect("Test failed"), ), + inner.chain_id, ); match process_tx(tx).expect("Test failed") { @@ -394,6 +407,7 @@ pub mod tx_types { let inner = Tx::new( "code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + ChainId("this chain".to_string()), ); let tx = Tx::new( "wasm code".as_bytes().to_owned(), @@ -402,6 +416,7 @@ pub mod tx_types { .try_to_vec() .expect("Test failed"), ), + inner.chain_id, ) .sign(&gen_keypair()); @@ -419,6 +434,7 @@ pub mod tx_types { let tx = Tx::new( "wasm code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + ChainId("this chain".to_string()), ); // the signed tx let wrapper = WrapperTx::new( @@ -456,6 +472,7 @@ pub mod tx_types { let tx = Tx::new( "wasm code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + ChainId("this chain".to_string()), ); // the signed tx let wrapper = WrapperTx::new( @@ -477,6 +494,7 @@ pub mod tx_types { Some( TxType::Wrapper(wrapper).try_to_vec().expect("Test failed"), ), + ChainId("this chain".to_string()), ); let result = process_tx(tx).expect_err("Test failed"); assert_matches!(result, TxError::Unsigned(_)); @@ -490,6 +508,7 @@ pub mod tx_types { let payload = Tx::new( "transaction data".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + ChainId("this chain".to_string()), ); let decrypted = DecryptedTx::Decrypted { tx: payload.clone(), @@ -517,6 +536,7 @@ pub mod tx_types { let payload = Tx::new( "transaction data".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + ChainId("this chain".to_string()), ); let decrypted = DecryptedTx::Decrypted { tx: payload.clone(), @@ -535,8 +555,11 @@ pub mod tx_types { sig: common::Signature::try_from_sig(&ed_sig).unwrap(), }; // create the tx with signed decrypted data - let tx = - Tx::new(vec![], Some(signed.try_to_vec().expect("Test failed"))); + let tx = Tx::new( + vec![], + Some(signed.try_to_vec().expect("Test failed")), + ChainId("this chain".to_string()), + ); match process_tx(tx).expect("Test failed") { TxType::Decrypted(DecryptedTx::Decrypted { tx: processed, diff --git a/core/src/types/transaction/protocol.rs b/core/src/types/transaction/protocol.rs index becc17941f..3139ee598e 100644 --- a/core/src/types/transaction/protocol.rs +++ b/core/src/types/transaction/protocol.rs @@ -33,6 +33,7 @@ mod protocol_txs { use super::*; use crate::proto::Tx; + use crate::types::chain::ChainId; use crate::types::key::*; use crate::types::transaction::{EllipticCurve, TxError, TxType}; @@ -87,6 +88,7 @@ mod protocol_txs { self, pk: &common::PublicKey, signing_key: &common::SecretKey, + chain_id: ChainId, ) -> Tx { Tx::new( vec![], @@ -98,6 +100,7 @@ mod protocol_txs { .try_to_vec() .expect("Could not serialize ProtocolTx"), ), + chain_id, ) .sign(signing_key) } @@ -108,6 +111,7 @@ mod protocol_txs { signing_key: &common::SecretKey, 
wasm_dir: &'a Path, wasm_loader: F, + chain_id: ChainId, ) -> Self where F: FnOnce(&'a str, &'static str) -> Vec, @@ -125,6 +129,7 @@ mod protocol_txs { data.try_to_vec() .expect("Serializing request should not fail"), ), + chain_id, ) .sign(signing_key), ) diff --git a/core/src/types/transaction/wrapper.rs b/core/src/types/transaction/wrapper.rs index a87b3e1fff..308663d1b8 100644 --- a/core/src/types/transaction/wrapper.rs +++ b/core/src/types/transaction/wrapper.rs @@ -13,6 +13,7 @@ pub mod wrapper_tx { use crate::proto::Tx; use crate::types::address::Address; + use crate::types::chain::ChainId; use crate::types::key::*; use crate::types::storage::Epoch; use crate::types::token::Amount; @@ -249,6 +250,7 @@ pub mod wrapper_tx { pub fn sign( &self, keypair: &common::SecretKey, + chain_id: ChainId, ) -> Result { if self.pk != keypair.ref_to() { return Err(WrapperTxErr::InvalidKeyPair); @@ -260,6 +262,7 @@ pub mod wrapper_tx { .try_to_vec() .expect("Could not serialize WrapperTx"), ), + chain_id, ) .sign(keypair)) } @@ -364,6 +367,7 @@ pub mod wrapper_tx { let tx = Tx::new( "wasm code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + ChainId("This chain id".to_string()), ); let wrapper = WrapperTx::new( @@ -392,6 +396,7 @@ pub mod wrapper_tx { let tx = Tx::new( "wasm code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + ChainId("This chain id".to_string()), ); let mut wrapper = WrapperTx::new( @@ -426,6 +431,7 @@ pub mod wrapper_tx { let tx = Tx::new( "wasm code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + ChainID("This Chain id".to_string()), ); // the signed tx let mut tx = WrapperTx::new( @@ -441,7 +447,7 @@ pub mod wrapper_tx { #[cfg(not(feature = "mainnet"))] None, ) - .sign(&keypair) + .sign(&keypair, ChainId("This chain id".to_string())) .expect("Test failed"); // we now try to alter the inner tx maliciously @@ -459,8 +465,11 @@ pub mod wrapper_tx { .expect("Test failed"); // malicious transaction - let malicious = - Tx::new("Give me all the money".as_bytes().to_owned(), None); + let malicious = Tx::new( + "Give me all the money".as_bytes().to_owned(), + None, + ChainId("This chain id".to_string()), + ); // We replace the inner tx with a malicious one wrapper.inner_tx = EncryptedTx::encrypt( diff --git a/proto/types.proto b/proto/types.proto index 58494ec824..371416cff7 100644 --- a/proto/types.proto +++ b/proto/types.proto @@ -9,6 +9,7 @@ message Tx { // TODO this optional is useless because it's default on proto3 optional bytes data = 2; google.protobuf.Timestamp timestamp = 3; + string chain_id = 4; } message Dkg { string data = 1; } From a5a437fa9498a032f3ce17cd146841549dee3daf Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Tue, 31 Jan 2023 14:06:29 +0100 Subject: [PATCH 37/58] Adds chain id in `Tx` instantiations --- apps/src/lib/client/signing.rs | 2 +- apps/src/lib/client/tx.rs | 32 ++++++++++++-------- apps/src/lib/node/ledger/shell/governance.rs | 6 +++- core/src/proto/types.rs | 4 +-- core/src/types/transaction/mod.rs | 2 +- 5 files changed, 28 insertions(+), 18 deletions(-) diff --git a/apps/src/lib/client/signing.rs b/apps/src/lib/client/signing.rs index 9b1a00b987..5fb6a2410b 100644 --- a/apps/src/lib/client/signing.rs +++ b/apps/src/lib/client/signing.rs @@ -310,7 +310,7 @@ pub async fn sign_wrapper( let decrypted_hash = tx.tx_hash.to_string(); TxBroadcastData::Wrapper { tx: tx - .sign(keypair) + .sign(keypair, ctx.config.ledger.chain_id.clone()) .expect("Wrapper tx signing keypair should 
be correct"), wrapper_hash, decrypted_hash, diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index 0014833ca8..8abcf33f02 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -106,7 +106,7 @@ pub async fn submit_custom(ctx: Context, args: args::TxCustom) { let data = args.data_path.map(|data_path| { std::fs::read(data_path).expect("Expected a file at given data path") }); - let tx = Tx::new(tx_code, data); + let tx = Tx::new(tx_code, data, ctx.config.ledger.chain_id.clone()); let (ctx, initialized_accounts) = process_tx( ctx, &args.tx, @@ -169,7 +169,7 @@ pub async fn submit_update_vp(ctx: Context, args: args::TxUpdateVp) { let data = UpdateVp { addr, vp_code }; let data = data.try_to_vec().expect("Encoding tx data shouldn't fail"); - let tx = Tx::new(tx_code, Some(data)); + let tx = Tx::new(tx_code, Some(data), ctx.config.ledger.chain_id.clone()); process_tx( ctx, &args.tx, @@ -202,7 +202,7 @@ pub async fn submit_init_account(mut ctx: Context, args: args::TxInitAccount) { }; let data = data.try_to_vec().expect("Encoding tx data shouldn't fail"); - let tx = Tx::new(tx_code, Some(data)); + let tx = Tx::new(tx_code, Some(data), ctx.config.ledger.chain_id.clone()); let (ctx, initialized_accounts) = process_tx( ctx, &args.tx, @@ -335,7 +335,7 @@ pub async fn submit_init_validator( validator_vp_code, }; let data = data.try_to_vec().expect("Encoding tx data shouldn't fail"); - let tx = Tx::new(tx_code, Some(data)); + let tx = Tx::new(tx_code, Some(data), ctx.config.ledger.chain_id.clone()); let (mut ctx, initialized_accounts) = process_tx( ctx, &tx_args, @@ -1677,7 +1677,7 @@ pub async fn submit_transfer(mut ctx: Context, args: args::TxTransfer) { .try_to_vec() .expect("Encoding tx data shouldn't fail"); let tx_code = ctx.read_wasm(TX_TRANSFER_WASM); - let tx = Tx::new(tx_code, Some(data)); + let tx = Tx::new(tx_code, Some(data), ctx.config.ledger.chain_id.clone()); let signing_address = TxSigningKey::WalletAddress(args.source.to_address()); process_tx( @@ -1797,7 +1797,7 @@ pub async fn submit_ibc_transfer(ctx: Context, args: args::TxIbcTransfer) { prost::Message::encode(&any_msg, &mut data) .expect("Encoding tx data shouldn't fail"); - let tx = Tx::new(tx_code, Some(data)); + let tx = Tx::new(tx_code, Some(data), ctx.config.ledger.chain_id.clone()); process_tx( ctx, &args.tx, @@ -1942,7 +1942,8 @@ pub async fn submit_init_proposal(mut ctx: Context, args: args::InitProposal) { .try_to_vec() .expect("Encoding proposal data shouldn't fail"); let tx_code = ctx.read_wasm(TX_INIT_PROPOSAL); - let tx = Tx::new(tx_code, Some(data)); + let tx = + Tx::new(tx_code, Some(data), ctx.config.ledger.chain_id.clone()); process_tx( ctx, @@ -2082,7 +2083,11 @@ pub async fn submit_vote_proposal(mut ctx: Context, args: args::VoteProposal) { .try_to_vec() .expect("Encoding proposal data shouldn't fail"); let tx_code = ctx.read_wasm(TX_VOTE_PROPOSAL); - let tx = Tx::new(tx_code, Some(data)); + let tx = Tx::new( + tx_code, + Some(data), + ctx.config.ledger.chain_id.clone(), + ); process_tx( ctx, @@ -2154,7 +2159,8 @@ pub async fn submit_reveal_pk_aux( .try_to_vec() .expect("Encoding a public key shouldn't fail"); let tx_code = ctx.read_wasm(TX_REVEAL_PK); - let tx = Tx::new(tx_code, Some(tx_data)); + let chain_id = ctx.config.ledger.chain_id.clone(); + let tx = Tx::new(tx_code, Some(tx_data), chain_id); // submit_tx without signing the inner tx let keypair = if let Some(signing_key) = &args.signing_key { @@ -2357,7 +2363,7 @@ pub async fn submit_bond(ctx: Context, args: 
args::Bond) { }; let data = bond.try_to_vec().expect("Encoding tx data shouldn't fail"); - let tx = Tx::new(tx_code, Some(data)); + let tx = Tx::new(tx_code, Some(data), ctx.config.ledger.chain_id.clone()); let default_signer = args.source.unwrap_or(args.validator); process_tx( ctx, @@ -2412,7 +2418,7 @@ pub async fn submit_unbond(ctx: Context, args: args::Unbond) { let data = data.try_to_vec().expect("Encoding tx data shouldn't fail"); let tx_code = ctx.read_wasm(TX_UNBOND_WASM); - let tx = Tx::new(tx_code, Some(data)); + let tx = Tx::new(tx_code, Some(data), ctx.config.ledger.chain_id.clone()); let default_signer = args.source.unwrap_or(args.validator); let (_ctx, _) = process_tx( ctx, @@ -2477,7 +2483,7 @@ pub async fn submit_withdraw(ctx: Context, args: args::Withdraw) { let data = data.try_to_vec().expect("Encoding tx data shouldn't fail"); let tx_code = ctx.read_wasm(TX_WITHDRAW_WASM); - let tx = Tx::new(tx_code, Some(data)); + let tx = Tx::new(tx_code, Some(data), ctx.config.ledger.chain_id.clone()); let default_signer = args.source.unwrap_or(args.validator); process_tx( ctx, @@ -2563,7 +2569,7 @@ pub async fn submit_validator_commission_change( }; let data = data.try_to_vec().expect("Encoding tx data shouldn't fail"); - let tx = Tx::new(tx_code, Some(data)); + let tx = Tx::new(tx_code, Some(data), ctx.config.ledger.chain_id.clone()); let default_signer = args.validator; process_tx( ctx, diff --git a/apps/src/lib/node/ledger/shell/governance.rs b/apps/src/lib/node/ledger/shell/governance.rs index a27814029d..330410e0b9 100644 --- a/apps/src/lib/node/ledger/shell/governance.rs +++ b/apps/src/lib/node/ledger/shell/governance.rs @@ -73,7 +73,11 @@ where shell.read_storage_key_bytes(&proposal_code_key); match proposal_code { Some(proposal_code) => { - let tx = Tx::new(proposal_code, Some(encode(&id))); + let tx = Tx::new( + proposal_code, + Some(encode(&id)), + shell.chain_id.clone(), + ); let tx_type = TxType::Decrypted(DecryptedTx::Decrypted { tx, diff --git a/core/src/proto/types.rs b/core/src/proto/types.rs index 538737f2c9..d62ddd776b 100644 --- a/core/src/proto/types.rs +++ b/core/src/proto/types.rs @@ -189,7 +189,7 @@ impl SigningTx { code_hash: self.code_hash, data, timestamp: self.timestamp, - chain_id: self.chain_id, + chain_id: self.chain_id.clone(), }; let signed_data = tx.hash(); common::SigScheme::verify_signature_raw(pk, &signed_data, sig) @@ -389,7 +389,7 @@ impl Tx { code: self.code.clone(), data: signed_data.data, timestamp: self.timestamp, - chain_id: self.chain_id, + chain_id: self.chain_id.clone(), }; unsigned_tx.hash() } diff --git a/core/src/types/transaction/mod.rs b/core/src/types/transaction/mod.rs index a9141ae93e..e78c00cda1 100644 --- a/core/src/types/transaction/mod.rs +++ b/core/src/types/transaction/mod.rs @@ -301,7 +301,7 @@ pub mod tx_types { code: tx.code, data: Some(data.clone()), timestamp: tx.timestamp, - chain_id: tx.chain_id, + chain_id: tx.chain_id.clone(), } .hash(); match TxType::try_from(Tx { From b96dd58e6222b8c066140eb6c2cf56ae716d4ff0 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Tue, 31 Jan 2023 15:38:37 +0100 Subject: [PATCH 38/58] Adds tx `chain_id` in tests --- .../lib/node/ledger/shell/finalize_block.rs | 13 +- apps/src/lib/node/ledger/shell/mod.rs | 19 +- .../lib/node/ledger/shell/prepare_proposal.rs | 8 +- .../lib/node/ledger/shell/process_proposal.rs | 41 ++- core/src/proto/mod.rs | 3 + core/src/proto/types.rs | 4 +- core/src/types/transaction/mod.rs | 26 +- core/src/types/transaction/wrapper.rs | 10 +- 
shared/src/ledger/ibc/vp/mod.rs | 282 +++++++----------- shared/src/ledger/queries/shell.rs | 3 +- shared/src/vm/wasm/run.rs | 17 +- tests/src/vm_host_env/mod.rs | 71 +++-- tests/src/vm_host_env/tx.rs | 12 +- tests/src/vm_host_env/vp.rs | 12 +- 14 files changed, 268 insertions(+), 253 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 944bc6fd4c..ebdc289b22 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -563,6 +563,7 @@ mod test_finalize_block { let raw_tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some(format!("transaction data: {}", i).as_bytes().to_owned()), + shell.chain_id.clone(), ); let wrapper = WrapperTx::new( Fee { @@ -577,7 +578,9 @@ mod test_finalize_block { #[cfg(not(feature = "mainnet"))] None, ); - let tx = wrapper.sign(&keypair).expect("Test failed"); + let tx = wrapper + .sign(&keypair, shell.chain_id.clone()) + .expect("Test failed"); if i > 1 { processed_txs.push(ProcessedTx { tx: tx.to_bytes(), @@ -636,6 +639,7 @@ mod test_finalize_block { let raw_tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some(String::from("transaction data").as_bytes().to_owned()), + shell.chain_id.clone(), ); let wrapper = WrapperTx::new( Fee { @@ -773,6 +777,7 @@ mod test_finalize_block { .as_bytes() .to_owned(), ), + shell.chain_id.clone(), ); let wrapper_tx = WrapperTx::new( Fee { @@ -810,6 +815,7 @@ mod test_finalize_block { .as_bytes() .to_owned(), ), + shell.chain_id.clone(), ); let wrapper_tx = WrapperTx::new( Fee { @@ -824,7 +830,9 @@ mod test_finalize_block { #[cfg(not(feature = "mainnet"))] None, ); - let wrapper = wrapper_tx.sign(&keypair).expect("Test failed"); + let wrapper = wrapper_tx + .sign(&keypair, shell.chain_id.clone()) + .expect("Test failed"); valid_txs.push(wrapper_tx); processed_txs.push(ProcessedTx { tx: wrapper.to_bytes(), @@ -988,6 +996,7 @@ mod test_finalize_block { let raw_tx = Tx::new( tx_code, Some("Encrypted transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), ); let wrapper_tx = WrapperTx::new( Fee { diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 3d7a694901..887aaeb1da 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -1064,6 +1064,7 @@ mod test_utils { let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), ); let wrapper = WrapperTx::new( Fee { @@ -1150,6 +1151,7 @@ mod test_mempool_validate { let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), ); let mut wrapper = WrapperTx::new( @@ -1165,7 +1167,7 @@ mod test_mempool_validate { #[cfg(not(feature = "mainnet"))] None, ) - .sign(&keypair) + .sign(&keypair, shell.chain_id.clone()) .expect("Wrapper signing failed"); let unsigned_wrapper = if let Some(Ok(SignedTxData { @@ -1176,7 +1178,7 @@ mod test_mempool_validate { .take() .map(|data| SignedTxData::try_from_slice(&data[..])) { - Tx::new(vec![], Some(data)) + Tx::new(vec![], Some(data), shell.chain_id.clone()) } else { panic!("Test failed") }; @@ -1203,6 +1205,7 @@ mod test_mempool_validate { let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), ); let mut wrapper = WrapperTx::new( @@ -1218,7 +1221,7 @@ mod test_mempool_validate { #[cfg(not(feature = "mainnet"))] None, ) - 
.sign(&keypair) + .sign(&keypair, shell.chain_id.clone()) .expect("Wrapper signing failed"); let invalid_wrapper = if let Some(Ok(SignedTxData { @@ -1253,6 +1256,7 @@ mod test_mempool_validate { .try_to_vec() .expect("Test failed"), ), + shell.chain_id.clone(), ) } else { panic!("Test failed"); @@ -1276,7 +1280,11 @@ mod test_mempool_validate { let (shell, _) = TestShell::new(); // Test Raw TxType - let tx = Tx::new("wasm_code".as_bytes().to_owned(), None); + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + None, + shell.chain_id.clone(), + ); let result = shell.mempool_validate( tx.to_bytes().as_ref(), @@ -1297,6 +1305,7 @@ mod test_mempool_validate { let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), ); let wrapper = WrapperTx::new( @@ -1312,7 +1321,7 @@ mod test_mempool_validate { #[cfg(not(feature = "mainnet"))] None, ) - .sign(&keypair) + .sign(&keypair, shell.chain_id.clone()) .expect("Wrapper signing failed"); let tx_type = match process_tx(wrapper.clone()).expect("Test failed") { diff --git a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index 1783a127fd..6231a8ac0f 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -192,6 +192,7 @@ mod test_prepare_proposal { let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction_data".as_bytes().to_owned()), + shell.chain_id.clone(), ); let req = RequestPrepareProposal { txs: vec![tx.to_bytes()], @@ -217,6 +218,7 @@ mod test_prepare_proposal { let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction_data".as_bytes().to_owned()), + shell.chain_id.clone(), ); // an unsigned wrapper will cause an error in processing let wrapper = Tx::new( @@ -238,6 +240,7 @@ mod test_prepare_proposal { .try_to_vec() .expect("Test failed"), ), + shell.chain_id.clone(), ) .to_bytes(); #[allow(clippy::redundant_clone)] @@ -276,6 +279,7 @@ mod test_prepare_proposal { let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some(format!("transaction data: {}", i).as_bytes().to_owned()), + shell.chain_id.clone(), ); expected_decrypted.push(Tx::from(DecryptedTx::Decrypted { tx: tx.clone(), @@ -295,7 +299,9 @@ mod test_prepare_proposal { #[cfg(not(feature = "mainnet"))] None, ); - let wrapper = wrapper_tx.sign(&keypair).expect("Test failed"); + let wrapper = wrapper_tx + .sign(&keypair, shell.chain_id.clone()) + .expect("Test failed"); shell.enqueue_tx(wrapper_tx); expected_wrapper.push(wrapper.clone()); req.txs.push(wrapper.to_bytes()); diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 5a3a2ebc59..c37da58f12 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -320,6 +320,7 @@ mod test_process_proposal { let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), ); let wrapper = WrapperTx::new( Fee { @@ -337,6 +338,7 @@ mod test_process_proposal { let tx = Tx::new( vec![], Some(TxType::Wrapper(wrapper).try_to_vec().expect("Test failed")), + shell.chain_id.clone(), ) .to_bytes(); #[allow(clippy::redundant_clone)] @@ -368,6 +370,7 @@ mod test_process_proposal { let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), ); let timestamp = tx.timestamp; let mut 
wrapper = WrapperTx::new( @@ -383,7 +386,7 @@ mod test_process_proposal { #[cfg(not(feature = "mainnet"))] None, ) - .sign(&keypair) + .sign(&keypair, shell.chain_id.clone()) .expect("Test failed"); let new_tx = if let Some(Ok(SignedTxData { data: Some(data), @@ -418,6 +421,7 @@ mod test_process_proposal { .expect("Test failed"), ), timestamp, + chain_id: shell.chain_id.clone(), } } else { panic!("Test failed"); @@ -454,6 +458,7 @@ mod test_process_proposal { let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), ); let wrapper = WrapperTx::new( Fee { @@ -468,7 +473,7 @@ mod test_process_proposal { #[cfg(not(feature = "mainnet"))] None, ) - .sign(&keypair) + .sign(&keypair, shell.chain_id.clone()) .expect("Test failed"); let request = ProcessProposal { txs: vec![wrapper.to_bytes()], @@ -512,6 +517,7 @@ mod test_process_proposal { let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), ); let wrapper = WrapperTx::new( Fee { @@ -526,7 +532,7 @@ mod test_process_proposal { #[cfg(not(feature = "mainnet"))] None, ) - .sign(&keypair) + .sign(&keypair, shell.chain_id.clone()) .expect("Test failed"); let request = ProcessProposal { @@ -562,6 +568,7 @@ mod test_process_proposal { let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some(format!("transaction data: {}", i).as_bytes().to_owned()), + shell.chain_id.clone(), ); let wrapper = WrapperTx::new( Fee { @@ -632,6 +639,7 @@ mod test_process_proposal { let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), ); let wrapper = WrapperTx::new( Fee { @@ -693,6 +701,7 @@ mod test_process_proposal { let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), ); let mut wrapper = WrapperTx::new( Fee { @@ -792,6 +801,7 @@ mod test_process_proposal { let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), ); let tx = Tx::from(TxType::Decrypted(DecryptedTx::Decrypted { @@ -829,6 +839,7 @@ mod test_process_proposal { let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), ); let tx = Tx::from(TxType::Raw(tx)); let request = ProcessProposal { @@ -864,6 +875,7 @@ mod test_process_proposal { let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), ); let wrapper = WrapperTx::new( Fee { @@ -878,7 +890,9 @@ mod test_process_proposal { #[cfg(not(feature = "mainnet"))] None, ); - let signed = wrapper.sign(&keypair).expect("Test failed"); + let signed = wrapper + .sign(&keypair, shell.chain_id.clone()) + .expect("Test failed"); // Write wrapper hash to storage let wrapper_unsigned_hash = Hash(signed.unsigned_hash()); @@ -934,6 +948,7 @@ mod test_process_proposal { let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), ); let wrapper = WrapperTx::new( Fee { @@ -948,7 +963,9 @@ mod test_process_proposal { #[cfg(not(feature = "mainnet"))] None, ); - let signed = wrapper.sign(&keypair).expect("Test failed"); + let signed = wrapper + .sign(&keypair, shell.chain_id.clone()) + .expect("Test failed"); // Run validation let request = ProcessProposal { @@ -988,6 +1005,7 @@ mod test_process_proposal { let tx = Tx::new( 
"wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), ); let wrapper = WrapperTx::new( Fee { @@ -1003,7 +1021,9 @@ mod test_process_proposal { None, ); let inner_unsigned_hash = wrapper.tx_hash.clone(); - let signed = wrapper.sign(&keypair).expect("Test failed"); + let signed = wrapper + .sign(&keypair, shell.chain_id.clone()) + .expect("Test failed"); // Write inner hash to storage let hash_key = replay_protection::get_tx_hash_key(&inner_unsigned_hash); @@ -1070,6 +1090,7 @@ mod test_process_proposal { let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), ); let wrapper = WrapperTx::new( Fee { @@ -1085,7 +1106,9 @@ mod test_process_proposal { None, ); let inner_unsigned_hash = wrapper.tx_hash.clone(); - let signed = wrapper.sign(&keypair).expect("Test failed"); + let signed = wrapper + .sign(&keypair, shell.chain_id.clone()) + .expect("Test failed"); let new_wrapper = WrapperTx::new( Fee { @@ -1100,7 +1123,9 @@ mod test_process_proposal { #[cfg(not(feature = "mainnet"))] None, ); - let new_signed = new_wrapper.sign(&keypair).expect("Test failed"); + let new_signed = new_wrapper + .sign(&keypair, shell.chain_id.clone()) + .expect("Test failed"); // Run validation let request = ProcessProposal { diff --git a/core/src/proto/mod.rs b/core/src/proto/mod.rs index 3271037595..ae1b111588 100644 --- a/core/src/proto/mod.rs +++ b/core/src/proto/mod.rs @@ -11,6 +11,8 @@ mod tests { use generated::types::Tx; use prost::Message; + use crate::types::chain::ChainId; + use super::*; #[test] @@ -19,6 +21,7 @@ mod tests { code: "wasm code".as_bytes().to_owned(), data: Some("arbitrary data".as_bytes().to_owned()), timestamp: Some(std::time::SystemTime::now().into()), + chain_id: ChainId::default().0, }; let mut tx_bytes = vec![]; tx.encode(&mut tx_bytes).unwrap(); diff --git a/core/src/proto/types.rs b/core/src/proto/types.rs index d62ddd776b..657083b924 100644 --- a/core/src/proto/types.rs +++ b/core/src/proto/types.rs @@ -512,7 +512,7 @@ mod tests { fn test_tx() { let code = "wasm code".as_bytes().to_owned(); let data = "arbitrary data".as_bytes().to_owned(); - let chain_id = ChainId("This chain".to_string()); + let chain_id = ChainId::default(); let tx = Tx::new(code.clone(), Some(data.clone()), chain_id.clone()); let bytes = tx.to_bytes(); @@ -524,7 +524,7 @@ mod tests { code, data: Some(data), timestamp: None, - chain_id, + chain_id: chain_id.0, }; let mut bytes = vec![]; types_tx.encode(&mut bytes).expect("encoding failed"); diff --git a/core/src/types/transaction/mod.rs b/core/src/types/transaction/mod.rs index e78c00cda1..ff90bc95c2 100644 --- a/core/src/types/transaction/mod.rs +++ b/core/src/types/transaction/mod.rs @@ -245,7 +245,7 @@ pub mod tx_types { Tx::new( vec![], Some(ty.try_to_vec().unwrap()), - ChainId("".to_string()), + ChainId("".to_string()), //FIXME: leave this empty? 
) } } @@ -365,7 +365,7 @@ pub mod tx_types { let tx = Tx::new( "wasm code".as_bytes().to_owned(), None, - ChainId("this chain".to_string()), + ChainId::default(), ); match process_tx(tx.clone()).expect("Test failed") { @@ -382,7 +382,7 @@ pub mod tx_types { let inner = Tx::new( "code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), - ChainId("this chain".to_string()), + ChainId::default(), ); let tx = Tx::new( "wasm code".as_bytes().to_owned(), @@ -391,7 +391,7 @@ pub mod tx_types { .try_to_vec() .expect("Test failed"), ), - inner.chain_id, + inner.chain_id.clone(), ); match process_tx(tx).expect("Test failed") { @@ -407,7 +407,7 @@ pub mod tx_types { let inner = Tx::new( "code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), - ChainId("this chain".to_string()), + ChainId::default(), ); let tx = Tx::new( "wasm code".as_bytes().to_owned(), @@ -416,7 +416,7 @@ pub mod tx_types { .try_to_vec() .expect("Test failed"), ), - inner.chain_id, + inner.chain_id.clone(), ) .sign(&gen_keypair()); @@ -434,7 +434,7 @@ pub mod tx_types { let tx = Tx::new( "wasm code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), - ChainId("this chain".to_string()), + ChainId::default(), ); // the signed tx let wrapper = WrapperTx::new( @@ -450,7 +450,7 @@ pub mod tx_types { #[cfg(not(feature = "mainnet"))] None, ) - .sign(&keypair) + .sign(&keypair, tx.chain_id.clone()) .expect("Test failed"); match process_tx(wrapper).expect("Test failed") { @@ -472,7 +472,7 @@ pub mod tx_types { let tx = Tx::new( "wasm code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), - ChainId("this chain".to_string()), + ChainId::default(), ); // the signed tx let wrapper = WrapperTx::new( @@ -494,7 +494,7 @@ pub mod tx_types { Some( TxType::Wrapper(wrapper).try_to_vec().expect("Test failed"), ), - ChainId("this chain".to_string()), + ChainId::default(), ); let result = process_tx(tx).expect_err("Test failed"); assert_matches!(result, TxError::Unsigned(_)); @@ -508,7 +508,7 @@ pub mod tx_types { let payload = Tx::new( "transaction data".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), - ChainId("this chain".to_string()), + ChainId::default(), ); let decrypted = DecryptedTx::Decrypted { tx: payload.clone(), @@ -536,7 +536,7 @@ pub mod tx_types { let payload = Tx::new( "transaction data".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), - ChainId("this chain".to_string()), + ChainId::default(), ); let decrypted = DecryptedTx::Decrypted { tx: payload.clone(), @@ -558,7 +558,7 @@ pub mod tx_types { let tx = Tx::new( vec![], Some(signed.try_to_vec().expect("Test failed")), - ChainId("this chain".to_string()), + ChainId::default(), ); match process_tx(tx).expect("Test failed") { TxType::Decrypted(DecryptedTx::Decrypted { diff --git a/core/src/types/transaction/wrapper.rs b/core/src/types/transaction/wrapper.rs index 308663d1b8..555186f221 100644 --- a/core/src/types/transaction/wrapper.rs +++ b/core/src/types/transaction/wrapper.rs @@ -367,7 +367,7 @@ pub mod wrapper_tx { let tx = Tx::new( "wasm code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), - ChainId("This chain id".to_string()), + ChainId::default(), ); let wrapper = WrapperTx::new( @@ -396,7 +396,7 @@ pub mod wrapper_tx { let tx = Tx::new( "wasm code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), - ChainId("This chain id".to_string()), + ChainId::default(), ); let mut wrapper = WrapperTx::new( @@ -431,7 +431,7 
@@ pub mod wrapper_tx { let tx = Tx::new( "wasm code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), - ChainID("This Chain id".to_string()), + ChainId::default(), ); // the signed tx let mut tx = WrapperTx::new( @@ -447,7 +447,7 @@ pub mod wrapper_tx { #[cfg(not(feature = "mainnet"))] None, ) - .sign(&keypair, ChainId("This chain id".to_string())) + .sign(&keypair, ChainId::default()) .expect("Test failed"); // we now try to alter the inner tx maliciously @@ -468,7 +468,7 @@ pub mod wrapper_tx { let malicious = Tx::new( "Give me all the money".as_bytes().to_owned(), None, - ChainId("This chain id".to_string()), + ChainId::default(), ); // We replace the inner tx with a malicious one diff --git a/shared/src/ledger/ibc/vp/mod.rs b/shared/src/ledger/ibc/vp/mod.rs index 108942b548..b3cfe870b6 100644 --- a/shared/src/ledger/ibc/vp/mod.rs +++ b/shared/src/ledger/ibc/vp/mod.rs @@ -359,16 +359,6 @@ mod tests { use crate::tendermint::time::Time as TmTime; use crate::tendermint_proto::Protobuf; - use super::get_dummy_header; - use namada_core::ledger::ibc::actions::{ - self, commitment_prefix, init_connection, make_create_client_event, - make_open_ack_channel_event, make_open_ack_connection_event, - make_open_confirm_channel_event, make_open_confirm_connection_event, - make_open_init_channel_event, make_open_init_connection_event, - make_open_try_channel_event, make_open_try_connection_event, - make_send_packet_event, make_update_client_event, packet_from_message, - try_connection, - }; use super::super::storage::{ ack_key, capability_key, channel_key, client_state_key, client_type_key, client_update_height_key, client_update_timestamp_key, @@ -376,16 +366,26 @@ mod tests { next_sequence_ack_key, next_sequence_recv_key, next_sequence_send_key, port_key, receipt_key, }; + use super::get_dummy_header; use super::*; - use crate::types::key::testing::keypair_1; use crate::ledger::gas::VpGasMeter; use crate::ledger::storage::testing::TestStorage; use crate::ledger::storage::write_log::WriteLog; use crate::proto::Tx; use crate::types::ibc::data::{PacketAck, PacketReceipt}; - use crate::vm::wasm; + use crate::types::key::testing::keypair_1; use crate::types::storage::TxIndex; use crate::types::storage::{BlockHash, BlockHeight}; + use crate::vm::wasm; + use namada_core::ledger::ibc::actions::{ + self, commitment_prefix, init_connection, make_create_client_event, + make_open_ack_channel_event, make_open_ack_connection_event, + make_open_confirm_channel_event, make_open_confirm_connection_event, + make_open_init_channel_event, make_open_init_connection_event, + make_open_try_channel_event, make_open_try_connection_event, + make_send_packet_event, make_update_client_event, packet_from_message, + try_connection, + }; const ADDRESS: Address = Address::Internal(InternalAddress::Ibc); @@ -602,7 +602,8 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -624,14 +625,9 @@ mod tests { let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), 
&keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -641,7 +637,8 @@ mod tests { let tx_index = TxIndex::default(); let tx_code = vec![]; let tx_data = vec![]; - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -722,7 +719,8 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -744,14 +742,9 @@ mod tests { ); let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -781,7 +774,8 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -803,14 +797,9 @@ mod tests { ); let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -837,7 +826,8 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -919,7 +909,8 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -941,14 +932,9 @@ mod tests { ); let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1007,7 +993,8 @@ mod tests { let tx_index = TxIndex::default(); let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) + 
.sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1028,14 +1015,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1082,7 +1064,8 @@ mod tests { let tx_index = TxIndex::default(); let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1103,14 +1086,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1143,7 +1121,8 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1164,14 +1143,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1223,7 +1197,8 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1244,14 +1219,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1311,7 +1281,8 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1332,14 +1303,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1396,7 +1362,8 @@ mod tests { let tx_code = 
vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1417,14 +1384,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1436,7 +1398,8 @@ mod tests { let tx_index = TxIndex::default(); let tx_code = vec![]; let tx_data = vec![]; - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1457,14 +1420,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1477,7 +1435,8 @@ mod tests { let tx_index = TxIndex::default(); let tx_code = vec![]; let tx_data = vec![]; - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1500,14 +1459,9 @@ mod tests { ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1561,7 +1515,8 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1582,14 +1537,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1650,7 +1600,8 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1671,14 +1622,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + 
.validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1744,7 +1690,8 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1765,14 +1712,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1830,7 +1772,8 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1851,14 +1794,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1923,7 +1861,8 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1944,14 +1883,9 @@ mod tests { ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1972,7 +1906,8 @@ mod tests { let tx_index = TxIndex::default(); let tx_code = vec![]; let tx_data = vec![]; - let tx = Tx::new(tx_code, Some(tx_data)).sign(&keypair_1()); + let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1993,13 +1928,8 @@ mod tests { ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } } diff --git a/shared/src/ledger/queries/shell.rs b/shared/src/ledger/queries/shell.rs index 43a51a1b0f..3a799a5c71 100644 --- a/shared/src/ledger/queries/shell.rs +++ b/shared/src/ledger/queries/shell.rs @@ -387,7 +387,8 @@ mod test { // Request dry run tx let tx_no_op = std::fs::read(TX_NO_OP_WASM).expect("cannot load wasm"); - let tx = Tx::new(tx_no_op, None); + let tx = + Tx::new(tx_no_op, None, client.wl_storage.storage.chain_id.clone()); let tx_bytes = tx.to_bytes(); let result = RPC 
.shell() diff --git a/shared/src/vm/wasm/run.rs b/shared/src/vm/wasm/run.rs index 0efdac499c..2de7bfae99 100644 --- a/shared/src/vm/wasm/run.rs +++ b/shared/src/vm/wasm/run.rs @@ -409,6 +409,7 @@ fn get_gas_rules() -> rules::Set { mod tests { use borsh::BorshSerialize; use itertools::Either; + use namada_core::types::chain::ChainId; use test_log::test; use wasmer_vm::TrapCode; @@ -550,7 +551,7 @@ mod tests { input, }; let tx_data = eval_vp.try_to_vec().unwrap(); - let tx = Tx::new(vec![], Some(tx_data)); + let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone()); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); // When the `eval`ed VP doesn't run out of memory, it should return // `true` @@ -579,7 +580,7 @@ mod tests { input, }; let tx_data = eval_vp.try_to_vec().unwrap(); - let tx = Tx::new(vec![], Some(tx_data)); + let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone()); // When the `eval`ed VP runs out of memory, its result should be // `false`, hence we should also get back `false` from the VP that // called `eval`. @@ -624,7 +625,7 @@ mod tests { // Allocating `2^23` (8 MiB) should be below the memory limit and // shouldn't fail let tx_data = 2_usize.pow(23).try_to_vec().unwrap(); - let tx = Tx::new(vec![], Some(tx_data)); + let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone()); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let result = vp( vp_code.clone(), @@ -645,7 +646,7 @@ mod tests { // Allocating `2^24` (16 MiB) should be above the memory limit and // should fail let tx_data = 2_usize.pow(24).try_to_vec().unwrap(); - let tx = Tx::new(vec![], Some(tx_data)); + let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone()); let error = vp( vp_code, &tx, @@ -738,7 +739,7 @@ mod tests { // limit and should fail let len = 2_usize.pow(24); let tx_data: Vec = vec![6_u8; len]; - let tx = Tx::new(vec![], Some(tx_data)); + let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone()); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let result = vp( vp_code, @@ -847,7 +848,7 @@ mod tests { // Borsh. 
storage.write(&key, value.try_to_vec().unwrap()).unwrap(); let tx_data = key.try_to_vec().unwrap(); - let tx = Tx::new(vec![], Some(tx_data)); + let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone()); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let error = vp( vp_read_key, @@ -905,7 +906,7 @@ mod tests { input, }; let tx_data = eval_vp.try_to_vec().unwrap(); - let tx = Tx::new(vec![], Some(tx_data)); + let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone()); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let passed = vp( vp_eval, @@ -1010,7 +1011,7 @@ mod tests { ) .expect("unexpected error converting wat2wasm").into_owned(); - let tx = Tx::new(vec![], None); + let tx = Tx::new(vec![], None, ChainId::default()); let tx_index = TxIndex::default(); let mut storage = TestStorage::default(); let addr = storage.address_gen.generate_address("rng seed"); diff --git a/tests/src/vm_host_env/mod.rs b/tests/src/vm_host_env/mod.rs index 04a545e8b1..4f6aa62f00 100644 --- a/tests/src/vm_host_env/mod.rs +++ b/tests/src/vm_host_env/mod.rs @@ -34,7 +34,7 @@ mod tests { use namada::types::storage::{self, BlockHash, BlockHeight, Key, KeySeg}; use namada::types::time::DateTimeUtc; use namada::types::token::{self, Amount}; - use namada::types::{address, key}; + use namada::types::{address, chain::ChainId, key}; use namada_tx_prelude::{ BorshDeserialize, BorshSerialize, StorageRead, StorageWrite, }; @@ -134,13 +134,11 @@ mod tests { // Trying to delete a validity predicate should fail let key = storage::Key::validity_predicate(&test_account); - assert!( - panic::catch_unwind(|| { tx::ctx().delete(&key).unwrap() }) - .err() - .map(|a| a.downcast_ref::().cloned().unwrap()) - .unwrap() - .contains("CannotDeleteVp") - ); + assert!(panic::catch_unwind(|| { tx::ctx().delete(&key).unwrap() }) + .err() + .map(|a| a.downcast_ref::().cloned().unwrap()) + .unwrap() + .contains("CannotDeleteVp")); } #[test] @@ -452,28 +450,29 @@ mod tests { None, ] { let signed_tx_data = vp_host_env::with(|env| { - env.tx = Tx::new(code.clone(), data.clone()).sign(&keypair); + env.tx = Tx::new( + code.clone(), + data.clone(), + env.wl_storage.storage.chain_id.clone(), + ) + .sign(&keypair); let tx_data = env.tx.data.as_ref().expect("data should exist"); SignedTxData::try_from_slice(&tx_data[..]) .expect("decoding signed data we just signed") }); assert_eq!(&signed_tx_data.data, data); - assert!( - vp::CTX - .verify_tx_signature(&pk, &signed_tx_data.sig) - .unwrap() - ); + assert!(vp::CTX + .verify_tx_signature(&pk, &signed_tx_data.sig) + .unwrap()); let other_keypair = key::testing::keypair_2(); - assert!( - !vp::CTX - .verify_tx_signature( - &other_keypair.ref_to(), - &signed_tx_data.sig - ) - .unwrap() - ); + assert!(!vp::CTX + .verify_tx_signature( + &other_keypair.ref_to(), + &signed_tx_data.sig + ) + .unwrap()); } } @@ -561,6 +560,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); // get and increment the connection counter @@ -598,6 +598,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); @@ -635,6 +636,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); // get and update the client without a header @@ -680,6 +682,7 @@ mod tests { code: vec![], 
data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); // update the client with the message @@ -713,6 +716,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); // upgrade the client with the message @@ -754,6 +758,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); // get and increment the connection counter @@ -791,6 +796,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); // init a connection with the message @@ -820,6 +826,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); // open the connection with the message @@ -859,6 +866,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); // open try a connection with the message @@ -889,6 +897,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); // open the connection with the mssage @@ -933,6 +942,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); // not bind a port @@ -974,6 +984,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); // bind a port @@ -1018,6 +1029,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); // init a channel with the message @@ -1042,6 +1054,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); // open the channle with the message @@ -1083,6 +1096,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); // try open a channel with the message @@ -1108,6 +1122,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); // open a channel with the message @@ -1151,6 +1166,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); // close the channel with the message @@ -1194,6 +1210,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); @@ -1242,6 +1259,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); // send the token and a packet with the data @@ -1282,6 +1300,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); // ack the packet with the message @@ -1334,6 +1353,7 @@ mod tests 
{ code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); // send the token and a packet with the data @@ -1402,6 +1422,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); // receive a packet with the message @@ -1485,6 +1506,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); // receive a packet with the message @@ -1535,6 +1557,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); // send a packet with the message @@ -1564,6 +1587,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); // ack the packet with the message @@ -1618,6 +1642,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); // receive a packet with the message @@ -1683,6 +1708,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); @@ -1758,6 +1784,7 @@ mod tests { code: vec![], data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), + chain_id: ChainId::default(), } .sign(&key::testing::keypair_1()); diff --git a/tests/src/vm_host_env/tx.rs b/tests/src/vm_host_env/tx.rs index 6c3ccd55ae..5b3efbf762 100644 --- a/tests/src/vm_host_env/tx.rs +++ b/tests/src/vm_host_env/tx.rs @@ -62,11 +62,13 @@ impl Default for TestTxEnv { let (tx_wasm_cache, tx_cache_dir) = wasm::compilation_cache::common::testing::cache(); + let wl_storage = WlStorage { + storage: TestStorage::default(), + write_log: WriteLog::default(), + }; + let chain_id = wl_storage.storage.chain_id.clone(); Self { - wl_storage: WlStorage { - storage: TestStorage::default(), - write_log: WriteLog::default(), - }, + wl_storage: wl_storage, iterators: PrefixIterators::default(), gas_meter: BlockGasMeter::default(), tx_index: TxIndex::default(), @@ -76,7 +78,7 @@ impl Default for TestTxEnv { vp_cache_dir, tx_wasm_cache, tx_cache_dir, - tx: Tx::new(vec![], None), + tx: Tx::new(vec![], None, chain_id), } } } diff --git a/tests/src/vm_host_env/vp.rs b/tests/src/vm_host_env/vp.rs index 4d5bbf3dde..f736a158dc 100644 --- a/tests/src/vm_host_env/vp.rs +++ b/tests/src/vm_host_env/vp.rs @@ -64,15 +64,17 @@ impl Default for TestVpEnv { let (vp_wasm_cache, vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); + let wl_storage = WlStorage { + storage: TestStorage::default(), + write_log: WriteLog::default(), + }; + let chain_id = wl_storage.storage.chain_id.clone(); Self { addr: address::testing::established_address_1(), - wl_storage: WlStorage { - storage: TestStorage::default(), - write_log: WriteLog::default(), - }, + wl_storage: wl_storage, iterators: PrefixIterators::default(), gas_meter: VpGasMeter::default(), - tx: Tx::new(vec![], None), + tx: Tx::new(vec![], None, chain_id), tx_index: TxIndex::default(), keys_changed: BTreeSet::default(), verifiers: BTreeSet::default(), From c6c242600dabd7fea514908a47fbd30391f8fd09 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Tue, 31 Jan 2023 17:03:12 +0100 Subject: [PATCH 39/58] Validates tx `ChainId` --- 
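The hunks below make the shell reject, both in `mempool_validate` and in `process_proposal`, any transaction whose `chain_id` field differs from the chain id the node was started with, returning the new `ErrorCodes::InvalidChainId` code. The following is only a minimal, self-contained sketch of that comparison; `ChainId` here is a stand-in newtype, not the real `namada_core::types::chain::ChainId`, and the chain id strings are placeholders.

    // Stand-in for namada_core::types::chain::ChainId (illustration only).
    #[derive(Clone, Debug, PartialEq, Eq)]
    struct ChainId(String);

    impl std::fmt::Display for ChainId {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            write!(f, "{}", self.0)
        }
    }

    /// Shape of the check added below: reject the tx when its chain id does not
    /// match the node's, using the same error message as the patch.
    fn check_chain_id(node: &ChainId, tx: &ChainId) -> Result<(), String> {
        if tx != node {
            return Err(format!(
                "Tx carries a wrong chain id: expected {}, found {}",
                node, tx
            ));
        }
        Ok(())
    }

    fn main() {
        // Placeholder chain ids, not taken from any real network.
        let node = ChainId("example-chain-id".to_string());
        assert!(check_chain_id(&node, &node).is_ok());
        assert!(check_chain_id(&node, &ChainId("other-chain".to_string())).is_err());
    }
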
apps/src/lib/node/ledger/shell/mod.rs | 13 +++++ .../lib/node/ledger/shell/process_proposal.rs | 49 ++++++++++++++----- core/src/types/transaction/mod.rs | 2 +- 3 files changed, 50 insertions(+), 14 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 887aaeb1da..3ef49368d8 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -130,6 +130,7 @@ pub enum ErrorCodes { ExtraTxs = 5, Undecryptable = 6, ReplayTx = 7, + InvalidChainId = 8, } impl From for u32 { @@ -585,6 +586,7 @@ where /// 1: Invalid tx /// 2: Tx is invalidly signed /// 7: Replay attack + /// 8: Invalid chain id in tx pub fn mempool_validate( &self, tx_bytes: &[u8], @@ -602,7 +604,18 @@ where } }; + // Tx chain id + if tx.chain_id != self.chain_id { + response.code = ErrorCodes::InvalidChainId.into(); + response.log = format!( + "Tx carries a wrong chain id: expected {}, found {}", + self.chain_id, tx.chain_id + ); + return response; + } + // Tx signature check + // FIXME: merge this first 3 checks in process_tx? let tx_type = match process_tx(tx) { Ok(ty) => ty, Err(msg) => { diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index c37da58f12..8d8382e77d 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -103,6 +103,7 @@ where tx_queue_iter: &mut impl Iterator, temp_wl_storage: &mut TempWlStorage, ) -> TxResult { + //FIXME: move these first two checks inside process_tx? let tx = match Tx::try_from(tx_bytes) { Ok(tx) => tx, Err(_) => { @@ -113,6 +114,17 @@ where }; } }; + + if tx.chain_id != self.chain_id { + return TxResult { + code: ErrorCodes::InvalidChainId.into(), + info: format!( + "Tx carries a wrong chain id: expected {}, found {}", + self.chain_id, tx.chain_id + ), + }; + } + // TODO: This should not be hardcoded let privkey = ::G2Affine::prime_subgroup_generator(); @@ -557,6 +569,8 @@ mod test_process_proposal { } } + // FIXME: add unit tests for chain id both here and in mempool_validate + /// Test that if the expected order of decrypted txs is /// validated, [`process_proposal`] rejects it #[test] @@ -584,11 +598,14 @@ mod test_process_proposal { None, ); shell.enqueue_tx(wrapper); - txs.push(Tx::from(TxType::Decrypted(DecryptedTx::Decrypted { - tx, - #[cfg(not(feature = "mainnet"))] - has_valid_pow: false, - }))); + let mut decrypted_tx = + Tx::from(TxType::Decrypted(DecryptedTx::Decrypted { + tx, + #[cfg(not(feature = "mainnet"))] + has_valid_pow: false, + })); + decrypted_tx.chain_id = shell.chain_id.clone(); + txs.push(decrypted_tx); } let req_1 = ProcessProposal { txs: vec![txs[0].to_bytes()], @@ -656,8 +673,9 @@ mod test_process_proposal { ); shell.enqueue_tx(wrapper.clone()); - let tx = + let mut tx = Tx::from(TxType::Decrypted(DecryptedTx::Undecryptable(wrapper))); + tx.chain_id = shell.chain_id.clone(); let request = ProcessProposal { txs: vec![tx.to_bytes()], @@ -719,10 +737,11 @@ mod test_process_proposal { wrapper.tx_hash = Hash([0; 32]); shell.enqueue_tx(wrapper.clone()); - let tx = Tx::from(TxType::Decrypted(DecryptedTx::Undecryptable( + let mut tx = Tx::from(TxType::Decrypted(DecryptedTx::Undecryptable( #[allow(clippy::redundant_clone)] wrapper.clone(), ))); + tx.chain_id = shell.chain_id.clone(); let request = ProcessProposal { txs: vec![tx.to_bytes()], @@ -773,10 +792,12 @@ mod test_process_proposal { }; shell.enqueue_tx(wrapper.clone()); - let signed = 
Tx::from(TxType::Decrypted(DecryptedTx::Undecryptable( - #[allow(clippy::redundant_clone)] - wrapper.clone(), - ))); + let mut signed = + Tx::from(TxType::Decrypted(DecryptedTx::Undecryptable( + #[allow(clippy::redundant_clone)] + wrapper.clone(), + ))); + signed.chain_id = shell.chain_id.clone(); let request = ProcessProposal { txs: vec![signed.to_bytes()], }; @@ -804,11 +825,12 @@ mod test_process_proposal { shell.chain_id.clone(), ); - let tx = Tx::from(TxType::Decrypted(DecryptedTx::Decrypted { + let mut tx = Tx::from(TxType::Decrypted(DecryptedTx::Decrypted { tx, #[cfg(not(feature = "mainnet"))] has_valid_pow: false, })); + tx.chain_id = shell.chain_id.clone(); let request = ProcessProposal { txs: vec![tx.to_bytes()], @@ -841,7 +863,8 @@ mod test_process_proposal { Some("transaction data".as_bytes().to_owned()), shell.chain_id.clone(), ); - let tx = Tx::from(TxType::Raw(tx)); + let mut tx = Tx::from(TxType::Raw(tx)); + tx.chain_id = shell.chain_id.clone(); let request = ProcessProposal { txs: vec![tx.to_bytes()], }; diff --git a/core/src/types/transaction/mod.rs b/core/src/types/transaction/mod.rs index ff90bc95c2..a1c80fda25 100644 --- a/core/src/types/transaction/mod.rs +++ b/core/src/types/transaction/mod.rs @@ -245,7 +245,7 @@ pub mod tx_types { Tx::new( vec![], Some(ty.try_to_vec().unwrap()), - ChainId("".to_string()), //FIXME: leave this empty? + ChainId("".to_string()), //FIXME: leave this empty? New method to provide a chain id? ) } } From 5a626c4f18d6605b596e2d99d51ced5869d2ee85 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Wed, 1 Feb 2023 14:08:07 +0100 Subject: [PATCH 40/58] Adjusts tx chain id check --- .../lib/node/ledger/shell/process_proposal.rs | 60 ++++++++++++++----- core/src/types/transaction/decrypted.rs | 14 ++++- core/src/types/transaction/mod.rs | 2 +- 3 files changed, 60 insertions(+), 16 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 8d8382e77d..a2729fdfa9 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -115,15 +115,7 @@ where } }; - if tx.chain_id != self.chain_id { - return TxResult { - code: ErrorCodes::InvalidChainId.into(), - info: format!( - "Tx carries a wrong chain id: expected {}, found {}", - self.chain_id, tx.chain_id - ), - }; - } + let tx_chain_id = tx.chain_id.clone(); // TODO: This should not be hardcoded let privkey = ::G2Affine::prime_subgroup_generator(); @@ -142,12 +134,24 @@ where are not supported" .into(), }, - TxType::Protocol(_) => TxResult { - code: ErrorCodes::InvalidTx.into(), - info: "Protocol transactions are a fun new feature that \ + TxType::Protocol(_) => { + if tx_chain_id != self.chain_id { + return TxResult { + code: ErrorCodes::InvalidChainId.into(), + info: format!( + "Tx carries a wrong chain id: expected {}, found {}", + self.chain_id, tx_chain_id + ), + }; + } + TxResult { + code: ErrorCodes::InvalidTx.into(), + info: + "Protocol transactions are a fun new feature that \ is coming soon to a blockchain near you. Patience." 
- .into(), - }, + .into(), + } + } TxType::Decrypted(tx) => { match tx_queue_iter.next() { Some(wrapper) => { @@ -161,6 +165,23 @@ where .into(), } } else if verify_decrypted_correctly(&tx, privkey) { + if let DecryptedTx::Decrypted { + tx, + has_valid_pow: _, + } = tx + { + if tx.chain_id != self.chain_id { + return TxResult { + code: ErrorCodes::InvalidChainId + .into(), + info: format!( + "Tx carries a wrong chain id: expected {}, found {}", + self.chain_id, tx.chain_id + ), + }; + } + } + TxResult { code: ErrorCodes::Ok.into(), info: "Process Proposal accepted this \ @@ -186,6 +207,17 @@ where } } TxType::Wrapper(wrapper) => { + // ChainId check + if tx_chain_id != self.chain_id { + return TxResult { + code: ErrorCodes::InvalidChainId.into(), + info: format!( + "Tx carries a wrong chain id: expected {}, found {}", + self.chain_id, tx_chain_id + ), + }; + } + // validate the ciphertext via Ferveo if !wrapper.validate_ciphertext() { TxResult { diff --git a/core/src/types/transaction/decrypted.rs b/core/src/types/transaction/decrypted.rs index 9a1404f865..c6d547cd85 100644 --- a/core/src/types/transaction/decrypted.rs +++ b/core/src/types/transaction/decrypted.rs @@ -86,6 +86,18 @@ pub mod decrypted_tx { impl From for Tx { fn from(decrypted: DecryptedTx) -> Self { + let chain_id = match &decrypted { + DecryptedTx::Decrypted { + tx, + has_valid_pow: _, + } => tx.chain_id.to_owned(), + // If undecrytable we cannot extract the ChainId. The ChainId for the + // wrapper has already been checked previously and the inner transaction + // will fail because undecryptable. Here we simply put an empty string + // as a placeholder + DecryptedTx::Undecryptable(_) => ChainId(String::new()), + }; + Tx::new( vec![], Some( @@ -93,7 +105,7 @@ pub mod decrypted_tx { .try_to_vec() .expect("Encrypting transaction should not fail"), ), - ChainId("".to_string()), + chain_id, ) } } diff --git a/core/src/types/transaction/mod.rs b/core/src/types/transaction/mod.rs index a1c80fda25..95895bef00 100644 --- a/core/src/types/transaction/mod.rs +++ b/core/src/types/transaction/mod.rs @@ -245,7 +245,7 @@ pub mod tx_types { Tx::new( vec![], Some(ty.try_to_vec().unwrap()), - ChainId("".to_string()), //FIXME: leave this empty? New method to provide a chain id? + ChainId(String::new()), // No need to provide a valid ChainId when casting back from TxType ) } } From 7e9643feb91375750bc790481d15fcaf49826663 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Wed, 1 Feb 2023 17:41:43 +0100 Subject: [PATCH 41/58] Unit tests for tx chain id --- apps/src/lib/node/ledger/shell/mod.rs | 30 +++++- .../lib/node/ledger/shell/process_proposal.rs | 98 ++++++++++++++++++- 2 files changed, 124 insertions(+), 4 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 3ef49368d8..32e8332245 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -615,7 +615,6 @@ where } // Tx signature check - // FIXME: merge this first 3 checks in process_tx? 
let tx_type = match process_tx(tx) { Ok(ty) => ty, Err(msg) => { @@ -1417,4 +1416,33 @@ mod test_mempool_validate { ) ) } + + /// Check that a transaction with a wrong chain id gets discarded + #[test] + fn test_wrong_chain_id() { + let (shell, _) = TestShell::new(); + + let keypair = super::test_utils::gen_keypair(); + + let wrong_chain_id = ChainId("Wrong chain id".to_string()); + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + wrong_chain_id.clone(), + ) + .sign(&keypair); + + let result = shell.mempool_validate( + tx.to_bytes().as_ref(), + MempoolTxType::NewTransaction, + ); + assert_eq!(result.code, u32::from(ErrorCodes::InvalidChainId)); + assert_eq!( + result.log, + format!( + "Tx carries a wrong chain id: expected {}, found {}", + shell.chain_id, wrong_chain_id + ) + ) + } } diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index a2729fdfa9..4a1ba4c285 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -103,7 +103,6 @@ where tx_queue_iter: &mut impl Iterator, temp_wl_storage: &mut TempWlStorage, ) -> TxResult { - //FIXME: move these first two checks inside process_tx? let tx = match Tx::try_from(tx_bytes) { Ok(tx) => tx, Err(_) => { @@ -346,6 +345,7 @@ mod test_process_proposal { use namada::types::storage::Epoch; use namada::types::token::Amount; use namada::types::transaction::encrypted::EncryptedTx; + use namada::types::transaction::protocol::ProtocolTxType; use namada::types::transaction::{EncryptionKey, Fee, WrapperTx}; use super::*; @@ -601,8 +601,6 @@ mod test_process_proposal { } } - // FIXME: add unit tests for chain id both here and in mempool_validate - /// Test that if the expected order of decrypted txs is /// validated, [`process_proposal`] rejects it #[test] @@ -1205,4 +1203,98 @@ mod test_process_proposal { } } } + + /// Test that a transaction with a mismatching chain id causes the entire + /// block to be rejected + #[test] + fn test_wong_chain_id() { + let (mut shell, _) = TestShell::new(); + let keypair = crate::wallet::defaults::daewon_keypair(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), + ); + let wrapper = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx.clone(), + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ); + let wrong_chain_id = ChainId("Wrong chain id".to_string()); + let signed = wrapper + .sign(&keypair, wrong_chain_id.clone()) + .expect("Test failed"); + + let protocol_tx = ProtocolTxType::EthereumStateUpdate(tx.clone()).sign( + &keypair.ref_to(), + &keypair, + wrong_chain_id.clone(), + ); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("new transaction data".as_bytes().to_owned()), + wrong_chain_id.clone(), + ); + let decrypted: Tx = DecryptedTx::Decrypted { + tx: tx.clone(), + has_valid_pow: false, + } + .into(); + let signed_decrypted = decrypted.sign(&keypair); + let wrapper = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx, + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ); + let wrapper_in_queue = WrapperTxInQueue { + tx: wrapper, + has_valid_pow: false, + }; + 
shell.wl_storage.storage.tx_queue.push(wrapper_in_queue); + + // Run validation + let request = ProcessProposal { + txs: vec![ + signed.to_bytes(), + protocol_tx.to_bytes(), + signed_decrypted.to_bytes(), + ], + }; + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + for res in response { + assert_eq!( + res.result.code, + u32::from(ErrorCodes::InvalidChainId) + ); + assert_eq!( + res.result.info, + format!( + "Tx carries a wrong chain id: expected {}, found {}", + shell.chain_id, wrong_chain_id + ) + ); + } + } + } + } } From dcc6d4b3ce110bc910130b73fe767d8cb5ef0bfa Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Wed, 1 Feb 2023 18:22:04 +0100 Subject: [PATCH 42/58] Clippy + fmt --- .../lib/node/ledger/shell/process_proposal.rs | 38 ++-- core/src/proto/mod.rs | 3 +- core/src/types/transaction/decrypted.rs | 9 +- core/src/types/transaction/mod.rs | 4 +- shared/src/ledger/ibc/vp/mod.rs | 198 +++++++++++++----- tests/src/vm_host_env/mod.rs | 37 ++-- tests/src/vm_host_env/tx.rs | 2 +- tests/src/vm_host_env/vp.rs | 2 +- wasm/wasm_source/src/tx_bond.rs | 3 +- .../src/tx_change_validator_commission.rs | 3 +- wasm/wasm_source/src/tx_unbond.rs | 3 +- wasm/wasm_source/src/tx_withdraw.rs | 3 +- 12 files changed, 206 insertions(+), 99 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 4a1ba4c285..902bdd54f2 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -138,17 +138,18 @@ where return TxResult { code: ErrorCodes::InvalidChainId.into(), info: format!( - "Tx carries a wrong chain id: expected {}, found {}", - self.chain_id, tx_chain_id - ), + "Tx carries a wrong chain id: expected {}, \ + found {}", + self.chain_id, tx_chain_id + ), }; } TxResult { code: ErrorCodes::InvalidTx.into(), - info: - "Protocol transactions are a fun new feature that \ - is coming soon to a blockchain near you. Patience." - .into(), + info: "Protocol transactions are a fun new feature \ + that is coming soon to a blockchain near you. \ + Patience." 
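This patch only reflows code introduced earlier in the series (rustfmt line breaks plus a clippy-suggested redundant-clone removal); behaviour is unchanged. For orientation, a simplified sketch of the per-`TxType` chain id handling that the reformatted `process_proposal` hunks correspond to; the types and function below are illustrative stand-ins, not the shell's actual definitions.

    // Simplified stand-ins for the shell's transaction types (illustration only).
    #[derive(Clone, Debug, PartialEq, Eq)]
    struct ChainId(String);

    enum TxType {
        Raw,
        Protocol { chain_id: ChainId },
        Wrapper { chain_id: ChainId },
        // `None` models an undecryptable payload, which carries no usable chain id.
        Decrypted { inner_chain_id: Option<ChainId> },
    }

    /// Which chain id, if any, must match the node's for the tx to be accepted:
    /// protocol and wrapper txs are checked on the outer tx, decrypted txs on the
    /// inner payload, and raw txs are rejected elsewhere regardless of chain id.
    fn chain_id_to_check(tx: &TxType) -> Option<&ChainId> {
        match tx {
            TxType::Raw => None,
            TxType::Protocol { chain_id } | TxType::Wrapper { chain_id } => Some(chain_id),
            TxType::Decrypted { inner_chain_id } => inner_chain_id.as_ref(),
        }
    }

    fn main() {
        let node = ChainId("example-chain-id".to_string());
        let wrapper = TxType::Wrapper { chain_id: node.clone() };
        assert_eq!(chain_id_to_check(&wrapper), Some(&node));
    }
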
+ .into(), } } TxType::Decrypted(tx) => { @@ -174,9 +175,10 @@ where code: ErrorCodes::InvalidChainId .into(), info: format!( - "Tx carries a wrong chain id: expected {}, found {}", - self.chain_id, tx.chain_id - ), + "Tx carries a wrong chain id: \ + expected {}, found {}", + self.chain_id, tx.chain_id + ), }; } } @@ -211,9 +213,10 @@ where return TxResult { code: ErrorCodes::InvalidChainId.into(), info: format!( - "Tx carries a wrong chain id: expected {}, found {}", - self.chain_id, tx_chain_id - ), + "Tx carries a wrong chain id: expected {}, \ + found {}", + self.chain_id, tx_chain_id + ), }; } @@ -1234,7 +1237,7 @@ mod test_process_proposal { .sign(&keypair, wrong_chain_id.clone()) .expect("Test failed"); - let protocol_tx = ProtocolTxType::EthereumStateUpdate(tx.clone()).sign( + let protocol_tx = ProtocolTxType::EthereumStateUpdate(tx).sign( &keypair.ref_to(), &keypair, wrong_chain_id.clone(), @@ -1289,9 +1292,10 @@ mod test_process_proposal { assert_eq!( res.result.info, format!( - "Tx carries a wrong chain id: expected {}, found {}", - shell.chain_id, wrong_chain_id - ) + "Tx carries a wrong chain id: expected {}, found \ + {}", + shell.chain_id, wrong_chain_id + ) ); } } diff --git a/core/src/proto/mod.rs b/core/src/proto/mod.rs index ae1b111588..c945d229b9 100644 --- a/core/src/proto/mod.rs +++ b/core/src/proto/mod.rs @@ -11,9 +11,8 @@ mod tests { use generated::types::Tx; use prost::Message; - use crate::types::chain::ChainId; - use super::*; + use crate::types::chain::ChainId; #[test] fn encoding_round_trip() { diff --git a/core/src/types/transaction/decrypted.rs b/core/src/types/transaction/decrypted.rs index c6d547cd85..ca13dd94a3 100644 --- a/core/src/types/transaction/decrypted.rs +++ b/core/src/types/transaction/decrypted.rs @@ -91,10 +91,11 @@ pub mod decrypted_tx { tx, has_valid_pow: _, } => tx.chain_id.to_owned(), - // If undecrytable we cannot extract the ChainId. The ChainId for the - // wrapper has already been checked previously and the inner transaction - // will fail because undecryptable. Here we simply put an empty string - // as a placeholder + // If undecrytable we cannot extract the ChainId. The ChainId + // for the wrapper has already been checked + // previously and the inner transaction + // will fail because undecryptable. 
Here we simply put an empty + // string as a placeholder DecryptedTx::Undecryptable(_) => ChainId(String::new()), }; diff --git a/core/src/types/transaction/mod.rs b/core/src/types/transaction/mod.rs index 95895bef00..18f4475701 100644 --- a/core/src/types/transaction/mod.rs +++ b/core/src/types/transaction/mod.rs @@ -245,7 +245,9 @@ pub mod tx_types { Tx::new( vec![], Some(ty.try_to_vec().unwrap()), - ChainId(String::new()), // No need to provide a valid ChainId when casting back from TxType + ChainId(String::new()), /* No need to provide a valid + * ChainId when casting back from + * TxType */ ) } } diff --git a/shared/src/ledger/ibc/vp/mod.rs b/shared/src/ledger/ibc/vp/mod.rs index b3cfe870b6..abf21a195f 100644 --- a/shared/src/ledger/ibc/vp/mod.rs +++ b/shared/src/ledger/ibc/vp/mod.rs @@ -625,9 +625,14 @@ mod tests { let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -742,9 +747,14 @@ mod tests { ); let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -797,9 +807,14 @@ mod tests { ); let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -932,9 +947,14 @@ mod tests { ); let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1015,9 +1035,14 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1086,9 +1111,14 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1143,9 +1173,14 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1219,9 +1254,14 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers 
+ ) + .expect("validation failed") + ); } #[test] @@ -1303,9 +1343,14 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1384,9 +1429,14 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1420,9 +1470,14 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1459,9 +1514,14 @@ mod tests { ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1537,9 +1597,14 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1622,9 +1687,14 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1712,9 +1782,14 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1794,9 +1869,14 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1883,9 +1963,14 @@ mod tests { ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1928,8 +2013,13 @@ mod tests { ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } } diff --git a/tests/src/vm_host_env/mod.rs b/tests/src/vm_host_env/mod.rs index 4f6aa62f00..230306ecfb 100644 --- a/tests/src/vm_host_env/mod.rs +++ b/tests/src/vm_host_env/mod.rs @@ -30,11 +30,12 @@ mod tests { use namada::ledger::tx_env::TxEnv; 
use namada::proto::{SignedTxData, Tx}; use namada::tendermint_proto::Protobuf; + use namada::types::chain::ChainId; use namada::types::key::*; use namada::types::storage::{self, BlockHash, BlockHeight, Key, KeySeg}; use namada::types::time::DateTimeUtc; use namada::types::token::{self, Amount}; - use namada::types::{address, chain::ChainId, key}; + use namada::types::{address, key}; use namada_tx_prelude::{ BorshDeserialize, BorshSerialize, StorageRead, StorageWrite, }; @@ -134,11 +135,13 @@ mod tests { // Trying to delete a validity predicate should fail let key = storage::Key::validity_predicate(&test_account); - assert!(panic::catch_unwind(|| { tx::ctx().delete(&key).unwrap() }) - .err() - .map(|a| a.downcast_ref::().cloned().unwrap()) - .unwrap() - .contains("CannotDeleteVp")); + assert!( + panic::catch_unwind(|| { tx::ctx().delete(&key).unwrap() }) + .err() + .map(|a| a.downcast_ref::().cloned().unwrap()) + .unwrap() + .contains("CannotDeleteVp") + ); } #[test] @@ -462,17 +465,21 @@ mod tests { .expect("decoding signed data we just signed") }); assert_eq!(&signed_tx_data.data, data); - assert!(vp::CTX - .verify_tx_signature(&pk, &signed_tx_data.sig) - .unwrap()); + assert!( + vp::CTX + .verify_tx_signature(&pk, &signed_tx_data.sig) + .unwrap() + ); let other_keypair = key::testing::keypair_2(); - assert!(!vp::CTX - .verify_tx_signature( - &other_keypair.ref_to(), - &signed_tx_data.sig - ) - .unwrap()); + assert!( + !vp::CTX + .verify_tx_signature( + &other_keypair.ref_to(), + &signed_tx_data.sig + ) + .unwrap() + ); } } diff --git a/tests/src/vm_host_env/tx.rs b/tests/src/vm_host_env/tx.rs index 5b3efbf762..9565fe9619 100644 --- a/tests/src/vm_host_env/tx.rs +++ b/tests/src/vm_host_env/tx.rs @@ -68,7 +68,7 @@ impl Default for TestTxEnv { }; let chain_id = wl_storage.storage.chain_id.clone(); Self { - wl_storage: wl_storage, + wl_storage, iterators: PrefixIterators::default(), gas_meter: BlockGasMeter::default(), tx_index: TxIndex::default(), diff --git a/tests/src/vm_host_env/vp.rs b/tests/src/vm_host_env/vp.rs index f736a158dc..023167eee4 100644 --- a/tests/src/vm_host_env/vp.rs +++ b/tests/src/vm_host_env/vp.rs @@ -71,7 +71,7 @@ impl Default for TestVpEnv { let chain_id = wl_storage.storage.chain_id.clone(); Self { addr: address::testing::established_address_1(), - wl_storage: wl_storage, + wl_storage, iterators: PrefixIterators::default(), gas_meter: VpGasMeter::default(), tx: Tx::new(vec![], None, chain_id), diff --git a/wasm/wasm_source/src/tx_bond.rs b/wasm/wasm_source/src/tx_bond.rs index b949731a70..649dccaa2d 100644 --- a/wasm/wasm_source/src/tx_bond.rs +++ b/wasm/wasm_source/src/tx_bond.rs @@ -25,6 +25,7 @@ mod tests { read_total_stake, read_validator_stake, }; use namada::proto::Tx; + use namada::types::chain::ChainId; use namada::types::storage::Epoch; use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; @@ -99,7 +100,7 @@ mod tests { let tx_code = vec![]; let tx_data = bond.try_to_vec().unwrap(); - let tx = Tx::new(tx_code, Some(tx_data)); + let tx = Tx::new(tx_code, Some(tx_data), ChainId::default()); let signed_tx = tx.sign(&key); let tx_data = signed_tx.data.unwrap(); diff --git a/wasm/wasm_source/src/tx_change_validator_commission.rs b/wasm/wasm_source/src/tx_change_validator_commission.rs index 3b77c9197c..ef81fcb1b5 100644 --- a/wasm/wasm_source/src/tx_change_validator_commission.rs +++ b/wasm/wasm_source/src/tx_change_validator_commission.rs @@ -23,6 +23,7 @@ mod tests { use namada::ledger::pos::{PosParams, PosVP}; use 
namada::proof_of_stake::validator_commission_rate_handle; use namada::proto::Tx; + use namada::types::chain::ChainId; use namada::types::storage::Epoch; use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; @@ -78,7 +79,7 @@ mod tests { let tx_code = vec![]; let tx_data = commission_change.try_to_vec().unwrap(); - let tx = Tx::new(tx_code, Some(tx_data)); + let tx = Tx::new(tx_code, Some(tx_data), ChainId::default()); let signed_tx = tx.sign(&key); let tx_data = signed_tx.data.unwrap(); diff --git a/wasm/wasm_source/src/tx_unbond.rs b/wasm/wasm_source/src/tx_unbond.rs index 42fa0666bc..2cdc11789e 100644 --- a/wasm/wasm_source/src/tx_unbond.rs +++ b/wasm/wasm_source/src/tx_unbond.rs @@ -25,6 +25,7 @@ mod tests { read_total_stake, read_validator_stake, unbond_handle, }; use namada::proto::Tx; + use namada::types::chain::ChainId; use namada::types::storage::Epoch; use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; @@ -121,7 +122,7 @@ mod tests { let tx_code = vec![]; let tx_data = unbond.try_to_vec().unwrap(); - let tx = Tx::new(tx_code, Some(tx_data)); + let tx = Tx::new(tx_code, Some(tx_data), ChainId::default()); let signed_tx = tx.sign(&key); let tx_data = signed_tx.data.unwrap(); diff --git a/wasm/wasm_source/src/tx_withdraw.rs b/wasm/wasm_source/src/tx_withdraw.rs index 80c0f00265..cafdca8f6b 100644 --- a/wasm/wasm_source/src/tx_withdraw.rs +++ b/wasm/wasm_source/src/tx_withdraw.rs @@ -24,6 +24,7 @@ mod tests { use namada::ledger::pos::{GenesisValidator, PosParams, PosVP}; use namada::proof_of_stake::unbond_handle; use namada::proto::Tx; + use namada::types::chain::ChainId; use namada::types::storage::Epoch; use namada_tests::log::test; use namada_tests::native_vp::pos::init_pos; @@ -154,7 +155,7 @@ mod tests { let tx_code = vec![]; let tx_data = withdraw.try_to_vec().unwrap(); - let tx = Tx::new(tx_code, Some(tx_data)); + let tx = Tx::new(tx_code, Some(tx_data), ChainId::default()); let signed_tx = tx.sign(&key); let tx_data = signed_tx.data.unwrap(); From ea1e448d64e3de67373923fa0715dfbc624d477d Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 3 Feb 2023 12:24:39 +0100 Subject: [PATCH 43/58] Adjusts decrypted tx conversion --- core/src/types/transaction/decrypted.rs | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/core/src/types/transaction/decrypted.rs b/core/src/types/transaction/decrypted.rs index ca13dd94a3..285113c36a 100644 --- a/core/src/types/transaction/decrypted.rs +++ b/core/src/types/transaction/decrypted.rs @@ -86,19 +86,6 @@ pub mod decrypted_tx { impl From for Tx { fn from(decrypted: DecryptedTx) -> Self { - let chain_id = match &decrypted { - DecryptedTx::Decrypted { - tx, - has_valid_pow: _, - } => tx.chain_id.to_owned(), - // If undecrytable we cannot extract the ChainId. The ChainId - // for the wrapper has already been checked - // previously and the inner transaction - // will fail because undecryptable. Here we simply put an empty - // string as a placeholder - DecryptedTx::Undecryptable(_) => ChainId(String::new()), - }; - Tx::new( vec![], Some( @@ -106,7 +93,12 @@ pub mod decrypted_tx { .try_to_vec() .expect("Encrypting transaction should not fail"), ), - chain_id, + // If undecrytable we cannot extract the ChainId. 
+ // If instead the tx gets decrypted successfully, the correct + // chain id is serialized inside the data field + // of the Tx, while the one available + // in the chain_id field is just a placeholder + ChainId(String::new()), ) } } From 976497c7a340530e6f3f8ca8043cccd3be18e85b Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Mon, 6 Feb 2023 12:52:11 +0100 Subject: [PATCH 44/58] Manages invalid chain id for decrypted txs --- apps/src/lib/node/ledger/shell/mod.rs | 1 + .../lib/node/ledger/shell/process_proposal.rs | 82 +++++++++++++------ 2 files changed, 57 insertions(+), 26 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 32e8332245..7c4bb21c09 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -131,6 +131,7 @@ pub enum ErrorCodes { Undecryptable = 6, ReplayTx = 7, InvalidChainId = 8, + InvalidDecryptedChainId = 9, } impl From for u32 { diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 902bdd54f2..4ecf020c10 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -42,7 +42,9 @@ where status: if tx_results.iter().all(|res| { matches!( ErrorCodes::from_u32(res.code).unwrap(), - ErrorCodes::Ok | ErrorCodes::Undecryptable + ErrorCodes::Ok + | ErrorCodes::Undecryptable + | ErrorCodes::InvalidDecryptedChainId ) }) { ProposalStatus::Accept as i32 @@ -172,10 +174,10 @@ where { if tx.chain_id != self.chain_id { return TxResult { - code: ErrorCodes::InvalidChainId + code: ErrorCodes::InvalidDecryptedChainId .into(), info: format!( - "Tx carries a wrong chain id: \ + "Decrypted tx carries a wrong chain id: \ expected {}, found {}", self.chain_id, tx.chain_id ), @@ -1207,8 +1209,8 @@ mod test_process_proposal { } } - /// Test that a transaction with a mismatching chain id causes the entire - /// block to be rejected + /// Test that a wrapper or protocol transaction with a mismatching chain id + /// causes the entire block to be rejected #[test] fn test_wong_chain_id() { let (mut shell, _) = TestShell::new(); @@ -1243,6 +1245,39 @@ mod test_process_proposal { wrong_chain_id.clone(), ); + // Run validation + let request = ProcessProposal { + txs: vec![signed.to_bytes(), protocol_tx.to_bytes()], + }; + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + for res in response { + assert_eq!( + res.result.code, + u32::from(ErrorCodes::InvalidChainId) + ); + assert_eq!( + res.result.info, + format!( + "Tx carries a wrong chain id: expected {}, found \ + {}", + shell.chain_id, wrong_chain_id + ) + ); + } + } + } + } + + /// Test that a decrypted transaction with a mismatching chain id gets + /// rejected without rejecting the entire block + #[test] + fn test_decrypted_wong_chain_id() { + let (mut shell, _) = TestShell::new(); + let keypair = crate::wallet::defaults::daewon_keypair(); + + let wrong_chain_id = ChainId("Wrong chain id".to_string()); let tx = Tx::new( "wasm_code".as_bytes().to_owned(), Some("new transaction data".as_bytes().to_owned()), @@ -1275,30 +1310,25 @@ mod test_process_proposal { // Run validation let request = ProcessProposal { - txs: vec![ - signed.to_bytes(), - protocol_tx.to_bytes(), - signed_decrypted.to_bytes(), - ], + txs: vec![signed_decrypted.to_bytes()], }; + match shell.process_proposal(request) { - Ok(_) => panic!("Test failed"), - 
Err(TestError::RejectProposal(response)) => { - for res in response { - assert_eq!( - res.result.code, - u32::from(ErrorCodes::InvalidChainId) - ); - assert_eq!( - res.result.info, - format!( - "Tx carries a wrong chain id: expected {}, found \ - {}", - shell.chain_id, wrong_chain_id - ) - ); - } + Ok(response) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::InvalidDecryptedChainId) + ); + assert_eq!( + response[0].result.info, + format!( + "Decrypted tx carries a wrong chain id: expected {}, \ + found {}", + shell.chain_id, wrong_chain_id + ) + ) } + Err(_) => panic!("Test failed"), } } } From a7ef5fe0adcc7232d278626d40afed577392062e Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Fri, 10 Feb 2023 23:25:30 +0000 Subject: [PATCH 45/58] [ci] wasm checksums update --- wasm/checksums.json | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/wasm/checksums.json b/wasm/checksums.json index 0e50d4c7aa..bbd61b7a29 100644 --- a/wasm/checksums.json +++ b/wasm/checksums.json @@ -1,20 +1,20 @@ { - "tx_bond.wasm": "tx_bond.6be00c580c78034f0ff2fb74f26b3f79d61abb77f27b63162e6f3c2cd8dabcdf.wasm", - "tx_change_validator_commission.wasm": "tx_change_validator_commission.ed1d1cdfbf9abe3644719a37d3041f05e7eee718236124480a6e969bb9b245aa.wasm", - "tx_ibc.wasm": "tx_ibc.6b52ff9f1c9b4266f614d24bd41a949cc803a312fce6cb017ee73f68390bf39f.wasm", - "tx_init_account.wasm": "tx_init_account.0f6113d881e9762c62d97b7cc9841da5f7fe5feef3b7739192391f029d10ebcd.wasm", - "tx_init_proposal.wasm": "tx_init_proposal.6355ce198ca39e982de7bd71c009d4a71a536c7f1c987f9b56e326be5d69f702.wasm", - "tx_init_validator.wasm": "tx_init_validator.285957b0ba5111251d4447f5cffe0e03632f0940f5658d27f59592bd7a29d64f.wasm", - "tx_reveal_pk.wasm": "tx_reveal_pk.fc5cb0ef1d45a1ff475d67f98c0dd2f0e6a34fbbc83716f5975c6e62733adfe1.wasm", - "tx_transfer.wasm": "tx_transfer.bdf43ccce2603c528482b6b09da41b814017bf0d776d04cf7caad82b18bb0a09.wasm", - "tx_unbond.wasm": "tx_unbond.c53cf3bbe8c7ac3c03f0b3b8d3dee837aa84f04a1227d4c560946454ef232383.wasm", - "tx_update_vp.wasm": "tx_update_vp.b78247f292a7e2423204f3a29961e2783938f110d53e31bf858094b03e1c92ac.wasm", - "tx_vote_proposal.wasm": "tx_vote_proposal.403456a31ccbe5fc6df98c3eab77abd89cbdabcb78bb16a6255ad487859b0f53.wasm", - "tx_withdraw.wasm": "tx_withdraw.6b2d90f3cc024d8930bca9965a010d4c879e4b88698168625d49d245096afa74.wasm", - "vp_implicit.wasm": "vp_implicit.9824a09d636fb9af1840352b2de3fb04fa90e5fd2dfbe86d1c7664a7dbeeec06.wasm", - "vp_masp.wasm": "vp_masp.70f3b9de71e4fbfb5a06a01cf7e8667ab112bb56f9efbb2bfc2fa8094e66c323.wasm", - "vp_testnet_faucet.wasm": "vp_testnet_faucet.6c8f0e1ac279cb0f66b2fade5854f50a7c48418db3643da45961a98ea300db6f.wasm", - "vp_token.wasm": "vp_token.dc0ac90117a834f86a006279a03b8530a100008efc0480fee797e0142fa25cca.wasm", - "vp_user.wasm": "vp_user.e625762fc14007b08a62036b2ec4a473777af7f9ba26ffa9416d6fb465fcbb08.wasm", - "vp_validator.wasm": "vp_validator.3033c78aa87107e50dd3eadfd18dbf0ff3b320ac796fd225f44d944bde111c74.wasm" + "tx_bond.wasm": "tx_bond.cd75887e287f0228576f3555a70a8cd7e82587ea336a1b8494f805719e303ba0.wasm", + "tx_change_validator_commission.wasm": "tx_change_validator_commission.860609e27cdc77107966e8a8964cc98cf4d8a8f58e145c39824c3215547243a9.wasm", + "tx_ibc.wasm": "tx_ibc.9325c23a937267b53177e1d49b94ad1aa140f29df50e43dbe55b025b8cccf245.wasm", + "tx_init_account.wasm": "tx_init_account.290c2e570e265112a9026e00a5b172750b4a9a463979d2fc9a8ca573f5f345fa.wasm", + "tx_init_proposal.wasm": 
"tx_init_proposal.5e15f66b9cf2b10d98b3744a0f49137af35237e953ea8a709e90a4ff86c29ebb.wasm", + "tx_init_validator.wasm": "tx_init_validator.0cc9413e5ee774ccbef9fca5869587cf1c067927e4b33cdaf4f336f950cfb49d.wasm", + "tx_reveal_pk.wasm": "tx_reveal_pk.6d4a8adf662ba52c44dcd66fca0240e717b7b1d01949a736f41bcb464a746aee.wasm", + "tx_transfer.wasm": "tx_transfer.2f2e492b45b90ca7c32c8748116e62e60ad0885489da4d2356d6269a8128dc61.wasm", + "tx_unbond.wasm": "tx_unbond.119a32741d39f9cba683f8ca9c4ce9356bc1094d22d7e902e43c47f84e1ff144.wasm", + "tx_update_vp.wasm": "tx_update_vp.ccacec9b1afd97144022f1d5ae05c7d12f31ef3a2f08fe57bdbb357d34bc1dbf.wasm", + "tx_vote_proposal.wasm": "tx_vote_proposal.97882252913e386a82e7e1d7381122856b467cedb36c8abe98d44e15b3ef5bd7.wasm", + "tx_withdraw.wasm": "tx_withdraw.0092f99316e60cda347d262169f6ff7dc85a6aa2af3082121c259e4f98c43497.wasm", + "vp_implicit.wasm": "vp_implicit.d418d65c79f666263d24c5a85d32716b502c31d04c88ce1b63538a7f6237b66c.wasm", + "vp_masp.wasm": "vp_masp.42eabefb4be329d315c28b8ee830e7069b8b11d3793b0d7b59a9541f3f158d3f.wasm", + "vp_testnet_faucet.wasm": "vp_testnet_faucet.4ce1d9281ad0286a844fe81b81be19718c97fff8712bf09cd217723be646306c.wasm", + "vp_token.wasm": "vp_token.a8e7cff8c487ee4da2178db7b1b8285a3388bd63490ca4e66ec15b484aa9c982.wasm", + "vp_user.wasm": "vp_user.ebb5f0c15718622a1643b4fce61ae899c662d37857e83172b5fa4bc8c07b3678.wasm", + "vp_validator.wasm": "vp_validator.633839364ce085dc9163ee694c6046ebab6840bf727dc8f4a354b1067814d212.wasm" } \ No newline at end of file From d861a8080afaa513d7e3f532f429fb635707bb99 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Wed, 1 Feb 2023 18:25:40 +0100 Subject: [PATCH 46/58] changelog: add #1106 --- .changelog/unreleased/improvements/1106-tx-chain-id.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/improvements/1106-tx-chain-id.md diff --git a/.changelog/unreleased/improvements/1106-tx-chain-id.md b/.changelog/unreleased/improvements/1106-tx-chain-id.md new file mode 100644 index 0000000000..187ec93ca7 --- /dev/null +++ b/.changelog/unreleased/improvements/1106-tx-chain-id.md @@ -0,0 +1,2 @@ +- Adds chain id field to transactions + ([#1106](https://github.com/anoma/namada/pull/1106)) \ No newline at end of file From 2d6e42ee692126d5fddce2afdf9820914b4c9da6 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 24 Feb 2023 14:01:04 +0100 Subject: [PATCH 47/58] Wrapper `epoch` in replay protection specs --- .../src/base-ledger/replay-protection.md | 62 ++++--------------- 1 file changed, 12 insertions(+), 50 deletions(-) diff --git a/documentation/specs/src/base-ledger/replay-protection.md b/documentation/specs/src/base-ledger/replay-protection.md index 85001729a5..483bf37db1 100644 --- a/documentation/specs/src/base-ledger/replay-protection.md +++ b/documentation/specs/src/base-ledger/replay-protection.md @@ -296,11 +296,11 @@ a series of external factors (ledger state, etc.) might change the mind of the submitter who's now not interested in the execution of the transaction anymore. We have to introduce the concept of a lifetime (or timeout) for the -transactions: basically, the `Tx` struct will hold an extra field called -`expiration` stating the maximum `DateTimeUtc` up until which the submitter is -willing to see the transaction executed. After the specified time, the -transaction will be considered invalid and discarded regardless of all the other -checks. 
+transactions: basically, the `Tx` struct will hold an optional extra field +called `expiration` stating the maximum `DateTimeUtc` up until which the +submitter is willing to see the transaction executed. After the specified time, +the transaction will be considered invalid and discarded regardless of all the +other checks. By introducing this new field we are setting a new constraint in the transaction's contract, where the ledger will make sure to prevent the execution @@ -322,60 +322,22 @@ transaction submitter commits himself to one of these three conditions: The first condition satisfied will invalidate further executions of the same tx. -In anticipation of DKG implementation, the current struct `WrapperTx` holds a -field `epoch` stating the epoch in which the tx should be executed. This is -because Ferveo will produce a new public key each epoch, effectively limiting -the lifetime of the transaction (see section 2.2.2 of the -[documentation](https://eprint.iacr.org/2022/898.pdf)). Unfortunately, for -replay protection, a resolution of 1 epoch (~ 1 day) is too low for the possible -needs of the submitters, therefore we need the `expiration` field to hold a -maximum `DateTimeUtc` to increase resolution down to a single block (~ 10 -seconds). - ```rust pub struct Tx { pub code: Vec, pub data: Option>, pub timestamp: DateTimeUtc, pub chain_id: ChainId, - /// Lifetime of the transaction, also determines which decryption key will be used - pub expiration: DateTimeUtc, -} - -pub struct WrapperTx { - /// The fee to be payed for including the tx - pub fee: Fee, - /// Used to determine an implicit account of the fee payer - pub pk: common::PublicKey, - /// Max amount of gas that can be used when executing the inner tx - pub gas_limit: GasLimit, - /// the encrypted payload - pub inner_tx: EncryptedTx, - /// sha-2 hash of the inner transaction acting as a commitment - /// the contents of the encrypted payload - pub tx_hash: Hash, + /// Optional lifetime of the transaction + pub expiration: Option, } ``` -Since we now have more detailed information about the desired lifetime of the -transaction, we can remove the `epoch` field and rely solely on `expiration`. -Now, the producer of the inner transaction should make sure to set a sensible -value for this field, in the sense that it should not span more than one epoch. -If this happens, then the transaction will be correctly decrypted only in a -subset of the desired lifetime (the one expecting the actual key used for the -encryption), while, in the following epochs, the transaction will fail -decryption and won't be executed. In essence, the `expiration` parameter can -only restrict the implicit lifetime within the current epoch, it can not surpass -it as that would make the transaction fail in the decryption phase. - -The subject encrypting the inner transaction will also be responsible for using -the appropriate public key for encryption relative to the targeted time. - -The wrapper transaction will match the `expiration` of the inner for correct -execution. Note that we need this field also for the wrapper to anticipate the -check at mempool/proposal evaluation time, but also to prevent someone from -inserting a wrapper transaction after the corresponding inner has expired -forcing the wrapper signer to pay for the fees. +The wrapper transaction will match the `expiration` of the inner (if any) for a +correct execution. 
Note that we need this field also for the wrapper to +anticipate the check at mempool/proposal evaluation time, but also to prevent +someone from inserting a wrapper transaction after the corresponding inner has +expired forcing the wrapper signer to pay for the fees. ### Wrapper checks From 13e0fa99a3d4928125670faab26a5858c8e401f4 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 2 Feb 2023 16:13:46 +0100 Subject: [PATCH 48/58] Adds `expiration` field to `Tx` --- apps/src/lib/cli.rs | 5 + apps/src/lib/client/rpc.rs | 2 +- apps/src/lib/client/signing.rs | 4 +- apps/src/lib/client/tx.rs | 81 ++++- .../lib/node/ledger/shell/finalize_block.rs | 9 +- apps/src/lib/node/ledger/shell/governance.rs | 1 + apps/src/lib/node/ledger/shell/mod.rs | 15 +- .../lib/node/ledger/shell/prepare_proposal.rs | 12 +- .../lib/node/ledger/shell/process_proposal.rs | 37 ++- core/src/proto/mod.rs | 4 +- core/src/proto/types.rs | 23 +- core/src/types/transaction/decrypted.rs | 9 +- core/src/types/transaction/mod.rs | 19 +- core/src/types/transaction/protocol.rs | 2 + core/src/types/transaction/wrapper.rs | 9 +- proto/types.proto | 1 + shared/src/ledger/ibc/vp/mod.rs | 298 +++++++----------- shared/src/ledger/queries/shell.rs | 8 +- shared/src/vm/wasm/run.rs | 16 +- tests/src/vm_host_env/mod.rs | 64 ++-- tests/src/vm_host_env/tx.rs | 2 +- tests/src/vm_host_env/vp.rs | 2 +- 22 files changed, 363 insertions(+), 260 deletions(-) diff --git a/apps/src/lib/cli.rs b/apps/src/lib/cli.rs index 9c23cadb46..1de6b51b5e 100644 --- a/apps/src/lib/cli.rs +++ b/apps/src/lib/cli.rs @@ -1621,6 +1621,7 @@ pub mod args { const DRY_RUN_TX: ArgFlag = flag("dry-run"); const DUMP_TX: ArgFlag = flag("dump-tx"); const EPOCH: ArgOpt = arg_opt("epoch"); + const EXPIRATION: ArgOpt = arg_opt("expiration"); const FORCE: ArgFlag = flag("force"); const DONT_PREFETCH_WASM: ArgFlag = flag("dont-prefetch-wasm"); const GAS_AMOUNT: ArgDefault = @@ -2863,6 +2864,8 @@ pub mod args { pub fee_token: WalletAddress, /// The max amount of gas used to process tx pub gas_limit: GasLimit, + /// The optional expiration of the transaction + pub expiration: Option, /// Sign the tx with the key for the given alias from your wallet pub signing_key: Option, /// Sign the tx with the keypair of the public key of the given address @@ -2954,6 +2957,7 @@ pub mod args { let fee_amount = GAS_AMOUNT.parse(matches); let fee_token = GAS_TOKEN.parse(matches); let gas_limit = GAS_LIMIT.parse(matches).into(); + let expiration = EXPIRATION.parse(matches); let signing_key = SIGNING_KEY_OPT.parse(matches); let signer = SIGNER.parse(matches); @@ -2967,6 +2971,7 @@ pub mod args { fee_amount, fee_token, gas_limit, + expiration, signing_key, signer, } diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index 4dd36ea2eb..7ffd4c10f0 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -227,7 +227,7 @@ pub async fn query_tx_deltas( let mut transfer = None; extract_payload(tx, &mut wrapper, &mut transfer); // Epoch data is not needed for transparent transactions - let epoch = wrapper.map(|x| x.epoch).unwrap_or_default(); + let epoch = Epoch::default(); if let Some(transfer) = transfer { // Skip MASP addresses as they are already handled by // ShieldedContext diff --git a/apps/src/lib/client/signing.rs b/apps/src/lib/client/signing.rs index 5fb6a2410b..cb5f28aee7 100644 --- a/apps/src/lib/client/signing.rs +++ b/apps/src/lib/client/signing.rs @@ -3,11 +3,11 @@ use borsh::BorshSerialize; use namada::ledger::parameters::storage as parameter_storage; 
+use namada::proof_of_stake::Epoch; use namada::proto::Tx; use namada::types::address::{Address, ImplicitAddress}; use namada::types::hash::Hash; use namada::types::key::*; -use namada::types::storage::Epoch; use namada::types::token; use namada::types::token::Amount; use namada::types::transaction::{hash_tx, Fee, WrapperTx, MIN_FEE}; @@ -310,7 +310,7 @@ pub async fn sign_wrapper( let decrypted_hash = tx.tx_hash.to_string(); TxBroadcastData::Wrapper { tx: tx - .sign(keypair, ctx.config.ledger.chain_id.clone()) + .sign(keypair, ctx.config.ledger.chain_id.clone(), args.expiration) .expect("Wrapper tx signing keypair should be correct"), wrapper_hash, decrypted_hash, diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index 8abcf33f02..a54aefe90d 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -106,7 +106,12 @@ pub async fn submit_custom(ctx: Context, args: args::TxCustom) { let data = args.data_path.map(|data_path| { std::fs::read(data_path).expect("Expected a file at given data path") }); - let tx = Tx::new(tx_code, data, ctx.config.ledger.chain_id.clone()); + let tx = Tx::new( + tx_code, + data, + ctx.config.ledger.chain_id.clone(), + args.tx.expiration, + ); let (ctx, initialized_accounts) = process_tx( ctx, &args.tx, @@ -169,7 +174,12 @@ pub async fn submit_update_vp(ctx: Context, args: args::TxUpdateVp) { let data = UpdateVp { addr, vp_code }; let data = data.try_to_vec().expect("Encoding tx data shouldn't fail"); - let tx = Tx::new(tx_code, Some(data), ctx.config.ledger.chain_id.clone()); + let tx = Tx::new( + tx_code, + Some(data), + ctx.config.ledger.chain_id.clone(), + args.tx.expiration, + ); process_tx( ctx, &args.tx, @@ -202,7 +212,12 @@ pub async fn submit_init_account(mut ctx: Context, args: args::TxInitAccount) { }; let data = data.try_to_vec().expect("Encoding tx data shouldn't fail"); - let tx = Tx::new(tx_code, Some(data), ctx.config.ledger.chain_id.clone()); + let tx = Tx::new( + tx_code, + Some(data), + ctx.config.ledger.chain_id.clone(), + args.tx.expiration, + ); let (ctx, initialized_accounts) = process_tx( ctx, &args.tx, @@ -335,7 +350,12 @@ pub async fn submit_init_validator( validator_vp_code, }; let data = data.try_to_vec().expect("Encoding tx data shouldn't fail"); - let tx = Tx::new(tx_code, Some(data), ctx.config.ledger.chain_id.clone()); + let tx = Tx::new( + tx_code, + Some(data), + ctx.config.ledger.chain_id.clone(), + tx_args.expiration, + ); let (mut ctx, initialized_accounts) = process_tx( ctx, &tx_args, @@ -1677,7 +1697,12 @@ pub async fn submit_transfer(mut ctx: Context, args: args::TxTransfer) { .try_to_vec() .expect("Encoding tx data shouldn't fail"); let tx_code = ctx.read_wasm(TX_TRANSFER_WASM); - let tx = Tx::new(tx_code, Some(data), ctx.config.ledger.chain_id.clone()); + let tx = Tx::new( + tx_code, + Some(data), + ctx.config.ledger.chain_id.clone(), + args.tx.expiration, + ); let signing_address = TxSigningKey::WalletAddress(args.source.to_address()); process_tx( @@ -1797,7 +1822,12 @@ pub async fn submit_ibc_transfer(ctx: Context, args: args::TxIbcTransfer) { prost::Message::encode(&any_msg, &mut data) .expect("Encoding tx data shouldn't fail"); - let tx = Tx::new(tx_code, Some(data), ctx.config.ledger.chain_id.clone()); + let tx = Tx::new( + tx_code, + Some(data), + ctx.config.ledger.chain_id.clone(), + args.tx.expiration, + ); process_tx( ctx, &args.tx, @@ -1942,8 +1972,12 @@ pub async fn submit_init_proposal(mut ctx: Context, args: args::InitProposal) { .try_to_vec() .expect("Encoding proposal data 
shouldn't fail"); let tx_code = ctx.read_wasm(TX_INIT_PROPOSAL); - let tx = - Tx::new(tx_code, Some(data), ctx.config.ledger.chain_id.clone()); + let tx = Tx::new( + tx_code, + Some(data), + ctx.config.ledger.chain_id.clone(), + args.tx.expiration, + ); process_tx( ctx, @@ -2087,6 +2121,7 @@ pub async fn submit_vote_proposal(mut ctx: Context, args: args::VoteProposal) { tx_code, Some(data), ctx.config.ledger.chain_id.clone(), + args.tx.expiration, ); process_tx( @@ -2160,7 +2195,7 @@ pub async fn submit_reveal_pk_aux( .expect("Encoding a public key shouldn't fail"); let tx_code = ctx.read_wasm(TX_REVEAL_PK); let chain_id = ctx.config.ledger.chain_id.clone(); - let tx = Tx::new(tx_code, Some(tx_data), chain_id); + let tx = Tx::new(tx_code, Some(tx_data), chain_id, args.expiration); // submit_tx without signing the inner tx let keypair = if let Some(signing_key) = &args.signing_key { @@ -2363,7 +2398,12 @@ pub async fn submit_bond(ctx: Context, args: args::Bond) { }; let data = bond.try_to_vec().expect("Encoding tx data shouldn't fail"); - let tx = Tx::new(tx_code, Some(data), ctx.config.ledger.chain_id.clone()); + let tx = Tx::new( + tx_code, + Some(data), + ctx.config.ledger.chain_id.clone(), + args.tx.expiration, + ); let default_signer = args.source.unwrap_or(args.validator); process_tx( ctx, @@ -2418,7 +2458,12 @@ pub async fn submit_unbond(ctx: Context, args: args::Unbond) { let data = data.try_to_vec().expect("Encoding tx data shouldn't fail"); let tx_code = ctx.read_wasm(TX_UNBOND_WASM); - let tx = Tx::new(tx_code, Some(data), ctx.config.ledger.chain_id.clone()); + let tx = Tx::new( + tx_code, + Some(data), + ctx.config.ledger.chain_id.clone(), + args.tx.expiration, + ); let default_signer = args.source.unwrap_or(args.validator); let (_ctx, _) = process_tx( ctx, @@ -2483,7 +2528,12 @@ pub async fn submit_withdraw(ctx: Context, args: args::Withdraw) { let data = data.try_to_vec().expect("Encoding tx data shouldn't fail"); let tx_code = ctx.read_wasm(TX_WITHDRAW_WASM); - let tx = Tx::new(tx_code, Some(data), ctx.config.ledger.chain_id.clone()); + let tx = Tx::new( + tx_code, + Some(data), + ctx.config.ledger.chain_id.clone(), + args.tx.expiration, + ); let default_signer = args.source.unwrap_or(args.validator); process_tx( ctx, @@ -2569,7 +2619,12 @@ pub async fn submit_validator_commission_change( }; let data = data.try_to_vec().expect("Encoding tx data shouldn't fail"); - let tx = Tx::new(tx_code, Some(data), ctx.config.ledger.chain_id.clone()); + let tx = Tx::new( + tx_code, + Some(data), + ctx.config.ledger.chain_id.clone(), + args.tx.expiration, + ); let default_signer = args.validator; process_tx( ctx, diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index ebdc289b22..056ef59fc2 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -564,6 +564,7 @@ mod test_finalize_block { "wasm_code".as_bytes().to_owned(), Some(format!("transaction data: {}", i).as_bytes().to_owned()), shell.chain_id.clone(), + None, ); let wrapper = WrapperTx::new( Fee { @@ -579,7 +580,7 @@ mod test_finalize_block { None, ); let tx = wrapper - .sign(&keypair, shell.chain_id.clone()) + .sign(&keypair, shell.chain_id.clone(), None) .expect("Test failed"); if i > 1 { processed_txs.push(ProcessedTx { @@ -640,6 +641,7 @@ mod test_finalize_block { "wasm_code".as_bytes().to_owned(), Some(String::from("transaction data").as_bytes().to_owned()), shell.chain_id.clone(), + None, ); let wrapper 
= WrapperTx::new( Fee { @@ -778,6 +780,7 @@ mod test_finalize_block { .to_owned(), ), shell.chain_id.clone(), + None, ); let wrapper_tx = WrapperTx::new( Fee { @@ -816,6 +819,7 @@ mod test_finalize_block { .to_owned(), ), shell.chain_id.clone(), + None, ); let wrapper_tx = WrapperTx::new( Fee { @@ -831,7 +835,7 @@ mod test_finalize_block { None, ); let wrapper = wrapper_tx - .sign(&keypair, shell.chain_id.clone()) + .sign(&keypair, shell.chain_id.clone(), None) .expect("Test failed"); valid_txs.push(wrapper_tx); processed_txs.push(ProcessedTx { @@ -997,6 +1001,7 @@ mod test_finalize_block { tx_code, Some("Encrypted transaction data".as_bytes().to_owned()), shell.chain_id.clone(), + None, ); let wrapper_tx = WrapperTx::new( Fee { diff --git a/apps/src/lib/node/ledger/shell/governance.rs b/apps/src/lib/node/ledger/shell/governance.rs index 330410e0b9..15d0cf9166 100644 --- a/apps/src/lib/node/ledger/shell/governance.rs +++ b/apps/src/lib/node/ledger/shell/governance.rs @@ -77,6 +77,7 @@ where proposal_code, Some(encode(&id)), shell.chain_id.clone(), + None, ); let tx_type = TxType::Decrypted(DecryptedTx::Decrypted { diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 7c4bb21c09..fd59160bee 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -1078,6 +1078,7 @@ mod test_utils { "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), shell.chain_id.clone(), + None, ); let wrapper = WrapperTx::new( Fee { @@ -1165,6 +1166,7 @@ mod test_mempool_validate { "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), shell.chain_id.clone(), + None, ); let mut wrapper = WrapperTx::new( @@ -1180,7 +1182,7 @@ mod test_mempool_validate { #[cfg(not(feature = "mainnet"))] None, ) - .sign(&keypair, shell.chain_id.clone()) + .sign(&keypair, shell.chain_id.clone(), None) .expect("Wrapper signing failed"); let unsigned_wrapper = if let Some(Ok(SignedTxData { @@ -1191,7 +1193,7 @@ mod test_mempool_validate { .take() .map(|data| SignedTxData::try_from_slice(&data[..])) { - Tx::new(vec![], Some(data), shell.chain_id.clone()) + Tx::new(vec![], Some(data), shell.chain_id.clone(), None) } else { panic!("Test failed") }; @@ -1219,6 +1221,7 @@ mod test_mempool_validate { "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), shell.chain_id.clone(), + None, ); let mut wrapper = WrapperTx::new( @@ -1234,7 +1237,7 @@ mod test_mempool_validate { #[cfg(not(feature = "mainnet"))] None, ) - .sign(&keypair, shell.chain_id.clone()) + .sign(&keypair, shell.chain_id.clone(), None) .expect("Wrapper signing failed"); let invalid_wrapper = if let Some(Ok(SignedTxData { @@ -1270,6 +1273,7 @@ mod test_mempool_validate { .expect("Test failed"), ), shell.chain_id.clone(), + None, ) } else { panic!("Test failed"); @@ -1297,6 +1301,7 @@ mod test_mempool_validate { "wasm_code".as_bytes().to_owned(), None, shell.chain_id.clone(), + None, ); let result = shell.mempool_validate( @@ -1319,6 +1324,7 @@ mod test_mempool_validate { "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), shell.chain_id.clone(), + None, ); let wrapper = WrapperTx::new( @@ -1334,7 +1340,7 @@ mod test_mempool_validate { #[cfg(not(feature = "mainnet"))] None, ) - .sign(&keypair, shell.chain_id.clone()) + .sign(&keypair, shell.chain_id.clone(), None) .expect("Wrapper signing failed"); let tx_type = match process_tx(wrapper.clone()).expect("Test failed") { @@ -1430,6 +1436,7 
@@ mod test_mempool_validate { "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), wrong_chain_id.clone(), + None, ) .sign(&keypair); diff --git a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index 6231a8ac0f..f83c07d826 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -177,8 +177,10 @@ pub(super) mod record { #[cfg(test)] mod test_prepare_proposal { use borsh::BorshSerialize; - use namada::types::storage::Epoch; - use namada::types::transaction::{Fee, WrapperTx}; + use namada::{ + proof_of_stake::Epoch, + types::transaction::{Fee, WrapperTx}, + }; use super::*; use crate::node::ledger::shell::test_utils::{gen_keypair, TestShell}; @@ -193,6 +195,7 @@ mod test_prepare_proposal { "wasm_code".as_bytes().to_owned(), Some("transaction_data".as_bytes().to_owned()), shell.chain_id.clone(), + None, ); let req = RequestPrepareProposal { txs: vec![tx.to_bytes()], @@ -219,6 +222,7 @@ mod test_prepare_proposal { "wasm_code".as_bytes().to_owned(), Some("transaction_data".as_bytes().to_owned()), shell.chain_id.clone(), + None, ); // an unsigned wrapper will cause an error in processing let wrapper = Tx::new( @@ -241,6 +245,7 @@ mod test_prepare_proposal { .expect("Test failed"), ), shell.chain_id.clone(), + None, ) .to_bytes(); #[allow(clippy::redundant_clone)] @@ -280,6 +285,7 @@ mod test_prepare_proposal { "wasm_code".as_bytes().to_owned(), Some(format!("transaction data: {}", i).as_bytes().to_owned()), shell.chain_id.clone(), + None, ); expected_decrypted.push(Tx::from(DecryptedTx::Decrypted { tx: tx.clone(), @@ -300,7 +306,7 @@ mod test_prepare_proposal { None, ); let wrapper = wrapper_tx - .sign(&keypair, shell.chain_id.clone()) + .sign(&keypair, shell.chain_id.clone(), None) .expect("Test failed"); shell.enqueue_tx(wrapper_tx); expected_wrapper.push(wrapper.clone()); diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 4ecf020c10..6932050a91 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -344,10 +344,10 @@ where #[cfg(test)] mod test_process_proposal { use borsh::BorshDeserialize; + use namada::proof_of_stake::Epoch; use namada::proto::SignedTxData; use namada::types::hash::Hash; use namada::types::key::*; - use namada::types::storage::Epoch; use namada::types::token::Amount; use namada::types::transaction::encrypted::EncryptedTx; use namada::types::transaction::protocol::ProtocolTxType; @@ -370,6 +370,7 @@ mod test_process_proposal { "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), shell.chain_id.clone(), + None, ); let wrapper = WrapperTx::new( Fee { @@ -388,6 +389,7 @@ mod test_process_proposal { vec![], Some(TxType::Wrapper(wrapper).try_to_vec().expect("Test failed")), shell.chain_id.clone(), + None, ) .to_bytes(); #[allow(clippy::redundant_clone)] @@ -420,6 +422,7 @@ mod test_process_proposal { "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), shell.chain_id.clone(), + None, ); let timestamp = tx.timestamp; let mut wrapper = WrapperTx::new( @@ -435,7 +438,7 @@ mod test_process_proposal { #[cfg(not(feature = "mainnet"))] None, ) - .sign(&keypair, shell.chain_id.clone()) + .sign(&keypair, shell.chain_id.clone(), None) .expect("Test failed"); let new_tx = if let Some(Ok(SignedTxData { data: Some(data), @@ -471,6 +474,7 @@ 
mod test_process_proposal { ), timestamp, chain_id: shell.chain_id.clone(), + expiration: None, } } else { panic!("Test failed"); @@ -508,6 +512,7 @@ mod test_process_proposal { "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), shell.chain_id.clone(), + None, ); let wrapper = WrapperTx::new( Fee { @@ -522,7 +527,7 @@ mod test_process_proposal { #[cfg(not(feature = "mainnet"))] None, ) - .sign(&keypair, shell.chain_id.clone()) + .sign(&keypair, shell.chain_id.clone(), None) .expect("Test failed"); let request = ProcessProposal { txs: vec![wrapper.to_bytes()], @@ -567,6 +572,7 @@ mod test_process_proposal { "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), shell.chain_id.clone(), + None, ); let wrapper = WrapperTx::new( Fee { @@ -581,7 +587,7 @@ mod test_process_proposal { #[cfg(not(feature = "mainnet"))] None, ) - .sign(&keypair, shell.chain_id.clone()) + .sign(&keypair, shell.chain_id.clone(), None) .expect("Test failed"); let request = ProcessProposal { @@ -618,6 +624,7 @@ mod test_process_proposal { "wasm_code".as_bytes().to_owned(), Some(format!("transaction data: {}", i).as_bytes().to_owned()), shell.chain_id.clone(), + None, ); let wrapper = WrapperTx::new( Fee { @@ -692,6 +699,7 @@ mod test_process_proposal { "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), shell.chain_id.clone(), + None, ); let wrapper = WrapperTx::new( Fee { @@ -755,6 +763,7 @@ mod test_process_proposal { "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), shell.chain_id.clone(), + None, ); let mut wrapper = WrapperTx::new( Fee { @@ -858,6 +867,7 @@ mod test_process_proposal { "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), shell.chain_id.clone(), + None, ); let mut tx = Tx::from(TxType::Decrypted(DecryptedTx::Decrypted { @@ -897,6 +907,7 @@ mod test_process_proposal { "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), shell.chain_id.clone(), + None, ); let mut tx = Tx::from(TxType::Raw(tx)); tx.chain_id = shell.chain_id.clone(); @@ -934,6 +945,7 @@ mod test_process_proposal { "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), shell.chain_id.clone(), + None, ); let wrapper = WrapperTx::new( Fee { @@ -949,7 +961,7 @@ mod test_process_proposal { None, ); let signed = wrapper - .sign(&keypair, shell.chain_id.clone()) + .sign(&keypair, shell.chain_id.clone(), None) .expect("Test failed"); // Write wrapper hash to storage @@ -1007,6 +1019,7 @@ mod test_process_proposal { "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), shell.chain_id.clone(), + None, ); let wrapper = WrapperTx::new( Fee { @@ -1022,7 +1035,7 @@ mod test_process_proposal { None, ); let signed = wrapper - .sign(&keypair, shell.chain_id.clone()) + .sign(&keypair, shell.chain_id.clone(), None) .expect("Test failed"); // Run validation @@ -1064,6 +1077,7 @@ mod test_process_proposal { "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), shell.chain_id.clone(), + None, ); let wrapper = WrapperTx::new( Fee { @@ -1080,7 +1094,7 @@ mod test_process_proposal { ); let inner_unsigned_hash = wrapper.tx_hash.clone(); let signed = wrapper - .sign(&keypair, shell.chain_id.clone()) + .sign(&keypair, shell.chain_id.clone(), None) .expect("Test failed"); // Write inner hash to storage @@ -1149,6 +1163,7 @@ mod test_process_proposal { "wasm_code".as_bytes().to_owned(), 
Some("transaction data".as_bytes().to_owned()), shell.chain_id.clone(), + None, ); let wrapper = WrapperTx::new( Fee { @@ -1165,7 +1180,7 @@ mod test_process_proposal { ); let inner_unsigned_hash = wrapper.tx_hash.clone(); let signed = wrapper - .sign(&keypair, shell.chain_id.clone()) + .sign(&keypair, shell.chain_id.clone(), None) .expect("Test failed"); let new_wrapper = WrapperTx::new( @@ -1182,7 +1197,7 @@ mod test_process_proposal { None, ); let new_signed = new_wrapper - .sign(&keypair, shell.chain_id.clone()) + .sign(&keypair, shell.chain_id.clone(), None) .expect("Test failed"); // Run validation @@ -1220,6 +1235,7 @@ mod test_process_proposal { "wasm_code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), shell.chain_id.clone(), + None, ); let wrapper = WrapperTx::new( Fee { @@ -1236,7 +1252,7 @@ mod test_process_proposal { ); let wrong_chain_id = ChainId("Wrong chain id".to_string()); let signed = wrapper - .sign(&keypair, wrong_chain_id.clone()) + .sign(&keypair, wrong_chain_id.clone(), None) .expect("Test failed"); let protocol_tx = ProtocolTxType::EthereumStateUpdate(tx).sign( @@ -1282,6 +1298,7 @@ mod test_process_proposal { "wasm_code".as_bytes().to_owned(), Some("new transaction data".as_bytes().to_owned()), wrong_chain_id.clone(), + None, ); let decrypted: Tx = DecryptedTx::Decrypted { tx: tx.clone(), diff --git a/core/src/proto/mod.rs b/core/src/proto/mod.rs index c945d229b9..cda7c8f1ac 100644 --- a/core/src/proto/mod.rs +++ b/core/src/proto/mod.rs @@ -10,6 +10,7 @@ mod tests { use data_encoding::HEXLOWER; use generated::types::Tx; use prost::Message; + use std::time::SystemTime; use super::*; use crate::types::chain::ChainId; @@ -19,8 +20,9 @@ mod tests { let tx = Tx { code: "wasm code".as_bytes().to_owned(), data: Some("arbitrary data".as_bytes().to_owned()), - timestamp: Some(std::time::SystemTime::now().into()), + timestamp: Some(SystemTime::now().into()), chain_id: ChainId::default().0, + expiration: Some(SystemTime::now().into()), }; let mut tx_bytes = vec![]; tx.encode(&mut tx_bytes).unwrap(); diff --git a/core/src/proto/types.rs b/core/src/proto/types.rs index 657083b924..18d3ccbd1f 100644 --- a/core/src/proto/types.rs +++ b/core/src/proto/types.rs @@ -138,17 +138,20 @@ pub struct SigningTx { pub data: Option>, pub timestamp: DateTimeUtc, pub chain_id: ChainId, + pub expiration: Option, } impl SigningTx { pub fn hash(&self) -> [u8; 32] { let timestamp = Some(self.timestamp.into()); + let expiration = self.expiration.map(|e| e.into()); let mut bytes = vec![]; types::Tx { code: self.code_hash.to_vec(), data: self.data.clone(), timestamp, chain_id: self.chain_id.as_str().to_owned(), + expiration, } .encode(&mut bytes) .expect("encoding a transaction failed"); @@ -170,6 +173,7 @@ impl SigningTx { data: Some(signed), timestamp: self.timestamp, chain_id: self.chain_id, + expiration: self.expiration, } } @@ -190,6 +194,7 @@ impl SigningTx { data, timestamp: self.timestamp, chain_id: self.chain_id.clone(), + expiration: self.expiration, }; let signed_data = tx.hash(); common::SigScheme::verify_signature_raw(pk, &signed_data, sig) @@ -204,6 +209,7 @@ impl SigningTx { data: self.data, timestamp: self.timestamp, chain_id: self.chain_id, + expiration: self.expiration, }) } else { None @@ -218,6 +224,7 @@ impl From for SigningTx { data: tx.data, timestamp: tx.timestamp, chain_id: tx.chain_id, + expiration: tx.expiration, } } } @@ -233,6 +240,7 @@ pub struct Tx { pub data: Option>, pub timestamp: DateTimeUtc, pub chain_id: ChainId, + pub expiration: Option, 
} impl TryFrom<&[u8]> for Tx { @@ -245,12 +253,17 @@ impl TryFrom<&[u8]> for Tx { None => return Err(Error::NoTimestampError), }; let chain_id = ChainId(tx.chain_id); + let expiration = match tx.expiration { + Some(e) => Some(e.try_into().map_err(Error::InvalidTimestamp)?), + None => None, + }; Ok(Tx { code: tx.code, data: tx.data, timestamp, chain_id, + expiration, }) } } @@ -258,11 +271,14 @@ impl TryFrom<&[u8]> for Tx { impl From for types::Tx { fn from(tx: Tx) -> Self { let timestamp = Some(tx.timestamp.into()); + let expiration = tx.expiration.map(|e| e.into()); + types::Tx { code: tx.code, data: tx.data, timestamp, chain_id: tx.chain_id.as_str().to_owned(), + expiration, } } } @@ -358,12 +374,14 @@ impl Tx { code: Vec, data: Option>, chain_id: ChainId, + expiration: Option, ) -> Self { Tx { code, data, timestamp: DateTimeUtc::now(), chain_id, + expiration, } } @@ -390,6 +408,7 @@ impl Tx { data: signed_data.data, timestamp: self.timestamp, chain_id: self.chain_id.clone(), + expiration: self.expiration, }; unsigned_tx.hash() } @@ -513,7 +532,8 @@ mod tests { let code = "wasm code".as_bytes().to_owned(); let data = "arbitrary data".as_bytes().to_owned(); let chain_id = ChainId::default(); - let tx = Tx::new(code.clone(), Some(data.clone()), chain_id.clone()); + let tx = + Tx::new(code.clone(), Some(data.clone()), chain_id.clone(), None); let bytes = tx.to_bytes(); let tx_from_bytes = @@ -525,6 +545,7 @@ mod tests { data: Some(data), timestamp: None, chain_id: chain_id.0, + expiration: None, }; let mut bytes = vec![]; types_tx.encode(&mut bytes).expect("encoding failed"); diff --git a/core/src/types/transaction/decrypted.rs b/core/src/types/transaction/decrypted.rs index 285113c36a..f8c4327cdb 100644 --- a/core/src/types/transaction/decrypted.rs +++ b/core/src/types/transaction/decrypted.rs @@ -93,12 +93,13 @@ pub mod decrypted_tx { .try_to_vec() .expect("Encrypting transaction should not fail"), ), - // If undecrytable we cannot extract the ChainId. + // If undecrytable we cannot extract the ChainId and expiration. // If instead the tx gets decrypted successfully, the correct - // chain id is serialized inside the data field - // of the Tx, while the one available - // in the chain_id field is just a placeholder + // chain id and expiration are serialized inside the data field + // of the Tx, while the ones available + // in the chain_id and expiration field are just placeholders ChainId(String::new()), + None, ) } } diff --git a/core/src/types/transaction/mod.rs b/core/src/types/transaction/mod.rs index 18f4475701..7c5d2ec1f3 100644 --- a/core/src/types/transaction/mod.rs +++ b/core/src/types/transaction/mod.rs @@ -246,8 +246,9 @@ pub mod tx_types { vec![], Some(ty.try_to_vec().unwrap()), ChainId(String::new()), /* No need to provide a valid - * ChainId when casting back from + * ChainId or expiration when casting back from * TxType */ + None, ) } } @@ -304,6 +305,7 @@ pub mod tx_types { data: Some(data.clone()), timestamp: tx.timestamp, chain_id: tx.chain_id.clone(), + expiration: tx.expiration, } .hash(); match TxType::try_from(Tx { @@ -311,6 +313,7 @@ pub mod tx_types { data: Some(data), timestamp: tx.timestamp, chain_id: tx.chain_id, + expiration: tx.expiration, }) .map_err(|err| TxError::Deserialization(err.to_string()))? 
{ @@ -351,6 +354,7 @@ pub mod tx_types { use super::*; use crate::types::address::nam; use crate::types::storage::Epoch; + use crate::types::time::DateTimeUtc; fn gen_keypair() -> common::SecretKey { use rand::prelude::ThreadRng; @@ -368,6 +372,7 @@ pub mod tx_types { "wasm code".as_bytes().to_owned(), None, ChainId::default(), + None, ); match process_tx(tx.clone()).expect("Test failed") { @@ -385,6 +390,7 @@ pub mod tx_types { "code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), ChainId::default(), + None, ); let tx = Tx::new( "wasm code".as_bytes().to_owned(), @@ -394,6 +400,7 @@ pub mod tx_types { .expect("Test failed"), ), inner.chain_id.clone(), + None, ); match process_tx(tx).expect("Test failed") { @@ -410,6 +417,7 @@ pub mod tx_types { "code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), ChainId::default(), + None, ); let tx = Tx::new( "wasm code".as_bytes().to_owned(), @@ -419,6 +427,7 @@ pub mod tx_types { .expect("Test failed"), ), inner.chain_id.clone(), + None, ) .sign(&gen_keypair()); @@ -437,6 +446,7 @@ pub mod tx_types { "wasm code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), ChainId::default(), + None, ); // the signed tx let wrapper = WrapperTx::new( @@ -452,7 +462,7 @@ pub mod tx_types { #[cfg(not(feature = "mainnet"))] None, ) - .sign(&keypair, tx.chain_id.clone()) + .sign(&keypair, tx.chain_id.clone(), Some(DateTimeUtc::now())) .expect("Test failed"); match process_tx(wrapper).expect("Test failed") { @@ -475,6 +485,7 @@ pub mod tx_types { "wasm code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), ChainId::default(), + None, ); // the signed tx let wrapper = WrapperTx::new( @@ -497,6 +508,7 @@ pub mod tx_types { TxType::Wrapper(wrapper).try_to_vec().expect("Test failed"), ), ChainId::default(), + None, ); let result = process_tx(tx).expect_err("Test failed"); assert_matches!(result, TxError::Unsigned(_)); @@ -511,6 +523,7 @@ pub mod tx_types { "transaction data".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), ChainId::default(), + None, ); let decrypted = DecryptedTx::Decrypted { tx: payload.clone(), @@ -539,6 +552,7 @@ pub mod tx_types { "transaction data".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), ChainId::default(), + None, ); let decrypted = DecryptedTx::Decrypted { tx: payload.clone(), @@ -561,6 +575,7 @@ pub mod tx_types { vec![], Some(signed.try_to_vec().expect("Test failed")), ChainId::default(), + None, ); match process_tx(tx).expect("Test failed") { TxType::Decrypted(DecryptedTx::Decrypted { diff --git a/core/src/types/transaction/protocol.rs b/core/src/types/transaction/protocol.rs index 3139ee598e..a0aee560ed 100644 --- a/core/src/types/transaction/protocol.rs +++ b/core/src/types/transaction/protocol.rs @@ -101,6 +101,7 @@ mod protocol_txs { .expect("Could not serialize ProtocolTx"), ), chain_id, + None, ) .sign(signing_key) } @@ -130,6 +131,7 @@ mod protocol_txs { .expect("Serializing request should not fail"), ), chain_id, + None, ) .sign(signing_key), ) diff --git a/core/src/types/transaction/wrapper.rs b/core/src/types/transaction/wrapper.rs index 555186f221..5de138bacd 100644 --- a/core/src/types/transaction/wrapper.rs +++ b/core/src/types/transaction/wrapper.rs @@ -16,6 +16,7 @@ pub mod wrapper_tx { use crate::types::chain::ChainId; use crate::types::key::*; use crate::types::storage::Epoch; + use crate::types::time::DateTimeUtc; use crate::types::token::Amount; use 
crate::types::transaction::encrypted::EncryptedTx; use crate::types::transaction::{EncryptionKey, Hash, TxError, TxType}; @@ -251,6 +252,7 @@ pub mod wrapper_tx { &self, keypair: &common::SecretKey, chain_id: ChainId, + expiration: Option, ) -> Result { if self.pk != keypair.ref_to() { return Err(WrapperTxErr::InvalidKeyPair); @@ -263,6 +265,7 @@ pub mod wrapper_tx { .expect("Could not serialize WrapperTx"), ), chain_id, + expiration, ) .sign(keypair)) } @@ -368,6 +371,7 @@ pub mod wrapper_tx { "wasm code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), ChainId::default(), + Some(DateTimeUtc::now()), ); let wrapper = WrapperTx::new( @@ -397,6 +401,7 @@ pub mod wrapper_tx { "wasm code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), ChainId::default(), + Some(DateTimeUtc::now()), ); let mut wrapper = WrapperTx::new( @@ -432,6 +437,7 @@ pub mod wrapper_tx { "wasm code".as_bytes().to_owned(), Some("transaction data".as_bytes().to_owned()), ChainId::default(), + Some(DateTimeUtc::now()), ); // the signed tx let mut tx = WrapperTx::new( @@ -447,7 +453,7 @@ pub mod wrapper_tx { #[cfg(not(feature = "mainnet"))] None, ) - .sign(&keypair, ChainId::default()) + .sign(&keypair, ChainId::default(), None) .expect("Test failed"); // we now try to alter the inner tx maliciously @@ -469,6 +475,7 @@ pub mod wrapper_tx { "Give me all the money".as_bytes().to_owned(), None, ChainId::default(), + None, ); // We replace the inner tx with a malicious one diff --git a/proto/types.proto b/proto/types.proto index 371416cff7..0414da45ef 100644 --- a/proto/types.proto +++ b/proto/types.proto @@ -10,6 +10,7 @@ message Tx { optional bytes data = 2; google.protobuf.Timestamp timestamp = 3; string chain_id = 4; + optional google.protobuf.Timestamp expiration = 5; } message Dkg { string data = 1; } diff --git a/shared/src/ledger/ibc/vp/mod.rs b/shared/src/ledger/ibc/vp/mod.rs index abf21a195f..01fceea7b4 100644 --- a/shared/src/ledger/ibc/vp/mod.rs +++ b/shared/src/ledger/ibc/vp/mod.rs @@ -602,8 +602,9 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) - .sign(&keypair_1()); + let tx = + Tx::new(tx_code, Some(tx_data), storage.chain_id.clone(), None) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -625,14 +626,9 @@ mod tests { let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -642,8 +638,9 @@ mod tests { let tx_index = TxIndex::default(); let tx_code = vec![]; let tx_data = vec![]; - let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) - .sign(&keypair_1()); + let tx = + Tx::new(tx_code, Some(tx_data), storage.chain_id.clone(), None) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -724,8 +721,9 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) - .sign(&keypair_1()); + let tx = + Tx::new(tx_code, 
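[Editor's note] `WrapperTx::sign` now also takes the chain id and the optional expiration and copies both into the outer `Tx` it produces, so the wrapper signature commits to them. A sketch of the call shape, mirroring the tests above (the `namada::` re-export paths are an assumption; signing fails when the keypair does not match the wrapper's `pk`):

    use namada::proto::Tx;
    use namada::types::chain::ChainId;
    use namada::types::key::common;
    use namada::types::time::DateTimeUtc;
    use namada::types::transaction::WrapperTx;

    fn sign_expiring_wrapper(
        wrapper: &WrapperTx,
        keypair: &common::SecretKey,
        chain_id: ChainId,
    ) -> Tx {
        wrapper
            // `None` instead of `Some(..)` would make the wrapper never expire.
            .sign(keypair, chain_id, Some(DateTimeUtc::now()))
            .expect("signing with the wrapper's own keypair should not fail")
    }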
Some(tx_data), storage.chain_id.clone(), None) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -747,14 +745,9 @@ mod tests { ); let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -784,8 +777,9 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) - .sign(&keypair_1()); + let tx = + Tx::new(tx_code, Some(tx_data), storage.chain_id.clone(), None) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -807,14 +801,9 @@ mod tests { ); let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -841,8 +830,9 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) - .sign(&keypair_1()); + let tx = + Tx::new(tx_code, Some(tx_data), storage.chain_id.clone(), None) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -924,8 +914,9 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) - .sign(&keypair_1()); + let tx = + Tx::new(tx_code, Some(tx_data), storage.chain_id.clone(), None) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -947,14 +938,9 @@ mod tests { ); let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1013,8 +999,9 @@ mod tests { let tx_index = TxIndex::default(); let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) - .sign(&keypair_1()); + let tx = + Tx::new(tx_code, Some(tx_data), storage.chain_id.clone(), None) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1035,14 +1022,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1089,8 +1071,9 @@ mod tests { let tx_index = TxIndex::default(); let 
mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) - .sign(&keypair_1()); + let tx = + Tx::new(tx_code, Some(tx_data), storage.chain_id.clone(), None) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1111,14 +1094,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1151,8 +1129,9 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) - .sign(&keypair_1()); + let tx = + Tx::new(tx_code, Some(tx_data), storage.chain_id.clone(), None) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1173,14 +1152,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1232,8 +1206,9 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) - .sign(&keypair_1()); + let tx = + Tx::new(tx_code, Some(tx_data), storage.chain_id.clone(), None) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1254,14 +1229,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1321,8 +1291,9 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) - .sign(&keypair_1()); + let tx = + Tx::new(tx_code, Some(tx_data), storage.chain_id.clone(), None) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1343,14 +1314,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1407,8 +1373,9 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) - .sign(&keypair_1()); + let tx = + Tx::new(tx_code, Some(tx_data), storage.chain_id.clone(), None) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = 
wasm::compilation_cache::common::testing::cache(); @@ -1429,14 +1396,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1448,8 +1410,9 @@ mod tests { let tx_index = TxIndex::default(); let tx_code = vec![]; let tx_data = vec![]; - let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) - .sign(&keypair_1()); + let tx = + Tx::new(tx_code, Some(tx_data), storage.chain_id.clone(), None) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1470,14 +1433,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1490,8 +1448,9 @@ mod tests { let tx_index = TxIndex::default(); let tx_code = vec![]; let tx_data = vec![]; - let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) - .sign(&keypair_1()); + let tx = + Tx::new(tx_code, Some(tx_data), storage.chain_id.clone(), None) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1514,14 +1473,9 @@ mod tests { ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1575,8 +1529,9 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) - .sign(&keypair_1()); + let tx = + Tx::new(tx_code, Some(tx_data), storage.chain_id.clone(), None) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1597,14 +1552,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1665,8 +1615,9 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) - .sign(&keypair_1()); + let tx = + Tx::new(tx_code, Some(tx_data), storage.chain_id.clone(), None) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1687,14 +1638,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1760,8 +1706,9 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; 
msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) - .sign(&keypair_1()); + let tx = + Tx::new(tx_code, Some(tx_data), storage.chain_id.clone(), None) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1782,14 +1729,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1847,8 +1789,9 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) - .sign(&keypair_1()); + let tx = + Tx::new(tx_code, Some(tx_data), storage.chain_id.clone(), None) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1869,14 +1812,9 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1941,8 +1879,9 @@ mod tests { let tx_code = vec![]; let mut tx_data = vec![]; msg.to_any().encode(&mut tx_data).expect("encoding failed"); - let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) - .sign(&keypair_1()); + let tx = + Tx::new(tx_code, Some(tx_data), storage.chain_id.clone(), None) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -1963,14 +1902,9 @@ mod tests { ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } #[test] @@ -1991,8 +1925,9 @@ mod tests { let tx_index = TxIndex::default(); let tx_code = vec![]; let tx_data = vec![]; - let tx = Tx::new(tx_code, Some(tx_data), storage.chain_id.clone()) - .sign(&keypair_1()); + let tx = + Tx::new(tx_code, Some(tx_data), storage.chain_id.clone(), None) + .sign(&keypair_1()); let gas_meter = VpGasMeter::new(0); let (vp_wasm_cache, _vp_cache_dir) = wasm::compilation_cache::common::testing::cache(); @@ -2013,13 +1948,8 @@ mod tests { ); let ibc = Ibc { ctx }; - assert!( - ibc.validate_tx( - tx.data.as_ref().unwrap(), - &keys_changed, - &verifiers - ) - .expect("validation failed") - ); + assert!(ibc + .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) + .expect("validation failed")); } } diff --git a/shared/src/ledger/queries/shell.rs b/shared/src/ledger/queries/shell.rs index 3a799a5c71..8d483f2419 100644 --- a/shared/src/ledger/queries/shell.rs +++ b/shared/src/ledger/queries/shell.rs @@ -387,8 +387,12 @@ mod test { // Request dry run tx let tx_no_op = std::fs::read(TX_NO_OP_WASM).expect("cannot load wasm"); - let tx = - Tx::new(tx_no_op, None, client.wl_storage.storage.chain_id.clone()); + let tx = Tx::new( + tx_no_op, + None, + client.wl_storage.storage.chain_id.clone(), + None, + ); let tx_bytes = tx.to_bytes(); 
let result = RPC .shell() diff --git a/shared/src/vm/wasm/run.rs b/shared/src/vm/wasm/run.rs index 2de7bfae99..8d1719a3a7 100644 --- a/shared/src/vm/wasm/run.rs +++ b/shared/src/vm/wasm/run.rs @@ -551,7 +551,7 @@ mod tests { input, }; let tx_data = eval_vp.try_to_vec().unwrap(); - let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone()); + let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone(), None); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); // When the `eval`ed VP doesn't run out of memory, it should return // `true` @@ -580,7 +580,7 @@ mod tests { input, }; let tx_data = eval_vp.try_to_vec().unwrap(); - let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone()); + let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone(), None); // When the `eval`ed VP runs out of memory, its result should be // `false`, hence we should also get back `false` from the VP that // called `eval`. @@ -625,7 +625,7 @@ mod tests { // Allocating `2^23` (8 MiB) should be below the memory limit and // shouldn't fail let tx_data = 2_usize.pow(23).try_to_vec().unwrap(); - let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone()); + let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone(), None); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let result = vp( vp_code.clone(), @@ -646,7 +646,7 @@ mod tests { // Allocating `2^24` (16 MiB) should be above the memory limit and // should fail let tx_data = 2_usize.pow(24).try_to_vec().unwrap(); - let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone()); + let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone(), None); let error = vp( vp_code, &tx, @@ -739,7 +739,7 @@ mod tests { // limit and should fail let len = 2_usize.pow(24); let tx_data: Vec = vec![6_u8; len]; - let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone()); + let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone(), None); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let result = vp( vp_code, @@ -848,7 +848,7 @@ mod tests { // Borsh. 
storage.write(&key, value.try_to_vec().unwrap()).unwrap(); let tx_data = key.try_to_vec().unwrap(); - let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone()); + let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone(), None); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let error = vp( vp_read_key, @@ -906,7 +906,7 @@ mod tests { input, }; let tx_data = eval_vp.try_to_vec().unwrap(); - let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone()); + let tx = Tx::new(vec![], Some(tx_data), storage.chain_id.clone(), None); let (vp_cache, _) = wasm::compilation_cache::common::testing::cache(); let passed = vp( vp_eval, @@ -1011,7 +1011,7 @@ mod tests { ) .expect("unexpected error converting wat2wasm").into_owned(); - let tx = Tx::new(vec![], None, ChainId::default()); + let tx = Tx::new(vec![], None, ChainId::default(), None); let tx_index = TxIndex::default(); let mut storage = TestStorage::default(); let addr = storage.address_gen.generate_address("rng seed"); diff --git a/tests/src/vm_host_env/mod.rs b/tests/src/vm_host_env/mod.rs index 230306ecfb..d41c33ccf9 100644 --- a/tests/src/vm_host_env/mod.rs +++ b/tests/src/vm_host_env/mod.rs @@ -135,13 +135,11 @@ mod tests { // Trying to delete a validity predicate should fail let key = storage::Key::validity_predicate(&test_account); - assert!( - panic::catch_unwind(|| { tx::ctx().delete(&key).unwrap() }) - .err() - .map(|a| a.downcast_ref::().cloned().unwrap()) - .unwrap() - .contains("CannotDeleteVp") - ); + assert!(panic::catch_unwind(|| { tx::ctx().delete(&key).unwrap() }) + .err() + .map(|a| a.downcast_ref::().cloned().unwrap()) + .unwrap() + .contains("CannotDeleteVp")); } #[test] @@ -446,6 +444,7 @@ mod tests { // Use some arbitrary bytes for tx code let code = vec![4, 3, 2, 1, 0]; + let expiration = Some(DateTimeUtc::now()); for data in &[ // Tx with some arbitrary data Some(vec![1, 2, 3, 4].repeat(10)), @@ -457,6 +456,7 @@ mod tests { code.clone(), data.clone(), env.wl_storage.storage.chain_id.clone(), + expiration, ) .sign(&keypair); let tx_data = env.tx.data.as_ref().expect("data should exist"); @@ -465,21 +465,17 @@ mod tests { .expect("decoding signed data we just signed") }); assert_eq!(&signed_tx_data.data, data); - assert!( - vp::CTX - .verify_tx_signature(&pk, &signed_tx_data.sig) - .unwrap() - ); + assert!(vp::CTX + .verify_tx_signature(&pk, &signed_tx_data.sig) + .unwrap()); let other_keypair = key::testing::keypair_2(); - assert!( - !vp::CTX - .verify_tx_signature( - &other_keypair.ref_to(), - &signed_tx_data.sig - ) - .unwrap() - ); + assert!(!vp::CTX + .verify_tx_signature( + &other_keypair.ref_to(), + &signed_tx_data.sig + ) + .unwrap()); } } @@ -568,6 +564,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // get and increment the connection counter @@ -606,6 +603,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); @@ -644,6 +642,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // get and update the client without a header @@ -690,6 +689,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // update the client with the message @@ 
-724,6 +724,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // upgrade the client with the message @@ -766,6 +767,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // get and increment the connection counter @@ -804,6 +806,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // init a connection with the message @@ -834,6 +837,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // open the connection with the message @@ -874,6 +878,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // open try a connection with the message @@ -905,6 +910,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // open the connection with the mssage @@ -950,6 +956,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // not bind a port @@ -992,6 +999,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // bind a port @@ -1037,6 +1045,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // init a channel with the message @@ -1062,6 +1071,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // open the channle with the message @@ -1104,6 +1114,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // try open a channel with the message @@ -1130,6 +1141,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // open a channel with the message @@ -1174,6 +1186,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // close the channel with the message @@ -1218,6 +1231,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); @@ -1267,6 +1281,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // send the token and a packet with the data @@ -1308,6 +1323,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // ack the packet with the message @@ -1361,6 +1377,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: 
None, } .sign(&key::testing::keypair_1()); // send the token and a packet with the data @@ -1430,6 +1447,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // receive a packet with the message @@ -1514,6 +1532,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // receive a packet with the message @@ -1565,6 +1584,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // send a packet with the message @@ -1595,6 +1615,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // ack the packet with the message @@ -1650,6 +1671,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); // receive a packet with the message @@ -1716,6 +1738,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); @@ -1792,6 +1815,7 @@ mod tests { data: Some(tx_data.clone()), timestamp: DateTimeUtc::now(), chain_id: ChainId::default(), + expiration: None, } .sign(&key::testing::keypair_1()); diff --git a/tests/src/vm_host_env/tx.rs b/tests/src/vm_host_env/tx.rs index 9565fe9619..a3183525bb 100644 --- a/tests/src/vm_host_env/tx.rs +++ b/tests/src/vm_host_env/tx.rs @@ -78,7 +78,7 @@ impl Default for TestTxEnv { vp_cache_dir, tx_wasm_cache, tx_cache_dir, - tx: Tx::new(vec![], None, chain_id), + tx: Tx::new(vec![], None, chain_id, None), } } } diff --git a/tests/src/vm_host_env/vp.rs b/tests/src/vm_host_env/vp.rs index 023167eee4..82b436866b 100644 --- a/tests/src/vm_host_env/vp.rs +++ b/tests/src/vm_host_env/vp.rs @@ -74,7 +74,7 @@ impl Default for TestVpEnv { wl_storage, iterators: PrefixIterators::default(), gas_meter: VpGasMeter::default(), - tx: Tx::new(vec![], None, chain_id), + tx: Tx::new(vec![], None, chain_id, None), tx_index: TxIndex::default(), keys_changed: BTreeSet::default(), verifiers: BTreeSet::default(), From 73a360ab44b7c1d7effd0e15e8a81f98e0e6dcfa Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Thu, 2 Feb 2023 18:18:29 +0100 Subject: [PATCH 49/58] Updates client for tx expiration --- apps/src/lib/cli.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/apps/src/lib/cli.rs b/apps/src/lib/cli.rs index 1de6b51b5e..574969ef89 100644 --- a/apps/src/lib/cli.rs +++ b/apps/src/lib/cli.rs @@ -1621,7 +1621,7 @@ pub mod args { const DRY_RUN_TX: ArgFlag = flag("dry-run"); const DUMP_TX: ArgFlag = flag("dump-tx"); const EPOCH: ArgOpt = arg_opt("epoch"); - const EXPIRATION: ArgOpt = arg_opt("expiration"); + const EXPIRATION_OPT: ArgOpt = arg_opt("expiration"); const FORCE: ArgFlag = flag("force"); const DONT_PREFETCH_WASM: ArgFlag = flag("dont-prefetch-wasm"); const GAS_AMOUNT: ArgDefault = @@ -2926,6 +2926,7 @@ pub mod args { "The maximum amount of gas needed to run transaction", ), ) + .arg(EXPIRATION_OPT.def().about("The expiration datetime of the transaction, after which the tx won't be accepted anymore. 
All of these examples are equivalent:\n2012-12-12T12:12:12Z\n2012-12-12 12:12:12Z\n2012- 12-12T12: 12:12Z")) .arg( SIGNING_KEY_OPT .def() @@ -2957,7 +2958,7 @@ pub mod args { let fee_amount = GAS_AMOUNT.parse(matches); let fee_token = GAS_TOKEN.parse(matches); let gas_limit = GAS_LIMIT.parse(matches).into(); - let expiration = EXPIRATION.parse(matches); + let expiration = EXPIRATION_OPT.parse(matches); let signing_key = SIGNING_KEY_OPT.parse(matches); let signer = SIGNER.parse(matches); From 0744c11e59a8d5810cebdbe287f57abcba4303c6 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 3 Feb 2023 17:07:09 +0100 Subject: [PATCH 50/58] Tx expiration validation --- apps/src/lib/node/ledger/shell/mod.rs | 20 ++++++- .../lib/node/ledger/shell/process_proposal.rs | 60 ++++++++++++++++++- core/src/ledger/storage/mod.rs | 11 ++++ core/src/types/transaction/mod.rs | 1 - 4 files changed, 89 insertions(+), 3 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index fd59160bee..525b40c5bd 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -132,6 +132,7 @@ pub enum ErrorCodes { ReplayTx = 7, InvalidChainId = 8, InvalidDecryptedChainId = 9, + ExpiredTx = 10, } impl From for u32 { @@ -615,6 +616,24 @@ where return response; } + // Tx expiration + if let Some(exp) = tx.expiration { + let last_block_timestamp = self + .wl_storage + .storage + .get_block_timestamp() + .expect("Failed to retrieve last block timestamp"); + + if exp > last_block_timestamp { + response.code = ErrorCodes::ExpiredTx.into(); + response.log = format!( + "Tx expired at {:#?}, last committed block time: {:#?}", + exp, last_block_timestamp + ); + return response; + } + } + // Tx signature check let tx_type = match process_tx(tx) { Ok(ty) => ty, @@ -1149,7 +1168,6 @@ mod test_utils { #[cfg(test)] mod test_mempool_validate { use namada::proto::SignedTxData; - use namada::types::storage::Epoch; use namada::types::transaction::{Fee, WrapperTx}; use super::test_utils::TestShell; diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 6932050a91..82fb1ab830 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -117,6 +117,7 @@ where }; let tx_chain_id = tx.chain_id.clone(); + let tx_expiration = tx.expiration; // TODO: This should not be hardcoded let privkey = ::G2Affine::prime_subgroup_generator(); @@ -136,6 +137,7 @@ where .into(), }, TxType::Protocol(_) => { + // Tx chain id if tx_chain_id != self.chain_id { return TxResult { code: ErrorCodes::InvalidChainId.into(), @@ -146,6 +148,24 @@ where ), }; } + + // Tx expiration + if let Some(exp) = tx_expiration { + let last_block_timestamp = self + .wl_storage + .storage + .get_block_timestamp() + .expect("Failed to retrieve last block timestamp"); + if exp > last_block_timestamp { + return TxResult { + code: ErrorCodes::ExpiredTx.into(), + info: format!( + "Tx expired at {:#?}, last committed block time: {:#?}", + exp, last_block_timestamp + ), + }; + } + } TxResult { code: ErrorCodes::InvalidTx.into(), info: "Protocol transactions are a fun new feature \ @@ -172,6 +192,7 @@ where has_valid_pow: _, } = tx { + // Tx chain id if tx.chain_id != self.chain_id { return TxResult { code: ErrorCodes::InvalidDecryptedChainId @@ -183,6 +204,25 @@ where ), }; } + + // Tx expiration + if let Some(exp) = tx.expiration { + let last_block_timestamp = self + .wl_storage + 
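[Editor's note] With the new `EXPIRATION_OPT` argument the client can attach an expiration from the command line; the parsed `Option<DateTimeUtc>` then flows into the `Tx` being built. A hypothetical invocation (binary name, subcommand and the other flags are assumptions; only `--expiration` and its datetime format come from this patch):

    namadac transfer --source alice --target bob --token NAM --amount 10 \
        --expiration "2023-06-01T00:00:00Z"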
.storage + .get_block_timestamp() + .expect("Failed to retrieve last block timestamp"); + if exp > last_block_timestamp { + return TxResult { + code: ErrorCodes::ExpiredTx + .into(), + info: format!( + "Tx expired at {:#?}, last committed block time: {:#?}", + exp, last_block_timestamp + ), + }; + } + } } TxResult { @@ -210,7 +250,7 @@ where } } TxType::Wrapper(wrapper) => { - // ChainId check + // Tx chain id if tx_chain_id != self.chain_id { return TxResult { code: ErrorCodes::InvalidChainId.into(), @@ -222,6 +262,24 @@ where }; } + // Tx expiration + if let Some(exp) = tx_expiration { + let last_block_timestamp = self + .wl_storage + .storage + .get_block_timestamp() + .expect("Failed to retrieve last block timestamp"); + if exp > last_block_timestamp { + return TxResult { + code: ErrorCodes::ExpiredTx.into(), + info: format!( + "Tx expired at {:#?}, last committed block time: {:#?}", + exp, last_block_timestamp + ), + }; + } + } + // validate the ciphertext via Ferveo if !wrapper.validate_ciphertext() { TxResult { diff --git a/core/src/ledger/storage/mod.rs b/core/src/ledger/storage/mod.rs index 768e335a6d..26f1519db6 100644 --- a/core/src/ledger/storage/mod.rs +++ b/core/src/ledger/storage/mod.rs @@ -710,6 +710,17 @@ where } } + /// Get the timestamp of the last committed block, or the current timestamp + /// if no blocks have been produced yet + pub fn get_block_timestamp(&self) -> Result { + let last_block_height = self.get_block_height().0; + + Ok(self + .db + .read_block_header(last_block_height)? + .map_or_else(|| DateTimeUtc::now(), |header| header.time)) + } + /// Initialize a new epoch when the current epoch is finished. Returns /// `true` on a new epoch. pub fn update_epoch( diff --git a/core/src/types/transaction/mod.rs b/core/src/types/transaction/mod.rs index 7c5d2ec1f3..b55d33b3ee 100644 --- a/core/src/types/transaction/mod.rs +++ b/core/src/types/transaction/mod.rs @@ -353,7 +353,6 @@ pub mod tx_types { mod test_process_tx { use super::*; use crate::types::address::nam; - use crate::types::storage::Epoch; use crate::types::time::DateTimeUtc; fn gen_keypair() -> common::SecretKey { From 4b70485979b26362ccc49c3e6c97f6c1601ecc14 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Tue, 7 Feb 2023 12:54:28 +0100 Subject: [PATCH 51/58] Improves tx expiration checks. 
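[Editor's note] `get_block_timestamp` (renamed `get_last_block_timestamp` later in this series) reads the header time of the last committed block and falls back to `DateTimeUtc::now()` on a fresh chain with no committed blocks, so expiration checks always have a reference time. A sketch of a caller; the generic bounds are assumed to mirror the surrounding `Storage` impls:

    use namada::ledger::storage::{DBIter, Storage, StorageHasher, DB};
    use namada::types::time::DateTimeUtc;

    fn reference_time<D, H>(storage: &Storage<D, H>) -> DateTimeUtc
    where
        D: 'static + DB + for<'iter> DBIter<'iter>,
        H: 'static + StorageHasher,
    {
        storage
            .get_block_timestamp()
            .expect("Failed to retrieve last block timestamp")
    }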
Adds unit tests --- apps/src/lib/node/ledger/shell/mod.rs | 38 +++- .../lib/node/ledger/shell/prepare_proposal.rs | 2 + .../lib/node/ledger/shell/process_proposal.rs | 167 +++++++++++++++--- apps/src/lib/node/ledger/shims/abcipp_shim.rs | 40 ++++- core/src/ledger/storage/mod.rs | 2 +- 5 files changed, 210 insertions(+), 39 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 525b40c5bd..035adba439 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -44,7 +44,7 @@ use namada::types::time::{DateTimeUtc, TimeZone, Utc}; use namada::types::token::{self}; use namada::types::transaction::{ hash_tx, process_tx, verify_decrypted_correctly, AffineCurve, DecryptedTx, - EllipticCurve, PairingEngine, TxType, MIN_FEE, + EllipticCurve, PairingEngine, TxError, TxType, MIN_FEE, }; use namada::types::{address, hash}; use namada::vm::wasm::{TxCache, VpCache}; @@ -133,6 +133,7 @@ pub enum ErrorCodes { InvalidChainId = 8, InvalidDecryptedChainId = 9, ExpiredTx = 10, + ExpiredDecryptedTx = 11, } impl From for u32 { @@ -621,10 +622,10 @@ where let last_block_timestamp = self .wl_storage .storage - .get_block_timestamp() + .get_last_block_timestamp() .expect("Failed to retrieve last block timestamp"); - if exp > last_block_timestamp { + if last_block_timestamp > exp { response.code = ErrorCodes::ExpiredTx.into(); response.log = format!( "Tx expired at {:#?}, last committed block time: {:#?}", @@ -637,9 +638,12 @@ where // Tx signature check let tx_type = match process_tx(tx) { Ok(ty) => ty, - Err(msg) => { - response.code = ErrorCodes::InvalidSig.into(); - response.log = msg.to_string(); + Err(e) => { + response.code = match e { + TxError::Deserialization(_) => ErrorCodes::InvalidTx.into(), + _ => ErrorCodes::InvalidSig.into(), + }; + response.log = e.to_string(); return response; } }; @@ -1471,4 +1475,26 @@ mod test_mempool_validate { ) ) } + + /// Check that an expired transaction gets rejected + #[test] + fn test_expired_tx() { + let (shell, _) = TestShell::new(); + + let keypair = super::test_utils::gen_keypair(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), + Some(DateTimeUtc::now()), + ) + .sign(&keypair); + + let result = shell.mempool_validate( + tx.to_bytes().as_ref(), + MempoolTxType::NewTransaction, + ); + assert_eq!(result.code, u32::from(ErrorCodes::ExpiredTx)); + } } diff --git a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index f83c07d826..433275be1f 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -44,6 +44,8 @@ where // TODO: This should not be hardcoded let privkey = ::G2Affine::prime_subgroup_generator(); + //FIXME: check expiration of wrapper txs if I check the time against the one in the block header of the request, otherwise + //FIXME: there's no need to do anything here sicne the check is identiacal to the mempool_one which has already run // TODO: Craft the Ethereum state update tx // filter in half of the new txs from Tendermint, only keeping // wrappers diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 82fb1ab830..fb1cb9c75d 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -36,7 +36,29 @@ where &self, req: 
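[Editor's note] Note the corrected direction of the comparison: a transaction is expired only when the reference time is already past its expiration (`reference > exp`); the previous patch in this series had the operands swapped, which would have rejected txs whose expiration still lay in the future. A small sketch of the predicate shared by the mempool and proposal checks (`DateTimeUtc` is ordered, as the diff relies on):

    /// True when `expiration` lies strictly before the reference time:
    /// the last committed block time in `mempool_validate`, the proposed
    /// block time in prepare/process proposal.
    fn is_expired(expiration: Option<DateTimeUtc>, reference: DateTimeUtc) -> bool {
        match expiration {
            Some(exp) => reference > exp,
            None => false,
        }
    }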
RequestProcessProposal, ) -> ProcessProposal { - let tx_results = self.process_txs(&req.txs); + let block_time = match req.time { + Some(t) => { + match t.try_into() { + Ok(t) => t, + Err(_) => { + // Default to last committed block time + self.wl_storage + .storage + .get_last_block_timestamp() + .expect("Failed to retrieve last block timestamp") + } + } + } + None => { + // Default to last committed block time + self.wl_storage + .storage + .get_last_block_timestamp() + .expect("Failed to retrieve last block timestamp") + } + }; + + let tx_results = self.process_txs(&req.txs, block_time); ProcessProposal { status: if tx_results.iter().all(|res| { @@ -45,6 +67,7 @@ where ErrorCodes::Ok | ErrorCodes::Undecryptable | ErrorCodes::InvalidDecryptedChainId + | ErrorCodes::ExpiredDecryptedTx ) }) { ProposalStatus::Accept as i32 @@ -57,7 +80,11 @@ where } /// Check all the given txs. - pub fn process_txs(&self, txs: &[Vec]) -> Vec { + pub fn process_txs( + &self, + txs: &[Vec], + block_time: DateTimeUtc, + ) -> Vec { let mut tx_queue_iter = self.wl_storage.storage.tx_queue.iter(); let mut temp_wl_storage = TempWlStorage::new(&self.wl_storage.storage); txs.iter() @@ -66,6 +93,7 @@ where tx_bytes, &mut tx_queue_iter, &mut temp_wl_storage, + block_time, ); if let ErrorCodes::Ok = ErrorCodes::from_u32(result.code).unwrap() @@ -104,6 +132,7 @@ where tx_bytes: &[u8], tx_queue_iter: &mut impl Iterator, temp_wl_storage: &mut TempWlStorage, + block_time: DateTimeUtc, ) -> TxResult { let tx = match Tx::try_from(tx_bytes) { Ok(tx) => tx, @@ -151,18 +180,13 @@ where // Tx expiration if let Some(exp) = tx_expiration { - let last_block_timestamp = self - .wl_storage - .storage - .get_block_timestamp() - .expect("Failed to retrieve last block timestamp"); - if exp > last_block_timestamp { + if block_time > exp { return TxResult { code: ErrorCodes::ExpiredTx.into(), info: format!( - "Tx expired at {:#?}, last committed block time: {:#?}", - exp, last_block_timestamp - ), + "Tx expired at {:#?}, block time: {:#?}", + exp, block_time + ), }; } } @@ -207,18 +231,13 @@ where // Tx expiration if let Some(exp) = tx.expiration { - let last_block_timestamp = self - .wl_storage - .storage - .get_block_timestamp() - .expect("Failed to retrieve last block timestamp"); - if exp > last_block_timestamp { + if block_time > exp { return TxResult { - code: ErrorCodes::ExpiredTx + code: ErrorCodes::ExpiredDecryptedTx .into(), info: format!( - "Tx expired at {:#?}, last committed block time: {:#?}", - exp, last_block_timestamp + "Dercrypted tx expired at {:#?}, block time: {:#?}", + exp, block_time ), }; } @@ -264,18 +283,13 @@ where // Tx expiration if let Some(exp) = tx_expiration { - let last_block_timestamp = self - .wl_storage - .storage - .get_block_timestamp() - .expect("Failed to retrieve last block timestamp"); - if exp > last_block_timestamp { + if block_time > exp { return TxResult { code: ErrorCodes::ExpiredTx.into(), info: format!( - "Tx expired at {:#?}, last committed block time: {:#?}", - exp, last_block_timestamp - ), + "Tx expired at {:#?}, block time: {:#?}", + exp, block_time + ), }; } } @@ -1406,4 +1420,99 @@ mod test_process_proposal { Err(_) => panic!("Test failed"), } } + + /// Test that an expired wrapper transaction causes a block rejection + #[test] + fn test_expired_wrapper() { + let (mut shell, _) = TestShell::new(); + let keypair = crate::wallet::defaults::daewon_keypair(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + 
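[Editor's note] Expired decrypted txs get a dedicated `ExpiredDecryptedTx` code so they can be flagged without rejecting the whole proposal, presumably because the corresponding wrapper was already committed in an earlier block and its decrypted counterpart cannot simply be dropped by the proposer. A condensed sketch of the acceptance filter above:

    // Accept the proposal when every result is Ok or one of the
    // "inner tx is invalid, but the block itself is well-formed" codes.
    let accept = tx_results.iter().all(|res| {
        matches!(
            ErrorCodes::from_u32(res.code).unwrap(),
            ErrorCodes::Ok
                | ErrorCodes::Undecryptable
                | ErrorCodes::InvalidDecryptedChainId
                | ErrorCodes::ExpiredDecryptedTx
        )
    });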
shell.chain_id.clone(), + None, + ); + let wrapper = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair, + 0.into(), + tx.clone(), + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ); + let signed = wrapper + .sign(&keypair, shell.chain_id.clone(), Some(DateTimeUtc::now())) + .expect("Test failed"); + + // Run validation + let request = ProcessProposal { + txs: vec![signed.to_bytes()], + }; + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::ExpiredTx) + ); + } + } + } + + /// Test that an expired decrypted transaction is correctlye marked as so + /// without rejecting the entire block + #[test] + fn test_expired_decrypted() { + let (mut shell, _) = TestShell::new(); + let keypair = crate::wallet::defaults::daewon_keypair(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("new transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), + Some(DateTimeUtc::now()), + ); + let decrypted: Tx = DecryptedTx::Decrypted { + tx: tx.clone(), + has_valid_pow: false, + } + .into(); + let signed_decrypted = decrypted.sign(&keypair); + let wrapper = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair, + 0.into(), + tx, + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ); + let wrapper_in_queue = WrapperTxInQueue { + tx: wrapper, + has_valid_pow: false, + }; + shell.wl_storage.storage.tx_queue.push(wrapper_in_queue); + + // Run validation + let request = ProcessProposal { + txs: vec![signed_decrypted.to_bytes()], + }; + match shell.process_proposal(request) { + Ok(response) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::ExpiredDecryptedTx) + ); + } + Err(_) => panic!("Test failed"), + } + } } diff --git a/apps/src/lib/node/ledger/shims/abcipp_shim.rs b/apps/src/lib/node/ledger/shims/abcipp_shim.rs index 74a56a4ddc..e6d07ce85c 100644 --- a/apps/src/lib/node/ledger/shims/abcipp_shim.rs +++ b/apps/src/lib/node/ledger/shims/abcipp_shim.rs @@ -137,8 +137,42 @@ impl AbcippShim { } #[cfg(not(feature = "abcipp"))] Req::EndBlock(_) => { - let processing_results = - self.service.process_txs(&self.delivered_txs); + let begin_block_request = + self.begin_block_request.take().unwrap(); + let block_time = match &begin_block_request.header { + Some(header) => match &header.time { + Some(time) => match time.to_owned().try_into() { + Ok(t) => t, + Err(_) => { + // Default to last committed block time + self.service.wl_storage + .storage + .get_last_block_timestamp() + .expect("Failed to retrieve last block timestamp") + } + }, + None => { + // Default to last committed block time + self.service.wl_storage + .storage + .get_last_block_timestamp() + .expect("Failed to retrieve last block timestamp") + } + }, + None => { + // Default to last committed block time + self.service + .wl_storage + .storage + .get_last_block_timestamp() + .expect( + "Failed to retrieve last block timestamp", + ) + } + }; + let processing_results = self + .service + .process_txs(&self.delivered_txs, block_time); let mut txs = Vec::with_capacity(self.delivered_txs.len()); let mut delivered = vec![]; std::mem::swap(&mut self.delivered_txs, &mut delivered); @@ -149,7 +183,7 @@ impl AbcippShim { txs.push(ProcessedTx { tx, result }); } let mut end_block_request: FinalizeBlock = - 
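[Editor's note] The "use the proposed header time, fall back to the last committed block time" dance is repeated in `process_proposal`, in the shim's `FinalizeBlock` path and in its `EndBlock` path. A hypothetical helper (not part of this patch) that could centralize it, relying on the `TryInto<DateTimeUtc>` conversion for the protobuf `Timestamp` used above:

    use namada::core::tendermint_proto::google::protobuf::Timestamp;
    use namada::types::time::DateTimeUtc;

    /// Prefer the proposed header time; fall back to the last committed
    /// block time when it is absent or unparseable.
    fn resolve_block_time(
        proposed: Option<Timestamp>,
        last_committed: DateTimeUtc,
    ) -> DateTimeUtc {
        proposed
            .and_then(|t| t.try_into().ok())
            .unwrap_or(last_committed)
    }

The patch instead inlines the nested `match` at each call site and leaves `// FIXME: manage error?` notes where the fallback swallows storage errors; factoring it out would also remove that duplication.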
self.begin_block_request.take().unwrap().into(); + begin_block_request.into(); let hash = self.get_hash(); end_block_request.hash = BlockHash::from(hash.clone()); end_block_request.txs = txs; diff --git a/core/src/ledger/storage/mod.rs b/core/src/ledger/storage/mod.rs index 26f1519db6..cc517e54e0 100644 --- a/core/src/ledger/storage/mod.rs +++ b/core/src/ledger/storage/mod.rs @@ -712,7 +712,7 @@ where /// Get the timestamp of the last committed block, or the current timestamp /// if no blocks have been produced yet - pub fn get_block_timestamp(&self) -> Result { + pub fn get_last_block_timestamp(&self) -> Result { let last_block_height = self.get_block_height().0; Ok(self From d876660b236a7c701dcc95c8aca9e9d00ec7f94f Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Tue, 7 Feb 2023 12:54:56 +0100 Subject: [PATCH 52/58] Tx expiration check in `prepare_proposal`. Unit test --- .../lib/node/ledger/shell/prepare_proposal.rs | 157 ++++++++++++++++-- 1 file changed, 142 insertions(+), 15 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index 433275be1f..086e162b3f 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -3,6 +3,7 @@ use namada::ledger::storage::{DBIter, StorageHasher, DB}; use namada::proto::Tx; use namada::types::internal::WrapperTxInQueue; +use namada::types::time::DateTimeUtc; use namada::types::transaction::tx_types::TxType; use namada::types::transaction::wrapper::wrapper_tx::PairingEngine; use namada::types::transaction::{AffineCurve, DecryptedTx, EllipticCurve}; @@ -44,8 +45,6 @@ where // TODO: This should not be hardcoded let privkey = ::G2Affine::prime_subgroup_generator(); - //FIXME: check expiration of wrapper txs if I check the time against the one in the block header of the request, otherwise - //FIXME: there's no need to do anything here sicne the check is identiacal to the mempool_one which has already run // TODO: Craft the Ethereum state update tx // filter in half of the new txs from Tendermint, only keeping // wrappers @@ -55,13 +54,52 @@ where .txs .into_iter() .map(|tx_bytes| { - if let Ok(Ok(TxType::Wrapper(_))) = - Tx::try_from(tx_bytes.as_slice()).map(process_tx) - { - record::keep(tx_bytes) - } else { - record::remove(tx_bytes) - } + match Tx::try_from(tx_bytes.as_slice()) { + Ok(tx) => { + let tx_expiration = tx.expiration; + if let Ok(TxType::Wrapper(_)) = process_tx(tx) { + // Check tx expiration against proposed block + match tx_expiration { + Some(exp) => { + match req.time { + Some(&block_time) => { + match TryInto::::try_into(block_time.to_owned()) { + Ok(datetime) => { + +if datetime > exp { + record::remove(tx_bytes) + } else { + record::keep(tx_bytes) + } + }, + Err(_) => { + + // Default to last block datetime which has already been checked by mempool_validate, so accept the tx + record::keep(tx_bytes) + } + } + }, + None => { + + // Default to last block datetime which has already been checked by mempool_validate, so accept the tx + record::keep(tx_bytes) + } + } + }, + None => record::keep(tx_bytes) + } + + + } else { + record::remove(tx_bytes) + } + } + Err(_) => record::remove(tx_bytes), + + + + + } }) .take_while(|tx_record| { let new_size = total_proposal_size + tx_record.tx.len(); @@ -80,12 +118,46 @@ where .txs .into_iter() .filter_map(|tx_bytes| { - if let Ok(Ok(TxType::Wrapper(_))) = - Tx::try_from(tx_bytes.as_slice()).map(process_tx) - { - Some(tx_bytes) - } else { - None + match 
Tx::try_from(tx_bytes.as_slice()) { + Ok(tx) => { + let tx_expiration = tx.expiration; + if let Ok(TxType::Wrapper(_)) = process_tx(tx) { + // Check tx expiration against proposed block + match tx_expiration { + Some(exp) => { + match &req.time { + Some(block_time) => { + match TryInto::::try_into(block_time.to_owned()) { + Ok(datetime) => { + +if datetime > exp { None + } else { + Some(tx_bytes) + } + }, + Err(_) => { + + // Default to last block datetime which has already been checked by mempool_validate, so accept the tx + Some(tx_bytes) + } + } + }, + None => { + + // Default to last block datetime which has already been checked by mempool_validate, so accept the tx + Some(tx_bytes) + } + } + }, + None => Some(tx_bytes) + } + + + } else { + None + } + } + Err(_) => None, } }) .take_while(|tx_bytes| { @@ -178,6 +250,7 @@ pub(super) mod record { #[cfg(test)] mod test_prepare_proposal { + use borsh::BorshSerialize; use namada::{ proof_of_stake::Epoch, @@ -368,4 +441,58 @@ mod test_prepare_proposal { assert_eq!(received, expected_txs); } } + + /// Test that expired wrapper transactions are not included in the block + #[test] + fn test_expired_wrapper_tx() { + +let (shell, _) = TestShell::new(); + let keypair = gen_keypair(); + let tx_time = DateTimeUtc::now(); +let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), + None, + ); + let wrapper_tx = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair, + 0.into(), + tx, + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ); + let wrapper = wrapper_tx + .sign(&keypair, shell.chain_id.clone(), Some(tx_time)) + .expect("Test failed"); + +let time = DateTimeUtc::now(); +let mut block_time = namada::core::tendermint_proto::google::protobuf::Timestamp::default(); + block_time.seconds = +time.0.timestamp(); + block_time.nanos = +time.0.timestamp_subsec_nanos() as i32; + let req = RequestPrepareProposal { + txs: vec![wrapper.to_bytes()], + max_tx_bytes: 0, + time: Some(block_time) , + ..Default::default() + }; + #[cfg(feature = "abcipp")] + assert_eq!( + shell.prepare_proposal(req).tx_records, + vec![record::remove(tx.to_bytes())] + ); + #[cfg(not(feature = "abcipp"))] + { + let result = shell.prepare_proposal(req); + eprintln!("Proposal: {:?}", result.txs); + assert!(result.txs.is_empty()); + } + } } From 2fdfbfc0e45bb4527c20bab12e5ed498d7227dc7 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Tue, 7 Feb 2023 16:57:45 +0100 Subject: [PATCH 53/58] Clippy + fmt --- apps/src/lib/cli.rs | 7 +- .../lib/node/ledger/shell/prepare_proposal.rs | 120 +++++------ .../lib/node/ledger/shell/process_proposal.rs | 2 +- apps/src/lib/node/ledger/shims/abcipp_shim.rs | 54 ++++- core/src/ledger/storage/mod.rs | 2 +- core/src/proto/mod.rs | 3 +- core/src/types/transaction/decrypted.rs | 7 +- core/src/types/transaction/mod.rs | 3 +- shared/src/ledger/ibc/vp/mod.rs | 198 +++++++++++++----- tests/src/vm_host_env/mod.rs | 34 +-- wasm/wasm_source/src/tx_bond.rs | 2 +- .../src/tx_change_validator_commission.rs | 2 +- wasm/wasm_source/src/tx_unbond.rs | 2 +- wasm/wasm_source/src/tx_withdraw.rs | 2 +- 14 files changed, 282 insertions(+), 156 deletions(-) diff --git a/apps/src/lib/cli.rs b/apps/src/lib/cli.rs index 574969ef89..9540a1606f 100644 --- a/apps/src/lib/cli.rs +++ b/apps/src/lib/cli.rs @@ -2926,7 +2926,12 @@ pub mod args { "The maximum amount of gas needed to run transaction", ), ) - 
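[Editor's note] Both the ABCI++ `tx_records` path and the legacy `txs` path apply the same rule: drop a wrapper whose expiration precedes the proposed block time, and keep it whenever the request carries no usable time, since `mempool_validate` has already vetted it against the last committed block time. A condensed sketch of that decision (the nested matches above are flattened by the later formatting patch):

    /// Whether a wrapper tx may enter the proposed block, given its optional
    /// expiration and the request's header time already converted to
    /// `DateTimeUtc` (or `None` when absent or unparseable).
    fn keep_wrapper(
        expiration: Option<DateTimeUtc>,
        proposed_block_time: Option<DateTimeUtc>,
    ) -> bool {
        match (expiration, proposed_block_time) {
            // Usable proposed time and an expiration: keep only unexpired txs.
            (Some(exp), Some(block_time)) => block_time <= exp,
            // Otherwise keep the tx; the mempool check already covered it.
            _ => true,
        }
    }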
.arg(EXPIRATION_OPT.def().about("The expiration datetime of the transaction, after which the tx won't be accepted anymore. All of these examples are equivalent:\n2012-12-12T12:12:12Z\n2012-12-12 12:12:12Z\n2012- 12-12T12: 12:12Z")) + .arg(EXPIRATION_OPT.def().about( + "The expiration datetime of the transaction, after which the \ + tx won't be accepted anymore. All of these examples are \ + equivalent:\n2012-12-12T12:12:12Z\n2012-12-12 \ + 12:12:12Z\n2012- 12-12T12: 12:12Z", + )) .arg( SIGNING_KEY_OPT .def() diff --git a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index 086e162b3f..0747d0d27d 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -55,50 +55,41 @@ where .into_iter() .map(|tx_bytes| { match Tx::try_from(tx_bytes.as_slice()) { - Ok(tx) => { + Ok(tx) => { let tx_expiration = tx.expiration; if let Ok(TxType::Wrapper(_)) = process_tx(tx) { // Check tx expiration against proposed block match tx_expiration { Some(exp) => { - match req.time { - Some(&block_time) => { + match &req.time { + Some(block_time) => { match TryInto::::try_into(block_time.to_owned()) { Ok(datetime) => { - -if datetime > exp { - record::remove(tx_bytes) - } else { - record::keep(tx_bytes) - } + if datetime > exp { + record::remove(tx_bytes) + } else { + record::keep(tx_bytes) + } }, Err(_) => { - - // Default to last block datetime which has already been checked by mempool_validate, so accept the tx - record::keep(tx_bytes) + // Default to last block datetime which has already been checked by mempool_validate, so accept the tx + record::keep(tx_bytes) } } }, None => { - - // Default to last block datetime which has already been checked by mempool_validate, so accept the tx - record::keep(tx_bytes) + // Default to last block datetime which has already been checked by mempool_validate, so accept the tx + record::keep(tx_bytes) } } }, None => record::keep(tx_bytes) } - - - } else { + } else { record::remove(tx_bytes) } } Err(_) => record::remove(tx_bytes), - - - - } }) .take_while(|tx_record| { @@ -129,21 +120,19 @@ if datetime > exp { Some(block_time) => { match TryInto::::try_into(block_time.to_owned()) { Ok(datetime) => { - -if datetime > exp { None - } else { - Some(tx_bytes) - } + if datetime > exp { + None + } else { + Some(tx_bytes) + } }, Err(_) => { - // Default to last block datetime which has already been checked by mempool_validate, so accept the tx Some(tx_bytes) } } }, None => { - // Default to last block datetime which has already been checked by mempool_validate, so accept the tx Some(tx_bytes) } @@ -151,9 +140,7 @@ if datetime > exp { None }, None => Some(tx_bytes) } - - - } else { + } else { None } } @@ -445,54 +432,53 @@ mod test_prepare_proposal { /// Test that expired wrapper transactions are not included in the block #[test] fn test_expired_wrapper_tx() { - -let (shell, _) = TestShell::new(); + let (shell, _) = TestShell::new(); let keypair = gen_keypair(); let tx_time = DateTimeUtc::now(); -let tx = Tx::new( - "wasm_code".as_bytes().to_owned(), - Some("transaction data".as_bytes().to_owned()), - shell.chain_id.clone(), - None, - ); - let wrapper_tx = WrapperTx::new( - Fee { - amount: 0.into(), - token: shell.wl_storage.storage.native_token.clone(), - }, - &keypair, - 0.into(), - tx, - Default::default(), - #[cfg(not(feature = "mainnet"))] - None, - ); - let wrapper = wrapper_tx - .sign(&keypair, shell.chain_id.clone(), Some(tx_time)) - .expect("Test failed"); + 
let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + shell.chain_id.clone(), + None, + ); + let wrapper_tx = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair, + 0.into(), + tx, + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ); + let wrapper = wrapper_tx + .sign(&keypair, shell.chain_id.clone(), Some(tx_time)) + .expect("Test failed"); -let time = DateTimeUtc::now(); -let mut block_time = namada::core::tendermint_proto::google::protobuf::Timestamp::default(); - block_time.seconds = -time.0.timestamp(); - block_time.nanos = -time.0.timestamp_subsec_nanos() as i32; - let req = RequestPrepareProposal { + let time = DateTimeUtc::now(); + let block_time = + namada::core::tendermint_proto::google::protobuf::Timestamp { + seconds: time.0.timestamp(), + nanos: time.0.timestamp_subsec_nanos() as i32, + }; + let req = RequestPrepareProposal { txs: vec![wrapper.to_bytes()], max_tx_bytes: 0, - time: Some(block_time) , - ..Default::default() + time: Some(block_time), + ..Default::default() }; #[cfg(feature = "abcipp")] assert_eq!( shell.prepare_proposal(req).tx_records, - vec![record::remove(tx.to_bytes())] + vec![record::remove(wrapper.to_bytes())] ); - #[cfg(not(feature = "abcipp"))] + #[cfg(not(feature = "abcipp"))] { let result = shell.prepare_proposal(req); eprintln!("Proposal: {:?}", result.txs); - assert!(result.txs.is_empty()); + assert!(result.txs.is_empty()); } } } diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index fb1cb9c75d..8c8d9b4fea 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -1440,7 +1440,7 @@ mod test_process_proposal { }, &keypair, 0.into(), - tx.clone(), + tx, Default::default(), #[cfg(not(feature = "mainnet"))] None, diff --git a/apps/src/lib/node/ledger/shims/abcipp_shim.rs b/apps/src/lib/node/ledger/shims/abcipp_shim.rs index e6d07ce85c..c9a53596f1 100644 --- a/apps/src/lib/node/ledger/shims/abcipp_shim.rs +++ b/apps/src/lib/node/ledger/shims/abcipp_shim.rs @@ -102,9 +102,37 @@ impl AbcippShim { }), #[cfg(feature = "abcipp")] Req::FinalizeBlock(block) => { + let block_time = match &block.time { + Some(block_time) => { + match block_time.to_owned().try_into() { + Ok(t) => t, + Err(_) => { + // Default to last committed block time + self.service + .wl_storage + .storage + .get_last_block_timestamp() // FIXME: manage error? + .expect( + "Failed to retrieve last block \ + timestamp", + ) + } + } + } + None => { + // Default to last committed block time + self.service + .wl_storage + .storage + .get_last_block_timestamp() // FIXME: manage error? + .expect( + "Failed to retrieve last block timestamp", + ) + } + }; let unprocessed_txs = block.txs.clone(); let processing_results = - self.service.process_txs(&block.txs); + self.service.process_txs(&block.txs, block_time); let mut txs = Vec::with_capacity(unprocessed_txs.len()); for (result, tx) in processing_results .into_iter() @@ -145,18 +173,26 @@ impl AbcippShim { Ok(t) => t, Err(_) => { // Default to last committed block time - self.service.wl_storage - .storage - .get_last_block_timestamp() - .expect("Failed to retrieve last block timestamp") + self.service + .wl_storage + .storage + .get_last_block_timestamp() // FIXME: manage error? 
+ .expect( + "Failed to retrieve last block \ + timestamp", + ) } }, None => { // Default to last committed block time - self.service.wl_storage - .storage - .get_last_block_timestamp() - .expect("Failed to retrieve last block timestamp") + self.service + .wl_storage + .storage + .get_last_block_timestamp() + .expect( + "Failed to retrieve last block \ + timestamp", + ) } }, None => { diff --git a/core/src/ledger/storage/mod.rs b/core/src/ledger/storage/mod.rs index cc517e54e0..4366f5a523 100644 --- a/core/src/ledger/storage/mod.rs +++ b/core/src/ledger/storage/mod.rs @@ -718,7 +718,7 @@ where Ok(self .db .read_block_header(last_block_height)? - .map_or_else(|| DateTimeUtc::now(), |header| header.time)) + .map_or_else(DateTimeUtc::now, |header| header.time)) } /// Initialize a new epoch when the current epoch is finished. Returns diff --git a/core/src/proto/mod.rs b/core/src/proto/mod.rs index cda7c8f1ac..39e18f23b7 100644 --- a/core/src/proto/mod.rs +++ b/core/src/proto/mod.rs @@ -7,10 +7,11 @@ pub use types::{Dkg, Error, Signed, SignedTxData, Tx}; #[cfg(test)] mod tests { + use std::time::SystemTime; + use data_encoding::HEXLOWER; use generated::types::Tx; use prost::Message; - use std::time::SystemTime; use super::*; use crate::types::chain::ChainId; diff --git a/core/src/types/transaction/decrypted.rs b/core/src/types/transaction/decrypted.rs index f8c4327cdb..3407179168 100644 --- a/core/src/types/transaction/decrypted.rs +++ b/core/src/types/transaction/decrypted.rs @@ -93,9 +93,10 @@ pub mod decrypted_tx { .try_to_vec() .expect("Encrypting transaction should not fail"), ), - // If undecrytable we cannot extract the ChainId and expiration. - // If instead the tx gets decrypted successfully, the correct - // chain id and expiration are serialized inside the data field + // If undecrytable we cannot extract the ChainId and + // expiration. 
If instead the tx gets decrypted + // successfully, the correct chain id and + // expiration are serialized inside the data field // of the Tx, while the ones available // in the chain_id and expiration field are just placeholders ChainId(String::new()), diff --git a/core/src/types/transaction/mod.rs b/core/src/types/transaction/mod.rs index b55d33b3ee..b8eba6fa0d 100644 --- a/core/src/types/transaction/mod.rs +++ b/core/src/types/transaction/mod.rs @@ -246,7 +246,8 @@ pub mod tx_types { vec![], Some(ty.try_to_vec().unwrap()), ChainId(String::new()), /* No need to provide a valid - * ChainId or expiration when casting back from + * ChainId or expiration when + * casting back from * TxType */ None, ) diff --git a/shared/src/ledger/ibc/vp/mod.rs b/shared/src/ledger/ibc/vp/mod.rs index 01fceea7b4..8294b934ac 100644 --- a/shared/src/ledger/ibc/vp/mod.rs +++ b/shared/src/ledger/ibc/vp/mod.rs @@ -626,9 +626,14 @@ mod tests { let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -745,9 +750,14 @@ mod tests { ); let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -801,9 +811,14 @@ mod tests { ); let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -938,9 +953,14 @@ mod tests { ); let ibc = Ibc { ctx }; // this should return true because state has been stored - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1022,9 +1042,14 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1094,9 +1119,14 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1152,9 +1182,14 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1229,9 +1264,14 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation 
failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1314,9 +1354,14 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1396,9 +1441,14 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1433,9 +1483,14 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1473,9 +1528,14 @@ mod tests { ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1552,9 +1612,14 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1638,9 +1703,14 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1729,9 +1799,14 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1812,9 +1887,14 @@ mod tests { vp_wasm_cache, ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1902,9 +1982,14 @@ mod tests { ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } #[test] @@ -1948,8 +2033,13 @@ mod tests { ); let ibc = Ibc { ctx }; - assert!(ibc - .validate_tx(tx.data.as_ref().unwrap(), &keys_changed, &verifiers) - .expect("validation failed")); + assert!( + ibc.validate_tx( + tx.data.as_ref().unwrap(), + &keys_changed, + &verifiers + ) + .expect("validation failed") + ); } } diff --git a/tests/src/vm_host_env/mod.rs b/tests/src/vm_host_env/mod.rs index d41c33ccf9..f87a1e1c3b 100644 --- a/tests/src/vm_host_env/mod.rs 
+++ b/tests/src/vm_host_env/mod.rs
@@ -135,11 +135,13 @@ mod tests {
 // Trying to delete a validity predicate should fail
 let key = storage::Key::validity_predicate(&test_account);
- assert!(panic::catch_unwind(|| { tx::ctx().delete(&key).unwrap() })
- .err()
- .map(|a| a.downcast_ref::<String>().cloned().unwrap())
- .unwrap()
- .contains("CannotDeleteVp"));
+ assert!(
+ panic::catch_unwind(|| { tx::ctx().delete(&key).unwrap() })
+ .err()
+ .map(|a| a.downcast_ref::<String>().cloned().unwrap())
+ .unwrap()
+ .contains("CannotDeleteVp")
+ );
 }

 #[test]
@@ -465,17 +467,21 @@ mod tests {
 .expect("decoding signed data we just signed")
 });
 assert_eq!(&signed_tx_data.data, data);
- assert!(vp::CTX
- .verify_tx_signature(&pk, &signed_tx_data.sig)
- .unwrap());
+ assert!(
+ vp::CTX
+ .verify_tx_signature(&pk, &signed_tx_data.sig)
+ .unwrap()
+ );

 let other_keypair = key::testing::keypair_2();
- assert!(!vp::CTX
- .verify_tx_signature(
- &other_keypair.ref_to(),
- &signed_tx_data.sig
- )
- .unwrap());
+ assert!(
+ !vp::CTX
+ .verify_tx_signature(
+ &other_keypair.ref_to(),
+ &signed_tx_data.sig
+ )
+ .unwrap()
+ );
 }
 }
diff --git a/wasm/wasm_source/src/tx_bond.rs b/wasm/wasm_source/src/tx_bond.rs
index 649dccaa2d..8215045d5c 100644
--- a/wasm/wasm_source/src/tx_bond.rs
+++ b/wasm/wasm_source/src/tx_bond.rs
@@ -100,7 +100,7 @@ mod tests {
 let tx_code = vec![];
 let tx_data = bond.try_to_vec().unwrap();

- let tx = Tx::new(tx_code, Some(tx_data), ChainId::default());
+ let tx = Tx::new(tx_code, Some(tx_data), ChainId::default(), None);
 let signed_tx = tx.sign(&key);
 let tx_data = signed_tx.data.unwrap();

diff --git a/wasm/wasm_source/src/tx_change_validator_commission.rs b/wasm/wasm_source/src/tx_change_validator_commission.rs
index ef81fcb1b5..30ae263269 100644
--- a/wasm/wasm_source/src/tx_change_validator_commission.rs
+++ b/wasm/wasm_source/src/tx_change_validator_commission.rs
@@ -79,7 +79,7 @@ mod tests {
 let tx_code = vec![];
 let tx_data = commission_change.try_to_vec().unwrap();

- let tx = Tx::new(tx_code, Some(tx_data), ChainId::default());
+ let tx = Tx::new(tx_code, Some(tx_data), ChainId::default(), None);
 let signed_tx = tx.sign(&key);
 let tx_data = signed_tx.data.unwrap();

diff --git a/wasm/wasm_source/src/tx_unbond.rs b/wasm/wasm_source/src/tx_unbond.rs
index 2cdc11789e..33cf9f56ea 100644
--- a/wasm/wasm_source/src/tx_unbond.rs
+++ b/wasm/wasm_source/src/tx_unbond.rs
@@ -122,7 +122,7 @@ mod tests {
 let tx_code = vec![];
 let tx_data = unbond.try_to_vec().unwrap();

- let tx = Tx::new(tx_code, Some(tx_data), ChainId::default());
+ let tx = Tx::new(tx_code, Some(tx_data), ChainId::default(), None);
 let signed_tx = tx.sign(&key);
 let tx_data = signed_tx.data.unwrap();

diff --git a/wasm/wasm_source/src/tx_withdraw.rs b/wasm/wasm_source/src/tx_withdraw.rs
index cafdca8f6b..b00661261e 100644
--- a/wasm/wasm_source/src/tx_withdraw.rs
+++ b/wasm/wasm_source/src/tx_withdraw.rs
@@ -155,7 +155,7 @@ mod tests {
 let tx_code = vec![];
 let tx_data = withdraw.try_to_vec().unwrap();

- let tx = Tx::new(tx_code, Some(tx_data), ChainId::default());
+ let tx = Tx::new(tx_code, Some(tx_data), ChainId::default(), None);
 let signed_tx = tx.sign(&key);
 let tx_data = signed_tx.data.unwrap();

From d73aaec461ab4ae4b4d3a387b04858638df69db4 Mon Sep 17 00:00:00 2001
From: Marco Granelli
Date: Wed, 8 Feb 2023 14:15:27 +0100
Subject: [PATCH 54/58] Refactors block time retrieval

---
 apps/src/lib/node/ledger/shell/mod.rs | 45 ++++++++---
 .../lib/node/ledger/shell/process_proposal.rs | 25 +-----
 apps/src/lib/node/ledger/shims/abcipp_shim.rs | 79 +++----------------
 3 files changed, 48 insertions(+), 101 deletions(-)

diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs
index 035adba439..605e69c589 100644
--- a/apps/src/lib/node/ledger/shell/mod.rs
+++ b/apps/src/lib/node/ledger/shell/mod.rs
@@ -51,6 +51,7 @@ use namada::vm::wasm::{TxCache, VpCache};
 use namada::vm::WasmCacheRwAccess;
 use num_derive::{FromPrimitive, ToPrimitive};
 use num_traits::{FromPrimitive, ToPrimitive};
+use tendermint_proto::google::protobuf::Timestamp;
 use thiserror::Error;
 use tokio::sync::mpsc::UnboundedSender;
@@ -393,6 +394,35 @@ where
 response
 }
+ /// Takes the optional tendermint timestamp of the block: if it's Some than converts it to
+ /// a [`DateTimeUtc`], otherwise retrieve from self the time of the last block committed
+ pub fn get_block_timestamp(
+ &self,
+ tendermint_block_time: Option<Timestamp>,
+ ) -> DateTimeUtc {
+ match tendermint_block_time {
+ Some(t) => {
+ match t.try_into() {
+ Ok(t) => t,
+ Err(_) => {
+ // Default to last committed block time
+ self.wl_storage
+ .storage
+ .get_last_block_timestamp()
+ .expect("Failed to retrieve last block timestamp")
+ }
+ }
+ }
+ None => {
+ // Default to last committed block time
+ self.wl_storage
+ .storage
+ .get_last_block_timestamp()
+ .expect("Failed to retrieve last block timestamp")
+ }
+ }
+ }
+
 /// Read the value for a storage key dropping any error
 pub fn read_storage_key<T>(&self, key: &Key) -> Option<T>
 where
@@ -619,11 +649,7 @@ where
 // Tx expiration
 if let Some(exp) = tx.expiration {
- let last_block_timestamp = self
- .wl_storage
- .storage
- .get_last_block_timestamp()
- .expect("Failed to retrieve last block timestamp");
+ let last_block_timestamp = self.get_block_timestamp(None);

 if last_block_timestamp > exp {
 response.code = ErrorCodes::ExpiredTx.into();
@@ -638,12 +664,9 @@ where
 // Tx signature check
 let tx_type = match process_tx(tx) {
 Ok(ty) => ty,
- Err(e) => {
- response.code = match e {
- TxError::Deserialization(_) => ErrorCodes::InvalidTx.into(),
- _ => ErrorCodes::InvalidSig.into(),
- };
- response.log = e.to_string();
+ Err(msg) => {
+ response.code = ErrorCodes::InvalidSig.into();
+ response.log = msg.to_string();
 return response;
 }
 };
diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs
index 8c8d9b4fea..50145e397a 100644
--- a/apps/src/lib/node/ledger/shell/process_proposal.rs
+++ b/apps/src/lib/node/ledger/shell/process_proposal.rs
@@ -36,29 +36,8 @@ where
 &self,
 req: RequestProcessProposal,
 ) -> ProcessProposal {
- let block_time = match req.time {
- Some(t) => {
- match t.try_into() {
- Ok(t) => t,
- Err(_) => {
- // Default to last committed block time
- self.wl_storage
- .storage
- .get_last_block_timestamp()
- .expect("Failed to retrieve last block timestamp")
- }
- }
- }
- None => {
- // Default to last committed block time
- self.wl_storage
- .storage
- .get_last_block_timestamp()
- .expect("Failed to retrieve last block timestamp")
- }
- };
-
- let tx_results = self.process_txs(&req.txs, block_time);
+ let tx_results =
+ self.process_txs(&req.txs, self.get_block_timestamp(req.time));

 ProcessProposal {
 status: if tx_results.iter().all(|res| {
diff --git a/apps/src/lib/node/ledger/shims/abcipp_shim.rs b/apps/src/lib/node/ledger/shims/abcipp_shim.rs
index c9a53596f1..f8360f9e92 100644
--- a/apps/src/lib/node/ledger/shims/abcipp_shim.rs
+++ b/apps/src/lib/node/ledger/shims/abcipp_shim.rs
@@ -10,6 +10,7 @@ use namada::types::address::Address;
 use
namada::types::hash::Hash; #[cfg(not(feature = "abcipp"))] use namada::types::storage::BlockHash; +use namada::types::time::DateTimeUtc; #[cfg(not(feature = "abcipp"))] use namada::types::transaction::hash_tx; use tokio::sync::mpsc::UnboundedSender; @@ -102,34 +103,9 @@ impl AbcippShim { }), #[cfg(feature = "abcipp")] Req::FinalizeBlock(block) => { - let block_time = match &block.time { - Some(block_time) => { - match block_time.to_owned().try_into() { - Ok(t) => t, - Err(_) => { - // Default to last committed block time - self.service - .wl_storage - .storage - .get_last_block_timestamp() // FIXME: manage error? - .expect( - "Failed to retrieve last block \ - timestamp", - ) - } - } - } - None => { - // Default to last committed block time - self.service - .wl_storage - .storage - .get_last_block_timestamp() // FIXME: manage error? - .expect( - "Failed to retrieve last block timestamp", - ) - } - }; + let block_time = self + .service + .get_block_timestamp_from_tendermint(&block.time); let unprocessed_txs = block.txs.clone(); let processing_results = self.service.process_txs(&block.txs, block_time); @@ -167,45 +143,14 @@ impl AbcippShim { Req::EndBlock(_) => { let begin_block_request = self.begin_block_request.take().unwrap(); - let block_time = match &begin_block_request.header { - Some(header) => match &header.time { - Some(time) => match time.to_owned().try_into() { - Ok(t) => t, - Err(_) => { - // Default to last committed block time - self.service - .wl_storage - .storage - .get_last_block_timestamp() // FIXME: manage error? - .expect( - "Failed to retrieve last block \ - timestamp", - ) - } - }, - None => { - // Default to last committed block time - self.service - .wl_storage - .storage - .get_last_block_timestamp() - .expect( - "Failed to retrieve last block \ - timestamp", - ) - } - }, - None => { - // Default to last committed block time - self.service - .wl_storage - .storage - .get_last_block_timestamp() - .expect( - "Failed to retrieve last block timestamp", - ) - } - }; + let block_time = self.service.get_block_timestamp( + begin_block_request + .header + .as_ref() + .map(|header| header.time.to_owned()) + .flatten(), + ); + let processing_results = self .service .process_txs(&self.delivered_txs, block_time); From 58e601d0ecd1a138d1ca5c2958ddba204a9684e6 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 24 Feb 2023 17:17:12 +0100 Subject: [PATCH 55/58] Refactors `prepare_proposal` tx validation --- apps/src/lib/node/ledger/shell/mod.rs | 27 ++--- .../lib/node/ledger/shell/prepare_proposal.rs | 105 ++++++------------ 2 files changed, 39 insertions(+), 93 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 605e69c589..e08682c20b 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -400,27 +400,16 @@ where &self, tendermint_block_time: Option, ) -> DateTimeUtc { - match tendermint_block_time { - Some(t) => { - match t.try_into() { - Ok(t) => t, - Err(_) => { - // Default to last committed block time - self.wl_storage - .storage - .get_last_block_timestamp() - .expect("Failed to retrieve last block timestamp") - } - } - } - None => { - // Default to last committed block time - self.wl_storage - .storage - .get_last_block_timestamp() - .expect("Failed to retrieve last block timestamp") + if let Some(t) = tendermint_block_time { + if let Ok(t) = t.try_into() { + return t; } } + // Default to last committed block time + self.wl_storage + .storage + 
.get_last_block_timestamp() + .expect("Failed to retrieve last block timestamp") } /// Read the value for a storage key dropping any error diff --git a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index 0747d0d27d..c63a752088 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -54,43 +54,10 @@ where .txs .into_iter() .map(|tx_bytes| { - match Tx::try_from(tx_bytes.as_slice()) { - Ok(tx) => { - let tx_expiration = tx.expiration; - if let Ok(TxType::Wrapper(_)) = process_tx(tx) { - // Check tx expiration against proposed block - match tx_expiration { - Some(exp) => { - match &req.time { - Some(block_time) => { - match TryInto::::try_into(block_time.to_owned()) { - Ok(datetime) => { - if datetime > exp { - record::remove(tx_bytes) - } else { - record::keep(tx_bytes) - } - }, - Err(_) => { - // Default to last block datetime which has already been checked by mempool_validate, so accept the tx - record::keep(tx_bytes) - } - } - }, - None => { - // Default to last block datetime which has already been checked by mempool_validate, so accept the tx - record::keep(tx_bytes) - } - } - }, - None => record::keep(tx_bytes) - } - } else { - record::remove(tx_bytes) - } - } - Err(_) => record::remove(tx_bytes), - } + match validate_tx_bytes(&tx_bytes, req.time.clone()) { + Ok(()) => record::keep(tx_bytes), + Err(()) => record::remove(tx_bytes), + } }) .take_while(|tx_record| { let new_size = total_proposal_size + tx_record.tx.len(); @@ -109,43 +76,9 @@ where .txs .into_iter() .filter_map(|tx_bytes| { - match Tx::try_from(tx_bytes.as_slice()) { - Ok(tx) => { - let tx_expiration = tx.expiration; - if let Ok(TxType::Wrapper(_)) = process_tx(tx) { - // Check tx expiration against proposed block - match tx_expiration { - Some(exp) => { - match &req.time { - Some(block_time) => { - match TryInto::::try_into(block_time.to_owned()) { - Ok(datetime) => { - if datetime > exp { - None - } else { - Some(tx_bytes) - } - }, - Err(_) => { - // Default to last block datetime which has already been checked by mempool_validate, so accept the tx - Some(tx_bytes) - } - } - }, - None => { - // Default to last block datetime which has already been checked by mempool_validate, so accept the tx - Some(tx_bytes) - } - } - }, - None => Some(tx_bytes) - } - } else { - None - } - } - Err(_) => None, - } + validate_tx_bytes(&tx_bytes, req.time.clone()) + .ok() + .map(|_| tx_bytes) }) .take_while(|tx_bytes| { let new_size = total_proposal_size + tx_bytes.len(); @@ -202,6 +135,30 @@ where } } +fn validate_tx_bytes( + tx_bytes: &[u8], + block_time: Option, +) -> Result<(), ()> { + let tx = Tx::try_from(tx_bytes).map_err(|_| ())?; + let tx_expiration = tx.expiration; + + if let Ok(TxType::Wrapper(_)) = process_tx(tx) { + // Check tx expiration against proposed block + match (block_time, tx_expiration) { + (Some(block_time), Some(exp)) => { + match TryInto::::try_into(block_time) { + Ok(datetime) if datetime > exp => Err(()), + _ => Ok(()), // If error in conversion, default to last block datetime, it's valid because of mempool check + } + } + // If tx doesn't have an expiration it is valid. 
If time cannot be retrieved from block default to last block datetime which has already been checked by mempool_validate, so it's valid + _ => Ok(()), + } + } else { + return Err(()); + } +} + /// Functions for creating the appropriate TxRecord given the /// numeric code #[cfg(feature = "abcipp")] From e30bd99a8176345cc2a8a2ba7c0ea1e352422a33 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 24 Feb 2023 17:49:52 +0100 Subject: [PATCH 56/58] Misc adjustments --- apps/src/lib/node/ledger/shell/mod.rs | 32 ++++++----- .../lib/node/ledger/shell/prepare_proposal.rs | 20 ++++--- .../lib/node/ledger/shell/process_proposal.rs | 56 ++++++++----------- apps/src/lib/node/ledger/shims/abcipp_shim.rs | 9 +-- core/src/types/transaction/mod.rs | 1 + tests/src/e2e/ledger_tests.rs | 2 +- 6 files changed, 59 insertions(+), 61 deletions(-) diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index e08682c20b..3cef89561c 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -42,16 +42,17 @@ use namada::types::key::*; use namada::types::storage::{BlockHeight, Key, TxIndex}; use namada::types::time::{DateTimeUtc, TimeZone, Utc}; use namada::types::token::{self}; +#[cfg(not(feature = "mainnet"))] +use namada::types::transaction::MIN_FEE; use namada::types::transaction::{ hash_tx, process_tx, verify_decrypted_correctly, AffineCurve, DecryptedTx, - EllipticCurve, PairingEngine, TxError, TxType, MIN_FEE, + EllipticCurve, PairingEngine, TxType, }; use namada::types::{address, hash}; use namada::vm::wasm::{TxCache, VpCache}; use namada::vm::WasmCacheRwAccess; use num_derive::{FromPrimitive, ToPrimitive}; use num_traits::{FromPrimitive, ToPrimitive}; -use tendermint_proto::google::protobuf::Timestamp; use thiserror::Error; use tokio::sync::mpsc::UnboundedSender; @@ -62,6 +63,7 @@ use crate::facade::tendermint_proto::abci::{ Misbehavior as Evidence, MisbehaviorType as EvidenceType, ValidatorUpdate, }; use crate::facade::tendermint_proto::crypto::public_key; +use crate::facade::tendermint_proto::google::protobuf::Timestamp; use crate::facade::tower_abci::{request, response}; use crate::node::ledger::shims::abcipp_shim_types::shim; use crate::node::ledger::shims::abcipp_shim_types::shim::response::TxResult; @@ -124,17 +126,17 @@ impl From for TxResult { #[derive(Debug, Clone, FromPrimitive, ToPrimitive, PartialEq)] pub enum ErrorCodes { Ok = 0, - InvalidTx = 1, - InvalidSig = 2, + InvalidDecryptedChainId = 1, + ExpiredDecryptedTx = 2, WasmRuntimeError = 3, - InvalidOrder = 4, - ExtraTxs = 5, - Undecryptable = 6, - ReplayTx = 7, - InvalidChainId = 8, - InvalidDecryptedChainId = 9, - ExpiredTx = 10, - ExpiredDecryptedTx = 11, + InvalidTx = 4, + InvalidSig = 5, + InvalidOrder = 6, + ExtraTxs = 7, + Undecryptable = 8, + ReplayTx = 9, + InvalidChainId = 10, + ExpiredTx = 11, } impl From for u32 { @@ -394,8 +396,9 @@ where response } - /// Takes the optional tendermint timestamp of the block: if it's Some than converts it to - /// a [`DateTimeUtc`], otherwise retrieve from self the time of the last block committed + /// Takes the optional tendermint timestamp of the block: if it's Some than + /// converts it to a [`DateTimeUtc`], otherwise retrieve from self the + /// time of the last block committed pub fn get_block_timestamp( &self, tendermint_block_time: Option, @@ -1183,6 +1186,7 @@ mod test_utils { /// Test the failure cases of [`mempool_validate`] #[cfg(test)] mod test_mempool_validate { + use namada::proof_of_stake::Epoch; use 
namada::proto::SignedTxData; use namada::types::transaction::{Fee, WrapperTx}; diff --git a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index c63a752088..cfe29cd100 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -12,6 +12,7 @@ use super::super::*; use crate::facade::tendermint_proto::abci::RequestPrepareProposal; #[cfg(feature = "abcipp")] use crate::facade::tendermint_proto::abci::{tx_record::TxAction, TxRecord}; +use crate::facade::tendermint_proto::google::protobuf::Timestamp; use crate::node::ledger::shell::{process_tx, ShellMode}; use crate::node::ledger::shims::abcipp_shim_types::shim::TxBytes; @@ -137,7 +138,7 @@ where fn validate_tx_bytes( tx_bytes: &[u8], - block_time: Option, + block_time: Option, ) -> Result<(), ()> { let tx = Tx::try_from(tx_bytes).map_err(|_| ())?; let tx_expiration = tx.expiration; @@ -148,14 +149,18 @@ fn validate_tx_bytes( (Some(block_time), Some(exp)) => { match TryInto::::try_into(block_time) { Ok(datetime) if datetime > exp => Err(()), - _ => Ok(()), // If error in conversion, default to last block datetime, it's valid because of mempool check + _ => Ok(()), /* If error in conversion, default to last + * block datetime, it's valid because of + * mempool check */ } } - // If tx doesn't have an expiration it is valid. If time cannot be retrieved from block default to last block datetime which has already been checked by mempool_validate, so it's valid + // If tx doesn't have an expiration it is valid. If time cannot be + // retrieved from block default to last block datetime which has + // already been checked by mempool_validate, so it's valid _ => Ok(()), } } else { - return Err(()); + Err(()) } } @@ -196,10 +201,8 @@ pub(super) mod record { mod test_prepare_proposal { use borsh::BorshSerialize; - use namada::{ - proof_of_stake::Epoch, - types::transaction::{Fee, WrapperTx}, - }; + use namada::proof_of_stake::Epoch; + use namada::types::transaction::{Fee, WrapperTx}; use super::*; use crate::node::ledger::shell::test_utils::{gen_keypair, TestShell}; @@ -404,6 +407,7 @@ mod test_prepare_proposal { token: shell.wl_storage.storage.native_token.clone(), }, &keypair, + Epoch(0), 0.into(), tx, Default::default(), diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 50145e397a..72a057bed3 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -40,18 +40,10 @@ where self.process_txs(&req.txs, self.get_block_timestamp(req.time)); ProcessProposal { - status: if tx_results.iter().all(|res| { - matches!( - ErrorCodes::from_u32(res.code).unwrap(), - ErrorCodes::Ok - | ErrorCodes::Undecryptable - | ErrorCodes::InvalidDecryptedChainId - | ErrorCodes::ExpiredDecryptedTx - ) - }) { - ProposalStatus::Accept as i32 - } else { + status: if tx_results.iter().any(|res| res.code > 3) { ProposalStatus::Reject as i32 + } else { + ProposalStatus::Accept as i32 }, tx_results, @@ -215,7 +207,7 @@ where code: ErrorCodes::ExpiredDecryptedTx .into(), info: format!( - "Dercrypted tx expired at {:#?}, block time: {:#?}", + "Decrypted tx expired at {:#?}, block time: {:#?}", exp, block_time ), }; @@ -230,9 +222,8 @@ where .into(), } } else { - // Wrong inner tx commitment TxResult { - code: ErrorCodes::Undecryptable.into(), + code: ErrorCodes::InvalidTx.into(), info: "The encrypted payload of tx was \ incorrectly 
marked as \ un-decryptable" @@ -739,7 +730,7 @@ mod test_process_proposal { ); } - /// Test that a tx incorrectly labelled as undecryptable + /// Test that a block containing a tx incorrectly labelled as undecryptable /// is rejected by [`process_proposal`] #[test] fn test_incorrectly_labelled_as_undecryptable() { @@ -775,23 +766,22 @@ mod test_process_proposal { txs: vec![tx.to_bytes()], }; - let response = if let [resp] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") - }; - assert_eq!(response.result.code, u32::from(ErrorCodes::Undecryptable)); - assert_eq!( - response.result.info, - String::from( - "The encrypted payload of tx was incorrectly marked as \ - un-decryptable" - ), - ) + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::InvalidTx) + ); + assert_eq!( + response[0].result.info, + String::from( + "The encrypted payload of tx was incorrectly marked \ + as un-decryptable" + ), + ) + } + } } /// Test that a wrapper tx whose inner_tx does not have @@ -1418,6 +1408,7 @@ mod test_process_proposal { token: shell.wl_storage.storage.native_token.clone(), }, &keypair, + Epoch(0), 0.into(), tx, Default::default(), @@ -1468,6 +1459,7 @@ mod test_process_proposal { token: shell.wl_storage.storage.native_token.clone(), }, &keypair, + Epoch(0), 0.into(), tx, Default::default(), diff --git a/apps/src/lib/node/ledger/shims/abcipp_shim.rs b/apps/src/lib/node/ledger/shims/abcipp_shim.rs index f8360f9e92..989806497d 100644 --- a/apps/src/lib/node/ledger/shims/abcipp_shim.rs +++ b/apps/src/lib/node/ledger/shims/abcipp_shim.rs @@ -10,7 +10,6 @@ use namada::types::address::Address; use namada::types::hash::Hash; #[cfg(not(feature = "abcipp"))] use namada::types::storage::BlockHash; -use namada::types::time::DateTimeUtc; #[cfg(not(feature = "abcipp"))] use namada::types::transaction::hash_tx; use tokio::sync::mpsc::UnboundedSender; @@ -103,9 +102,8 @@ impl AbcippShim { }), #[cfg(feature = "abcipp")] Req::FinalizeBlock(block) => { - let block_time = self - .service - .get_block_timestamp_from_tendermint(&block.time); + let block_time = + self.service.get_block_timestamp(block.time.clone()); let unprocessed_txs = block.txs.clone(); let processing_results = self.service.process_txs(&block.txs, block_time); @@ -147,8 +145,7 @@ impl AbcippShim { begin_block_request .header .as_ref() - .map(|header| header.time.to_owned()) - .flatten(), + .and_then(|header| header.time.to_owned()), ); let processing_results = self diff --git a/core/src/types/transaction/mod.rs b/core/src/types/transaction/mod.rs index b8eba6fa0d..dc53125617 100644 --- a/core/src/types/transaction/mod.rs +++ b/core/src/types/transaction/mod.rs @@ -354,6 +354,7 @@ pub mod tx_types { mod test_process_tx { use super::*; use crate::types::address::nam; + use crate::types::storage::Epoch; use crate::types::time::DateTimeUtc; fn gen_keypair() -> common::SecretKey { diff --git a/tests/src/e2e/ledger_tests.rs b/tests/src/e2e/ledger_tests.rs index 9437103546..daf643945c 100644 --- a/tests/src/e2e/ledger_tests.rs +++ b/tests/src/e2e/ledger_tests.rs @@ -1704,7 +1704,7 @@ fn invalid_transactions() -> Result<()> { client.exp_string("Transaction accepted")?; client.exp_string("Transaction applied")?; client.exp_string("Transaction is invalid")?; - client.exp_string(r#""code": "1"#)?; + client.exp_string(r#""code": "4"#)?; 
client.assert_success(); let mut ledger = bg_ledger.foreground(); From a11a3d1bf7b81c76efdc6c0b076fe55950722535 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Fri, 24 Feb 2023 17:19:13 +0000 Subject: [PATCH 57/58] [ci] wasm checksums update --- wasm/checksums.json | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/wasm/checksums.json b/wasm/checksums.json index bbd61b7a29..73da0adf42 100644 --- a/wasm/checksums.json +++ b/wasm/checksums.json @@ -1,20 +1,20 @@ { - "tx_bond.wasm": "tx_bond.cd75887e287f0228576f3555a70a8cd7e82587ea336a1b8494f805719e303ba0.wasm", - "tx_change_validator_commission.wasm": "tx_change_validator_commission.860609e27cdc77107966e8a8964cc98cf4d8a8f58e145c39824c3215547243a9.wasm", - "tx_ibc.wasm": "tx_ibc.9325c23a937267b53177e1d49b94ad1aa140f29df50e43dbe55b025b8cccf245.wasm", - "tx_init_account.wasm": "tx_init_account.290c2e570e265112a9026e00a5b172750b4a9a463979d2fc9a8ca573f5f345fa.wasm", - "tx_init_proposal.wasm": "tx_init_proposal.5e15f66b9cf2b10d98b3744a0f49137af35237e953ea8a709e90a4ff86c29ebb.wasm", - "tx_init_validator.wasm": "tx_init_validator.0cc9413e5ee774ccbef9fca5869587cf1c067927e4b33cdaf4f336f950cfb49d.wasm", + "tx_bond.wasm": "tx_bond.b9a0da9c86f6a86ec3189600467c057f5fc05fbc976357edfc862a2e489a1f95.wasm", + "tx_change_validator_commission.wasm": "tx_change_validator_commission.e3f89b2e0389dcd557d09adb0e4604519fb62b5b5d0295405445ac579d561d45.wasm", + "tx_ibc.wasm": "tx_ibc.dcc58076014465932afdc4169b36e06dfd73541be6202545b5af18491b569bfa.wasm", + "tx_init_account.wasm": "tx_init_account.0319de11929e66e6ae3c6d06695ad3328a1105a1241e07d3410c61cc3ec95667.wasm", + "tx_init_proposal.wasm": "tx_init_proposal.05737688353f5a008cd2bfc0592b1d7df38212b6feed7cac55b9166de5e6fefe.wasm", + "tx_init_validator.wasm": "tx_init_validator.a9e941f7dd226fe4768d9754bfc779e97301fdf002f1abc09b0025320050bcca.wasm", "tx_reveal_pk.wasm": "tx_reveal_pk.6d4a8adf662ba52c44dcd66fca0240e717b7b1d01949a736f41bcb464a746aee.wasm", - "tx_transfer.wasm": "tx_transfer.2f2e492b45b90ca7c32c8748116e62e60ad0885489da4d2356d6269a8128dc61.wasm", - "tx_unbond.wasm": "tx_unbond.119a32741d39f9cba683f8ca9c4ce9356bc1094d22d7e902e43c47f84e1ff144.wasm", - "tx_update_vp.wasm": "tx_update_vp.ccacec9b1afd97144022f1d5ae05c7d12f31ef3a2f08fe57bdbb357d34bc1dbf.wasm", - "tx_vote_proposal.wasm": "tx_vote_proposal.97882252913e386a82e7e1d7381122856b467cedb36c8abe98d44e15b3ef5bd7.wasm", - "tx_withdraw.wasm": "tx_withdraw.0092f99316e60cda347d262169f6ff7dc85a6aa2af3082121c259e4f98c43497.wasm", - "vp_implicit.wasm": "vp_implicit.d418d65c79f666263d24c5a85d32716b502c31d04c88ce1b63538a7f6237b66c.wasm", - "vp_masp.wasm": "vp_masp.42eabefb4be329d315c28b8ee830e7069b8b11d3793b0d7b59a9541f3f158d3f.wasm", - "vp_testnet_faucet.wasm": "vp_testnet_faucet.4ce1d9281ad0286a844fe81b81be19718c97fff8712bf09cd217723be646306c.wasm", - "vp_token.wasm": "vp_token.a8e7cff8c487ee4da2178db7b1b8285a3388bd63490ca4e66ec15b484aa9c982.wasm", - "vp_user.wasm": "vp_user.ebb5f0c15718622a1643b4fce61ae899c662d37857e83172b5fa4bc8c07b3678.wasm", - "vp_validator.wasm": "vp_validator.633839364ce085dc9163ee694c6046ebab6840bf727dc8f4a354b1067814d212.wasm" + "tx_transfer.wasm": "tx_transfer.764f952723f8ba9afd30681d429bde10fba0057904f3a627435495a71b41b2d3.wasm", + "tx_unbond.wasm": "tx_unbond.3e3edeffc468feb86a8d71c7d8509e2e794bf8ee854cd3d88348065e16c1f0bd.wasm", + "tx_update_vp.wasm": "tx_update_vp.f2d056de2f44e5cd4b80afcc084ec9e5bb644073ee12b92aa692c415a3f01b56.wasm", + "tx_vote_proposal.wasm": 
"tx_vote_proposal.fb17c4de2b16d6c8896da61e0ff62324b4f710e5bfa16bb4461ae033605deede.wasm", + "tx_withdraw.wasm": "tx_withdraw.aa08f290a6203910614e73b219b4e4b2da17ca085c3ac6c989f4dbd604bb450c.wasm", + "vp_implicit.wasm": "vp_implicit.2fb9f3003b5e37966831c0939bee272a1aebb06dafd51c77608a264b30c429b5.wasm", + "vp_masp.wasm": "vp_masp.0b3d000cbcc22a1011138a915f15acb3122d9445b03e63d2aea8d45beec56db6.wasm", + "vp_testnet_faucet.wasm": "vp_testnet_faucet.55766c78656bec0bdf97b65ffa2221865477273d0b3257b12ed1f9d3caade45e.wasm", + "vp_token.wasm": "vp_token.272908823cdc4697951fd16c40becccc12ea625de0ef6c8810264669bc538605.wasm", + "vp_user.wasm": "vp_user.74e4fd3d9fb6c5ff9b03452d1776466931f48d3bb6d63dea71ea4a2dfd21a9d0.wasm", + "vp_validator.wasm": "vp_validator.355e2b24e7508c7c1f7877ce31252aaedc044cc4dbb2d5251fe85dca7c768fc2.wasm" } \ No newline at end of file From 76ad54b53c0170e6f653db13f439a5fedcdb87bd Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Tue, 7 Feb 2023 14:09:26 +0100 Subject: [PATCH 58/58] changelog: add #1123 --- .changelog/unreleased/features/1123-tx-lifetime.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/features/1123-tx-lifetime.md diff --git a/.changelog/unreleased/features/1123-tx-lifetime.md b/.changelog/unreleased/features/1123-tx-lifetime.md new file mode 100644 index 0000000000..44b51be3f0 --- /dev/null +++ b/.changelog/unreleased/features/1123-tx-lifetime.md @@ -0,0 +1,2 @@ +- Adds expiration field to transactions + ([#1123](https://github.com/anoma/namada/pull/1123)) \ No newline at end of file