diff --git a/.changelog/unreleased/improvements/1017-replay-protection-impl.md b/.changelog/unreleased/improvements/1017-replay-protection-impl.md new file mode 100644 index 0000000000..1783a89251 --- /dev/null +++ b/.changelog/unreleased/improvements/1017-replay-protection-impl.md @@ -0,0 +1,2 @@ +- Adds hash-based replay protection + ([#1017](https://github.com/anoma/namada/pull/1017)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/1051-temp-wl-storage.md b/.changelog/unreleased/improvements/1051-temp-wl-storage.md new file mode 100644 index 0000000000..5be4294bd6 --- /dev/null +++ b/.changelog/unreleased/improvements/1051-temp-wl-storage.md @@ -0,0 +1,3 @@ +- Added a TempWlStorage for storage_api::StorageRead/Write + in ABCI++ prepare/process proposal handler. + ([#1051](https://github.com/anoma/namada/pull/1051)) \ No newline at end of file diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 336c4d9415..e625835f43 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -2,8 +2,9 @@ use namada::ledger::pos::namada_proof_of_stake; use namada::ledger::pos::types::into_tm_voting_power; -use namada::ledger::protocol; use namada::ledger::storage_api::StorageRead; +use namada::ledger::{protocol, replay_protection}; +use namada::types::hash; use namada::types::storage::{BlockHash, BlockResults, Header}; use namada::types::token::Amount; @@ -145,17 +146,49 @@ where tx_event["gas_used"] = "0".into(); response.events.push(tx_event); // if the rejected tx was decrypted, remove it - // from the queue of txs to be processed + // from the queue of txs to be processed and remove the hash + // from storage if let TxType::Decrypted(_) = &tx_type { - self.wl_storage.storage.tx_queue.pop(); + let tx_hash = self + .wl_storage + .storage + .tx_queue + .pop() + .expect("Missing wrapper tx in queue") + .tx + .tx_hash; + let tx_hash_key = + replay_protection::get_tx_hash_key(&tx_hash); + self.wl_storage + .storage + .delete(&tx_hash_key) + .expect("Error while deleting tx hash from storage"); } continue; } - let mut tx_event = match &tx_type { + let (mut tx_event, tx_unsigned_hash) = match &tx_type { TxType::Wrapper(wrapper) => { let mut tx_event = Event::new_tx_event(&tx_type, height.0); + // Writes both txs hash to storage + let tx = Tx::try_from(processed_tx.tx.as_ref()).unwrap(); + let wrapper_tx_hash_key = + replay_protection::get_tx_hash_key(&hash::Hash( + tx.unsigned_hash(), + )); + self.wl_storage + .storage + .write(&wrapper_tx_hash_key, vec![]) + .expect("Error while writing tx hash to storage"); + + let inner_tx_hash_key = + replay_protection::get_tx_hash_key(&wrapper.tx_hash); + self.wl_storage + .storage + .write(&inner_tx_hash_key, vec![]) + .expect("Error while writing tx hash to storage"); + #[cfg(not(feature = "mainnet"))] let has_valid_pow = self.invalidate_pow_solution_if_valid(wrapper); @@ -216,11 +249,18 @@ where #[cfg(not(feature = "mainnet"))] has_valid_pow, }); - tx_event + (tx_event, None) } TxType::Decrypted(inner) => { // We remove the corresponding wrapper tx from the queue - self.wl_storage.storage.tx_queue.pop(); + let wrapper_hash = self + .wl_storage + .storage + .tx_queue + .pop() + .expect("Missing wrapper tx in queue") + .tx + .tx_hash; let mut event = Event::new_tx_event(&tx_type, height.0); match inner { @@ -239,8 +279,7 @@ where event["code"] = ErrorCodes::Undecryptable.into(); } } - - event + 
(event, Some(wrapper_hash)) } TxType::Raw(_) => { tracing::error!( @@ -333,6 +372,25 @@ where msg ); stats.increment_errored_txs(); + + // If transaction type is Decrypted and failed because of + // out of gas, remove its hash from storage to allow + // rewrapping it + if let Some(hash) = tx_unsigned_hash { + if let Error::TxApply(protocol::Error::GasError(namada::ledger::gas::Error::TransactionGasExceededError)) = + msg + { + let tx_hash_key = + replay_protection::get_tx_hash_key(&hash); + self.wl_storage + .storage + .delete(&tx_hash_key) + .expect( + "Error while deleting tx hash key from storage", + ); + } + } + self.wl_storage.drop_tx(); tx_event["gas_used"] = self .gas_meter @@ -691,7 +749,7 @@ mod test_finalize_block { let mut processed_txs = vec![]; let mut valid_txs = vec![]; - // Add unshielded balance for fee paymenty + // Add unshielded balance for fee payment let balance_key = token::balance_key( &shell.wl_storage.storage.native_token, &Address::from(&keypair.ref_to()), @@ -915,4 +973,79 @@ mod test_finalize_block { last_storage_state = store_block_state(&shell); } } + + /// Test that if a decrypted transaction fails because of out-of-gas, its + /// hash is removed from storage to allow rewrapping it + #[test] + fn test_remove_tx_hash() { + let (mut shell, _) = setup(); + let keypair = gen_keypair(); + + let mut wasm_path = top_level_directory(); + wasm_path.push("wasm_for_tests/tx_no_op.wasm"); + let tx_code = std::fs::read(wasm_path) + .expect("Expected a file at given code path"); + let raw_tx = Tx::new( + tx_code, + Some("Encrypted transaction data".as_bytes().to_owned()), + ); + let wrapper_tx = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + raw_tx.clone(), + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ); + + // Write inner hash in storage + let inner_hash_key = + replay_protection::get_tx_hash_key(&wrapper_tx.tx_hash); + shell + .wl_storage + .storage + .write(&inner_hash_key, vec![]) + .expect("Test failed"); + + let processed_tx = ProcessedTx { + tx: Tx::from(TxType::Decrypted(DecryptedTx::Decrypted { + tx: raw_tx, + #[cfg(not(feature = "mainnet"))] + has_valid_pow: false, + })) + .to_bytes(), + result: TxResult { + code: ErrorCodes::Ok.into(), + info: "".into(), + }, + }; + shell.enqueue_tx(wrapper_tx); + + let _event = &shell + .finalize_block(FinalizeBlock { + txs: vec![processed_tx], + ..Default::default() + }) + .expect("Test failed")[0]; + + // FIXME: uncomment when proper gas metering is in place + // // Check inner tx hash has been removed from storage + // assert_eq!(event.event_type.to_string(), String::from("applied")); + // let code = event.attributes.get("code").expect("Test + // failed").as_str(); assert_eq!(code, + // String::from(ErrorCodes::WasmRuntimeError).as_str()); + + // assert!( + // !shell + // .storage + // .has_key(&inner_hash_key) + // .expect("Test failed") + // .0 + // ) + } } diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index bd95285a08..97771527e1 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -32,10 +32,9 @@ use namada::ledger::storage::{ DBIter, Sha256Hasher, Storage, StorageHasher, WlStorage, DB, }; use namada::ledger::storage_api::{self, StorageRead}; -use namada::ledger::{ibc, pos, protocol}; +use namada::ledger::{ibc, pos, protocol, replay_protection}; use namada::proof_of_stake::{self, read_pos_params, slash}; use 
namada::proto::{self, Tx}; -use namada::types::address; use namada::types::address::{masp, masp_tx_key, Address}; use namada::types::chain::ChainId; use namada::types::internal::WrapperTxInQueue; @@ -47,6 +46,7 @@ use namada::types::transaction::{ hash_tx, process_tx, verify_decrypted_correctly, AffineCurve, DecryptedTx, EllipticCurve, PairingEngine, TxType, MIN_FEE, }; +use namada::types::{address, hash}; use namada::vm::wasm::{TxCache, VpCache}; use namada::vm::WasmCacheRwAccess; use num_derive::{FromPrimitive, ToPrimitive}; @@ -129,6 +129,7 @@ pub enum ErrorCodes { InvalidOrder = 4, ExtraTxs = 5, Undecryptable = 6, + ReplayTx = 7, } impl From for u32 { @@ -578,49 +579,113 @@ where /// Validate a transaction request. On success, the transaction will /// included in the mempool and propagated to peers, otherwise it will be /// rejected. + /// + /// Error codes: + /// 0: Ok + /// 1: Invalid tx + /// 2: Tx is invalidly signed + /// 7: Replay attack pub fn mempool_validate( &self, tx_bytes: &[u8], r#_type: MempoolTxType, ) -> response::CheckTx { let mut response = response::CheckTx::default(); - match Tx::try_from(tx_bytes).map_err(Error::TxDecoding) { - Ok(tx) => { - // Check balance for fee - if let Ok(TxType::Wrapper(wrapper)) = process_tx(tx) { - let fee_payer = if wrapper.pk != masp_tx_key().ref_to() { - wrapper.fee_payer() - } else { - masp() - }; - // check that the fee payer has sufficient balance - let balance = - self.get_balance(&wrapper.fee.token, &fee_payer); - - // In testnets with a faucet, tx is allowed to skip fees if - // it includes a valid PoW - #[cfg(not(feature = "mainnet"))] - let has_valid_pow = self.has_valid_pow_solution(&wrapper); - #[cfg(feature = "mainnet")] - let has_valid_pow = false; - - if !has_valid_pow && self.get_wrapper_tx_fees() > balance { - response.code = 1; - response.log = String::from( - "The address given does not have sufficient \ - balance to pay fee", - ); - return response; - } - } - response.log = String::from("Mempool validation passed"); + // Tx format check + let tx = match Tx::try_from(tx_bytes).map_err(Error::TxDecoding) { + Ok(t) => t, + Err(msg) => { + response.code = ErrorCodes::InvalidTx.into(); + response.log = msg.to_string(); + return response; } + }; + + // Tx signature check + let tx_type = match process_tx(tx) { + Ok(ty) => ty, Err(msg) => { - response.code = 1; + response.code = ErrorCodes::InvalidSig.into(); response.log = msg.to_string(); + return response; } + }; + + // Tx type check + if let TxType::Wrapper(wrapper) = tx_type { + // Replay protection check + let inner_hash_key = + replay_protection::get_tx_hash_key(&wrapper.tx_hash); + if self + .wl_storage + .storage + .has_key(&inner_hash_key) + .expect("Error while checking inner tx hash key in storage") + .0 + { + response.code = ErrorCodes::ReplayTx.into(); + response.log = format!( + "Inner transaction hash {} already in storage, replay \ + attempt", + wrapper.tx_hash + ); + return response; + } + + let tx = + Tx::try_from(tx_bytes).expect("Deserialization shouldn't fail"); + let wrapper_hash = hash::Hash(tx.unsigned_hash()); + let wrapper_hash_key = + replay_protection::get_tx_hash_key(&wrapper_hash); + if self + .wl_storage + .storage + .has_key(&wrapper_hash_key) + .expect("Error while checking wrapper tx hash key in storage") + .0 + { + response.code = ErrorCodes::ReplayTx.into(); + response.log = format!( + "Wrapper transaction hash {} already in storage, replay \ + attempt", + wrapper_hash + ); + return response; + } + + // Check balance for fee + let fee_payer 
= if wrapper.pk != masp_tx_key().ref_to() { + wrapper.fee_payer() + } else { + masp() + }; + // check that the fee payer has sufficient balance + let balance = self.get_balance(&wrapper.fee.token, &fee_payer); + + // In testnets with a faucet, tx is allowed to skip fees if + // it includes a valid PoW + #[cfg(not(feature = "mainnet"))] + let has_valid_pow = self.has_valid_pow_solution(&wrapper); + #[cfg(feature = "mainnet")] + let has_valid_pow = false; + + if !has_valid_pow && self.get_wrapper_tx_fees() > balance { + response.code = ErrorCodes::InvalidTx.into(); + response.log = String::from( + "The given address does not have a sufficient balance to \ + pay fee", + ); + return response; + } + } else { + response.code = ErrorCodes::InvalidTx.into(); + response.log = "Unsupported tx type".to_string(); + return response; } + + response.log = "Mempool validation passed".to_string(); + response } @@ -1069,3 +1134,270 @@ mod test_utils { assert!(!shell.wl_storage.storage.tx_queue.is_empty()); } } + +/// Test the failure cases of [`mempool_validate`] +#[cfg(test)] +mod test_mempool_validate { + use namada::proto::SignedTxData; + use namada::types::storage::Epoch; + use namada::types::transaction::{Fee, WrapperTx}; + + use super::test_utils::TestShell; + use super::{MempoolTxType, *}; + + /// Mempool validation must reject unsigned wrappers + #[test] + fn test_missing_signature() { + let (shell, _) = TestShell::new(); + + let keypair = super::test_utils::gen_keypair(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + ); + + let mut wrapper = WrapperTx::new( + Fee { + amount: 100.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx, + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ) + .sign(&keypair) + .expect("Wrapper signing failed"); + + let unsigned_wrapper = if let Some(Ok(SignedTxData { + data: Some(data), + sig: _, + })) = wrapper + .data + .take() + .map(|data| SignedTxData::try_from_slice(&data[..])) + { + Tx::new(vec![], Some(data)) + } else { + panic!("Test failed") + }; + + let mut result = shell.mempool_validate( + unsigned_wrapper.to_bytes().as_ref(), + MempoolTxType::NewTransaction, + ); + assert_eq!(result.code, u32::from(ErrorCodes::InvalidSig)); + result = shell.mempool_validate( + unsigned_wrapper.to_bytes().as_ref(), + MempoolTxType::RecheckTransaction, + ); + assert_eq!(result.code, u32::from(ErrorCodes::InvalidSig)); + } + + /// Mempool validation must reject wrappers with an invalid signature + #[test] + fn test_invalid_signature() { + let (shell, _) = TestShell::new(); + + let keypair = super::test_utils::gen_keypair(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + ); + + let mut wrapper = WrapperTx::new( + Fee { + amount: 100.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx, + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ) + .sign(&keypair) + .expect("Wrapper signing failed"); + + let invalid_wrapper = if let Some(Ok(SignedTxData { + data: Some(data), + sig, + })) = wrapper + .data + .take() + .map(|data| SignedTxData::try_from_slice(&data[..])) + { + let mut new_wrapper = if let TxType::Wrapper(wrapper) = + ::deserialize(&mut data.as_ref()) + .expect("Test failed") + { + wrapper + } else { + panic!("Test failed") + }; + + // we mount a malleability attack to try and remove the fee + 
new_wrapper.fee.amount = 0.into(); + let new_data = TxType::Wrapper(new_wrapper) + .try_to_vec() + .expect("Test failed"); + Tx::new( + vec![], + Some( + SignedTxData { + sig, + data: Some(new_data), + } + .try_to_vec() + .expect("Test failed"), + ), + ) + } else { + panic!("Test failed"); + }; + + let mut result = shell.mempool_validate( + invalid_wrapper.to_bytes().as_ref(), + MempoolTxType::NewTransaction, + ); + assert_eq!(result.code, u32::from(ErrorCodes::InvalidSig)); + result = shell.mempool_validate( + invalid_wrapper.to_bytes().as_ref(), + MempoolTxType::RecheckTransaction, + ); + assert_eq!(result.code, u32::from(ErrorCodes::InvalidSig)); + } + + /// Mempool validation must reject non-wrapper txs + #[test] + fn test_wrong_tx_type() { + let (shell, _) = TestShell::new(); + + // Test Raw TxType + let tx = Tx::new("wasm_code".as_bytes().to_owned(), None); + + let result = shell.mempool_validate( + tx.to_bytes().as_ref(), + MempoolTxType::NewTransaction, + ); + assert_eq!(result.code, u32::from(ErrorCodes::InvalidTx)); + assert_eq!(result.log, "Unsupported tx type") + } + + /// Mempool validation must reject already applied wrapper and decrypted + /// transactions + #[test] + fn test_replay_attack() { + let (mut shell, _) = TestShell::new(); + + let keypair = super::test_utils::gen_keypair(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + ); + + let wrapper = WrapperTx::new( + Fee { + amount: 100.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx, + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ) + .sign(&keypair) + .expect("Wrapper signing failed"); + + let tx_type = match process_tx(wrapper.clone()).expect("Test failed") { + TxType::Wrapper(t) => t, + _ => panic!("Test failed"), + }; + + // Write wrapper hash to storage + let wrapper_hash = hash::Hash(wrapper.unsigned_hash()); + let wrapper_hash_key = + replay_protection::get_tx_hash_key(&wrapper_hash); + shell + .wl_storage + .storage + .write(&wrapper_hash_key, &wrapper_hash) + .expect("Test failed"); + + // Try wrapper tx replay attack + let result = shell.mempool_validate( + wrapper.to_bytes().as_ref(), + MempoolTxType::NewTransaction, + ); + assert_eq!(result.code, u32::from(ErrorCodes::ReplayTx)); + assert_eq!( + result.log, + format!( + "Wrapper transaction hash {} already in storage, replay \ + attempt", + wrapper_hash + ) + ); + + let result = shell.mempool_validate( + wrapper.to_bytes().as_ref(), + MempoolTxType::RecheckTransaction, + ); + assert_eq!(result.code, u32::from(ErrorCodes::ReplayTx)); + assert_eq!( + result.log, + format!( + "Wrapper transaction hash {} already in storage, replay \ + attempt", + wrapper_hash + ) + ); + + // Write inner hash in storage + let inner_hash_key = + replay_protection::get_tx_hash_key(&tx_type.tx_hash); + shell + .wl_storage + .storage + .write(&inner_hash_key, &tx_type.tx_hash) + .expect("Test failed"); + + // Try inner tx replay attack + let result = shell.mempool_validate( + wrapper.to_bytes().as_ref(), + MempoolTxType::NewTransaction, + ); + assert_eq!(result.code, u32::from(ErrorCodes::ReplayTx)); + assert_eq!( + result.log, + format!( + "Inner transaction hash {} already in storage, replay attempt", + tx_type.tx_hash + ) + ); + + let result = shell.mempool_validate( + wrapper.to_bytes().as_ref(), + MempoolTxType::RecheckTransaction, + ); + assert_eq!(result.code, u32::from(ErrorCodes::ReplayTx)); + assert_eq!( + result.log, + format!( + 
"Inner transaction hash {} already in storage, replay attempt", + tx_type.tx_hash + ) + ) + } +} diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index 11deec9e13..5a3a2ebc59 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -1,6 +1,8 @@ //! Implementation of the ['VerifyHeader`], [`ProcessProposal`], //! and [`RevertProposal`] ABCI++ methods for the Shell +use namada::core::types::hash::Hash; +use namada::ledger::storage::TempWlStorage; use namada::types::internal::WrapperTxInQueue; use super::*; @@ -21,10 +23,15 @@ where Default::default() } - /// Check all the txs in a block. Some txs may be incorrect, - /// but we only reject the entire block if the order of the - /// included txs violates the order decided upon in the previous - /// block. + /// Check all the txs in a block. + /// We reject the entire block when: + /// - decrypted txs violate the committed order + /// - more decrypted txs than expected + /// - checks on wrapper tx fail + /// + /// We cannot reject the block for failed checks on the decrypted txs since + /// their order has already been committed in storage, so we simply discard + /// the single invalid inner tx pub fn process_proposal( &self, req: RequestProcessProposal, @@ -32,11 +39,17 @@ where let tx_results = self.process_txs(&req.txs); ProcessProposal { - status: if tx_results.iter().any(|res| res.code > 3) { - ProposalStatus::Reject as i32 - } else { + status: if tx_results.iter().all(|res| { + matches!( + ErrorCodes::from_u32(res.code).unwrap(), + ErrorCodes::Ok | ErrorCodes::Undecryptable + ) + }) { ProposalStatus::Accept as i32 + } else { + ProposalStatus::Reject as i32 }, + tx_results, } } @@ -44,9 +57,22 @@ where /// Check all the given txs. pub fn process_txs(&self, txs: &[Vec]) -> Vec { let mut tx_queue_iter = self.wl_storage.storage.tx_queue.iter(); + let mut temp_wl_storage = TempWlStorage::new(&self.wl_storage.storage); txs.iter() .map(|tx_bytes| { - self.process_single_tx(tx_bytes, &mut tx_queue_iter) + let result = self.process_single_tx( + tx_bytes, + &mut tx_queue_iter, + &mut temp_wl_storage, + ); + if let ErrorCodes::Ok = + ErrorCodes::from_u32(result.code).unwrap() + { + temp_wl_storage.write_log.commit_tx(); + } else { + temp_wl_storage.write_log.drop_tx(); + } + result }) .collect() } @@ -64,7 +90,9 @@ where /// 2: Tx is invalidly signed /// 3: Wasm runtime error /// 4: Invalid order of decrypted txs - /// 5. More decrypted txs than expected + /// 5: More decrypted txs than expected + /// 6: Undecryptable inner tx + /// 7: Replay attack /// /// INVARIANT: Any changes applied in this method must be reverted if the /// proposal is rejected (unless we can simply overwrite them in the @@ -73,6 +101,7 @@ where &self, tx_bytes: &[u8], tx_queue_iter: &mut impl Iterator, + temp_wl_storage: &mut TempWlStorage, ) -> TxResult { let tx = match Tx::try_from(tx_bytes) { Ok(tx) => tx, @@ -107,45 +136,46 @@ where is coming soon to a blockchain near you. Patience." 
.into(), }, - TxType::Decrypted(tx) => match tx_queue_iter.next() { - Some(WrapperTxInQueue { - tx: wrapper, - #[cfg(not(feature = "mainnet"))] - has_valid_pow: _, - }) => { - if wrapper.tx_hash != tx.hash_commitment() { - TxResult { - code: ErrorCodes::InvalidOrder.into(), - info: "Process proposal rejected a decrypted \ - transaction that violated the tx order \ - determined in the previous block" - .into(), - } - } else if verify_decrypted_correctly(&tx, privkey) { - TxResult { - code: ErrorCodes::Ok.into(), - info: "Process Proposal accepted this \ - transaction" - .into(), - } - } else { - TxResult { - code: ErrorCodes::InvalidTx.into(), - info: "The encrypted payload of tx was \ - incorrectly marked as un-decryptable" - .into(), + TxType::Decrypted(tx) => { + match tx_queue_iter.next() { + Some(wrapper) => { + if wrapper.tx.tx_hash != tx.hash_commitment() { + TxResult { + code: ErrorCodes::InvalidOrder.into(), + info: "Process proposal rejected a \ + decrypted transaction that \ + violated the tx order determined \ + in the previous block" + .into(), + } + } else if verify_decrypted_correctly(&tx, privkey) { + TxResult { + code: ErrorCodes::Ok.into(), + info: "Process Proposal accepted this \ + transaction" + .into(), + } + } else { + // Wrong inner tx commitment + TxResult { + code: ErrorCodes::Undecryptable.into(), + info: "The encrypted payload of tx was \ + incorrectly marked as \ + un-decryptable" + .into(), + } } } + None => TxResult { + code: ErrorCodes::ExtraTxs.into(), + info: "Received more decrypted txs than expected" + .into(), + }, } - None => TxResult { - code: ErrorCodes::ExtraTxs.into(), - info: "Received more decrypted txs than expected" - .into(), - }, - }, - TxType::Wrapper(tx) => { + } + TxType::Wrapper(wrapper) => { // validate the ciphertext via Ferveo - if !tx.validate_ciphertext() { + if !wrapper.validate_ciphertext() { TxResult { code: ErrorCodes::InvalidTx.into(), info: format!( @@ -155,23 +185,78 @@ where ), } } else { + // Replay protection checks + let inner_hash_key = replay_protection::get_tx_hash_key( + &wrapper.tx_hash, + ); + if temp_wl_storage.has_key(&inner_hash_key).expect( + "Error while checking inner tx hash key in storage", + ) { + return TxResult { + code: ErrorCodes::ReplayTx.into(), + info: format!( + "Inner transaction hash {} already in \ + storage, replay attempt", + &wrapper.tx_hash + ), + }; + } + + // Write inner hash to WAL + temp_wl_storage + .write_log + .write(&inner_hash_key, vec![]) + .expect( + "Couldn't write inner transaction hash to \ + write log", + ); + + let tx = Tx::try_from(tx_bytes) + .expect("Deserialization shouldn't fail"); + let wrapper_hash = Hash(tx.unsigned_hash()); + let wrapper_hash_key = + replay_protection::get_tx_hash_key(&wrapper_hash); + if temp_wl_storage.has_key(&wrapper_hash_key).expect( + "Error while checking wrapper tx hash key in \ + storage", + ) { + return TxResult { + code: ErrorCodes::ReplayTx.into(), + info: format!( + "Wrapper transaction hash {} already in \ + storage, replay attempt", + wrapper_hash + ), + }; + } + + // Write wrapper hash to WAL + temp_wl_storage + .write_log + .write(&wrapper_hash_key, vec![]) + .expect( + "Couldn't write wrapper tx hash to write log", + ); + // If the public key corresponds to the MASP sentinel // transaction key, then the fee payer is effectively // the MASP, otherwise derive // they payer from public key. 
- let fee_payer = if tx.pk != masp_tx_key().ref_to() { - tx.fee_payer() + let fee_payer = if wrapper.pk != masp_tx_key().ref_to() + { + wrapper.fee_payer() } else { masp() }; // check that the fee payer has sufficient balance let balance = - self.get_balance(&tx.fee.token, &fee_payer); + self.get_balance(&wrapper.fee.token, &fee_payer); // In testnets, tx is allowed to skip fees if it // includes a valid PoW #[cfg(not(feature = "mainnet"))] - let has_valid_pow = self.has_valid_pow_solution(&tx); + let has_valid_pow = + self.has_valid_pow_solution(&wrapper); #[cfg(feature = "mainnet")] let has_valid_pow = false; @@ -187,7 +272,7 @@ where } else { TxResult { code: ErrorCodes::InvalidTx.into(), - info: "The address given does not have \ + info: "The given address does not have a \ sufficient balance to pay fee" .into(), } @@ -226,7 +311,7 @@ mod test_process_proposal { gen_keypair, ProcessProposal, TestError, TestShell, }; - /// Test that if a wrapper tx is not signed, it is rejected + /// Test that if a wrapper tx is not signed, the block is rejected /// by [`process_proposal`]. #[test] fn test_unsigned_wrapper_rejected() { @@ -259,23 +344,23 @@ mod test_process_proposal { txs: vec![tx.clone()], }; - let response = if let [resp] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") - }; - assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidSig)); - assert_eq!( - response.result.info, - String::from("Wrapper transactions must be signed") - ); + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::InvalidSig) + ); + assert_eq!( + response[0].result.info, + String::from("Wrapper transactions must be signed") + ); + } + } } - /// Test that a wrapper tx with invalid signature is rejected + /// Test that a block including a wrapper tx with invalid signature is + /// rejected #[test] fn test_wrapper_bad_signature_rejected() { let (mut shell, _) = TestShell::new(); @@ -340,27 +425,28 @@ mod test_process_proposal { let request = ProcessProposal { txs: vec![new_tx.to_bytes()], }; - let response = if let [response] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - response.clone() - } else { - panic!("Test failed") - }; - let expected_error = "Signature verification failed: Invalid signature"; - assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidSig)); - assert!( - response.result.info.contains(expected_error), - "Result info {} doesn't contain the expected error {}", - response.result.info, - expected_error - ); + + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + let expected_error = + "Signature verification failed: Invalid signature"; + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::InvalidSig) + ); + assert!( + response[0].result.info.contains(expected_error), + "Result info {} doesn't contain the expected error {}", + response[0].result.info, + expected_error + ); + } + } } /// Test that if the account submitting the tx is not known and the fee is - /// non-zero, [`process_proposal`] rejects that tx + /// non-zero, [`process_proposal`] rejects that block #[test] fn test_wrapper_unknown_address() { let (mut shell, _) = TestShell::new(); @@ -387,26 +473,27 @@ mod test_process_proposal { let request = ProcessProposal { txs: vec![wrapper.to_bytes()], }; 
- let response = if let [resp] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") - }; - assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidTx)); - assert_eq!( - response.result.info, - "The address given does not have sufficient balance to pay fee" - .to_string(), - ); + + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::InvalidTx) + ); + assert_eq!( + response[0].result.info, + "The given address does not have a sufficient balance to \ + pay fee" + .to_string(), + ); + } + } } /// Test that if the account submitting the tx does /// not have sufficient balance to pay the fee, - /// [`process_proposal`] rejects that tx + /// [`process_proposal`] rejects the entire block #[test] fn test_wrapper_insufficient_balance_address() { let (mut shell, _) = TestShell::new(); @@ -446,22 +533,22 @@ mod test_process_proposal { txs: vec![wrapper.to_bytes()], }; - let response = if let [resp] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") - }; - assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidTx)); - assert_eq!( - response.result.info, - String::from( - "The address given does not have sufficient balance to pay fee" - ) - ); + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::InvalidTx) + ); + assert_eq!( + response[0].result.info, + String::from( + "The given address does not have a sufficient balance \ + to pay fee" + ) + ); + } + } } /// Test that if the expected order of decrypted txs is @@ -577,7 +664,7 @@ mod test_process_proposal { } else { panic!("Test failed") }; - assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidTx)); + assert_eq!(response.result.code, u32::from(ErrorCodes::Undecryptable)); assert_eq!( response.result.info, String::from( @@ -734,7 +821,7 @@ mod test_process_proposal { ); } - /// Process Proposal should reject a RawTx, but not panic + /// Process Proposal should reject a block containing a RawTx, but not panic #[test] fn test_raw_tx_rejected() { let (mut shell, _) = TestShell::new(); @@ -747,22 +834,295 @@ mod test_process_proposal { let request = ProcessProposal { txs: vec![tx.to_bytes()], }; - let response = if let [resp] = shell - .process_proposal(request) - .expect("Test failed") - .as_slice() - { - resp.clone() - } else { - panic!("Test failed") + + match shell.process_proposal(request) { + Ok(_) => panic!("Test failes"), + Err(TestError::RejectProposal(response)) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::InvalidTx) + ); + assert_eq!( + response[0].result.info, + String::from( + "Transaction rejected: Non-encrypted transactions are \ + not supported" + ), + ); + } + } + } + + /// Test that if the unsigned wrapper tx hash is known (replay attack), the + /// block is rejected + #[test] + fn test_wrapper_tx_hash() { + let (mut shell, _) = TestShell::new(); + + let keypair = crate::wallet::defaults::daewon_keypair(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + ); + let wrapper = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + 
tx, + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ); + let signed = wrapper.sign(&keypair).expect("Test failed"); + + // Write wrapper hash to storage + let wrapper_unsigned_hash = Hash(signed.unsigned_hash()); + let hash_key = + replay_protection::get_tx_hash_key(&wrapper_unsigned_hash); + shell + .wl_storage + .storage + .write(&hash_key, vec![]) + .expect("Test failed"); + + // Run validation + let request = ProcessProposal { + txs: vec![signed.to_bytes()], }; - assert_eq!(response.result.code, u32::from(ErrorCodes::InvalidTx)); - assert_eq!( - response.result.info, - String::from( - "Transaction rejected: Non-encrypted transactions are not \ - supported" - ), + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::ReplayTx) + ); + assert_eq!( + response[0].result.info, + format!( + "Wrapper transaction hash {} already in storage, \ + replay attempt", + wrapper_unsigned_hash + ) + ); + } + } + } + + /// Test that a block containing two identical wrapper txs is rejected + #[test] + fn test_wrapper_tx_hash_same_block() { + let (mut shell, _) = TestShell::new(); + + let keypair = crate::wallet::defaults::daewon_keypair(); + + // Add unshielded balance for fee payment + let balance_key = token::balance_key( + &shell.wl_storage.storage.native_token, + &Address::from(&keypair.ref_to()), + ); + shell + .wl_storage + .storage + .write(&balance_key, Amount::whole(1000).try_to_vec().unwrap()) + .unwrap(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + ); + let wrapper = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx, + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ); + let signed = wrapper.sign(&keypair).expect("Test failed"); + + // Run validation + let request = ProcessProposal { + txs: vec![signed.to_bytes(); 2], + }; + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!(response[0].result.code, u32::from(ErrorCodes::Ok)); + assert_eq!( + response[1].result.code, + u32::from(ErrorCodes::ReplayTx) + ); + // The checks happens on the inner hash first, so the tx is + // rejected because of this hash, not the + // wrapper one + assert_eq!( + response[1].result.info, + format!( + "Inner transaction hash {} already in storage, replay \ + attempt", + wrapper.tx_hash + ) + ); + } + } + } + + /// Test that if the unsigned inner tx hash is known (replay attack), the + /// block is rejected + #[test] + fn test_inner_tx_hash() { + let (mut shell, _) = TestShell::new(); + + let keypair = crate::wallet::defaults::daewon_keypair(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + ); + let wrapper = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx, + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ); + let inner_unsigned_hash = wrapper.tx_hash.clone(); + let signed = wrapper.sign(&keypair).expect("Test failed"); + + // Write inner hash to storage + let hash_key = replay_protection::get_tx_hash_key(&inner_unsigned_hash); + shell + .wl_storage + .storage + .write(&hash_key, vec![]) + .expect("Test failed"); + + // Run 
validation + let request = ProcessProposal { + txs: vec![signed.to_bytes()], + }; + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!( + response[0].result.code, + u32::from(ErrorCodes::ReplayTx) + ); + assert_eq!( + response[0].result.info, + format!( + "Inner transaction hash {} already in storage, replay \ + attempt", + inner_unsigned_hash + ) + ); + } + } + } + + /// Test that a block containing two identical inner transactions is + /// rejected + #[test] + fn test_inner_tx_hash_same_block() { + let (mut shell, _) = TestShell::new(); + + let keypair = crate::wallet::defaults::daewon_keypair(); + let keypair_2 = crate::wallet::defaults::daewon_keypair(); + + // Add unshielded balance for fee payment + let balance_key = token::balance_key( + &shell.wl_storage.storage.native_token, + &Address::from(&keypair.ref_to()), + ); + shell + .wl_storage + .storage + .write(&balance_key, Amount::whole(1000).try_to_vec().unwrap()) + .unwrap(); + + // Add unshielded balance for fee payment + let balance_key = token::balance_key( + &shell.wl_storage.storage.native_token, + &Address::from(&keypair_2.ref_to()), ); + shell + .wl_storage + .storage + .write(&balance_key, Amount::whole(1000).try_to_vec().unwrap()) + .unwrap(); + + let tx = Tx::new( + "wasm_code".as_bytes().to_owned(), + Some("transaction data".as_bytes().to_owned()), + ); + let wrapper = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair, + Epoch(0), + 0.into(), + tx.clone(), + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ); + let inner_unsigned_hash = wrapper.tx_hash.clone(); + let signed = wrapper.sign(&keypair).expect("Test failed"); + + let new_wrapper = WrapperTx::new( + Fee { + amount: 0.into(), + token: shell.wl_storage.storage.native_token.clone(), + }, + &keypair_2, + Epoch(0), + 0.into(), + tx, + Default::default(), + #[cfg(not(feature = "mainnet"))] + None, + ); + let new_signed = new_wrapper.sign(&keypair).expect("Test failed"); + + // Run validation + let request = ProcessProposal { + txs: vec![signed.to_bytes(), new_signed.to_bytes()], + }; + match shell.process_proposal(request) { + Ok(_) => panic!("Test failed"), + Err(TestError::RejectProposal(response)) => { + assert_eq!(response[0].result.code, u32::from(ErrorCodes::Ok)); + assert_eq!( + response[1].result.code, + u32::from(ErrorCodes::ReplayTx) + ); + assert_eq!( + response[1].result.info, + format!( + "Inner transaction hash {} already in storage, replay \ + attempt", + inner_unsigned_hash + ) + ); + } + } } } diff --git a/core/src/ledger/mod.rs b/core/src/ledger/mod.rs index 31879e9b99..f472c5e321 100644 --- a/core/src/ledger/mod.rs +++ b/core/src/ledger/mod.rs @@ -5,6 +5,7 @@ pub mod governance; #[cfg(any(feature = "abciplus", feature = "abcipp"))] pub mod ibc; pub mod parameters; +pub mod replay_protection; pub mod slash_fund; pub mod storage; pub mod storage_api; diff --git a/core/src/ledger/replay_protection.rs b/core/src/ledger/replay_protection.rs new file mode 100644 index 0000000000..cee54ef06f --- /dev/null +++ b/core/src/ledger/replay_protection.rs @@ -0,0 +1,21 @@ +//! 
Replay protection storage + +use crate::types::address::{Address, InternalAddress}; +use crate::types::hash::Hash; +use crate::types::storage::{DbKeySeg, Key, KeySeg}; + +/// Internal replay protection address +pub const ADDRESS: Address = + Address::Internal(InternalAddress::ReplayProtection); + +/// Check if a key is a replay protection key +pub fn is_tx_hash_key(key: &Key) -> bool { + matches!(&key.segments[0], DbKeySeg::AddressSeg(addr) if addr == &ADDRESS) +} + +/// Get the transaction hash key +pub fn get_tx_hash_key(hash: &Hash) -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&hash.to_string()) + .expect("Cannot obtain a valid db key") +} diff --git a/core/src/ledger/storage/masp_conversions.rs b/core/src/ledger/storage/masp_conversions.rs index 0834d7bb53..3945ba936a 100644 --- a/core/src/ledger/storage/masp_conversions.rs +++ b/core/src/ledger/storage/masp_conversions.rs @@ -29,8 +29,8 @@ pub fn update_allowed_conversions( wl_storage: &mut super::WlStorage, ) -> crate::ledger::storage_api::Result<()> where - D: super::DB + for<'iter> super::DBIter<'iter>, - H: super::StorageHasher, + D: 'static + super::DB + for<'iter> super::DBIter<'iter>, + H: 'static + super::StorageHasher, { use masp_primitives::ff::PrimeField; use masp_primitives::transaction::components::Amount as MaspAmount; diff --git a/core/src/ledger/storage/mod.rs b/core/src/ledger/storage/mod.rs index bafd4e09a6..bed23af0ab 100644 --- a/core/src/ledger/storage/mod.rs +++ b/core/src/ledger/storage/mod.rs @@ -21,7 +21,7 @@ pub use merkle_tree::{ use thiserror::Error; pub use traits::{Sha256Hasher, StorageHasher}; pub use wl_storage::{ - iter_prefix_post, iter_prefix_pre, PrefixIter, WlStorage, + iter_prefix_post, iter_prefix_pre, PrefixIter, TempWlStorage, WlStorage, }; #[cfg(feature = "wasm-runtime")] diff --git a/core/src/ledger/storage/wl_storage.rs b/core/src/ledger/storage/wl_storage.rs index ff6f454162..e5aebd2dc7 100644 --- a/core/src/ledger/storage/wl_storage.rs +++ b/core/src/ledger/storage/wl_storage.rs @@ -25,10 +25,101 @@ where pub storage: Storage, } +/// Temporary storage that can be used for changes that will never be committed +/// to the DB. This is useful for the shell `PrepareProposal` and +/// `ProcessProposal` handlers that should not change state, but need to apply +/// storage changes for replay protection to validate the proposal. +#[derive(Debug)] +pub struct TempWlStorage<'a, D, H> +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + /// Write log + pub write_log: WriteLog, + /// Storage provides access to DB + pub storage: &'a Storage, +} + +impl<'a, D, H> TempWlStorage<'a, D, H> +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + /// Create a temp storage that can mutated in memory, but never committed to + /// DB. + pub fn new(storage: &'a Storage) -> Self { + Self { + write_log: WriteLog::default(), + storage, + } + } +} + +/// Common trait for [`WlStorage`] and [`TempWlStorage`], used to implement +/// storage_api traits. 
+trait WriteLogAndStorage { + // DB type + type D: DB + for<'iter> DBIter<'iter>; + // DB hasher type + type H: StorageHasher; + + /// Borrow `WriteLog` + fn write_log(&self) -> &WriteLog; + + /// Borrow mutable `WriteLog` + fn write_log_mut(&mut self) -> &mut WriteLog; + + /// Borrow `Storage` + fn storage(&self) -> &Storage; +} + +impl WriteLogAndStorage for WlStorage +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + type D = D; + type H = H; + + fn write_log(&self) -> &WriteLog { + &self.write_log + } + + fn write_log_mut(&mut self) -> &mut WriteLog { + &mut self.write_log + } + + fn storage(&self) -> &Storage { + &self.storage + } +} + +impl WriteLogAndStorage for TempWlStorage<'_, D, H> +where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, +{ + type D = D; + type H = H; + + fn write_log(&self) -> &WriteLog { + &self.write_log + } + + fn write_log_mut(&mut self) -> &mut WriteLog { + &mut self.write_log + } + + fn storage(&self) -> &Storage { + self.storage + } +} + impl WlStorage where D: 'static + DB + for<'iter> DBIter<'iter>, - H: StorageHasher, + H: 'static + StorageHasher, { /// Combine storage with write-log pub fn new(write_log: WriteLog, storage: Storage) -> Self { @@ -231,10 +322,11 @@ where } } -impl StorageRead for WlStorage +impl StorageRead for T where - D: DB + for<'iter> DBIter<'iter>, - H: StorageHasher, + T: WriteLogAndStorage, + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, { type PrefixIter<'iter> = PrefixIter<'iter, D> where Self: 'iter; @@ -243,7 +335,7 @@ where key: &storage::Key, ) -> storage_api::Result>> { // try to read from the write log first - let (log_val, _gas) = self.write_log.read(key); + let (log_val, _gas) = self.write_log().read(key); match log_val { Some(&write_log::StorageModification::Write { ref value }) => { Ok(Some(value.clone())) @@ -258,14 +350,17 @@ where } None => { // when not found in write log, try to read from the storage - self.storage.db.read_subspace_val(key).into_storage_result() + self.storage() + .db + .read_subspace_val(key) + .into_storage_result() } } } fn has_key(&self, key: &storage::Key) -> storage_api::Result { // try to read from the write log first - let (log_val, _gas) = self.write_log.read(key); + let (log_val, _gas) = self.write_log().read(key); match log_val { Some(&write_log::StorageModification::Write { .. }) | Some(&write_log::StorageModification::InitAccount { .. 
}) @@ -276,7 +371,7 @@ where } None => { // when not found in write log, try to check the storage - self.storage.block.tree.has_key(key).into_storage_result() + self.storage().block.tree.has_key(key).into_storage_result() } } } @@ -286,7 +381,7 @@ where prefix: &storage::Key, ) -> storage_api::Result> { let (iter, _gas) = - iter_prefix_post(&self.write_log, &self.storage, prefix); + iter_prefix_post(self.write_log(), self.storage(), prefix); Ok(iter) } @@ -298,40 +393,41 @@ where } fn get_chain_id(&self) -> std::result::Result { - Ok(self.storage.chain_id.to_string()) + Ok(self.storage().chain_id.to_string()) } fn get_block_height( &self, ) -> std::result::Result { - Ok(self.storage.block.height) + Ok(self.storage().block.height) } fn get_block_hash( &self, ) -> std::result::Result { - Ok(self.storage.block.hash.clone()) + Ok(self.storage().block.hash.clone()) } fn get_block_epoch( &self, ) -> std::result::Result { - Ok(self.storage.block.epoch) + Ok(self.storage().block.epoch) } fn get_tx_index( &self, ) -> std::result::Result { - Ok(self.storage.tx_index) + Ok(self.storage().tx_index) } fn get_native_token(&self) -> storage_api::Result
{ - Ok(self.storage.native_token.clone()) + Ok(self.storage().native_token.clone()) } } -impl StorageWrite for WlStorage +impl StorageWrite for T where + T: WriteLogAndStorage, D: DB + for<'iter> DBIter<'iter>, H: StorageHasher, { @@ -340,13 +436,19 @@ where key: &storage::Key, val: impl AsRef<[u8]>, ) -> storage_api::Result<()> { - self.write_log + let _ = self + .write_log_mut() .protocol_write(key, val.as_ref().to_vec()) - .into_storage_result() + .into_storage_result(); + Ok(()) } fn delete(&mut self, key: &storage::Key) -> storage_api::Result<()> { - self.write_log.protocol_delete(key).into_storage_result() + let _ = self + .write_log_mut() + .protocol_delete(key) + .into_storage_result(); + Ok(()) } } diff --git a/core/src/proto/types.rs b/core/src/proto/types.rs index 40e343d1bf..4b73644e46 100644 --- a/core/src/proto/types.rs +++ b/core/src/proto/types.rs @@ -362,6 +362,32 @@ impl Tx { SigningTx::from(self.clone()).hash() } + pub fn unsigned_hash(&self) -> [u8; 32] { + match self.data { + Some(ref data) => { + match SignedTxData::try_from_slice(data) { + Ok(signed_data) => { + // Reconstruct unsigned tx + let unsigned_tx = Tx { + code: self.code.clone(), + data: signed_data.data, + timestamp: self.timestamp, + }; + unsigned_tx.hash() + } + Err(_) => { + // Unsigned tx + self.hash() + } + } + } + None => { + // Unsigned tx + self.hash() + } + } + } + pub fn code_hash(&self) -> [u8; 32] { SigningTx::from(self.clone()).code_hash } diff --git a/core/src/types/address.rs b/core/src/types/address.rs index ae96842f8c..965dc1b57c 100644 --- a/core/src/types/address.rs +++ b/core/src/types/address.rs @@ -69,6 +69,8 @@ mod internal { "ibc::IBC Mint Address "; pub const ETH_BRIDGE: &str = "ano::ETH Bridge Address "; + pub const REPLAY_PROTECTION: &str = + "ano::Replay Protection "; } /// Fixed-length address strings prefix for established addresses. @@ -198,6 +200,9 @@ impl Address { InternalAddress::EthBridge => { internal::ETH_BRIDGE.to_string() } + InternalAddress::ReplayProtection => { + internal::REPLAY_PROTECTION.to_string() + } }; debug_assert_eq!(string.len(), FIXED_LEN_STRING_BYTES); string @@ -251,6 +256,9 @@ impl Address { internal::ETH_BRIDGE => { Ok(Address::Internal(InternalAddress::EthBridge)) } + internal::REPLAY_PROTECTION => { + Ok(Address::Internal(InternalAddress::ReplayProtection)) + } _ => Err(Error::new( ErrorKind::InvalidData, "Invalid internal address", @@ -466,6 +474,8 @@ pub enum InternalAddress { SlashFund, /// Bridge to Ethereum EthBridge, + /// Replay protection contains transactions' hash + ReplayProtection, } impl InternalAddress { @@ -500,6 +510,7 @@ impl Display for InternalAddress { Self::IbcBurn => "IbcBurn".to_string(), Self::IbcMint => "IbcMint".to_string(), Self::EthBridge => "EthBridge".to_string(), + Self::ReplayProtection => "ReplayProtection".to_string(), } ) } @@ -783,8 +794,10 @@ pub mod testing { InternalAddress::IbcEscrow => {} InternalAddress::IbcBurn => {} InternalAddress::IbcMint => {} - InternalAddress::EthBridge => {} /* Add new addresses in the - * `prop_oneof` below. */ + InternalAddress::EthBridge => {} + InternalAddress::ReplayProtection => {} /* Add new addresses in + * the + * `prop_oneof` below. 
*/ }; prop_oneof![ Just(InternalAddress::PoS), @@ -799,6 +812,7 @@ pub mod testing { Just(InternalAddress::Governance), Just(InternalAddress::SlashFund), Just(InternalAddress::EthBridge), + Just(InternalAddress::ReplayProtection) ] } diff --git a/core/src/types/internal.rs b/core/src/types/internal.rs index 8c85a4236e..d13d392381 100644 --- a/core/src/types/internal.rs +++ b/core/src/types/internal.rs @@ -89,6 +89,12 @@ mod tx_queue { pub fn is_empty(&self) -> bool { self.0.is_empty() } + + /// Get reference to the element at the given index. + /// Returns [`None`] if index exceeds the queue lenght. + pub fn get(&self, index: usize) -> Option<&WrapperTxInQueue> { + self.0.get(index) + } } } diff --git a/core/src/types/transaction/decrypted.rs b/core/src/types/transaction/decrypted.rs index 3ac49efc77..7f76bf9e49 100644 --- a/core/src/types/transaction/decrypted.rs +++ b/core/src/types/transaction/decrypted.rs @@ -11,7 +11,7 @@ pub mod decrypted_tx { use super::EllipticCurve; use crate::proto::Tx; - use crate::types::transaction::{hash_tx, Hash, TxType, WrapperTx}; + use crate::types::transaction::{Hash, TxType, WrapperTx}; #[derive(Clone, Debug, BorshSerialize, BorshDeserialize, BorshSchema)] #[allow(clippy::large_enum_variant)] @@ -56,14 +56,15 @@ pub mod decrypted_tx { } /// Return the hash used as a commitment to the tx's contents in the - /// wrapper tx that includes this tx as an encrypted payload. + /// wrapper tx that includes this tx as an encrypted payload. The + /// commitment is computed on the unsigned tx if tx is signed pub fn hash_commitment(&self) -> Hash { match self { DecryptedTx::Decrypted { tx, #[cfg(not(feature = "mainnet"))] has_valid_pow: _, - } => hash_tx(&tx.to_bytes()), + } => Hash(tx.unsigned_hash()), DecryptedTx::Undecryptable(wrapper) => wrapper.tx_hash.clone(), } } diff --git a/core/src/types/transaction/wrapper.rs b/core/src/types/transaction/wrapper.rs index 70ef2827bc..a87b3e1fff 100644 --- a/core/src/types/transaction/wrapper.rs +++ b/core/src/types/transaction/wrapper.rs @@ -17,9 +17,7 @@ pub mod wrapper_tx { use crate::types::storage::Epoch; use crate::types::token::Amount; use crate::types::transaction::encrypted::EncryptedTx; - use crate::types::transaction::{ - hash_tx, EncryptionKey, Hash, TxError, TxType, - }; + use crate::types::transaction::{EncryptionKey, Hash, TxError, TxType}; /// Minimum fee amount in micro NAMs pub const MIN_FEE: u64 = 100; @@ -206,7 +204,7 @@ pub mod wrapper_tx { epoch, gas_limit, inner_tx, - tx_hash: hash_tx(&tx.to_bytes()), + tx_hash: Hash(tx.unsigned_hash()), #[cfg(not(feature = "mainnet"))] pow_solution, } @@ -227,7 +225,7 @@ pub mod wrapper_tx { /// Decrypt the wrapped transaction. /// - /// Will fail if the inner transaction does match the + /// Will fail if the inner transaction doesn't match the /// hash commitment or we are unable to recover a /// valid Tx from the decoded byte stream. 
pub fn decrypt( @@ -236,14 +234,15 @@ pub mod wrapper_tx { ) -> Result { // decrypt the inner tx let decrypted = self.inner_tx.decrypt(privkey); + let decrypted_tx = Tx::try_from(decrypted.as_ref()) + .map_err(|_| WrapperTxErr::InvalidTx)?; + // check that the hash equals commitment - if hash_tx(&decrypted) != self.tx_hash { - Err(WrapperTxErr::DecryptedHash) - } else { - // convert back to Tx type - Tx::try_from(decrypted.as_ref()) - .map_err(|_| WrapperTxErr::InvalidTx) + if decrypted_tx.unsigned_hash() != self.tx_hash.0 { + return Err(WrapperTxErr::DecryptedHash); } + + Ok(decrypted_tx) } /// Sign the wrapper transaction and convert to a normal Tx type @@ -416,7 +415,7 @@ pub mod wrapper_tx { assert_matches!(err, WrapperTxErr::DecryptedHash); } - /// We check that even if the encrypted payload and has of its + /// We check that even if the encrypted payload and hash of its /// contents are correctly changed, we detect fraudulent activity /// via the signature. #[test] @@ -470,7 +469,7 @@ pub mod wrapper_tx { ); // We change the commitment appropriately - wrapper.tx_hash = hash_tx(&malicious.to_bytes()); + wrapper.tx_hash = Hash(malicious.unsigned_hash()); // we check ciphertext validity still passes assert!(wrapper.validate_ciphertext()); diff --git a/documentation/specs/src/base-ledger/replay-protection.md b/documentation/specs/src/base-ledger/replay-protection.md index 1094460cad..85001729a5 100644 --- a/documentation/specs/src/base-ledger/replay-protection.md +++ b/documentation/specs/src/base-ledger/replay-protection.md @@ -1,232 +1,539 @@ # Replay Protection -Replay protection is a mechanism to prevent _replay attacks_, which consist of a malicious user resubmitting an already executed transaction (also mentioned as tx in this document) to the ledger. +Replay protection is a mechanism to prevent _replay attacks_, which consist of a +malicious user resubmitting an already executed transaction (also mentioned as +tx in this document) to the ledger. -A replay attack causes the state of the machine to deviate from the intended one (from the perspective of the parties involved in the original transaction) and causes economic damage to the fee payer of the original transaction, who finds himself paying more than once. Further economic damage is caused if the transaction involved the moving of value in some form (e.g. a transfer of tokens) with the sender being deprived of more value than intended. +A replay attack causes the state of the machine to deviate from the intended one +(from the perspective of the parties involved in the original transaction) and +causes economic damage to the fee payer of the original transaction, who finds +himself paying more than once. Further economic damage is caused if the +transaction involved the moving of value in some form (e.g. a transfer of +tokens) with the sender being deprived of more value than intended. -Since the original transaction was already well formatted for the protocol's rules, the attacker doesn't need to rework it, making this attack relatively easy. +Since the original transaction was already well formatted for the protocol's +rules, the attacker doesn't need to rework it, making this attack relatively +easy. -Of course, a replay attack makes sense only if the attacker differs from the _source_ of the original transaction, as a user will always be able to generate another semantically identical transaction to submit without the need to replay the same one. 
+Of course, a replay attack makes sense only if the attacker differs from the +_source_ of the original transaction, as a user will always be able to generate +another semantically identical transaction to submit without the need to replay +the same one. + +To prevent this scenario, Namada supports a replay protection mechanism to +prevent the execution of already processed transactions. -To prevent this scenario, Namada supports a replay protection mechanism to prevent the execution of already processed transactions. - ## Context -This section will illustrate the pre-existing context in which we are going to implement the replay protection mechanism. +This section will illustrate the pre-existing context in which we are going to +implement the replay protection mechanism. ### Encryption-Authentication -The current implementation of Namada is built on top of Tendermint which provides an encrypted and authenticated communication channel between every two nodes to prevent a _man-in-the-middle_ attack (see the detailed [spec](https://github.com/tendermint/tendermint/blob/29e5fbcc648510e4763bd0af0b461aed92c21f30/spec/p2p/peer.md)). +The current implementation of Namada is built on top of Tendermint which +provides an encrypted and authenticated communication channel between every two +nodes to prevent a _man-in-the-middle_ attack (see the detailed +[spec](https://github.com/tendermint/tendermint/blob/29e5fbcc648510e4763bd0af0b461aed92c21f30/spec/p2p/peer.md)). -The Namada protocol relies on this substrate to exchange transactions (messages) that will define the state transition of the ledger. More specifically, a transaction is composed of two parts: a `WrapperTx` and an inner `Tx` +The Namada protocol relies on this substrate to exchange transactions (messages) +that will define the state transition of the ledger. More specifically, a +transaction is composed of two parts: a `WrapperTx` and an inner `Tx` ```rust pub struct WrapperTx { - /// The fee to be payed for including the tx - pub fee: Fee, - /// Used to determine an implicit account of the fee payer - pub pk: common::PublicKey, - /// The epoch in which the tx is to be submitted. This determines - /// which decryption key will be used - pub epoch: Epoch, - /// Max amount of gas that can be used when executing the inner tx - pub gas_limit: GasLimit, - /// the encrypted payload - pub inner_tx: EncryptedTx, - /// sha-2 hash of the inner transaction acting as a commitment - /// the contents of the encrypted payload - pub tx_hash: Hash, + /// The fee to be payed for including the tx + pub fee: Fee, + /// Used to determine an implicit account of the fee payer + pub pk: common::PublicKey, + /// The epoch in which the tx is to be submitted. This determines + /// which decryption key will be used + pub epoch: Epoch, + /// Max amount of gas that can be used when executing the inner tx + pub gas_limit: GasLimit, + /// The optional unshielding tx for fee payment + pub unshield: Option, + /// the encrypted payload + pub inner_tx: EncryptedTx, + /// sha-2 hash of the inner transaction acting as a commitment + /// the contents of the encrypted payload + pub tx_hash: Hash, } pub struct Tx { - pub code: Vec, - pub data: Option>, - pub timestamp: DateTimeUtc, + pub code: Vec, + pub data: Option>, + pub timestamp: DateTimeUtc, } -``` +``` -The wrapper transaction is composed of some metadata, the encrypted inner transaction itself and the hash of this. The inner `Tx` transaction carries the Wasm code to be executed and the associated data. 
+The wrapper transaction is composed of some metadata, an optional unshielding tx +for fee payment (see [fee specs](../economics/fee-system.md)), the encrypted +inner transaction itself and the hash of this. The inner `Tx` transaction +carries the Wasm code to be executed and the associated data. A transaction is constructed as follows: 1. The struct `Tx` is produced -2. The hash of this transaction gets signed by the author, producing another `Tx` where the data field holds the concatenation of the original data and the signature (`SignedTxData`) -3. The produced transaction is encrypted and embedded in a `WrapperTx`. The encryption step is there for a future implementation of DKG (see [Ferveo](https://github.com/anoma/ferveo)) -4. Finally, the `WrapperTx` gets converted to a `Tx` struct, signed over its hash (same as step 2, relying on `SignedTxData`), and submitted to the network - -Note that the signer of the `WrapperTx` and that of the inner one don't need to coincide, but the signer of the wrapper will be charged with gas and fees. -In the execution steps: +2. The hash of this transaction gets signed by the author, producing another + `Tx` where the data field holds the concatenation of the original data and + the signature (`SignedTxData`) +3. The produced transaction is encrypted and embedded in a `WrapperTx`. The + encryption step is there for a future implementation of DKG (see + [Ferveo](https://github.com/anoma/ferveo)) +4. Finally, the `WrapperTx` gets converted to a `Tx` struct, signed over its + hash (same as step 2, relying on `SignedTxData`), and submitted to the + network + +Note that the signer of the `WrapperTx` and that of the inner one don't need to +coincide, but the signer of the wrapper will be charged with gas and fees. In +the execution steps: 1. The `WrapperTx` signature is verified and, only if valid, the tx is processed -2. In the following height the proposer decrypts the inner tx, checks that the hash matches that of the `tx_hash` field and, if everything went well, includes the decrypted tx in the proposed block +2. In the following height the proposer decrypts the inner tx, checks that the + hash matches that of the `tx_hash` field and, if everything went well, + includes the decrypted tx in the proposed block 3. The inner tx will then be executed by the Wasm runtime -4. After the execution, the affected validity predicates (also mentioned as VP in this document) will check the storage changes and (if relevant) the signature of the transaction: if the signature is not valid, the VP will deem the transaction invalid and the changes won't be applied to the storage +4. After the execution, the affected validity predicates (also mentioned as VP + in this document) will check the storage changes and (if relevant) the + signature of the transaction: if the signature is not valid, the VP will deem + the transaction invalid and the changes won't be applied to the storage -The signature checks effectively prevent any tampering with the transaction data because that would cause the checks to fail and the transaction to be rejected. -For a more in-depth view, please refer to the [Namada execution spec](./execution.md). +The signature checks effectively prevent any tampering with the transaction data +because that would cause the checks to fail and the transaction to be rejected. +For a more in-depth view, please refer to the +[Namada execution spec](./execution.md). 
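+
+The following sketch is only illustrative: it uses the `sha2` crate over raw
+bytes and made-up names instead of the real Namada types, but it shows the
+commitment role played by the wrapper's `tx_hash` and why any tampering with
+the inner payload is detected at decryption time (step 2 above):
+
+```rust
+// Illustration only: raw bytes stand in for the serialized inner `Tx`, and
+// the sha-2 digest plays the role of the `tx_hash` commitment.
+use sha2::{Digest, Sha256};
+
+fn commitment(unsigned_inner_tx_bytes: &[u8]) -> Vec<u8> {
+    Sha256::digest(unsigned_inner_tx_bytes).to_vec()
+}
+
+fn main() {
+    let inner = b"wasm code || data || timestamp".to_vec();
+    // The wrapper carries the hash of the inner tx it commits to.
+    let tx_hash = commitment(&inner);
+    // After decryption the proposer recomputes the hash and compares it.
+    assert_eq!(commitment(&inner), tx_hash);
+    // Tampering with the payload breaks the commitment and the tx is rejected.
+    let mut tampered = inner.clone();
+    tampered[0] ^= 0xff;
+    assert_ne!(commitment(&tampered), tx_hash);
+}
+```
+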
### Tendermint replay protection -The underlying consensus engine, [Tendermint](https://github.com/tendermint/tendermint/blob/29e5fbcc648510e4763bd0af0b461aed92c21f30/spec/abci/apps.md), provides a first layer of protection in its mempool which is based on a cache of previously seen transactions. This mechanism is actually aimed at preventing a block proposer from including an already processed transaction in the next block, which can happen when the transaction has been received late. Of course, this also acts as a countermeasure against intentional replay attacks. This check though, like all the checks performed in `CheckTx`, is weak, since a malicious validator could always propose a block containing invalid transactions. There's therefore the need for a more robust replay protection mechanism implemented directly in the application. +The underlying consensus engine, +[Tendermint](https://github.com/tendermint/tendermint/blob/29e5fbcc648510e4763bd0af0b461aed92c21f30/spec/abci/apps.md), +provides a first layer of protection in its mempool which is based on a cache of +previously seen transactions. This mechanism is actually aimed at preventing a +block proposer from including an already processed transaction in the next +block, which can happen when the transaction has been received late. Of course, +this also acts as a countermeasure against intentional replay attacks. This +check though, like all the checks performed in `CheckTx`, is weak, since a +malicious validator could always propose a block containing invalid +transactions. There's therefore the need for a more robust replay protection +mechanism implemented directly in the application. ## Implementation -Namada replay protection consists of three parts: the hash-based solution for both `EncryptedTx` (also called the `InnerTx`) and `WrapperTx`, a way to mitigate replay attacks in case of a fork and a concept of a lifetime for the transactions. +Namada replay protection consists of three parts: the hash-based solution for +both `EncryptedTx` (also called the `InnerTx`) and `WrapperTx`, a way to +mitigate replay attacks in case of a fork and a concept of a lifetime for the +transactions. ### Hash register -The actual Wasm code and data for the transaction are encapsulated inside a struct `Tx`, which gets encrypted as an `EncryptedTx` and wrapped inside a `WrapperTx` (see the [relative](#encryption-authentication) section). This inner transaction must be protected from replay attacks because it carries the actual semantics of the state transition. Moreover, even if the wrapper transaction was protected from replay attacks, an attacker could extract the inner transaction, rewrap it, and replay it. Note that for this attack to work, the attacker will need to sign the outer transaction himself and pay gas and fees for that, but this could still cause much greater damage to the parties involved in the inner transaction. - -`WrapperTx` is the only type of transaction currently accepted by the ledger. It must be protected from replay attacks because, if it wasn't, a malicious user could replay the transaction as is. Even if the inner transaction implemented replay protection or, for any reason, wasn't accepted, the signer of the wrapper would still pay for gas and fees, effectively suffering economic damage. - -To prevent the replay of both these transactions we will rely on a set of already processed transactions' digests that will be kept in storage. 
These digests will be computed on the **unsigned** transactions, to support replay protection even for [multisigned](multisignature.md) transactions: in this case, if hashes were taken from the signed transactions, a different set of signatures on the same tx would produce a different hash, effectively allowing for a replay. To support this, we'll need a subspace in storage headed by a `ReplayProtection` internal address: +The actual Wasm code and data for the transaction are encapsulated inside a +struct `Tx`, which gets encrypted as an `EncryptedTx` and wrapped inside a +`WrapperTx` (see the [relative](#encryption-authentication) section). This inner +transaction must be protected from replay attacks because it carries the actual +semantics of the state transition. Moreover, even if the wrapper transaction was +protected from replay attacks, an attacker could extract the inner transaction, +rewrap it, and replay it. Note that for this attack to work, the attacker will +need to sign the outer transaction himself and pay gas and fees for that, but +this could still cause much greater damage to the parties involved in the inner +transaction. + +`WrapperTx` is the only type of transaction currently accepted by the ledger. It +must be protected from replay attacks because, if it wasn't, a malicious user +could replay the transaction as is. Even if the inner transaction implemented +replay protection or, for any reason, wasn't accepted, the signer of the wrapper +would still pay for gas and fees, effectively suffering economic damage. + +To prevent the replay of both these transactions we will rely on a set of +already processed transactions' digests that will be kept in storage. These +digests will be computed on the **unsigned** transactions, to support replay +protection even for [multisigned](multisignature.md) transactions: in this case, +if hashes were taken from the signed transactions, a different set of signatures +on the same tx would produce a different hash, effectively allowing for a +replay. To support this, we'll first need to update the `WrapperTx` hash field +to contain the hash of the unsigned inner tx, instead of the signed one: this +doesn't affect the overall safety of Namada (since the wrapper is still signed +over all of its bytes, including the inner signature) and allows for early +replay attack checks in mempool and at wrapper block-inclusion time. +Additionally, we need a subspace in storage headed by a `ReplayProtection` +internal address: ``` -/$ReplayProtectionAddress/$tx0_hash: None -/$ReplayProtectionAddress/$tx1_hash: None -/$ReplayProtectionAddress/$tx2_hash: None +/\$ReplayProtectionAddress/\$tx0_hash: None +/\$ReplayProtectionAddress/\$tx1_hash: None +/\$ReplayProtectionAddress/\$tx2_hash: None ... ``` -The hashes will form the last part of the path to allow for a fast storage lookup. - -The consistency of the storage subspace is of critical importance for the correct working of the replay protection mechanism. To protect it, a validity predicate will check that no changes to this subspace are applied by any wasm transaction, as those should only be available from protocol. - -Both in `mempool_validation` and `process_proposal` we will perform a check (together with others, see the [relative](#wrapper-checks) section) on both the digests against the storage to check that neither of the transactions has already been executed: if this doesn't hold, the `WrapperTx` will not be included into the mempool/block respectively. 
If both checks pass then the transaction is included in the block and executed. In the `finalize_block` function we will add the transaction's hash to storage to prevent re-executions. We will first add the hash of the wrapper transaction. After that, in the following block, we deserialize the inner transaction, check the correct order of the transactions in the block and execute the tx: if it runs out of gas then we'll avoid storing its hash to allow rewrapping and executing the transaction, otherwise we'll add the hash in storage (both in case of success or failure of the tx). +The hashes will form the last part of the path to allow for a fast storage +lookup. + +The consistency of the storage subspace is of critical importance for the +correct working of the replay protection mechanism. To protect it, a validity +predicate will check that no changes to this subspace are applied by any wasm +transaction, as those should only be available from protocol. + +Both in `mempool_validation` and `process_proposal` we will perform a check +(together with others, see the [relative](#wrapper-checks) section) on both the +digests against the storage to check that neither of the transactions has +already been executed: if this doesn't hold, the `WrapperTx` will not be +included into the mempool/block respectively. In `process_proposal` we'll use a +temporary cache to prevent a replay of a transaction in the same block. If both +checks pass then the transaction is included in the block. The hashes are +committed to storage in `finalize_block` and the transaction is executed. + +In the next block we deserialize the inner transaction, check the validity of +the decrypted txs and their correct order: if the order is off a new round of +tendermint will start. If instead an error is found in any single decrypted tx, +we remove from storage the previously inserted hash of the inner tx to allow it +to be rewrapped, and discard the tx itself. Finally, in `finalize_block` we +execute the tx: if it runs out of gas then we'll remove its hash from storage, +again to allow rewrapping and executing the transaction, otherwise we'll keep +the hash in storage (both in case of success or failure of the tx). + +#### Optional unshielding + +The optional `unshield` field is supposed to carry an unshielding masp +`Transfer`. Given this assumption, there's no need to manage it since masp has +an internal replay protection mechanism. + +Still, since this field represents a valid, signed `Tx`, there are three +possible attacks that can be run by leveraging this field: + +1. If the wrapper signer constructs an `unshield` tx that actually encodes + another type of transaction, then this one can be extracted and executed + separately +2. A malicious user could extract this tx before it makes it to a block and play + it in advance +3. A combination of the previous two + +In the first case, the unshielding operation would fail because of the checks +run in protocol, but the tx itself could be extracted, wrapped and submitted to +the network. This issue could be solved with the mechanism explained in the +previous section. + +The second attack, instead, is performed before the original tx is placed in a +block and, therefore, cannot be prevented with a replay protection mechanism. 
+The only result of this attack would be that the original wrapper transaction +would fail since it would attempt to replay a masp transfer: in this case, the +submitter of the original tx can recreate it without the need for the +unshielding operation since the attacker has already performed it. + +In the last case the unshielding transaction (which is not a masp transfer) +could be encrypted, wrapped and executed before the original transaction is +inserted in a block. When the latter gets executed the protocol checks detect +that this is not a masp unshielding transfer and reject it. + +Given that saving the hash of the unshielding transaction is redundant in case +of a proper masp transfer and it doesn't prevent the second scenario in case of +non-masp transaction, Namada does not implement the replay protection mechanism +on the unshielding transaction, whose correctness is left to the wrapper signer +and the masp validity predicate (in case the unshielding tx was indeed a correct +masp unshield transfer). The combination of the fee system, the validity +predicates set and the protocol checks on the unshielding operation guarantees +that even if one of the attacks explained in this section is performed: + +- The original wrapper signer doesn't suffer economic damage (the wrapper + containing the invalid unshielding forces the block rejection without fee + collection) +- The attacker has to pay fees on the rewrapped tx preventing him to submit + these transactions for free +- The invalid unshielding transaction must still be a valid transaction per the + VPs triggered + +#### Governance proposals + +Governance [proposals](../base-ledger/governance.md) may carry some wasm code to +be executed in case the proposal passed. This code is embedded into a +`DecryptedTx` directly by the validators at block processing time and is not +inserted into the block itself. + +Given that the wasm code is attached to the transaction initiating the proposal, +it could be extracted from here and inserted in a transaction before the +proposal is executed. Therefore, replay protection is not a solution to prevent +attacks on governance proposals' code. Instead, to protect these transactions, +Namada relies on its proposal id mechanism in conjunction with the VP set. + +#### Protocol transactions + +At the moment, protocol transactions are only used for ETH bridge related +operations. The current implementation already takes care of replay attempts by +keeping track of the validators' signature on the events: this also includes +replay attacks in the same block. + +In the future, new types of protocol transactions may be supported: in this +case, a review of the replay protection mechanism might be required. ### Forks -In the case of a fork, the transaction hash is not enough to prevent replay attacks. Transactions, in fact, could still be replayed on the other branch as long as their format is kept unchanged and the counters in storage match. +In the case of a fork, the transaction hash is not enough to prevent replay +attacks. Transactions, in fact, could still be replayed on the other branch as +long as their format is kept unchanged and the counters in storage match. -To mitigate this problem, transactions will need to carry a `ChainId` identifier to tie them to a specific fork. This field needs to be added to the `Tx` struct so that it applies to both `WrapperTx` and `EncryptedTx`: +To mitigate this problem, transactions will need to carry a `ChainId` identifier +to tie them to a specific fork. 
This field needs to be added to the `Tx` struct +so that it applies to both `WrapperTx` and `EncryptedTx`: ```rust pub struct Tx { - pub code: Vec, - pub data: Option>, - pub timestamp: DateTimeUtc, - pub chain_id: ChainId + pub code: Vec, + pub data: Option>, + pub timestamp: DateTimeUtc, + pub chain_id: ChainId } ``` -This new field will be signed just like the other ones and is therefore subject to the same guarantees explained in the [initial](#encryption-authentication) section. The validity of this identifier will be checked in `process_proposal` for both the outer and inner tx: if a transaction carries an unexpected chain id, it won't be applied, meaning that no modifications will be applied to storage. +This new field will be signed just like the other ones and is therefore subject +to the same guarantees explained in the [initial](#encryption-authentication) +section. The validity of this identifier will be checked in `process_proposal` +for both the outer and inner tx: if a transaction carries an unexpected chain +id, it won't be applied, meaning that no modifications will be applied to +storage. ### Transaction lifetime -In general, a transaction is valid at the moment of submission, but after that, a series of external factors (ledger state, etc.) might change the mind of the submitter who's now not interested in the execution of the transaction anymore. - -We have to introduce the concept of a lifetime (or timeout) for the transactions: basically, the `Tx` struct will hold an extra field called `expiration` stating the maximum `DateTimeUtc` up until which the submitter is willing to see the transaction executed. After the specified time, the transaction will be considered invalid and discarded regardless of all the other checks. - -By introducing this new field we are setting a new constraint in the transaction's contract, where the ledger will make sure to prevent the execution of the transaction after the deadline and, on the other side, the submitter commits himself to the result of the execution at least until its expiration. If the expiration is reached and the transaction has not been executed the submitter can decide to submit a new transaction if he's still interested in the changes carried by it. - -In our design, the `expiration` will hold until the transaction is executed: once it's executed, either in case of success or failure, the tx hash will be written to storage and the transaction will not be replayable. In essence, the transaction submitter commits himself to one of these three conditions: +In general, a transaction is valid at the moment of submission, but after that, +a series of external factors (ledger state, etc.) might change the mind of the +submitter who's now not interested in the execution of the transaction anymore. + +We have to introduce the concept of a lifetime (or timeout) for the +transactions: basically, the `Tx` struct will hold an extra field called +`expiration` stating the maximum `DateTimeUtc` up until which the submitter is +willing to see the transaction executed. After the specified time, the +transaction will be considered invalid and discarded regardless of all the other +checks. + +By introducing this new field we are setting a new constraint in the +transaction's contract, where the ledger will make sure to prevent the execution +of the transaction after the deadline and, on the other side, the submitter +commits himself to the result of the execution at least until its expiration. 
If +the expiration is reached and the transaction has not been executed the +submitter can decide to submit a new transaction if he's still interested in the +changes carried by it. + +In our design, the `expiration` will hold until the transaction is executed: +once it's executed, either in case of success or failure, the tx hash will be +written to storage and the transaction will not be replayable. In essence, the +transaction submitter commits himself to one of these three conditions: - Transaction is invalid regardless of the specific state -- Transaction is executed (either with success or not) and the transaction hash is saved in the storage +- Transaction is executed (either with success or not) and the transaction hash + is saved in the storage - Expiration time has passed The first condition satisfied will invalidate further executions of the same tx. -In anticipation of DKG implementation, the current struct `WrapperTx` holds a field `epoch` stating the epoch in which the tx should be executed. This is because Ferveo will produce a new public key each epoch, effectively limiting the lifetime of the transaction (see section 2.2.2 of the [documentation](https://eprint.iacr.org/2022/898.pdf)). Unfortunately, for replay protection, a resolution of 1 epoch (~ 1 day) is too low for the possible needs of the submitters, therefore we need the `expiration` field to hold a maximum `DateTimeUtc` to increase resolution down to a single block (~ 10 seconds). +In anticipation of DKG implementation, the current struct `WrapperTx` holds a +field `epoch` stating the epoch in which the tx should be executed. This is +because Ferveo will produce a new public key each epoch, effectively limiting +the lifetime of the transaction (see section 2.2.2 of the +[documentation](https://eprint.iacr.org/2022/898.pdf)). Unfortunately, for +replay protection, a resolution of 1 epoch (~ 1 day) is too low for the possible +needs of the submitters, therefore we need the `expiration` field to hold a +maximum `DateTimeUtc` to increase resolution down to a single block (~ 10 +seconds). 
```rust pub struct Tx { - pub code: Vec, - pub data: Option>, - pub timestamp: DateTimeUtc, - pub chain_id: ChainId, - /// Lifetime of the transaction, also determines which decryption key will be used - pub expiration: DateTimeUtc, + pub code: Vec, + pub data: Option>, + pub timestamp: DateTimeUtc, + pub chain_id: ChainId, + /// Lifetime of the transaction, also determines which decryption key will be used + pub expiration: DateTimeUtc, } pub struct WrapperTx { - /// The fee to be payed for including the tx - pub fee: Fee, - /// Used to determine an implicit account of the fee payer - pub pk: common::PublicKey, - /// Max amount of gas that can be used when executing the inner tx - pub gas_limit: GasLimit, - /// the encrypted payload - pub inner_tx: EncryptedTx, - /// sha-2 hash of the inner transaction acting as a commitment - /// the contents of the encrypted payload - pub tx_hash: Hash, + /// The fee to be payed for including the tx + pub fee: Fee, + /// Used to determine an implicit account of the fee payer + pub pk: common::PublicKey, + /// Max amount of gas that can be used when executing the inner tx + pub gas_limit: GasLimit, + /// the encrypted payload + pub inner_tx: EncryptedTx, + /// sha-2 hash of the inner transaction acting as a commitment + /// the contents of the encrypted payload + pub tx_hash: Hash, } ``` -Since we now have more detailed information about the desired lifetime of the transaction, we can remove the `epoch` field and rely solely on `expiration`. Now, the producer of the inner transaction should make sure to set a sensible value for this field, in the sense that it should not span more than one epoch. If this happens, then the transaction will be correctly decrypted only in a subset of the desired lifetime (the one expecting the actual key used for the encryption), while, in the following epochs, the transaction will fail decryption and won't be executed. In essence, the `expiration` parameter can only restrict the implicit lifetime within the current epoch, it can not surpass it as that would make the transaction fail in the decryption phase. - -The subject encrypting the inner transaction will also be responsible for using the appropriate public key for encryption relative to the targeted time. - -The wrapper transaction will match the `expiration` of the inner for correct execution. Note that we need this field also for the wrapper to anticipate the check at mempool/proposal evaluation time, but also to prevent someone from inserting a wrapper transaction after the corresponding inner has expired forcing the wrapper signer to pay for the fees. +Since we now have more detailed information about the desired lifetime of the +transaction, we can remove the `epoch` field and rely solely on `expiration`. +Now, the producer of the inner transaction should make sure to set a sensible +value for this field, in the sense that it should not span more than one epoch. +If this happens, then the transaction will be correctly decrypted only in a +subset of the desired lifetime (the one expecting the actual key used for the +encryption), while, in the following epochs, the transaction will fail +decryption and won't be executed. In essence, the `expiration` parameter can +only restrict the implicit lifetime within the current epoch, it can not surpass +it as that would make the transaction fail in the decryption phase. 
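+
+As a minimal sketch of the lifetime check only (chrono's `DateTime<Utc>` here
+stands in for `DateTimeUtc`, and `is_expired` is a hypothetical helper, not
+the ledger's actual code path), the expiration comparison boils down to:
+
+```rust
+// Sketch only: not the real `Tx` type nor the protocol's validation code.
+use chrono::{DateTime, Duration, Utc};
+
+struct TxHeader {
+    expiration: DateTime<Utc>,
+}
+
+// A tx is discarded as soon as the block time is past its expiration,
+// regardless of any other check.
+fn is_expired(tx: &TxHeader, block_time: DateTime<Utc>) -> bool {
+    block_time > tx.expiration
+}
+
+fn main() {
+    let now = Utc::now();
+    let tx = TxHeader { expiration: now + Duration::minutes(10) };
+    assert!(!is_expired(&tx, now));
+    assert!(is_expired(&tx, now + Duration::hours(1)));
+}
+```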
+ +The subject encrypting the inner transaction will also be responsible for using +the appropriate public key for encryption relative to the targeted time. + +The wrapper transaction will match the `expiration` of the inner for correct +execution. Note that we need this field also for the wrapper to anticipate the +check at mempool/proposal evaluation time, but also to prevent someone from +inserting a wrapper transaction after the corresponding inner has expired +forcing the wrapper signer to pay for the fees. ### Wrapper checks -In `mempool_validation` and `process_proposal` we will perform some checks on the wrapper tx to validate it. These will involve: - -- Valid signature -- Enough funds to pay the fee -- Valid chainId -- Valid transaction hash -- Valid expiration - -These checks can all be done before executing the transactions themselves (the check on the gas cannot be done ahead of time). If any of these fails, the transaction should be considered invalid and the action to take will be one of the followings: - -1. If the checks fail on the signature, chainId, expiration or transaction hash, then this transaction will be forever invalid, regardless of the possible evolution of the ledger's state. There's no need to include the transaction in the block. Moreover, we **cannot** include this transaction in the block to charge a fee (as a sort of punishment) because these errors may not depend on the signer of the tx (could be due to malicious users or simply a delay in the tx inclusion in the block) -2. If the checks fail _only_ because of an insufficient balance, the wrapper should be kept in mempool for a future play in case the funds should become available -3. If all the checks pass validation we will include the transaction in the block to store the hash and charge the fee - -The `expiration` parameter also justifies step 2 of the previous bullet points which states that if the validity checks fail only because of an insufficient balance to pay for fees then the transaction should be kept in mempool for future execution. Without it, the transaction could be potentially executed at any future moment, possibly going against the mutated interests of the submitter. With the expiration parameter, now, the submitter commits himself to accept the execution of the transaction up to the specified time: it's going to be his responsibility to provide a sensible value for this parameter. Given this constraint the transaction will be kept in memepool up until the expiration (since it would become invalid after that in any case), to prevent the mempool from increasing too much in size. - -This mechanism can also be applied to another scenario. Suppose a transaction was not propagated to the network by a node (or a group of colluding nodes). Now, this tx might be valid, but it doesn't get inserted into a block. Without an expiration, this tx can be replayed (better, applied, since it was never executed in the first place) at a future moment in time when the submitter might not be willing to execute it anymore. +In `mempool_validation` we will perform some checks on the wrapper tx to +validate it. 
These will involve:
+
+- Signature
+- `GasLimit` is below the block gas limit
+- `Fees` are paid with an accepted token and match the minimum amount required
+- `ChainId`
+- Transaction hash
+- Expiration
+- Wrapper signer has enough funds to pay the fee
+- The unshielding tx (if present) is indeed a masp unshielding transfer
+- The unshielding tx (if present) releases the minimum amount of tokens required
+  to pay fees
+- The unshielding tx (if present) runs successfully
+
+More details on gas, fees and the unshielding tx can be found in the
+[fee specs](../economics/fee-system.md).
+
+These checks can all be done before executing the transactions themselves. If
+any of these fails, the transaction should be considered invalid and the action
+to take will be one of the following:
+
+1. If the checks fail on the signature, chainId, expiration, transaction hash,
+   balance or the unshielding tx, then this transaction will be forever invalid,
+   regardless of the possible evolution of the ledger's state. There's no need
+   to include the transaction in the block. Moreover, we **cannot** include this
+   transaction in the block to charge a fee (as a sort of punishment) because
+   these errors may not depend on the signer of the tx (could be due to
+   malicious users or simply a delay in the tx inclusion in the block)
+2. If the checks fail on `Fee` or `GasLimit` the transaction should be
+   discarded. In theory the gas limit of a block is a Namada parameter
+   controlled by governance, so there's a chance that the transaction could
+   become valid in the future should this limit be raised. The same applies to
+   the token whitelist and the minimum fee required. However, we can expect a
+   slow rate of change of these parameters, so we can reject the tx (the
+   submitter can always resubmit it at a future time)
+
+If instead all the checks pass validation we will include the transaction in the
+block to store the hash and charge the fee.
+
+All these checks are also run in `process_proposal`.
+
+This mechanism can also be applied to another scenario. Suppose a transaction
+was not propagated to the network by a node (or a group of colluding nodes).
+Now, this tx might be valid, but it doesn't get inserted into a block. Without
+an expiration, this tx can be replayed (better, applied, since it was never
+executed in the first place) at a future moment in time when the submitter might
+not be willing to execute it any more.
+
+### Block rejection
+
+To prevent a block proposer from including invalid transactions in a block, the
+validators will reject the entire block in case they find a single invalid
+wrapper transaction.
+
+Rejecting the single invalid transaction while still accepting the block is not
+a valid solution. In this case, in fact, the block proposer has no incentive to
+include invalid transactions in the block because these would gain him no fees
+but, at the same time, he has no real disincentive against including them
+either, since in this case the validators would simply discard the invalid tx
+but accept the rest of the block, granting the proposer his fees on all the
+other transactions. This, of course, applies in case the proposer has no other
+valid tx to include. A malicious proposer could act like this to spam the block
+without suffering any penalty. 
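+
+A minimal sketch of this all-or-nothing rule (made-up types, not the real
+`process_proposal` implementation) could look as follows:
+
+```rust
+// Sketch only: a single failing wrapper check rejects the whole block, so a
+// proposer gains nothing by slipping invalid transactions in.
+#[derive(Debug)]
+enum WrapperError {
+    // One of the checks listed under "Wrapper checks" failed (signature,
+    // chain id, hash, expiration, fees, unshielding, ...).
+    InvalidWrapper,
+}
+
+struct WrapperTx {
+    passes_checks: bool,
+}
+
+fn validate_wrapper(tx: &WrapperTx) -> Result<(), WrapperError> {
+    if tx.passes_checks {
+        Ok(())
+    } else {
+        Err(WrapperError::InvalidWrapper)
+    }
+}
+
+fn accept_block(txs: &[WrapperTx]) -> Result<(), WrapperError> {
+    txs.iter().try_for_each(validate_wrapper)
+}
+
+fn main() {
+    let good = WrapperTx { passes_checks: true };
+    let bad = WrapperTx { passes_checks: false };
+    assert!(accept_block(&[WrapperTx { passes_checks: true }]).is_ok());
+    // One invalid wrapper is enough to reject the entire block.
+    assert!(accept_block(&[good, bad]).is_err());
+}
+```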
+ +To recap, a block is rejected when at least one of the following conditions is +met: + +- At least one `WrapperTx` is invalid with respect to the checks listed in the + [relative section](#wrapper-checks) +- The order/number of decrypted txs differs from the order/number committed in + the previous block ## Possible optimizations -In this section we describe two alternative solutions that come with some optimizations. +In this section we describe two alternative solutions that come with some +optimizations. ### Transaction counter -Instead of relying on a hash (32 bytes) we could use a 64 bits (8 bytes) transaction counter as nonce for the wrapper and inner transactions. The advantage is that the space required would be much less since we only need two 8 bytes values in storage for every address which is signing transactions. On the other hand, the handling of the counter for the inner transaction will be performed entirely in wasm (transactions and VPs) making it a bit less efficient. This solution also imposes a strict ordering on the transactions issued by a same address. +Instead of relying on a hash (32 bytes) we could use a 64 bits (8 bytes) +transaction counter as nonce for the wrapper and inner transactions. The +advantage is that the space required would be much less since we only need two 8 +bytes values in storage for every address which is signing transactions. On the +other hand, the handling of the counter for the inner transaction will be +performed entirely in wasm (transactions and VPs) making it a bit less +efficient. This solution also imposes a strict ordering on the transactions +issued by a same address. -**NOTE**: this solution requires the ability to [yield](https://github.com/wasmerio/wasmer/issues/1127) execution from Wasmer which is not implemented yet. +**NOTE**: this solution requires the ability to +[yield](https://github.com/wasmerio/wasmer/issues/1127) execution from Wasmer +which is not implemented yet. #### InnerTx -We will implement the protection entirely in Wasm: the check of the counter will be carried out by the validity predicates while the actual writing of the counter in storage will be done by the transactions themselves. +We will implement the protection entirely in Wasm: the check of the counter will +be carried out by the validity predicates while the actual writing of the +counter in storage will be done by the transactions themselves. -To do so, the `SignedTxData` attached to the transaction will hold the current value of the counter in storage: +To do so, the `SignedTxData` attached to the transaction will hold the current +value of the counter in storage: ```rust pub struct SignedTxData { - /// The original tx data bytes, if any - pub data: Option>, - /// The optional transaction counter for replay protection - pub tx_counter: Option, - /// The signature is produced on the tx data concatenated with the tx code - /// and the timestamp. - pub sig: common::Signature, + /// The original tx data bytes, if any + pub data: Option>, + /// The optional transaction counter for replay protection + pub tx_counter: Option, + /// The signature is produced on the tx data concatenated with the tx code + /// and the timestamp. 
+ pub sig: common::Signature, } ``` -The counter must reside in `SignedTxData` and not in the data itself because this must be checked by the validity predicate which is not aware of the specific transaction that took place but only of the changes in the storage; therefore, the VP is not able to correctly deserialize the data of the transactions since it doesn't know what type of data the bytes represent. +The counter must reside in `SignedTxData` and not in the data itself because +this must be checked by the validity predicate which is not aware of the +specific transaction that took place but only of the changes in the storage; +therefore, the VP is not able to correctly deserialize the data of the +transactions since it doesn't know what type of data the bytes represent. -The counter will be signed as well to protect it from tampering and grant it the same guarantees explained at the [beginning](#encryption-authentication) of this document. +The counter will be signed as well to protect it from tampering and grant it the +same guarantees explained at the [beginning](#encryption-authentication) of this +document. -The wasm transaction will simply read the value from storage and increase its value by one. The target key in storage will be the following: +The wasm transaction will simply read the value from storage and increase its +value by one. The target key in storage will be the following: ``` /$Address/inner_tx_counter: u64 ``` -The VP of the _source_ address will then check the validity of the signature and, if it's deemed valid, will proceed to check if the pre-value of the counter in storage was equal to the one contained in the `SignedTxData` struct and if the post-value of the key in storage has been incremented by one: if any of these conditions doesn't hold the VP will discard the transactions and prevent the changes from being applied to the storage. +The VP of the _source_ address will then check the validity of the signature +and, if it's deemed valid, will proceed to check if the pre-value of the counter +in storage was equal to the one contained in the `SignedTxData` struct and if +the post-value of the key in storage has been incremented by one: if any of +these conditions doesn't hold the VP will discard the transactions and prevent +the changes from being applied to the storage. -In the specific case of a shielded transfer, since MASP already comes with replay protection as part of the Zcash design (see the [MASP specs](../masp.md) and [Zcash protocol specs](https://zips.z.cash/protocol/protocol.pdf)), the counter in `SignedTxData` is not required and therefore should be optional. +In the specific case of a shielded transfer, since MASP already comes with +replay protection as part of the Zcash design (see the [MASP specs](../masp.md) +and [Zcash protocol specs](https://zips.z.cash/protocol/protocol.pdf)), the +counter in `SignedTxData` is not required and therefore should be optional. -To implement replay protection for the inner transaction we will need to update all the VPs checking the transaction's signature to include the check on the transaction counter: at the moment the `vp_user` validity predicate is the only one to update. In addition, all the transactions involving `SignedTxData` should increment the counter. 
+To implement replay protection for the inner transaction we will need to update +all the VPs checking the transaction's signature to include the check on the +transaction counter: at the moment the `vp_user` validity predicate is the only +one to update. In addition, all the transactions involving `SignedTxData` should +increment the counter. #### WrapperTx -To protect this transaction we can implement an in-protocol mechanism. Since the wrapper transaction gets signed before being submitted to the network, we can leverage the `tx_counter` field of the `SignedTxData` already introduced for the inner tx. +To protect this transaction we can implement an in-protocol mechanism. Since the +wrapper transaction gets signed before being submitted to the network, we can +leverage the `tx_counter` field of the `SignedTxData` already introduced for the +inner tx. In addition, we need another counter in the storage subspace of every address: @@ -234,109 +541,229 @@ In addition, we need another counter in the storage subspace of every address: /$Address/wrapper_tx_counter: u64 ``` -where `$Address` is the one signing the transaction (the same implied by the `pk` field of the `WrapperTx` struct). +where `$Address` is the one signing the transaction (the same implied by the +`pk` field of the `WrapperTx` struct). -The check will consist of a signature check first followed by a check on the counter that will make sure that the counter attached to the transaction matches the one in storage for the signing address. This will be done in the `process_proposal` function so that validators can decide whether the transaction is valid or not; if it's not, then they will discard the transaction and skip to the following one. +The check will consist of a signature check first followed by a check on the +counter that will make sure that the counter attached to the transaction matches +the one in storage for the signing address. This will be done in the +`process_proposal` function so that validators can decide whether the +transaction is valid or not; if it's not, then they will discard the transaction +and skip to the following one. -At last, in `finalize_block`, the ledger will update the counter key in storage, increasing its value by one. This will happen when the following conditions are met: +At last, in `finalize_block`, the ledger will update the counter key in storage, +increasing its value by one. This will happen when the following conditions are +met: -- `process_proposal` has accepted the tx by validating its signature and transaction counter -- The tx was correctly applied in `finalize_block` (for `WrapperTx` this simply means inclusion in the block and gas accounting) +- `process_proposal` has accepted the tx by validating its signature and + transaction counter +- The tx was correctly applied in `finalize_block` (for `WrapperTx` this simply + means inclusion in the block and gas accounting) -Now, if a malicious user tried to replay this transaction, the `tx_counter` in the struct would no longer be equal to the one in storage and the transaction would be deemed invalid. +Now, if a malicious user tried to replay this transaction, the `tx_counter` in +the struct would no longer be equal to the one in storage and the transaction +would be deemed invalid. #### Implementation details -In this section we'll talk about some details of the replay protection mechanism that derive from the solution proposed in this section. 
+In this section we'll talk about some details of the replay protection mechanism +that derive from the solution proposed in this section. ##### Storage counters -Replay protection will require interaction with the storage from both the protocol and Wasm. To do so we can take advantage of the `StorageRead` and `StorageWrite` traits to work with a single interface. +Replay protection will require interaction with the storage from both the +protocol and Wasm. To do so we can take advantage of the `StorageRead` and +`StorageWrite` traits to work with a single interface. -This implementation requires two transaction counters in storage for every address, so that the storage subspace of a given address looks like the following: +This implementation requires two transaction counters in storage for every +address, so that the storage subspace of a given address looks like the +following: ``` /$Address/wrapper_tx_counter: u64 /$Address/inner_tx_counter: u64 ``` -An implementation requiring a single counter in storage has been taken into consideration and discarded because that would not support batching; see the [relative section](#single-counter-in-storage) for a more in-depth explanation. +An implementation requiring a single counter in storage has been taken into +consideration and discarded because that would not support batching; see the +[relative section](#single-counter-in-storage) for a more in-depth explanation. -For both the wrapper and inner transaction, the increase of the counter in storage is an important step that must be correctly executed. First, the implementation will return an error in case of a counter overflow to prevent wrapping, since this would allow for the replay of previous transactions. Also, we want to increase the counter as soon as we verify that the signature, the chain id and the passed-in transaction counter are valid. The increase should happen immediately after the checks because of two reasons: +For both the wrapper and inner transaction, the increase of the counter in +storage is an important step that must be correctly executed. First, the +implementation will return an error in case of a counter overflow to prevent +wrapping, since this would allow for the replay of previous transactions. Also, +we want to increase the counter as soon as we verify that the signature, the +chain id and the passed-in transaction counter are valid. The increase should +happen immediately after the checks because of two reasons: - Prevent replay attack of a transaction in the same block -- Update the transaction counter even in case the transaction fails, to prevent a possible replay attack in the future (since a transaction invalid at state Sx could become valid at state Sn where `n > x`) - -For `WrapperTx`, the counter increase and fee accounting will per performed in `finalize_block` (as stated in the [relative](#wrappertx) section). - -For `InnerTx`, instead, the logic is not straightforward. The transaction code will be executed in a Wasm environment ([Wasmer](https://wasmer.io)) till it eventually completes or raises an exception. In case of success, the counter in storage will be updated correctly but, in case of failure, the protocol will discard all of the changes brought by the transactions to the write-ahead-log, including the updated transaction counter. This is a problem because the transaction could be successfully replayed in the future if it will become valid. 
- -The ideal solution would be to interrupt the execution of the Wasm code after the transaction counter (if any) has been increased. This would allow performing a first run of the involved VPs and, if all of them accept the changes, let the protocol commit these changes before any possible failure. After that, the protocol would resume the execution of the transaction from the previous interrupt point until completion or failure, after which a second pass of the VPs is initiated to validate the remaining state modifications. In case of a VP rejection after the counter increase there would be no need to resume execution and the transaction could be immediately deemed invalid so that the protocol could skip to the next tx to be executed. With this solution, the counter update would be committed to storage regardless of a failure of the transaction itself. - -Unfortunately, at the moment, Wasmer doesn't allow [yielding](https://github.com/wasmerio/wasmer/issues/1127) from the execution. - -In case the transaction went out of gas (given the `gas_limit` field of the wrapper), all the changes applied will be discarded from the WAL and will not affect the state of the storage. The inner transaction could then be rewrapped with a correct gas limit and replayed until the `expiration` time has been reached. +- Update the transaction counter even in case the transaction fails, to prevent + a possible replay attack in the future (since a transaction invalid at state + Sx could become valid at state Sn where `n > x`) + +For `WrapperTx`, the counter increase and fee accounting will per performed in +`finalize_block` (as stated in the [relative](#wrappertx) section). + +For `InnerTx`, instead, the logic is not straightforward. The transaction code +will be executed in a Wasm environment ([Wasmer](https://wasmer.io)) till it +eventually completes or raises an exception. In case of success, the counter in +storage will be updated correctly but, in case of failure, the protocol will +discard all of the changes brought by the transactions to the write-ahead-log, +including the updated transaction counter. This is a problem because the +transaction could be successfully replayed in the future if it will become +valid. + +The ideal solution would be to interrupt the execution of the Wasm code after +the transaction counter (if any) has been increased. This would allow performing +a first run of the involved VPs and, if all of them accept the changes, let the +protocol commit these changes before any possible failure. After that, the +protocol would resume the execution of the transaction from the previous +interrupt point until completion or failure, after which a second pass of the +VPs is initiated to validate the remaining state modifications. In case of a VP +rejection after the counter increase there would be no need to resume execution +and the transaction could be immediately deemed invalid so that the protocol +could skip to the next tx to be executed. With this solution, the counter update +would be committed to storage regardless of a failure of the transaction itself. + +Unfortunately, at the moment, Wasmer doesn't allow +[yielding](https://github.com/wasmerio/wasmer/issues/1127) from the execution. + +In case the transaction went out of gas (given the `gas_limit` field of the +wrapper), all the changes applied will be discarded from the WAL and will not +affect the state of the storage. 
The inner transaction could then be rewrapped +with a correct gas limit and replayed until the `expiration` time has been +reached. ##### Batching and transaction ordering -This replay protection technique supports the execution of multiple transactions with the same address as _source_ in a single block. Actually, the presence of the transaction counters and the checks performed on them now impose a strict ordering on the execution sequence (which can be an added value for some use cases). The correct execution of more than one transaction per source address in the same block is preserved as long as: +This replay protection technique supports the execution of multiple transactions +with the same address as _source_ in a single block. Actually, the presence of +the transaction counters and the checks performed on them now impose a strict +ordering on the execution sequence (which can be an added value for some use +cases). The correct execution of more than one transaction per source address in +the same block is preserved as long as: -1. The wrapper transactions are inserted in the block with the correct ascending order +1. The wrapper transactions are inserted in the block with the correct ascending + order 2. No hole is present in the counters' sequence -3. The counter of the first transaction included in the block matches the expected one in storage - -The conditions are enforced by the block proposer who has an interest in maximizing the amount of fees extracted by the proposed block. To support this incentive, we will charge gas and fees at the same moment in which we perform the counter increase explained in the [storage counters](#storage-counters) section: this way we can avoid charging fees and gas if the transaction is invalid (invalid signature, wrong counter or wrong chain id), effectively incentivizing the block proposer to include only valid transactions and correctly reorder them to maximize the fees (see the [block rejection](#block-rejection) section for an alternative solution that was discarded in favor of this). - -In case of a missing transaction causes a hole in the sequence of transaction counters, the block proposer will include in the block all the transactions up to the missing one and discard all the ones following that one, effectively preserving the correct ordering. - -Correctly ordering the transactions is not enough to guarantee the correct execution. As already mentioned in the [WrapperTx](#wrappertx) section, the block proposer and the validators also need to access the storage to check that the first transaction counter of a sequence is actually the expected one. - -The entire counter ordering is only done on the `WrapperTx`: if the inner counter is wrong then the inner transaction will fail and the signer of the corresponding wrapper will be charged with fees. This incentivizes submitters to produce valid transactions and discourages malicious user from rewrapping and resubmitting old transactions. +3. The counter of the first transaction included in the block matches the + expected one in storage + +The conditions are enforced by the block proposer who has an interest in +maximizing the amount of fees extracted by the proposed block. To support this +incentive, validators will reject the block proposed if any of the included +wrapper transactions are invalid, effectively incentivizing the block proposer +to include only valid transactions and correctly reorder them to gain the fees. 
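+
+The following sketch (plain integers standing in for the real transaction
+counters, not the proposer's actual code) illustrates how these conditions
+determine which prefix of a counter sequence can make it into the block:
+
+```rust
+// Sketch only: returns how many txs from the front of the sequence can be
+// included while respecting the ordering rules above.
+fn includable_prefix(expected_in_storage: u64, counters: &[u64]) -> usize {
+    let mut expected = expected_in_storage;
+    let mut included = 0;
+    for &counter in counters {
+        // A hole or an out-of-order counter stops inclusion from here on.
+        if counter != expected {
+            break;
+        }
+        expected += 1;
+        included += 1;
+    }
+    included
+}
+
+fn main() {
+    // Storage expects 6: counters 6 and 7 fit, the hole at 9 drops the rest.
+    assert_eq!(includable_prefix(6, &[6, 7, 9, 10]), 2);
+    // The first counter must match the one expected in storage.
+    assert_eq!(includable_prefix(6, &[7, 8]), 0);
+}
+```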
+
+In case a missing transaction causes a hole in the sequence of transaction
+counters, the block proposer will include in the block all the transactions up
+to the missing one and discard all the ones following that one, effectively
+preserving the correct ordering.
+
+Correctly ordering the transactions is not enough to guarantee the correct
+execution. As already mentioned in the [WrapperTx](#wrappertx) section, the
+block proposer and the validators also need to access the storage to check that
+the first transaction counter of a sequence is actually the expected one.
+
+The entire counter ordering is only done on the `WrapperTx`: if the inner
+counter is wrong then the inner transaction will fail and the signer of the
+corresponding wrapper will be charged with fees. This incentivizes submitters to
+produce valid transactions and discourages malicious users from rewrapping and
+resubmitting old transactions.
 
 ##### Mempool checks
 
-As a form of optimization to prevent mempool spamming, some of the checks that have been introduced in this document will also be brought to the `mempool_validate` function. Of course, we always refer to checks on the `WrapperTx` only. More specifically:
+As a form of optimization to prevent mempool spamming, some of the checks that
+have been introduced in this document will also be brought to the
+`mempool_validate` function. Of course, we always refer to checks on the
+`WrapperTx` only. More specifically:
 
 - Check the `ChainId` field
-- Check the signature of the transaction against the `pk` field of the `WrapperTx`
+- Check the signature of the transaction against the `pk` field of the
+  `WrapperTx`
 - Perform a limited check on the transaction counter
 
-Regarding the last point, `mempool_validate` will check if the counter in the transaction is `>=` than the one in storage for the address signing the `WrapperTx`. A complete check (checking for strict equality) is not feasible, as described in the [relative](#mempool-counter-validation) section.
+Regarding the last point, `mempool_validate` will check if the counter in the
+transaction is `>=` the one in storage for the address signing the
+`WrapperTx`. A complete check (checking for strict equality) is not feasible, as
+described in the [relative](#mempool-counter-validation) section.
 
 #### Alternatives considered
 
-In this section we list some possible solutions that were taken into consideration during the writing of this solution but were eventually discarded.
+In this section we list some possible solutions that were taken into
+consideration during the writing of this solution but were eventually discarded.
 
 ##### Mempool counter validation
 
-The idea of performing a complete validation of the transaction counters in the `mempool_validate` function was discarded because of a possible flaw.
-
-Suppose a client sends five transactions (counters from 1 to 5). The mempool of the next block proposer is not guaranteed to receive them in order: something on the network could shuffle the transactions up so that they arrive in the following order: 2-3-4-5-1. Now, since we validate every single transaction to be included in the mempool in the exact order in which we receive them, we would discard the first four transactions and only accept the last one, that with counter 1. 
Now the next block proposer might have the four discarded transactions in its mempool (since those were not added to the previous block and therefore not evicted from the other mempools, at least they shouldn't, see [block rejection](#block-rejection)) and could therefore include them in the following block. But still, a process that could have ended in a single block actually took two blocks. Moreover, there are two more issues: - -- The next block proposer might have the remaining transactions out of order in his mempool as well, effectively propagating the same issue down to the next block proposer -- The next block proposer might not have these transactions in his mempool at all - -Finally, transactions that are not allowed into the mempool don't get propagated to the other peers, making their inclusion in a block even harder. -It is instead better to avoid a complete filter on the transactions based on their order in the mempool: instead we are going to perform a simpler check and then let the block proposer rearrange them correctly when proposing the block. +The idea of performing a complete validation of the transaction counters in the +`mempool_validate` function was discarded because of a possible flaw. + +Suppose a client sends five transactions (counters from 1 to 5). The mempool of +the next block proposer is not guaranteed to receive them in order: something on +the network could shuffle the transactions up so that they arrive in the +following order: 2-3-4-5-1. Now, since we validate every single transaction to +be included in the mempool in the exact order in which we receive them, we would +discard the first four transactions and only accept the last one, that with +counter 1. Now the next block proposer might have the four discarded +transactions in its mempool (since those were not added to the previous block +and therefore not evicted from the other mempools, at least they shouldn't, see +[block rejection](#block-rejection)) and could therefore include them in the +following block. But still, a process that could have ended in a single block +actually took two blocks. Moreover, there are two more issues: + +- The next block proposer might have the remaining transactions out of order in + his mempool as well, effectively propagating the same issue down to the next + block proposer +- The next block proposer might not have these transactions in his mempool at + all + +Finally, transactions that are not allowed into the mempool don't get propagated +to the other peers, making their inclusion in a block even harder. It is instead +better to avoid a complete filter on the transactions based on their order in +the mempool: instead we are going to perform a simpler check and then let the +block proposer rearrange them correctly when proposing the block. ##### In-protocol protection for InnerTx -An alternative implementation could place the protection for the inner tx in protocol, just like the wrapper one, based on the transaction counter inside `SignedTxData`. The check would run in `process_proposal` and the update in `finalize_block`, just like for the wrapper transaction. This implementation, though, shows two drawbacks: - -- it implies the need for an hard fork in case of a modification of the replay protection mechanism -- it's not clear who's the source of the inner transaction from the outside, as that depends on the specific code of the transaction itself. 
We could use specific whitelisted txs set to define when it requires a counter (would not work for future programmable transactions), but still, we have no way to define which address should be targeted for replay protection (**blocking issue**)
+An alternative implementation could place the protection for the inner tx in
+protocol, just like the wrapper one, based on the transaction counter inside
+`SignedTxData`. The check would run in `process_proposal` and the update in
+`finalize_block`, just like for the wrapper transaction. This implementation,
+though, shows two drawbacks:
+
+- it implies the need for a hard fork in case of a modification of the replay
+  protection mechanism
+- it's not clear, from the outside, who the source of the inner transaction is,
+  as that depends on the specific code of the transaction itself. We could use
+  a specific set of whitelisted txs to define when a counter is required (which
+  would not work for future programmable transactions), but still, we have no
+  way to define which address should be targeted for replay protection
+  (**blocking issue**)

 ##### In-protocol counter increase for InnerTx

-In the [storage counter](#storage-counters) section we mentioned the issue of increasing the transaction counter for an inner tx even in case of failure. A possible solution that we took in consideration and discarded was to increase the counter from protocol in case of a failure.
+In the [storage counter](#storage-counters) section we mentioned the issue of
+increasing the transaction counter for an inner tx even in case of failure. A
+possible solution that we took into consideration and discarded was to increase
+the counter from protocol in case of a failure.

-This is technically feasible since the protocol is aware of the keys modified by the transaction and also of the results of the validity predicates (useful in case the transaction updated more than one counter in storage). It is then possible to recover the value and reapply the change directly from protocol. This logic though, is quite dispersive, since it effectively splits the management of the counter for the `InnerTx` among Wasm and protocol, while our initial intent was to keep it completely in Wasm.
+This is technically feasible since the protocol is aware of the keys modified by
+the transaction and also of the results of the validity predicates (useful in
+case the transaction updated more than one counter in storage). It is then
+possible to recover the value and reapply the change directly from protocol.
+This logic, though, is quite scattered, since it effectively splits the
+management of the counter for the `InnerTx` between Wasm and protocol, while our
+initial intent was to keep it completely in Wasm.

 ##### Single counter in storage

-We can't use a single transaction counter in storage because this would prevent batching.
+We can't use a single transaction counter in storage because this would prevent
+batching.
-As an example, if a client (with a current counter in storage holding value 5) generates two transactions to be included in the same block, signing both the outer and the inner (default behavior of the client), it would need to generate the following transaction counters:
+As an example, if a client (with a current counter in storage holding value 5)
+generates two transactions to be included in the same block, signing both the
+outer and the inner (default behavior of the client), it would need to generate
+the following transaction counters:

 ```
 [
@@ -345,9 +772,15 @@ As an example, if a client (with a current counter in storage holding value 5) g
 ]
 ```

-Now, the current execution model of Namada includes the `WrapperTx` in a block first to then decrypt and execute the inner tx in the following block (respecting the committed order of the transactions). That would mean that the outer tx of `T1` would pass validation and immediately increase the counter to 6 to prevent a replay attack in the same block. Now, the outer tx of `T2` will be processed but it won't pass validation because it carries a counter with value 7 while the ledger expects 6.
+Now, the current execution model of Namada includes the `WrapperTx` in a block
+first to then decrypt and execute the inner tx in the following block
+(respecting the committed order of the transactions). That would mean that the
+outer tx of `T1` would pass validation and immediately increase the counter to 6
+to prevent a replay attack in the same block. Now, the outer tx of `T2` will be
+processed but it won't pass validation because it carries a counter with value 7
+while the ledger expects 6.

-To fix this, one could think to set the counters as follows:
+To fix this, one could think to set the counters as follows:

 ```
 [
@@ -356,11 +789,23 @@ To fix this, one could think to set the counters as follows:
 ]
 ```

-This way both the transactions will be considered valid and executed. The issue is that, if the second transaction is not included in the block (for any reason), than the first transaction (the only one remaining at this point) will fail. In fact, after the outer tx has correctly increased the counter in storage to value 6 the block will be accepted. In the next block the inner transaction will be decrypted and executed but this last step will fail since the counter in `SignedTxData` carries a value of 7 and the counter in storage has a value of 6.
+This way both the transactions will be considered valid and executed. The issue
+is that, if the second transaction is not included in the block (for any
+reason), then the first transaction (the only one remaining at this point) will
+fail. In fact, after the outer tx has correctly increased the counter in storage
+to value 6 the block will be accepted. In the next block the inner transaction
+will be decrypted and executed but this last step will fail since the counter in
+`SignedTxData` carries a value of 7 and the counter in storage has a value of 6.

-To cope with this there are two possible ways. The first one is that, instead of checking the exact value of the counter in storage and increasing its value by one, we could check that the transaction carries a counter `>=` than the one in storage and write this one (not increase) to storage. The problem with this is that it the lack of support for strict ordering of execution.
+To cope with this there are two possible ways. The first one is that, instead of
+checking the exact value of the counter in storage and increasing its value by
+one, we could check that the transaction carries a counter `>=` the one in
+storage and write this value (not increase it) to storage. The problem with this
+is the lack of support for strict ordering of execution.

-The second option is to keep the usual increase strategy of the counter (increase by one and check for strict equality) and simply use two different counters in storage for each address. The transaction will then look like this:
+The second option is to keep the usual increase strategy of the counter
+(increase by one and check for strict equality) and simply use two different
+counters in storage for each address. The transaction will then look like this:

 ```
 [
@@ -369,135 +814,282 @@ The second option is to keep the usual increase strategy of the counter (increas
 ]
 ```

-Since the order of inclusion of the `WrapperTxs` forces the same order of the execution for the inner ones, both transactions can be correctly executed and the correctness will be maintained even in case `T2` didn't make it to the block (note that the counter for an inner tx and the corresponding wrapper one don't need to coincide).
-
-##### Block rejection
-
-The implementation proposed in this document has one flaw when it comes to discontinuous transactions. If, for example, for a given address, the counter in storage for the `WrapperTx` is 5 and the block proposer receives, in order, transactions 6, 5 and 8, the proposer will have an incentive to correctly order transactions 5 and 6 to gain the fees that he would otherwise lose. Transaction 8 will never be accepted by the validators no matter the ordering (since they will expect tx 7 which got lost): this effectively means that the block proposer has no incentive to include this transaction in the block because it would gain him no fees but, at the same time, he doesn't really have a disincentive to not include it, since in this case the validators will simply discard the invalid tx but accept the rest of the block granting the proposer his fees on all the other transactions.
-
-A similar scenario happens in the case of a single transaction that is not the expected one (e.g. tx 5 when 4 is expected), or for a different type of inconsistencies, like a wrong `ChainId` or an invalid signature.
-
-It is up to the block proposer then, whether to include or not these kinds of transactions: a malicious proposer could do so to spam the block without suffering any penalty. The lack of fees could be a strong enough measure to prevent proposers from applying this behavior, together with the fact that the only damage caused to the chain would be spamming the blocks.
-
-If one wanted to completely prevent this scenario, the solution would be to reject the entire block: this way the proposer would have an incentive to behave correctly (by not including these transactions into the block) to gain the block fees. This would allow to shrink the size of the blocks in case of unfair block proposers but it would also cause the slow down of the block creation process, since after a block rejection a new Tendermint round has to be initiated. 
+Since the order of inclusion of the `WrapperTxs` forces the same order of the +execution for the inner ones, both transactions can be correctly executed and +the correctness will be maintained even in case `T2` didn't make it to the block +(note that the counter for an inner tx and the corresponding wrapper one don't +need to coincide). ### Wrapper-bound InnerTx -The solution is to tie an `InnerTx` to the corresponding `WrapperTx`. By doing so, it becomes impossible to rewrap an inner transaction and, therefore, all the attacks related to this practice would be unfeasible. This mechanism requires even less space in storage (only a 64 bit counter for every address signing wrapper transactions) and only one check on the wrapper counter in protocol. As a con, it requires communication between the signer of the inner transaction and that of the wrapper during the transaction construction. This solution also imposes a strict ordering on the wrapper transactions issued by a same address. +The solution is to tie an `InnerTx` to the corresponding `WrapperTx`. By doing +so, it becomes impossible to rewrap an inner transaction and, therefore, all the +attacks related to this practice would be unfeasible. This mechanism requires +even less space in storage (only a 64 bit counter for every address signing +wrapper transactions) and only one check on the wrapper counter in protocol. As +a con, it requires communication between the signer of the inner transaction and +that of the wrapper during the transaction construction. This solution also +imposes a strict ordering on the wrapper transactions issued by a same address. -To do so we will have to change the current definition of the two tx structs to the following: +To do so we will have to change the current definition of the two tx structs to +the following: ```rust pub struct WrapperTx { - /// The fee to be payed for including the tx - pub fee: Fee, - /// Used to determine an implicit account of the fee payer - pub pk: common::PublicKey, - /// Max amount of gas that can be used when executing the inner tx - pub gas_limit: GasLimit, - /// Lifetime of the transaction, also determines which decryption key will be used - pub expiration: DateTimeUtc, - /// Chain identifier for replay protection - pub chain_id: ChainId, - /// Transaction counter for replay protection - pub tx_counter: u64, - /// the encrypted payload - pub inner_tx: EncryptedTx, + /// The fee to be payed for including the tx + pub fee: Fee, + /// Used to determine an implicit account of the fee payer + pub pk: common::PublicKey, + /// Max amount of gas that can be used when executing the inner tx + pub gas_limit: GasLimit, + /// Lifetime of the transaction, also determines which decryption key will be used + pub expiration: DateTimeUtc, + /// Chain identifier for replay protection + pub chain_id: ChainId, + /// Transaction counter for replay protection + pub tx_counter: u64, + /// the encrypted payload + pub inner_tx: EncryptedTx, } pub struct Tx { - pub code: Vec, - pub data: Option>, - pub timestamp: DateTimeUtc, - pub wrapper_commit: Option, + pub code: Vec, + pub data: Option>, + pub timestamp: DateTimeUtc, + pub wrapper_commit: Option, } -``` +``` -The Wrapper transaction no longer holds the inner transaction hash while the inner one now holds a commit to the corresponding wrapper tx in the form of the hash of a `WrapperCommit` struct, defined as: +The Wrapper transaction no longer holds the inner transaction hash while the +inner one now holds a commit to the corresponding wrapper 
tx in the form of the +hash of a `WrapperCommit` struct, defined as: ```rust pub struct WrapperCommit { - pub pk: common::PublicKey, - pub tx_counter: u64, - pub expiration: DateTimeUtc, - pub chain_id: ChainId, + pub pk: common::PublicKey, + pub tx_counter: u64, + pub expiration: DateTimeUtc, + pub chain_id: ChainId, } ``` -The `pk-tx_counter` couple contained in this struct, uniquely identifies a single `WrapperTx` (since a valid tx_counter is unique given the address) so that the inner one is now bound to this specific wrapper. The remaining fields, `expiration` and `chain_id`, will tie these two values given their importance in terms of safety (see the [relative](#wrappertx-checks) section). Note that the `wrapper_commit` field must be optional because the `WrapperTx` struct itself gets converted to a `Tx` struct before submission but it doesn't need any commitment. - -Both the inner and wrapper tx get signed on their hash, as usual, to prevent tampering with data. When a wrapper gets processed by the ledger, we first check the validity of the signature, checking that none of the fields were modified: this means that the inner tx embedded within the wrapper is, in fact, the intended one. This last statement means that no external attacker has tampered data, but the tampering could still have been performed by the signer of the wrapper before signing the wrapper transaction. - -If this check (and others, explained later in the [checks](#wrappertx-checks) section) passes, then the inner tx gets decrypted in the following block proposal process. At this time we check that the order in which the inner txs are inserted in the block matches that of the corresponding wrapper txs in the previous block. To do so, we rely on an in-storage queue holding the hash of the `WrapperCommit` struct computed from the wrapper tx. From the inner tx we extract the `WrapperCommit` hash and check that it matches that in the queue: if they don't it means that the inner tx has been reordered or rewrapped and we reject the block. Note that, since we have already checked the wrapper at this point, the only way to rewrap the inner tx would be to also modify its commitment (need to change at least the `tx_counter` field), otherwise the checks on the wrapper would have spotted the inconsistency and rejected the tx. - -If this check passes then we can send the inner transaction to the wasm environment for execution: if the transaction is signed, then at least one VP will check its signature to spot possible tampering of the data (especially by the wrapper signer, since this specific case cannot be checked before this step) and, if this is the case, will reject this transaction and no storage modifications will be applied. +The `pk-tx_counter` couple contained in this struct, uniquely identifies a +single `WrapperTx` (since a valid tx_counter is unique given the address) so +that the inner one is now bound to this specific wrapper. The remaining fields, +`expiration` and `chain_id`, will tie these two values given their importance in +terms of safety (see the [relative](#wrappertx-checks) section). Note that the +`wrapper_commit` field must be optional because the `WrapperTx` struct itself +gets converted to a `Tx` struct before submission but it doesn't need any +commitment. + +Both the inner and wrapper tx get signed on their hash, as usual, to prevent +tampering with data. 
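+
+As a purely illustrative sketch of this binding, the snippet below derives a
+commitment hash from the fields of the struct above and shows the equality
+check against the in-storage queue described in the next paragraph. The field
+types are simplified stand-ins (strings and integers instead of
+`common::PublicKey`, `DateTimeUtc` and `ChainId`), and hashing the concatenated
+bytes with the `sha2` crate is an assumption made for the example, not the
+serialization or hashing scheme actually used by Namada.
+
+```rust
+use sha2::{Digest, Sha256};
+
+/// Simplified stand-in for the `WrapperCommit` struct defined above.
+struct WrapperCommit {
+    pk: String,       // stand-in for common::PublicKey
+    tx_counter: u64,
+    expiration: u64,  // stand-in for DateTimeUtc
+    chain_id: String, // stand-in for ChainId
+}
+
+impl WrapperCommit {
+    /// The commitment carried by the inner `Tx` in its `wrapper_commit` field.
+    fn hash(&self) -> [u8; 32] {
+        let mut hasher = Sha256::new();
+        hasher.update(self.pk.as_bytes());
+        hasher.update(self.tx_counter.to_le_bytes());
+        hasher.update(self.expiration.to_le_bytes());
+        hasher.update(self.chain_id.as_bytes());
+        hasher.finalize().into()
+    }
+}
+
+/// The check run at decryption time: the commitment extracted from the inner
+/// tx must equal the one queued for the corresponding wrapper.
+fn commit_matches(queued: &[u8; 32], from_inner_tx: &[u8; 32]) -> bool {
+    queued == from_inner_tx
+}
+```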
+When a wrapper gets processed by the ledger, we first check
+the validity of the signature, checking that none of the fields were modified:
+this means that the inner tx embedded within the wrapper is, in fact, the
+intended one. This last statement means that no external attacker has tampered
+with the data, but the tampering could still have been performed by the signer
+of the wrapper before signing the wrapper transaction.
+
+If this check (and others, explained later in the [checks](#wrappertx-checks)
+section) passes, then the inner tx gets decrypted in the following block
+proposal process. At this time we check that the order in which the inner txs
+are inserted in the block matches that of the corresponding wrapper txs in the
+previous block. To do so, we rely on an in-storage queue holding the hash of the
+`WrapperCommit` struct computed from the wrapper tx. From the inner tx we
+extract the `WrapperCommit` hash and check that it matches that in the queue: if
+they don't, it means that the inner tx has been reordered and we reject the
+block.
+
+If this check passes then we can send the inner transaction to the wasm
+environment for execution: if the transaction is signed, then at least one VP
+will check its signature to spot possible tampering of the data (especially by
+the wrapper signer, since this specific case cannot be checked before this step)
+and, if this is the case, will reject this transaction and no storage
+modifications will be applied.

 In summary:

 - The `InnerTx` carries a unique identifier of the `WrapperTx` embedding it
 - Both the inner and wrapper txs are signed on all of their data
-- The signature check on the wrapper tx ensures that the inner transaction is the intended one and that this wrapper has not been used to wrap a different inner tx. It also verifies that no tampering happened with the inner transaction by a third party. Finally, it ensures that the public key is the one of the signer
-- The check on the `WrapperCommit` ensures that the inner tx has not been reordered nor rewrapped (this last one is a non-exhaustive check, inner tx data could have been tampered with by the wrapper signer)
-- The signature check of the inner tx performed in Vp grants that no data of the inner tx has been tampered with, effectively verifying the correctness of the previous check (`WrapperCommit`)
-
-This sequence of controls makes it no longer possible to rewrap an `InnerTx` which is now bound to its wrapper. This implies that replay protection is only needed on the `WrapperTx` since there's no way to extract the inner one, rewrap it and replay it.
+- The signature check on the wrapper tx ensures that the inner transaction is
+  the intended one and that this wrapper has not been used to wrap a different
+  inner tx. It also verifies that no tampering happened with the inner
+  transaction by a third party. Finally, it ensures that the public key is the
+  one of the signer
+- The check on the `WrapperCommit` ensures that the inner tx has not been
+  reordered nor rewrapped (this last one is a non-exhaustive check, inner tx
+  data could have been tampered with by the wrapper signer)
+- The signature check of the inner tx performed in the VP guarantees that no
+  data of the inner tx has been tampered with, effectively verifying the
+  correctness of the previous check (`WrapperCommit`)
+
+This sequence of controls makes it no longer possible to rewrap an `InnerTx`
+which is now bound to its wrapper. 
This implies that replay protection is only +needed on the `WrapperTx` since there's no way to extract the inner one, rewrap +it and replay it. #### WrapperTx checks -In `mempool_validation` and `process_proposal` we will perform some checks on the wrapper tx to validate it. These will involve: +In `mempool_validation` we will perform some checks on the wrapper tx to +validate it. These will involve: - Valid signature -- Enough funds to pay for the fee +- `GasLimit` is below the block gas limit (see the + [fee specs](../economics/fee-system.md) for more details) +- `Fees` are paid with an accepted token and match the minimum amount required + (see the [fee specs](../economics/fee-system.md) for more details) - Valid chainId - Valid transaction counter - Valid expiration -These checks can all be done before executing the transactions themselves. The check on the gas cannot be done ahead of time and we'll deal with it later. If any of these fails, the transaction should be considered invalid and the action to take will be one of the followings: - -1. If the checks fail on the signature, chainId, expiration or transaction counter, then this transaction will be forever invalid, regardless of the possible evolution of the ledger's state. There's no need to include the transaction in the block nor to increase the transaction counter. Moreover, we **cannot** include this transaction in the block to charge a fee (as a sort of punishment) because these errors may not depend on the signer of the tx (could be due to malicious users or simply a delay in the tx inclusion in the block) -2. If the checks fail _only_ because of an insufficient balance, the wrapper should be kept in mempool for a future play in case the funds should become available -3. If all the checks pass validation we will include the transaction in the block to increase the counter and charge the fee - -Note that, regarding point one, there's a distinction to be made about an invalid `tx_counter` which could be invalid because of being old or being in advance. To solve this last issue (counter greater than the expected one), we have to introduce the concept of a lifetime (or timeout) for the transactions: basically, the `WrapperTx` will hold an extra field called `expiration` stating the maximum time up until which the submitter is willing to see the transaction executed. After the specified time the transaction will be considered invalid and discarded regardless of all the other checks. This way, in case of a transaction with a counter greater than expected, it is sufficient to wait till after the expiration to submit more transactions, so that the counter in storage is not modified (kept invalid for the transaction under observation) and replaying that tx would result in a rejection. - -This actually generalizes to a more broad concept. In general, a transaction is valid at the moment of submission, but after that, a series of external factors (ledger state, etc.) might change the mind of the submitter who's now not interested in the execution of the transaction anymore. By introducing this new field we are introducing a new constraint in the transaction's contract, where the ledger will make sure to prevent the execution of the transaction after the deadline and, on the other side, the submitter commits himself to the result of the execution at least until its expiration. 
If the expiration is reached and the transaction has not been executed the submitter can decide to submit a new, identical transaction if he's still interested in the changes carried by it. - -In our design, the `expiration` will hold until the transaction is executed, once it's executed, either in case of success or failure, the `tx_counter` will be increased and the transaction will not be replayable. In essence, the transaction submitter commits himself to one of these three conditions: +These checks can all be done before executing the transactions themselves. If +any of these fails, the transaction should be considered invalid and the action +to take will be one of the followings: + +1. If the checks fail on the signature, chainId, expiration or transaction + counter, then this transaction will be forever invalid, regardless of the + possible evolution of the ledger's state. There's no need to include the + transaction in the block nor to increase the transaction counter. Moreover, + we **cannot** include this transaction in the block to charge a fee (as a + sort of punishment) because these errors may not depend on the signer of the + tx (could be due to malicious users or simply a delay in the tx inclusion in + the block) +2. If the checks fail on `Fee` or `GasLimit` the transaction should be + discarded. In theory the gas limit of a block is a Namada parameter + controlled by governance, so there's a chance that the transaction could + become valid in the future should this limit be raised. The same applies to + the token whitelist and the minimum fee required. However we can expect a + slow rate of change of these parameters so we can reject the tx (the + submitter can always resubmit it at a future time) +3. If all the checks pass validation we will include the transaction in the + block to increase the counter and charge the fee + +Note that, regarding point one, there's a distinction to be made about an +invalid `tx_counter` which could be invalid because of being old or being in +advance. To solve this last issue (counter greater than the expected one), we +have to introduce the concept of a lifetime (or timeout) for the transactions: +basically, the `WrapperTx` will hold an extra field called `expiration` stating +the maximum time up until which the submitter is willing to see the transaction +executed. After the specified time the transaction will be considered invalid +and discarded regardless of all the other checks. This way, in case of a +transaction with a counter greater than expected, it is sufficient to wait till +after the expiration to submit more transactions, so that the counter in storage +is not modified (kept invalid for the transaction under observation) and +replaying that tx would result in a rejection. + +This actually generalizes to a more broad concept. In general, a transaction is +valid at the moment of submission, but after that, a series of external factors +(ledger state, etc.) might change the mind of the submitter who's now not +interested in the execution of the transaction anymore. By introducing this new +field we are introducing a new constraint in the transaction's contract, where +the ledger will make sure to prevent the execution of the transaction after the +deadline and, on the other side, the submitter commits himself to the result of +the execution at least until its expiration. 
If the expiration is reached and +the transaction has not been executed the submitter can decide to submit a new, +identical transaction if he's still interested in the changes carried by it. + +In our design, the `expiration` will hold until the transaction is executed, +once it's executed, either in case of success or failure, the `tx_counter` will +be increased and the transaction will not be replayable. In essence, the +transaction submitter commits himself to one of these three conditions: - Transaction is invalid regardless of the specific state -- Transaction is executed (either with success or not) and the transaction counter is increased +- Transaction is executed (either with success or not) and the transaction + counter is increased - Expiration time has passed The first condition satisfied will invalidate further executions of the same tx. -The `expiration` parameter also justifies step 2 of the previous bullet points which states that if the validity checks fail only because of an insufficient balance to pay for fees than the transaction should be kept in mempool for a future execution. Without it, the transaction could be potentially executed at any future moment (provided that the counter is still valid), possibily going against the mutated interests of the submitter. With the expiration parameter, now, the submitter commits himself to accepting the execution of the transaction up to the specified time: it's going to be his responsibility to provide a sensible value for this parameter. Given this constraint the transaction will be kept in memepool up until the expiration (since it would become invalid after that in any case), to prevent the mempool from increasing too much in size. - -This mechanism can also be applied to another scenario. Suppose a transaction was not propagated to the network by a node (or a group of colluding nodes). Now, this tx might be valid, but it doesn't get inserted into a block. Without an expiration, if the submitter doesn't submit any other transaction (which gets included in a block to increase the transaction counter), this tx can be replayed (better, applied, since it was never executed in the first place) at a future moment in time when the submitter might not be willing to execute it any more. - -Since the signer of the wrapper may be different from the one of the inner we also need to include this `expiration` field in the `WrapperCommit` struct, to prevent the signer of the wrapper from setting a lifetime which is in conflict with the interests of the inner signer. Note that adding a separate lifetime for the wrapper alone (which would require two separate checks) doesn't carry any benefit: a wrapper with a lifetime greater than the inner would have no sense since the inner would fail. Restricting the lifetime would work but it also means that the wrapper could prevent a valid inner transaction from being executed. We will then keep a single `expiration` field specifying the wrapper tx max time (the inner one will actually be executed one block later because of the execution mechanism of Namada). - -To prevent the signer of the wrapper from submitting the transaction to a different chain, the `ChainId` field should also be included in the commit. - -Finally, in case the transaction run out of gas (based on the provided `gas_limit` field of the wrapper) we don't need to take any action: by this time the transaction counter will have already been incremented and the tx is not replayable anymore. 
In theory, we don't even need to increment the counter since the only way this transaction could become valid is a change in the way gas is accounted, which might require a fork anyway, and consequently a change in the required `ChainId`. However, since we can't tell the gas consumption before the inner tx has been executed, we cannot anticipate this check. +Since the signer of the wrapper may be different from the one of the inner we +also need to include this `expiration` field in the `WrapperCommit` struct, to +prevent the signer of the wrapper from setting a lifetime which is in conflict +with the interests of the inner signer. Note that adding a separate lifetime for +the wrapper alone (which would require two separate checks) doesn't carry any +benefit: a wrapper with a lifetime greater than the inner would have no sense +since the inner would fail. Restricting the lifetime would work but it also +means that the wrapper could prevent a valid inner transaction from being +executed. We will then keep a single `expiration` field specifying the wrapper +tx max time (the inner one will actually be executed one block later because of +the execution mechanism of Namada). + +To prevent the signer of the wrapper from submitting the transaction to a +different chain, the `ChainId` field should also be included in the commit. + +Finally, in case the transaction run out of gas (based on the provided +`GasLimit` field of the wrapper) we don't need to take any action: by this time +the transaction counter will have already been incremented and the tx is not +replayable anymore. In theory, we don't even need to increment the counter since +the only way this transaction could become valid is a change in the way gas is +accounted, which might require a fork anyway, and consequently a change in the +required `ChainId`. However, since we can't tell the gas consumption before the +inner tx has been executed, we cannot anticipate this check. + +All these checks are also run in `process_proposal` with an addition: validators +also check that the wrapper signer has enough funds to pay the fee. This check +should not be done in mempool because the funds available for a certain address +are variable in time and should only be checked at block inclusion time. If any +of the checks fail here, the entire block is rejected forcing a new Tendermint +round to begin (see a better explanation of this choice in the +[relative](#block-rejection) section). + +The `expiration` parameter also justifies that the check on funds is only done +in `process_proposal` and not in mempool. Without it, the transaction could be +potentially executed at any future moment, possibly going against the mutated +interests of the submitter. With the expiration parameter, now, the submitter +commits himself to accept the execution of the transaction up to the specified +time: it's going to be his responsibility to provide a sensible value for this +parameter. Given this constraint the transaction will be kept in mempool up +until the expiration (since it would become invalid after that in any case), to +prevent the mempool from increasing too much in size. + +This mechanism can also be applied to another scenario. Suppose a transaction +was not propagated to the network by a node (or a group of colluding nodes). +Now, this tx might be valid, but it doesn't get inserted into a block. 
+Without
+an expiration, if the submitter doesn't submit any other transaction (which gets
+included in a block to increase the transaction counter), this tx can be
+replayed (better, applied, since it was never executed in the first place) at a
+future moment in time when the submitter might not be willing to execute it any
+more.

 #### WrapperCommit

-The fields of `WrapperTx` not included in `WrapperCommit` are at the discretion of the `WrapperTx` producer. These fields are not included in the commit because of one of these two reasons:
+The fields of `WrapperTx` not included in `WrapperCommit` are at the discretion
+of the `WrapperTx` producer. These fields are not included in the commit because
+of one of these two reasons:

-- They depend on the specific state of the wrapper signer and cannot be forced (like `fee`, since the wrapper signer must have enough funds to pay for those)
-- They are not a threat (in terms of replay attacks) to the signer of the inner transaction in case of failure of the transaction
-
-In a certain way, the `WrapperCommit` not only binds an `InnerTx` no a wrapper, but effectively allows the inner to control the wrapper by requesting some specific parameters for its creation and bind these parameters among the two transactions: this allows us to apply the same constraints to both txs while performing the checks on the wrapper only.
+
+- They depend on the specific state of the wrapper signer and cannot be forced
+  (like `fee`, since the wrapper signer must have enough funds to pay for those)
+- They are not a threat (in terms of replay attacks) to the signer of the inner
+  transaction in case of failure of the transaction
+
+In a certain way, the `WrapperCommit` not only binds an `InnerTx` to a wrapper,
+but effectively allows the inner to control the wrapper by requesting some
+specific parameters for its creation and binding these parameters across the two
+transactions: this allows us to apply the same constraints to both txs while
+performing the checks on the wrapper only.
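+
+Purely as an illustration of this "inner controls the wrapper" relationship,
+the sketch below spells out the compliance check implied here: every field
+constrained by the commit has to match, while `fee`, `gas_limit` and the
+encrypted payload remain at the wrapper signer's discretion. The struct layouts
+are simplified stand-ins for the spec's types and the helper is hypothetical,
+not part of the Namada codebase.
+
+```rust
+/// Simplified stand-ins for the spec's types (illustration only).
+struct WrapperCommit {
+    pk: String,       // stand-in for common::PublicKey
+    tx_counter: u64,
+    expiration: u64,  // stand-in for DateTimeUtc
+    chain_id: String, // stand-in for ChainId
+}
+
+struct WrapperTx {
+    fee: u64,        // not constrained by the commit
+    gas_limit: u64,  // not constrained by the commit
+    pk: String,
+    tx_counter: u64,
+    expiration: u64,
+    chain_id: String,
+}
+
+/// A wrapper complies with a commit when every constrained field carries
+/// exactly the value the inner signer asked for.
+fn complies(wrapper: &WrapperTx, commit: &WrapperCommit) -> bool {
+    wrapper.pk == commit.pk
+        && wrapper.tx_counter == commit.tx_counter
+        && wrapper.expiration == commit.expiration
+        && wrapper.chain_id == commit.chain_id
+}
+```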
 #### Transaction creation process

-To craft a transaction, the process will now be the following (optional steps are only required if the signer of the inner differs from that of the wrapper):
-
-- (**Optional**) the `InnerTx` constructor request, to the wrapper signer, his public key and the `tx_counter` to be used
-- The `InnerTx` is constructed in its entirety with also the `wrapper_commit` field to define the constraints of the future wrapper
-- The produced `Tx` struct get signed over all of its data (with `SignedTxData`) producing a new struct `Tx`
-- (**Optional**) The inner tx produced is sent to the `WrapperTx` producer together with the `WrapperCommit` struct (required since the inner tx only holds the hash of it)
-- The signer of the wrapper constructs a `WrapperTx` compliant with the `WrapperCommit` fields
+To craft a transaction, the process will now be the following (optional steps
+are only required if the signer of the inner differs from that of the wrapper):
+
+- (**Optional**) the `InnerTx` constructor requests, from the wrapper signer,
+  his public key and the `tx_counter` to be used
+- The `InnerTx` is constructed in its entirety, including the `wrapper_commit`
+  field that defines the constraints of the future wrapper
+- The produced `Tx` struct gets signed over all of its data (with
+  `SignedTxData`), producing a new `Tx` struct
+- (**Optional**) The inner tx produced is sent to the `WrapperTx` producer
+  together with the `WrapperCommit` struct (required since the inner tx only
+  holds the hash of it)
+- The signer of the wrapper constructs a `WrapperTx` compliant with the
+  `WrapperCommit` fields
 - The produced `WrapperTx` gets signed over all of its fields

-Compared to a solution not binding the inner tx to the wrapper one, this solution requires the exchange of 3 messages (request `tx_counter`, receive `tx_counter`, send `InnerTx`) between the two signers (in case they differ), instead of one. However, it allows the signer of the inner to send the `InnerTx` to the wrapper signer already encrypted, guaranteeing a higher level of safety: only the `WrapperCommit` struct should be sent clear, but this doesn't reveal any sensitive information about the inner transaction itself.
+Compared to a solution not binding the inner tx to the wrapper one, this
+solution requires the exchange of 3 messages (request `tx_counter`, receive
+`tx_counter`, send `InnerTx`) between the two signers (in case they differ),
+instead of one. However, it allows the signer of the inner to send the `InnerTx`
+to the wrapper signer already encrypted, guaranteeing a higher level of safety:
+only the `WrapperCommit` struct needs to be sent in the clear, but this doesn't
+reveal any sensitive information about the inner transaction itself.
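+
+A compact end-to-end sketch of this exchange is shown below. It is an outline
+only: the message passing is collapsed into plain function calls, the
+commitment is produced with the standard library's non-cryptographic
+`DefaultHasher` purely to keep the example dependency-free, and none of the
+names correspond to the real client API.
+
+```rust
+use std::collections::hash_map::DefaultHasher;
+use std::hash::{Hash, Hasher};
+
+/// Constraints the inner signer wants the future wrapper to respect
+/// (simplified stand-in for the spec's `WrapperCommit`).
+#[derive(Hash)]
+struct WrapperCommit {
+    pk: String,
+    tx_counter: u64,
+    expiration: u64,
+    chain_id: String,
+}
+
+/// Simplified inner transaction carrying the commitment, as in the spec's `Tx`.
+struct InnerTx {
+    data: Vec<u8>,
+    wrapper_commit: Option<u64>,
+}
+
+/// Stand-in for the hash of the commit; the real design would use a
+/// cryptographic hash over the serialized struct.
+fn commit_hash(commit: &WrapperCommit) -> u64 {
+    let mut h = DefaultHasher::new();
+    commit.hash(&mut h);
+    h.finish()
+}
+
+fn main() {
+    // Messages 1 and 2: the inner signer asks the wrapper signer for the pk
+    // and tx_counter to commit to (the values here are made up).
+    let commit = WrapperCommit {
+        pk: "wrapper-signer-pk".to_string(),
+        tx_counter: 6,
+        expiration: 1_700_000_000,
+        chain_id: "namada-test".to_string(),
+    };
+
+    // The inner tx is built with the commitment embedded, then signed
+    // (signing and encryption are omitted here).
+    let inner = InnerTx {
+        data: b"transfer ...".to_vec(),
+        wrapper_commit: Some(commit_hash(&commit)),
+    };
+
+    // Message 3: `inner` (already encrypted) and `commit` are sent to the
+    // wrapper signer, who must build a `WrapperTx` matching `commit`.
+    assert!(inner.wrapper_commit.is_some());
+}
+```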
diff --git a/shared/src/ledger/mod.rs b/shared/src/ledger/mod.rs index 73f39dda05..fb0d2197ee 100644 --- a/shared/src/ledger/mod.rs +++ b/shared/src/ledger/mod.rs @@ -13,5 +13,5 @@ pub mod storage; pub mod vp_host_fns; pub use namada_core::ledger::{ - gas, governance, parameters, storage_api, tx_env, vp_env, + gas, governance, parameters, replay_protection, storage_api, tx_env, vp_env, }; diff --git a/shared/src/ledger/native_vp/mod.rs b/shared/src/ledger/native_vp/mod.rs index 231405dde5..fa3e319533 100644 --- a/shared/src/ledger/native_vp/mod.rs +++ b/shared/src/ledger/native_vp/mod.rs @@ -3,6 +3,7 @@ pub mod governance; pub mod parameters; +pub mod replay_protection; pub mod slash_fund; use std::cell::RefCell; diff --git a/shared/src/ledger/native_vp/replay_protection.rs b/shared/src/ledger/native_vp/replay_protection.rs new file mode 100644 index 0000000000..3e3c4b7ca0 --- /dev/null +++ b/shared/src/ledger/native_vp/replay_protection.rs @@ -0,0 +1,54 @@ +//! Native VP for replay protection + +use std::collections::BTreeSet; + +use namada_core::ledger::storage; +use namada_core::types::address::{Address, InternalAddress}; +use namada_core::types::storage::Key; +use thiserror::Error; + +use crate::ledger::native_vp::{self, Ctx, NativeVp}; +use crate::vm::WasmCacheAccess; + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum Error { + #[error("Native VP error: {0}")] + NativeVpError(#[from] native_vp::Error), +} + +/// ReplayProtection functions result +pub type Result = std::result::Result; + +/// Replay Protection VP +pub struct ReplayProtectionVp<'a, DB, H, CA> +where + DB: storage::DB + for<'iter> storage::DBIter<'iter>, + H: storage::StorageHasher, + CA: WasmCacheAccess, +{ + /// Context to interact with the host structures. + pub ctx: Ctx<'a, DB, H, CA>, +} + +impl<'a, DB, H, CA> NativeVp for ReplayProtectionVp<'a, DB, H, CA> +where + DB: 'static + storage::DB + for<'iter> storage::DBIter<'iter>, + H: 'static + storage::StorageHasher, + CA: 'static + WasmCacheAccess, +{ + type Error = Error; + + const ADDR: InternalAddress = InternalAddress::ReplayProtection; + + fn validate_tx( + &self, + _tx_data: &[u8], + _keys_changed: &BTreeSet, + _verifiers: &BTreeSet
, + ) -> Result { + // VP should prevent any modification of the subspace. + // Changes are only allowed from protocol + Ok(false) + } +} diff --git a/shared/src/ledger/protocol/mod.rs b/shared/src/ledger/protocol/mod.rs index cfd416c14d..ab78a851c5 100644 --- a/shared/src/ledger/protocol/mod.rs +++ b/shared/src/ledger/protocol/mod.rs @@ -10,6 +10,7 @@ use crate::ledger::gas::{self, BlockGasMeter, VpGasMeter}; use crate::ledger::ibc::vp::{Ibc, IbcToken}; use crate::ledger::native_vp::governance::GovernanceVp; use crate::ledger::native_vp::parameters::{self, ParametersVp}; +use crate::ledger::native_vp::replay_protection::ReplayProtectionVp; use crate::ledger::native_vp::slash_fund::SlashFundVp; use crate::ledger::native_vp::{self, NativeVp}; use crate::ledger::pos::{self, PosVP}; @@ -56,6 +57,10 @@ pub enum Error { SlashFundNativeVpError(crate::ledger::native_vp::slash_fund::Error), #[error("Ethereum bridge native VP error: {0}")] EthBridgeNativeVpError(crate::ledger::eth_bridge::vp::Error), + #[error("Replay protection native VP error: {0}")] + ReplayProtectionNativeVpError( + crate::ledger::native_vp::replay_protection::Error, + ), #[error("Access to an internal address {0} is forbidden")] AccessForbidden(InternalAddress), } @@ -389,6 +394,16 @@ where gas_meter = bridge.ctx.gas_meter.into_inner(); result } + InternalAddress::ReplayProtection => { + let replay_protection_vp = + ReplayProtectionVp { ctx }; + let result = replay_protection_vp + .validate_tx(tx_data, &keys_changed, &verifiers) + .map_err(Error::ReplayProtectionNativeVpError); + gas_meter = + replay_protection_vp.ctx.gas_meter.into_inner(); + result + } }; accepted diff --git a/wasm/checksums.json b/wasm/checksums.json index 8d2885eee9..94eda0a1ae 100644 --- a/wasm/checksums.json +++ b/wasm/checksums.json @@ -1,20 +1,20 @@ { - "tx_bond.wasm": "tx_bond.f7f4169dbd708a4776fa6f19c32c259402a7b7c34b9c1ac34029788c27f125fa.wasm", - "tx_change_validator_commission.wasm": "tx_change_validator_commission.49143933426925d549739dd06890f1c4709a27eca101596e34d5e0dbec1bd4c5.wasm", - "tx_ibc.wasm": "tx_ibc.e8081fbfb59dbdb42a2f66e89056fff3058bd7c3111e131b8ff84452752d94f5.wasm", - "tx_init_account.wasm": "tx_init_account.9ad3971335452cc7eada0dc35fb1e6310a05e8e2367e3067c0af8e21dad5705b.wasm", - "tx_init_proposal.wasm": "tx_init_proposal.d0419c9cd9de905c92a0b9839ab94cdc3daf19b050375798f55503150ddc2bfa.wasm", - "tx_init_validator.wasm": "tx_init_validator.ef991c1019164b5d2432a3fba01e4b116825b024cc0dc3bcecdd1555ae7c6579.wasm", - "tx_reveal_pk.wasm": "tx_reveal_pk.b0509274d06dfe22988f03dd4c42e0a70bc40e21983f9670a603c057784dbd8f.wasm", - "tx_transfer.wasm": "tx_transfer.deae9d3955f0761331c21f253f4dc9b1bc93fe268a53049f9eb41d969848c62d.wasm", - "tx_unbond.wasm": "tx_unbond.8c63c856db5b608b44179abf29ec8996005d5a5e5467b11b1afad09b9052e69a.wasm", - "tx_update_vp.wasm": "tx_update_vp.287a8dde719b278b10c63384adbeacf40906b40e173b3ab0d0fac659e323951a.wasm", - "tx_vote_proposal.wasm": "tx_vote_proposal.f86a66ce7c96be2ed4ee6b8d1fa0186264749ef88ec22575bf2c31ca82341c3e.wasm", - "tx_withdraw.wasm": "tx_withdraw.c9d451ccf7561db4a1113fa5c4c9d8266f185030c3ceb57e0204707de1489792.wasm", - "vp_implicit.wasm": "vp_implicit.56de37194c0b4dfce4918c141dfa3622f8f6e3d3414585bd5542f2267e8abb5f.wasm", - "vp_masp.wasm": "vp_masp.5e0578fdd97f3cbab4fe9e15b4408f7c3dd840d4942c361089b9ffe06dffd764.wasm", - "vp_testnet_faucet.wasm": "vp_testnet_faucet.7a2c8a7eec2fe35ea1c7612e35239e4fd197303066397baa64a99c9f58d3d00b.wasm", - "vp_token.wasm": 
"vp_token.64336910def99aa4253ada63eb20614f4dfd9716c9edec790a2f31e66431d7b6.wasm", - "vp_user.wasm": "vp_user.5558d87e420084d0db74457f980a22a5ab518d5a534da43ddb7c5b39f8b5c6e2.wasm", - "vp_validator.wasm": "vp_validator.3d7edc1c8082bdc56566b3a1fba9e2d0aab6f2c9df7abe206f8b4e9582463b90.wasm" + "tx_bond.wasm": "tx_bond.c5dd2845ea51d06cbd03797d9dd48de1b82906e5f69f2b54be56b5f627c29cb0.wasm", + "tx_change_validator_commission.wasm": "tx_change_validator_commission.ce336a152c4f4dcc3e2aaffd0d8ba3180649b22332764cee6fda5476ec37351f.wasm", + "tx_ibc.wasm": "tx_ibc.8c9ddecc4d9a0ff2fac01af1c774fbac7944133affc9862d062fe70588b66438.wasm", + "tx_init_account.wasm": "tx_init_account.36e02f19fb23b798b12a9e502999114276ddd6163bb9a319b303db7ebbc82483.wasm", + "tx_init_proposal.wasm": "tx_init_proposal.67f949b27d5a1bf7ff3114d326d2e2879380637bc553d2f01ef35cd6770bdc05.wasm", + "tx_init_validator.wasm": "tx_init_validator.72e0b43d2a70ff958c2900a1a59ee2546dcc20539c915cb6d393b760d1576973.wasm", + "tx_reveal_pk.wasm": "tx_reveal_pk.1517057649928209c1b41739e005d0eb2a30d15310f662f69fcfe77fdbab509f.wasm", + "tx_transfer.wasm": "tx_transfer.c08f52e4486c3280f54bc93eef3216e01dd65a504cec898112603df8127da288.wasm", + "tx_unbond.wasm": "tx_unbond.06c774751259b4e9f9a52ba9ac7466cef13117e15574e08552eae67e757ba5d5.wasm", + "tx_update_vp.wasm": "tx_update_vp.efd23e3cff59277a4a2a3bc0d92cbd900eb917d84b350af273c9ac21e4953271.wasm", + "tx_vote_proposal.wasm": "tx_vote_proposal.7289a8ed94243665a87f6b629cffcc60e5ce2e0f795449c9ef5942e47ad4c3e9.wasm", + "tx_withdraw.wasm": "tx_withdraw.98b89679537255b3f2b7ea252921e7054eb60e279a711b5b2031bdc190485dc4.wasm", + "vp_implicit.wasm": "vp_implicit.0ec16700341f51b6fbca7924daf454739ad125dbd17001ed654077b098a201e9.wasm", + "vp_masp.wasm": "vp_masp.035341b42e8d683cc532a791ae79f0a377cf32e0f50022fa32cc00ff42a74381.wasm", + "vp_testnet_faucet.wasm": "vp_testnet_faucet.641faeb3ab83584c74c14d3a9b304e3f31d29bd616e010307f3ebd41b1ea9ba2.wasm", + "vp_token.wasm": "vp_token.6c89404534733fa1d7bbff854978cfa05f32e42d6124ed9168d026f5316df311.wasm", + "vp_user.wasm": "vp_user.72ceb6ab5b8c53a15548c0b5e4fb52378cbd6701e4364a6354ee38e3ae613c65.wasm", + "vp_validator.wasm": "vp_validator.8569d072135f3b3306b8019110c6a101812a7a95bc97d66f69610f166108b3b8.wasm" } \ No newline at end of file