diff --git a/.changelog/unreleased/improvements/318-refactor-pos-vp.md b/.changelog/unreleased/improvements/318-refactor-pos-vp.md new file mode 100644 index 0000000000..5ed78c3cc6 --- /dev/null +++ b/.changelog/unreleased/improvements/318-refactor-pos-vp.md @@ -0,0 +1 @@ +- Refactored PoS VP logic ([#318](https://github.com/anoma/namada/pull/318)) \ No newline at end of file diff --git a/.changelog/unreleased/testing/462-pos-tx-tests.md b/.changelog/unreleased/testing/462-pos-tx-tests.md new file mode 100644 index 0000000000..09bacbc5f0 --- /dev/null +++ b/.changelog/unreleased/testing/462-pos-tx-tests.md @@ -0,0 +1,2 @@ +- Test PoS transaction for bonding, unbonding and withdrawal. Fixed an issue + found on unbonding. ([#462](https://github.com/anoma/anoma/issues/462)) \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index eafd05b2ff..c1d192a318 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4065,6 +4065,7 @@ dependencies = [ "tendermint-proto", "test-log", "thiserror", + "tiny-keccak", "tonic-build", "tracing 0.1.35", "tracing-subscriber 0.3.11", @@ -6727,6 +6728,15 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42657b1a6f4d817cda8e7a0ace261fe0cc946cf3a80314390b22cc61ae080792" +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + [[package]] name = "tiny_http" version = "0.11.0" diff --git a/apps/src/lib/cli.rs b/apps/src/lib/cli.rs index f546860cfd..8a5f36d40c 100644 --- a/apps/src/lib/cli.rs +++ b/apps/src/lib/cli.rs @@ -1481,6 +1481,9 @@ pub mod args { arg_opt("account-key"); const VALIDATOR_CONSENSUS_KEY: ArgOpt = arg_opt("consensus-key"); + const VALIDATOR_ETH_COLD_KEY: ArgOpt = + arg_opt("eth-cold-key"); + const VALIDATOR_ETH_HOT_KEY: ArgOpt = arg_opt("eth-hot-key"); const VALIDATOR_CODE_PATH: ArgOpt = arg_opt("validator-code-path"); const VALUE: ArgOpt = arg_opt("value"); const WASM_CHECKSUMS_PATH: Arg = arg("wasm-checksums-path"); @@ -1694,6 +1697,8 @@ pub mod args { pub scheme: SchemeType, pub account_key: Option, pub consensus_key: Option, + pub eth_cold_key: Option, + pub eth_hot_key: Option, pub rewards_account_key: Option, pub protocol_key: Option, pub validator_vp_code_path: Option, @@ -1708,6 +1713,8 @@ pub mod args { let scheme = SCHEME.parse(matches); let account_key = VALIDATOR_ACCOUNT_KEY.parse(matches); let consensus_key = VALIDATOR_CONSENSUS_KEY.parse(matches); + let eth_cold_key = VALIDATOR_ETH_COLD_KEY.parse(matches); + let eth_hot_key = VALIDATOR_ETH_HOT_KEY.parse(matches); let rewards_account_key = REWARDS_KEY.parse(matches); let protocol_key = PROTOCOL_KEY.parse(matches); let validator_vp_code_path = VALIDATOR_CODE_PATH.parse(matches); @@ -1719,6 +1726,8 @@ pub mod args { scheme, account_key, consensus_key, + eth_cold_key, + eth_hot_key, rewards_account_key, protocol_key, validator_vp_code_path, @@ -1742,7 +1751,18 @@ pub mod args { )) .arg(VALIDATOR_CONSENSUS_KEY.def().about( "A consensus key for the validator account. A new one \ - will be generated if none given.", + will be generated if none given. Note that this must be \ + ed25519.", + )) + .arg(VALIDATOR_ETH_COLD_KEY.def().about( + "An Eth cold key for the validator account. A new one \ + will be generated if none given. 
Note that this must be \ + secp256k1.", + )) + .arg(VALIDATOR_ETH_HOT_KEY.def().about( + "An Eth hot key for the validator account. A new one will \ + be generated if none given. Note that this must be \ + secp256k1.", )) .arg(REWARDS_KEY.def().about( "A public key for the staking reward account. A new one \ diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index 34652ac825..665a14fe2c 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -1194,7 +1194,7 @@ fn process_bonds_query( let mut total_active = total_active.unwrap_or_else(|| 0.into()); let mut current_total: token::Amount = 0.into(); for bond in bonds.iter() { - for (epoch_start, &(mut delta)) in bond.deltas.iter().sorted() { + for (epoch_start, &(mut delta)) in bond.pos_deltas.iter().sorted() { writeln!(w, " Active from epoch {}: Δ {}", epoch_start, delta) .unwrap(); delta = apply_slashes(slashes, delta, *epoch_start, None, Some(w)); @@ -1653,25 +1653,56 @@ pub async fn get_proposal_offline_votes( let bonds_iter = query_storage_prefix::(client.clone(), key).await; if let Some(bonds) = bonds_iter { - for (key, epoched_amount) in bonds { - let bond = epoched_amount - .get(proposal.tally_epoch) - .expect("Delegation bond should be definied."); + for (key, epoched_bonds) in bonds { + // Look-up slashes for the validator in this key and + // apply them if any + let validator = pos::get_validator_address_from_bond(&key) + .expect( + "Delegation key should contain validator address.", + ); + let slashes_key = pos::validator_slashes_key(&validator); + let slashes = query_storage_value::( + client, + &slashes_key, + ) + .await + .unwrap_or_default(); + let mut delegated_amount: token::Amount = 0.into(); let epoch = namada::ledger::pos::types::Epoch::from( proposal.tally_epoch.0, ); - let amount = *bond - .deltas - .get(&epoch) - .expect("Delegation amount should be definied."); - let validator_address = - pos::get_validator_address_from_bond(&key).expect( - "Delegation key should contain validator address.", + let bond = epoched_bonds + .get(epoch) + .expect("Delegation bond should be defined."); + let mut to_deduct = bond.neg_deltas; + for (start_epoch, &(mut delta)) in + bond.pos_deltas.iter().sorted() + { + // deduct bond's neg_deltas + if to_deduct > delta { + to_deduct -= delta; + // If the whole bond was deducted, continue to + // the next one + continue; + } else { + delta -= to_deduct; + to_deduct = token::Amount::default(); + } + + delta = apply_slashes( + &slashes, + delta, + *start_epoch, + None, + None, ); + delegated_amount += delta; + } + if proposal_vote.vote.is_yay() { - yay_delegators.insert(validator_address, amount); + yay_delegators.insert(validator, delegated_amount); } else { - nay_delegators.insert(validator_address, amount); + nay_delegators.insert(validator, delegated_amount); } } } @@ -1746,7 +1777,21 @@ pub async fn get_bond_amount_at( Some(epoched_bonds) => { let mut delegated_amount: token::Amount = 0.into(); for bond in epoched_bonds.iter() { - for (epoch_start, &(mut delta)) in bond.deltas.iter().sorted() { + let mut to_deduct = bond.neg_deltas; + for (epoch_start, &(mut delta)) in + bond.pos_deltas.iter().sorted() + { + // deduct bond's neg_deltas + if to_deduct > delta { + to_deduct -= delta; + // If the whole bond was deducted, continue to + // the next one + continue; + } else { + delta -= to_deduct; + to_deduct = token::Amount::default(); + } + delta = apply_slashes( &slashes, delta, diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index 
1d41ebbc77..cc81c6a10a 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -14,7 +14,7 @@ use namada::types::address::{xan as m1t, Address}; use namada::types::governance::{ OfflineProposal, OfflineVote, Proposal, ProposalVote, }; -use namada::types::key::*; +use namada::types::key::{self, *}; use namada::types::nft::{self, Nft, NftToken}; use namada::types::storage::Epoch; use namada::types::token::Amount; @@ -159,6 +159,8 @@ pub async fn submit_init_validator( scheme, account_key, consensus_key, + eth_cold_key, + eth_hot_key, rewards_account_key, protocol_key, validator_vp_code_path, @@ -175,6 +177,8 @@ pub async fn submit_init_validator( let validator_key_alias = format!("{}-key", alias); let consensus_key_alias = format!("{}-consensus-key", alias); let rewards_key_alias = format!("{}-rewards-key", alias); + let eth_hot_key_alias = format!("{}-eth-hot-key", alias); + let eth_cold_key_alias = format!("{}-eth-cold-key", alias); let account_key = ctx.get_opt_cached(&account_key).unwrap_or_else(|| { println!("Generating validator account key..."); ctx.wallet @@ -208,6 +212,48 @@ pub async fn submit_init_validator( .1 }); + let eth_cold_key = ctx + .get_opt_cached(ð_cold_key) + .map(|key| match *key { + common::SecretKey::Secp256k1(_) => key, + common::SecretKey::Ed25519(_) => { + eprintln!("Eth cold key can only be secp256k1"); + safe_exit(1) + } + }) + .unwrap_or_else(|| { + println!("Generating Eth cold key..."); + ctx.wallet + .gen_key( + // Note that ETH only allows secp256k1 + SchemeType::Secp256k1, + Some(eth_cold_key_alias.clone()), + unsafe_dont_encrypt, + ) + .1 + }); + + let eth_hot_key = ctx + .get_opt_cached(ð_hot_key) + .map(|key| match *key { + common::SecretKey::Secp256k1(_) => key, + common::SecretKey::Ed25519(_) => { + eprintln!("Eth hot key can only be secp256k1"); + safe_exit(1) + } + }) + .unwrap_or_else(|| { + println!("Generating Eth hot key..."); + ctx.wallet + .gen_key( + // Note that ETH only allows secp256k1 + SchemeType::Secp256k1, + Some(eth_hot_key_alias.clone()), + unsafe_dont_encrypt, + ) + .1 + }); + let rewards_account_key = ctx.get_opt_cached(&rewards_account_key).unwrap_or_else(|| { println!("Generating staking reward account key..."); @@ -225,9 +271,12 @@ pub async fn submit_init_validator( if protocol_key.is_none() { println!("Generating protocol signing key..."); } + let eth_hot_pk = eth_hot_key.ref_to(); // Generate the validator keys - let validator_keys = - ctx.wallet.gen_validator_keys(protocol_key, scheme).unwrap(); + let validator_keys = ctx + .wallet + .gen_validator_keys(Some(eth_hot_pk), protocol_key, scheme) + .unwrap(); let protocol_key = validator_keys.get_protocol_keypair().ref_to(); let dkg_key = validator_keys .dkg_keypair @@ -269,6 +318,14 @@ pub async fn submit_init_validator( let data = InitValidator { account_key, consensus_key: consensus_key.ref_to(), + eth_cold_key: key::secp256k1::PublicKey::try_from_pk( + ð_cold_key.ref_to(), + ) + .unwrap(), + eth_hot_key: key::secp256k1::PublicKey::try_from_pk( + ð_hot_key.ref_to(), + ) + .unwrap(), rewards_account_key, protocol_key, dkg_key, @@ -890,7 +947,7 @@ pub async fn submit_unbond(ctx: Context, args: args::Unbond) { Some(bonds) => { let mut bond_amount: token::Amount = 0.into(); for bond in bonds.iter() { - for delta in bond.deltas.values() { + for delta in bond.pos_deltas.values() { bond_amount += *delta; } } diff --git a/apps/src/lib/client/utils.rs b/apps/src/lib/client/utils.rs index 9043a14db7..0c06851684 100644 --- a/apps/src/lib/client/utils.rs +++ 
b/apps/src/lib/client/utils.rs @@ -590,6 +590,36 @@ pub fn init_network( keypair.ref_to() }); + let eth_hot_pk = try_parse_public_key( + format!("validator {name} eth hot key"), + &config.eth_hot_key, + ) + .unwrap_or_else(|| { + let alias = format!("{}-eth-hot-key", name); + println!("Generating validator {} eth hot key...", name); + let (_alias, keypair) = wallet.gen_key( + SchemeType::Secp256k1, + Some(alias), + unsafe_dont_encrypt, + ); + keypair.ref_to() + }); + + let eth_cold_pk = try_parse_public_key( + format!("validator {name} eth cold key"), + &config.eth_cold_key, + ) + .unwrap_or_else(|| { + let alias = format!("{}-eth-cold-key", name); + println!("Generating validator {} eth cold key...", name); + let (_alias, keypair) = wallet.gen_key( + SchemeType::Secp256k1, + Some(alias), + unsafe_dont_encrypt, + ); + keypair.ref_to() + }); + let dkg_pk = &config .dkg_public_key .as_ref() @@ -608,6 +638,7 @@ pub fn init_network( let validator_keys = wallet .gen_validator_keys( + Some(eth_hot_pk.clone()), Some(protocol_pk.clone()), SchemeType::Ed25519, ) @@ -624,6 +655,10 @@ pub fn init_network( Some(genesis_config::HexString(account_pk.to_string())); config.staking_reward_public_key = Some(genesis_config::HexString(staking_reward_pk.to_string())); + config.eth_cold_key = + Some(genesis_config::HexString(eth_cold_pk.to_string())); + config.eth_hot_key = + Some(genesis_config::HexString(eth_hot_pk.to_string())); config.protocol_public_key = Some(genesis_config::HexString(protocol_pk.to_string())); @@ -1089,6 +1124,12 @@ pub fn init_genesis_validator( consensus_public_key: Some(HexString( pre_genesis.consensus_key.ref_to().to_string(), )), + eth_cold_key: Some(HexString( + pre_genesis.eth_cold_key.ref_to().to_string(), + )), + eth_hot_key: Some(HexString( + pre_genesis.eth_hot_key.ref_to().to_string(), + )), account_public_key: Some(HexString( pre_genesis.account_key.ref_to().to_string(), )), diff --git a/apps/src/lib/config/genesis.rs b/apps/src/lib/config/genesis.rs index 5bd3dc803f..5441ce524f 100644 --- a/apps/src/lib/config/genesis.rs +++ b/apps/src/lib/config/genesis.rs @@ -164,6 +164,10 @@ pub mod genesis_config { pub struct ValidatorConfig { // Public key for consensus. (default: generate) pub consensus_public_key: Option, + // Public key (cold) for eth governance. (default: generate) + pub eth_cold_key: Option, + // Public key (hot) for eth bridge. (default: generate) + pub eth_hot_key: Option, // Public key for validator account. (default: generate) pub account_public_key: Option, // Public key for staking reward account. 
(default: generate) @@ -318,6 +322,18 @@ pub mod genesis_config { .unwrap() .to_public_key() .unwrap(), + eth_cold_key: config + .eth_cold_key + .as_ref() + .unwrap() + .to_public_key() + .unwrap(), + eth_hot_key: config + .eth_hot_key + .as_ref() + .unwrap() + .to_public_key() + .unwrap(), }, account_key: config .account_public_key @@ -750,11 +766,21 @@ pub fn genesis() -> Genesis { 24, 247, 69, 6, 9, 30, 44, 16, 88, 238, 77, 162, 243, 125, 240, 206, ]) .unwrap(); + + let secp_eth_cold_keypair = secp256k1::SecretKey::try_from_slice(&[ + 90, 83, 107, 155, 193, 251, 120, 27, 76, 1, 188, 8, 116, 121, 90, 99, + 65, 17, 187, 6, 238, 141, 63, 188, 76, 38, 102, 7, 47, 185, 28, 52, + ]) + .unwrap(); + let staking_reward_keypair = common::SecretKey::try_from_sk(&ed_staking_reward_keypair).unwrap(); + let eth_cold_keypair = + common::SecretKey::try_from_sk(&secp_eth_cold_keypair).unwrap(); let address = wallet::defaults::validator_address(); let staking_reward_address = Address::decode("atest1v4ehgw36xcersvee8qerxd35x9prsw2xg5erxv6pxfpygd2x89z5xsf5xvmnysejgv6rwd2rnj2avt").unwrap(); - let (protocol_keypair, dkg_keypair) = wallet::defaults::validator_keys(); + let (protocol_keypair, eth_bridge_keypair, dkg_keypair) = + wallet::defaults::validator_keys(); let validator = Validator { pos_data: GenesisValidator { address, @@ -762,6 +788,8 @@ pub fn genesis() -> Genesis { tokens: token::Amount::whole(200_000), consensus_key: consensus_keypair.ref_to(), staking_reward_key: staking_reward_keypair.ref_to(), + eth_cold_key: eth_cold_keypair.ref_to(), + eth_hot_key: eth_bridge_keypair.ref_to(), }, account_key: account_keypair.ref_to(), protocol_key: protocol_keypair.ref_to(), @@ -886,13 +914,32 @@ pub mod tests { let staking_reward_keypair: common::SecretKey = ed25519::SigScheme::generate(&mut rng).try_to_sk().unwrap(); let srkp_arr = staking_reward_keypair.try_to_vec().unwrap(); - let (protocol_keypair, dkg_keypair) = + let (protocol_keypair, _eth_hot_bridge_keypair, dkg_keypair) = wallet::defaults::validator_keys(); + + // TODO: derive validator eth address from an eth keypair + let eth_cold_gov_keypair: common::SecretKey = + secp256k1::SigScheme::generate(&mut rng) + .try_to_sk() + .unwrap(); + let eth_hot_bridge_keypair: common::SecretKey = + secp256k1::SigScheme::generate(&mut rng) + .try_to_sk() + .unwrap(); + println!("address: {}", address); println!("staking_reward_address: {}", staking_reward_address); println!("keypair: {:?}", kp_arr); println!("staking_reward_keypair: {:?}", srkp_arr); println!("protocol_keypair: {:?}", protocol_keypair); println!("dkg_keypair: {:?}", dkg_keypair.try_to_vec().unwrap()); + println!( + "eth_cold_gov_keypair: {:?}", + eth_cold_gov_keypair.try_to_vec().unwrap() + ); + println!( + "eth_hot_bridge_keypair: {:?}", + eth_hot_bridge_keypair.try_to_vec().unwrap() + ); } } diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index eb47160f9c..207112da18 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -289,13 +289,15 @@ where } #[cfg(feature = "dev")] { - let validator_keys = wallet::defaults::validator_keys(); + let (protocol_keypair, eth_bridge_keypair, dkg_keypair) = + wallet::defaults::validator_keys(); ShellMode::Validator { data: wallet::ValidatorData { address: wallet::defaults::validator_address(), keys: wallet::ValidatorKeys { - protocol_keypair: validator_keys.0, - dkg_keypair: Some(validator_keys.1), + protocol_keypair, + eth_bridge_keypair, + dkg_keypair: Some(dkg_keypair), }, 
}, broadcast_sender, diff --git a/apps/src/lib/wallet/defaults.rs b/apps/src/lib/wallet/defaults.rs index 6a4f9dacc3..5f835c9f11 100644 --- a/apps/src/lib/wallet/defaults.rs +++ b/apps/src/lib/wallet/defaults.rs @@ -81,13 +81,23 @@ mod dev { use crate::wallet::alias::Alias; - /// Generate a new protocol signing keypair and DKG session keypair - pub fn validator_keys() -> (common::SecretKey, DkgKeypair) { + /// Generate a new protocol signing keypair, eth hot key and DKG session + /// keypair + pub fn validator_keys() -> (common::SecretKey, common::SecretKey, DkgKeypair) + { + // ed25519 bytes let bytes: [u8; 33] = [ 0, 200, 107, 23, 252, 78, 80, 8, 164, 142, 3, 194, 33, 12, 250, 169, 211, 127, 47, 13, 194, 54, 199, 81, 102, 246, 189, 119, 144, 25, 27, 113, 222, ]; + // secp256k1 bytes + let eth_bridge_key_bytes = [ + 1, 117, 93, 118, 129, 202, 67, 51, 62, 202, 196, 130, 244, 5, 44, + 88, 200, 121, 169, 11, 227, 79, 223, 74, 88, 49, 132, 213, 59, 64, + 20, 13, 82, + ]; + // DkgKeypair let dkg_bytes = [ 32, 0, 0, 0, 210, 193, 55, 24, 92, 233, 23, 2, 73, 204, 221, 107, 110, 222, 192, 136, 54, 24, 108, 236, 137, 27, 121, 142, 142, 7, @@ -96,6 +106,8 @@ mod dev { ( BorshDeserialize::deserialize(&mut bytes.as_ref()).unwrap(), + BorshDeserialize::deserialize(&mut eth_bridge_key_bytes.as_ref()) + .unwrap(), BorshDeserialize::deserialize(&mut dkg_bytes.as_ref()).unwrap(), ) } diff --git a/apps/src/lib/wallet/mod.rs b/apps/src/lib/wallet/mod.rs index b3048ef97d..5646416a38 100644 --- a/apps/src/lib/wallet/mod.rs +++ b/apps/src/lib/wallet/mod.rs @@ -118,27 +118,47 @@ impl Wallet { /// we should re-use a keypair already in the wallet pub fn gen_validator_keys( &mut self, + eth_bridge_pk: Option, protocol_pk: Option, - scheme: SchemeType, + protocol_key_scheme: SchemeType, ) -> Result { - let protocol_keypair = protocol_pk.map(|pk| { - self.find_key_by_pkh(&PublicKeyHash::from(&pk)) - .ok() - .or_else(|| { - self.store - .validator_data - .take() - .map(|data| Rc::new(data.keys.protocol_keypair)) - }) - .ok_or(FindKeyError::KeyNotFound) - }); - match protocol_keypair { - Some(Err(err)) => Err(err), - other => Ok(Store::gen_validator_keys( - other.map(|res| res.unwrap().as_ref().clone()), - scheme, - )), - } + let protocol_keypair = self.find_secret_key(protocol_pk, |data| { + Rc::new(data.keys.protocol_keypair) + })?; + let eth_bridge_keypair = self + .find_secret_key(eth_bridge_pk, |data| { + Rc::new(data.keys.eth_bridge_keypair) + })?; + Ok(Store::gen_validator_keys( + eth_bridge_keypair.map(|sk| sk.as_ref().clone()), + protocol_keypair.map(|sk| sk.as_ref().clone()), + protocol_key_scheme, + )) + } + + /// Find a corresponding [`common::SecretKey`] in [`Store`], for some + /// [`common::PublicKey`]. + /// + /// If a key was provided in `maybe_pk`, and it's found in [`Store`], we use + /// `extract_key` to retrieve it from [`ValidatorData`]. 
+ fn find_secret_key( + &mut self, + maybe_pk: Option, + extract_key: F, + ) -> Result>, FindKeyError> + where + F: Fn(ValidatorData) -> Rc, + { + maybe_pk + .map(|pk| { + self.find_key_by_pkh(&PublicKeyHash::from(&pk)) + .ok() + .or_else(|| { + self.store.validator_data.take().map(extract_key) + }) + .ok_or(FindKeyError::KeyNotFound) + }) + .transpose() } /// Add validator data to the store diff --git a/apps/src/lib/wallet/pre_genesis.rs b/apps/src/lib/wallet/pre_genesis.rs index 72f719d1e4..c2c7cf393b 100644 --- a/apps/src/lib/wallet/pre_genesis.rs +++ b/apps/src/lib/wallet/pre_genesis.rs @@ -40,6 +40,10 @@ pub struct ValidatorWallet { pub account_key: Rc, /// Cryptographic keypair for consensus key pub consensus_key: Rc, + /// Cryptographic keypair for eth cold key + pub eth_cold_key: Rc, + /// Cryptographic keypair for eth hot key + pub eth_hot_key: Rc, /// Cryptographic keypair for rewards key pub rewards_key: Rc, /// Cryptographic keypair for Tendermint node key @@ -54,11 +58,13 @@ pub struct ValidatorStore { pub account_key: wallet::StoredKeypair, /// Cryptographic keypair for consensus key pub consensus_key: wallet::StoredKeypair, + /// Cryptographic keypair for eth cold key + pub eth_cold_key: wallet::StoredKeypair, /// Cryptographic keypair for rewards key pub rewards_key: wallet::StoredKeypair, /// Cryptographic keypair for Tendermint node key pub tendermint_node_key: wallet::StoredKeypair, - /// Special validator keys + /// Special validator keys. Contains the ETH hot key. pub validator_keys: wallet::ValidatorKeys, } @@ -119,6 +125,11 @@ impl ValidatorWallet { store.account_key.get(true, password.clone())?; let consensus_key = store.consensus_key.get(true, password.clone())?; + let eth_cold_key = + store.eth_cold_key.get(true, password.clone())?; + let eth_hot_key = + Rc::new(store.validator_keys.eth_bridge_keypair.clone()); + let rewards_key = store.rewards_key.get(true, password.clone())?; let tendermint_node_key = @@ -128,6 +139,8 @@ impl ValidatorWallet { store, account_key, consensus_key, + eth_cold_key, + eth_hot_key, rewards_key, tendermint_node_key, }) @@ -149,16 +162,22 @@ impl ValidatorWallet { SchemeType::Ed25519, &password, ); + let (eth_cold_key, eth_cold_sk) = + gen_key_to_store(SchemeType::Secp256k1, &password); + let (rewards_key, rewards_sk) = gen_key_to_store(scheme, &password); let (tendermint_node_key, tendermint_node_sk) = gen_key_to_store( // Note that TM only allows ed25519 for node IDs SchemeType::Ed25519, &password, ); - let validator_keys = store::Store::gen_validator_keys(None, scheme); + let validator_keys = + store::Store::gen_validator_keys(None, None, scheme); + let eth_hot_key = Rc::new(validator_keys.eth_bridge_keypair.clone()); let store = ValidatorStore { account_key, consensus_key, + eth_cold_key, rewards_key, tendermint_node_key, validator_keys, @@ -167,6 +186,8 @@ impl ValidatorWallet { store, account_key: account_sk, consensus_key: consensus_sk, + eth_cold_key: eth_cold_sk, + eth_hot_key, rewards_key: rewards_sk, tendermint_node_key: tendermint_node_sk, } diff --git a/apps/src/lib/wallet/store.rs b/apps/src/lib/wallet/store.rs index 76a2053419..88af4c9111 100644 --- a/apps/src/lib/wallet/store.rs +++ b/apps/src/lib/wallet/store.rs @@ -27,6 +27,8 @@ use crate::config::genesis::genesis_config::GenesisConfig; pub struct ValidatorKeys { /// Special keypair for signing protocol txs pub protocol_keypair: common::SecretKey, + /// Special hot keypair for signing Ethereum bridge txs + pub eth_bridge_keypair: common::SecretKey, /// Special session 
keypair needed by validators for participating /// in the DKG protocol pub dkg_keypair: Option, @@ -287,16 +289,28 @@ impl Store { /// /// Note that this removes the validator data. pub fn gen_validator_keys( + eth_bridge_keypair: Option, protocol_keypair: Option, - scheme: SchemeType, + protocol_keypair_scheme: SchemeType, ) -> ValidatorKeys { + let eth_bridge_keypair = eth_bridge_keypair + .map(|k| { + if !matches!(&k, common::SecretKey::Secp256k1(_)) { + panic!( + "Ethereum bridge keys can only be of kind Secp256k1" + ); + } + k + }) + .unwrap_or_else(|| gen_sk(SchemeType::Secp256k1)); let protocol_keypair = - protocol_keypair.unwrap_or_else(|| gen_sk(scheme)); + protocol_keypair.unwrap_or_else(|| gen_sk(protocol_keypair_scheme)); let dkg_keypair = ferveo_common::Keypair::::new( &mut StdRng::from_entropy(), ); ValidatorKeys { protocol_keypair, + eth_bridge_keypair, dkg_keypair: Some(dkg_keypair.into()), } } @@ -527,7 +541,7 @@ mod test_wallet { fn test_toml_roundtrip_ed25519() { let mut store = Store::new(); let validator_keys = - Store::gen_validator_keys(None, SchemeType::Ed25519); + Store::gen_validator_keys(None, None, SchemeType::Ed25519); store.add_validator_data( Address::decode("atest1v4ehgw36x3prswzxggunzv6pxqmnvdj9xvcyzvpsggeyvs3cg9qnywf589qnwvfsg5erg3fkl09rg5").unwrap(), validator_keys @@ -540,7 +554,7 @@ mod test_wallet { fn test_toml_roundtrip_secp256k1() { let mut store = Store::new(); let validator_keys = - Store::gen_validator_keys(None, SchemeType::Secp256k1); + Store::gen_validator_keys(None, None, SchemeType::Secp256k1); store.add_validator_data( Address::decode("atest1v4ehgw36x3prswzxggunzv6pxqmnvdj9xvcyzvpsggeyvs3cg9qnywf589qnwvfsg5erg3fkl09rg5").unwrap(), validator_keys diff --git a/proof_of_stake/src/epoched.rs b/proof_of_stake/src/epoched.rs index 29137b49bd..f13bec3ee0 100644 --- a/proof_of_stake/src/epoched.rs +++ b/proof_of_stake/src/epoched.rs @@ -11,7 +11,17 @@ use crate::PosParams; /// Data that may have values set for future epochs, up to an epoch at offset as /// set via the `Offset` type parameter. -#[derive(Debug, Clone, BorshDeserialize, BorshSerialize, BorshSchema)] +#[derive( + Debug, + Clone, + BorshDeserialize, + BorshSerialize, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] pub struct Epoched where Data: Clone + BorshDeserialize + BorshSerialize + BorshSchema, @@ -27,7 +37,17 @@ where /// Data that may have delta values (a difference from the predecessor epoch) /// set for future epochs, up to an epoch at offset as set via the `Offset` type /// parameter. -#[derive(Debug, Clone, BorshDeserialize, BorshSerialize, BorshSchema)] +#[derive( + Debug, + Clone, + BorshDeserialize, + BorshSerialize, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] pub struct EpochedDelta where Data: Clone @@ -56,7 +76,17 @@ pub trait EpochOffset: } /// Offset at pipeline length. -#[derive(Debug, Clone, BorshDeserialize, BorshSerialize, BorshSchema)] +#[derive( + Debug, + Clone, + BorshDeserialize, + BorshSerialize, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] pub struct OffsetPipelineLen; impl EpochOffset for OffsetPipelineLen { fn value(params: &PosParams) -> u64 { @@ -69,9 +99,19 @@ impl EpochOffset for OffsetPipelineLen { } /// Offset at unbonding length. 
-#[derive(Debug, Clone, BorshDeserialize, BorshSerialize, BorshSchema)] -pub struct OffsetUnboundingLen; -impl EpochOffset for OffsetUnboundingLen { +#[derive( + Debug, + Clone, + BorshDeserialize, + BorshSerialize, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +pub struct OffsetUnbondingLen; +impl EpochOffset for OffsetUnbondingLen { fn value(params: &PosParams) -> u64 { params.unbonding_len } @@ -82,7 +122,7 @@ impl EpochOffset for OffsetUnboundingLen { } /// Offset length dynamic choice. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub enum DynEpochOffset { /// Offset at pipeline length. PipelineLen, @@ -522,21 +562,20 @@ where ); } - /// Update the delta values in reverse order (starting from the future-most - /// epoch) while the update function returns `true`. - pub fn rev_update_while( - &mut self, - mut update_value: impl FnMut(&mut Data, Epoch) -> bool, + /// Apply the given `f` function on each delta value in reverse order + /// (starting from the future-most epoch) while the given function returns + /// `true`. + pub fn rev_while( + &self, + mut f: impl FnMut(&Data, Epoch) -> bool, current_epoch: impl Into, params: &PosParams, ) { let epoch = current_epoch.into(); - self.update_data(epoch, params); - let offset = Offset::value(params) as usize; for ix in (0..offset + 1).rev() { - if let Some(Some(current)) = self.data.get_mut(ix) { - let keep_going = update_value(current, epoch + ix); + if let Some(Some(current)) = self.data.get(ix) { + let keep_going = f(current, epoch + ix); if !keep_going { break; } @@ -569,16 +608,16 @@ mod tests { sequential 1..20 => EpochedAbstractStateMachine); #[test] - fn epoched_state_machine_with_unbounding_offset( - sequential 1..20 => EpochedAbstractStateMachine); + fn epoched_state_machine_with_unbonding_offset( + sequential 1..20 => EpochedAbstractStateMachine); #[test] fn epoched_delta_state_machine_with_pipeline_offset( sequential 1..20 => EpochedDeltaAbstractStateMachine); #[test] - fn epoched_delta_state_machine_with_unbounding_offset( - sequential 1..20 => EpochedDeltaAbstractStateMachine); + fn epoched_delta_state_machine_with_unbonding_offset( + sequential 1..20 => EpochedDeltaAbstractStateMachine); } /// Abstract representation of [`Epoched`]. diff --git a/proof_of_stake/src/lib.rs b/proof_of_stake/src/lib.rs index a137eb8a91..f6513c1cdd 100644 --- a/proof_of_stake/src/lib.rs +++ b/proof_of_stake/src/lib.rs @@ -33,10 +33,11 @@ use epoched::{ use parameters::PosParams; use thiserror::Error; use types::{ - ActiveValidator, Bonds, Epoch, GenesisValidator, Slash, SlashType, Slashes, - TotalVotingPowers, Unbond, Unbonds, ValidatorConsensusKeys, ValidatorSet, - ValidatorSetUpdate, ValidatorSets, ValidatorState, ValidatorStates, - ValidatorTotalDeltas, ValidatorVotingPowers, VotingPower, VotingPowerDelta, + ActiveValidator, Bonds, Epoch, EthAddress, GenesisValidator, Slash, + SlashType, Slashes, TotalVotingPowers, TryRefTo, Unbond, Unbonds, + ValidatorConsensusKeys, ValidatorEthKey, ValidatorSet, ValidatorSetUpdate, + ValidatorSets, ValidatorState, ValidatorStates, ValidatorTotalDeltas, + ValidatorVotingPowers, VotingPower, VotingPowerDelta, }; use crate::btree_set::BTreeSetShims; @@ -64,7 +65,7 @@ pub trait PosReadOnly { + Copy + Add + AddAssign - + Sub + + Sub + PartialOrd + Into + From @@ -146,6 +147,18 @@ pub trait PosReadOnly { fn read_validator_set(&self) -> ValidatorSets; /// Read PoS total voting power of all validators (active and inactive). 
fn read_total_voting_power(&self) -> TotalVotingPowers; + + /// Read PoS validator's Eth bridge governance key + fn read_validator_eth_cold_key( + &self, + key: &Self::Address, + ) -> Option>; + + /// Read PoS validator's Eth validator set update signing key + fn read_validator_eth_hot_key( + &self, + key: &Self::Address, + ) -> Option>; } /// PoS system trait to be implemented in integration that can read and write @@ -204,6 +217,19 @@ pub trait PosActions: PosReadOnly { fn write_validator_set(&mut self, value: ValidatorSets); /// Write PoS total voting power of all validators (active and inactive). fn write_total_voting_power(&mut self, value: TotalVotingPowers); + /// Write PoS validator's Eth bridge governance key + fn write_validator_eth_cold_key( + &mut self, + address: &Self::Address, + value: ValidatorEthKey, + ); + + /// Write PoS validator's Eth validator set update signing key + fn write_validator_eth_hot_key( + &self, + address: &Self::Address, + value: ValidatorEthKey, + ); /// Delete an emptied PoS bond (validator self-bond or a delegation). fn delete_bond(&mut self, key: &BondId); @@ -226,8 +252,13 @@ pub trait PosActions: PosReadOnly { address: &Self::Address, staking_reward_address: &Self::Address, consensus_key: &Self::PublicKey, + eth_cold_key: &Self::PublicKey, + eth_hot_key: &Self::PublicKey, current_epoch: impl Into, - ) -> Result<(), BecomeValidatorError> { + ) -> Result<(), BecomeValidatorError> + where + Self::PublicKey: TryRefTo, + { let current_epoch = current_epoch.into(); let params = self.read_pos_params(); let mut validator_set = self.read_validator_set(); @@ -245,6 +276,8 @@ pub trait PosActions: PosReadOnly { } let BecomeValidatorData { consensus_key, + eth_cold_key, + eth_hot_key, state, total_deltas, voting_power, @@ -252,14 +285,18 @@ pub trait PosActions: PosReadOnly { ¶ms, address, consensus_key, + eth_cold_key, + eth_hot_key, &mut validator_set, current_epoch, - ); + )?; self.write_validator_staking_reward_address( address, staking_reward_address.clone(), ); self.write_validator_consensus_key(address, consensus_key); + self.write_validator_eth_cold_key(address, eth_cold_key); + self.write_validator_eth_hot_key(address, eth_hot_key); self.write_validator_state(address, state); self.write_validator_set(validator_set); self.write_validator_address_raw_hash(address); @@ -571,6 +608,17 @@ pub trait PosBase { fn read_validator_set(&self) -> ValidatorSets; /// Read PoS total voting power of all validators (active and inactive). fn read_total_voting_power(&self) -> TotalVotingPowers; + /// Read PoS validator's Eth bridge governance key + fn read_validator_eth_cold_key( + &self, + key: &Self::Address, + ) -> Option>; + + /// Read PoS validator's Eth validator set update signing key + fn read_validator_eth_hot_key( + &self, + key: &Self::Address, + ) -> Option>; /// Write PoS parameters. fn write_pos_params(&mut self, params: &PosParams); @@ -624,6 +672,18 @@ pub trait PosBase { fn write_validator_set(&mut self, value: &ValidatorSets); /// Read PoS total voting power of all validators (active and inactive). 
fn write_total_voting_power(&mut self, value: &TotalVotingPowers); + /// Write PoS validator's Eth bridge governance key + fn write_validator_eth_cold_key( + &mut self, + address: &Self::Address, + value: &ValidatorEthKey, + ); + /// Write PoS validator's Eth validator set update signing key + fn write_validator_eth_hot_key( + &mut self, + address: &Self::Address, + value: &ValidatorEthKey, + ); /// Initialize staking reward account with the given public key. fn init_staking_reward_account( &mut self, @@ -663,7 +723,10 @@ pub trait PosBase { > + Clone + 'a, current_epoch: impl Into, - ) -> Result<(), GenesisError> { + ) -> Result<(), GenesisError> + where + Self::PublicKey: TryRefTo, + { let current_epoch = current_epoch.into(); self.write_pos_params(params); @@ -684,6 +747,8 @@ pub trait PosBase { total_deltas, voting_power, bond: (bond_id, bond), + eth_cold_key, + eth_hot_key, } = res?; self.write_validator_address_raw_hash(address); self.write_validator_staking_reward_address( @@ -691,6 +756,8 @@ pub trait PosBase { &staking_reward_address, ); self.write_validator_consensus_key(address, &consensus_key); + self.write_validator_eth_cold_key(address, ð_cold_key); + self.write_validator_eth_hot_key(address, ð_hot_key); self.write_validator_state(address, &state); self.write_validator_total_deltas(address, &total_deltas); self.write_validator_voting_power(address, &voting_power); @@ -733,12 +800,12 @@ pub trait PosBase { let prev_validators = previous_epoch.and_then(|epoch| validators.get(epoch)); - // If the validator never been active before and it doesn't have more - // than 0 voting power, we should not tell Tendermint to update it until - // it does. Tendermint uses 0 voting power as a way to signal - // that a validator has been removed from the validator set, but - // fails if we attempt to give it a new validator with 0 voting - // power. + // If the validator has never been active before and it doesn't have + // more than 0 voting power, we should not tell Tendermint to + // update it until it does. Tendermint uses 0 voting power as a + // way to signal that a validator has been removed from the + // validator set, but fails if we attempt to give it a new + // validator with 0 voting power. // For active validators, this would only ever happen until all the // validator slots are filled with non-0 voting power validators, but we // still need to guard against it. @@ -893,6 +960,8 @@ pub trait PosBase { pub enum GenesisError { #[error("Voting power overflow: {0}")] VotingPowerOverflow(TryFromIntError), + #[error("Ethereum address can only be of secp kind")] + SecpKeyConversion, } #[allow(missing_docs)] @@ -905,6 +974,8 @@ pub enum BecomeValidatorError { address {0}" )] StakingRewardAddressEqValidatorAddress(Address), + #[error("Ethereum address can only be of secp kind")] + SecpKeyConversion, } #[allow(missing_docs)] @@ -921,7 +992,7 @@ pub enum BondError { InactiveValidator(Address), #[error("Voting power overflow: {0}")] VotingPowerOverflow(TryFromIntError), - #[error("Given zero amount to unbond")] + #[error("Given zero amount to bond")] ZeroAmount, } @@ -1056,6 +1127,8 @@ where total_deltas: ValidatorTotalDeltas, voting_power: ValidatorVotingPowers, bond: (BondId
, Bonds), + eth_cold_key: ValidatorEthKey, + eth_hot_key: ValidatorEthKey, } /// A function that returns genesis data created from the initial validator set. @@ -1109,7 +1182,13 @@ where + BorshDeserialize + BorshSerialize + BorshSchema, - PK: 'a + Debug + Clone + BorshDeserialize + BorshSerialize + BorshSchema, + PK: 'a + + Debug + + Clone + + BorshDeserialize + + BorshSerialize + + BorshSchema + + TryRefTo, { // Accumulate the validator set and total voting power let mut active: BTreeSet> = BTreeSet::default(); @@ -1155,9 +1234,15 @@ where tokens, consensus_key, staking_reward_key, + eth_cold_key, + eth_hot_key, }| { let consensus_key = Epoched::init_at_genesis(consensus_key.clone(), current_epoch); + let eth_cold_key = + Epoched::init_at_genesis(eth_cold_key.clone(), current_epoch); + let eth_hot_key = + Epoched::init_at_genesis(eth_hot_key.clone(), current_epoch); let state = Epoched::init_at_genesis( ValidatorState::Candidate, current_epoch, @@ -1174,10 +1259,15 @@ where source: address.clone(), validator: address.clone(), }; - let mut deltas = HashMap::default(); - deltas.insert(current_epoch, *tokens); - let bond = - EpochedDelta::init_at_genesis(Bond { deltas }, current_epoch); + let mut pos_deltas = HashMap::default(); + pos_deltas.insert(current_epoch, *tokens); + let bond = EpochedDelta::init_at_genesis( + Bond { + pos_deltas, + neg_deltas: Default::default(), + }, + current_epoch, + ); Ok(GenesisValidatorData { address: address.clone(), staking_reward_address: staking_reward_address.clone(), @@ -1187,6 +1277,8 @@ where total_deltas, voting_power, bond: (bond_id, bond), + eth_cold_key, + eth_hot_key, }) }, ); @@ -1297,28 +1389,38 @@ where + BorshSchema, { consensus_key: ValidatorConsensusKeys, + eth_cold_key: ValidatorEthKey, + eth_hot_key: ValidatorEthKey, state: ValidatorStates, total_deltas: ValidatorTotalDeltas, voting_power: ValidatorVotingPowers, } /// A function that initialized data for a new validator. -fn become_validator( +fn become_validator<'a, Address, PK, TokenChange>( params: &PosParams, address: &Address, consensus_key: &PK, + eth_cold_key: &'a PK, + eth_hot_key: &'a PK, validator_set: &mut ValidatorSets
, current_epoch: Epoch, -) -> BecomeValidatorData +) -> Result, BecomeValidatorError
> where - Address: Debug + Address: Display + + Debug + Clone + Ord + Hash + BorshDeserialize + BorshSerialize + BorshSchema, - PK: Debug + Clone + BorshDeserialize + BorshSerialize + BorshSchema, + PK: Debug + + Clone + + BorshDeserialize + + BorshSerialize + + BorshSchema + + TryRefTo, TokenChange: Default + Debug + Clone @@ -1330,6 +1432,9 @@ where { let consensus_key = Epoched::init(consensus_key.clone(), current_epoch, params); + let eth_cold_key = + Epoched::init(eth_cold_key.clone(), current_epoch, params); + let eth_hot_key = Epoched::init(eth_hot_key.clone(), current_epoch, params); let mut state = Epoched::init_at_genesis(ValidatorState::Pending, current_epoch); @@ -1366,12 +1471,14 @@ where params, ); - BecomeValidatorData { + Ok(BecomeValidatorData { consensus_key, state, total_deltas, voting_power, - } + eth_cold_key, + eth_hot_key, + }) } struct BondData @@ -1479,15 +1586,21 @@ where // Update or create the bond let mut value = Bond { - deltas: HashMap::default(), + pos_deltas: HashMap::default(), + neg_deltas: TokenAmount::default(), }; value - .deltas + .pos_deltas .insert(current_epoch + update_offset.value(params), amount); let bond = match current_bond { - None => EpochedDelta::init(value, current_epoch, params), + None => EpochedDelta::init_at_offset( + value, + current_epoch, + update_offset, + params, + ), Some(mut bond) => { - bond.add(value, current_epoch, params); + bond.add_at_offset(value, current_epoch, update_offset, params); bond } }; @@ -1605,6 +1718,7 @@ where + AddAssign + Into + From + + Sub + SubAssign + BorshDeserialize + BorshSerialize @@ -1615,7 +1729,7 @@ where + Clone + Copy + Add - + Sub + + Sub + From + Neg + Into @@ -1650,27 +1764,25 @@ where let mut slashed_amount = TokenAmount::default(); // Decrement the bond deltas starting from the rightmost value (a bond in a // future-most epoch) until whole amount is decremented - bond.rev_update_while( + bond.rev_while( |bonds, _epoch| { - bonds.deltas.retain(|epoch_start, bond_delta| { + for (epoch_start, bond_delta) in bonds.pos_deltas.iter() { if *to_unbond == 0.into() { return true; } let mut unbonded = HashMap::default(); let unbond_end = current_epoch + update_offset.value(params) - 1; - // We need to accumulate the slashed delta for multiple slashes - // applicable to a bond, where each slash should be - // calculated from the delta reduced by the previous slash. - let applied_delta = if to_unbond > bond_delta { + // We need to accumulate the slashed delta for multiple + // slashes applicable to a bond, where + // each slash should be calculated from + // the delta reduced by the previous slash. + let applied_delta = if *to_unbond > *bond_delta { unbonded.insert((*epoch_start, unbond_end), *bond_delta); *to_unbond -= *bond_delta; - let applied_delta = *bond_delta; - *bond_delta = 0.into(); - applied_delta + *bond_delta } else { unbonded.insert((*epoch_start, unbond_end), *to_unbond); - *bond_delta -= *to_unbond; let applied_delta = *to_unbond; *to_unbond = 0.into(); applied_delta @@ -1690,9 +1802,7 @@ where // For each decremented bond value write a new unbond unbond.add(Unbond { deltas: unbonded }, current_epoch, params); - // Remove bonds with no tokens left - *bond_delta != 0.into() - }); + } // Stop the update once all the tokens are unbonded *to_unbond != 0.into() }, @@ -1700,6 +1810,16 @@ where params, ); + bond.add_at_offset( + Bond { + pos_deltas: Default::default(), + neg_deltas: amount, + }, + current_epoch, + update_offset, + params, + ); + // Update validator set. 
This has to be done before we update the // `validator_total_deltas`, because we need to look-up the validator with // its voting power before the change. diff --git a/proof_of_stake/src/parameters.rs b/proof_of_stake/src/parameters.rs index 84bd59d4a5..7ee0abdf98 100644 --- a/proof_of_stake/src/parameters.rs +++ b/proof_of_stake/src/parameters.rs @@ -78,7 +78,8 @@ const MAX_TOTAL_VOTING_POWER: i64 = i64::MAX / 8; const TOKEN_MAX_AMOUNT: u64 = u64::MAX / 1_000_000; impl PosParams { - /// Validate PoS parameters values. Returns empty list the values are valid. + /// Validate PoS parameters values. Returns an empty list if the values are + /// valid. #[must_use] pub fn validate(&self) -> Vec { let mut errors = vec![]; diff --git a/proof_of_stake/src/types.rs b/proof_of_stake/src/types.rs index 45342ef277..bd655530ed 100644 --- a/proof_of_stake/src/types.rs +++ b/proof_of_stake/src/types.rs @@ -11,7 +11,7 @@ use std::ops::{Add, AddAssign, Mul, Sub, SubAssign}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use crate::epoched::{ - Epoched, EpochedDelta, OffsetPipelineLen, OffsetUnboundingLen, + Epoched, EpochedDelta, OffsetPipelineLen, OffsetUnbondingLen, }; use crate::parameters::PosParams; @@ -22,22 +22,46 @@ pub type ValidatorConsensusKeys = pub type ValidatorStates = Epoched; /// Epoched validator's total deltas. pub type ValidatorTotalDeltas = - EpochedDelta; + EpochedDelta; /// Epoched validator's voting power. pub type ValidatorVotingPowers = - EpochedDelta; + EpochedDelta; /// Epoched bond. pub type Bonds = - EpochedDelta, OffsetPipelineLen>; + EpochedDelta, OffsetUnbondingLen>; /// Epoched unbond. pub type Unbonds = - EpochedDelta, OffsetUnboundingLen>; + EpochedDelta, OffsetUnbondingLen>; /// Epoched validator set. pub type ValidatorSets
= - Epoched, OffsetUnboundingLen>; + Epoched, OffsetUnbondingLen>; /// Epoched total voting power. -pub type TotalVotingPowers = - EpochedDelta; +pub type TotalVotingPowers = EpochedDelta; +/// Epoched validator's eth key. +pub type ValidatorEthKey = Epoched; + +/// Eth address derived from secp256k1 key +#[derive( + Debug, + Eq, + PartialEq, + PartialOrd, + Ord, + Hash, + BorshSerialize, + BorshDeserialize, + BorshSchema, +)] +pub struct EthAddress(pub [u8; 20]); + +/// A ref-to-value conversion that may fail + +pub trait TryRefTo { + /// The error + type Error; + /// Try to perform the conversion. + fn try_ref_to(&self) -> Result; +} /// Epoch identifier. Epochs are identified by consecutive natural numbers. /// @@ -118,6 +142,11 @@ pub struct GenesisValidator { pub consensus_key: PK, /// An public key associated with the staking reward address pub staking_reward_key: PK, + /// An Eth bridge governance public key + pub eth_cold_key: PK, + /// An Eth bridge hot signing public key used for validator set updates and + /// cross-chain transactions + pub eth_hot_key: PK, } /// An update of the active and inactive validator set. @@ -282,20 +311,24 @@ pub enum ValidatorState { // TODO consider adding `Jailed` } -/// A bond is validator's self-bond or a delegation from a regular account to a -/// validator. +/// A bond is either a validator's self-bond or a delegation from a regular +/// account to a validator. #[derive( Debug, Clone, Default, BorshDeserialize, BorshSerialize, BorshSchema, )] pub struct Bond { - /// A key is a the epoch set for the bond. This is used in unbonding, where - /// it's needed for slash epoch range check. + /// Bonded positive deltas. A key is the epoch set for the bond. This is + /// used in unbonding, where it's needed for slash epoch range check. /// /// TODO: For Bonds, there's unnecessary redundancy with this hash map. /// We only need to keep the start `Epoch` for the Epoched head element /// (i.e. the current epoch data), the rest of the array can be calculated /// from the offset from the head - pub deltas: HashMap, + pub pos_deltas: HashMap, + /// Unbonded negative deltas. The values are recorded as positive, but + /// should be subtracted when we're finding the total for some given + /// epoch. + pub neg_deltas: Token, } /// An unbond contains unbonded tokens from a validator's self-bond or a @@ -546,13 +579,15 @@ where impl Bond where - Token: Clone + Copy + Add + Default, + Token: Clone + Copy + Add + Sub + Default, { /// Find the sum of all the bonds amounts. pub fn sum(&self) -> Token { - self.deltas + let pos_deltas_sum: Token = self + .pos_deltas .iter() - .fold(Default::default(), |acc, (_epoch, amount)| acc + *amount) + .fold(Default::default(), |acc, (_epoch, amount)| acc + *amount); + pos_deltas_sum - self.neg_deltas } } @@ -563,24 +598,26 @@ where type Output = Self; fn add(mut self, rhs: Self) -> Self::Output { - // This is almost the same as `self.delta.extend(rhs.delta);`, except - // that we add values where a key is present on both sides. - let iter = rhs.deltas.into_iter(); - let reserve = if self.deltas.is_empty() { + // This is almost the same as `self.pos_deltas.extend(rhs.pos_deltas);`, + // except that we add values where a key is present on both + // sides. 
+ let iter = rhs.pos_deltas.into_iter(); + let reserve = if self.pos_deltas.is_empty() { iter.size_hint().0 } else { (iter.size_hint().0 + 1) / 2 }; - self.deltas.reserve(reserve); + self.pos_deltas.reserve(reserve); iter.for_each(|(k, v)| { // Add or insert - match self.deltas.get_mut(&k) { + match self.pos_deltas.get_mut(&k) { Some(value) => *value += v, None => { - self.deltas.insert(k, v); + self.pos_deltas.insert(k, v); } } }); + self.neg_deltas += rhs.neg_deltas; self } } diff --git a/proof_of_stake/src/validation.rs b/proof_of_stake/src/validation.rs index 41485bcbb0..38fc3cc20a 100644 --- a/proof_of_stake/src/validation.rs +++ b/proof_of_stake/src/validation.rs @@ -5,6 +5,7 @@ use std::collections::HashMap; use std::convert::TryFrom; use std::fmt::{Debug, Display}; use std::hash::Hash; +use std::marker::PhantomData; use std::ops::{Add, AddAssign, Neg, Sub, SubAssign}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; @@ -14,7 +15,7 @@ use crate::btree_set::BTreeSetShims; use crate::epoched::DynEpochOffset; use crate::parameters::PosParams; use crate::types::{ - BondId, Bonds, Epoch, Slashes, TotalVotingPowers, Unbonds, + BondId, Bonds, Epoch, Slash, Slashes, TotalVotingPowers, Unbonds, ValidatorConsensusKeys, ValidatorSets, ValidatorState, ValidatorStates, ValidatorTotalDeltas, ValidatorVotingPowers, VotingPower, VotingPowerDelta, WeightedValidator, @@ -96,6 +97,17 @@ where got: u64, expected: u64, }, + + #[error( + "Bond ID {id} must be subtracted at the correct epoch. Got epoch \ + {got}, expected {expected}" + )] + InvalidNegDeltaEpoch { + id: BondId
, + got: u64, + expected: u64, + }, + #[error( "Invalid validator {address} sum of total deltas. Total Δ \ {total_delta}, bonds Δ {bond_delta}" @@ -302,6 +314,16 @@ pub struct NewValidator { voting_power: VotingPower, } +/// Validation constants +#[derive(Clone, Debug)] +struct Constants { + current_epoch: Epoch, + pipeline_epoch: Epoch, + unbonding_epoch: Epoch, + pipeline_offset: u64, + unbonding_offset: u64, +} + /// Validate the given list of PoS data `changes`. Returns empty list, if all /// the changes are valid. #[must_use] @@ -364,892 +386,36 @@ where + BorshSchema + PartialEq, { - let current_epoch = current_epoch.into(); - use DataUpdate::*; - use ValidatorUpdate::*; - + let current_epoch: Epoch = current_epoch.into(); let pipeline_offset = DynEpochOffset::PipelineLen.value(params); let unbonding_offset = DynEpochOffset::UnbondingLen.value(params); let pipeline_epoch = current_epoch + pipeline_offset; let unbonding_epoch = current_epoch + unbonding_offset; + let constants = Constants { + current_epoch, + pipeline_epoch, + unbonding_epoch, + pipeline_offset, + unbonding_offset, + }; let mut errors = vec![]; - let mut balance_delta = TokenChange::default(); - // Changes of validators' bonds - let mut bond_delta: HashMap = HashMap::default(); - // Changes of validators' unbonds - let mut unbond_delta: HashMap = HashMap::default(); - - // Changes of all validator total deltas (up to `unbonding_epoch`) - let mut total_deltas: HashMap = HashMap::default(); - // Accumulative stake calculated from validator total deltas for each epoch - // in which it has changed (the tuple of values are in pre and post state) - let mut total_stake_by_epoch: HashMap< - Epoch, - HashMap, - > = HashMap::default(); - // Total voting power delta calculated from validators' total deltas - let mut expected_total_voting_power_delta_by_epoch: HashMap< - Epoch, - VotingPowerDelta, - > = HashMap::default(); - // Changes of validators' voting power data - let mut voting_power_by_epoch: HashMap< - Epoch, - HashMap, - > = HashMap::default(); - - let mut validator_set_pre: Option> = None; - let mut validator_set_post: Option> = None; - - let mut total_voting_power_delta_by_epoch: HashMap< - Epoch, - VotingPowerDelta, - > = HashMap::default(); - - let mut new_validators: HashMap = HashMap::default(); - - for change in changes { - match change { - Validator { address, update } => match update { - State(data) => match (data.pre, data.post) { - (None, Some(post)) => { - if post.last_update() != current_epoch { - errors.push(Error::InvalidLastUpdate) - } - // Before pipeline epoch, the state must be `Pending` - for epoch in - Epoch::iter_range(current_epoch, pipeline_offset) - { - match post.get(epoch) { - Some(ValidatorState::Pending) => {} - _ => errors.push( - Error::InvalidNewValidatorState( - epoch.into(), - ), - ), - } - } - // At pipeline epoch, the state must be `Candidate` - match post.get(pipeline_epoch) { - Some(ValidatorState::Candidate) => {} - _ => errors.push(Error::InvalidNewValidatorState( - pipeline_epoch.into(), - )), - } - let validator = - new_validators.entry(address.clone()).or_default(); - validator.has_state = true; - } - (Some(pre), Some(post)) => { - if post.last_update() != current_epoch { - errors.push(Error::InvalidLastUpdate) - } - use ValidatorState::*; - // Before pipeline epoch, the only allowed state change - // is from `Inactive` to `Pending` - for epoch in - Epoch::iter_range(current_epoch, pipeline_offset) - { - match (pre.get(epoch), post.get(epoch)) { - (Some(Inactive), 
Some(Pending)) => {} - (Some(state_pre), Some(state_post)) - if state_pre == state_post => {} - _ => errors.push( - Error::InvalidValidatorStateUpdate( - epoch.into(), - ), - ), - } - } - // Check allowed state changes at pipeline epoch - match ( - pre.get(pipeline_epoch), - post.get(pipeline_epoch), - ) { - ( - Some(Pending), - Some(Candidate) | Some(Inactive), - ) - | (Some(Candidate), Some(Inactive)) - | ( - Some(Inactive), - Some(Candidate) | Some(Pending), - ) => {} - _ => errors.push(Error::InvalidNewValidatorState( - pipeline_epoch.into(), - )), - } - } - (Some(_), None) => errors - .push(Error::ValidatorStateIsRequired(address.clone())), - (None, None) => continue, - }, - ConsensusKey(data) => match (data.pre, data.post) { - (None, Some(post)) => { - if post.last_update() != current_epoch { - errors.push(Error::InvalidLastUpdate) - } - // The value must be known at pipeline epoch - match post.get(pipeline_epoch) { - Some(_) => {} - _ => errors.push( - Error::MissingNewValidatorConsensusKey( - pipeline_epoch.into(), - ), - ), - } - let validator = - new_validators.entry(address.clone()).or_default(); - validator.has_consensus_key = true; - } - (Some(pre), Some(post)) => { - if post.last_update() != current_epoch { - errors.push(Error::InvalidLastUpdate) - } - // Before pipeline epoch, the key must not change - for epoch in - Epoch::iter_range(current_epoch, pipeline_offset) - { - match (pre.get(epoch), post.get(epoch)) { - (Some(key_pre), Some(key_post)) - if key_pre == key_post => - { - continue; - } - _ => errors.push( - Error::InvalidValidatorConsensusKeyUpdate( - epoch.into(), - ), - ), - } - } - } - (Some(_), None) => errors - .push(Error::ValidatorStateIsRequired(address.clone())), - (None, None) => continue, - }, - StakingRewardAddress(data) => match (data.pre, data.post) { - (Some(_), Some(post)) => { - if post == address { - errors.push( - Error::StakingRewardAddressEqValidator( - address.clone(), - ), - ); - } - } - (None, Some(post)) => { - if post == address { - errors.push( - Error::StakingRewardAddressEqValidator( - address.clone(), - ), - ); - } - let validator = - new_validators.entry(address.clone()).or_default(); - validator.has_staking_reward_address = true; - } - _ => errors.push(Error::StakingRewardAddressIsRequired( - address.clone(), - )), - }, - TotalDeltas(data) => match (data.pre, data.post) { - (Some(pre), Some(post)) => { - if post.last_update() != current_epoch { - errors.push(Error::InvalidLastUpdate) - } - // Changes of all total deltas (up to `unbonding_epoch`) - let mut deltas = TokenChange::default(); - // Sum of pre total deltas - let mut pre_deltas_sum = TokenChange::default(); - // Sum of post total deltas - let mut post_deltas_sum = TokenChange::default(); - // Iter from the first epoch to the last epoch of `post` - for epoch in Epoch::iter_range( - current_epoch, - unbonding_offset + 1, - ) { - // Changes of all total deltas (up to - // `unbonding_epoch`) - let mut delta = TokenChange::default(); - // Find the delta in `pre` - if let Some(change) = { - if epoch == current_epoch { - // On the first epoch, we have to get the - // sum of all deltas at and before that - // epoch as the `pre` could have been set in - // an older epoch - pre.get(epoch) - } else { - pre.get_delta_at_epoch(epoch).copied() - } - } { - delta -= change; - pre_deltas_sum += change; - } - // Find the delta in `post` - if let Some(change) = post.get_delta_at_epoch(epoch) - { - delta += *change; - post_deltas_sum += *change; - let stake_pre: i128 = - 
Into::into(pre_deltas_sum); - let stake_post: i128 = - Into::into(post_deltas_sum); - match ( - u64::try_from(stake_pre), - u64::try_from(stake_post), - ) { - (Ok(stake_pre), Ok(stake_post)) => { - let stake_pre = - TokenAmount::from(stake_pre); - let stake_post = - TokenAmount::from(stake_post); - total_stake_by_epoch - .entry(epoch) - .or_insert_with(HashMap::default) - .insert( - address.clone(), - (stake_pre, stake_post), - ); - } - _ => errors.push( - Error::InvalidValidatorTotalDeltas( - address.clone(), - stake_post, - ), - ), - } - } - deltas += delta; - // A total delta can only be increased at - // `pipeline_offset` from bonds and decreased at - // `unbonding_offset` from unbonding - if delta > TokenChange::default() - && epoch != pipeline_epoch - { - errors.push(Error::EpochedDataWrongEpoch { - got: epoch.into(), - expected: vec![pipeline_epoch.into()], - }) - } - if delta < TokenChange::default() - && epoch != unbonding_epoch - { - errors.push(Error::EpochedDataWrongEpoch { - got: epoch.into(), - expected: vec![unbonding_epoch.into()], - }) - } - } - if post_deltas_sum < TokenChange::default() { - errors.push(Error::NegativeValidatorTotalDeltasSum( - address.clone(), - )) - } - if deltas != TokenChange::default() { - let deltas_entry = total_deltas - .entry(address.clone()) - .or_default(); - *deltas_entry += deltas; - } - } - (None, Some(post)) => { - if post.last_update() != current_epoch { - errors.push(Error::InvalidLastUpdate) - } - // Changes of all total deltas (up to `unbonding_epoch`) - let mut deltas = TokenChange::default(); - for epoch in Epoch::iter_range( - current_epoch, - unbonding_offset + 1, - ) { - if let Some(change) = post.get_delta_at_epoch(epoch) - { - // A new total delta can only be initialized - // at `pipeline_offset` (from bonds) and updated - // at `unbonding_offset` (from unbonding) - if epoch != pipeline_epoch - && epoch != unbonding_epoch - { - errors.push(Error::EpochedDataWrongEpoch { - got: epoch.into(), - expected: vec![pipeline_epoch.into()], - }) - } - deltas += *change; - let stake: i128 = Into::into(deltas); - match u64::try_from(stake) { - Ok(stake) => { - let stake = TokenAmount::from(stake); - total_stake_by_epoch - .entry(epoch) - .or_insert_with(HashMap::default) - .insert( - address.clone(), - (0.into(), stake), - ); - } - Err(_) => errors.push( - Error::InvalidValidatorTotalDeltas( - address.clone(), - stake, - ), - ), - } - } - } - if deltas < TokenChange::default() { - errors.push(Error::NegativeValidatorTotalDeltasSum( - address.clone(), - )) - } - if deltas != TokenChange::default() { - let deltas_entry = total_deltas - .entry(address.clone()) - .or_default(); - *deltas_entry += deltas; - } - let validator = - new_validators.entry(address.clone()).or_default(); - validator.has_total_deltas = true; - } - (Some(_), None) => { - errors.push(Error::MissingValidatorTotalDeltas(address)) - } - (None, None) => continue, - }, - VotingPowerUpdate(data) => match (&data.pre, data.post) { - (Some(_), Some(post)) | (None, Some(post)) => { - if post.last_update() != current_epoch { - errors.push(Error::InvalidLastUpdate) - } - let mut voting_power = VotingPowerDelta::default(); - // Iter from the current epoch to the last epoch of - // `post` - for epoch in Epoch::iter_range( - current_epoch, - unbonding_offset + 1, - ) { - if let Some(delta_post) = - post.get_delta_at_epoch(epoch) - { - voting_power += *delta_post; - - // If the delta is not the same as in pre-state, - // accumulate the expected total voting power - // change - let 
delta_pre = data - .pre - .as_ref() - .and_then(|data| { - if epoch == current_epoch { - // On the first epoch, we have to - // get the sum of all deltas at and - // before that epoch as the `pre` - // could have been set in an older - // epoch - data.get(epoch) - } else { - data.get_delta_at_epoch(epoch) - .copied() - } - }) - .unwrap_or_default(); - if delta_pre != *delta_post { - let current_delta = - expected_total_voting_power_delta_by_epoch - .entry(epoch) - .or_insert_with(Default::default); - *current_delta += *delta_post - delta_pre; - } - - let vp: i64 = Into::into(voting_power); - match u64::try_from(vp) { - Ok(vp) => { - let vp = VotingPower::from(vp); - voting_power_by_epoch - .entry(epoch) - .or_insert_with(HashMap::default) - .insert(address.clone(), vp); - } - Err(_) => errors.push( - Error::InvalidValidatorVotingPower( - address.clone(), - vp, - ), - ), - } - } - } - if data.pre.is_none() { - let validator = new_validators - .entry(address.clone()) - .or_default(); - validator.has_voting_power = true; - validator.voting_power = post - .get_at_offset( - current_epoch, - DynEpochOffset::PipelineLen, - params, - ) - .unwrap_or_default() - .try_into() - .unwrap_or_default() - } - } - (Some(_), None) => errors.push( - Error::MissingValidatorVotingPower(address.clone()), - ), - (None, None) => continue, - }, - }, - Balance(data) => match (data.pre, data.post) { - (None, Some(post)) => balance_delta += TokenChange::from(post), - (Some(pre), Some(post)) => { - balance_delta -= TokenChange::from(pre); - balance_delta += TokenChange::from(post); - } - (Some(_), None) => errors.push(Error::MissingBalance), - (None, None) => continue, - }, - Bond { id, data, slashes } => match (data.pre, data.post) { - // Bond may be updated from newly bonded tokens and unbonding - (Some(pre), Some(post)) => { - if post.last_update() != current_epoch { - errors.push(Error::InvalidLastUpdate) - } - let pre_offset: u64 = - match current_epoch.checked_sub(pre.last_update()) { - Some(offset) => offset.into(), - None => { - // If the last_update > current_epoch, the check - // above must have failed with - // `Error::InvalidLastUpdate` - continue; - } - }; - - // Pre-bonds keyed by their `start_epoch` - let mut pre_bonds: HashMap = - HashMap::default(); - // We have to slash only the difference between post and - // pre, not both pre and post to avoid rounding errors - let mut slashed_deltas: HashMap = - HashMap::default(); - - // Iter from the first epoch of `pre` to the last epoch of - // `post` - for epoch in Epoch::iter_range( - pre.last_update(), - pre_offset + pipeline_offset + 1, - ) { - if let Some(bond) = pre.get_delta_at_epoch(epoch) { - for (start_epoch, delta) in bond.deltas.iter() { - let delta = TokenChange::from(*delta); - slashed_deltas.insert(*start_epoch, -delta); - pre_bonds.insert(*start_epoch, delta); - } - } - if let Some(bond) = post.get_delta_at_epoch(epoch) { - for (start_epoch, delta) in bond.deltas.iter() { - // An empty bond must be deleted - if *delta == TokenAmount::default() { - errors.push(Error::EmptyBond(id.clone())) - } - // On the current epoch, all bond's - // `start_epoch`s must be equal or lower than - // `current_epoch`. For all others, the - // `start_epoch` must be equal - // to the `epoch` at which it's set. 
- if (epoch == current_epoch - && *start_epoch > current_epoch) - || (epoch != current_epoch - && *start_epoch != epoch) - { - errors.push(Error::InvalidBondStartEpoch { - id: id.clone(), - got: (*start_epoch).into(), - expected: epoch.into(), - }) - } - let delta = TokenChange::from(*delta); - match slashed_deltas.get_mut(start_epoch) { - Some(pre_delta) => { - if *pre_delta + delta == 0_i128.into() { - slashed_deltas.remove(start_epoch); - } else { - *pre_delta += delta; - } - } - None => { - slashed_deltas - .insert(*start_epoch, delta); - } - } - - // Anywhere other than at `pipeline_offset` - // where new bonds are added, check against the - // data in `pre_bonds` to ensure that no new - // bond has been added and that the deltas are - // equal or lower to `pre_bonds` deltas. - // Note that any bonds from any epoch can be - // unbonded, even if they are not yet active. - if epoch != pipeline_epoch { - match pre_bonds.get(start_epoch) { - Some(pre_delta) => { - if &delta > pre_delta { - errors.push( - Error::InvalidNewBondEpoch { - id: id.clone(), - got: epoch.into(), - expected: pipeline_epoch - .into(), - }); - } - } - None => { - errors.push( - Error::InvalidNewBondEpoch { - id: id.clone(), - got: epoch.into(), - expected: (current_epoch - + pipeline_offset) - .into(), - }, - ); - } - } - } - } - } - } - // Check slashes - for (start_epoch, delta) in slashed_deltas.iter_mut() { - for slash in &slashes { - if slash.epoch >= *start_epoch { - let raw_delta: i128 = (*delta).into(); - let current_slashed = - TokenChange::from(slash.rate * raw_delta); - *delta -= current_slashed; - } - } - } - let total = slashed_deltas - .values() - .fold(TokenChange::default(), |acc, delta| { - acc + *delta - }); - if total != TokenChange::default() { - let bond_entry = - bond_delta.entry(id.validator).or_default(); - *bond_entry += total; - } - } - // Bond may be created from newly bonded tokens only - (None, Some(post)) => { - if post.last_update() != current_epoch { - errors.push(Error::InvalidLastUpdate) - } - let mut total_delta = TokenChange::default(); - for epoch in - Epoch::iter_range(current_epoch, pipeline_offset + 1) - { - if let Some(bond) = post.get_delta_at_epoch(epoch) { - // A new bond must be initialized at - // `pipeline_offset` - if epoch != pipeline_epoch { - errors.push(Error::EpochedDataWrongEpoch { - got: epoch.into(), - expected: vec![pipeline_epoch.into()], - }) - } - for (start_epoch, delta) in bond.deltas.iter() { - if *start_epoch != epoch { - errors.push(Error::InvalidBondStartEpoch { - id: id.clone(), - got: (*start_epoch).into(), - expected: epoch.into(), - }) - } - let mut delta = *delta; - // Check slashes - for slash in &slashes { - if slash.epoch >= *start_epoch { - let raw_delta: u64 = delta.into(); - let current_slashed = TokenAmount::from( - slash.rate * raw_delta, - ); - delta -= current_slashed; - } - } - let delta = TokenChange::from(delta); - total_delta += delta - } - } - } - // An empty bond must be deleted - if total_delta == TokenChange::default() { - errors.push(Error::EmptyBond(id.clone())) - } - let bond_entry = - bond_delta.entry(id.validator).or_default(); - *bond_entry += total_delta; - } - // Bond may be deleted when all the tokens are unbonded - (Some(pre), None) => { - let mut total_delta = TokenChange::default(); - for index in 0..pipeline_offset + 1 { - let index = index as usize; - let epoch = pre.last_update() + index; - if let Some(bond) = pre.get_delta_at_epoch(epoch) { - for (start_epoch, delta) in &bond.deltas { - let mut delta = *delta; - 
// Check slashes - for slash in &slashes { - if slash.epoch >= *start_epoch { - let raw_delta: u64 = delta.into(); - let current_slashed = TokenAmount::from( - slash.rate * raw_delta, - ); - delta -= current_slashed; - } - } - let delta = TokenChange::from(delta); - total_delta -= delta - } - } - } - let bond_entry = - bond_delta.entry(id.validator).or_default(); - *bond_entry += total_delta; - } - (None, None) => continue, - }, - Unbond { id, data, slashes } => match (data.pre, data.post) { - // Unbond may be updated from newly unbonded tokens - (Some(pre), Some(post)) => { - if post.last_update() != current_epoch { - errors.push(Error::InvalidLastUpdate) - } - let pre_offset: u64 = - match current_epoch.checked_sub(pre.last_update()) { - Some(offset) => offset.into(), - None => { - // If the last_update > current_epoch, the check - // above must have failed with - // `Error::InvalidLastUpdate` - continue; - } - }; - - // We have to slash only the difference between post and - // pre, not both pre and post to avoid rounding errors - let mut slashed_deltas: HashMap< - (Epoch, Epoch), - TokenChange, - > = HashMap::default(); - // Iter from the first epoch of `pre` to the last epoch of - // `post` - for epoch in Epoch::iter_range( - pre.last_update(), - pre_offset + unbonding_offset + 1, - ) { - if let Some(unbond) = pre.get_delta_at_epoch(epoch) { - for ((start_epoch, end_epoch), delta) in - unbond.deltas.iter() - { - let delta = TokenChange::from(*delta); - slashed_deltas - .insert((*start_epoch, *end_epoch), -delta); - } - } - if let Some(unbond) = post.get_delta_at_epoch(epoch) { - for ((start_epoch, end_epoch), delta) in - unbond.deltas.iter() - { - let delta = TokenChange::from(*delta); - let key = (*start_epoch, *end_epoch); - match slashed_deltas.get_mut(&key) { - Some(pre_delta) => { - if *pre_delta + delta == 0_i128.into() { - slashed_deltas.remove(&key); - } else { - *pre_delta += delta; - } - } - None => { - slashed_deltas.insert(key, delta); - } - } - } - } - } - // Check slashes - for ((start_epoch, end_epoch), delta) in - slashed_deltas.iter_mut() - { - for slash in &slashes { - if slash.epoch >= *start_epoch - && slash.epoch <= *end_epoch - { - let raw_delta: i128 = (*delta).into(); - let current_slashed = - TokenChange::from(slash.rate * raw_delta); - *delta -= current_slashed; - } - } - } - let total = slashed_deltas - .values() - .fold(TokenChange::default(), |acc, delta| { - acc + *delta - }); - if total != TokenChange::default() { - let unbond_entry = - unbond_delta.entry(id.validator).or_default(); - *unbond_entry += total; - } - } - // Unbond may be created from a bond - (None, Some(post)) => { - if post.last_update() != current_epoch { - errors.push(Error::InvalidLastUpdate) - } - let mut total_delta = TokenChange::default(); - for epoch in Epoch::iter_range( - post.last_update(), - unbonding_offset + 1, - ) { - if let Some(unbond) = post.get_delta_at_epoch(epoch) { - for ((start_epoch, end_epoch), delta) in - unbond.deltas.iter() - { - let mut delta = *delta; - // Check and apply slashes, if any - for slash in &slashes { - if slash.epoch >= *start_epoch - && slash.epoch <= *end_epoch - { - let raw_delta: u64 = delta.into(); - let current_slashed = TokenAmount::from( - slash.rate * raw_delta, - ); - delta -= current_slashed; - } - } - let delta = TokenChange::from(delta); - total_delta += delta; - } - } - } - let unbond_entry = - unbond_delta.entry(id.validator).or_default(); - *unbond_entry += total_delta; - } - // Unbond may be deleted when all the tokens are 
withdrawn - (Some(pre), None) => { - let mut total_delta = TokenChange::default(); - for epoch in Epoch::iter_range( - pre.last_update(), - unbonding_offset + 1, - ) { - if let Some(unbond) = pre.get_delta_at_epoch(epoch) { - for ((start_epoch, end_epoch), delta) in - unbond.deltas.iter() - { - let mut delta = *delta; - // Check and apply slashes, if any - for slash in &slashes { - if slash.epoch >= *start_epoch - && slash.epoch <= *end_epoch - { - let raw_delta: u64 = delta.into(); - let current_slashed = TokenAmount::from( - slash.rate * raw_delta, - ); - delta -= current_slashed; - } - } - let delta = TokenChange::from(delta); - total_delta -= delta; - } - } - } - let unbond_entry = - unbond_delta.entry(id.validator).or_default(); - *unbond_entry += total_delta; - } - (None, None) => continue, - }, - ValidatorSet(data) => match (data.pre, data.post) { - (Some(pre), Some(post)) => { - if post.last_update() != current_epoch { - errors.push(Error::InvalidLastUpdate) - } - validator_set_pre = Some(pre); - validator_set_post = Some(post); - } - _ => errors.push(Error::MissingValidatorSet), - }, - TotalVotingPower(data) => match (data.pre, data.post) { - (Some(pre), Some(post)) => { - if post.last_update() != current_epoch { - errors.push(Error::InvalidLastUpdate) - } - // Iter from the first epoch to the last epoch of `post` - for epoch in Epoch::iter_range( - post.last_update(), - unbonding_offset + 1, - ) { - // Find the delta in `pre` - let delta_pre = (if epoch == post.last_update() { - // On the first epoch, we have to get the - // sum of all deltas at and before that - // epoch as the `pre` could have been set in - // an older epoch - pre.get(epoch) - } else { - pre.get_delta_at_epoch(epoch).copied() - }) - .unwrap_or_default(); - // Find the delta in `post` - let delta_post = post - .get_delta_at_epoch(epoch) - .copied() - .unwrap_or_default(); - if delta_pre != delta_post { - total_voting_power_delta_by_epoch - .insert(epoch, delta_post - delta_pre); - } - } - } - _ => errors.push(Error::MissingTotalVotingPower), - }, - ValidatorAddressRawHash { raw_hash, data } => { - match (data.pre, data.post) { - (None, Some((address, expected_raw_hash))) => { - if raw_hash != expected_raw_hash { - errors.push(Error::InvalidAddressRawHash( - raw_hash, - expected_raw_hash, - )) - } - let validator = - new_validators.entry(address.clone()).or_default(); - validator.has_address_raw_hash = true; - } - (pre, post) if pre != post => { - errors.push(Error::InvalidRawHashUpdate) - } - _ => continue, - } - } - } - } + let Accumulator { + balance_delta, + bond_delta, + unbond_delta, + total_deltas, + total_stake_by_epoch, + expected_total_voting_power_delta_by_epoch, + voting_power_by_epoch, + validator_set_pre, + validator_set_post, + total_voting_power_delta_by_epoch, + new_validators, + } = Validate::::accumulate_changes( + changes, params, &constants, &mut errors + ); // Check total deltas against bonds for (validator, total_delta) in total_deltas.iter() { @@ -1695,3 +861,1292 @@ where errors } + +#[derive(Clone, Debug)] +struct Accumulator +where + Address: Display + + Debug + + Clone + + PartialEq + + Eq + + PartialOrd + + Ord + + Hash + + BorshDeserialize + + BorshSerialize + + BorshSchema, + TokenAmount: Display + + Clone + + Copy + + Debug + + Default + + Eq + + Add + + Sub + + AddAssign + + SubAssign + + Into + + From + + BorshDeserialize + + BorshSerialize + + BorshSchema, + TokenChange: Display + + Debug + + Default + + Clone + + Copy + + Add + + Sub + + Neg + + SubAssign + + AddAssign + + From 
+ + Into + + From + + PartialEq + + Eq + + PartialOrd + + Ord + + BorshDeserialize + + BorshSerialize + + BorshSchema, +{ + balance_delta: TokenChange, + /// Changes of validators' bonds + bond_delta: HashMap, + /// Changes of validators' unbonds + unbond_delta: HashMap, + + /// Changes of all validator total deltas (up to `unbonding_epoch`) + total_deltas: HashMap, + /// Stake calculated from validator total deltas for each epoch + /// in which it has changed (the tuple of values are in pre and post state) + total_stake_by_epoch: + HashMap>, + /// Total voting power delta calculated from validators' total deltas + expected_total_voting_power_delta_by_epoch: + HashMap, + /// Changes of validators' voting power data + voting_power_by_epoch: HashMap>, + validator_set_pre: Option>, + validator_set_post: Option>, + total_voting_power_delta_by_epoch: HashMap, + new_validators: HashMap, +} + +/// Accumulator of storage changes +impl Default + for Accumulator +where + Address: Display + + Debug + + Clone + + PartialEq + + Eq + + PartialOrd + + Ord + + Hash + + BorshDeserialize + + BorshSerialize + + BorshSchema, + TokenAmount: Display + + Clone + + Copy + + Debug + + Default + + Eq + + Add + + Sub + + AddAssign + + SubAssign + + Into + + From + + BorshDeserialize + + BorshSerialize + + BorshSchema, + TokenChange: Display + + Debug + + Default + + Clone + + Copy + + Add + + Sub + + Neg + + SubAssign + + AddAssign + + From + + Into + + From + + PartialEq + + Eq + + PartialOrd + + Ord + + BorshDeserialize + + BorshSerialize + + BorshSchema, +{ + fn default() -> Self { + Self { + balance_delta: Default::default(), + bond_delta: Default::default(), + unbond_delta: Default::default(), + total_deltas: Default::default(), + total_stake_by_epoch: Default::default(), + expected_total_voting_power_delta_by_epoch: Default::default(), + voting_power_by_epoch: Default::default(), + validator_set_pre: Default::default(), + validator_set_post: Default::default(), + total_voting_power_delta_by_epoch: Default::default(), + new_validators: Default::default(), + } + } +} + +/// An empty local type to re-use trait bounds for the functions associated with +/// `Validate` in the `impl` below +struct Validate { + address: PhantomData
, + token_amount: PhantomData, + token_change: PhantomData, + public_key: PhantomData, +} + +impl + Validate +where + Address: Display + + Debug + + Clone + + PartialEq + + Eq + + PartialOrd + + Ord + + Hash + + BorshDeserialize + + BorshSerialize + + BorshSchema, + TokenAmount: Display + + Clone + + Copy + + Debug + + Default + + Eq + + Add + + Sub + + AddAssign + + SubAssign + + Into + + From + + BorshDeserialize + + BorshSerialize + + BorshSchema, + TokenChange: Display + + Debug + + Default + + Clone + + Copy + + Add + + Sub + + Neg + + SubAssign + + AddAssign + + From + + Into + + From + + PartialEq + + Eq + + PartialOrd + + Ord + + BorshDeserialize + + BorshSerialize + + BorshSchema, + PublicKey: Debug + + Clone + + BorshDeserialize + + BorshSerialize + + BorshSchema + + PartialEq, +{ + fn accumulate_changes( + changes: Vec>, + params: &PosParams, + constants: &Constants, + errors: &mut Vec>, + ) -> Accumulator { + use DataUpdate::*; + use ValidatorUpdate::*; + + let mut accumulator = Accumulator::default(); + let Accumulator { + balance_delta, + bond_delta, + unbond_delta, + total_deltas, + total_stake_by_epoch, + expected_total_voting_power_delta_by_epoch, + voting_power_by_epoch, + validator_set_pre, + validator_set_post, + total_voting_power_delta_by_epoch, + new_validators, + } = &mut accumulator; + + for change in changes { + match change { + Validator { address, update } => match update { + State(data) => Self::validator_state( + constants, + errors, + new_validators, + address, + data, + ), + ConsensusKey(data) => Self::validator_consensus_key( + constants, + errors, + new_validators, + address, + data, + ), + StakingRewardAddress(data) => { + Self::validator_staking_reward_address( + errors, + new_validators, + address, + data, + ) + } + + TotalDeltas(data) => Self::validator_total_deltas( + constants, + errors, + total_deltas, + total_stake_by_epoch, + new_validators, + address, + data, + ), + VotingPowerUpdate(data) => Self::validator_voting_power( + params, + constants, + errors, + voting_power_by_epoch, + expected_total_voting_power_delta_by_epoch, + new_validators, + address, + data, + ), + }, + Balance(data) => Self::balance(errors, balance_delta, data), + Bond { id, data, slashes } => { + Self::bond(constants, errors, bond_delta, id, data, slashes) + } + Unbond { id, data, slashes } => Self::unbond( + constants, + errors, + unbond_delta, + id, + data, + slashes, + ), + ValidatorSet(data) => Self::validator_set( + constants, + errors, + validator_set_pre, + validator_set_post, + data, + ), + TotalVotingPower(data) => Self::total_voting_power( + constants, + errors, + total_voting_power_delta_by_epoch, + data, + ), + ValidatorAddressRawHash { raw_hash, data } => { + Self::validator_address_raw_hash( + errors, + new_validators, + raw_hash, + data, + ) + } + } + } + + accumulator + } + + fn validator_state( + constants: &Constants, + errors: &mut Vec>, + new_validators: &mut HashMap, + address: Address, + data: Data, + ) { + match (data.pre, data.post) { + (None, Some(post)) => { + if post.last_update() != constants.current_epoch { + errors.push(Error::InvalidLastUpdate) + } + // Before pipeline epoch, the state must be `Pending` + for epoch in Epoch::iter_range( + constants.current_epoch, + constants.pipeline_offset, + ) { + match post.get(epoch) { + Some(ValidatorState::Pending) => {} + _ => errors.push(Error::InvalidNewValidatorState( + epoch.into(), + )), + } + } + // At pipeline epoch, the state must be `Candidate` + match post.get(constants.pipeline_epoch) { + 
Some(ValidatorState::Candidate) => {} + _ => errors.push(Error::InvalidNewValidatorState( + constants.pipeline_epoch.into(), + )), + } + // Add the validator to the accumulator + let validator = new_validators.entry(address).or_default(); + validator.has_state = true; + } + (Some(pre), Some(post)) => { + if post.last_update() != constants.current_epoch { + errors.push(Error::InvalidLastUpdate) + } + use ValidatorState::*; + // Before pipeline epoch, the only allowed state change + // is from `Inactive` to `Pending` + for epoch in Epoch::iter_range( + constants.current_epoch, + constants.pipeline_offset, + ) { + match (pre.get(epoch), post.get(epoch)) { + (Some(Inactive), Some(Pending)) => {} + (Some(state_pre), Some(state_post)) + if state_pre == state_post => {} + _ => errors.push(Error::InvalidValidatorStateUpdate( + epoch.into(), + )), + } + } + // Check allowed state changes at pipeline epoch + match ( + pre.get(constants.pipeline_epoch), + post.get(constants.pipeline_epoch), + ) { + (Some(Pending), Some(Candidate) | Some(Inactive)) + | (Some(Candidate), Some(Inactive)) + | (Some(Inactive), Some(Candidate) | Some(Pending)) => {} + _ => errors.push(Error::InvalidNewValidatorState( + constants.pipeline_epoch.into(), + )), + } + } + (Some(_), None) => { + errors.push(Error::ValidatorStateIsRequired(address)) + } + (None, None) => {} + } + } + + fn validator_consensus_key( + constants: &Constants, + errors: &mut Vec>, + new_validators: &mut HashMap, + address: Address, + data: Data>, + ) { + match (data.pre, data.post) { + (None, Some(post)) => { + if post.last_update() != constants.current_epoch { + errors.push(Error::InvalidLastUpdate) + } + // The value must be known at pipeline epoch + match post.get(constants.pipeline_epoch) { + Some(_) => {} + _ => errors.push(Error::MissingNewValidatorConsensusKey( + constants.pipeline_epoch.into(), + )), + } + let validator = new_validators.entry(address).or_default(); + validator.has_consensus_key = true; + } + (Some(pre), Some(post)) => { + if post.last_update() != constants.current_epoch { + errors.push(Error::InvalidLastUpdate) + } + // Before pipeline epoch, the key must not change + for epoch in Epoch::iter_range( + constants.current_epoch, + constants.pipeline_offset, + ) { + match (pre.get(epoch), post.get(epoch)) { + (Some(key_pre), Some(key_post)) + if key_pre == key_post => + { + continue; + } + _ => errors.push( + Error::InvalidValidatorConsensusKeyUpdate( + epoch.into(), + ), + ), + } + } + } + (Some(_), None) => { + errors.push(Error::ValidatorStateIsRequired(address)) + } + (None, None) => {} + } + } + + fn validator_staking_reward_address( + errors: &mut Vec>, + new_validators: &mut HashMap, + address: Address, + data: Data
, + ) { + match (data.pre, data.post) { + (Some(_), Some(post)) => { + if post == address { + errors + .push(Error::StakingRewardAddressEqValidator(address)); + } + } + (None, Some(post)) => { + if post == address { + errors.push(Error::StakingRewardAddressEqValidator( + address.clone(), + )); + } + let validator = new_validators.entry(address).or_default(); + validator.has_staking_reward_address = true; + } + _ => errors.push(Error::StakingRewardAddressIsRequired(address)), + } + } + + fn validator_total_deltas( + constants: &Constants, + errors: &mut Vec>, + total_deltas: &mut HashMap, + total_stake_by_epoch: &mut HashMap< + Epoch, + HashMap, + >, + new_validators: &mut HashMap, + address: Address, + data: Data>, + ) { + match (data.pre, data.post) { + (Some(pre), Some(post)) => { + if post.last_update() != constants.current_epoch { + errors.push(Error::InvalidLastUpdate) + } + // Changes of all total deltas (up to `unbonding_epoch`) + let mut deltas = TokenChange::default(); + // Sum of pre total deltas + let mut pre_deltas_sum = TokenChange::default(); + // Sum of post total deltas + let mut post_deltas_sum = TokenChange::default(); + // Iter from the first epoch to the last epoch of `post` + for epoch in Epoch::iter_range( + constants.current_epoch, + constants.unbonding_offset + 1, + ) { + // Changes of all total deltas (up to + // `unbonding_epoch`) + let mut delta = TokenChange::default(); + // Find the delta in `pre` + if let Some(change) = { + if epoch == constants.current_epoch { + // On the first epoch, we have to get the + // sum of all deltas at and before that + // epoch as the `pre` could have been set in + // an older epoch + pre.get(epoch) + } else { + pre.get_delta_at_epoch(epoch).copied() + } + } { + delta -= change; + pre_deltas_sum += change; + } + // Find the delta in `post` + if let Some(change) = post.get_delta_at_epoch(epoch) { + delta += *change; + post_deltas_sum += *change; + let stake_pre: i128 = Into::into(pre_deltas_sum); + let stake_post: i128 = Into::into(post_deltas_sum); + match ( + u64::try_from(stake_pre), + u64::try_from(stake_post), + ) { + (Ok(stake_pre), Ok(stake_post)) => { + let stake_pre = TokenAmount::from(stake_pre); + let stake_post = TokenAmount::from(stake_post); + total_stake_by_epoch + .entry(epoch) + .or_insert_with(HashMap::default) + .insert( + address.clone(), + (stake_pre, stake_post), + ); + } + _ => { + errors.push(Error::InvalidValidatorTotalDeltas( + address.clone(), + stake_post, + )) + } + } + } + deltas += delta; + // A total delta can only be increased at + // `pipeline_offset` from bonds and decreased at + // `unbonding_offset` from unbonding + if delta > TokenChange::default() + && epoch != constants.pipeline_epoch + { + errors.push(Error::EpochedDataWrongEpoch { + got: epoch.into(), + expected: vec![constants.pipeline_epoch.into()], + }) + } + if delta < TokenChange::default() + && epoch != constants.unbonding_epoch + { + errors.push(Error::EpochedDataWrongEpoch { + got: epoch.into(), + expected: vec![constants.unbonding_epoch.into()], + }) + } + } + if post_deltas_sum < TokenChange::default() { + errors.push(Error::NegativeValidatorTotalDeltasSum( + address.clone(), + )) + } + if deltas != TokenChange::default() { + let deltas_entry = total_deltas.entry(address).or_default(); + *deltas_entry += deltas; + } + } + (None, Some(post)) => { + if post.last_update() != constants.current_epoch { + errors.push(Error::InvalidLastUpdate) + } + // Changes of all total deltas (up to `unbonding_epoch`) + let mut deltas = 
TokenChange::default(); + for epoch in Epoch::iter_range( + constants.current_epoch, + constants.unbonding_offset + 1, + ) { + if let Some(change) = post.get_delta_at_epoch(epoch) { + // A new total delta can only be initialized + // at `pipeline_offset` (from bonds) and updated + // at `unbonding_offset` (from unbonding) + if epoch != constants.pipeline_epoch + && epoch != constants.unbonding_epoch + { + errors.push(Error::EpochedDataWrongEpoch { + got: epoch.into(), + expected: vec![constants.pipeline_epoch.into()], + }) + } + deltas += *change; + let stake: i128 = Into::into(deltas); + match u64::try_from(stake) { + Ok(stake) => { + let stake = TokenAmount::from(stake); + total_stake_by_epoch + .entry(epoch) + .or_insert_with(HashMap::default) + .insert(address.clone(), (0.into(), stake)); + } + Err(_) => { + errors.push(Error::InvalidValidatorTotalDeltas( + address.clone(), + stake, + )) + } + } + } + } + if deltas < TokenChange::default() { + errors.push(Error::NegativeValidatorTotalDeltasSum( + address.clone(), + )) + } + if deltas != TokenChange::default() { + let deltas_entry = + total_deltas.entry(address.clone()).or_default(); + *deltas_entry += deltas; + } + let validator = new_validators.entry(address).or_default(); + validator.has_total_deltas = true; + } + (Some(_), None) => { + errors.push(Error::MissingValidatorTotalDeltas(address)) + } + (None, None) => {} + } + } + + #[allow(clippy::too_many_arguments)] + fn validator_voting_power( + params: &PosParams, + constants: &Constants, + errors: &mut Vec>, + voting_power_by_epoch: &mut HashMap< + Epoch, + HashMap, + >, + expected_total_voting_power_delta_by_epoch: &mut HashMap< + Epoch, + VotingPowerDelta, + >, + new_validators: &mut HashMap, + address: Address, + data: Data, + ) { + match (&data.pre, data.post) { + (Some(_), Some(post)) | (None, Some(post)) => { + if post.last_update() != constants.current_epoch { + errors.push(Error::InvalidLastUpdate) + } + let mut voting_power = VotingPowerDelta::default(); + // Iter from the current epoch to the last epoch of + // `post` + for epoch in Epoch::iter_range( + constants.current_epoch, + constants.unbonding_offset + 1, + ) { + if let Some(delta_post) = post.get_delta_at_epoch(epoch) { + voting_power += *delta_post; + + // If the delta is not the same as in pre-state, + // accumulate the expected total voting power + // change + let delta_pre = data + .pre + .as_ref() + .and_then(|data| { + if epoch == constants.current_epoch { + // On the first epoch, we have to + // get the sum of all deltas at and + // before that epoch as the `pre` + // could have been set in an older + // epoch + data.get(epoch) + } else { + data.get_delta_at_epoch(epoch).copied() + } + }) + .unwrap_or_default(); + if delta_pre != *delta_post { + let current_delta = + expected_total_voting_power_delta_by_epoch + .entry(epoch) + .or_insert_with(Default::default); + *current_delta += *delta_post - delta_pre; + } + + let vp: i64 = Into::into(voting_power); + match u64::try_from(vp) { + Ok(vp) => { + let vp = VotingPower::from(vp); + voting_power_by_epoch + .entry(epoch) + .or_insert_with(HashMap::default) + .insert(address.clone(), vp); + } + Err(_) => { + errors.push(Error::InvalidValidatorVotingPower( + address.clone(), + vp, + )) + } + } + } + } + if data.pre.is_none() { + let validator = new_validators.entry(address).or_default(); + validator.has_voting_power = true; + validator.voting_power = post + .get_at_offset( + constants.current_epoch, + DynEpochOffset::PipelineLen, + params, + ) + 
.unwrap_or_default() + .try_into() + .unwrap_or_default() + } + } + (Some(_), None) => { + errors.push(Error::MissingValidatorVotingPower(address)) + } + (None, None) => {} + } + } + + fn balance( + errors: &mut Vec>, + balance_delta: &mut TokenChange, + data: Data, + ) { + match (data.pre, data.post) { + (None, Some(post)) => *balance_delta += TokenChange::from(post), + (Some(pre), Some(post)) => { + *balance_delta += + TokenChange::from(post) - TokenChange::from(pre); + } + (Some(_), None) => errors.push(Error::MissingBalance), + (None, None) => {} + } + } + + fn bond( + constants: &Constants, + errors: &mut Vec>, + bond_delta: &mut HashMap, + id: BondId
, + data: Data>, + slashes: Vec, + ) { + match (data.pre, data.post) { + // Bond may be updated from newly bonded tokens and unbonding + (Some(pre), Some(post)) => { + if post.last_update() != constants.current_epoch { + errors.push(Error::InvalidLastUpdate) + } + let pre_offset: u64 = match constants + .current_epoch + .checked_sub(pre.last_update()) + { + Some(offset) => offset.into(), + None => { + // If the last_update > current_epoch, the check + // above must have failed with + // `Error::InvalidLastUpdate` + return; + } + }; + + // Pre-bonds keyed by their `start_epoch` + let mut pre_bonds: HashMap = + HashMap::default(); + // We have to slash only the difference between post and + // pre, not both pre and post to avoid rounding errors + let mut slashed_deltas: HashMap = + HashMap::default(); + let mut neg_deltas: HashMap = + Default::default(); + // Iter from the first epoch of `pre` to the last epoch of + // `post` + for epoch in Epoch::iter_range( + pre.last_update(), + pre_offset + constants.unbonding_offset + 1, + ) { + if let Some(bond) = pre.get_delta_at_epoch(epoch) { + for (start_epoch, delta) in bond.pos_deltas.iter() { + let delta = TokenChange::from(*delta); + slashed_deltas.insert(*start_epoch, -delta); + pre_bonds.insert(*start_epoch, delta); + } + let ins_epoch = if epoch <= constants.current_epoch { + constants.current_epoch + } else { + epoch + }; + let entry = neg_deltas.entry(ins_epoch).or_default(); + *entry -= TokenChange::from(bond.neg_deltas); + } + if let Some(bond) = post.get_delta_at_epoch(epoch) { + for (start_epoch, delta) in bond.pos_deltas.iter() { + // An empty bond must be deleted + if *delta == TokenAmount::default() { + errors.push(Error::EmptyBond(id.clone())) + } + // On the current epoch, all bond's + // `start_epoch`s must be equal or lower than + // `current_epoch`. For all others, the + // `start_epoch` must be equal + // to the `epoch` at which it's set. + if (epoch == constants.current_epoch + && *start_epoch > constants.current_epoch) + || (epoch != constants.current_epoch + && *start_epoch != epoch) + { + errors.push(Error::InvalidBondStartEpoch { + id: id.clone(), + got: (*start_epoch).into(), + expected: epoch.into(), + }) + } + let delta = TokenChange::from(*delta); + match slashed_deltas.get_mut(start_epoch) { + Some(pre_delta) => { + if *pre_delta + delta == 0_i128.into() { + slashed_deltas.remove(start_epoch); + } else { + *pre_delta += delta; + } + } + None => { + slashed_deltas.insert(*start_epoch, delta); + } + } + + // Anywhere other than at `pipeline_offset` + // where new bonds are added, check against the + // data in `pre_bonds` to ensure that no new + // bond has been added and that the deltas are + // equal or lower to `pre_bonds` deltas. + // Note that any bonds from any epoch can be + // unbonded, even if they are not yet active. 
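+ // (The check below requires exact equality with `pre_bonds`:
+ // decreases are recorded in `neg_deltas` rather than by shrinking
+ // `pos_deltas`, so any difference outside the pipeline epoch is an
+ // error. Illustrative example, not taken from the source: with pre
+ // `pos_deltas = {epoch 3: 100}`, unbonding 40 tokens must leave
+ // `pos_deltas` unchanged and instead add 40 to `neg_deltas` at the
+ // unbonding epoch, which the `neg_deltas` checks further down
+ // enforce.)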
+ if epoch != constants.pipeline_epoch { + match pre_bonds.get(start_epoch) { + Some(pre_delta) => { + if &delta != pre_delta { + errors.push( + Error::InvalidNewBondEpoch { + id: id.clone(), + got: epoch.into(), + expected: constants + .pipeline_epoch + .into(), + }, + ); + } + } + None => { + errors.push( + Error::InvalidNewBondEpoch { + id: id.clone(), + got: epoch.into(), + expected: (constants + .current_epoch + + constants + .pipeline_offset) + .into(), + }, + ); + } + } + } + } + if epoch != constants.unbonding_epoch { + match neg_deltas.get(&epoch) { + Some(deltas) => { + if -*deltas + != TokenChange::from(bond.neg_deltas) + { + errors.push( + Error::InvalidNegDeltaEpoch { + id: id.clone(), + got: epoch.into(), + expected: constants + .unbonding_epoch + .into(), + }, + ) + } + } + None => { + if bond.neg_deltas != 0.into() { + errors.push( + Error::InvalidNegDeltaEpoch { + id: id.clone(), + got: epoch.into(), + expected: constants + .unbonding_epoch + .into(), + }, + ) + } + } + } + } + let entry = neg_deltas.entry(epoch).or_default(); + *entry += TokenChange::from(bond.neg_deltas); + } + } + // Check slashes + for (start_epoch, delta) in slashed_deltas.iter_mut() { + for slash in &slashes { + if slash.epoch >= *start_epoch { + let raw_delta: i128 = (*delta).into(); + let current_slashed = + TokenChange::from(slash.rate * raw_delta); + *delta -= current_slashed; + } + } + } + let total = slashed_deltas + .values() + .fold(TokenChange::default(), |acc, delta| acc + *delta) + - neg_deltas + .values() + .fold(TokenChange::default(), |acc, delta| { + acc + *delta + }); + + if total != TokenChange::default() { + let bond_entry = + bond_delta.entry(id.validator).or_default(); + *bond_entry += total; + } + } + // Bond may be created from newly bonded tokens only + (None, Some(post)) => { + if post.last_update() != constants.current_epoch { + errors.push(Error::InvalidLastUpdate) + } + let mut total_delta = TokenChange::default(); + for epoch in Epoch::iter_range( + constants.current_epoch, + constants.unbonding_offset + 1, + ) { + if let Some(bond) = post.get_delta_at_epoch(epoch) { + // A new bond must be initialized at + // `pipeline_offset` + if epoch != constants.pipeline_epoch + && !bond.pos_deltas.is_empty() + { + dbg!(&bond.pos_deltas); + errors.push(Error::EpochedDataWrongEpoch { + got: epoch.into(), + expected: vec![constants.pipeline_epoch.into()], + }) + } + if epoch != constants.unbonding_epoch + && bond.neg_deltas != 0.into() + { + errors.push(Error::InvalidNegDeltaEpoch { + id: id.clone(), + got: epoch.into(), + expected: constants.unbonding_epoch.into(), + }) + } + for (start_epoch, delta) in bond.pos_deltas.iter() { + if *start_epoch != epoch { + errors.push(Error::InvalidBondStartEpoch { + id: id.clone(), + got: (*start_epoch).into(), + expected: epoch.into(), + }) + } + let mut delta = *delta; + // Check slashes + for slash in &slashes { + if slash.epoch >= *start_epoch { + let raw_delta: u64 = delta.into(); + let current_slashed = TokenAmount::from( + slash.rate * raw_delta, + ); + delta -= current_slashed; + } + } + let delta = TokenChange::from(delta); + total_delta += delta + } + total_delta -= TokenChange::from(bond.neg_deltas) + } + } + // An empty bond must be deleted + if total_delta == TokenChange::default() { + errors.push(Error::EmptyBond(id.clone())) + } + let bond_entry = bond_delta.entry(id.validator).or_default(); + *bond_entry += total_delta; + } + // Bond may be deleted when all the tokens are unbonded + (Some(pre), None) => { + let mut total_delta = 
TokenChange::default(); + for index in 0..constants.pipeline_offset + 1 { + let index = index as usize; + let epoch = pre.last_update() + index; + if let Some(bond) = pre.get_delta_at_epoch(epoch) { + for (start_epoch, delta) in &bond.pos_deltas { + let mut delta = *delta; + // Check slashes + for slash in &slashes { + if slash.epoch >= *start_epoch { + let raw_delta: u64 = delta.into(); + let current_slashed = TokenAmount::from( + slash.rate * raw_delta, + ); + delta -= current_slashed; + } + } + let delta = TokenChange::from(delta); + total_delta -= delta + } + total_delta += TokenChange::from(bond.neg_deltas) + } + } + let bond_entry = bond_delta.entry(id.validator).or_default(); + *bond_entry += total_delta; + } + (None, None) => {} + } + } + + fn unbond( + constants: &Constants, + errors: &mut Vec>, + unbond_delta: &mut HashMap, + id: BondId
, + data: Data>, + slashes: Vec, + ) { + match (data.pre, data.post) { + // Unbond may be updated from newly unbonded tokens + (Some(pre), Some(post)) => { + if post.last_update() != constants.current_epoch { + errors.push(Error::InvalidLastUpdate) + } + let pre_offset: u64 = match constants + .current_epoch + .checked_sub(pre.last_update()) + { + Some(offset) => offset.into(), + None => { + // If the last_update > current_epoch, the check + // above must have failed with + // `Error::InvalidLastUpdate` + return; + } + }; + + // We have to slash only the difference between post and + // pre, not both pre and post to avoid rounding errors + let mut slashed_deltas: HashMap<(Epoch, Epoch), TokenChange> = + HashMap::default(); + // Iter from the first epoch of `pre` to the last epoch of + // `post` + for epoch in Epoch::iter_range( + pre.last_update(), + pre_offset + constants.unbonding_offset + 1, + ) { + if let Some(unbond) = pre.get_delta_at_epoch(epoch) { + for ((start_epoch, end_epoch), delta) in + unbond.deltas.iter() + { + let delta = TokenChange::from(*delta); + slashed_deltas + .insert((*start_epoch, *end_epoch), -delta); + } + } + if let Some(unbond) = post.get_delta_at_epoch(epoch) { + for ((start_epoch, end_epoch), delta) in + unbond.deltas.iter() + { + let delta = TokenChange::from(*delta); + let key = (*start_epoch, *end_epoch); + match slashed_deltas.get_mut(&key) { + Some(pre_delta) => { + if *pre_delta + delta == 0_i128.into() { + slashed_deltas.remove(&key); + } else { + *pre_delta += delta; + } + } + None => { + slashed_deltas.insert(key, delta); + } + } + } + } + } + // Check slashes + for ((start_epoch, end_epoch), delta) in + slashed_deltas.iter_mut() + { + for slash in &slashes { + if slash.epoch >= *start_epoch + && slash.epoch <= *end_epoch + { + let raw_delta: i128 = (*delta).into(); + let current_slashed = + TokenChange::from(slash.rate * raw_delta); + *delta -= current_slashed; + } + } + } + let total = slashed_deltas + .values() + .fold(TokenChange::default(), |acc, delta| acc + *delta); + if total != TokenChange::default() { + let unbond_entry = + unbond_delta.entry(id.validator).or_default(); + *unbond_entry += total; + } + } + // Unbond may be created from a bond + (None, Some(post)) => { + if post.last_update() != constants.current_epoch { + errors.push(Error::InvalidLastUpdate) + } + let mut total_delta = TokenChange::default(); + for epoch in Epoch::iter_range( + post.last_update(), + constants.unbonding_offset + 1, + ) { + if let Some(unbond) = post.get_delta_at_epoch(epoch) { + for ((start_epoch, end_epoch), delta) in + unbond.deltas.iter() + { + let mut delta = *delta; + // Check and apply slashes, if any + for slash in &slashes { + if slash.epoch >= *start_epoch + && slash.epoch <= *end_epoch + { + let raw_delta: u64 = delta.into(); + let current_slashed = TokenAmount::from( + slash.rate * raw_delta, + ); + delta -= current_slashed; + } + } + let delta = TokenChange::from(delta); + total_delta += delta; + } + } + } + let unbond_entry = + unbond_delta.entry(id.validator).or_default(); + *unbond_entry += total_delta; + } + // Unbond may be deleted when all the tokens are withdrawn + (Some(pre), None) => { + let mut total_delta = TokenChange::default(); + for epoch in Epoch::iter_range( + pre.last_update(), + constants.unbonding_offset + 1, + ) { + if let Some(unbond) = pre.get_delta_at_epoch(epoch) { + for ((start_epoch, end_epoch), delta) in + unbond.deltas.iter() + { + let mut delta = *delta; + // Check and apply slashes, if any + for slash in &slashes { 
+ if slash.epoch >= *start_epoch + && slash.epoch <= *end_epoch + { + let raw_delta: u64 = delta.into(); + let current_slashed = TokenAmount::from( + slash.rate * raw_delta, + ); + delta -= current_slashed; + } + } + let delta = TokenChange::from(delta); + total_delta -= delta; + } + } + } + let unbond_entry = + unbond_delta.entry(id.validator).or_default(); + *unbond_entry += total_delta; + } + (None, None) => {} + } + } + + fn validator_set( + constants: &Constants, + errors: &mut Vec>, + validator_set_pre: &mut Option>, + validator_set_post: &mut Option>, + data: Data>, + ) { + match (data.pre, data.post) { + (Some(pre), Some(post)) => { + if post.last_update() != constants.current_epoch { + errors.push(Error::InvalidLastUpdate) + } + *validator_set_pre = Some(pre); + *validator_set_post = Some(post); + } + _ => errors.push(Error::MissingValidatorSet), + } + } + + fn total_voting_power( + constants: &Constants, + errors: &mut Vec>, + total_voting_power_delta_by_epoch: &mut HashMap< + Epoch, + VotingPowerDelta, + >, + data: Data, + ) { + match (data.pre, data.post) { + (Some(pre), Some(post)) => { + if post.last_update() != constants.current_epoch { + errors.push(Error::InvalidLastUpdate) + } + // Iter from the first epoch to the last epoch of `post` + for epoch in Epoch::iter_range( + post.last_update(), + constants.unbonding_offset + 1, + ) { + // Find the delta in `pre` + let delta_pre = (if epoch == post.last_update() { + // On the first epoch, we have to get the + // sum of all deltas at and before that + // epoch as the `pre` could have been set in + // an older epoch + pre.get(epoch) + } else { + pre.get_delta_at_epoch(epoch).copied() + }) + .unwrap_or_default(); + // Find the delta in `post` + let delta_post = post + .get_delta_at_epoch(epoch) + .copied() + .unwrap_or_default(); + if delta_pre != delta_post { + total_voting_power_delta_by_epoch + .insert(epoch, delta_post - delta_pre); + } + } + } + _ => errors.push(Error::MissingTotalVotingPower), + } + } + + fn validator_address_raw_hash( + errors: &mut Vec>, + new_validators: &mut HashMap, + raw_hash: String, + data: Data<(Address, String)>, + ) { + match (data.pre, data.post) { + (None, Some((address, expected_raw_hash))) => { + if raw_hash != expected_raw_hash { + errors.push(Error::InvalidAddressRawHash( + raw_hash, + expected_raw_hash, + )) + } + let validator = new_validators.entry(address).or_default(); + validator.has_address_raw_hash = true; + } + (pre, post) if pre != post => { + errors.push(Error::InvalidRawHashUpdate) + } + _ => {} + } + } +} diff --git a/shared/Cargo.toml b/shared/Cargo.toml index 53ab95f15c..700db9e1fd 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -89,6 +89,7 @@ tempfile = {version = "3.2.0", optional = true} # temporarily using fork work-around for https://github.com/informalsystems/tendermint-rs/issues/971 tendermint = {git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", features = ["secp256k1"]} tendermint-proto = {git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9"} +tiny-keccak = {version = "2.0.2", features = ["keccak"]} thiserror = "1.0.30" tracing = "0.1.30" wasmer = {version = "=2.2.0", optional = true} diff --git a/shared/src/ledger/governance/utils.rs b/shared/src/ledger/governance/utils.rs index aaca277f91..a4282a5c73 100644 --- a/shared/src/ledger/governance/utils.rs +++ b/shared/src/ledger/governance/utils.rs @@ -155,9 +155,21 @@ where 
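+ // The delegated amount used for governance tallies is the net of
+ // `pos_deltas` minus `neg_deltas`: `neg_deltas` is consumed from the
+ // oldest sub-bonds first (the `sorted()` iteration below) and slashes
+ // are applied only to the remainder.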
(Some(epoched_bonds), Some(slashes)) => { let mut delegated_amount: token::Amount = 0.into(); for bond in epoched_bonds.iter() { + let mut to_deduct = bond.neg_deltas; for (start_epoch, &(mut delta)) in - bond.deltas.iter().sorted() + bond.pos_deltas.iter().sorted() { + // deduct bond's neg_deltas + if to_deduct > delta { + to_deduct -= delta; + // If the whole bond was deducted, continue to + // the next one + continue; + } else { + delta -= to_deduct; + to_deduct = token::Amount::default(); + } + let start_epoch = Epoch::from(*start_epoch); delta = apply_slashes(&slashes, delta, start_epoch); if epoch >= start_epoch { diff --git a/shared/src/ledger/pos/mod.rs b/shared/src/ledger/pos/mod.rs index c980da81da..7bfdd3cee6 100644 --- a/shared/src/ledger/pos/mod.rs +++ b/shared/src/ledger/pos/mod.rs @@ -3,6 +3,8 @@ mod storage; pub mod vp; +use std::convert::{TryFrom, TryInto}; + pub use namada_proof_of_stake; pub use namada_proof_of_stake::parameters::PosParams; pub use namada_proof_of_stake::types::{ @@ -15,8 +17,10 @@ pub use vp::PosVP; use crate::ledger::storage::{self as ledger_storage, Storage, StorageHasher}; use crate::types::address::{self, Address, InternalAddress}; +use crate::types::key::common; +use crate::types::key::secp256k1::EthAddress; use crate::types::storage::Epoch; -use crate::types::{key, token}; +use crate::types::token; /// Address of the PoS account implemented as a native VP pub const ADDRESS: Address = Address::Internal(InternalAddress::PoS); @@ -47,9 +51,7 @@ pub fn init_genesis_storage<'a, DB, H>( /// Alias for a PoS type with the same name with concrete type parameters pub type ValidatorConsensusKeys = - namada_proof_of_stake::types::ValidatorConsensusKeys< - key::common::PublicKey, - >; + namada_proof_of_stake::types::ValidatorConsensusKeys; /// Alias for a PoS type with the same name with concrete type parameters pub type ValidatorTotalDeltas = @@ -71,7 +73,7 @@ pub type BondId = namada_proof_of_stake::types::BondId
; pub type GenesisValidator = namada_proof_of_stake::types::GenesisValidator< Address, token::Amount, - key::common::PublicKey, + common::PublicKey, >; impl From for namada_proof_of_stake::types::Epoch { @@ -87,3 +89,32 @@ impl From for Epoch { Epoch(epoch) } } + +impl From for namada_proof_of_stake::types::EthAddress { + fn from(EthAddress(addr): EthAddress) -> Self { + namada_proof_of_stake::types::EthAddress(addr) + } +} + +impl TryFrom<&common::PublicKey> for namada_proof_of_stake::types::EthAddress { + type Error = common::EthAddressConvError; + + fn try_from(value: &common::PublicKey) -> Result { + let addr = EthAddress::try_from(value)?; + Ok(addr.into()) + } +} + +impl + namada_proof_of_stake::types::TryRefTo< + namada_proof_of_stake::types::EthAddress, + > for common::PublicKey +{ + type Error = common::EthAddressConvError; + + fn try_ref_to( + &self, + ) -> Result { + self.try_into() + } +} diff --git a/shared/src/ledger/pos/storage.rs b/shared/src/ledger/pos/storage.rs index cfe1126b88..496ebdfbbd 100644 --- a/shared/src/ledger/pos/storage.rs +++ b/shared/src/ledger/pos/storage.rs @@ -22,6 +22,8 @@ const VALIDATOR_ADDRESS_RAW_HASH: &str = "address_raw_hash"; const VALIDATOR_STAKING_REWARD_ADDRESS_STORAGE_KEY: &str = "staking_reward_address"; const VALIDATOR_CONSENSUS_KEY_STORAGE_KEY: &str = "consensus_key"; +const VALIDATOR_ETH_COLD_KEY_STORAGE_KEY: &str = "eth_cold_key"; +const VALIDATOR_ETH_HOT_KEY_STORAGE_KEY: &str = "eth_hot_key"; const VALIDATOR_STATE_STORAGE_KEY: &str = "state"; const VALIDATOR_TOTAL_DELTAS_STORAGE_KEY: &str = "total_deltas"; const VALIDATOR_VOTING_POWER_STORAGE_KEY: &str = "voting_power"; @@ -142,6 +144,56 @@ pub fn is_validator_consensus_key_key(key: &Key) -> Option<&Address> { } } +/// Storage key for validator's eth cold key. +pub fn validator_eth_cold_key_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_ETH_COLD_KEY_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for validator's eth cold key? +pub fn is_validator_eth_cold_key_key(key: &Key) -> Option<&Address> { + match &key.segments[..] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::AddressSeg(validator), + DbKeySeg::StringSeg(key), + ] if addr == &ADDRESS + && prefix == VALIDATOR_STORAGE_PREFIX + && key == VALIDATOR_ETH_COLD_KEY_STORAGE_KEY => + { + Some(validator) + } + _ => None, + } +} + +/// Storage key for validator's eth hot key. +pub fn validator_eth_hot_key_key(validator: &Address) -> Key { + validator_prefix(validator) + .push(&VALIDATOR_ETH_HOT_KEY_STORAGE_KEY.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Is storage key for validator's eth hot key? +pub fn is_validator_eth_hot_key_key(key: &Key) -> Option<&Address> { + match &key.segments[..] { + [ + DbKeySeg::AddressSeg(addr), + DbKeySeg::StringSeg(prefix), + DbKeySeg::AddressSeg(validator), + DbKeySeg::StringSeg(key), + ] if addr == &ADDRESS + && prefix == VALIDATOR_STORAGE_PREFIX + && key == VALIDATOR_ETH_HOT_KEY_STORAGE_KEY => + { + Some(validator) + } + _ => None, + } +} + /// Storage key for validator's state. 
pub fn validator_state_key(validator: &Address) -> Key { validator_prefix(validator) @@ -450,6 +502,23 @@ where decode(value.unwrap()).unwrap() } + fn read_validator_eth_cold_key( + &self, + key: &Self::Address, + ) -> Option> { + let (value, _gas) = + self.read(&validator_eth_cold_key_key(key)).unwrap(); + value.map(|value| decode(value).unwrap()) + } + + fn read_validator_eth_hot_key( + &self, + key: &Self::Address, + ) -> Option> { + let (value, _gas) = self.read(&validator_eth_hot_key_key(key)).unwrap(); + value.map(|value| decode(value).unwrap()) + } + fn write_pos_params(&mut self, params: &PosParams) { self.write(¶ms_key(), encode(params)).unwrap(); } @@ -529,6 +598,24 @@ where .unwrap(); } + fn write_validator_eth_cold_key( + &mut self, + address: &Self::Address, + value: &types::ValidatorEthKey, + ) { + self.write(&validator_eth_cold_key_key(address), encode(value)) + .unwrap(); + } + + fn write_validator_eth_hot_key( + &mut self, + address: &Self::Address, + value: &types::ValidatorEthKey, + ) { + self.write(&validator_eth_hot_key_key(address), encode(value)) + .unwrap(); + } + fn init_staking_reward_account( &mut self, address: &Self::Address, diff --git a/shared/src/ledger/pos/vp.rs b/shared/src/ledger/pos/vp.rs index 26be440536..43b4d34ab3 100644 --- a/shared/src/ledger/pos/vp.rs +++ b/shared/src/ledger/pos/vp.rs @@ -21,10 +21,11 @@ use super::{ is_validator_staking_reward_address_key, is_validator_total_deltas_key, is_validator_voting_power_key, params_key, staking_token_address, total_voting_power_key, unbond_key, validator_consensus_key_key, - validator_set_key, validator_slashes_key, - validator_staking_reward_address_key, validator_state_key, - validator_total_deltas_key, validator_voting_power_key, BondId, Bonds, - Unbonds, ValidatorConsensusKeys, ValidatorSets, ValidatorTotalDeltas, + validator_eth_cold_key_key, validator_eth_hot_key_key, validator_set_key, + validator_slashes_key, validator_staking_reward_address_key, + validator_state_key, validator_total_deltas_key, + validator_voting_power_key, BondId, Bonds, Unbonds, ValidatorConsensusKeys, + ValidatorSets, ValidatorTotalDeltas, }; use crate::ledger::governance::vp::is_proposal_accepted; use crate::ledger::native_vp::{self, Ctx, NativeVp}; @@ -110,7 +111,7 @@ where &self, tx_data: &[u8], keys_changed: &BTreeSet, - verifiers: &BTreeSet
<Address>, + _verifiers: &BTreeSet<Address>
, ) -> Result { use validation::Data; use validation::DataUpdate::{self, *}; @@ -126,16 +127,6 @@ where Some(id) => return Ok(is_proposal_accepted(&self.ctx, id)), _ => return Ok(false), } - } else if let Some(owner) = key.is_validity_predicate() { - let has_pre = self.ctx.has_key_pre(key)?; - let has_post = self.ctx.has_key_post(key)?; - if has_pre && has_post { - // VP updates must be verified by the owner - return Ok(!verifiers.contains(owner)); - } else if has_pre || !has_post { - // VP cannot be deleted - return Ok(false); - } } else if is_validator_set_key(key) { let pre = self.ctx.read_pre(key)?.and_then(|bytes| { ValidatorSets::try_from_slice(&bytes[..]).ok() @@ -415,6 +406,23 @@ where .unwrap(); decode(value).unwrap() } + + fn read_validator_eth_cold_key( + &self, + key: &Self::Address, + ) -> Option> { + let value = + self.ctx.read_pre(&validator_eth_cold_key_key(key)).unwrap(); + value.map(|value| decode(value).unwrap()) + } + + fn read_validator_eth_hot_key( + &self, + key: &Self::Address, + ) -> Option> { + let value = self.ctx.read_pre(&validator_eth_hot_key_key(key)).unwrap(); + value.map(|value| decode(value).unwrap()) + } } impl From for Error { diff --git a/shared/src/proto/types.rs b/shared/src/proto/types.rs index 7a936dd58f..bc1f072e45 100644 --- a/shared/src/proto/types.rs +++ b/shared/src/proto/types.rs @@ -39,7 +39,7 @@ pub type Result = std::result::Result; /// /// Because the signature is not checked by the ledger, we don't inline it into /// the `Tx` type directly. Instead, the signature is attached to the `tx.data`, -/// which is can then be checked by a validity predicate wasm. +/// which can then be checked by a validity predicate wasm. #[derive(Clone, Debug, BorshSerialize, BorshDeserialize, BorshSchema)] pub struct SignedTxData { /// The original tx data bytes, if any diff --git a/shared/src/types/key/common.rs b/shared/src/types/key/common.rs index 3cdec73bb9..7e9b468e50 100644 --- a/shared/src/types/key/common.rs +++ b/shared/src/types/key/common.rs @@ -1,5 +1,6 @@ //! 
Cryptographic keys +use std::convert::TryFrom; use std::fmt::Display; use std::str::FromStr; @@ -7,7 +8,9 @@ use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; #[cfg(feature = "rand")] use rand::{CryptoRng, RngCore}; use serde::{Deserialize, Serialize}; +use thiserror::Error; +use super::secp256k1::EthAddress; use super::{ ed25519, secp256k1, ParsePublicKeyError, ParseSecretKeyError, ParseSignatureError, RefTo, SchemeType, SigScheme as SigSchemeTrait, @@ -81,6 +84,24 @@ impl FromStr for PublicKey { } } +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum EthAddressConvError { + #[error("Eth key cannot be ed25519, only secp256k1")] + CannotBeEd25519, +} + +impl TryFrom<&PublicKey> for EthAddress { + type Error = EthAddressConvError; + + fn try_from(value: &PublicKey) -> Result { + match value { + PublicKey::Ed25519(_) => Err(EthAddressConvError::CannotBeEd25519), + PublicKey::Secp256k1(pk) => Ok(EthAddress::from(pk)), + } + } +} + /// Secret key #[derive(Debug, Clone, BorshSerialize, BorshDeserialize, BorshSchema)] #[allow(clippy::large_enum_variant)] diff --git a/shared/src/types/key/mod.rs b/shared/src/types/key/mod.rs index 666cc3fb5e..cee1d7f072 100644 --- a/shared/src/types/key/mod.rs +++ b/shared/src/types/key/mod.rs @@ -362,7 +362,7 @@ pub mod testing { use super::SigScheme; use crate::types::key::*; - /// A keypair for tests + /// An ed25519 keypair for tests pub fn keypair_1() -> ::SecretKey { // generated from `cargo test gen_keypair -- --nocapture` let bytes = [ @@ -376,7 +376,7 @@ pub mod testing { .unwrap() } - /// A keypair for tests + /// An ed25519 keypair for tests pub fn keypair_2() -> ::SecretKey { // generated from `cargo test gen_keypair -- --nocapture` let bytes = [ @@ -390,6 +390,32 @@ pub mod testing { .unwrap() } + /// An Ethereum keypair for tests + pub fn keypair_3() -> ::SecretKey { + let bytes = [ + 0xf3, 0x78, 0x78, 0x80, 0xba, 0x85, 0x0b, 0xa4, 0xc5, 0x74, 0x50, + 0x5a, 0x23, 0x54, 0x6d, 0x46, 0x74, 0xa1, 0x3f, 0x09, 0x75, 0x0c, + 0xf4, 0xb5, 0xb8, 0x17, 0x69, 0x64, 0xf4, 0x08, 0xd4, 0x80, + ]; + secp256k1::SecretKey::try_from_slice(bytes.as_ref()) + .unwrap() + .try_to_sk() + .unwrap() + } + + /// An Ethereum keypair for tests + pub fn keypair_4() -> ::SecretKey { + let bytes = [ + 0x68, 0xab, 0xce, 0x64, 0x54, 0x07, 0x7e, 0xf5, 0x1a, 0xb4, 0x31, + 0x7a, 0xb8, 0x8b, 0x98, 0x30, 0x27, 0x11, 0x4e, 0x58, 0x69, 0xd6, + 0x45, 0x94, 0xdc, 0x90, 0x8d, 0x94, 0xee, 0x58, 0x46, 0x91, + ]; + secp256k1::SecretKey::try_from_slice(bytes.as_ref()) + .unwrap() + .try_to_sk() + .unwrap() + } + /// Generate an arbitrary [`super::SecretKey`]. pub fn arb_keypair() -> impl Strategy { any::<[u8; 32]>().prop_map(move |seed| { @@ -398,6 +424,12 @@ pub mod testing { }) } + /// Generate an arbitrary [`common::SecretKey`]. + pub fn arb_common_keypair() -> impl Strategy { + arb_keypair::() + .prop_map(|keypair| keypair.try_to_sk().unwrap()) + } + /// Generate a new random [`super::SecretKey`]. 
pub fn gen_keypair() -> S::SecretKey { let mut rng: ThreadRng = thread_rng(); diff --git a/shared/src/types/key/secp256k1.rs b/shared/src/types/key/secp256k1.rs index 99bcbb3f67..7b781f54cd 100644 --- a/shared/src/types/key/secp256k1.rs +++ b/shared/src/types/key/secp256k1.rs @@ -22,6 +22,10 @@ use super::{ #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] pub struct PublicKey(pub libsecp256k1::PublicKey); +/// Eth address derived from secp256k1 key +#[derive(Debug, Eq, PartialEq, PartialOrd, Ord, Hash)] +pub struct EthAddress(pub [u8; 20]); + impl super::PublicKey for PublicKey { const TYPE: SchemeType = SigScheme::TYPE; @@ -133,6 +137,23 @@ impl From for PublicKey { } } +impl From<&PublicKey> for EthAddress { + fn from(pk: &PublicKey) -> Self { + use tiny_keccak::Hasher; + + let mut hasher = tiny_keccak::Keccak::v256(); + // We're removing the first byte with + // `libsecp256k1::util::TAG_PUBKEY_FULL` + let pk_bytes = &pk.0.serialize()[1..]; + hasher.update(pk_bytes); + let mut output = [0_u8; 32]; + hasher.finalize(&mut output); + let mut addr = [0; 20]; + addr.copy_from_slice(&output[12..]); + EthAddress(addr) + } +} + /// Secp256k1 secret key #[derive(Debug, Clone)] pub struct SecretKey(pub Box); @@ -421,14 +442,14 @@ impl super::SigScheme for SigScheme { /// Sign the data with a key fn sign(keypair: &SecretKey, data: impl AsRef<[u8]>) -> Self::Signature { - #[cfg(not(any(test, features = "secp256k1-sign-verify")))] + #[cfg(not(any(test, feature = "secp256k1-sign-verify")))] { // to avoid `unused-variables` warn let _ = (keypair, data); panic!("\"secp256k1-sign-verify\" feature must be enabled"); } - #[cfg(any(test, features = "secp256k1-sign-verify"))] + #[cfg(any(test, feature = "secp256k1-sign-verify"))] { use sha2::{Digest, Sha256}; let hash = Sha256::digest(data.as_ref()); @@ -443,14 +464,14 @@ impl super::SigScheme for SigScheme { data: &T, sig: &Self::Signature, ) -> Result<(), VerifySigError> { - #[cfg(not(any(test, features = "secp256k1-sign-verify")))] + #[cfg(not(any(test, feature = "secp256k1-sign-verify")))] { // to avoid `unused-variables` warn let _ = (pk, data, sig); panic!("\"secp256k1-sign-verify\" feature must be enabled"); } - #[cfg(any(test, features = "secp256k1-sign-verify"))] + #[cfg(any(test, feature = "secp256k1-sign-verify"))] { use sha2::{Digest, Sha256}; let bytes = &data @@ -476,14 +497,14 @@ impl super::SigScheme for SigScheme { data: &[u8], sig: &Self::Signature, ) -> Result<(), VerifySigError> { - #[cfg(not(any(test, features = "secp256k1-sign-verify")))] + #[cfg(not(any(test, feature = "secp256k1-sign-verify")))] { // to avoid `unused-variables` warn let _ = (pk, data, sig); panic!("\"secp256k1-sign-verify\" feature must be enabled"); } - #[cfg(any(test, features = "secp256k1-sign-verify"))] + #[cfg(any(test, feature = "secp256k1-sign-verify"))] { use sha2::{Digest, Sha256}; let hash = Sha256::digest(data); @@ -501,3 +522,29 @@ impl super::SigScheme for SigScheme { } } } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_eth_address_from_secp() { + // test vector from https://bitcoin.stackexchange.com/a/89848 + let sk_hex = + "c2c72dfbff11dfb4e9d5b0a20c620c58b15bb7552753601f043db91331b0db15"; + let expected_pk_hex = "a225bf565ff4ea039bccba3e26456e910cd74e4616d67ee0a166e26da6e5e55a08d0fa1659b4b547ba7139ca531f62907b9c2e72b80712f1c81ece43c33f4b8b"; + let expected_eth_addr_hex = "6ea27154616a29708dce7650b475dd6b82eba6a3"; + + let sk_bytes = hex::decode(sk_hex).unwrap(); + let sk = 
SecretKey::try_from_slice(&sk_bytes[..]).unwrap(); + let pk: PublicKey = sk.ref_to(); + // We're removing the first byte with + // `libsecp256k1::util::TAG_PUBKEY_FULL` + let pk_hex = hex::encode(&pk.0.serialize()[1..]); + assert_eq!(expected_pk_hex, pk_hex); + + let eth_addr: EthAddress = (&pk).into(); + let eth_addr_hex = hex::encode(eth_addr.0); + assert_eq!(expected_eth_addr_hex, eth_addr_hex); + } +} diff --git a/shared/src/types/storage.rs b/shared/src/types/storage.rs index fc87bc8d51..f60bb66ccf 100644 --- a/shared/src/types/storage.rs +++ b/shared/src/types/storage.rs @@ -274,7 +274,7 @@ impl Key { self.len() == 0 } - /// Returns a key of the validity predicate of the given address + /// Returns a key of the validity predicate of the given address. /// Only this function can push "?" segment for validity predicate pub fn validity_predicate(addr: &Address) -> Self { let mut segments = Self::from(addr.to_db_key()).segments; @@ -400,7 +400,7 @@ pub enum DbKeySeg { impl KeySeg for DbKeySeg { fn parse(mut string: String) -> Result { - // a separator should not included + // a separator should not be included if string.contains(KEY_SEGMENT_SEPARATOR) { return Err(Error::InvalidKeySeg(string)); } diff --git a/shared/src/types/token.rs b/shared/src/types/token.rs index f3e4cd4ed2..f3011514a1 100644 --- a/shared/src/types/token.rs +++ b/shared/src/types/token.rs @@ -393,3 +393,21 @@ mod tests { assert_eq!("0", zero.to_string()); } } + +/// Helpers for testing with addresses. +#[cfg(any(test, feature = "testing"))] +pub mod testing { + use proptest::prelude::*; + + use super::*; + + /// Generate an arbitrary token amount + pub fn arb_amount() -> impl Strategy { + any::().prop_map(Amount::from) + } + + /// Generate an arbitrary token amount up to and including given `max` value + pub fn arb_amount_ceiled(max: u64) -> impl Strategy { + (0..=max).prop_map(Amount::from) + } +} diff --git a/shared/src/types/transaction/mod.rs b/shared/src/types/transaction/mod.rs index a7d5ee864b..b9fabcfd6b 100644 --- a/shared/src/types/transaction/mod.rs +++ b/shared/src/types/transaction/mod.rs @@ -187,6 +187,11 @@ pub struct InitValidator { pub account_key: common::PublicKey, /// A key to be used for signing blocks and votes on blocks. pub consensus_key: common::PublicKey, + /// An Eth bridge governance public key + pub eth_cold_key: secp256k1::PublicKey, + /// An Eth bridge hot signing public key used for validator set updates and + /// cross-chain transactions + pub eth_hot_key: secp256k1::PublicKey, /// Public key to be written into the staking reward account's storage. /// This can be used for signature verification of transactions for the /// newly created account. diff --git a/tests/proptest-regressions/native_vp/pos.txt b/tests/proptest-regressions/native_vp/pos.txt new file mode 100644 index 0000000000..ad157e817b --- /dev/null +++ b/tests/proptest-regressions/native_vp/pos.txt @@ -0,0 +1,2 @@ +cc 65720acc67508ccd2fefc1ca42477075ae53a7d1e3c8f31324cfb8f06587457e +cc 45b2dd2ed9619ceef6135ee6ca34406621c8a6429ffa153bbda3ce79dd4e006c \ No newline at end of file diff --git a/tests/src/lib.rs b/tests/src/lib.rs index 1b75f83bdc..f1d2b812bc 100644 --- a/tests/src/lib.rs +++ b/tests/src/lib.rs @@ -9,8 +9,7 @@ mod vm_host_env; pub use vm_host_env::{ibc, tx, vp}; #[cfg(test)] mod e2e; -#[cfg(test)] -mod native_vp; +pub mod native_vp; pub mod storage; /// Using this import requires `tracing` and `tracing-subscriber` dependencies. 
diff --git a/tests/src/native_vp/mod.rs b/tests/src/native_vp/mod.rs index be450a7086..3fcce71f43 100644 --- a/tests/src/native_vp/mod.rs +++ b/tests/src/native_vp/mod.rs @@ -1,4 +1,4 @@ -mod pos; +pub mod pos; use namada::ledger::native_vp::{Ctx, NativeVp}; use namada::ledger::storage::mockdb::MockDB; diff --git a/tests/src/native_vp/pos.rs b/tests/src/native_vp/pos.rs index be1844c6cd..1fe56710c6 100644 --- a/tests/src/native_vp/pos.rs +++ b/tests/src/native_vp/pos.rs @@ -34,7 +34,7 @@ //! the modifications of its predecessor transition). //! //! The PoS storage modifications are modelled using -//! [`testing::PosStorageChange`]. +//! `testing::PosStorageChange`. //! //! - Bond: Requires a validator account in the state (the `#{validator}` //! segments in the keys below). Some of the storage change are optional, @@ -50,7 +50,7 @@ //! - Unbond: Requires a bond in the state (the `#{owner}` and `#{validator}` //! segments in the keys below must be the owner and a validator of an //! existing bond). The bond's total amount must be greater or equal to the -//! amount that' being unbonded. Some of the storage changes are optional, +//! amount that is being unbonded. Some of the storage changes are optional, //! which depends on whether the unbonding decreases voting power of the //! validator. //! - `#{PoS}/bond/#{owner}/#{validator}` @@ -99,15 +99,54 @@ //! - add arb invalid storage changes //! - add slashes +use namada::ledger::pos::namada_proof_of_stake::PosBase; +use namada::types::storage::Epoch; +use namada_vm_env::proof_of_stake::{ + staking_token_address, GenesisValidator, PosParams, +}; + +use crate::tx::tx_host_env; + +/// initialize proof-of-stake genesis with the given list of validators and +/// parameters. +pub fn init_pos( + genesis_validators: &[GenesisValidator], + params: &PosParams, + start_epoch: Epoch, +) { + tx_host_env::init(); + + tx_host_env::with(|tx_env| { + // Ensure that all the used + // addresses exist + tx_env.spawn_accounts([&staking_token_address()]); + for validator in genesis_validators { + tx_env.spawn_accounts([ + &validator.address, + &validator.staking_reward_address, + ]); + } + tx_env.storage.block.epoch = start_epoch; + // Initialize PoS storage + tx_env + .storage + .init_genesis( + params, + genesis_validators.iter(), + u64::from(start_epoch), + ) + .unwrap(); + }); +} + #[cfg(test)] mod tests { - use namada::ledger::pos::namada_proof_of_stake::PosBase; use namada::ledger::pos::PosParams; use namada::types::storage::Epoch; use namada::types::token; use namada_vm_env::proof_of_stake::parameters::testing::arb_pos_params; - use namada_vm_env::proof_of_stake::{staking_token_address, PosVP}; + use namada_vm_env::proof_of_stake::PosVP; use namada_vm_env::tx_prelude::Address; use proptest::prelude::*; use proptest::prop_state_machine; @@ -119,8 +158,9 @@ mod tests { arb_invalid_pos_action, arb_valid_pos_action, InvalidPosAction, ValidPosAction, }; + use super::*; use crate::native_vp::TestNativeVpEnv; - use crate::tx::{tx_host_env, TestTxEnv}; + use crate::tx::tx_host_env; prop_state_machine! 
{ #![proptest_config(Config { @@ -189,28 +229,8 @@ mod tests { ) -> Self::ConcreteState { println!(); println!("New test case"); - // Initialize the transaction env - let mut tx_env = TestTxEnv::default(); - - // Set the epoch - let storage = &mut tx_env.storage; - storage.block.epoch = initial_state.epoch; - - // Initialize PoS storage - storage - .init_genesis( - &initial_state.params, - [].into_iter(), - initial_state.epoch, - ) - .unwrap(); - - // Make sure that the staking token account exist - tx_env.spawn_accounts([staking_token_address()]); - - // Use the `tx_env` for host env calls - tx_host_env::set(tx_env); + init_pos(&[], &initial_state.params, initial_state.epoch); // The "genesis" block state for change in initial_state.committed_valid_actions { @@ -596,11 +616,14 @@ pub mod testing { #[derivative(Debug)] pub enum PosStorageChange { - /// Ensure that the account exists when initialing a valid new + /// Ensure that the account exists when initializing a valid new /// validator or delegation from a new owner SpawnAccount { address: Address, }, + /// Add tokens included in a new bond at given offset. Bonded tokens + /// are added at pipeline offset and unbonded tokens are added as + /// negative values at unbonding offset. Bond { owner: Address, validator: Address, @@ -1131,9 +1154,10 @@ pub mod testing { let amount: u64 = delta.try_into().unwrap(); let amount: token::Amount = amount.into(); let mut value = Bond { - deltas: HashMap::default(), + pos_deltas: HashMap::default(), + neg_deltas: Default::default(), }; - value.deltas.insert( + value.pos_deltas.insert( (current_epoch + offset.value(params)).into(), amount, ); @@ -1158,34 +1182,27 @@ pub mod testing { ); bonds } - None => Bonds::init(value, current_epoch, params), + None => Bonds::init_at_offset( + value, + current_epoch, + offset, + params, + ), } } else { let mut bonds = bonds.unwrap_or_else(|| { Bonds::init(Default::default(), current_epoch, params) }); let to_unbond: u64 = (-delta).try_into().unwrap(); - let mut to_unbond: token::Amount = to_unbond.into(); - let to_unbond = &mut to_unbond; - bonds.rev_update_while( - |bonds, _epoch| { - bonds.deltas.retain(|_epoch_start, bond_delta| { - if *to_unbond == 0.into() { - return true; - } - if to_unbond > bond_delta { - *to_unbond -= *bond_delta; - *bond_delta = 0.into(); - } else { - *bond_delta -= *to_unbond; - *to_unbond = 0.into(); - } - // Remove bonds with no tokens left - *bond_delta != 0.into() - }); - *to_unbond != 0.into() + let to_unbond: token::Amount = to_unbond.into(); + + bonds.add_at_offset( + Bond { + pos_deltas: Default::default(), + neg_deltas: to_unbond, }, current_epoch, + offset, params, ); bonds @@ -1217,7 +1234,7 @@ pub mod testing { && bond_epoch >= bonds.last_update().into() { if let Some(bond) = bonds.get_delta_at_epoch(bond_epoch) { - for (start_epoch, delta) in &bond.deltas { + for (start_epoch, delta) in &bond.pos_deltas { if delta >= &to_unbond { value.deltas.insert( ( @@ -1592,7 +1609,7 @@ pub mod testing { // any u64 but `0` let arb_delta = - prop_oneof![(-(u64::MAX as i128)..0), (1..=u64::MAX as i128),]; + prop_oneof![(-(u32::MAX as i128)..0), (1..=u32::MAX as i128),]; prop_oneof![ ( diff --git a/tests/src/vm_host_env/tx.rs b/tests/src/vm_host_env/tx.rs index 3a684e8382..382e1edc5d 100644 --- a/tests/src/vm_host_env/tx.rs +++ b/tests/src/vm_host_env/tx.rs @@ -101,7 +101,7 @@ impl TestTxEnv { } /// Fake accounts existence by initializating their VP storage. 
- /// This is needed for accounts that are being modified by a tx test to be + /// This is needed for accounts that are being modified by a tx test to /// pass account existence check in `tx_write` function. pub fn spawn_accounts( &mut self, diff --git a/vm_env/src/proof_of_stake.rs b/vm_env/src/proof_of_stake.rs index 8e4bba4223..59355d703c 100644 --- a/vm_env/src/proof_of_stake.rs +++ b/vm_env/src/proof_of_stake.rs @@ -62,6 +62,8 @@ pub fn init_validator( InitValidator { account_key, consensus_key, + eth_cold_key, + eth_hot_key, rewards_account_key, protocol_key, dkg_key, @@ -84,10 +86,14 @@ pub fn init_validator( let pk_key = key::pk_key(&rewards_address); tx::write(&pk_key.to_string(), &rewards_account_key); + let eth_cold_key = key::common::PublicKey::Secp256k1(eth_cold_key); + let eth_hot_key = key::common::PublicKey::Secp256k1(eth_hot_key); PoS.become_validator( &validator_address, &rewards_address, &consensus_key, + ð_cold_key, + ð_hot_key, current_epoch, )?; Ok((validator_address, rewards_address)) @@ -167,6 +173,20 @@ impl namada_proof_of_stake::PosReadOnly for PoS { fn read_total_voting_power(&self) -> TotalVotingPowers { tx::read(total_voting_power_key().to_string()).unwrap() } + + fn read_validator_eth_cold_key( + &self, + key: &Self::Address, + ) -> Option> { + tx::read(validator_eth_cold_key_key(key).to_string()) + } + + fn read_validator_eth_hot_key( + &self, + key: &Self::Address, + ) -> Option> { + tx::read(validator_eth_hot_key_key(key).to_string()) + } } impl namada_proof_of_stake::PosActions for PoS { @@ -258,4 +278,20 @@ impl namada_proof_of_stake::PosActions for PoS { ) { crate::token::tx::transfer(src, dest, token, amount) } + + fn write_validator_eth_cold_key( + &mut self, + address: &Self::Address, + value: types::ValidatorEthKey, + ) { + tx::write(validator_eth_cold_key_key(address).to_string(), &value) + } + + fn write_validator_eth_hot_key( + &self, + address: &Self::Address, + value: types::ValidatorEthKey, + ) { + tx::write(validator_eth_hot_key_key(address).to_string(), &value) + } } diff --git a/wasm/checksums.json b/wasm/checksums.json index 898f6e9763..de782ccbfd 100644 --- a/wasm/checksums.json +++ b/wasm/checksums.json @@ -1,19 +1,19 @@ { - "tx_bond.wasm": "tx_bond.16097490afa7378c79e6216751b20796cde3a9026c34255c3f1e5ec5a4c9482e.wasm", - "tx_from_intent.wasm": "tx_from_intent.f8d1937b17a3abaf7ea595526c870b3d57ddef8e0c1bc96f8e0a448864b186c7.wasm", + "tx_bond.wasm": "tx_bond.0ea9f2355a92449d897b5d6e4a9505765bb9656aaef05b9e85696c3ff45bdbfd.wasm", + "tx_from_intent.wasm": "tx_from_intent.4a9475467c6857c5fa126e7b877e8dd8b3c0a9c219d5f5e2991c09aedd41aa41.wasm", "tx_ibc.wasm": "tx_ibc.378b10551c0b22c2c892d24e2676ee5160d654e2e53a50e7925e0f2c6321497b.wasm", "tx_init_account.wasm": "tx_init_account.adab66c2b4d635e9c42133936aafb143363f91dddff2a60f94df504ffec951a6.wasm", "tx_init_nft.wasm": "tx_init_nft.d1065ebd80ba6ea97f29bc2268becf9ba3ba2952641992464f3e9e868df17447.wasm", "tx_init_proposal.wasm": "tx_init_proposal.184131576a579f9ece96460d1eb20e5970fcd149b0527c8e56b711e5c535aa5f.wasm", - "tx_init_validator.wasm": "tx_init_validator.2990747d24d467b56e19724c5d13df826a3aab83f7e1bf26558dbdf44e260f8a.wasm", + "tx_init_validator.wasm": "tx_init_validator.e2360fd8bc5d97acd66b2b06824f02afe13af2fd0ed661d2a7880501a8f82e8e.wasm", "tx_mint_nft.wasm": "tx_mint_nft.33db14dea4a03ff7508ca44f3ae956d83c0abceb3dae5be844668e54ac22b273.wasm", "tx_transfer.wasm": "tx_transfer.a601d62296f56f6b4dabb0a2ad082478d195e667c7469f363bdfd5fe41349bd8.wasm", - "tx_unbond.wasm": 
"tx_unbond.014cbf5b0aa3ac592c0a6940dd502ec8569a3af4d12782e3a5931c15dc13042f.wasm", + "tx_unbond.wasm": "tx_unbond.b5eec0e7a8263b1b809560b48b1bcf4701db7e0fede04e6405efcc0d398ff996.wasm", "tx_update_vp.wasm": "tx_update_vp.83d4caeb5a9ca3009cd899810493a6b87b4c07fa9ed36f297db99dc881fb9a1c.wasm", "tx_vote_proposal.wasm": "tx_vote_proposal.bcb5280be9dfeed0a7650ba5e4a3cebc2c19b76780fd74dcb345be3da766b64a.wasm", - "tx_withdraw.wasm": "tx_withdraw.8fc0a3439ee9ae66047c520519877bc1f540e0cb02abfa31afa8cce8cd069b6f.wasm", + "tx_withdraw.wasm": "tx_withdraw.d3717bdcd9da223262b2c228796bfb68d11da670ec8a5a4ecdc55138e886d76e.wasm", "vp_nft.wasm": "vp_nft.2c820c728d241b82bf0ed3c552ee9e7c046bceaa4f7b6f12d3236a1a3d7c1589.wasm", "vp_testnet_faucet.wasm": "vp_testnet_faucet.6e762f3fda8aa7a252e2b29a4a312db91ded062d6c18b8b489883733c89dc227.wasm", "vp_token.wasm": "vp_token.c45cc3848f12fc47713702dc206d1312ad740a6bbee7f141556714f6f89d4985.wasm", - "vp_user.wasm": "vp_user.d6cd2f4b5bc26f96df6aa300fddf4d25e1656920d59896209bd54ae8d407ecde.wasm" + "vp_user.wasm": "vp_user.b1660ec9b1125ea1ffef252215c686e65839fb212051a3d6d8acc0dcc9ba113b.wasm" } \ No newline at end of file diff --git a/wasm/tx_template/Cargo.lock b/wasm/tx_template/Cargo.lock index 4e85d68f2c..54ae82c5c8 100644 --- a/wasm/tx_template/Cargo.lock +++ b/wasm/tx_template/Cargo.lock @@ -1551,6 +1551,7 @@ dependencies = [ "tendermint", "tendermint-proto", "thiserror", + "tiny-keccak", "tonic-build", "tracing", "wasmer", @@ -2740,6 +2741,15 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25eb0ca3468fc0acc11828786797f6ef9aa1555e4a211a60d64cc8e4d1be47d6" +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + [[package]] name = "tinyvec" version = "1.5.1" diff --git a/wasm/vp_template/Cargo.lock b/wasm/vp_template/Cargo.lock index 25070a9319..961eab4636 100644 --- a/wasm/vp_template/Cargo.lock +++ b/wasm/vp_template/Cargo.lock @@ -1551,6 +1551,7 @@ dependencies = [ "tendermint", "tendermint-proto", "thiserror", + "tiny-keccak", "tonic-build", "tracing", "wasmer", @@ -2740,6 +2741,15 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25eb0ca3468fc0acc11828786797f6ef9aa1555e4a211a60d64cc8e4d1be47d6" +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + [[package]] name = "tinyvec" version = "1.5.1" diff --git a/wasm/wasm_source/Cargo.lock b/wasm/wasm_source/Cargo.lock index 269535735e..5cc30e1ee2 100644 --- a/wasm/wasm_source/Cargo.lock +++ b/wasm/wasm_source/Cargo.lock @@ -1551,6 +1551,7 @@ dependencies = [ "tendermint", "tendermint-proto", "thiserror", + "tiny-keccak", "tonic-build", "tracing", "wasmer", @@ -2766,6 +2767,15 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25eb0ca3468fc0acc11828786797f6ef9aa1555e4a211a60d64cc8e4d1be47d6" +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + [[package]] name = "tinyvec" version = "1.5.1" diff 
--git a/wasm/wasm_source/src/tx_bond.rs b/wasm/wasm_source/src/tx_bond.rs index 9a5309f927..ea3946bb25 100644 --- a/wasm/wasm_source/src/tx_bond.rs +++ b/wasm/wasm_source/src/tx_bond.rs @@ -17,3 +17,353 @@ fn apply_tx(tx_data: Vec) { panic!() } } + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + + use namada::ledger::pos::PosParams; + use namada::proto::Tx; + use namada::types::storage::Epoch; + use namada_tests::log::test; + use namada_tests::native_vp::pos::init_pos; + use namada_tests::native_vp::TestNativeVpEnv; + use namada_tests::tx::*; + use namada_tx_prelude::address::testing::{ + arb_established_address, arb_non_internal_address, + }; + use namada_tx_prelude::address::InternalAddress; + use namada_tx_prelude::key::testing::arb_common_keypair; + use namada_tx_prelude::key::RefTo; + use namada_tx_prelude::proof_of_stake::parameters::testing::arb_pos_params; + use namada_tx_prelude::token; + use namada_vp_prelude::proof_of_stake::types::{ + Bond, VotingPower, VotingPowerDelta, + }; + use namada_vp_prelude::proof_of_stake::{ + staking_token_address, BondId, GenesisValidator, PosVP, + }; + use proptest::prelude::*; + + use super::*; + + proptest! { + /// In this test we setup the ledger and PoS system with an arbitrary + /// initial state with 1 genesis validator and arbitrary PoS parameters. We then + /// generate an arbitrary bond that we'd like to apply. + /// + /// After we apply the bond, we check that all the storage values + /// in PoS system have been updated as expected and then we also check + /// that this transaction is accepted by the PoS validity predicate. + #[test] + fn test_tx_bond( + (initial_stake, bond) in arb_initial_stake_and_bond(), + // A key to sign the transaction + key in arb_common_keypair(), + pos_params in arb_pos_params()) { + test_tx_bond_aux(initial_stake, bond, key, pos_params) + } + } + + fn test_tx_bond_aux( + initial_stake: token::Amount, + bond: transaction::pos::Bond, + key: key::common::SecretKey, + pos_params: PosParams, + ) { + let staking_reward_address = address::testing::established_address_1(); + let consensus_key = key::testing::keypair_1().ref_to(); + let staking_reward_key = key::testing::keypair_2().ref_to(); + let eth_hot_key = key::testing::keypair_3().ref_to(); + let eth_cold_key = key::testing::keypair_4().ref_to(); + + let genesis_validators = [GenesisValidator { + address: bond.validator.clone(), + staking_reward_address, + tokens: initial_stake, + consensus_key, + staking_reward_key, + eth_hot_key, + eth_cold_key, + }]; + + init_pos(&genesis_validators[..], &pos_params, Epoch(0)); + + tx_host_env::with(|tx_env| { + if let Some(source) = &bond.source { + tx_env.spawn_accounts([source]); + } + + // Ensure that the bond's source has enough tokens for the bond + let target = bond.source.as_ref().unwrap_or(&bond.validator); + tx_env.credit_tokens(target, &staking_token_address(), bond.amount); + }); + + let tx_code = vec![]; + let tx_data = bond.try_to_vec().unwrap(); + let tx = Tx::new(tx_code, Some(tx_data)); + let signed_tx = tx.sign(&key); + let tx_data = signed_tx.data.unwrap(); + + // Read the data before the tx is executed + let pos_balance_key = token::balance_key( + &staking_token_address(), + &Address::Internal(InternalAddress::PoS), + ); + let pos_balance_pre: token::Amount = + read(&pos_balance_key.to_string()).expect("PoS must have balance"); + assert_eq!(pos_balance_pre, initial_stake); + let total_voting_powers_pre = PoS.read_total_voting_power(); + let validator_sets_pre = PoS.read_validator_set(); + let 
validator_voting_powers_pre = + PoS.read_validator_voting_power(&bond.validator).unwrap(); + + apply_tx(tx_data); + + // Read the data after the tx is executed + + // The following storage keys should be updated: + + // - `#{PoS}/validator/#{validator}/total_deltas` + let total_delta_post = PoS.read_validator_total_deltas(&bond.validator); + for epoch in 0..pos_params.pipeline_len { + assert_eq!( + total_delta_post.as_ref().unwrap().get(epoch), + Some(initial_stake.into()), + "The total deltas before the pipeline offset must not change \ + - checking in epoch: {epoch}" + ); + } + for epoch in pos_params.pipeline_len..=pos_params.unbonding_len { + let expected_stake = + i128::from(initial_stake) + i128::from(bond.amount); + assert_eq!( + total_delta_post.as_ref().unwrap().get(epoch), + Some(expected_stake), + "The total deltas at and after the pipeline offset epoch must \ + be incremented by the bonded amount - checking in epoch: \ + {epoch}" + ); + } + + // - `#{staking_token}/balance/#{PoS}` + let pos_balance_post: token::Amount = + read(&pos_balance_key.to_string()).unwrap(); + assert_eq!(pos_balance_pre + bond.amount, pos_balance_post); + + // - `#{PoS}/bond/#{owner}/#{validator}` + let bond_src = bond + .source + .clone() + .unwrap_or_else(|| bond.validator.clone()); + let bond_id = BondId { + validator: bond.validator.clone(), + source: bond_src, + }; + let bonds_post = PoS.read_bond(&bond_id).unwrap(); + match &bond.source { + Some(_) => { + // This bond was a delegation + for epoch in 0..pos_params.pipeline_len { + let bond: Option> = + bonds_post.get(epoch); + assert!( + bond.is_none(), + "Delegation before pipeline offset should be empty - \ + checking epoch {epoch}" + ); + } + for epoch in pos_params.pipeline_len..=pos_params.unbonding_len + { + let start_epoch = + namada_tx_prelude::proof_of_stake::types::Epoch::from( + pos_params.pipeline_len, + ); + let expected_bond = + HashMap::from_iter([(start_epoch, bond.amount)]); + let bond: Bond = + bonds_post.get(epoch).unwrap(); + assert_eq!( + bond.pos_deltas, expected_bond, + "Delegation at and after pipeline offset should be \ + equal to the bonded amount - checking epoch {epoch}" + ); + } + } + None => { + let genesis_epoch = + namada_tx_prelude::proof_of_stake::types::Epoch::from(0); + // It was a self-bond + for epoch in 0..pos_params.pipeline_len { + let expected_bond = + HashMap::from_iter([(genesis_epoch, initial_stake)]); + let bond: Bond = + bonds_post.get(epoch).expect( + "Genesis validator should already have self-bond", + ); + assert_eq!( + bond.pos_deltas, expected_bond, + "Delegation before pipeline offset should be equal to \ + the genesis initial stake - checking epoch {epoch}" + ); + } + for epoch in pos_params.pipeline_len..=pos_params.unbonding_len + { + let start_epoch = + namada_tx_prelude::proof_of_stake::types::Epoch::from( + pos_params.pipeline_len, + ); + let expected_bond = HashMap::from_iter([ + (genesis_epoch, initial_stake), + (start_epoch, bond.amount), + ]); + let bond: Bond = + bonds_post.get(epoch).unwrap(); + assert_eq!( + bond.pos_deltas, expected_bond, + "Delegation at and after pipeline offset should \ + contain genesis stake and the bonded amount - \ + checking epoch {epoch}" + ); + } + } + } + + // If the voting power from validator's initial stake is different + // from the voting power after the bond is applied, we expect the + // following 3 fields to be updated: + // - `#{PoS}/total_voting_power` (optional) + // - `#{PoS}/validator_set` (optional) + // - 
`#{PoS}/validator/#{validator}/voting_power` (optional) + let total_voting_powers_post = PoS.read_total_voting_power(); + let validator_sets_post = PoS.read_validator_set(); + let validator_voting_powers_post = + PoS.read_validator_voting_power(&bond.validator).unwrap(); + + let voting_power_pre = + VotingPower::from_tokens(initial_stake, &pos_params); + let voting_power_post = + VotingPower::from_tokens(initial_stake + bond.amount, &pos_params); + if voting_power_pre == voting_power_post { + // None of the optional storage fields should have been updated + assert_eq!(total_voting_powers_pre, total_voting_powers_post); + assert_eq!(validator_sets_pre, validator_sets_post); + assert_eq!( + validator_voting_powers_pre, + validator_voting_powers_post + ); + } else { + for epoch in 0..pos_params.pipeline_len { + let total_voting_power_pre = total_voting_powers_pre.get(epoch); + let total_voting_power_post = + total_voting_powers_post.get(epoch); + assert_eq!( + total_voting_power_pre, total_voting_power_post, + "Total voting power before pipeline offset must not \ + change - checking epoch {epoch}" + ); + + let validator_set_pre = validator_sets_pre.get(epoch); + let validator_set_post = validator_sets_post.get(epoch); + assert_eq!( + validator_set_pre, validator_set_post, + "Validator set before pipeline offset must not change - \ + checking epoch {epoch}" + ); + + let validator_voting_power_pre = + validator_voting_powers_pre.get(epoch); + let validator_voting_power_post = + validator_voting_powers_post.get(epoch); + assert_eq!( + validator_voting_power_pre, validator_voting_power_post, + "Validator's voting power before pipeline offset must not \ + change - checking epoch {epoch}" + ); + } + for epoch in pos_params.pipeline_len..=pos_params.unbonding_len { + let total_voting_power_pre = + total_voting_powers_pre.get(epoch).unwrap(); + let total_voting_power_post = + total_voting_powers_post.get(epoch).unwrap(); + assert_ne!( + total_voting_power_pre, total_voting_power_post, + "Total voting power at and after pipeline offset must \ + have changed - checking epoch {epoch}" + ); + + let validator_set_pre = validator_sets_pre.get(epoch).unwrap(); + let validator_set_post = + validator_sets_post.get(epoch).unwrap(); + assert_ne!( + validator_set_pre, validator_set_post, + "Validator set at and after pipeline offset must have \ + changed - checking epoch {epoch}" + ); + + let validator_voting_power_pre = + validator_voting_powers_pre.get(epoch).unwrap(); + let validator_voting_power_post = + validator_voting_powers_post.get(epoch).unwrap(); + assert_ne!( + validator_voting_power_pre, validator_voting_power_post, + "Validator's voting power at and after pipeline offset \ + must have changed - checking epoch {epoch}" + ); + + // Expected voting power from the model ... + let expected_validator_voting_power: VotingPowerDelta = + voting_power_post.try_into().unwrap(); + // ... must match the voting power read from storage + assert_eq!( + validator_voting_power_post, + expected_validator_voting_power + ); + } + } + + // Use the tx_env to run PoS VP + let tx_env = tx_host_env::take(); + let vp_env = TestNativeVpEnv::new(tx_env); + let result = vp_env.validate_tx(PosVP::new, |_tx_data| {}); + let result = + result.expect("Validation of valid changes must not fail!"); + assert!( + result, + "PoS Validity predicate must accept this transaction" + ); + } + + prop_compose! 
{ + /// Generates an initial validator stake and a bond, while making sure + /// that the `initial_stake + bond.amount <= u64::MAX` to avoid + /// overflow. + fn arb_initial_stake_and_bond() + // Generate initial stake + (initial_stake in token::testing::arb_amount()) + // Use the initial stake to limit the bond amount + (bond in arb_bond(u64::MAX - u64::from(initial_stake)), + // Use the generated initial stake too + initial_stake in Just(initial_stake), + ) -> (token::Amount, transaction::pos::Bond) { + (initial_stake, bond) + } + } + + fn arb_bond( + max_amount: u64, + ) -> impl Strategy { + ( + arb_established_address(), + prop::option::of(arb_non_internal_address()), + token::testing::arb_amount_ceiled(max_amount), + ) + .prop_map(|(validator, source, amount)| { + transaction::pos::Bond { + validator: Address::Established(validator), + amount, + source, + } + }) + } +} diff --git a/wasm/wasm_source/src/tx_unbond.rs b/wasm/wasm_source/src/tx_unbond.rs index 5d2662ed5c..72697c58c0 100644 --- a/wasm/wasm_source/src/tx_unbond.rs +++ b/wasm/wasm_source/src/tx_unbond.rs @@ -18,3 +18,404 @@ fn apply_tx(tx_data: Vec) { panic!() } } + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + + use namada::ledger::pos::PosParams; + use namada::proto::Tx; + use namada::types::storage::Epoch; + use namada_tests::log::test; + use namada_tests::native_vp::pos::init_pos; + use namada_tests::native_vp::TestNativeVpEnv; + use namada_tests::tx::*; + use namada_tx_prelude::address::InternalAddress; + use namada_tx_prelude::key::testing::arb_common_keypair; + use namada_tx_prelude::key::RefTo; + use namada_tx_prelude::proof_of_stake::parameters::testing::arb_pos_params; + use namada_tx_prelude::token; + use namada_vp_prelude::proof_of_stake::types::{ + Bond, Unbond, VotingPower, VotingPowerDelta, + }; + use namada_vp_prelude::proof_of_stake::{ + staking_token_address, BondId, GenesisValidator, PosVP, + }; + use proptest::prelude::*; + + use super::*; + + proptest! { + /// In this test we setup the ledger and PoS system with an arbitrary + /// initial state with 1 genesis validator, a delegation bond if the + /// unbond is for a delegation, arbitrary PoS parameters, and + /// we generate an arbitrary unbond that we'd like to apply. + /// + /// After we apply the unbond, we check that all the storage values + /// in PoS system have been updated as expected and then we also check + /// that this transaction is accepted by the PoS validity predicate. 
+ #[test] + fn test_tx_unbond( + (initial_stake, unbond) in arb_initial_stake_and_unbond(), + // A key to sign the transaction + key in arb_common_keypair(), + pos_params in arb_pos_params()) { + test_tx_unbond_aux(initial_stake, unbond, key, pos_params) + } + } + + fn test_tx_unbond_aux( + initial_stake: token::Amount, + unbond: transaction::pos::Unbond, + key: key::common::SecretKey, + pos_params: PosParams, + ) { + let is_delegation = matches!( + &unbond.source, Some(source) if *source != unbond.validator); + let staking_reward_address = address::testing::established_address_1(); + let consensus_key = key::testing::keypair_1().ref_to(); + let staking_reward_key = key::testing::keypair_2().ref_to(); + let eth_hot_key = key::testing::keypair_3().ref_to(); + let eth_cold_key = key::testing::keypair_4().ref_to(); + + let genesis_validators = [GenesisValidator { + address: unbond.validator.clone(), + staking_reward_address, + tokens: if is_delegation { + // If we're unbonding a delegation, we'll give the initial stake + // to the delegation instead of the validator + token::Amount::default() + } else { + initial_stake + }, + consensus_key, + staking_reward_key, + eth_hot_key, + eth_cold_key, + }]; + + init_pos(&genesis_validators[..], &pos_params, Epoch(0)); + + tx_host_env::with(|tx_env| { + if is_delegation { + let source = unbond.source.as_ref().unwrap(); + tx_env.spawn_accounts([source]); + + // To allow to unbond delegation, there must be a delegation + // bond first. + // First, credit the bond's source with the initial stake, + // before we initialize the bond below + tx_env.credit_tokens( + source, + &staking_token_address(), + initial_stake, + ); + } + }); + + if is_delegation { + // Initialize the delegation - unlike genesis validator's self-bond, + // this happens at pipeline offset + namada_tx_prelude::proof_of_stake::bond_tokens( + unbond.source.as_ref(), + &unbond.validator, + initial_stake, + ) + .unwrap(); + } + tx_host_env::commit_tx_and_block(); + + let tx_code = vec![]; + let tx_data = unbond.try_to_vec().unwrap(); + let tx = Tx::new(tx_code, Some(tx_data)); + let signed_tx = tx.sign(&key); + let tx_data = signed_tx.data.unwrap(); + + let unbond_src = unbond + .source + .clone() + .unwrap_or_else(|| unbond.validator.clone()); + let unbond_id = BondId { + validator: unbond.validator.clone(), + source: unbond_src, + }; + + let pos_balance_key = token::balance_key( + &staking_token_address(), + &Address::Internal(InternalAddress::PoS), + ); + let pos_balance_pre: token::Amount = + read(&pos_balance_key.to_string()).expect("PoS must have balance"); + assert_eq!(pos_balance_pre, initial_stake); + let total_voting_powers_pre = PoS.read_total_voting_power(); + let validator_sets_pre = PoS.read_validator_set(); + let validator_voting_powers_pre = + PoS.read_validator_voting_power(&unbond.validator).unwrap(); + let bonds_pre = PoS.read_bond(&unbond_id).unwrap(); + dbg!(&bonds_pre); + + apply_tx(tx_data); + + // Read the data after the tx is executed + + // The following storage keys should be updated: + + // - `#{PoS}/validator/#{validator}/total_deltas` + let total_delta_post = + PoS.read_validator_total_deltas(&unbond.validator); + + let expected_deltas_at_pipeline = if is_delegation { + // When this is a delegation, there will be no bond until pipeline + 0.into() + } else { + // Before pipeline offset, there can only be self-bond + initial_stake + }; + + // Before pipeline offset, there can only be self-bond for genesis + // validator. 
In case of a delegation the state is setup so that there + // is no bond until pipeline offset. + for epoch in 0..pos_params.pipeline_len { + assert_eq!( + total_delta_post.as_ref().unwrap().get(epoch), + Some(expected_deltas_at_pipeline.into()), + "The total deltas before the pipeline offset must not change \ + - checking in epoch: {epoch}" + ); + } + + // At and after pipeline offset, there can be either delegation or + // self-bond, both of which are initialized to the same `initial_stake` + for epoch in pos_params.pipeline_len..pos_params.unbonding_len { + assert_eq!( + total_delta_post.as_ref().unwrap().get(epoch), + Some(initial_stake.into()), + "The total deltas before the unbonding offset must not change \ + - checking in epoch: {epoch}" + ); + } + + { + let epoch = pos_params.unbonding_len + 1; + let expected_stake = + i128::from(initial_stake) - i128::from(unbond.amount); + assert_eq!( + total_delta_post.as_ref().unwrap().get(epoch), + Some(expected_stake), + "The total deltas after the unbonding offset epoch must be \ + decremented by the unbonded amount - checking in epoch: \ + {epoch}" + ); + } + + // - `#{staking_token}/balance/#{PoS}` + let pos_balance_post: token::Amount = + read(&pos_balance_key.to_string()).unwrap(); + assert_eq!( + pos_balance_pre, pos_balance_post, + "Unbonding doesn't affect PoS system balance" + ); + + // - `#{PoS}/unbond/#{owner}/#{validator}` + let unbonds_post = PoS.read_unbond(&unbond_id).unwrap(); + let bonds_post = PoS.read_bond(&unbond_id).unwrap(); + for epoch in 0..pos_params.unbonding_len { + let unbond: Option> = unbonds_post.get(epoch); + + assert!( + unbond.is_none(), + "There should be no unbond until unbonding offset - checking \ + epoch {epoch}" + ); + } + let start_epoch = match &unbond.source { + Some(_) => { + // This bond was a delegation + namada_tx_prelude::proof_of_stake::types::Epoch::from( + pos_params.pipeline_len, + ) + } + None => { + // This bond was a genesis validator self-bond + namada_tx_prelude::proof_of_stake::types::Epoch::default() + } + }; + let end_epoch = namada_tx_prelude::proof_of_stake::types::Epoch::from( + pos_params.unbonding_len - 1, + ); + + let expected_unbond = + HashMap::from_iter([((start_epoch, end_epoch), unbond.amount)]); + let actual_unbond: Unbond = + unbonds_post.get(pos_params.unbonding_len).unwrap(); + assert_eq!( + actual_unbond.deltas, expected_unbond, + "Delegation at unbonding offset should be equal to the unbonded \ + amount" + ); + + for epoch in pos_params.pipeline_len..pos_params.unbonding_len { + let bond: Bond = bonds_post.get(epoch).unwrap(); + let expected_bond = + HashMap::from_iter([(start_epoch, initial_stake)]); + assert_eq!( + bond.pos_deltas, expected_bond, + "Before unbonding offset, the bond should be untouched, \ + checking epoch {epoch}" + ); + } + { + let epoch = pos_params.unbonding_len + 1; + let bond: Bond = bonds_post.get(epoch).unwrap(); + let expected_bond = + HashMap::from_iter([(start_epoch, initial_stake)]); + assert_eq!( + bond.pos_deltas, expected_bond, + "At unbonding offset, the pos deltas should not change, \ + checking epoch {epoch}" + ); + assert_eq!( + bond.neg_deltas, unbond.amount, + "At unbonding offset, the unbonded amount should have been \ + deducted, checking epoch {epoch}" + ) + } + // If the voting power from validator's initial stake is different + // from the voting power after the bond is applied, we expect the + // following 3 fields to be updated: + // - `#{PoS}/total_voting_power` (optional) + // - `#{PoS}/validator_set` (optional) + // - 
`#{PoS}/validator/#{validator}/voting_power` (optional) + let total_voting_powers_post = PoS.read_total_voting_power(); + let validator_sets_post = PoS.read_validator_set(); + let validator_voting_powers_post = + PoS.read_validator_voting_power(&unbond.validator).unwrap(); + + let voting_power_pre = + VotingPower::from_tokens(initial_stake, &pos_params); + let voting_power_post = VotingPower::from_tokens( + initial_stake - unbond.amount, + &pos_params, + ); + if voting_power_pre == voting_power_post { + // None of the optional storage fields should have been updated + assert_eq!(total_voting_powers_pre, total_voting_powers_post); + assert_eq!(validator_sets_pre, validator_sets_post); + assert_eq!( + validator_voting_powers_pre, + validator_voting_powers_post + ); + } else { + for epoch in 0..pos_params.unbonding_len { + let total_voting_power_pre = total_voting_powers_pre.get(epoch); + let total_voting_power_post = + total_voting_powers_post.get(epoch); + assert_eq!( + total_voting_power_pre, total_voting_power_post, + "Total voting power before pipeline offset must not \ + change - checking epoch {epoch}" + ); + + let validator_set_pre = validator_sets_pre.get(epoch); + let validator_set_post = validator_sets_post.get(epoch); + assert_eq!( + validator_set_pre, validator_set_post, + "Validator set before pipeline offset must not change - \ + checking epoch {epoch}" + ); + + let validator_voting_power_pre = + validator_voting_powers_pre.get(epoch); + let validator_voting_power_post = + validator_voting_powers_post.get(epoch); + assert_eq!( + validator_voting_power_pre, validator_voting_power_post, + "Validator's voting power before pipeline offset must not \ + change - checking epoch {epoch}" + ); + } + { + let epoch = pos_params.unbonding_len; + let total_voting_power_pre = + total_voting_powers_pre.get(epoch).unwrap(); + let total_voting_power_post = + total_voting_powers_post.get(epoch).unwrap(); + assert_ne!( + total_voting_power_pre, total_voting_power_post, + "Total voting power at and after pipeline offset must \ + have changed - checking epoch {epoch}" + ); + + let validator_set_pre = validator_sets_pre.get(epoch).unwrap(); + let validator_set_post = + validator_sets_post.get(epoch).unwrap(); + assert_ne!( + validator_set_pre, validator_set_post, + "Validator set at and after pipeline offset must have \ + changed - checking epoch {epoch}" + ); + + let validator_voting_power_pre = + validator_voting_powers_pre.get(epoch).unwrap(); + let validator_voting_power_post = + validator_voting_powers_post.get(epoch).unwrap(); + assert_ne!( + validator_voting_power_pre, validator_voting_power_post, + "Validator's voting power at and after pipeline offset \ + must have changed - checking epoch {epoch}" + ); + + // Expected voting power from the model ... + let expected_validator_voting_power: VotingPowerDelta = + voting_power_post.try_into().unwrap(); + // ... 
must match the voting power read from storage + assert_eq!( + validator_voting_power_post, + expected_validator_voting_power + ); + } + } + + // Use the tx_env to run PoS VP + let tx_env = tx_host_env::take(); + let vp_env = TestNativeVpEnv::new(tx_env); + let result = vp_env.validate_tx(PosVP::new, |_tx_data| {}); + let result = + result.expect("Validation of valid changes must not fail!"); + assert!( + result, + "PoS Validity predicate must accept this transaction" + ); + } + + fn arb_initial_stake_and_unbond() + -> impl Strategy { + // Generate initial stake + token::testing::arb_amount().prop_flat_map(|initial_stake| { + // Use the initial stake to limit the bond amount + let unbond = arb_unbond(u64::from(initial_stake)); + // Use the generated initial stake too + (Just(initial_stake), unbond) + }) + } + + /// Generates an initial validator stake and an unbond, while making sure + /// that the `initial_stake >= unbond.amount`. + fn arb_unbond( + max_amount: u64, + ) -> impl Strategy { + ( + address::testing::arb_established_address(), + prop::option::of(address::testing::arb_non_internal_address()), + token::testing::arb_amount_ceiled(max_amount), + ) + .prop_map(|(validator, source, amount)| { + let validator = Address::Established(validator); + transaction::pos::Unbond { + validator, + amount, + source, + } + }) + } +} diff --git a/wasm/wasm_source/src/tx_withdraw.rs b/wasm/wasm_source/src/tx_withdraw.rs index 27bd984a66..fae57b94f8 100644 --- a/wasm/wasm_source/src/tx_withdraw.rs +++ b/wasm/wasm_source/src/tx_withdraw.rs @@ -21,3 +21,211 @@ fn apply_tx(tx_data: Vec) { } } } + +#[cfg(test)] +mod tests { + use namada::ledger::pos::PosParams; + use namada::proto::Tx; + use namada::types::storage::Epoch; + use namada_tests::log::test; + use namada_tests::native_vp::pos::init_pos; + use namada_tests::native_vp::TestNativeVpEnv; + use namada_tests::tx::*; + use namada_tx_prelude::address::testing::{ + arb_established_address, arb_non_internal_address, + }; + use namada_tx_prelude::address::InternalAddress; + use namada_tx_prelude::key::testing::arb_common_keypair; + use namada_tx_prelude::key::RefTo; + use namada_tx_prelude::proof_of_stake::parameters::testing::arb_pos_params; + use namada_vp_prelude::proof_of_stake::{ + staking_token_address, BondId, GenesisValidator, PosVP, + }; + use proptest::prelude::*; + + use super::*; + + proptest! { + /// In this test we setup the ledger and PoS system with an arbitrary + /// initial state with 1 genesis validator, a delegation bond if the + /// withdrawal is for a delegation, arbitrary PoS parameters, and + /// we generate an arbitrary withdrawal that we'd like to apply. + /// + /// After we apply the withdrawal, we're checking that all the storage + /// values in PoS system have been updated as expected and then we also + /// check that this transaction is accepted by the PoS validity + /// predicate. 
+ #[test] + fn test_tx_withdraw( + (initial_stake, unbonded_amount) in arb_initial_stake_and_unbonded_amount(), + withdraw in arb_withdraw(), + // A key to sign the transaction + key in arb_common_keypair(), + pos_params in arb_pos_params()) { + test_tx_withdraw_aux(initial_stake, unbonded_amount, withdraw, key, + pos_params) + } + } + + fn test_tx_withdraw_aux( + initial_stake: token::Amount, + unbonded_amount: token::Amount, + withdraw: transaction::pos::Withdraw, + key: key::common::SecretKey, + pos_params: PosParams, + ) { + let is_delegation = matches!( + &withdraw.source, Some(source) if *source != withdraw.validator); + let staking_reward_address = address::testing::established_address_1(); + let consensus_key = key::testing::keypair_1().ref_to(); + let staking_reward_key = key::testing::keypair_2().ref_to(); + let eth_hot_key = key::testing::keypair_3().ref_to(); + let eth_cold_key = key::testing::keypair_4().ref_to(); + + let genesis_validators = [GenesisValidator { + address: withdraw.validator.clone(), + staking_reward_address, + tokens: if is_delegation { + // If we're withdrawing a delegation, we'll give the initial + // stake to the delegation instead of the + // validator + token::Amount::default() + } else { + initial_stake + }, + consensus_key, + staking_reward_key, + eth_hot_key, + eth_cold_key, + }]; + + init_pos(&genesis_validators[..], &pos_params, Epoch(0)); + + tx_host_env::with(|tx_env| { + if is_delegation { + let source = withdraw.source.as_ref().unwrap(); + tx_env.spawn_accounts([source]); + + // To allow to unbond delegation, there must be a delegation + // bond first. + // First, credit the bond's source with the initial stake, + // before we initialize the bond below + tx_env.credit_tokens( + source, + &staking_token_address(), + initial_stake, + ); + } + }); + + if is_delegation { + // Initialize the delegation - unlike genesis validator's self-bond, + // this happens at pipeline offset + namada_tx_prelude::proof_of_stake::bond_tokens( + withdraw.source.as_ref(), + &withdraw.validator, + initial_stake, + ) + .unwrap(); + } + + // Unbond the `unbonded_amount` at the starting epoch 0 + namada_tx_prelude::proof_of_stake::unbond_tokens( + withdraw.source.as_ref(), + &withdraw.validator, + unbonded_amount, + ) + .unwrap(); + + tx_host_env::commit_tx_and_block(); + + // Fast forward to unbonding offset epoch so that it's possible to + // withdraw the unbonded tokens + tx_host_env::with(|env| { + for _ in 0..pos_params.unbonding_len { + env.storage.block.epoch = env.storage.block.epoch.next(); + } + }); + assert_eq!( + tx_host_env::with(|env| env.storage.block.epoch), + Epoch(pos_params.unbonding_len) + ); + + let tx_code = vec![]; + let tx_data = withdraw.try_to_vec().unwrap(); + let tx = Tx::new(tx_code, Some(tx_data)); + let signed_tx = tx.sign(&key); + let tx_data = signed_tx.data.unwrap(); + + // Read data before we apply tx: + let pos_balance_key = token::balance_key( + &staking_token_address(), + &Address::Internal(InternalAddress::PoS), + ); + let pos_balance_pre: token::Amount = + read(&pos_balance_key.to_string()).expect("PoS must have balance"); + assert_eq!(pos_balance_pre, initial_stake); + let unbond_src = withdraw + .source + .clone() + .unwrap_or_else(|| withdraw.validator.clone()); + let unbond_id = BondId { + validator: withdraw.validator, + source: unbond_src, + }; + let unbonds_pre = PoS.read_unbond(&unbond_id).unwrap(); + assert_eq!( + unbonds_pre.get(pos_params.unbonding_len).unwrap().sum(), + unbonded_amount + ); + + apply_tx(tx_data); + + // 
Read the data after the tx is executed + let unbonds_post = PoS.read_unbond(&unbond_id); + assert!( + unbonds_post.is_none(), + "Because we're withdrawing the full unbonded amount, there should be \ + no unbonds left" + ); + let pos_balance_post: token::Amount = + read(&pos_balance_key.to_string()).expect("PoS must have balance"); + assert_eq!(pos_balance_pre - pos_balance_post, unbonded_amount); + + // Use the tx_env to run PoS VP + let tx_env = tx_host_env::take(); + let vp_env = TestNativeVpEnv::new(tx_env); + let result = vp_env.validate_tx(PosVP::new, |_tx_data| {}); + let result = + result.expect("Validation of valid changes must not fail!"); + assert!( + result, + "PoS Validity predicate must accept this transaction" + ); + } + + fn arb_initial_stake_and_unbonded_amount() + -> impl Strategy { + // Generate initial stake + token::testing::arb_amount().prop_flat_map(|initial_stake| { + // Use the initial stake to limit the unbonded amount from the stake + let unbonded_amount = + token::testing::arb_amount_ceiled(initial_stake.into()); + // Use the generated initial stake too + (Just(initial_stake), unbonded_amount) + }) + } + + fn arb_withdraw() -> impl Strategy { + ( + arb_established_address(), + prop::option::of(arb_non_internal_address()), + ) + .prop_map(|(validator, source)| { + transaction::pos::Withdraw { + validator: Address::Established(validator), + source, + } + }) + } +} diff --git a/wasm_for_tests/wasm_source/Cargo.lock b/wasm_for_tests/wasm_source/Cargo.lock index 9df5195b2f..4750eb74cc 100644 --- a/wasm_for_tests/wasm_source/Cargo.lock +++ b/wasm_for_tests/wasm_source/Cargo.lock @@ -1357,7 +1357,7 @@ dependencies = [ [[package]] name = "libsecp256k1" version = "0.7.0" -source = "git+https://github.com/brentstone/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" +source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" dependencies = [ "arrayref", "base64", @@ -1373,7 +1373,7 @@ dependencies = [ [[package]] name = "libsecp256k1-core" version = "0.3.0" -source = "git+https://github.com/brentstone/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" +source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" dependencies = [ "crunchy", "digest 0.9.0", @@ -1383,7 +1383,7 @@ dependencies = [ [[package]] name = "libsecp256k1-gen-ecmult" version = "0.3.0" -source = "git+https://github.com/brentstone/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" +source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" dependencies = [ "libsecp256k1-core", ] @@ -1391,7 +1391,7 @@ dependencies = [ [[package]] name = "libsecp256k1-gen-genmult" version = "0.3.0" -source = "git+https://github.com/brentstone/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" +source = "git+https://github.com/heliaxdev/libsecp256k1?rev=bbb3bd44a49db361f21d9db80f9a087c194c0ae9#bbb3bd44a49db361f21d9db80f9a087c194c0ae9" dependencies = [ "libsecp256k1-core", ] @@ -1527,7 +1527,7 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "namada" -version = "0.7.0" 
+version = "0.7.1" dependencies = [ "ark-bls12-381", "ark-serialize", @@ -1562,6 +1562,7 @@ dependencies = [ "tendermint", "tendermint-proto", "thiserror", + "tiny-keccak", "tonic-build", "tracing", "wasmer", @@ -1576,7 +1577,7 @@ dependencies = [ [[package]] name = "namada_macros" -version = "0.7.0" +version = "0.7.1" dependencies = [ "quote", "syn", @@ -1584,7 +1585,7 @@ dependencies = [ [[package]] name = "namada_proof_of_stake" -version = "0.7.0" +version = "0.7.1" dependencies = [ "borsh", "proptest", @@ -1593,7 +1594,7 @@ dependencies = [ [[package]] name = "namada_tests" -version = "0.7.0" +version = "0.7.1" dependencies = [ "chrono", "concat-idents", @@ -1611,7 +1612,7 @@ dependencies = [ [[package]] name = "namada_tx_prelude" -version = "0.7.0" +version = "0.7.1" dependencies = [ "namada_vm_env", "sha2 0.10.2", @@ -1619,7 +1620,7 @@ dependencies = [ [[package]] name = "namada_vm_env" -version = "0.7.0" +version = "0.7.1" dependencies = [ "borsh", "hex", @@ -1629,7 +1630,7 @@ dependencies = [ [[package]] name = "namada_vp_prelude" -version = "0.7.0" +version = "0.7.1" dependencies = [ "namada_vm_env", "sha2 0.10.2", @@ -1637,7 +1638,7 @@ dependencies = [ [[package]] name = "namada_wasm_for_tests" -version = "0.7.0" +version = "0.7.1" dependencies = [ "borsh", "getrandom", @@ -2772,6 +2773,15 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42657b1a6f4d817cda8e7a0ace261fe0cc946cf3a80314390b22cc61ae080792" +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + [[package]] name = "tinyvec" version = "1.5.1"