diff --git a/apps/src/lib/config/genesis/chain.rs b/apps/src/lib/config/genesis/chain.rs index 0f7b9cdd7e..08ec89f4e1 100644 --- a/apps/src/lib/config/genesis/chain.rs +++ b/apps/src/lib/config/genesis/chain.rs @@ -334,6 +334,8 @@ impl Finalized { light_client_attack_min_slash_rate, cubic_slashing_window_length, validator_stake_threshold, + liveness_window_check, + liveness_threshold, } = self.parameters.pos_params.clone(); namada::proof_of_stake::parameters::PosParams { @@ -350,6 +352,8 @@ impl Finalized { light_client_attack_min_slash_rate, cubic_slashing_window_length, validator_stake_threshold, + liveness_window_check, + liveness_threshold, }, max_proposal_period: self.parameters.gov_params.max_proposal_period, } diff --git a/apps/src/lib/config/genesis/templates.rs b/apps/src/lib/config/genesis/templates.rs index d4012163e8..fbc810d79a 100644 --- a/apps/src/lib/config/genesis/templates.rs +++ b/apps/src/lib/config/genesis/templates.rs @@ -400,6 +400,12 @@ pub struct PosParams { /// The minimum amount of bonded tokens that a validator needs to be in /// either the `consensus` or `below_capacity` validator sets pub validator_stake_threshold: token::Amount, + /// The length, in blocks, of the sliding window for consensus validators + /// inactivity verification + pub liveness_window_check: u64, + /// The minimum required activity of consensus validators, in percentage, + /// over the `liveness_window_check` + pub liveness_threshold: Dec, } #[derive( diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index 35c23b4ebe..22d1e80e6e 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -88,6 +88,9 @@ where .expect("Failed tx hashes finalization") } + let pos_params = + namada_proof_of_stake::read_pos_params(&self.wl_storage)?; + if new_epoch { namada::ledger::storage::update_allowed_conversions( &mut self.wl_storage, @@ -97,24 +100,35 @@ where // Copy the new_epoch + pipeline_len - 1 validator set into // new_epoch + pipeline_len - let pos_params = - namada_proof_of_stake::read_pos_params(&self.wl_storage)?; namada_proof_of_stake::copy_validator_sets_and_positions( &mut self.wl_storage, &pos_params, current_epoch, current_epoch + pos_params.pipeline_len, )?; + + // Compute the total stake of the consensus validator set and record + // it in storage namada_proof_of_stake::store_total_consensus_stake( &mut self.wl_storage, current_epoch, )?; } + // Get the actual votes from cometBFT in the preferred format + let votes = pos_votes_from_abci(&self.wl_storage, &req.votes); + // Invariant: Has to be applied before `record_slashes_from_evidence` // because it potentially needs to be able to read validator state from // previous epoch and jailing validator removes the historical state - self.log_block_rewards(&req.votes, height, current_epoch, new_epoch)?; + if !votes.is_empty() { + self.log_block_rewards( + votes.clone(), + height, + current_epoch, + new_epoch, + )?; + } // Invariant: This has to be applied after // `copy_validator_sets_and_positions` and before `self.update_epoch`. 
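The two parameters added above, `liveness_window_check` and `liveness_threshold`, combine into a missed-vote budget: a consensus validator that misses at least `(1 - liveness_threshold) * liveness_window_check` votes within the sliding window is jailed, which is the bound that `jail_for_liveness` derives later in this diff using `Dec` arithmetic. A minimal sketch of that bound, with the threshold expressed as an integer percent for clarity (the helper name and the percent form are illustrative, not part of the patch):

    // Illustrative only: the missed-vote budget implied by the liveness
    // parameters, using integer percent instead of `Dec`.
    fn missed_vote_budget(liveness_window_check: u64, liveness_threshold_pct: u64) -> u64 {
        liveness_window_check * (100 - liveness_threshold_pct) / 100
    }

    // Localnet genesis in this patch (window = 100, threshold = 0.9, i.e. 90%):
    // 100 * (100 - 90) / 100 = 10 missed votes before jailing.
    // Starter genesis (window = 10_000, threshold = 0.9): 1_000 missed votes.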
@@ -128,6 +142,48 @@ where self.apply_inflation(current_epoch)?; } + // Consensus set liveness check + if !votes.is_empty() { + let vote_height = height.prev_height(); + let epoch_of_votes = self + .wl_storage + .storage + .block + .pred_epochs + .get_epoch(vote_height) + .expect( + "Should always find an epoch when looking up the vote \ + height before recording liveness data.", + ); + namada_proof_of_stake::record_liveness_data( + &mut self.wl_storage, + &votes, + epoch_of_votes, + vote_height, + &pos_params, + )?; + } + + let validator_set_update_epoch = + self.get_validator_set_update_epoch(current_epoch); + + // Jail validators for inactivity + namada_proof_of_stake::jail_for_liveness( + &mut self.wl_storage, + &pos_params, + current_epoch, + validator_set_update_epoch, + )?; + + if new_epoch { + // Prune liveness data from validators that are no longer in the + // consensus set + namada_proof_of_stake::prune_liveness_data( + &mut self.wl_storage, + current_epoch, + )?; + } + let mut stats = InternalStats::default(); let native_block_proposer_address = { @@ -762,7 +818,7 @@ where // Process the proposer and votes in the block to assign their PoS rewards. fn log_block_rewards( &mut self, - votes: &[VoteInfo], + votes: Vec, height: BlockHeight, current_epoch: Epoch, new_epoch: bool, @@ -774,7 +830,6 @@ where tracing::debug!( "Found last block proposer: {proposer_address}" ); - let votes = pos_votes_from_abci(&self.wl_storage, votes); namada_proof_of_stake::log_block_rewards( &mut self.wl_storage, if new_epoch { @@ -938,6 +993,10 @@ mod test_finalize_block { use namada::types::uint::Uint; use namada::types::vote_extensions::ethereum_events; use namada_sdk::eth_bridge::MinimumConfirmations; + use namada_sdk::proof_of_stake::{ + liveness_missed_votes_handle, liveness_sum_missed_votes_handle, + read_consensus_validator_set_addresses, + }; use namada_test_utils::tx_data::TxWriteData; use namada_test_utils::TestWasms; use test_log::test; @@ -4506,6 +4565,317 @@ mod test_finalize_block { Ok(()) } + #[test] + fn test_jail_validator_for_inactivity() -> storage_api::Result<()> { + let num_validators = 5_u64; + let (mut shell, _recv, _, _) = setup_with_cfg(SetupCfg { + last_height: 0, + num_validators, + ..Default::default() + }); + let params = read_pos_params(&shell.wl_storage).unwrap(); + + let initial_consensus_set: Vec
= + read_consensus_validator_set_addresses( + &shell.wl_storage, + Epoch::default(), + ) + .unwrap() + .into_iter() + .collect(); + let val1 = initial_consensus_set[0].clone(); + let pkh1 = get_pkh_from_address( + &shell.wl_storage, + ¶ms, + val1.clone(), + Epoch::default(), + ); + let val2 = initial_consensus_set[1].clone(); + let pkh2 = get_pkh_from_address( + &shell.wl_storage, + ¶ms, + val2.clone(), + Epoch::default(), + ); + + let validator_stake = namada_proof_of_stake::read_validator_stake( + &shell.wl_storage, + ¶ms, + &val2, + Epoch::default(), + ) + .unwrap(); + + let val3 = initial_consensus_set[2].clone(); + let val4 = initial_consensus_set[3].clone(); + let val5 = initial_consensus_set[4].clone(); + + // Finalize block 1 + next_block_for_inflation(&mut shell, pkh1.to_vec(), vec![], None); + + // Ensure that there is no liveness data yet since there were no votes + let missed_votes = liveness_missed_votes_handle(); + let sum_missed_votes = liveness_sum_missed_votes_handle(); + assert!(missed_votes.is_empty(&shell.wl_storage)?); + assert!(sum_missed_votes.is_empty(&shell.wl_storage)?); + + let minimum_unsigned_blocks = ((Dec::one() + - params.liveness_threshold) + * params.liveness_window_check) + .to_uint() + .unwrap() + .as_u64(); + + // Finalize block 2 and ensure that some data has been written + let default_all_votes = get_default_true_votes( + &shell.wl_storage, + shell.wl_storage.storage.block.epoch, + ); + next_block_for_inflation( + &mut shell, + pkh1.to_vec(), + default_all_votes, + None, + ); + assert!(missed_votes.is_empty(&shell.wl_storage)?); + for val in &initial_consensus_set { + let sum = sum_missed_votes.get(&shell.wl_storage, val)?; + assert_eq!(sum, Some(0u64)); + } + + // Completely unbond one of the validator to test the pruning at the + // pipeline epoch + let mut current_epoch = shell.wl_storage.storage.block.epoch; + namada_proof_of_stake::unbond_tokens( + &mut shell.wl_storage, + None, + &val5, + validator_stake, + current_epoch, + false, + )?; + let pipeline_vals = read_consensus_validator_set_addresses( + &shell.wl_storage, + current_epoch + params.pipeline_len, + )?; + assert_eq!(pipeline_vals.len(), initial_consensus_set.len() - 1); + let val5_pipeline_state = validator_state_handle(&val5) + .get( + &shell.wl_storage, + current_epoch + params.pipeline_len, + ¶ms, + )? + .unwrap(); + assert_eq!(val5_pipeline_state, ValidatorState::BelowThreshold); + + // Advance to the next epoch with no votes from validator 2 + // NOTE: assume the minimum blocks for jailing is larger than remaining + // blocks to next epoch! 
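The assertions that follow read the two pieces of liveness bookkeeping this patch introduces: a per-validator set of block heights at which a vote was missed, and a per-validator running sum of those misses. The NOTE above matters because validator 2 has to stay strictly below the missed-vote budget (`minimum_unsigned_blocks`) until the epoch boundary, so that it only crosses the threshold, and gets jailed, once the next epoch has begun. A short read-back sketch using the same handles as this test (`validator` stands for any address from `initial_consensus_set`):

    // Sketch: inspect the liveness data recorded for one validator.
    let missed_heights = liveness_missed_votes_handle().at(&validator);
    let missed_at_height_2 = missed_heights.contains(&shell.wl_storage, &2u64)?;
    let total_missed = liveness_sum_missed_votes_handle()
        .get(&shell.wl_storage, &validator)?
        .unwrap_or(0);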
+ let mut votes_no2 = get_default_true_votes( + &shell.wl_storage, + shell.wl_storage.storage.block.epoch, + ); + votes_no2.retain(|vote| vote.validator.address != pkh2); + + let first_height_without_vote = 2; + let mut val2_num_missed_blocks = 0u64; + while current_epoch == Epoch::default() { + next_block_for_inflation( + &mut shell, + pkh1.to_vec(), + votes_no2.clone(), + None, + ); + current_epoch = shell.wl_storage.storage.block.epoch; + val2_num_missed_blocks += 1; + } + + // Checks upon the new epoch + for val in &initial_consensus_set { + let missed_votes = liveness_missed_votes_handle().at(val); + let sum = sum_missed_votes.get(&shell.wl_storage, val)?; + + if val == &val2 { + assert_eq!(sum, Some(val2_num_missed_blocks)); + for height in first_height_without_vote + ..first_height_without_vote + val2_num_missed_blocks + { + assert!(missed_votes.contains(&shell.wl_storage, &height)?); + assert!(sum.unwrap() < minimum_unsigned_blocks); + } + } else { + assert!(missed_votes.is_empty(&shell.wl_storage)?); + assert_eq!(sum, Some(0u64)); + } + } + + // Advance blocks up to just before the next epoch + loop { + next_block_for_inflation( + &mut shell, + pkh1.to_vec(), + votes_no2.clone(), + None, + ); + if shell.wl_storage.storage.update_epoch_blocks_delay == Some(1) { + break; + } + } + assert_eq!(shell.wl_storage.storage.block.epoch, current_epoch); + let pipeline_vals = read_consensus_validator_set_addresses( + &shell.wl_storage, + current_epoch + params.pipeline_len, + )?; + assert_eq!(pipeline_vals.len(), initial_consensus_set.len() - 1); + let val2_sum_missed_votes = + liveness_sum_missed_votes_handle().get(&shell.wl_storage, &val2)?; + assert_eq!( + val2_sum_missed_votes, + Some(shell.wl_storage.storage.block.height.0 - 2) + ); + for val in &initial_consensus_set { + if val == &val2 { + continue; + } + let sum = sum_missed_votes.get(&shell.wl_storage, val)?; + assert_eq!(sum, Some(0u64)); + } + + // Now advance one more block to the next epoch, where validator 2 will + // miss its 10th vote and should thus be jailed for liveness + next_block_for_inflation( + &mut shell, + pkh1.to_vec(), + votes_no2.clone(), + None, + ); + current_epoch = shell.wl_storage.storage.block.epoch; + assert_eq!(current_epoch, Epoch(2)); + + let val2_sum_missed_votes = + liveness_sum_missed_votes_handle().get(&shell.wl_storage, &val2)?; + assert_eq!(val2_sum_missed_votes, Some(minimum_unsigned_blocks)); + + // Check the validator sets for all epochs up through the pipeline + let consensus_vals = read_consensus_validator_set_addresses( + &shell.wl_storage, + current_epoch, + )?; + assert_eq!( + consensus_vals, + HashSet::from_iter([ + val1.clone(), + val2.clone(), + val3.clone(), + val4.clone() + ]) + ); + for offset in 1..=params.pipeline_len { + let consensus_vals = read_consensus_validator_set_addresses( + &shell.wl_storage, + current_epoch + offset, + )?; + assert_eq!( + consensus_vals, + HashSet::from_iter([val1.clone(), val3.clone(), val4.clone()]) + ); + let val2_state = validator_state_handle(&val2) + .get(&shell.wl_storage, current_epoch + offset, ¶ms)? + .unwrap(); + assert_eq!(val2_state, ValidatorState::Jailed); + let val5_state = validator_state_handle(&val5) + .get(&shell.wl_storage, current_epoch + offset, ¶ms)? 
+ .unwrap(); + assert_eq!(val5_state, ValidatorState::BelowThreshold); + } + + // Check the liveness data for validators 2 and 5 (2 should still be + // there, 5 should be removed) + for val in &initial_consensus_set { + let missed_votes = liveness_missed_votes_handle().at(val); + let sum = sum_missed_votes.get(&shell.wl_storage, val)?; + + if val == &val2 { + assert_eq!( + sum, + Some(shell.wl_storage.storage.block.height.0 - 2) + ); + for height in first_height_without_vote + ..shell.wl_storage.storage.block.height.0 + { + assert!(missed_votes.contains(&shell.wl_storage, &height)?); + } + } else if val == &val5 { + assert!(missed_votes.is_empty(&shell.wl_storage)?); + assert!(sum.is_none()); + } else { + assert!(missed_votes.is_empty(&shell.wl_storage)?); + assert_eq!(sum, Some(0u64)); + } + } + + // Advance to the next epoch to ensure that the val2 data is removed + // from the liveness data + let next_epoch = current_epoch.next(); + loop { + let votes = get_default_true_votes( + &shell.wl_storage, + shell.wl_storage.storage.block.epoch, + ); + current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None).0; + if current_epoch == next_epoch { + break; + } + } + + // Check that the liveness data only contains data for vals 1, 3, and 4 + for val in &initial_consensus_set { + let missed_votes = liveness_missed_votes_handle().at(val); + let sum = sum_missed_votes.get(&shell.wl_storage, val)?; + + assert!(missed_votes.is_empty(&shell.wl_storage)?); + if val == &val2 || val == &val5 { + assert!(sum.is_none()); + } else { + assert_eq!(sum, Some(0u64)); + } + } + + // Validator 2 unjail itself + namada_proof_of_stake::unjail_validator( + &mut shell.wl_storage, + &val2, + current_epoch, + )?; + let pipeline_epoch = current_epoch + params.pipeline_len; + let val2_pipeline_state = validator_state_handle(&val2).get( + &shell.wl_storage, + pipeline_epoch, + ¶ms, + )?; + assert_eq!(val2_pipeline_state, Some(ValidatorState::Consensus)); + + // Advance to the pipeline epoch + loop { + let votes = get_default_true_votes( + &shell.wl_storage, + shell.wl_storage.storage.block.epoch, + ); + current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None).0; + if current_epoch == pipeline_epoch { + break; + } + } + let sum_liveness = liveness_sum_missed_votes_handle(); + assert_eq!(sum_liveness.get(&shell.wl_storage, &val1)?, Some(0u64)); + assert_eq!(sum_liveness.get(&shell.wl_storage, &val2)?, None); + assert_eq!(sum_liveness.get(&shell.wl_storage, &val3)?, Some(0u64)); + assert_eq!(sum_liveness.get(&shell.wl_storage, &val4)?, Some(0u64)); + assert_eq!(sum_liveness.get(&shell.wl_storage, &val5)?, None); + + Ok(()) + } + fn get_default_true_votes(storage: &S, epoch: Epoch) -> Vec where S: StorageRead, diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index 4b22542ced..bc99b0d628 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -750,23 +750,8 @@ where } }; // Check if we're gonna switch to a new epoch after a delay - let validator_set_update_epoch = if let Some(delay) = - self.wl_storage.storage.update_epoch_blocks_delay - { - if delay == EPOCH_SWITCH_BLOCKS_DELAY { - // If we're about to update validator sets for the - // upcoming epoch, we can still remove the validator - current_epoch.next() - } else { - // If we're waiting to switch to a new epoch, it's too - // late to update validator sets - // on the next epoch, so we need to - // wait for the one after. 
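One detail worth calling out from the test above: validator 2 is unjailed without ever having been slashed. This lines up with the `unjail_validator` change in `proof_of_stake/src/lib.rs` further down in this diff, where the `slash_processing_epoch_offset` waiting period is now only enforced when a `last_slash_epoch` is actually recorded, so validators jailed purely for downtime are not blocked by it. A rough sketch of the relaxed check, using the names from that hunk:

    // Sketch of the relaxed eligibility check in `unjail_validator`: the
    // waiting period only applies when a slash has been recorded.
    if let Some(last_slash_epoch) =
        read_validator_last_slash_epoch(storage, validator)?
    {
        let eligible_epoch =
            last_slash_epoch + params.slash_processing_epoch_offset();
        if current_epoch < eligible_epoch {
            return Err(UnjailValidatorError::NotEligible(
                validator.clone(),
                eligible_epoch,
                current_epoch,
            )
            .into());
        }
    }
    // A validator jailed only for liveness has no last slash epoch and falls
    // through, re-entering the validator set at the pipeline epoch.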
- current_epoch.next().next() - } - } else { - current_epoch.next() - }; + let validator_set_update_epoch = + self.get_validator_set_update_epoch(current_epoch); tracing::info!( "Slashing {} for {} in epoch {}, block height {} (current \ epoch = {}, validator set update epoch = \ @@ -793,6 +778,28 @@ where } } + /// Get the next epoch for which we can request validator set changed + pub fn get_validator_set_update_epoch( + &self, + current_epoch: namada_sdk::core::types::storage::Epoch, + ) -> namada_sdk::core::types::storage::Epoch { + if let Some(delay) = self.wl_storage.storage.update_epoch_blocks_delay { + if delay == EPOCH_SWITCH_BLOCKS_DELAY { + // If we're about to update validator sets for the + // upcoming epoch, we can still remove the validator + current_epoch.next() + } else { + // If we're waiting to switch to a new epoch, it's too + // late to update validator sets + // on the next epoch, so we need to + // wait for the one after. + current_epoch.next().next() + } + } else { + current_epoch.next() + } + } + /// Process and apply slashes that have already been recorded for the /// current epoch fn process_slashes(&mut self) { diff --git a/core/src/types/storage.rs b/core/src/types/storage.rs index a1dbdbdeaa..f5f3ceea55 100644 --- a/core/src/types/storage.rs +++ b/core/src/types/storage.rs @@ -292,6 +292,11 @@ impl BlockHeight { pub fn next_height(&self) -> BlockHeight { BlockHeight(self.0 + 1) } + + /// Get the height of the previous block + pub fn prev_height(&self) -> BlockHeight { + BlockHeight(self.0 - 1) + } } impl TryFrom<&[u8]> for BlockHash { diff --git a/genesis/localnet/parameters.toml b/genesis/localnet/parameters.toml index 41fb316f34..0bd403a3e2 100644 --- a/genesis/localnet/parameters.toml +++ b/genesis/localnet/parameters.toml @@ -64,6 +64,12 @@ cubic_slashing_window_length = 1 # The minimum amount of bonded tokens that a validator needs to be in # either the `consensus` or `below_capacity` validator sets validator_stake_threshold = "1" +# The length, in blocks, of the sliding window for consensus validators +# inactivity verification +liveness_window_check = 100 +# The minimum required activity of consensus validators, in percentage, over +# the `liveness_window_check` +liveness_threshold = "0.9" # Governance parameters. [gov_params] diff --git a/genesis/starter/parameters.toml b/genesis/starter/parameters.toml index 86714f827c..1381cda18c 100644 --- a/genesis/starter/parameters.toml +++ b/genesis/starter/parameters.toml @@ -64,6 +64,12 @@ cubic_slashing_window_length = 1 # The minimum amount of bonded tokens that a validator needs to be in # either the `consensus` or `below_capacity` validator sets validator_stake_threshold = "1" +# The length, in blocks, of the sliding window for consensus validators +# inactivity verification +liveness_window_check = 10_000 +# The minimum required activity of consensus validators, in percentage, over +# the `liveness_window_check` +liveness_threshold = "0.9" # Governance parameters. 
[gov_params] diff --git a/proof_of_stake/src/lib.rs b/proof_of_stake/src/lib.rs index d6d06e78ce..5229f88cc2 100644 --- a/proof_of_stake/src/lib.rs +++ b/proof_of_stake/src/lib.rs @@ -42,6 +42,7 @@ use namada_core::types::dec::Dec; use namada_core::types::key::{ common, protocol_pk_key, tm_consensus_key_raw_hash, PublicKeyTmRawHash, }; +use namada_core::types::storage::BlockHeight; pub use namada_core::types::storage::{Epoch, Key, KeySeg}; use once_cell::unsync::Lazy; pub use parameters::{OwnedPosParams, PosParams}; @@ -63,16 +64,16 @@ use types::{ ConsensusValidator, ConsensusValidatorSet, ConsensusValidatorSets, DelegatorRedelegatedBonded, DelegatorRedelegatedUnbonded, EagerRedelegatedBondsMap, EpochedSlashes, IncomingRedelegations, - OutgoingRedelegations, Position, RedelegatedBondsOrUnbonds, - RedelegatedTokens, ReverseOrdTokenAmount, RewardsAccumulator, - RewardsProducts, Slash, SlashType, SlashedAmount, Slashes, - TotalConsensusStakes, TotalDeltas, TotalRedelegatedBonded, - TotalRedelegatedUnbonded, UnbondDetails, Unbonds, ValidatorAddresses, - ValidatorConsensusKeys, ValidatorDeltas, ValidatorEthColdKeys, - ValidatorEthHotKeys, ValidatorMetaData, ValidatorPositionAddresses, - ValidatorProtocolKeys, ValidatorSetPositions, ValidatorSetUpdate, - ValidatorState, ValidatorStates, ValidatorTotalUnbonded, VoteInfo, - WeightedValidator, + LivenessMissedVotes, LivenessSumMissedVotes, OutgoingRedelegations, + Position, RedelegatedBondsOrUnbonds, RedelegatedTokens, + ReverseOrdTokenAmount, RewardsAccumulator, RewardsProducts, Slash, + SlashType, SlashedAmount, Slashes, TotalConsensusStakes, TotalDeltas, + TotalRedelegatedBonded, TotalRedelegatedUnbonded, UnbondDetails, Unbonds, + ValidatorAddresses, ValidatorConsensusKeys, ValidatorDeltas, + ValidatorEthColdKeys, ValidatorEthHotKeys, ValidatorMetaData, + ValidatorPositionAddresses, ValidatorProtocolKeys, ValidatorSetPositions, + ValidatorSetUpdate, ValidatorState, ValidatorStates, + ValidatorTotalUnbonded, VoteInfo, WeightedValidator, }; /// Address of the PoS account implemented as a native VP @@ -288,6 +289,18 @@ pub fn delegator_redelegated_unbonds_handle( DelegatorRedelegatedUnbonded::open(key) } +/// Get the storage handle to the missed votes for liveness tracking +pub fn liveness_missed_votes_handle() -> LivenessMissedVotes { + let key = storage::liveness_missed_votes_key(); + LivenessMissedVotes::open(key) +} + +/// Get the storage handle to the sum of missed votes for liveness tracking +pub fn liveness_sum_missed_votes_handle() -> LivenessSumMissedVotes { + let key = storage::liveness_sum_missed_votes_key(); + LivenessSumMissedVotes::open(key) +} + /// Init genesis. Requires that the governance parameters are initialized. pub fn init_genesis( storage: &mut S, @@ -4289,7 +4302,6 @@ where // Need `+1` because we process at the beginning of a new epoch let processing_epoch = evidence_epoch + params.slash_processing_epoch_offset(); - let pipeline_epoch = current_epoch + params.pipeline_len; // Add the slash to the list of enqueued slashes to be processed at a later // epoch @@ -4307,118 +4319,14 @@ where write_validator_last_slash_epoch(storage, validator, evidence_epoch)?; } - // Remove the validator from the set starting at the next epoch and up thru - // the pipeline epoch. - for epoch in - Epoch::iter_bounds_inclusive(validator_set_update_epoch, pipeline_epoch) - { - let prev_state = validator_state_handle(validator) - .get(storage, epoch, params)? 
- .expect("Expected to find a valid validator."); - match prev_state { - ValidatorState::Consensus => { - let amount_pre = - read_validator_stake(storage, params, validator, epoch)?; - let val_position = validator_set_positions_handle() - .at(&epoch) - .get(storage, validator)? - .expect("Could not find validator's position in storage."); - let _ = consensus_validator_set_handle() - .at(&epoch) - .at(&amount_pre) - .remove(storage, &val_position)?; - validator_set_positions_handle() - .at(&epoch) - .remove(storage, validator)?; - - // For the pipeline epoch only: - // promote the next max inactive validator to the active - // validator set at the pipeline offset - if epoch == pipeline_epoch { - let below_capacity_handle = - below_capacity_validator_set_handle().at(&epoch); - let max_below_capacity_amount = - get_max_below_capacity_validator_amount( - &below_capacity_handle, - storage, - )?; - if let Some(max_below_capacity_amount) = - max_below_capacity_amount - { - let position_to_promote = find_first_position( - &below_capacity_handle - .at(&max_below_capacity_amount.into()), - storage, - )? - .expect("Should return a position."); - let max_bc_validator = below_capacity_handle - .at(&max_below_capacity_amount.into()) - .remove(storage, &position_to_promote)? - .expect( - "Should have returned a removed validator.", - ); - insert_validator_into_set( - &consensus_validator_set_handle() - .at(&epoch) - .at(&max_below_capacity_amount), - storage, - &epoch, - &max_bc_validator, - )?; - validator_state_handle(&max_bc_validator).set( - storage, - ValidatorState::Consensus, - current_epoch, - params.pipeline_len, - )?; - } - } - } - ValidatorState::BelowCapacity => { - let amount_pre = validator_deltas_handle(validator) - .get_sum(storage, epoch, params)? - .unwrap_or_default(); - debug_assert!(amount_pre.non_negative()); - let val_position = validator_set_positions_handle() - .at(&epoch) - .get(storage, validator)? - .expect("Could not find validator's position in storage."); - let _ = below_capacity_validator_set_handle() - .at(&epoch) - .at(&token::Amount::from_change(amount_pre).into()) - .remove(storage, &val_position)?; - validator_set_positions_handle() - .at(&epoch) - .remove(storage, validator)?; - } - ValidatorState::BelowThreshold => { - tracing::debug!("Below-threshold"); - } - ValidatorState::Inactive => { - tracing::debug!("INACTIVE"); - panic!( - "Shouldn't be here - haven't implemented inactive vals yet" - ) - } - ValidatorState::Jailed => { - tracing::debug!( - "Found evidence for a validator who is already jailed" - ); - // return Ok(()); - } - } - } - // Safe sub cause `validator_set_update_epoch > current_epoch` - let start_offset = validator_set_update_epoch.0 - current_epoch.0; - // Set the validator state as `Jailed` thru the pipeline epoch - for offset in start_offset..=params.pipeline_len { - validator_state_handle(validator).set( - storage, - ValidatorState::Jailed, - current_epoch, - offset, - )?; - } + // Jail the validator and update validator sets + jail_validator( + storage, + params, + validator, + current_epoch, + validator_set_update_epoch, + )?; // No other actions are performed here until the epoch in which the slash is // processed. @@ -5075,7 +4983,7 @@ where Ok(cmp::min(amount_due, slashable_amount)) } -/// Unjail a validator that is currently jailed +/// Unjail a validator that is currently jailed. 
pub fn unjail_validator( storage: &mut S, validator: &Address, @@ -5107,19 +5015,19 @@ where // Check that the unjailing tx can be submitted given the current epoch // and the most recent infraction epoch - let last_slash_epoch = read_validator_last_slash_epoch(storage, validator)? - .unwrap_or_default(); - let eligible_epoch = - last_slash_epoch + params.slash_processing_epoch_offset(); - if current_epoch < eligible_epoch { - return Err(UnjailValidatorError::NotEligible( - validator.clone(), - eligible_epoch, - current_epoch, - ) - .into()); + let last_slash_epoch = read_validator_last_slash_epoch(storage, validator)?; + if let Some(last_slash_epoch) = last_slash_epoch { + let eligible_epoch = + last_slash_epoch + params.slash_processing_epoch_offset(); + if current_epoch < eligible_epoch { + return Err(UnjailValidatorError::NotEligible( + validator.clone(), + eligible_epoch, + current_epoch, + ) + .into()); + } } - // TODO: any other checks that are needed? (deltas, etc)? // Re-insert the validator into the validator set and update its state let pipeline_epoch = current_epoch + params.pipeline_len; @@ -5440,58 +5348,13 @@ where // Remove the validator from the validator set. If it is in the consensus // set, promote the next validator. match pipeline_state { - ValidatorState::Consensus => { - let consensus_set = consensus_validator_set_handle() - .at(&pipeline_epoch) - .at(&pipeline_stake); - // TODO: handle the unwrap better here - let val_position = validator_set_positions_handle() - .at(&pipeline_epoch) - .get(storage, validator)? - .unwrap(); - let removed = consensus_set.remove(storage, &val_position)?; - debug_assert_eq!(removed, Some(validator.clone())); - - // Remove position - validator_set_positions_handle() - .at(&pipeline_epoch) - .remove(storage, validator)?; - - // Now promote the next below-capacity validator to the consensus - // set - let below_cap_set = - below_capacity_validator_set_handle().at(&pipeline_epoch); - let max_below_capacity_validator_amount = - get_max_below_capacity_validator_amount( - &below_cap_set, - storage, - )?; - - if let Some(max_bc_amount) = max_below_capacity_validator_amount { - let below_cap_vals_max = - below_cap_set.at(&max_bc_amount.into()); - let lowest_position = - find_first_position(&below_cap_vals_max, storage)?.unwrap(); - let removed_max_below_capacity = below_cap_vals_max - .remove(storage, &lowest_position)? - .expect("Must have been removed"); + ValidatorState::Consensus => deactivate_consensus_validator( + storage, + validator, + pipeline_epoch, + pipeline_stake, + )?, - insert_validator_into_set( - &consensus_validator_set_handle() - .at(&pipeline_epoch) - .at(&max_bc_amount), - storage, - &pipeline_epoch, - &removed_max_below_capacity, - )?; - validator_state_handle(&removed_max_below_capacity).set( - storage, - ValidatorState::Consensus, - pipeline_epoch, - 0, - )?; - } - } ValidatorState::BelowCapacity => { let below_capacity_set = below_capacity_validator_set_handle() .at(&pipeline_epoch) @@ -5537,6 +5400,65 @@ where Ok(()) } +fn deactivate_consensus_validator( + storage: &mut S, + + validator: &Address, + target_epoch: Epoch, + stake: token::Amount, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let consensus_set = consensus_validator_set_handle() + .at(&target_epoch) + .at(&stake); + // TODO: handle the unwrap better here + let val_position = validator_set_positions_handle() + .at(&target_epoch) + .get(storage, validator)? 
+ .unwrap(); + let removed = consensus_set.remove(storage, &val_position)?; + debug_assert_eq!(removed, Some(validator.clone())); + + // Remove position + validator_set_positions_handle() + .at(&target_epoch) + .remove(storage, validator)?; + + // Now promote the next below-capacity validator to the consensus + // set + let below_cap_set = below_capacity_validator_set_handle().at(&target_epoch); + let max_below_capacity_validator_amount = + get_max_below_capacity_validator_amount(&below_cap_set, storage)?; + + if let Some(max_bc_amount) = max_below_capacity_validator_amount { + let below_cap_vals_max = below_cap_set.at(&max_bc_amount.into()); + let lowest_position = + find_first_position(&below_cap_vals_max, storage)?.unwrap(); + let removed_max_below_capacity = below_cap_vals_max + .remove(storage, &lowest_position)? + .expect("Must have been removed"); + + insert_validator_into_set( + &consensus_validator_set_handle() + .at(&target_epoch) + .at(&max_bc_amount), + storage, + &target_epoch, + &removed_max_below_capacity, + )?; + validator_state_handle(&removed_max_below_capacity).set( + storage, + ValidatorState::Consensus, + target_epoch, + 0, + )?; + } + + Ok(()) +} + /// Re-activate an inactive validator pub fn reactivate_validator( storage: &mut S, @@ -5606,6 +5528,169 @@ where Ok(()) } +/// Remove liveness data from storage for all validators that are not in the +/// current consensus validator set. +pub fn prune_liveness_data( + storage: &mut S, + current_epoch: Epoch, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let consensus_validators = + read_consensus_validator_set_addresses(storage, current_epoch)?; + let liveness_missed_votes = liveness_missed_votes_handle(); + let liveness_sum_missed_votes = liveness_sum_missed_votes_handle(); + + let validators_to_prune = liveness_sum_missed_votes + .iter(storage)? 
+ .filter_map(|entry| { + let (address, _) = entry.ok()?; + + if consensus_validators.contains(&address) { + None + } else { + Some(address) + } + }) + .collect::>(); + + for validator in &validators_to_prune { + liveness_missed_votes.remove_all(storage, validator)?; + liveness_sum_missed_votes.remove(storage, validator)?; + } + + Ok(()) +} + +/// Record the liveness data of the consensus validators +pub fn record_liveness_data( + storage: &mut S, + votes: &[VoteInfo], + votes_epoch: Epoch, + votes_height: BlockHeight, + pos_params: &PosParams, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + let consensus_validators = + read_consensus_validator_set_addresses(storage, votes_epoch)?; + let liveness_missed_votes = liveness_missed_votes_handle(); + let liveness_sum_missed_votes = liveness_sum_missed_votes_handle(); + + // Get the addresses of the validators who voted + let vote_addresses = votes + .iter() + .map(|vote| (&vote.validator_address)) + .collect::>(); + + let height_to_prune = + votes_height.0.checked_sub(pos_params.liveness_window_check); + + for cons_validator in consensus_validators.into_iter() { + // Prune old vote (only need to look for the block height that was just + // pushed out of the sliding window) + if let Some(prune_height) = height_to_prune { + let pruned_missing_vote = liveness_missed_votes + .at(&cons_validator) + .remove(storage, &prune_height)?; + + if pruned_missing_vote { + // Update liveness data + liveness_sum_missed_votes.update( + storage, + cons_validator.clone(), + |missed_votes| missed_votes.unwrap() - 1, + )?; + } + } + + // Evaluate new vote + if !vote_addresses.contains(&cons_validator) { + // Insert the height of the missing vote in storage + liveness_missed_votes + .at(&cons_validator) + .insert(storage, votes_height.0)?; + + // Update liveness data + liveness_sum_missed_votes.update( + storage, + cons_validator, + |missed_votes| { + match missed_votes { + Some(missed_votes) => missed_votes + 1, + None => { + // Missing liveness data for the validator (newly + // added to the conensus + // set), intialize it + 1 + } + } + }, + )?; + } else { + // Initialize any new consensus validator who has signed the first + // block + if !liveness_sum_missed_votes.contains(storage, &cons_validator)? { + liveness_sum_missed_votes.insert(storage, cons_validator, 0)?; + } + } + } + + Ok(()) +} + +/// Jail validators who failed to match the liveness threshold +pub fn jail_for_liveness( + storage: &mut S, + params: &PosParams, + current_epoch: Epoch, + jail_epoch: Epoch, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + // Derive the actual missing votes limit from the percentage + let missing_votes_threshold = ((Dec::one() - params.liveness_threshold) + * params.liveness_window_check) + .to_uint() + .ok_or_else(|| { + storage_api::Error::SimpleMessage( + "Found negative liveness threshold", + ) + })? + .as_u64(); + + // Jail inactive validators + let validators_to_jail = liveness_sum_missed_votes_handle() + .iter(storage)? 
+ .filter_map(|entry| { + let (address, missed_votes) = entry.ok()?; + + // Check if the validator failed to meet the threshold and jail + // them + if missed_votes >= missing_votes_threshold { + Some(address) + } else { + None + } + }) + .collect::>(); + + for validator in &validators_to_jail { + tracing::info!( + "Jailing validator {} starting in epoch {} for missing too many \ + votes to ensure liveness", + validator, + jail_epoch, + ); + jail_validator(storage, params, validator, current_epoch, jail_epoch)?; + } + + Ok(()) +} + +#[cfg(any(test, feature = "testing"))] /// PoS related utility functions to help set up tests. pub mod test_utils { @@ -6003,3 +6088,151 @@ where storage.delete(&key)?; Ok(current_rewards) } + +/// Jail a validator by removing it from and updating the validator sets and +/// changing its state to `Jailed`. Validators are jailed for liveness and for +/// misbehaving. +fn jail_validator( + storage: &mut S, + params: &PosParams, + validator: &Address, + current_epoch: Epoch, + validator_set_update_epoch: Epoch, +) -> storage_api::Result<()> +where + S: StorageRead + StorageWrite, +{ + tracing::debug!( + "Jailing validator {} beginning in epoch {}", + validator, + validator_set_update_epoch + ); + + // Remove the validator from the set starting at the update epoch and up + // thru the pipeline epoch. + let pipeline_epoch = current_epoch + params.pipeline_len; + for epoch in + Epoch::iter_bounds_inclusive(validator_set_update_epoch, pipeline_epoch) + { + let prev_state = validator_state_handle(validator) + .get(storage, epoch, params)? + .expect("Expected to find a valid validator."); + match prev_state { + ValidatorState::Consensus => { + tracing::debug!( + "Removing validator from the consensus set in epoch {}", + epoch + ); + let amount_pre = + read_validator_stake(storage, params, validator, epoch)?; + let val_position = validator_set_positions_handle() + .at(&epoch) + .get(storage, validator)? + .expect("Could not find validator's position in storage."); + let _ = consensus_validator_set_handle() + .at(&epoch) + .at(&amount_pre) + .remove(storage, &val_position)?; + validator_set_positions_handle() + .at(&epoch) + .remove(storage, validator)?; + + // For the pipeline epoch only: + // promote the next max inactive validator to the active + // validator set at the pipeline offset + if epoch == pipeline_epoch { + let below_capacity_handle = + below_capacity_validator_set_handle().at(&epoch); + let max_below_capacity_amount = + get_max_below_capacity_validator_amount( + &below_capacity_handle, + storage, + )?; + if let Some(max_below_capacity_amount) = + max_below_capacity_amount + { + let position_to_promote = find_first_position( + &below_capacity_handle + .at(&max_below_capacity_amount.into()), + storage, + )? + .expect("Should return a position."); + let max_bc_validator = below_capacity_handle + .at(&max_below_capacity_amount.into()) + .remove(storage, &position_to_promote)?
+ .expect( + "Should have returned a removed validator.", + ); + insert_validator_into_set( + &consensus_validator_set_handle() + .at(&epoch) + .at(&max_below_capacity_amount), + storage, + &epoch, + &max_bc_validator, + )?; + validator_state_handle(&max_bc_validator).set( + storage, + ValidatorState::Consensus, + current_epoch, + params.pipeline_len, + )?; + } + } + } + ValidatorState::BelowCapacity => { + tracing::debug!( + "Removing validator from the below-capacity set in epoch \ + {}", + epoch + ); + + let amount_pre = validator_deltas_handle(validator) + .get_sum(storage, epoch, params)? + .unwrap_or_default(); + debug_assert!(amount_pre.non_negative()); + let val_position = validator_set_positions_handle() + .at(&epoch) + .get(storage, validator)? + .expect("Could not find validator's position in storage."); + let _ = below_capacity_validator_set_handle() + .at(&epoch) + .at(&token::Amount::from_change(amount_pre).into()) + .remove(storage, &val_position)?; + validator_set_positions_handle() + .at(&epoch) + .remove(storage, validator)?; + } + ValidatorState::BelowThreshold => { + tracing::debug!( + "Setting below-threshold validator as jailed in epoch {}", + epoch + ); + } + ValidatorState::Inactive => { + tracing::debug!( + "Setting inactive validator as jailed in epoch {}", + epoch + ); + } + ValidatorState::Jailed => { + tracing::debug!( + "Found evidence for a validator who is already jailed" + ); + } + } + } + + // Safe sub cause `validator_set_update_epoch > current_epoch` + let start_offset = validator_set_update_epoch.0 - current_epoch.0; + // Set the validator state as `Jailed` thru the pipeline epoch + for offset in start_offset..=params.pipeline_len { + validator_state_handle(validator).set( + storage, + ValidatorState::Jailed, + current_epoch, + offset, + )?; + } + Ok(()) +} diff --git a/proof_of_stake/src/parameters.rs b/proof_of_stake/src/parameters.rs index 0c173c9261..ecacdde206 100644 --- a/proof_of_stake/src/parameters.rs +++ b/proof_of_stake/src/parameters.rs @@ -58,6 +58,12 @@ pub struct OwnedPosParams { /// The minimum amount of bonded tokens that a validator needs to be in /// either the `consensus` or `below_capacity` validator sets pub validator_stake_threshold: token::Amount, + /// The length, in blocks, of the sliding window for consensus validators + /// inactivity verification + pub liveness_window_check: u64, + /// The minimum required activity of consensus validators, in percentage, + /// over the `liveness_window_check` + pub liveness_threshold: Dec, } impl Default for PosParams { @@ -93,6 +99,8 @@ impl Default for OwnedPosParams { .expect("Test failed"), cubic_slashing_window_length: 1, validator_stake_threshold: token::Amount::native_whole(1_u64), + liveness_window_check: 10_000, + liveness_threshold: Dec::new(9, 1).expect("Test failed"), } } } diff --git a/proof_of_stake/src/storage.rs b/proof_of_stake/src/storage.rs index da10c15654..3bc2c21032 100644 --- a/proof_of_stake/src/storage.rs +++ b/proof_of_stake/src/storage.rs @@ -54,6 +54,9 @@ const VALIDATOR_EMAIL_KEY: &str = "email"; const VALIDATOR_DESCRIPTION_KEY: &str = "description"; const VALIDATOR_WEBSITE_KEY: &str = "website"; const VALIDATOR_DISCORD_KEY: &str = "discord_handle"; +const LIVENESS_PREFIX: &str = "liveness"; +const LIVENESS_MISSED_VOTES: &str = "missed_votes"; +const LIVENESS_MISSED_VOTES_SUM: &str = "sum_missed_votes"; /// Is the given key a PoS storage key?
pub fn is_pos_key(key: &Key) -> bool { @@ -793,3 +796,24 @@ pub fn validator_discord_key(validator: &Address) -> Key { .push(&VALIDATOR_DISCORD_KEY.to_owned()) .expect("Cannot obtain a storage key") } + +/// Storage prefix for the liveness data of the cosnensus validator set. +pub fn liveness_data_prefix() -> Key { + Key::from(ADDRESS.to_db_key()) + .push(&LIVENESS_PREFIX.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for the liveness records. +pub fn liveness_missed_votes_key() -> Key { + liveness_data_prefix() + .push(&LIVENESS_MISSED_VOTES.to_owned()) + .expect("Cannot obtain a storage key") +} + +/// Storage key for the liveness data. +pub fn liveness_sum_missed_votes_key() -> Key { + liveness_data_prefix() + .push(&LIVENESS_MISSED_VOTES_SUM.to_owned()) + .expect("Cannot obtain a storage key") +} diff --git a/proof_of_stake/src/types.rs b/proof_of_stake/src/types.rs index d72b599cdc..c4275746f8 100644 --- a/proof_of_stake/src/types.rs +++ b/proof_of_stake/src/types.rs @@ -255,6 +255,15 @@ pub type DelegatorRedelegatedUnbonded = pub type EagerRedelegatedBondsMap = BTreeMap>; +/// Liveness record of the consensus validator set. Records the block heights at +/// which the consensus validator missed a vote. +pub type LivenessMissedVotes = NestedMap>; + +/// The sum of missed votes within some interval for each of the consensus +/// validators. The value in this map should in principle be the number of +/// elements in the correspoding inner LazySet of [`LivenessMissedVotes`]. +pub type LivenessSumMissedVotes = LazyMap; + #[derive( Debug, Clone, BorshSerialize, BorshDeserialize, Eq, Hash, PartialEq, )] diff --git a/test_fixtures/masp_proofs/0198EA2E90E59AE189F8E4D2148EFC768FE777B9F1B84CA71D843A0062EB1509.bin b/test_fixtures/masp_proofs/0198EA2E90E59AE189F8E4D2148EFC768FE777B9F1B84CA71D843A0062EB1509.bin deleted file mode 100644 index 91158992f1..0000000000 Binary files a/test_fixtures/masp_proofs/0198EA2E90E59AE189F8E4D2148EFC768FE777B9F1B84CA71D843A0062EB1509.bin and /dev/null differ diff --git a/test_fixtures/masp_proofs/181990297B32469C55C7083238E8707FDFE6C1A9CDAAE2956050A745EE509825.bin b/test_fixtures/masp_proofs/181990297B32469C55C7083238E8707FDFE6C1A9CDAAE2956050A745EE509825.bin new file mode 100644 index 0000000000..82a8d6da5b Binary files /dev/null and b/test_fixtures/masp_proofs/181990297B32469C55C7083238E8707FDFE6C1A9CDAAE2956050A745EE509825.bin differ diff --git a/test_fixtures/masp_proofs/2079F14EFADFF577A2AA6C9DD8CA833983676E4D5131006C4F263C91264DF621.bin b/test_fixtures/masp_proofs/2079F14EFADFF577A2AA6C9DD8CA833983676E4D5131006C4F263C91264DF621.bin new file mode 100644 index 0000000000..3c3d27b77a Binary files /dev/null and b/test_fixtures/masp_proofs/2079F14EFADFF577A2AA6C9DD8CA833983676E4D5131006C4F263C91264DF621.bin differ diff --git a/test_fixtures/masp_proofs/30F09813D596F8277911C99FF1C6B362D244EDD624244876F9B5F541EBA07FC5.bin b/test_fixtures/masp_proofs/30F09813D596F8277911C99FF1C6B362D244EDD624244876F9B5F541EBA07FC5.bin deleted file mode 100644 index 1329396ab9..0000000000 Binary files a/test_fixtures/masp_proofs/30F09813D596F8277911C99FF1C6B362D244EDD624244876F9B5F541EBA07FC5.bin and /dev/null differ diff --git a/test_fixtures/masp_proofs/315FBEBD16884E62FCFB437CEA4D13376048954D074973BAD19846CC5570D984.bin b/test_fixtures/masp_proofs/315FBEBD16884E62FCFB437CEA4D13376048954D074973BAD19846CC5570D984.bin deleted file mode 100644 index 3e42d029c4..0000000000 Binary files 
a/test_fixtures/masp_proofs/315FBEBD16884E62FCFB437CEA4D13376048954D074973BAD19846CC5570D984.bin and /dev/null differ diff --git a/test_fixtures/masp_proofs/50C53F3CA843689FC9C1C3233686DF79F7E021A374BFCF3C6CA82DAC391C6533.bin b/test_fixtures/masp_proofs/43E391C9E160208B9D816FEA7E0FE3C6DCEDEE34B0F29969544EE6F0453EBED2.bin similarity index 50% rename from test_fixtures/masp_proofs/50C53F3CA843689FC9C1C3233686DF79F7E021A374BFCF3C6CA82DAC391C6533.bin rename to test_fixtures/masp_proofs/43E391C9E160208B9D816FEA7E0FE3C6DCEDEE34B0F29969544EE6F0453EBED2.bin index 5d71d8783a..bc323473ef 100644 Binary files a/test_fixtures/masp_proofs/50C53F3CA843689FC9C1C3233686DF79F7E021A374BFCF3C6CA82DAC391C6533.bin and b/test_fixtures/masp_proofs/43E391C9E160208B9D816FEA7E0FE3C6DCEDEE34B0F29969544EE6F0453EBED2.bin differ diff --git a/test_fixtures/masp_proofs/4DFC5A68E3851511CB6BD4CF38C4638BA5CF29D1C9ADBA37FF651C6316EF218B.bin b/test_fixtures/masp_proofs/4DFC5A68E3851511CB6BD4CF38C4638BA5CF29D1C9ADBA37FF651C6316EF218B.bin new file mode 100644 index 0000000000..987ded133b Binary files /dev/null and b/test_fixtures/masp_proofs/4DFC5A68E3851511CB6BD4CF38C4638BA5CF29D1C9ADBA37FF651C6316EF218B.bin differ diff --git a/test_fixtures/masp_proofs/57620E5B14DC4B57371691A880B047F64289E271DDF9CD22BA16AB554B943A5C.bin b/test_fixtures/masp_proofs/57620E5B14DC4B57371691A880B047F64289E271DDF9CD22BA16AB554B943A5C.bin new file mode 100644 index 0000000000..c061623115 Binary files /dev/null and b/test_fixtures/masp_proofs/57620E5B14DC4B57371691A880B047F64289E271DDF9CD22BA16AB554B943A5C.bin differ diff --git a/test_fixtures/masp_proofs/77C28BD585217E59868DA9EC62CA7CF73D94FC6C796A37ABE2A35F883BB2B6B4.bin b/test_fixtures/masp_proofs/77C28BD585217E59868DA9EC62CA7CF73D94FC6C796A37ABE2A35F883BB2B6B4.bin new file mode 100644 index 0000000000..626c3558aa Binary files /dev/null and b/test_fixtures/masp_proofs/77C28BD585217E59868DA9EC62CA7CF73D94FC6C796A37ABE2A35F883BB2B6B4.bin differ diff --git a/test_fixtures/masp_proofs/4CCF96B9008D42BA633988264D3E116D3BC8B780C76EAEC072E1A71D709AE66F.bin b/test_fixtures/masp_proofs/7934A0228FD826496ADA0A826A3CAF492543E6950765854A5AE102A98647D5CF.bin similarity index 56% rename from test_fixtures/masp_proofs/4CCF96B9008D42BA633988264D3E116D3BC8B780C76EAEC072E1A71D709AE66F.bin rename to test_fixtures/masp_proofs/7934A0228FD826496ADA0A826A3CAF492543E6950765854A5AE102A98647D5CF.bin index 3887ce9a44..dd916a2823 100644 Binary files a/test_fixtures/masp_proofs/4CCF96B9008D42BA633988264D3E116D3BC8B780C76EAEC072E1A71D709AE66F.bin and b/test_fixtures/masp_proofs/7934A0228FD826496ADA0A826A3CAF492543E6950765854A5AE102A98647D5CF.bin differ diff --git a/test_fixtures/masp_proofs/819FB7D977389C0AC073BE200FCE87F8358272338A32B5B411E93BD43D963AE0.bin b/test_fixtures/masp_proofs/819FB7D977389C0AC073BE200FCE87F8358272338A32B5B411E93BD43D963AE0.bin deleted file mode 100644 index f537b76d38..0000000000 Binary files a/test_fixtures/masp_proofs/819FB7D977389C0AC073BE200FCE87F8358272338A32B5B411E93BD43D963AE0.bin and /dev/null differ diff --git a/test_fixtures/masp_proofs/8686859C447B47946DC21922D83C2404E7EBDFF543A6EAA34544FFF644F08FA9.bin b/test_fixtures/masp_proofs/8686859C447B47946DC21922D83C2404E7EBDFF543A6EAA34544FFF644F08FA9.bin deleted file mode 100644 index a6d6153a3a..0000000000 Binary files a/test_fixtures/masp_proofs/8686859C447B47946DC21922D83C2404E7EBDFF543A6EAA34544FFF644F08FA9.bin and /dev/null differ diff --git a/test_fixtures/masp_proofs/931AE522191249FAFA4A38C9740412000E423E0085AB074BF8C493574AF3226B.bin 
b/test_fixtures/masp_proofs/931AE522191249FAFA4A38C9740412000E423E0085AB074BF8C493574AF3226B.bin deleted file mode 100644 index 4e66ffffab..0000000000 Binary files a/test_fixtures/masp_proofs/931AE522191249FAFA4A38C9740412000E423E0085AB074BF8C493574AF3226B.bin and /dev/null differ diff --git a/test_fixtures/masp_proofs/9EF9FF93C65A60615530272351A86DBF2921ACB1380E8F256AD0CE25151C869C.bin b/test_fixtures/masp_proofs/9EF9FF93C65A60615530272351A86DBF2921ACB1380E8F256AD0CE25151C869C.bin deleted file mode 100644 index b232255a68..0000000000 Binary files a/test_fixtures/masp_proofs/9EF9FF93C65A60615530272351A86DBF2921ACB1380E8F256AD0CE25151C869C.bin and /dev/null differ diff --git a/test_fixtures/masp_proofs/C37373E2478B837D2468FBEE5CCA5D2C40DFF157E5FB70DDC84202C160D01CD3.bin b/test_fixtures/masp_proofs/C37373E2478B837D2468FBEE5CCA5D2C40DFF157E5FB70DDC84202C160D01CD3.bin deleted file mode 100644 index 944c1a805c..0000000000 Binary files a/test_fixtures/masp_proofs/C37373E2478B837D2468FBEE5CCA5D2C40DFF157E5FB70DDC84202C160D01CD3.bin and /dev/null differ diff --git a/test_fixtures/masp_proofs/C6D9819E800430AF7A88CCBC62E16642BBC1A0C0312CE4F613D3A56582DD887E.bin b/test_fixtures/masp_proofs/C6D9819E800430AF7A88CCBC62E16642BBC1A0C0312CE4F613D3A56582DD887E.bin new file mode 100644 index 0000000000..ef913560e9 Binary files /dev/null and b/test_fixtures/masp_proofs/C6D9819E800430AF7A88CCBC62E16642BBC1A0C0312CE4F613D3A56582DD887E.bin differ diff --git a/test_fixtures/masp_proofs/D3CD6A19532B1A225C9DA0BD5032E70782F34C3C50913FD982A1C7706E799AF8.bin b/test_fixtures/masp_proofs/D3CD6A19532B1A225C9DA0BD5032E70782F34C3C50913FD982A1C7706E799AF8.bin new file mode 100644 index 0000000000..f20d739970 Binary files /dev/null and b/test_fixtures/masp_proofs/D3CD6A19532B1A225C9DA0BD5032E70782F34C3C50913FD982A1C7706E799AF8.bin differ diff --git a/test_fixtures/masp_proofs/DB978B3F08838665C1ECD7BEA4E600461287D8046692CDB4AA2CF94A1AC7F8F6.bin b/test_fixtures/masp_proofs/DB978B3F08838665C1ECD7BEA4E600461287D8046692CDB4AA2CF94A1AC7F8F6.bin deleted file mode 100644 index 8c5888b087..0000000000 Binary files a/test_fixtures/masp_proofs/DB978B3F08838665C1ECD7BEA4E600461287D8046692CDB4AA2CF94A1AC7F8F6.bin and /dev/null differ diff --git a/test_fixtures/masp_proofs/DC1AC9DD51BBB870A144A3891E3632E16A03A1BE55255993E7A0108514A9CAAA.bin b/test_fixtures/masp_proofs/DC1AC9DD51BBB870A144A3891E3632E16A03A1BE55255993E7A0108514A9CAAA.bin new file mode 100644 index 0000000000..3eef76ea54 Binary files /dev/null and b/test_fixtures/masp_proofs/DC1AC9DD51BBB870A144A3891E3632E16A03A1BE55255993E7A0108514A9CAAA.bin differ diff --git a/test_fixtures/masp_proofs/E26732D73AA78BBAA57B77A5D41ADC9F35380699722FC1F0CD77875C3B6BCE50.bin b/test_fixtures/masp_proofs/E26732D73AA78BBAA57B77A5D41ADC9F35380699722FC1F0CD77875C3B6BCE50.bin new file mode 100644 index 0000000000..e4056fbf3c Binary files /dev/null and b/test_fixtures/masp_proofs/E26732D73AA78BBAA57B77A5D41ADC9F35380699722FC1F0CD77875C3B6BCE50.bin differ diff --git a/test_fixtures/masp_proofs/3EE70DC758071936C6AADB82E56C536AAC74604CB002CA3558559CACCC24B5EB.bin b/test_fixtures/masp_proofs/E3C727729E77232BA013072CEFD6E0BA7FC56B24EB93DD7BDB825A1016FE738E.bin similarity index 51% rename from test_fixtures/masp_proofs/3EE70DC758071936C6AADB82E56C536AAC74604CB002CA3558559CACCC24B5EB.bin rename to test_fixtures/masp_proofs/E3C727729E77232BA013072CEFD6E0BA7FC56B24EB93DD7BDB825A1016FE738E.bin index 92f501db51..8b0e009816 100644 Binary files 
a/test_fixtures/masp_proofs/3EE70DC758071936C6AADB82E56C536AAC74604CB002CA3558559CACCC24B5EB.bin and b/test_fixtures/masp_proofs/E3C727729E77232BA013072CEFD6E0BA7FC56B24EB93DD7BDB825A1016FE738E.bin differ diff --git a/test_fixtures/masp_proofs/0BB61ED029B895FEEB7681A39C6EDC5CC89E90D6FF92E31C8C872DCAB358239A.bin b/test_fixtures/masp_proofs/E55238DB0F1750142CE2B91B1A06D83FF427812954B82854EE5A7FB439558B91.bin similarity index 50% rename from test_fixtures/masp_proofs/0BB61ED029B895FEEB7681A39C6EDC5CC89E90D6FF92E31C8C872DCAB358239A.bin rename to test_fixtures/masp_proofs/E55238DB0F1750142CE2B91B1A06D83FF427812954B82854EE5A7FB439558B91.bin index e5e6e93ec1..8b9e4e5b63 100644 Binary files a/test_fixtures/masp_proofs/0BB61ED029B895FEEB7681A39C6EDC5CC89E90D6FF92E31C8C872DCAB358239A.bin and b/test_fixtures/masp_proofs/E55238DB0F1750142CE2B91B1A06D83FF427812954B82854EE5A7FB439558B91.bin differ