From 1ac46a065e4964fad9c643c0145f9bb46333d034 Mon Sep 17 00:00:00 2001
From: brentstone
Date: Wed, 27 Sep 2023 18:21:10 -0600
Subject: [PATCH] new impl for purging old validator sets

---
 .../lib/node/ledger/shell/finalize_block.rs | 178 +++++++++++++++++-
 benches/lib.rs                              |   9 +-
 proof_of_stake/src/epoched.rs               | 108 ++++++++++-
 proof_of_stake/src/lib.rs                   |  42 ++---
 proof_of_stake/src/tests.rs                 |   7 +-
 5 files changed, 308 insertions(+), 36 deletions(-)

diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs
index 2f8f970210..739d803bc2 100644
--- a/apps/src/lib/node/ledger/shell/finalize_block.rs
+++ b/apps/src/lib/node/ledger/shell/finalize_block.rs
@@ -100,6 +100,7 @@ where
                 namada_proof_of_stake::read_pos_params(&self.wl_storage)?;
             namada_proof_of_stake::copy_validator_sets_and_positions(
                 &mut self.wl_storage,
+                &pos_params,
                 current_epoch,
                 current_epoch + pos_params.pipeline_len,
             )?;
@@ -107,10 +108,6 @@ where
                 &mut self.wl_storage,
                 current_epoch,
             )?;
-            namada_proof_of_stake::purge_validator_sets_for_old_epoch(
-                &mut self.wl_storage,
-                current_epoch,
-            )?;
         }

         // Invariant: Has to be applied before `record_slashes_from_evidence`
@@ -3823,6 +3820,179 @@ mod test_finalize_block {
         Ok(())
     }

+    #[test]
+    fn test_purge_validator_information() -> storage_api::Result<()> {
+        // Set up the network with `pipeline_len = 2` and `unbonding_len = 4`
+        let num_validators = 4_u64;
+        let (mut shell, _recv, _, _) = setup_with_cfg(SetupCfg {
+            last_height: 0,
+            num_validators,
+        });
+        let mut params = read_pos_params(&shell.wl_storage).unwrap();
+        params.owned.unbonding_len = 4;
+        // params.owned.max_validator_slots = 3;
+        // params.owned.validator_stake_threshold = token::Amount::zero();
+        write_pos_params(&mut shell.wl_storage, &params.owned)?;
+
+        let max_proposal_period = params.max_proposal_period;
+        let default_past_epochs = 2;
+        let consensus_val_set_len = max_proposal_period + default_past_epochs;
+
+        let consensus_val_set =
+            namada_proof_of_stake::consensus_validator_set_handle();
+        // let below_cap_val_set =
+        //     namada_proof_of_stake::below_capacity_validator_set_handle();
+        let validator_positions =
+            namada_proof_of_stake::validator_set_positions_handle();
+        let all_validator_addresses =
+            namada_proof_of_stake::validator_addresses_handle();
+
+        let consensus_set: Vec<WeightedValidator> =
+            read_consensus_validator_set_addresses_with_stake(
+                &shell.wl_storage,
+                Epoch::default(),
+            )
+            .unwrap()
+            .into_iter()
+            .collect();
+        let val1 = consensus_set[0].clone();
+        let pkh1 = get_pkh_from_address(
+            &shell.wl_storage,
+            &params,
+            val1.address,
+            Epoch::default(),
+        );
+
+        // Finalize block 1
+        next_block_for_inflation(&mut shell, pkh1.clone(), vec![], None);
+
+        let votes = get_default_true_votes(&shell.wl_storage, Epoch::default());
+        assert!(!votes.is_empty());
+
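+        // Helper that asserts that each epoched validator-set structure
+        // still holds data for every epoch in the inclusive range
+        // `start..=end`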
+        let check_is_data = |storage: &WlStorage<_, _>,
+                             start: Epoch,
+                             end: Epoch| {
+            for ep in Epoch::iter_bounds_inclusive(start, end) {
+                assert!(!consensus_val_set.at(&ep).is_empty(storage).unwrap());
+                // assert!(!below_cap_val_set.at(&ep).is_empty(storage).unwrap());
+                assert!(
+                    !validator_positions.at(&ep).is_empty(storage).unwrap()
+                );
+                assert!(
+                    !all_validator_addresses.at(&ep).is_empty(storage).unwrap()
+                );
+            }
+        };

+        // Check that there is validator data for epochs 0 through
+        // `pipeline_len`
+        check_is_data(&shell.wl_storage, Epoch(0), Epoch(params.pipeline_len));
+
+        // Advance to epoch `default_past_epochs`
+        let mut current_epoch = Epoch(0);
+        for _ in 0..default_past_epochs {
+            let votes = get_default_true_votes(
+                &shell.wl_storage,
+                shell.wl_storage.storage.block.epoch,
+            );
+            current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None);
+        }
+        assert_eq!(shell.wl_storage.storage.block.epoch.0, default_past_epochs);
+        assert_eq!(current_epoch.0, default_past_epochs);
+
+        check_is_data(
+            &shell.wl_storage,
+            Epoch(0),
+            Epoch(params.pipeline_len + default_past_epochs),
+        );
+
+        // Advance one more epoch, which should purge the data for epoch 0 in
+        // everything except the consensus validator set
+        let votes = get_default_true_votes(
+            &shell.wl_storage,
+            shell.wl_storage.storage.block.epoch,
+        );
+        current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None);
+        assert_eq!(current_epoch.0, default_past_epochs + 1);
+
+        check_is_data(
+            &shell.wl_storage,
+            Epoch(1),
+            Epoch(params.pipeline_len + default_past_epochs + 1),
+        );
+        assert!(
+            !consensus_val_set
+                .at(&Epoch(0))
+                .is_empty(&shell.wl_storage)
+                .unwrap()
+        );
+        assert!(
+            validator_positions
+                .at(&Epoch(0))
+                .is_empty(&shell.wl_storage)
+                .unwrap()
+        );
+        assert!(
+            all_validator_addresses
+                .at(&Epoch(0))
+                .is_empty(&shell.wl_storage)
+                .unwrap()
+        );
+
+        // Advance to the epoch `consensus_val_set_len` + 1
+        loop {
+            assert!(
+                !consensus_val_set
+                    .at(&Epoch(0))
+                    .is_empty(&shell.wl_storage)
+                    .unwrap()
+            );
+            let votes = get_default_true_votes(
+                &shell.wl_storage,
+                shell.wl_storage.storage.block.epoch,
+            );
+            current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None);
+            if current_epoch.0 == consensus_val_set_len + 1 {
+                break;
+            }
+        }
+
+        assert!(
+            consensus_val_set
+                .at(&Epoch(0))
+                .is_empty(&shell.wl_storage)
+                .unwrap()
+        );
+
+        // Advance one more epoch
+        let votes = get_default_true_votes(
+            &shell.wl_storage,
+            shell.wl_storage.storage.block.epoch,
+        );
+        current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None);
+        for ep in Epoch::default().iter_range(2) {
+            assert!(
+                consensus_val_set
+                    .at(&ep)
+                    .is_empty(&shell.wl_storage)
+                    .unwrap()
+            );
+        }
+        for ep in Epoch::iter_bounds_inclusive(
+            Epoch(2),
+            current_epoch + params.pipeline_len,
+        ) {
+            assert!(
+                !consensus_val_set
+                    .at(&ep)
+                    .is_empty(&shell.wl_storage)
+                    .unwrap()
+            );
+        }
+
+        Ok(())
+    }
+
     fn get_default_true_votes<S>(storage: &S, epoch: Epoch) -> Vec<VoteInfo>
     where
         S: StorageRead,
diff --git a/benches/lib.rs b/benches/lib.rs
index 47645abdf4..d247d3872d 100644
--- a/benches/lib.rs
+++ b/benches/lib.rs
@@ -267,10 +267,8 @@ impl BenchShell {
     }

     pub fn advance_epoch(&mut self) {
-        let pipeline_len =
-            proof_of_stake::read_pos_params(&self.inner.wl_storage)
-                .unwrap()
-                .pipeline_len;
+        let params =
+            proof_of_stake::read_pos_params(&self.inner.wl_storage).unwrap();

         self.wl_storage.storage.block.epoch =
             self.wl_storage.storage.block.epoch.next();

         proof_of_stake::copy_validator_sets_and_positions(
             &mut self.wl_storage,
+            &params,
             current_epoch,
-            current_epoch + pipeline_len,
+            current_epoch + params.pipeline_len,
         )
         .unwrap();
     }
diff --git a/proof_of_stake/src/epoched.rs b/proof_of_stake/src/epoched.rs
index 7363644da1..801fd0dcb1 100644
--- a/proof_of_stake/src/epoched.rs
+++ b/proof_of_stake/src/epoched.rs
@@ -168,7 +168,7 @@ where
     /// kept is dropped. If the oldest stored epoch is not already
     /// associated with some value, the latest value from the dropped
     /// values, if any, is associated with it.
-    fn update_data<S>(
+    pub fn update_data<S>(
         &self,
         storage: &mut S,
         params: &PosParams,
@@ -335,7 +335,8 @@ where
         S: StorageWrite + StorageRead,
     {
         let key = self.get_last_update_storage_key();
-        storage.write(&key, epoch)
+        storage.write(&key, epoch)?;
+        self.set_oldest_epoch(storage, epoch)
     }

     fn get_last_update_storage_key(&self) -> storage::Key {
@@ -368,6 +369,109 @@ where
         let key = self.get_last_update_storage_key();
         storage.write(&key, current_epoch)
     }
+
+    fn get_oldest_epoch_storage_key(&self) -> storage::Key {
+        self.storage_prefix
+            .push(&OLDEST_EPOCH_SUB_KEY.to_owned())
+            .unwrap()
+    }
+
+    fn get_oldest_epoch<S>(
+        &self,
+        storage: &S,
+    ) -> storage_api::Result<Option<Epoch>>
+    where
+        S: StorageRead,
+    {
+        let key = self.get_oldest_epoch_storage_key();
+        storage.read(&key)
+    }
+
+    fn set_oldest_epoch<S>(
+        &self,
+        storage: &mut S,
+        new_oldest_epoch: Epoch,
+    ) -> storage_api::Result<()>
+    where
+        S: StorageRead + StorageWrite,
+    {
+        let key = self.get_oldest_epoch_storage_key();
+        storage.write(&key, new_oldest_epoch)
+    }
+
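+    /// Subtract the `PastEpochs` offset from the given epoch, saturating
+    /// at `Epoch(0)`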
+    fn sub_past_epochs(params: &PosParams, epoch: Epoch) -> Epoch {
+        Epoch(
+            epoch
+                .0
+                .checked_sub(PastEpochs::value(params))
+                .unwrap_or_default(),
+        )
+    }
+
+    /// Update the data by removing old epochs.
+    /// TODO: should we consider more complex handling of empty epochs in the
+    /// data below?
+    pub fn update_data<S>(
+        &self,
+        storage: &mut S,
+        params: &PosParams,
+        current_epoch: Epoch,
+    ) -> storage_api::Result<()>
+    where
+        S: StorageRead + StorageWrite,
+    {
+        let last_update = self.get_last_update(storage)?;
+        let oldest_epoch = self.get_oldest_epoch(storage)?;
+        tracing::debug!(
+            "Last update = {:?}, oldest epoch = {:?}",
+            last_update,
+            oldest_epoch
+        );
+        if let (Some(last_update), Some(oldest_epoch)) =
+            (last_update, oldest_epoch)
+        {
+            let oldest_to_keep = current_epoch
+                .0
+                .checked_sub(PastEpochs::value(params))
+                .unwrap_or_default();
+            if oldest_epoch.0 < oldest_to_keep {
+                let diff = oldest_to_keep - oldest_epoch.0;
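+                // For example, if `PastEpochs::value(params) == 2`,
+                // `current_epoch == 5` and `oldest_epoch == 1`, then
+                // `oldest_to_keep == 3` and `diff == 2`, so the data for
+                // epochs 1 and 2 is removed below.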
+                tracing::debug!(
+                    "Trimming nested epoched data in epoch {current_epoch}, \
+                     last updated at {last_update}."
+                );
+                let data_handler = self.get_data_handler();
+                // Remove the inner-map data for each epoch older than the
+                // new oldest epoch
+                for epoch in oldest_epoch.iter_range(diff) {
+                    let was_data = data_handler.remove_all(storage, &epoch)?;
+                    if was_data {
+                        tracing::debug!(
+                            "Removed inner map data at epoch {epoch}"
+                        );
+                    } else {
+                        tracing::debug!("No data to remove at epoch {epoch}");
+                    }
+                }
+                let new_oldest_epoch =
+                    Self::sub_past_epochs(params, current_epoch);
+
+                // if !data_handler.contains(storage, &new_oldest_epoch)? {
+                //     panic!("WARNING: no data existing in {new_oldest_epoch}");
+                // }
+                self.set_oldest_epoch(storage, new_oldest_epoch)?;
+
+                // Update the epoch of the last update to the current epoch
+                let key = self.get_last_update_storage_key();
+                storage.write(&key, current_epoch)?;
+                return Ok(());
+            }
+        }
+
+        Ok(())
+    }
 }

 impl
diff --git a/proof_of_stake/src/lib.rs b/proof_of_stake/src/lib.rs
index 3645a84d05..97487d8c33 100644
--- a/proof_of_stake/src/lib.rs
+++ b/proof_of_stake/src/lib.rs
@@ -505,7 +505,12 @@ where
     credit_tokens(storage, &staking_token, &ADDRESS, total_bonded)?;
     // Copy the genesis validator set into the pipeline epoch as well
     for epoch in (current_epoch.next()).iter_range(params.pipeline_len) {
-        copy_validator_sets_and_positions(storage, current_epoch, epoch)?;
+        copy_validator_sets_and_positions(
+            storage,
+            &params,
+            current_epoch,
+            epoch,
+        )?;
     }

     tracing::debug!("Genesis initialized");
@@ -1554,6 +1559,7 @@ where
 /// Validator sets and positions copying into a future epoch
 pub fn copy_validator_sets_and_positions<S>(
     storage: &mut S,
+    params: &PosParams,
     current_epoch: Epoch,
     target_epoch: Epoch,
 ) -> storage_api::Result<()>
@@ -1623,6 +1629,9 @@ where
             .at(&val_stake)
             .insert(storage, val_position, val_address)?;
     }
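+    // NOTE: `update_data` prunes old epochs from each epoched structure as
+    // part of this copy, replacing the separate
+    // `purge_validator_sets_for_old_epoch` step that previously ran in
+    // `finalize_block` (removed below)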
+    // Purge consensus and below-capacity validator sets
+    consensus_validator_set.update_data(storage, params, current_epoch)?;
+    below_capacity_validator_set.update_data(storage, params, current_epoch)?;

     // Copy validator positions
     let mut positions = HashMap::<Address, Position>::default();
@@ -1641,6 +1650,13 @@ where
     }
     validator_set_positions_handle.set_last_update(storage, current_epoch)?;

+    // Purge old epochs of validator positions
+    validator_set_positions_handle.update_data(
+        storage,
+        params,
+        current_epoch,
+    )?;
+
     // Copy set of all validator addresses
     let mut all_validators = HashSet::<Address>::default();
     let validator_addresses_handle = validator_addresses_handle();
@@ -1656,6 +1672,9 @@ where
         debug_assert!(!was_in);
     }

+    // Purge old epochs of all validator addresses
+    validator_addresses_handle.update_data(storage, params, current_epoch)?;
+
     Ok(())
 }

@@ -1700,27 +1719,6 @@ where
     total_consensus_stake_key_handle().set(storage, total, epoch, 0)
 }

-/// Purge the validator sets from the epochs older than the current epoch
-/// minus `STORE_VALIDATOR_SETS_LEN`
-pub fn purge_validator_sets_for_old_epoch<S>(
-    storage: &mut S,
-    epoch: Epoch,
-) -> storage_api::Result<()>
-where
-    S: StorageRead + StorageWrite,
-{
-    if Epoch(STORE_VALIDATOR_SETS_LEN) < epoch {
-        let old_epoch = epoch - STORE_VALIDATOR_SETS_LEN - 1;
-        consensus_validator_set_handle()
-            .get_data_handler()
-            .remove_all(storage, &old_epoch)?;
-        below_capacity_validator_set_handle()
-            .get_data_handler()
-            .remove_all(storage, &old_epoch)?;
-    }
-    Ok(())
-}
-
 /// Read the position of the validator in the subset of validators that have
 /// the same bonded stake. This information is held in its own epoched
 /// structure in addition to being inside the validator sets.
diff --git a/proof_of_stake/src/tests.rs b/proof_of_stake/src/tests.rs
index 4465bdc687..ef65b5d58e 100644
--- a/proof_of_stake/src/tests.rs
+++ b/proof_of_stake/src/tests.rs
@@ -42,7 +42,7 @@ use crate::{
     bond_tokens, bonds_and_unbonds, consensus_validator_set_handle,
     copy_validator_sets_and_positions, find_validator_by_raw_hash,
     get_num_consensus_validators, insert_validator_into_validator_set,
-    is_validator, process_slashes, purge_validator_sets_for_old_epoch,
+    is_validator, process_slashes,
     read_below_capacity_validator_set_addresses_with_stake,
     read_below_threshold_validator_set_addresses,
     read_consensus_validator_set_addresses_with_stake, read_total_stake,
@@ -2047,17 +2047,18 @@ fn get_tendermint_set_updates(
 }

 /// Advance to the next epoch. Returns the new epoch.
-fn advance_epoch(s: &mut TestWlStorage, params: &OwnedPosParams) -> Epoch {
+fn advance_epoch(s: &mut TestWlStorage, params: &PosParams) -> Epoch {
     s.storage.block.epoch = s.storage.block.epoch.next();
     let current_epoch = s.storage.block.epoch;
     store_total_consensus_stake(s, current_epoch).unwrap();
     copy_validator_sets_and_positions(
         s,
+        params,
         current_epoch,
         current_epoch + params.pipeline_len,
     )
     .unwrap();
-    purge_validator_sets_for_old_epoch(s, current_epoch).unwrap();
     // process_slashes(s, current_epoch).unwrap();
     // dbg!(current_epoch);
     current_epoch