new impl for purging old validator sets
brentstone committed Sep 28, 2023
1 parent d7e4769 commit 1ac46a0
Showing 5 changed files with 308 additions and 36 deletions.
178 changes: 174 additions & 4 deletions apps/src/lib/node/ledger/shell/finalize_block.rs
@@ -100,17 +100,14 @@ where
                 namada_proof_of_stake::read_pos_params(&self.wl_storage)?;
             namada_proof_of_stake::copy_validator_sets_and_positions(
                 &mut self.wl_storage,
+                &pos_params,
                 current_epoch,
                 current_epoch + pos_params.pipeline_len,
             )?;
             namada_proof_of_stake::store_total_consensus_stake(
                 &mut self.wl_storage,
                 current_epoch,
             )?;
-            namada_proof_of_stake::purge_validator_sets_for_old_epoch(
-                &mut self.wl_storage,
-                current_epoch,
-            )?;
         }
 
         // Invariant: Has to be applied before `record_slashes_from_evidence`
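Editorial note: the explicit `purge_validator_sets_for_old_epoch` call is removed above, and `copy_validator_sets_and_positions` now takes the PoS parameters. Purging of old validator sets instead happens inside the epoched data structures themselves, which record their oldest stored epoch and trim stale entries when updated; see `update_data` in proof_of_stake/src/epoched.rs below, and the sketch after that file's hunks.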
@@ -3823,6 +3820,179 @@ mod test_finalize_block {
Ok(())
}

#[test]
fn test_purge_validator_information() -> storage_api::Result<()> {
        // Set up the network with pipeline_len = 2, unbonding_len = 4
let num_validators = 4_u64;
let (mut shell, _recv, _, _) = setup_with_cfg(SetupCfg {
last_height: 0,
num_validators,
});
let mut params = read_pos_params(&shell.wl_storage).unwrap();
params.owned.unbonding_len = 4;
// params.owned.max_validator_slots = 3;
// params.owned.validator_stake_threshold = token::Amount::zero();
write_pos_params(&mut shell.wl_storage, &params.owned)?;

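        // The consensus validator set is kept for `max_proposal_period`
        // epochs beyond the default past-epoch retention, presumably so
        // that governance can still look up voting power for proposals
        // spanning up to `max_proposal_period` epochs (editorial note;
        // this rationale is inferred, not stated in the commit).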
let max_proposal_period = params.max_proposal_period;
let default_past_epochs = 2;
let consensus_val_set_len = max_proposal_period + default_past_epochs;

let consensus_val_set =
namada_proof_of_stake::consensus_validator_set_handle();
// let below_cap_val_set =
// namada_proof_of_stake::below_capacity_validator_set_handle();
let validator_positions =
namada_proof_of_stake::validator_set_positions_handle();
let all_validator_addresses =
namada_proof_of_stake::validator_addresses_handle();

let consensus_set: Vec<WeightedValidator> =
read_consensus_validator_set_addresses_with_stake(
&shell.wl_storage,
Epoch::default(),
)
.unwrap()
.into_iter()
.collect();
let val1 = consensus_set[0].clone();
let pkh1 = get_pkh_from_address(
&shell.wl_storage,
&params,
val1.address,
Epoch::default(),
);

// Finalize block 1
next_block_for_inflation(&mut shell, pkh1.clone(), vec![], None);

let votes = get_default_true_votes(&shell.wl_storage, Epoch::default());
assert!(!votes.is_empty());

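        // Assert that every epoch in the inclusive range `start..=end`
        // still has consensus-set, position, and address data in storage.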
let check_is_data = |storage: &WlStorage<_, _>,
start: Epoch,
end: Epoch| {
for ep in Epoch::iter_bounds_inclusive(start, end) {
assert!(!consensus_val_set.at(&ep).is_empty(storage).unwrap());
// assert!(!below_cap_val_set.at(&ep).is_empty(storage).
// unwrap());
assert!(
!validator_positions.at(&ep).is_empty(storage).unwrap()
);
assert!(
!all_validator_addresses.at(&ep).is_empty(storage).unwrap()
);
}
};

        // Check that there is validator data for epochs 0 through `pipeline_len`
check_is_data(&shell.wl_storage, Epoch(0), Epoch(params.pipeline_len));

// Advance to epoch `default_past_epochs`
let mut current_epoch = Epoch(0);
for _ in 0..default_past_epochs {
let votes = get_default_true_votes(
&shell.wl_storage,
shell.wl_storage.storage.block.epoch,
);
current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None);
}
assert_eq!(shell.wl_storage.storage.block.epoch.0, default_past_epochs);
assert_eq!(current_epoch.0, default_past_epochs);

check_is_data(
&shell.wl_storage,
Epoch(0),
Epoch(params.pipeline_len + default_past_epochs),
);

// Advance one more epoch, which should purge the data for epoch 0 in
// everything except the consensus validator set
let votes = get_default_true_votes(
&shell.wl_storage,
shell.wl_storage.storage.block.epoch,
);
current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None);
assert_eq!(current_epoch.0, default_past_epochs + 1);

check_is_data(
&shell.wl_storage,
Epoch(1),
Epoch(params.pipeline_len + default_past_epochs + 1),
);
assert!(
!consensus_val_set
.at(&Epoch(0))
.is_empty(&shell.wl_storage)
.unwrap()
);
assert!(
validator_positions
.at(&Epoch(0))
.is_empty(&shell.wl_storage)
.unwrap()
);
assert!(
all_validator_addresses
.at(&Epoch(0))
.is_empty(&shell.wl_storage)
.unwrap()
);

        // Advance to epoch `consensus_val_set_len` + 1
loop {
assert!(
!consensus_val_set
.at(&Epoch(0))
.is_empty(&shell.wl_storage)
.unwrap()
);
let votes = get_default_true_votes(
&shell.wl_storage,
shell.wl_storage.storage.block.epoch,
);
current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None);
if current_epoch.0 == consensus_val_set_len + 1 {
break;
}
}

assert!(
consensus_val_set
.at(&Epoch(0))
.is_empty(&shell.wl_storage)
.unwrap()
);

// Advance one more epoch
let votes = get_default_true_votes(
&shell.wl_storage,
shell.wl_storage.storage.block.epoch,
);
current_epoch = advance_epoch(&mut shell, &pkh1, &votes, None);
for ep in Epoch::default().iter_range(2) {
assert!(
consensus_val_set
.at(&ep)
.is_empty(&shell.wl_storage)
.unwrap()
);
}
for ep in Epoch::iter_bounds_inclusive(
Epoch(2),
current_epoch + params.pipeline_len,
) {
assert!(
!consensus_val_set
.at(&ep)
.is_empty(&shell.wl_storage)
.unwrap()
);
}

Ok(())
}
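    // Editorial summary: `validator_positions` and
    // `all_validator_addresses` retain only `default_past_epochs` past
    // epochs, while the consensus validator set is retained for
    // `max_proposal_period + default_past_epochs` epochs before purging.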

fn get_default_true_votes<S>(storage: &S, epoch: Epoch) -> Vec<VoteInfo>
where
S: StorageRead,
9 changes: 4 additions & 5 deletions benches/lib.rs
@@ -267,19 +267,18 @@ impl BenchShell
     }
 
     pub fn advance_epoch(&mut self) {
-        let pipeline_len =
-            proof_of_stake::read_pos_params(&self.inner.wl_storage)
-                .unwrap()
-                .pipeline_len;
+        let params =
+            proof_of_stake::read_pos_params(&self.inner.wl_storage).unwrap();
 
         self.wl_storage.storage.block.epoch =
             self.wl_storage.storage.block.epoch.next();
         let current_epoch = self.wl_storage.storage.block.epoch;
 
         proof_of_stake::copy_validator_sets_and_positions(
             &mut self.wl_storage,
+            &params,
             current_epoch,
-            current_epoch + pipeline_len,
+            current_epoch + params.pipeline_len,
         )
         .unwrap();
     }
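(Note: the benches helper now reads the full `PosParams` and forwards them to `copy_validator_sets_and_positions`, matching the new signature used in `finalize_block` above.)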
108 changes: 106 additions & 2 deletions proof_of_stake/src/epoched.rs
@@ -168,7 +168,7 @@ where
     /// kept is dropped. If the oldest stored epoch is not already
     /// associated with some value, the latest value from the dropped
     /// values, if any, is associated with it.
-    fn update_data<S>(
+    pub fn update_data<S>(
         &self,
         storage: &mut S,
         params: &PosParams,
@@ -335,7 +335,8 @@ where
         S: StorageWrite + StorageRead,
     {
         let key = self.get_last_update_storage_key();
-        storage.write(&key, epoch)
+        storage.write(&key, epoch)?;
+        self.set_oldest_epoch(storage, epoch)
     }
 
     fn get_last_update_storage_key(&self) -> storage::Key {
@@ -368,6 +369,109 @@
let key = self.get_last_update_storage_key();
storage.write(&key, current_epoch)
}

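    // `OLDEST_EPOCH_SUB_KEY` is a storage sub-key constant defined
    // elsewhere in this commit (not visible in this hunk).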
fn get_oldest_epoch_storage_key(&self) -> storage::Key {
self.storage_prefix
.push(&OLDEST_EPOCH_SUB_KEY.to_owned())
.unwrap()
}

fn get_oldest_epoch<S>(
&self,
storage: &S,
) -> storage_api::Result<Option<Epoch>>
where
S: StorageRead,
{
let key = self.get_oldest_epoch_storage_key();
storage.read(&key)
}

fn set_oldest_epoch<S>(
&self,
storage: &mut S,
new_oldest_epoch: Epoch,
) -> storage_api::Result<()>
where
S: StorageRead + StorageWrite,
{
let key = self.get_oldest_epoch_storage_key();
storage.write(&key, new_oldest_epoch)
}

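    /// Subtract the `PastEpochs` retention length from `epoch`,
    /// saturating at `Epoch(0)`.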
fn sub_past_epochs(params: &PosParams, epoch: Epoch) -> Epoch {
Epoch(
epoch
.0
.checked_sub(PastEpochs::value(params))
.unwrap_or_default(),
)
}

/// Update data by removing old epochs
/// TODO: should we consider more complex handling of empty epochs in the
/// data below?
pub fn update_data<S>(
&self,
storage: &mut S,
params: &PosParams,
current_epoch: Epoch,
) -> storage_api::Result<()>
where
S: StorageRead + StorageWrite,
{
let last_update = self.get_last_update(storage)?;
let oldest_epoch = self.get_oldest_epoch(storage)?;
        tracing::debug!(
            "Last update = {last_update:?}, oldest epoch = {oldest_epoch:?}"
        );
if let (Some(last_update), Some(oldest_epoch)) =
(last_update, oldest_epoch)
{
let oldest_to_keep = current_epoch
.0
.checked_sub(PastEpochs::value(params))
.unwrap_or_default();
if oldest_epoch.0 < oldest_to_keep {
                let diff = oldest_to_keep - oldest_epoch.0;
                tracing::debug!(
                    "Trimming nested epoched data in epoch {current_epoch}, \
                     last updated at {last_update}."
                );
                let data_handler = self.get_data_handler();
                // Remove the inner map for every epoch before the new
                // oldest epoch. Unlike the non-nested `update_data`, the
                // latest removed value is not re-associated with the
                // oldest retained epoch (see the TODO above).
for epoch in oldest_epoch.iter_range(diff) {
let was_data = data_handler.remove_all(storage, &epoch)?;
if was_data {
tracing::debug!(
"Removed inner map data at epoch {epoch}"
);
} else {
tracing::debug!("WARNING: was no data in {epoch}");
}
}
let new_oldest_epoch =
Self::sub_past_epochs(params, current_epoch);

                // if !data_handler.contains(storage, &new_oldest_epoch)? {
                //     panic!("WARNING: no data existing in {new_oldest_epoch}");
                // }
self.set_oldest_epoch(storage, new_oldest_epoch)?;

// Update the epoch of the last update to the current epoch
let key = self.get_last_update_storage_key();
storage.write(&key, current_epoch)?;
return Ok(());
}
}

Ok(())
}
}

impl<Data, FutureEpochs, PastEpochs>
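To make the new purging behavior concrete, below is a minimal, self-contained model of the trimming that `update_data` performs. This is an editorial sketch over a plain `BTreeMap`; the real implementation works through the storage handles, the `PastEpochs` type parameter, and the oldest-epoch sub-key shown above.

use std::collections::BTreeMap;

/// Drop every epoch older than `current_epoch - past_epochs` and return
/// the new oldest epoch kept (models `update_data` + `sub_past_epochs`).
fn trim_old_epochs(
    data: &mut BTreeMap<u64, Vec<String>>,
    current_epoch: u64,
    past_epochs: u64,
) -> u64 {
    // Same saturating arithmetic as `sub_past_epochs`.
    let oldest_to_keep = current_epoch.saturating_sub(past_epochs);
    // Dropping the whole per-epoch entry models `remove_all` on the
    // nested data handler.
    let stale: Vec<u64> =
        data.range(..oldest_to_keep).map(|(ep, _)| *ep).collect();
    for ep in stale {
        data.remove(&ep);
    }
    oldest_to_keep
}

fn main() {
    let mut sets: BTreeMap<u64, Vec<String>> = (0..=5)
        .map(|ep| (ep, vec![format!("validator-set-{ep}")]))
        .collect();
    // With 2 past epochs retained, advancing to epoch 5 purges 0..=2.
    let new_oldest = trim_old_epochs(&mut sets, 5, 2);
    assert_eq!(new_oldest, 3);
    assert_eq!(sets.keys().copied().collect::<Vec<_>>(), vec![3, 4, 5]);
}

With two past epochs retained, reaching epoch `default_past_epochs + 1` purges epoch 0, which is exactly the window the new test in finalize_block.rs checks for `validator_positions` and `all_validator_addresses`.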
