Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Prune Merkle tree of bridge pool #2206

Merged
merged 4 commits into from
Dec 7, 2023
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
- Prune merkle tree of bridge pool
([\#2110](https://github.com/anoma/namada/issues/2110))
44 changes: 43 additions & 1 deletion apps/src/lib/node/ledger/storage/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -55,13 +55,15 @@ mod tests {

use itertools::Itertools;
use namada::core::ledger::masp_conversions::update_allowed_conversions;
use namada::ledger::eth_bridge::storage::bridge_pool;
use namada::ledger::gas::STORAGE_ACCESS_GAS_PER_BYTE;
use namada::ledger::ibc::storage::ibc_key;
use namada::ledger::parameters::{EpochDuration, Parameters};
use namada::ledger::storage::write_log::WriteLog;
use namada::ledger::storage::{types, StoreType, WlStorage};
use namada::ledger::storage_api::{self, StorageWrite};
use namada::types::chain::ChainId;
use namada::types::ethereum_events::Uint;
use namada::types::hash::Hash;
use namada::types::storage::{BlockHash, BlockHeight, Key};
use namada::types::time::DurationSecs;
Expand Down Expand Up @@ -487,6 +489,9 @@ mod tests {
let value_bytes = types::encode(&storage.block.height);
storage.write(&key, value_bytes)?;
}
let key = bridge_pool::get_nonce_key();
let bytes = types::encode(&Uint::default());
storage.write(&key, bytes)?;

// Update and commit
let hash = BlockHash::default();
Expand Down Expand Up @@ -581,6 +586,11 @@ mod tests {
None,
Some(5),
);
let bp_nonce_key = bridge_pool::get_nonce_key();
let nonce = Uint::default();
let bytes = types::encode(&nonce);
storage.write(&bp_nonce_key, bytes).unwrap();

storage
.begin_block(BlockHash::default(), BlockHeight(1))
.expect("begin_block failed");
Expand All @@ -606,6 +616,10 @@ mod tests {
.write(&key, types::encode(&value))
.expect("write failed");

let nonce = nonce + 1;
let bytes = types::encode(&nonce);
storage.write(&bp_nonce_key, bytes).unwrap();

storage.block.epoch = storage.block.epoch.next();
storage.block.pred_epochs.new_epoch(BlockHeight(6));
let batch = PersistentStorage::batch();
Expand All @@ -617,6 +631,11 @@ mod tests {
storage
.begin_block(BlockHash::default(), BlockHeight(11))
.expect("begin_block failed");

let nonce = nonce + 1;
let bytes = types::encode(&nonce);
storage.write(&bp_nonce_key, bytes).unwrap();

storage.block.epoch = storage.block.epoch.next();
storage.block.pred_epochs.new_epoch(BlockHeight(11));
let batch = PersistentStorage::batch();
Expand All @@ -630,7 +649,30 @@ mod tests {
"The tree at Height 5 shouldn't be able to be restored"
);
let result = storage.get_merkle_tree(6.into(), Some(StoreType::Ibc));
assert!(result.is_ok(), "The tree should be restored");
assert!(result.is_ok(), "The ibc tree should be restored");
let result =
storage.get_merkle_tree(6.into(), Some(StoreType::BridgePool));
assert!(result.is_ok(), "The bridge pool tree should be restored");

storage
.begin_block(BlockHash::default(), BlockHeight(12))
.expect("begin_block failed");

let nonce = nonce + 1;
let bytes = types::encode(&nonce);
storage.write(&bp_nonce_key, bytes).unwrap();
storage.block.epoch = storage.block.epoch.next();
storage.block.pred_epochs.new_epoch(BlockHeight(12));
let batch = PersistentStorage::batch();
storage.commit_block(batch).expect("commit failed");

// ibc tree should be able to be restored
let result = storage.get_merkle_tree(6.into(), Some(StoreType::Ibc));
assert!(result.is_ok(), "The ibc tree should be restored");
// bridge pool tree should be pruned because of the nonce
let result =
storage.get_merkle_tree(6.into(), Some(StoreType::BridgePool));
assert!(result.is_err(), "The bridge pool tree should be pruned");
}

/// Test the prefix iterator with RocksDB.
Expand Down
97 changes: 77 additions & 20 deletions core/src/ledger/storage/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,9 @@ pub use wl_storage::{
};

use super::gas::MEMORY_ACCESS_GAS_PER_BYTE;
use crate::ledger::eth_bridge::storage::bridge_pool::is_pending_transfer_key;
use crate::ledger::eth_bridge::storage::bridge_pool::{
self, is_pending_transfer_key,
};
use crate::ledger::gas::{
STORAGE_ACCESS_GAS_PER_BYTE, STORAGE_WRITE_GAS_PER_BYTE,
};
Expand All @@ -39,6 +41,7 @@ use crate::ledger::storage::merkle_tree::{
use crate::tendermint::merkle::proof::ProofOps;
use crate::types::address::{Address, EstablishedAddressGen, InternalAddress};
use crate::types::chain::{ChainId, CHAIN_ID_LENGTH};
use crate::types::ethereum_events::Uint;
use crate::types::ethereum_structs;
use crate::types::hash::{Error as HashError, Hash};
use crate::types::internal::{ExpiredTxsQueue, TxQueue};
Expand Down Expand Up @@ -1144,32 +1147,40 @@ where
if self.block.epoch.0 == 0 {
return Ok(());
}
if let Some(limit) = self.storage_read_past_height_limit {
if self.get_last_block_height().0 <= limit {
return Ok(());
}

let min_height = (self.get_last_block_height().0 - limit).into();
if let Some(epoch) = self.block.pred_epochs.get_epoch(min_height) {
if epoch.0 == 0 {
return Ok(());
}
// Remove stores at the previous epoch because the Merkle tree
// stores at the starting height of the epoch would be used to
// restore stores at a height (> min_height) in the epoch
for st in StoreType::iter_provable() {
self.db.prune_merkle_tree_store(batch, st, epoch.prev())?;
}
}
}
// remove non-provable stores at the previous epoch
// Prune non-provable stores at the previous epoch
for st in StoreType::iter_non_provable() {
self.db.prune_merkle_tree_store(
batch,
st,
self.block.epoch.prev(),
)?;
}
// Prune provable stores
let oldest_epoch = self.get_oldest_epoch();
if oldest_epoch.0 > 0 {
// Remove stores at the previous epoch because the Merkle tree
// stores at the starting height of the epoch would be used to
// restore stores at a height (> oldest_height) in the epoch
for st in StoreType::iter_provable() {
self.db.prune_merkle_tree_store(
batch,
st,
oldest_epoch.prev(),
)?;
}

// Prune the BridgePool subtree stores with invalid nonce
let mut epoch = self.get_oldest_epoch_with_valid_nonce()?;
while oldest_epoch < epoch {
epoch = epoch.prev();
self.db.prune_merkle_tree_store(
batch,
&StoreType::BridgePool,
epoch,
)?;
}
}

Ok(())
}

Expand All @@ -1182,6 +1193,52 @@ where
.unwrap_or_default()
}

/// Get the oldest epoch where we can read a value
pub fn get_oldest_epoch(&self) -> Epoch {
    // The oldest readable height is bounded by the configured
    // read-past-height limit; with no limit (or a limit that covers the
    // whole chain) everything from the first block is readable.
    let last_height = self.get_last_block_height().0;
    let oldest_height = self
        .storage_read_past_height_limit
        .filter(|limit| *limit < last_height)
        .map(|limit| BlockHeight(last_height - limit))
        .unwrap_or(BlockHeight(1));
    // Map the height to its epoch; default epoch if no epoch is recorded
    // for that height.
    self.block
        .pred_epochs
        .get_epoch(oldest_height)
        .unwrap_or_default()
}

/// Get oldest epoch which has the valid signed nonce of the bridge pool
pub fn get_oldest_epoch_with_valid_nonce(&self) -> Result<Epoch> {
let nonce_key = bridge_pool::get_nonce_key();
sug0 marked this conversation as resolved.
Show resolved Hide resolved
let (bytes, _) = self.read(&nonce_key)?;
let bytes = bytes.expect("Bridge pool nonce should exits");
let current_nonce =
Uint::try_from_slice(&bytes).map_err(Error::BorshCodingError)?;
let (mut epoch, _) = self.get_last_epoch();
// We don't need to check the older epochs because their Merkle tree
// snapshots have been already removed
let oldest_epoch = self.get_oldest_epoch();
// Look up the last valid epoch which has the previous nonce of the
// current one. It has the previous nonce, but it was
// incremented during the epoch.
while 0 < epoch.0 && oldest_epoch <= epoch {
epoch = epoch.prev();
let height =
match self.block.pred_epochs.get_start_height_of_epoch(epoch) {
Some(h) => h,
None => continue,
};
let (bytes, _) = self.read_with_height(&nonce_key, height)?;
let bytes = bytes.expect("Bridge pool nonce should exits");
let nonce = Uint::try_from_slice(&bytes)
.map_err(Error::BorshCodingError)?;
if nonce < current_nonce {
break;
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Isn't this returning at the first epoch with an invalid nonce instead of the last one matching the current nonce?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The nonce was incremented to the current one during the epoch, even though the epoch has the invalid nonce at its starting height. The Merkle tree at the epoch's start would be required to rebuild the tree at a later height within the epoch (after the nonce was incremented). So, the Merkle tree in that epoch should be treated as having a valid nonce.
There is a corner case: the nonce was incremented at the same height at which the epoch was updated. However, that Merkle tree snapshot would be pruned eventually anyway.

}
}
Ok(epoch)
}

/// Check it the given transaction's hash is already present in storage
pub fn has_replay_protection_entry(&self, hash: &Hash) -> Result<bool> {
self.db.has_replay_protection_entry(hash)
Expand Down
Loading