Skip to content

Commit

Permalink
Merge remote-tracking branch 'origin/master' into agustin-xcm-precomp…
Browse files Browse the repository at this point in the history
…ile-tests
  • Loading branch information
Agusrodri committed Dec 23, 2024
2 parents e9ea987 + d09a674 commit 2f095dd
Show file tree
Hide file tree
Showing 8 changed files with 24 additions and 558 deletions.
10 changes: 5 additions & 5 deletions .github/workflows/build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -299,7 +299,7 @@ jobs:
permissions:
contents: read
pull-requests: write
if: github.event_name == 'pull_request'
if: ${{ github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork }}
needs: ["set-tags", "build"]
env:
GH_TOKEN: ${{ github.token }}
Expand Down Expand Up @@ -610,7 +610,7 @@ jobs:

typescript-tracing-tests:
if: >
(github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository) ||
(github.event_name == 'pull_request' && !github.event.pull_request.head.repo.fork) ||
(github.event_name == 'push' && github.ref == 'refs/heads/master')
runs-on:
labels: bare-metal
Expand Down Expand Up @@ -720,7 +720,7 @@ jobs:
labels: bare-metal
permissions:
contents: read
needs: ["set-tags", "build", "typescript-tracing-tests"]
needs: ["set-tags", "build"]
strategy:
fail-fast: false
max-parallel: 1
Expand Down Expand Up @@ -778,7 +778,7 @@ jobs:
chopsticks-upgrade-test:
runs-on:
labels: bare-metal
needs: ["set-tags", "build", "typescript-tracing-tests"]
needs: ["set-tags", "build"]
strategy:
fail-fast: false
matrix:
Expand Down Expand Up @@ -825,7 +825,7 @@ jobs:
zombie_upgrade_test:
runs-on:
labels: bare-metal
needs: ["set-tags", "build", "typescript-tracing-tests"]
needs: ["set-tags", "build"]
strategy:
fail-fast: false
max-parallel: 1
Expand Down
228 changes: 0 additions & 228 deletions pallets/moonbeam-lazy-migrations/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,6 @@ environmental::environmental!(MIGRATING_FOREIGN_ASSETS: bool);
pub mod pallet {
use super::*;
use crate::foreign_asset::ForeignAssetMigrationStatus;
use cumulus_primitives_storage_weight_reclaim::get_proof_size;
use sp_core::{H160, U256};

pub const ARRAY_LIMIT: u32 = 1000;
Expand Down Expand Up @@ -106,8 +105,6 @@ pub mod pallet {
ContractMetadataAlreadySet,
/// Contract does not exist
ContractNotExist,
/// The key lengths exceeds the maximum allowed
KeyTooLong,
/// The symbol length exceeds the maximum allowed
SymbolTooLong,
/// The name length exceeds the maximum allowed
Expand All @@ -128,231 +125,6 @@ pub mod pallet {
ApprovalFailed,
}

/// Proof-size budget assumed for migrating a single storage item; used by
/// `handle_migration` to derive how many items fit in the remaining block weight.
/// NOTE(review): the 30 KB figure is asserted here without visible derivation — presumably
/// an upper bound on key+value+trie-node overhead per item; confirm against benchmarks.
pub(crate) const MAX_ITEM_PROOF_SIZE: u64 = 30 * 1024; // 30 KB
/// Proof size reserved (not spent on migration) as headroom; `handle_migration`
/// subtracts this from the remaining weight before sizing the batch.
pub(crate) const PROOF_SIZE_BUFFER: u64 = 100 * 1024; // 100 KB

#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
    /// Drives the lazy state migration using whatever weight is left in the block.
    ///
    /// The returned weight is the measured proof-size growth caused by the
    /// migration work plus the declared database read/write weight.
    fn on_idle(_n: BlockNumberFor<T>, remaining_weight: Weight) -> Weight {
        // Snapshot the recorded proof size around the migration so the actual
        // PoV growth can be attributed to this hook.
        let proof_before: u64 = get_proof_size().unwrap_or(0);
        let ops = Pallet::<T>::handle_migration(remaining_weight);
        let proof_after: u64 = get_proof_size().unwrap_or(0);

        let proof_growth = proof_after.saturating_sub(proof_before);
        let db_weight = T::DbWeight::get().reads_writes(ops.reads, ops.writes);

        // No ref-time component of our own: only the measured proof size and
        // the accounted storage operations are charged.
        Weight::from_parts(0, proof_growth).saturating_add(db_weight)
    }
}

/// Running tally of storage read/write operations performed during migration,
/// used by `on_idle` to charge the corresponding database weight.
#[derive(Default, Clone, PartialEq, Eq, Encode, Decode, Debug)]
pub(crate) struct ReadWriteOps {
    /// Number of storage reads performed.
    pub reads: u64,
    /// Number of storage writes performed.
    pub writes: u64,
}

impl ReadWriteOps {
    /// A fresh counter with zero reads and zero writes.
    pub fn new() -> Self {
        // The derived `Default` zero-initializes both counters.
        Default::default()
    }

    /// Record a single storage read.
    pub fn add_one_read(&mut self) {
        self.add_reads(1);
    }

    /// Record a single storage write.
    pub fn add_one_write(&mut self) {
        self.add_writes(1);
    }

    /// Record `reads` additional storage reads.
    pub fn add_reads(&mut self, reads: u64) {
        self.reads += reads;
    }

    /// Record `writes` additional storage writes.
    pub fn add_writes(&mut self, writes: u64) {
        self.writes += writes;
    }
}

/// Outcome of one `migrate_keys` batch.
#[derive(Clone)]
struct StateMigrationResult {
    /// Last key processed in this batch; `None` when storage iteration was exhausted.
    last_key: Option<StorageKey>,
    /// Static error description if the batch aborted early.
    error: Option<&'static str>,
    /// Number of keys visited (and re-written where present) in this batch.
    migrated: u64,
    /// Storage reads performed by this batch.
    reads: u64,
    /// Storage writes performed by this batch.
    writes: u64,
}

/// Result of probing storage for the key that follows a given one.
enum NextKeyResult {
    /// A following key exists.
    NextKey(StorageKey),
    /// Storage iteration reached the end; no key follows.
    NoMoreKeys,
    /// Static error description (e.g. the next key did not fit the bounded key type).
    Error(&'static str),
}

impl<T: Config> Pallet<T> {
    /// Handle the migration of the storage keys, returns the number of read and write operations.
    ///
    /// Flow: derive a per-block item budget from `remaining_weight`, load the
    /// persisted cursor (`StateMigrationStatusValue`), resume from the key after
    /// the cursor, migrate up to the budget, then persist the updated status
    /// (`Started`, `Complete`, or `Error`). Every storage access is tallied in
    /// the returned `ReadWriteOps` so `on_idle` can charge DB weight.
    pub(crate) fn handle_migration(remaining_weight: Weight) -> ReadWriteOps {
        let mut read_write_ops = ReadWriteOps::new();

        // maximum number of items that can be migrated in one block:
        // (available proof size - safety buffer) / assumed per-item proof size
        let migration_limit = remaining_weight
            .proof_size()
            .saturating_sub(PROOF_SIZE_BUFFER)
            .saturating_div(MAX_ITEM_PROOF_SIZE);

        // Not enough proof-size headroom this block: do nothing, charge nothing.
        if migration_limit == 0 {
            return read_write_ops;
        }

        let (status, mut migrated_keys) = StateMigrationStatusValue::<T>::get();
        read_write_ops.add_one_read();

        // Resolve the key to resume from, based on the persisted status.
        let next_key = match &status {
            // Fresh start: begin from the empty (default) key, i.e. the first key in storage.
            StateMigrationStatus::NotStarted => Default::default(),
            // Resume: the cursor stores the last migrated key, so step past it.
            StateMigrationStatus::Started(storage_key) => {
                let (reads, next_key_result) = Pallet::<T>::get_next_key(storage_key);
                read_write_ops.add_reads(reads);
                match next_key_result {
                    NextKeyResult::NextKey(next_key) => next_key,
                    // Nothing after the cursor: the migration is finished.
                    NextKeyResult::NoMoreKeys => {
                        StateMigrationStatusValue::<T>::put((
                            StateMigrationStatus::Complete,
                            migrated_keys,
                        ));
                        read_write_ops.add_one_write();
                        return read_write_ops;
                    }
                    // Persist the error; the bounded error type may truncate to
                    // empty via `unwrap_or_default` if the message doesn't fit.
                    NextKeyResult::Error(e) => {
                        StateMigrationStatusValue::<T>::put((
                            StateMigrationStatus::Error(
                                e.as_bytes().to_vec().try_into().unwrap_or_default(),
                            ),
                            migrated_keys,
                        ));
                        read_write_ops.add_one_write();
                        return read_write_ops;
                    }
                }
            }
            // Terminal states: nothing left to do.
            StateMigrationStatus::Complete | StateMigrationStatus::Error(_) => {
                return read_write_ops;
            }
        };

        let res = Pallet::<T>::migrate_keys(next_key, migration_limit);
        migrated_keys += res.migrated;
        read_write_ops.add_reads(res.reads);
        read_write_ops.add_writes(res.writes);

        // Persist the updated cursor/status based on how the batch ended.
        match (res.last_key, res.error) {
            // Iteration exhausted storage without error: done.
            (None, None) => {
                StateMigrationStatusValue::<T>::put((
                    StateMigrationStatus::Complete,
                    migrated_keys,
                ));
                read_write_ops.add_one_write();
            }
            // maybe we should store the previous key in the storage as well
            (_, Some(e)) => {
                StateMigrationStatusValue::<T>::put((
                    StateMigrationStatus::Error(
                        e.as_bytes().to_vec().try_into().unwrap_or_default(),
                    ),
                    migrated_keys,
                ));
                read_write_ops.add_one_write();
            }
            // Budget exhausted mid-way: store the last migrated key as the cursor.
            (Some(key), None) => {
                StateMigrationStatusValue::<T>::put((
                    StateMigrationStatus::Started(key),
                    migrated_keys,
                ));
                read_write_ops.add_one_write();
            }
        }

        read_write_ops
    }

    /// Tries to get the next key in the storage; yields `NoMoreKeys` if there are
    /// no more keys to migrate, or `Error` if the key is too long for `StorageKey`.
    /// The first tuple element is the number of `next_key` host calls performed.
    /// The well-known `:code:` key is skipped by recursing past it.
    fn get_next_key(key: &StorageKey) -> (u64, NextKeyResult) {
        if let Some(next) = sp_io::storage::next_key(key) {
            // Fallible conversion into the bounded StorageKey type.
            let next: Result<StorageKey, _> = next.try_into();
            match next {
                Ok(next_key) => {
                    if next_key.as_slice() == sp_core::storage::well_known_keys::CODE {
                        // Skip the runtime code blob; count this probe plus the recursive ones.
                        let (reads, next_key_res) = Pallet::<T>::get_next_key(&next_key);
                        return (1 + reads, next_key_res);
                    }
                    (1, NextKeyResult::NextKey(next_key))
                }
                Err(_) => (1, NextKeyResult::Error("Key too long")),
            }
        } else {
            (1, NextKeyResult::NoMoreKeys)
        }
    }

    /// Migrate maximum of `limit` keys starting from `start`, returns the next key to migrate.
    /// Returns `last_key: None` if there are no more keys to migrate, or an error
    /// if one occurred during migration. Migration here re-writes each existing
    /// value in place (read then set), which rewrites the entry in the new trie format
    /// — NOTE(review): the write-back-same-value intent is presumed; confirm
    /// against the state-migration design this pallet implements.
    fn migrate_keys(start: StorageKey, limit: u64) -> StateMigrationResult {
        let mut key = start;
        let mut migrated = 0;
        let mut next_key_reads = 0;
        let mut writes = 0;

        while migrated < limit {
            // Read the value and write it back unchanged (only if it exists).
            let data = sp_io::storage::get(&key);
            if let Some(data) = data {
                sp_io::storage::set(&key, &data);
                writes += 1;
            }

            migrated += 1;

            // Only look up the successor if we still have budget for another item.
            if migrated < limit {
                let (reads, next_key_res) = Pallet::<T>::get_next_key(&key);
                next_key_reads += reads;

                match next_key_res {
                    NextKeyResult::NextKey(next_key) => {
                        key = next_key;
                    }
                    // End of storage: signal completion with last_key = None.
                    NextKeyResult::NoMoreKeys => {
                        return StateMigrationResult {
                            last_key: None,
                            error: None,
                            migrated,
                            // one `get` per migrated key plus the `next_key` probes
                            reads: migrated + next_key_reads,
                            writes,
                        };
                    }
                    // Abort: report the key we stopped at and the error.
                    NextKeyResult::Error(e) => {
                        return StateMigrationResult {
                            last_key: Some(key),
                            error: Some(e),
                            migrated,
                            reads: migrated + next_key_reads,
                            writes,
                        };
                    }
                };
            }
        }

        // Budget exhausted: return the last key processed as the resume cursor.
        StateMigrationResult {
            last_key: Some(key),
            error: None,
            migrated,
            reads: migrated + next_key_reads,
            writes,
        }
    }
}

#[pallet::call]
impl<T: Config> Pallet<T>
where
Expand Down
16 changes: 3 additions & 13 deletions pallets/moonbeam-lazy-migrations/src/mock.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,11 +19,8 @@
use super::*;
use crate as pallet_moonbeam_lazy_migrations;
use frame_support::traits::AsEnsureOriginWithArg;
use frame_support::{
construct_runtime, parameter_types,
traits::Everything,
weights::{RuntimeDbWeight, Weight},
};
use frame_support::weights::constants::RocksDbWeight;
use frame_support::{construct_runtime, parameter_types, traits::Everything, weights::Weight};
use frame_system::{EnsureRoot, EnsureSigned};
use pallet_asset_manager::AssetRegistrar;
use pallet_evm::{EnsureAddressNever, EnsureAddressRoot, FrameSystemAccountProvider};
Expand Down Expand Up @@ -62,16 +59,9 @@ parameter_types! {
pub const SS58Prefix: u8 = 42;
}

parameter_types! {
pub const MockDbWeight: RuntimeDbWeight = RuntimeDbWeight {
read: 1_000_000,
write: 1,
};
}

impl frame_system::Config for Test {
type BaseCallFilter = Everything;
type DbWeight = MockDbWeight;
type DbWeight = RocksDbWeight;
type RuntimeOrigin = RuntimeOrigin;
type RuntimeTask = RuntimeTask;
type Nonce = u64;
Expand Down
Loading

0 comments on commit 2f095dd

Please sign in to comment.